Dataset schema (per row):
  query            string    9 – 3.4k chars
  document         string    9 – 87.4k chars
  metadata         dict
  negatives        sequence  4 – 101 items
  negative_scores  sequence  4 – 101 items
  document_score   string    3 – 10 chars
  document_rank    string    102 distinct values
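For orientation, here is a minimal sketch of consuming one row with this schema. The Hugging Face-style load call and the dataset name are hypothetical stand-ins inferred from the viewer-style header above, not something this dump specifies:

from datasets import load_dataset  # assumes a Hugging Face-style dump

ds = load_dataset("code-search-triplets", split="train")  # hypothetical name
row = ds[0]

print(row["query"])            # natural-language description of the code
print(row["document"][:80])    # the paired (positive) code snippet
# every negative snippet carries a retrieval score
assert len(row["negatives"]) == len(row["negative_scores"])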
Compute one step of the map for a score (of the given axis and with given neighbours) as a function of the opposite score
def _one_step(self, gamma, axis, opp_scores):
    # Exponentiate the opposite-axis scores by gamma
    opp_exp = opp_scores**gamma
    s = _np.array([])
    # Each node's raw score is the sum of its neighbours' exponentiated scores
    for i in range(self.d[axis]):
        s = _np.append(s, _np.take(opp_exp, self._neighb[axis][i]).sum())
    # Normalize so the scores have unit mean
    return s/_np.mean(s)
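The snippet reads two attributes not shown here: self.d (node count per axis) and self._neighb (per-node neighbour indices on the opposite axis). A toy run under that inferred structure, with gamma = 1:

import numpy as _np

class _Toy:
    # Inferred host attributes: d[axis] = number of nodes on each axis,
    # _neighb[axis][i] = indices of node i's neighbours on the opposite axis.
    d = [3, 2]
    _neighb = [
        [[0], [0, 1], [1]],   # axis-0 nodes -> axis-1 neighbours
        [[0, 1], [1, 2]],     # axis-1 nodes -> axis-0 neighbours
    ]

# Call the function above with an explicit self and uniform opposite scores.
print(_one_step(_Toy(), gamma=1.0, axis=0, opp_scores=_np.ones(2)))
# With gamma = 1 the raw scores are the neighbour counts (1, 2, 1),
# normalized to unit mean: [0.75, 1.5, 0.75]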
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _score_to_distance_map(y_grid, x_grid, heatmap, points_y, points_x,\n score_distance_offset):\n y_diff = y_grid[:, :, tf.newaxis] - points_y\n x_diff = x_grid[:, :, tf.newaxis] - points_x\n distance = tf.math.sqrt(y_diff**2 + x_diff**2)\n return tf.math.divide(heatmap, distance + score_distance_offset)", "def fn(i, j):\n if i < 0 or j < 0 or obstacleGrid[i][j]: return 0\n if i == 0 and j == 0: return 1 \n return fn(i-1, j) + fn(i, j-1)", "def neighbours2((u,v)):\r\n\r\n return ((u-1, v+1), (u,v+1), (u+1,v+1), \r\n (u-1,v), (u+1,v),\r\n (u-1,v-1), (u,v-1), (u+1,v-1))", "def get_neighbours(self, pos: tuple):\n x, y = pos[0], pos[1]\n neighbours = [(x + 1, y), (x + 1, y + 1), (x, y + 1), (x - 1, y + 1),\n (x - 1, y), (x - 1, y - 1), (x, y - 1), (x + 1, y - 1)]\n\n return {k: self.move_cost(pos, k) for k in neighbours if self.within_map(k)}", "def update_scores(self):\n self.score[0] = (-1)*sum(self.board[self.board == -1])\n self.score[1] = sum(self.board[self.board == 1])\n #self.score[i] = sum(1 for j in range(len(stones_on_board)) if stones_on_board[j] == i)", "def __inverse_scores(scores):\n minobj = scores[0]\n normobj = scores - minobj if minobj < 0 else scores\n \n return (np.amax(normobj) + 1) - normobj", "def compute_positions(scores, layers, cells, direction):\n prior = 1/np.arange(3, layers+3)\n prior = prior/prior.sum()\n x = np.linspace(-5, 5, layers)[::-1] \n prior = 1/(1 + np.exp(-x))\n prior = prior/prior.sum()\n\n # Compute the probability depending on the direction in which we want to know where the change is\n if direction == \"lr\":\n col_scores = np.array([scores[np.arange(cells[0])*cells[1]+j, np.arange(cells[0])*cells[1]+j+1].sum() for j in range(layers)])\n elif direction == \"rl\":\n col_scores = np.array([scores[np.arange(cells[0])*cells[1]+cells[1]-1-j, np.arange(cells[0])*cells[1]+cells[1]-1-j-1].sum() for j in range(layers)])\n elif direction == \"tb\":\n col_scores = np.array([scores[np.arange(cells[1])+cells[1]*j, np.arange(cells[1])+cells[1]*(j+1)].sum() for j in range(layers)])\n elif direction == \"bt\":\n col_scores = np.array([scores[np.arange(cells[1])+cells[1]*(cells[0]-1-j), np.arange(cells[1])+cells[1]*(cells[0]-1-j-1)].sum() for j in range(layers)])\n \n # Apply softmax + multiply by prior -> Then get the most likely position\n col_scores = sm(col_scores)\n position = np.argmax(col_scores*prior)\n return position", "def fn(mask, j):\n ans = 0 \n for i in range(m): \n if not mask & (1<<i): \n ans = max(ans, fn(mask^(1<<i), j-1) + score[i][j])\n return ans", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def run(self, axis, gamma):\n \n # Trajectories of the main score to compute and the opposite one\n traj_s, traj_o = [_np.ones(self.d[axis])], [_np.ones(self.d[1-axis])]\n # Ranked indices of the scores\n rank_s, rank_o = _np.array([], dtype=int), _np.array([], dtype=int)\n # List of node indices that have reached the zero threshold\n zeros_s, zeros_o = _np.array([], dtype=int), _np.array([], dtype=int)\n \n # Main loop\n for t in range(int(self.params['t_max'])):\n \n # Computing the opposite score without approx\n o = self._one_step(gamma, 1-axis, traj_s[-1])\n rank_o, zeros_o = self._update_zero_rank(o, zeros_o, rank_o)\n traj_o = _np.concatenate((traj_o, [o]))\n \n # Computing the main score (given the opposite one) without approx\n s = self._one_step(gamma, axis, o)\n rank_s, zeros_s = self._update_zero_rank(s, zeros_s, rank_s)\n # Imposing the threshold to the score\n s[zeros_s] = self.params['low_bound']\n traj_s 
= _np.concatenate((traj_s, [s]))\n \n # Checking the convergence\n if self._converg_check(axis, t, traj_s):\n break\n\n # Finalize the ranking of the positive scores\n rank_s = _np.append(rank_s, _np.argsort(s)[len(zeros_s):])[::-1]\n rank_o = _np.append(rank_o, _np.argsort(o)[len(zeros_o):])[::-1]\n\n # Update the class variables\n self._update_vars(axis, traj_s, traj_o, rank_s, rank_o, t)\n \n if self.params['print_info']:\n print (\"Convergence in \" + str(t) + \" time steps.\")\n if t >= self.params['t_max']:\n print(\"Warning. Stationary state not reached.\")", "def fn(i, j):\n if grid[i][j] <= 0: return 0\n grid[i][j] *= -1 # mark as visited \n ans = 0\n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n: \n ans = max(ans, fn(ii, jj) - grid[i][j])\n grid[i][j] *= -1 # backtracking \n return ans", "def check_neighbours(coordinates):\n x_coord = coordinates[0]\n y_coord = coordinates[1]\n coordinates_value = 0\n for x_move in [-1, 0, 1]:\n x = x_coord + x_move\n for y_move in [-1, 0, 1]:\n y = y_coord + y_move\n try:\n value = grid[(x,y)]\n coordinates_value += value\n except KeyError:\n pass\n\n grid[coordinates] = coordinates_value\n # print(coordinates_value)\n return coordinates_value", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def forward(ctx, scores):\n idx = argmax(scores, dim=-1)\n scores_net = eye(scores.size(-1), device=scores.device)\n return scores_net[idx]", "def score(self, X, y):\n ...", "def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]", "def __call__(self, score_map, one_hot_label) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n assert not one_hot_label.requires_grad\n pi = one_hot_label.to(torch.float)\n\n assert not torch.any(torch.isinf(score_map))\n assert not torch.any(torch.isnan(score_map))\n\n log_qi = torch.log(self.clamp_softmax(score_map))\n\n assert not torch.any(torch.isnan(log_qi))\n\n log_fg_qi = log_qi[:, 1:, :, :]\n fg_pi = pi[:, 1:, :, :]\n fg_count = torch.sum(fg_pi, dim=(1, 2, 3)) + self.eps\n\n log_bg_qi = log_qi[:, 0:1, :, :]\n bg_pi = pi[:, 0:1, :, :]\n bg_count = torch.sum(bg_pi, dim=(1, 2, 3)) + self.eps\n\n fg_loss_ = torch.sum(fg_pi * log_fg_qi, dim=(1, 2, 3))\n fg_loss = -1 * torch.mean(fg_loss_ / fg_count) # mean reduce on batch\n\n bg_loss_ = torch.sum(bg_pi * log_bg_qi, dim=(1, 2, 3))\n bg_loss = -1 * torch.mean(bg_loss_ / bg_count) # mean reduce on batch\n\n total_loss = bg_loss + fg_loss\n assert not torch.any(torch.isnan(total_loss)), \\\n \"fg_loss: {} fg_count: {} bg_loss: {} bg_count: {}\".format(fg_loss, fg_count, bg_loss, bg_count)\n\n return total_loss, bg_loss, fg_loss", "def infonce_lower_bound(scores):\n nll = tf.reduce_mean(input_tensor=tf.linalg.diag_part(scores) - tf.reduce_logsumexp(input_tensor=scores, axis=1))\n mi = tf.math.log(tf.cast(scores.shape[0], tf.float32)) + nll\n return mi", "def fn(i, j):\n if i == 0 and j == 0: return grid[0][0], grid[0][0]\n if i < 0 or j < 0: return -inf, inf\n if grid[i][j] == 0: return 0, 0\n mx1, mn1 = fn(i-1, j) # from top\n mx2, mn2 = fn(i, j-1) # from left \n mx, mn = max(mx1, mx2)*grid[i][j], min(mn1, mn2)*grid[i][j]\n return (mx, mn) if grid[i][j] > 0 else (mn, mx)", "def styblinskitankfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n scores = np.zeros((x.shape[0], 1))\n for i in range(n):\n scores += x[:, i] ** 4 - 16 * x[:, i] 
** 2 + 5 * x[:, i]\n scores *= 0.5\n return scores", "def bidimensional_map_nonlin_2(h, t, x, y, x_0, y_0):\n\n gamma = 3 * np.pi\n r = np.sqrt(np.square(x - x_0) + np.square(y - y_0))\n\n f = lambda r: gamma * r\n\n return h(t - f(r))", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def return_map(self):\n idx1 = self.y[1]>np.pi\n idx2 = self.y[1]<np.pi\n \n if np.sum(idx1) > 0:\n self.y[1][idx1] = self.y[1][idx1]-np.floor((self.y[1][idx1]+np.pi)/(2*np.pi))*2*np.pi\n if np.sum(idx2) > 0:\n self.y[1][idx2] = self.y[1][idx2]-np.ceil((self.y[1][idx2]-np.pi)/(2*np.pi))*2*np.pi", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def schwefel220fcn(x: np.ndarray) -> np.ndarray:\n scores = np.sum(np.abs(x), axis=1)\n return scores", "def loss(self, targets, scores):\n return (2. * numpy.arctan(targets * scores) - 1.)**2", "def needleman_wunsch(x, y, lodict={}, gop=-2.5, gep=-1.75, local=False, indel=''):\n n, m = len(x), len(y)\n dp = np.zeros((n + 1, m + 1))\n pointers = np.zeros((n + 1, m + 1), np.int32)\n if not local:\n for i1, c1 in enumerate(x):\n if gop is None:\n dp[i1 + 1, 0] = lodict.get((c1, indel), gep)\n else:\n dp[i1 + 1, 0] = dp[i1, 0]+(gep if i1 + 1 > 1 else gop)\n pointers[i1 + 1, 0] = 1\n for i2, c2 in enumerate(y):\n if gop is None:\n dp[0, i2 + 1] = lodict.get((indel, c2), gep)\n else:\n dp[0, i2 + 1] = dp[0, i2]+(gep if i2 + 1 > 1 else gop)\n pointers[0, i2 + 1] = 2\n for i1, c1 in enumerate(x):\n for i2, c2 in enumerate(y):\n match = dp[i1, i2] + lodict.get(\n (c1, c2),\n 1 if c1 == c2 else -1)\n insert = dp[i1, i2 + 1] + (\n lodict.get((c1, indel), gep) if gop is None else\n gep if pointers[i1, i2 + 1] == 1 else gop)\n delet = dp[i1 + 1, i2] + (\n lodict.get((indel, c2), gep) if gop is None else\n gep if pointers[i1 + 1, i2] == 2 else gop)\n pointers[i1 + 1, i2 + 1] = p = np.argmax([match, insert, delet])\n max_score = [match, insert, delet][p]\n if local and max_score < 0:\n max_score = 0\n dp[i1 + 1, i2 + 1] = max_score\n alg = []\n if local:\n i, j = np.unravel_index(dp.argmax(), dp.shape)\n else:\n i, j = n, m\n score = dp[i, j]\n while (i > 0 or j > 0):\n pt = pointers[i, j]\n if pt == 0:\n i -= 1\n j -= 1\n alg = [(x[i], y[j])] + alg\n if pt == 1:\n i -= 1\n alg = [(x[i], indel)] + alg\n if pt == 2:\n j -= 1\n alg = [(indel, y[j])] + alg\n if local and dp[i, j] == 0:\n break\n return score, alg", "def ackleyn2fcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n assert n == 2, \"Ackley N. 
2 function is only defined on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = -200 * np.exp(-0.02 * np.sqrt(X**2 + Y**2))\n return scores", "def schwefel222fcn(x: np.ndarray) -> np.ndarray:\n absx = np.abs(x)\n scores = np.sum(absx, axis=1) + np.prod(absx, axis=1)\n return scores", "def soft_argmax(self, score_map):\n # (bs, feat_size * feat_size)\n score_vec = score_map.view((-1, self.feat_size * self.feat_size))\n prob_vec = nn.functional.softmax(score_vec, dim=1)\n\n if not hasattr(self, 'coord_x'):\n # generate coordinates and indexes\n self.indice = torch.arange(\n 0, self.feat_size, device=score_map.device).view(\n -1, 1) * self.stride\n # generate mesh-grid\n self.coord_x = self.indice.repeat((self.feat_size, 1)) \\\n .view((self.feat_size * self.feat_size,)).float()\n self.coord_y = self.indice.repeat((1, self.feat_size)) \\\n .view((self.feat_size * self.feat_size,)).float()\n\n soft_argmax_x = torch.sum((self.coord_x * prob_vec), dim=1)\n soft_argmax_y = torch.sum((self.coord_y * prob_vec), dim=1)\n return soft_argmax_x, soft_argmax_y", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def localize_advanced(self, scores):\n\n sz = scores.shape[-2:]\n\n if self.output_window is not None and getattr(self.params, 'perform_hn_without_windowing', False):\n scores_orig = scores.clone()\n\n scores_orig = torch.cat([scores_orig[..., (sz[0] + 1) // 2:, :], scores_orig[..., :(sz[0] + 1) // 2, :]], -2)\n scores_orig = torch.cat([scores_orig[..., :, (sz[1] + 1) // 2:], scores_orig[..., :, :(sz[1] + 1) // 2]], -1)\n\n scores *= self.output_window\n\n # Shift scores back\n scores = torch.cat([scores[...,(sz[0]+1)//2:,:], scores[...,:(sz[0]+1)//2,:]], -2)\n scores = torch.cat([scores[...,:,(sz[1]+1)//2:], scores[...,:,:(sz[1]+1)//2]], -1)\n\n # Find maximum\n max_score1, max_disp1 = dcf.max2d(scores)\n _, scale_ind = torch.max(max_score1, dim=0)\n max_score1 = max_score1[scale_ind]\n max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)\n target_disp1 = max_disp1 - self.output_sz // 2\n translation_vec1 = target_disp1 * (self.img_support_sz / self.output_sz) * self.target_scale\n\n if max_score1.item() < self.params.target_not_found_threshold:\n return translation_vec1, scale_ind, scores, 'not_found'\n\n if self.output_window is not None and getattr(self.params, 'perform_hn_without_windowing', False):\n scores = scores_orig\n\n # Mask out target neighborhood\n target_neigh_sz = self.params.target_neighborhood_scale * self.target_sz / self.target_scale\n tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)\n tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])\n tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)\n tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])\n scores_masked = scores[scale_ind:scale_ind+1,...].clone()\n scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0\n\n # Find new maximum\n max_score2, max_disp2 = dcf.max2d(scores_masked)\n max_disp2 = max_disp2.float().cpu().view(-1)\n target_disp2 = max_disp2 - self.output_sz // 2\n translation_vec2 = target_disp2 * (self.img_support_sz / self.output_sz) * self.target_scale\n\n # Handle the different cases\n if max_score2 > self.params.distractor_threshold * max_score1:\n disp_norm1 = torch.sqrt(torch.sum(target_disp1**2))\n disp_norm2 = 
torch.sqrt(torch.sum(target_disp2**2))\n disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2\n\n if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:\n return translation_vec1, scale_ind, scores, 'hard_negative'\n if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:\n return translation_vec2, scale_ind, scores, 'hard_negative'\n if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:\n return translation_vec1, scale_ind, scores, 'uncertain'\n\n # If also the distractor is close, return with highest score\n return translation_vec1, scale_ind, scores, 'uncertain'\n\n if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:\n return translation_vec1, scale_ind, scores, 'hard_negative'\n\n return translation_vec1, scale_ind, scores, None", "def minus_priority(self):\n #return (-self.size, self.vec, self.score) # kinda \"depth-first\"\n #return (self.vec, self.score, -self.size) # kinda \"breadth-first\"\n return (self.score, -self.size, self.vec) # kinda \"depth-first with back-tracking\"", "def fn(lo, hi):\n if lo == hi: return piles[lo]\n return max(piles[lo] - fn(lo+1, hi), piles[hi] - fn(lo, hi-1))", "def ridgefcn(x: np.ndarray, d: float = 1, alpha: float = 0.5) -> np.ndarray:\n x1 = x[:, 0]\n scores = x1 + d * (np.sum(x[:, 1:] ** 2, axis=1) ** alpha)\n return scores", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def griewankfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n sumcomp = np.zeros((x.shape[0], 1))\n prodcomp = np.ones((x.shape[0], 1))\n\n for i in range(n):\n sumcomp += x[:, i : i + 1] ** 2\n prodcomp *= np.cos(x[:, i : i + 1] / np.sqrt(i + 1))\n\n scores = (sumcomp / 4000) - prodcomp + 1\n return scores", "def Loss_s2s(score, g_pnt_idxs):\r\n # WHERE string part\r\n loss = 0\r\n\r\n for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):\r\n ed = len(g_pnt_idxs1) - 1\r\n score_part = score[b, :ed]\r\n loss += F.cross_entropy(score_part, torch.tensor(g_pnt_idxs1[1:]).to(device)) # +1 shift.\r\n return loss", "def island_loss_of_weight(self):\n for y in self.island_map:\n for cell in y:\n cell.loss_of_weight()", "def bidimensional_map_nonlin_1(h, t, x, y, x_0, y_0):\n\n gamma = 3 * np.pi\n r = np.sqrt(np.square(x - x_0) + np.square(y - y_0))\n\n f = lambda r: gamma * r + (gamma / 2) * r**2 + np.sqrt(r)\n\n return h(t - f(r))", "def forward(self, x):\n score_map_tl, score_map_br = self.get_score_map(x)\n coorx_tl, coory_tl = self.soft_argmax(score_map_tl)\n coorx_br, coory_br = self.soft_argmax(score_map_br)\n return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1)", "def compute_jacobi_map(self,jacobian):\n jacobi_abs = - np.sum(jacobian[jacobian < 0.]) #\n jacobi_num = np.sum(jacobian < 0.)\n print(\"the jacobi_value of fold points for current batch is {}\".format(jacobi_abs))\n print(\"the number of fold points for current batch is {}\".format(jacobi_num))\n # np.sum(np.abs(dfx[dfx<0])) + np.sum(np.abs(dfy[dfy<0])) + np.sum(np.abs(dfz[dfz<0]))\n #jacobi_abs_mean = jacobi_abs # / np.prod(map.shape)\n return jacobi_abs, jacobi_num", "def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in 
range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;", "def l1_inv_metric(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, roi=None, max_distance=None):\n depth_prediction, depth_gt = preprocess_roi(depth_prediction, depth_gt, roi)\n depth_prediction, depth_gt = get_positive_depth(depth_prediction, depth_gt)\n\n\n return torch.mean(torch.abs(depth_prediction - depth_gt))", "def zscore(vals):", "def forward(self, node_hidden, edge_hidden, adj_matrix, mask):\n out_scores, in_scores = None, None\n for layer in self.layers:\n node_hidden, edge_hidden = layer(node_hidden, edge_hidden, adj_matrix, mask)\n return self.norm(node_hidden)", "def math_map(val, src, dst):\n return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]", "def relu_forward(x):\r\n cache = x\r\n out = np.maximum(0, x)\r\n return out, cache", "def apply_edges(self, edges):\n h = torch.cat([edges.src['h'], edges.dst['h']], 1)\n out_score = self.W2(F.relu(self.W1(h))).squeeze(1)\n out_label = torch.round(torch.sigmoid(out_score))\n # print(out_score, out_label)\n out_dict = {'score': out_score, 'label': out_label}\n return out_dict", "def compute_saliency_maps(X, y, model):\n model.eval()\n X.requires_grad_()\n\n scores = model(X)\n loss = scores.gather(1, y.view(-1, 1)).squeeze()\n loss.backward(torch.ones(scores.shape[0]))\n grad = X.grad.data\n saliency, _ = torch.max(grad.abs(), dim=1)\n return saliency", "def schwefel221fcn(x: np.ndarray) -> np.ndarray:\n scores = np.max(np.abs(x), axis=1)\n return scores", "def relu_forward(x):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.maximum(0,x)\n cache=x\n return out,cache", "def get_neighbours(pos):\n neighbours = {tuple(sum(x) for x in zip(pos, offset)) for offset in relative_positions}\n return neighbours", "def _neighbours(self, query):\n\n # Because the query and memory keys are aready normalized, cosine\n # similarity can be calculated through a single matrix multiplication.\n similarity = T.dot(query, self.K.T)\n\n # Find the k-nearest neighbours\n k_nbrs = T.argsort(similarity, axis=1)[:, ::-1][:, :self.k_nbrs]\n k_nbrs_y = self.V[k_nbrs.flatten()].reshape(k_nbrs.shape)\n\n # Make a pseude row index via repeat\n idx = T.extra_ops.repeat(T.arange(query.shape[0]), self.k_nbrs)\n k_nbrs_sim = similarity[idx, k_nbrs.flatten()].reshape(k_nbrs.shape)\n\n return k_nbrs, k_nbrs_y, k_nbrs_sim", "def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n 
return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right", "def linearAssignmentParcellation(col_labels, label_mapping, slabels):\n\n z = np.zeros((len(col_labels),))\n\n for k, v in label_mapping.items():\n indv = np.where(col_labels == v)[0]\n z[indv] = k\n\n maxt = np.max(z)\n inds = np.where(col_labels>0)[0]\n zeros = inds[(z[inds]==0)]\n\n leftovers = np.unique(col_labels[zeros])\n\n for j,left in enumerate(leftovers):\n indlft = np.where(col_labels == left)\n z[indlft] = maxt + j + 1\n\n return z", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def neighbours(box, kps):\n box_duplicate = box.unsqueeze(2).repeat(1, 1, len(kps.t())).transpose(0, 1)\n kps_duplicate = kps.unsqueeze(1).repeat(1, len(box), 1)\n\n xmin = kps_duplicate[0].ge(box_duplicate[0])\n ymin = kps_duplicate[1].ge(box_duplicate[1])\n xmax = kps_duplicate[0].le(box_duplicate[2])\n ymax = kps_duplicate[1].le(box_duplicate[3])\n\n nbr_onehot = torch.mul(torch.mul(xmin, ymin), torch.mul(xmax, ymax)).t()\n n_neighbours = nbr_onehot.sum(dim=1)\n\n return nbr_onehot, n_neighbours", "def new_evaluate(board):\n\n #Logic for new_evaluate function:\n #1)Traverse through each of the columns\n #2)For each of the columns, find the top most element.\n\t #If the topmost element = Current Player\n\t\t \t#3)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a POSITIVE value\n\t #Else\n\t\t \t#4)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a NEGATIVE value\n #5)Sort these Positive and Negative scores\n #6)IF the highest negative score is greater than the highest positive score, then it means that the opposition has MORE chances to WIN.\n #So, that has to be blocked and so we will return that HIGHEST NEGATIVE value as the score for that board\n #7)ELSE we go ahead and return the HIGHEST POSITIVE value as the score for that board\n #->This logic has increasing the AGGRESSION of the player a lot and it makes senses we hope.\n\n posdict = {}\n negdict = {}\n for col in range(7):\n if(board.get_top_elt_in_column(col)==board.get_current_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = board._max_length_from_cell(rowValue,col)\n posdict[col]=score\n elif(board.get_top_elt_in_column(col)==board.get_other_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = -(board._max_length_from_cell(rowValue,col))\n negdict[col]=score\n\n\n sorted(posdict.values(),reverse= True)\n sorted(negdict.values())\n if((bool(posdict))and (bool(negdict))):\n if(abs(negdict.values()[0]) >= ((posdict.values()[0]))):\n return negdict[negdict.keys()[0]]\n else:\n return posdict[posdict.keys()[0]]\n elif(bool(posdict)):\n return 
posdict[posdict.keys()[0]]\n elif(bool(negdict)):\n return negdict[negdict.keys()[0]]\n else:\n return 0", "def trace_neighbours(self, x, y):\r\n return list(filter(lambda n: n != None, (self.see_neighbour(x, y, i, j) for i in [-1, 0, 1] for j in [-1, 0, 1])))", "def get_score_matrix(self) -> int:", "def cemap_cal(y_pred,y_true):\r\n nTest = y_true.shape[0]\r\n nLabel = y_true.shape[1]\r\n ap = np.zeros(nTest)\r\n for i in range(0,nTest):\r\n for j in range(0,nLabel):\r\n R = np.sum(y_true[i,:])\r\n if y_true[i,j]==1:\r\n r = np.sum(y_pred[i,:]>=y_pred[i,j])\r\n rb = np.sum(y_pred[i,np.nonzero(y_true[i,:])] >= y_pred[i,j])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n imap = np.nanmean(ap)\r\n\r\n ap = np.zeros(nLabel)\r\n for i in range(0,nLabel):\r\n for j in range(0,nTest):\r\n R = np.sum(y_true[:,i])\r\n if y_true[j,i]==1:\r\n r = np.sum(y_pred[:,i] >= y_pred[j,i])\r\n rb = np.sum(y_pred[np.nonzero(y_true[:,i]),i] >= y_pred[j,i])\r\n ap[i] = ap[i] + rb/(r*1.0)\r\n ap[i] = ap[i]/R\r\n lmap = np.nanmean(ap)\r\n\r\n return lmap,imap", "def HeatmapLoss(pred, gt, masks):\n assert pred.size() == gt.size()\n l = ((pred - gt)**2) * masks[:, None, :, :].expand_as(pred)\n l = l.mean(dim=3).mean(dim=2).mean(dim=1)\n return l", "def fn(i, j):\n if i == m-1 and j == n-1: return max(1, 1 - dungeon[i][j])\n if i > m-1 or j > n-1: return inf\n return max(1, min(fn(i+1, j), fn(i, j+1)) - dungeon[i][j])", "def compute_path_score(self, pre_scores, label):\n point_score = tf.reduce_sum(tf.reduce_sum(pre_scores * label, axis=2), 1, keep_dims=True)\n label1 = tf.expand_dims(label[:, :-1], 3)\n label2 = tf.expand_dims(label[:, 1:], 2)\n label = label1 * label2\n trans = tf.expand_dims(tf.expand_dims(self.trans, 0), 0)\n trans_score = tf.reduce_sum(tf.reduce_sum(label * trans, [2, 3]), 1, keep_dims=True)\n return point_score + trans_score", "def score(self, X, y=...):\n ...", "def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours", "def shade_neighbours(x: int, y: int) -> None:\r\n if x > 0:\r\n safeboard[x-1, y] = 0\r\n if x < shape-1:\r\n safeboard[x+1, y] = 0\r\n if y > 0:\r\n safeboard[x, y-1] = 0\r\n if y < shape-1:\r\n safeboard[x, y+1] = 0\r\n safeboard[x, y] = 0", "def get_score(location, grid, shape):", "def himmelblaufcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n assert n == 2, \"Himmelblau's function is only defined on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n scores = ((X**2 + Y - 11) ** 2) + ((X + Y**2 - 7) ** 2)\n return scores", "def backward(ctx, gradPixels, dpointIdxMap, gradRhoMap, gradWsMap, gradIsBehind):\n pointIdxMap, bbPositionMap, isBehind, WsMap, rhoMap, depthMap, Ws, rhoValues, projPoints, cameraPoints, boundingBoxes, pixels, Ms = ctx.saved_tensors\n mergeThreshold = ctx.mergeThreshold\n focalLength = ctx.focalLength\n numPoint = ctx.numPoint\n considerZ = ctx.considerZ\n bbWidth = ctx.bbWidth\n bbHeight = ctx.bbHeight\n batchSize, height, width, topK, C = WsMap.shape\n if ctx.needs_input_grad[0]: # rho will not be backpropagated\n WsMap_ = torch.where(isBehind.unsqueeze(-1), torch.zeros(1, 1, 1, 1, 1, device=WsMap.device, dtype=WsMap.dtype), WsMap)\n totalIdxMap = pointIdxMap*bbHeight*bbWidth+bbPositionMap[:, :, :, :, 0]*bbWidth+bbPositionMap[:, :, :, :, 1]\n # TODO check dNormalizeddRho\n rhoMap_filtered = 
torch.where(isBehind, torch.zeros(1, 1, 1, 1, device=rhoMap.device, dtype=rhoMap.dtype), rhoMap)\n sumRho = torch.sum(rhoMap_filtered, dim=-1, keepdim=True)\n dNormalizeddRho = torch.where(rhoMap > 0, 1/sumRho-rhoMap/sumRho, rhoMap)\n dRho = _guided_scatter_maps(numPoint*bbWidth*bbHeight, dNormalizeddRho.unsqueeze(-1)*gradPixels.unsqueeze(3)*WsMap_, totalIdxMap, boundingBoxes)\n dRho = torch.sum(dRho, dim=-1)\n dRho = torch.reshape(dRho, (batchSize, numPoint, bbHeight, bbWidth))\n else:\n dRho = None\n\n if ctx.needs_input_grad[2]:\n # dPixels/dWs = Rho\n rhoMap_filtered = torch.where(isBehind, torch.zeros(1, 1, 1, 1, device=rhoMap.device, dtype=rhoMap.dtype), rhoMap)\n sumRho = torch.sum(rhoMap_filtered, dim=-1, keepdim=True)\n sumRho = torch.where(sumRho == 0, torch.zeros_like(sumRho), sumRho)\n rhoMap_normalized = rhoMap_filtered/sumRho\n # BxHxWx3 -> BxHxWxKx3 -> BxNx3\n dWs = _guided_scatter_maps(numPoint, gradPixels.unsqueeze(3)*rhoMap_normalized.unsqueeze(-1), pointIdxMap, boundingBoxes)\n else:\n dWs = None\n\n if ctx.needs_input_grad[3]:\n localWidth = ctx.localWidth\n localHeight = ctx.localHeight\n depthValues = cameraPoints[:, :, 2].contiguous()\n # B,N,1\n dIdp = torch.zeros_like(projPoints, device=gradPixels.device, dtype=gradPixels.dtype)\n dIdz = torch.zeros(1, numPoint, device=gradPixels.device, dtype=gradPixels.dtype)\n outputs = _visibility_backward(focalLength, mergeThreshold, considerZ,\n localHeight, localWidth,\n gradPixels, pointIdxMap, rhoMap, WsMap, depthMap, isBehind,\n pixels, boundingBoxes, projPoints, Ws, depthValues, rhoValues, dIdp, dIdz)\n dIdp, dIdz = outputs\n # outputs = _visibility_debug_backward(mergeThreshold, focalLength, considerZ,\n # localHeight, localWidth, 0,\n # gradPixels, pointIdxMap, rhoMap, WsMap, depthMap, isBehind,\n # pixels, boundingBoxes, projPoints, Ws, depthValues, rhoValues, dIdp, dIdz)\n # dIdp, dIdz, debugTensor = outputs\n\n dIdcam = torch.zeros_like(cameraPoints)\n dIdcam[:, :, 2] = dIdz\n # saved_variables[\"dI\"] = gradPixels.detach().cpu()\n # saved_variables[\"dIdp\"] = saved_variables[\"dIdp\"].scatter_(1, saved_variables[\"renderable_idx\"].expand(-1, -1, dIdp.shape[-1]),\n # dIdp.cpu().detach())\n # saved_variables[\"projPoints\"] = saved_variables[\"projPoints\"].scatter_(1, saved_variables[\"renderable_idx\"].expand(-1,-1,dIdp.shape[-1]),\n # projPoints.cpu().detach())\n # saved_variables[\"dIdpMap\"] = debugTensor[:,:,:,:2].cpu().detach()\n else:\n dIdp = dIdcam = None\n\n return (None, None, dWs, dIdp, None, None, dIdcam, None, None, None, None, None, None, None, None, None, None)", "def backward(ctx, gradPixels, dpointIdxMap, gradRhoMap, gradWsMap, gradIsBehind):\n pointIdxMap, bbPositionMap, isBehind, WsMap, rhoMap, depthMap, Ws, rhoValues, projPoints, cameraPoints, boundingBoxes, pixels, Ms = ctx.saved_tensors\n mergeThreshold = ctx.mergeThreshold\n focalLength = ctx.focalLength\n numPoint = ctx.numPoint\n considerZ = ctx.considerZ\n bbWidth = ctx.bbWidth\n bbHeight = ctx.bbHeight\n batchSize, height, width, topK, C = WsMap.shape\n if ctx.needs_input_grad[0]: # rho will not be backpropagated\n WsMap_ = torch.where(isBehind.unsqueeze(-1), torch.zeros(1, 1, 1, 1, 1, device=WsMap.device, dtype=WsMap.dtype), WsMap)\n totalIdxMap = pointIdxMap*bbHeight*bbWidth+bbPositionMap[:, :, :, :, 0]*bbWidth+bbPositionMap[:, :, :, :, 1]\n # TODO check dNormalizeddRho\n rhoMap_filtered = torch.where(isBehind, torch.zeros(1, 1, 1, 1, device=rhoMap.device, dtype=rhoMap.dtype), rhoMap)\n sumRho = torch.sum(rhoMap_filtered, dim=-1, 
keepdim=True)\n dNormalizeddRho = torch.where(rhoMap > 0, 1/sumRho-rhoMap/sumRho, rhoMap)\n dRho = _guided_scatter_maps(numPoint*bbWidth*bbHeight, dNormalizeddRho.unsqueeze(-1)*gradPixels.unsqueeze(3)*WsMap_, totalIdxMap, boundingBoxes)\n dRho = torch.sum(dRho, dim=-1)\n dRho = torch.reshape(dRho, (batchSize, numPoint, bbHeight, bbWidth))\n else:\n dRho = None\n\n if ctx.needs_input_grad[2]:\n # dPixels/dWs = Rho\n rhoMap_filtered = torch.where(isBehind, torch.zeros(1, 1, 1, 1, device=rhoMap.device, dtype=rhoMap.dtype), rhoMap)\n sumRho = torch.sum(rhoMap_filtered, dim=-1, keepdim=True)\n sumRho = torch.where(sumRho == 0, torch.zeros_like(sumRho), sumRho)\n rhoMap_normalized = rhoMap_filtered/sumRho\n # BxHxWx3 -> BxHxWxKx3 -> BxNx3\n dWs = _guided_scatter_maps(numPoint, gradPixels.unsqueeze(3)*rhoMap_normalized.unsqueeze(-1), pointIdxMap, boundingBoxes)\n else:\n dWs = None\n\n if ctx.needs_input_grad[3]:\n localWidth = ctx.localWidth\n localHeight = ctx.localHeight\n depthValues = cameraPoints[:, :, 2].contiguous()\n # B,N,1\n dIdp = torch.zeros_like(projPoints, device=gradPixels.device, dtype=gradPixels.dtype)\n dIdz = torch.zeros(1, numPoint, device=gradPixels.device, dtype=gradPixels.dtype)\n gamma = 0.1\n # rhoMap_filtered = torch.where(isBehind, torch.zeros(1, 1, 1, 1, device=rhoMap.device, dtype=rhoMap.dtype), rhoMap)\n # sumRho = torch.sum(rhoMap_filtered, dim=-1, keepdim=True)\n # rhoMap_normalized = rhoMap_filtered/sumRho\n # rhoMap_normalized = rhoMap_filtered/sumRho\n outputs = _visibility_reference_backward(focalLength, mergeThreshold, gamma, considerZ, localHeight, localWidth, 0,\n gradPixels, pointIdxMap, rhoMap, WsMap, depthMap, isBehind,\n pixels, boundingBoxes, projPoints, Ws, depthValues, rhoValues, Ms, dIdp, dIdz)\n dIdp, dIdz, debugTensor = outputs\n dIdcam = torch.zeros_like(cameraPoints)\n dIdcam[:, :, 2] = dIdz\n # saved_variables[\"dI\"] = gradPixels.detach().cpu()\n # saved_variables[\"dIdp\"] = saved_variables[\"dIdp\"].scatter_(1, saved_variables[\"renderable_idx\"].expand(-1, -1, dIdp.shape[-1]),\n # dIdp.cpu().detach())\n # saved_variables[\"projPoints\"] = saved_variables[\"projPoints\"].scatter_(1, saved_variables[\"renderable_idx\"].expand(-1,-1,dIdp.shape[-1]),\n # projPoints.cpu().detach())\n # saved_variables[\"dIdpMap\"] = debugTensor[:,:,:,:2].cpu().detach()\n else:\n dIdp = dIdcam = None\n\n return (None, None, dWs, dIdp, None, None, dIdcam, None, None, None, None, None, None, None, None, None, None)", "def map_iou(boxes_true, boxes_pred, scores, thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]):\n\n # According to the introduction, images with no ground truth bboxes will not be\n # included in the map score unless there is a false positive detection (?)\n\n # return None if both are empty, don't count the image in final evaluation (?)\n if len(boxes_true) == 0 and len(boxes_pred) == 0:\n return None\n\n assert boxes_true.shape[1] == 4 or boxes_pred.shape[1] == 4, \"boxes should be 2D arrays with shape[1]=4\"\n if len(boxes_pred):\n assert len(scores) == len(boxes_pred), \"boxes_pred and scores should be same length\"\n # sort boxes_pred by scores in decreasing order\n boxes_pred = boxes_pred[np.argsort(scores)[::-1], :]\n\n map_total = 0\n\n # loop over thresholds\n for t in thresholds:\n matched_bt = set()\n tp, fn = 0, 0\n for i, bt in enumerate(boxes_true):\n matched = False\n for j, bp in enumerate(boxes_pred):\n miou = calculate_iou(bt, bp)\n if miou >= t and not matched and j not in matched_bt:\n matched = True\n tp += 1 # bt is 
matched for the first time, count as TP\n matched_bt.add(j)\n if not matched:\n fn += 1 # bt has no match, count as FN\n\n fp = len(boxes_pred) - len(matched_bt) # FP is the bp that not matched to any bt\n m = tp / (tp + fn + fp)\n map_total += m\n\n return map_total / len(thresholds)", "def _update_farness_map(self,ind):", "def azmap (scores, compare, dimension=0):\r\n mns = amean(compare,dimension)\r\n sstd = asamplestdev(compare,0)\r\n return (scores - mns) / sstd", "def inverse(self, x, y):", "def final_strategy(score, opponent_score):\n # BEGIN PROBLEM 11\n if(score == 0):\n return -1\n return swap_strategy(score, opponent_score, 6, 3)\n # END PROBLEM 11", "def get_k2_boundary(tri, v_neighbours):\n three = np.array([0, 1, 2])\n nv = tri.shape[0]\n k2s = np.empty((nv, 3), dtype=np.int32)\n for i in range(nv):\n for k in range(3):\n neighbour = v_neighbours[i, k]\n if neighbour == -1:\n k2s[i,k] = -1\n else:\n k2 = ((v_neighbours[neighbour] == i) * three).sum()\n k2s[i, k] = k2\n return k2s", "def _diffmat_objective(a,X):\n \n (n,p) = X.shape\n return(X - np.tile(a,(n,1)))", "def compute_gradient_saliency_maps(samples: torch.tensor,\n true_labels: torch.tensor,\n model: nn.Module):\n \"\"\"INSERT YOUR CODE HERE, overrun return.\"\"\"\n return torch.rand(6, 256, 256)", "def map_minus_one_to_one(x, a, b):\n assert b > a\n s = 2./(b - a)\n t = (a+b)/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<-1] = -1\n return y", "def neighbors(self, x):\n pass", "def _apply_tore(y_coordinate, x_coordinate, game_data):\n\n board_x = game_data['variables']['board_size']['x']\n board_y = game_data['variables']['board_size']['y']\n\n if x_coordinate > board_x:\n x_coordinate -= board_x\n if y_coordinate > board_y:\n y_coordinate -= board_y\n\n if x_coordinate < 1:\n x_coordinate += board_x\n if y_coordinate < 1:\n y_coordinate += board_y\n\n return y_coordinate, x_coordinate", "def _check_run(self, axis):\n if (self.x_traj, self.y_traj)[axis] is None:\n if (self.inverse_x_traj, self.inverse_y_traj)[axis] is None:\n raise Exception('The algorithm has not been run.')\n else:\n if self.params['print_info']:\n print('Warning: you are using the opposite score. It can contain errors if any score is a zero below threshold.')\n return (self.inverse_x_traj, self.inverse_y_traj)[axis], (self.inverse_x_ranking, self.inverse_y_ranking)[axis]\n return (self.x_traj, self.y_traj)[axis], (self.x_ranking, self.y_ranking)[axis]", "def relu(self):\n return self * self.ge(0)", "def back_prop(nodes_in_path, playout_result):\n for temp_node in nodes_in_path:\n temp_node.visited += 1\n if str(playout_result) == str(temp_node.side):\n temp_node.winning += 1", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. 
Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n \"\"\"\n #Heuristic 1: Aggressive Improved Score\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n return float(own_moves - 2*opp_moves)\n\n \"\"\"\n\n \"\"\"\n #Heuristic 2: Border/Non-Border Differentiated Moves Scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n own_score = 0\n opp_score = 0\n for each_move in game.get_legal_moves(player):\n if each_move in border_moves:\n own_score = own_score + 1\n else:\n own_score = own_score + 1.5\n\n for each_move in game.get_legal_moves(game.get_opponent(player)):\n if each_move in border_moves:\n opp_score = opp_score + 1\n else:\n opp_score = opp_score + 1.5\n\n return float(own_score - opp_score)\n \"\"\"\n\n #Heuristic 3: Advanced Differentiated Board scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n\n next_to_border_moves = [(1,1), (1,2), (1,3), (1,4), (1,5), (2,1),\n (2,5), (3,1), (3,5), (4,1), (4,5),\n (5,1), (5,2), (5,3), (5,4), (5,5)]\n\n own_score = 0\n opp_score = 0\n\n for move in game.get_legal_moves(player):\n if move in border_moves:\n own_score += 1\n elif move in next_to_border_moves:\n 
own_score += 1.2\n else:\n own_score += 1.5\n\n for move in game.get_legal_moves(game.get_opponent(player)):\n if move in border_moves:\n opp_score += 1\n elif move in next_to_border_moves:\n opp_score += 1.2\n else:\n opp_score += 1.5\n\n return float(own_score - opp_score)", "def py_cpu_nms(dets, scores, thresh): \n # inpurt 8x3 \n x1 = dets[:, 0, 0] \n y1 = dets[:, 0, 1] \n # z1 = dets[:, 0, 2]\n x2 = dets[:, 2, 0] \n y2 = dets[:, 2, 1] \n print('7777777777777',scores.shape)\n # z2 = dets[:, 2, 2] \n # height = dets[:, 4, 2] - dets[:, 0, 2]\n \n areas = (x2 - x1 + 1) * (y2 - y1 + 1) \n #打分从大到小排列,取index \n order = scores.argsort()[::-1] \n #keep为最后保留的边框 \n keep = [] \n while order.size > 0: \n #order[0]是当前分数最大的窗口,肯定保留 \n i = order[0] \n keep.append(i) \n #计算窗口i与其他所有窗口的交叠部分的面积\n xx1 = np.maximum(x1[i], x1[order[1:]]) \n yy1 = np.maximum(y1[i], y1[order[1:]]) \n xx2 = np.minimum(x2[i], x2[order[1:]]) \n yy2 = np.minimum(y2[i], y2[order[1:]]) \n \n w = np.maximum(0.0, xx2 - xx1 + 1) \n h = np.maximum(0.0, yy2 - yy1 + 1) \n inter = w * h \n #交/并得到iou值 \n ovr = inter / (areas[i] + areas[order[1:]] - inter) \n #inds为所有与窗口i的iou值小于threshold值的窗口的index,其他窗口此次都被窗口i吸收 \n inds = np.where(ovr <= thresh)[0] \n #order里面只保留与窗口i交叠面积小于threshold的那些窗口,由于ovr长度比order长度少1(不包含i),所以inds+1对应到保留的窗口\n order = order[inds + 1] \n \n return keep", "def rosenbrockfcn(x: np.ndarray) -> np.ndarray:\n scores = np.zeros((x.shape[0], 1))\n n = x.shape[1]\n assert n >= 1, \"Given input X cannot be empty\"\n a = 1\n b = 100\n for i in range(n - 1):\n scores += b * ((x[:, i + 1] - (x[:, i] ** 2)) ** 2) + (\n (a - x[:, i]) ** 2\n )\n return scores", "def zakharovfcn(x: np.ndarray) -> np.ndarray:\n\n n = x.shape[1]\n comp1 = np.sum(x**2, axis=1)\n comp2 = np.sum(0.5 * np.arange(1, n + 1) * x, axis=1)\n\n scores = comp1 + comp2**2 + comp2**4\n\n return scores", "def fn(i, j):\n grid2[i][j] = 0 # mark as visited \n ans = grid1[i][j]\n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n and grid2[ii][jj]: \n ans &= fn(ii, jj)\n return ans", "def needleman_wunsch1(x,y,lodict=None,gop=-2.5, gep=-1.75, local=False):\n n,m = len(x),len(y)\n dp = np.zeros((n+1,m+1))\n pointers = np.zeros((n+1,m+1),np.int32)\n for i in range(1,n+1):\n dp[i,0] = dp[i-1,0]+(gep if i>1 else gop)\n pointers[i,0]=1\n for j in range(1,m+1):\n dp[0,j] = dp[0,j-1]+(gep if j>1 else gop)\n pointers[0,j]=2\n for i in range(1,n+1):\n for j in range(1,m+1):\n if not lodict:\n if x[i-1] == y[j-1]:\n match = dp[i-1,j-1]+1\n else:\n match = dp[i-1,j-1]-1\n else:\n match = dp[i-1,j-1]+lodict[x[i-1],y[j-1]]\n insert = dp[i-1,j]+(gep if pointers[i-1,j]==1 else gop)\n delet = dp[i,j-1]+(gep if pointers[i,j-1]==2 else gop)\n max_score = max([match,insert,delet])\n dp[i,j] = max_score\n pointers[i,j] = [match,insert,delet].index(max_score)\n alg = []\n i,j = n,m\n while(i>0 or j>0):\n pt = pointers[i,j]\n if pt==0:\n i-=1\n j-=1\n alg = [[x[i],y[j]]]+alg\n if pt==1:\n i-=1\n alg = [[x[i],'-']]+alg\n if pt==2:\n j-=1\n alg = [['-',y[j]]]+alg\n return dp[-1,-1], alg", "def compute_neighbours(index, matrix):\n row, col = decode_to_matrix_cell(index, matrix)\n n1 = index + 1\n if n1 >= matrix.size or col == matrix.cols - 1:\n n1 = None\n\n n2 = index + matrix.cols\n if n2 >= matrix.size or row == matrix.rows - 1:\n n2 = None\n return n1, n2,", "def update_gol(arr):\n nxt = np.zeros(arr.shape)\n rows,cols = nxt.shape\n for i in range(rows):\n for j in range(cols):\n nn = sum_vonneuman_nn(arr,i,j)\n if arr[i][j]==1:\n if nn==2 or nn==3:\n 
nxt[i][j]=1\n else:\n if nn==3:\n nxt[i][j]=1\n return nxt", "def forward(ctx, scores):\n size = scores.size()\n prob = F.softmax(scores, dim=-1)\n idx = multinomial(prob.view(-1, size[-1]), num_samples=1, replacement=False).view(size[:-1])\n scores_net = eye(scores.size(-1), device=scores.device)\n return scores_net[idx]", "def get_others(map_, r, c):\n nums = 0\n # your code here\n if r == 0 and c == 0: #top left corder\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n elif r == 0 and c == len(map_[0])-1: #top right corner\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n elif r == len(map_)-1 and c == 0: #bottom left corder\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif r == len(map_)-1 and c == len(map_[0])-1: #bottom right corner\n nums += 2\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif r == 0: # top edge, excluding corner\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n if len(map_) > r and map_[r+1][c] == 0:\n nums += 1\n elif r == len(map_)-1: # bottom edge, excluding corner\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif c == 0: # left edge, excluding corner\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if len(map_[0]) > c and map_[r][c+1] == 0:\n nums += 1\n elif c == len(map_[0])-1: # right edge. excluding corner\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n else: # the rest, excluding edge and corner\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n return nums", "def evaluate(y_pred_X, gnd, thresh, le_y):\n df2 = pd.DataFrame({'y':y_pred_X, 'gnd':gnd})\n out = df2.groupby(['gnd']).sum()\n out.reset_index(inplace=True)\n labels = out['gnd'].as_matrix()\n mask2 = labels[out['y'] > thresh]\n return list(le_y.inverse_transform(mask2))", "def __call__(self, a, y, hinge=1.0):\n\n check_loss_inputs(a, y)\n self.variables = (a,)\n scores = a.data\n correct_labels = (range(len(y)), y)\n correct_class_scores = scores[correct_labels] # Nx1\n\n M = scores - correct_class_scores[:, np.newaxis] + hinge # NxC margins\n not_thresh = np.where(M <= 0)\n Lij = M\n Lij[not_thresh] = 0\n Lij[correct_labels] = 0\n if _tracking.TRACK_GRAPH:\n TMP = np.ones(M.shape, dtype=float)\n TMP[not_thresh] = 0\n TMP[correct_labels] = 0 # NxC; 1 where margin > 0\n TMP[correct_labels] = -1 * TMP.sum(axis=-1)\n self.back = TMP\n self.back /= scores.shape[0]\n return np.sum(Lij) / scores.shape[0]", "def calculate_loss_function(self, predicted, groundTruth):\n predictedParameters = np.reshape(\n predicted, [-1, self.numOfGridsIn1D, self.numOfGridsIn1D, 30])\n predictedClasses = predictedParameters[:, :, :, :20]\n predictedObjectConfidence = predictedParameters[:, :, :, 20:22]\n predictedBoxes = predictedParameters[:, :, :, 22:]\n groundTruthClasses = np.reshape(groundTruth[:, :20], [-1, 1, 1, 20])\n groundTruthBoxes = np.reshape(groundTruth[:, 20:24], [-1, 1, 1, 4])\n groundTruthGrid = np.reshape(groundTruth[:, 24:], [-1, 7, 7, 1])\n predictedFirstBoxes = predictedBoxes[:, :, :, :4]\n predictedSecondBoxes = predictedBoxes[:, :, :, 5:]\n # Calulate loss along the 4th axis, 
localFirstBoxes -1x7x7x1\n # Think there should be a simpler method to do this\n lossFirstBoxes = tf.reduce_sum(\n tf.square(predictedFirstBoxes - groundTruthBoxes), 3)\n lossSecondBoxes = tf.reduce_sum(\n tf.square(predictedSecondBoxes - groundTruthBoxes), 3)\n # Computing which box (bbox1 or bbox2) is responsible for\n # detection\n IOU = iou_train(predictedFirstBoxes,\n predictedSecondBoxes, groundTruthBoxes)\n responsbileBox = tf.greater(IOU[:, :, :, 0], IOU[:, :, :, 1])\n # Suppose it is known which iou is greater,\n # coordinate loss (loss due to difference in coordinates of\n # predicted-responsible and real box)\n coordinateLoss = tf.where(\n responsibleBox, lossFirstBoxes, lossSecondBoxes)\n # why do we need to reshape it\n coordinateLoss = tf.reshape(coordinateLoss, [-1, 7, 7, 1])\n # count the loss only if the object is in the groundTruth grid\n # gives a sparse -1x7x7x1 matrix, only one element would be nonzero in\n # each slice\n coorinateLoss = self.lambdaCoordinate * \\\n tf.multiply(groundTruthGrid, coordinateLoss)\n # object loss (loss due to difference in object confidence)\n # only take the objectLoss of the predicted grid with higher IoU is\n # responsible for the object\n objectLoss = tf.square(predictedObjectConfidence - groundTruthGrid)\n objectLoss = tf.where(responsibleBox, objectLoss[\n :, :, :, 0], objectLoss[:, :, :, 1])\n tempObjectLoss = tf.reshape(objectLoss, [-1, 7, 7, 1])\n objectLoss = tf.multiply(groundTruthGrid, tempObjectLoss)\n # class loss (loss due to misjudgement in class of the object\n # detected\n classLoss = tf.square(predictedClasses - groundTruthClasses)\n classLoss = tf.reduce_sum(\n tf.mul(groundTruthGrid, classLoss), reduction_indices=3)\n classLoss = tf.reshape(classLoss, [-1, 7, 7, 1])\n # no-object loss, decrease the confidence where there is no\n # object in the ground truth\n noObjectLoss = self.lambdaNoObject * \\\n tf.multiply(1 - groundTruthGrid, tempObjectLoss)\n # total loss\n totalLoss = coordinateLoss + objectLoss + classLoss + noObjectLoss\n totalLoss = tf.reduce_mean(tf.reduce_sum(\n totalLoss, reduction_indeces=[1, 2, 3]), reduction_indices=0)\n return totalLoss" ]
[ "0.58188856", "0.5598756", "0.5585042", "0.55513406", "0.5534682", "0.5500189", "0.5466905", "0.541867", "0.54055065", "0.5333235", "0.53182185", "0.5281047", "0.5278096", "0.52513933", "0.5239409", "0.5227734", "0.5214485", "0.5214058", "0.5212882", "0.520227", "0.5186105", "0.5179842", "0.5168227", "0.5166361", "0.51654065", "0.5164534", "0.5164201", "0.51621944", "0.5153478", "0.5138308", "0.51289445", "0.51169527", "0.51137257", "0.5100932", "0.51007485", "0.5086631", "0.5086451", "0.5085571", "0.5068255", "0.5063699", "0.50632566", "0.505944", "0.50285375", "0.50249785", "0.50218457", "0.50196457", "0.5016658", "0.50121695", "0.5006725", "0.5000477", "0.49941108", "0.49840957", "0.49822155", "0.49800315", "0.49766478", "0.49723303", "0.49700302", "0.49688086", "0.49596444", "0.49595082", "0.4952496", "0.49427822", "0.494024", "0.4940142", "0.49362567", "0.49229804", "0.49112996", "0.4910548", "0.49097058", "0.49036998", "0.48986337", "0.48986337", "0.4894781", "0.48927212", "0.4892512", "0.48867825", "0.4885721", "0.48856014", "0.48833308", "0.48828384", "0.4881772", "0.4869751", "0.48682308", "0.48667756", "0.486523", "0.48648962", "0.4859494", "0.48567945", "0.48531765", "0.48469016", "0.48420128", "0.4841402", "0.48394057", "0.48386168", "0.48378766", "0.4833392", "0.48285353", "0.48263437", "0.48229858", "0.48225722" ]
document_score: 0.50255334
document_rank: 43
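Of the negative scores listed above, 43 exceed the document's 0.50255334, which matches the stored rank if document_rank counts strictly higher-scoring negatives (an assumed convention). A quick check of that reading:

import numpy as np

def zero_based_rank(doc_score, neg_scores):
    # number of negatives scoring strictly above the paired document
    return int(np.sum(np.asarray(neg_scores) > doc_score))

# Spot-check with a few scores from the list: the document at 0.50255334
# sits just below 0.50285375 and just above 0.50249785.
sample = [0.58188856, 0.505944, 0.50285375, 0.50249785, 0.48225722]
print(zero_based_rank(0.50255334, sample))  # -> 3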
Given the scores s, the list of nodes that have already reached the low boundary, and the ranking computed so far, check whether any new nodes have reached the low boundary and, if so, update the ranking with them
def _update_zero_rank(self, s, zero_ind, rank):
    lb = self.params['low_bound']
    # Indices of nodes that newly dropped to (or below) the low bound
    new_zeros_ind = _np.setdiff1d(_np.nonzero(s <= lb), zero_ind)
    if len(new_zeros_ind) > 0:
        # Append the new zeros to the ranking, ordered by increasing score
        sorted_zeros_ind = new_zeros_ind[_np.argsort(s[new_zeros_ind])]
        rank = _np.append(rank, sorted_zeros_ind)
        zero_ind = _np.append(zero_ind, new_zeros_ind)
    return rank, zero_ind
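A toy run of the method above; the stand-in object carries only the low_bound parameter the method reads:

import numpy as _np

class _Toy:
    params = {'low_bound': 1e-6}   # the only field the method reads

s = _np.array([0.5, 1e-7, 2.0, 0.0])   # nodes 1 and 3 drop to the bound
rank, zeros = _update_zero_rank(_Toy(), s,
                                _np.array([], dtype=int),   # zero_ind so far
                                _np.array([], dtype=int))   # ranking so far
print(zeros)  # [1 3] -- newly zeroed nodes
print(rank)   # [3 1] -- appended lowest score first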
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rerank_candidates(s, pred2sub_rank, all_predictions, rerank_top=20):\n predicted_smiles = []\n model_input = []\n for (predict_smi, label), _ in Counter(all_predictions).most_common(rerank_top):\n if predict_smi == s:\n continue\n features = get_all_features(\n get_all_ranking_info(pred2sub_rank[predict_smi]))\n predicted_smiles.append((predict_smi, label))\n model_input.append(features)\n\n model = RankingModel()\n model.load_state_dict(torch.load('./models/ranker/rank_model.pt', map_location='cpu'))\n model.eval()\n\n test_loader = DataLoader(RankingTestDataset(\n model_input), batch_size=1000, shuffle=False, num_workers=2)\n ranking_scores = []\n for data in test_loader:\n outputs = model(data)[0]\n ranking_scores.extend(outputs.detach().cpu().numpy())\n\n assert len(predicted_smiles) == len(ranking_scores)\n pred_smi2score = {k: v[1]\n for k, v in zip(predicted_smiles, ranking_scores)}\n return pred_smi2score", "def update_highscores(self):\n for i in range(len(self.highscores)):\n if self.score >= self.highscores[i]:\n self.highscores.insert(i, self.score)\n self.highscores.pop()\n break", "def update_scores(self):\n self.score[0] = (-1)*sum(self.board[self.board == -1])\n self.score[1] = sum(self.board[self.board == 1])\n #self.score[i] = sum(1 for j in range(len(stones_on_board)) if stones_on_board[j] == i)", "def update_scores(self, score):\n self.result_list.append(score)\n\n if self.best_score == 0 and self.worst_score == 0:\n self.best_score = score\n self.worst_score = score\n\n if score < self.best_score:\n self.best_score = score\n\n if score > self.worst_score:\n self.worst_score = score", "def add_score(self, points: int) -> None:\n self.__score += points\n\n for rank in self.__ranks.keys():\n if self.__score >= rank:\n self.__level = self.__ranks[rank]\n else:\n break", "def check_high_score(stats, sb):\r\n if stats.score > stats.high_score:\r\n stats.high_score = stats.score\r\n sb.prep_high_score()", "def check_high_score(stats, sb):\n if stats.score > stats.score:\n stats.high_score = stats.score\n sb.prep_high_score()", "def score_of_nodes(self, score):\n for hypervisor_id in self.model.get_all_hypervisors():\n hypervisor = self.model. \\\n get_hypervisor_from_id(hypervisor_id)\n count = self.model.get_mapping(). 
\\\n get_node_vms_from_id(hypervisor_id)\n if len(count) > 0:\n result = self.calculate_score_node(hypervisor)\n else:\n # The hypervisor has no VMs\n result = 0\n if len(count) > 0:\n score.append((hypervisor_id, result))\n return score", "def check_high_score(stats,sb):\n if stats.score > stats.high_score:\n stats.high_score = stats.score\n sb.prep_high_score()", "def node_and_vm_score(self, sorted_score, score):\n node_to_release = sorted_score[len(score) - 1][0]\n vms_to_mig = self.model.get_mapping().get_node_vms_from_id(\n node_to_release)\n\n vm_score = []\n for vm_id in vms_to_mig:\n vm = self.model.get_vm_from_id(vm_id)\n if vm.state == vm_state.VMState.ACTIVE.value:\n vm_score.append(\n (vm_id, self.calculate_score_vm(vm)))\n\n return node_to_release, vm_score", "def check_high_score(stats, sb):\n if stats.score > stats.high_score:\n stats.high_score = stats.score\n sb.prep_high_score()", "def _add_to_end_states(self, end_states: List[Tensor], min_score: float, state: Tensor, min_index: int) ->Tuple[List[Tensor], float, int]:\n if len(end_states) < self.nbest:\n end_states.append(state)\n if float(state[0]) <= min_score:\n min_score = float(state[0])\n min_index = len(end_states) - 1\n elif bool(state[0] > min_score):\n end_states[min_index] = state\n min_index = -1\n min_score = float('inf')\n for idx in range(len(end_states)):\n s = end_states[idx]\n if bool(float(s[0]) <= min_score):\n min_index = idx\n min_score = float(s[0])\n return end_states, min_score, min_index", "def test_update_scores():\n ref = np.zeros((36,), dtype=int)\n depth = np.array([_ for _ in ref])\n ascore = np.array([3, 1, 2, 0, 0, 6])\n for offset in xrange(31):\n pref = [_ for _ in ref]\n prev = ref[offset + 5]\n pdep = depth[offset + 5]\n _update_scores(ref, depth, ascore, offset)\n if ref[offset + 5] != 6 + prev:\n raise ValueError('Error updating scores at offset {}\\n'\n 'prev: {}\\n'\n 'next: {}'.format(offset, pref, ref.tolist()))\n assert depth[offset + 5] == 1 + pdep", "def _update_rankings(self, sensitivity: int, verbose: bool = False):\n\n if verbose is True:\n print(\"Updating source rankings...\")\n\n for source in self._sources:\n source.update_ranking(self, sensitivity, verbose)\n\n if verbose is True:\n print(\"Source rankings updated.\")", "def check_high_score(stats, sb):\n\tif stats.score > stats.high_score:\n\t\tstats.high_score = stats.score\n\t\tsb.prep_high_score()", "def _lowess_update_nn(x, cur_nn,i):\n while True:\n if cur_nn[1]<x.size:\n left_dist = x[i] - x[cur_nn[0]]\n new_right_dist = x[cur_nn[1]] - x[i]\n if new_right_dist < left_dist:\n cur_nn[0] = cur_nn[0] + 1\n cur_nn[1] = cur_nn[1] + 1\n else:\n break\n else:\n break", "def update(self, rank):\n # calculate MR and MRR\n self.mr += rank\n self.mrr += 1 / rank\n # calculate Hits@k\n if rank <= 1:\n self.hits1 += 1\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 3:\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 5:\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 10:\n self.hits10 += 1", "def score_solution(g, s):\n pass", "def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in 
open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]", "def calculate(self, prev_scores):\n self.set_scores(prev_scores)\n for match in self.week.matches:\n if match.played:\n # Fetch old scores\n winner_score = float(prev_scores[match.winner])\n loser_score = float(prev_scores[match.loser])\n\n # Update this ranking's scores\n score_delta = loser_score * 0.1\n self.score[match.winner] = winner_score + score_delta\n self.score[match.loser] = loser_score - score_delta", "def _insort(self, node):\n lo = 0\n hi = len(self._pool)\n f_score = node.get_f_score()\n while lo < hi:\n mid = (lo+hi)//2\n if f_score < self._pool[mid].get_f_score(): hi = mid\n else: lo = mid + 1\n self._pool.insert(lo, node)", "def assign_numbers(old_SS, new_SS, overlap_matrix, num_of_ss):\n\n # Now assign new sunspot numbers based off the old sunspots\n # if column is empty => new sunspot\n # if row is empty => old sunspot is retired\n new_SS_accounted_for, old_SS_accounted_for = [], []\n SS_claims = []\n\n # loop over new SS\n for icolumn, vcolumn in enumerate(overlap_matrix.T):\n if sum(vcolumn) == 0: # doesn't overlap with any old SS -> new SS\n new_SS[icolumn].number = num_of_ss + 1\n num_of_ss += 1\n new_SS_accounted_for.append(icolumn)\n continue\n\n max_vcolumn = max(vcolumn)\n if sum(vcolumn) == max_vcolumn: # only 1 in column\n row = list(vcolumn).index(max_vcolumn)\n\n if sum(overlap_matrix[row]) == max_vcolumn: # only 1 in row\n # in this scenario, new_SS[icolumn] = old_SS[row]\n new_SS[icolumn].number = old_SS[row].number\n\n new_SS_accounted_for.append(icolumn)\n old_SS_accounted_for.append(row)\n continue\n\n else: # two new sunspots have claim to an old sunspot\n SS_claims.append([row, icolumn, max_vcolumn])\n \n else: # more than 1 overlap in column -> 2 SS merging\n for irow, vrow in enumerate(vcolumn):\n if vrow != 0:\n SS_claims.append([irow, icolumn, vrow])\n\n # Now to sort out competing claims (where there are two overlaps)\n # whichever old-new sunspot pair has the highest overlap will get the number\n # sort by intersection area\n SS_claims = sorted(SS_claims, key = operator.itemgetter(2), reverse = True)\n\n for i in SS_claims:\n old_numb = i[0]\n new_numb = i[1]\n\n if (old_numb not in old_SS_accounted_for) and (new_numb not in new_SS_accounted_for):\n new_SS[new_numb].number = old_SS[old_numb].number\n\n old_SS_accounted_for.append(old_numb)\n new_SS_accounted_for.append(new_numb)\n\n elif (old_numb in old_SS_accounted_for) and (new_numb not in new_SS_accounted_for):\n new_SS[new_numb].number = num_of_ss + 1\n num_of_ss += 1\n new_SS_accounted_for.append(new_numb)\n old_SS_accounted_for.append(new_numb)\n\n elif (old_numb not in old_SS_accounted_for) and (new_numb in new_SS_accounted_for):\n pass\n\n return old_SS, new_SS, num_of_ss", "def next_positions(self):\n self.scores = np.array(self.scores)\n improved = self.scores < self._pso_data.best_scores\n\n self._pso_data.best_scores[improved] = self.scores[improved]\n self._pso_data.best_positions[improved] = self.positions[improved]\n\n self._pso_data.velocities = 
self._new_velocities()\n new_positions = self.positions + self._pso_data.velocities\n return new_positions", "def mts_ls1(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve = False\n grade = 0.0\n for i in range(len(current_x)):\n x_old = current_x[i]\n current_x[i] = x_old - search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade = grade + bonus1\n best_x = current_x.copy()\n best_fitness = new_fitness\n if new_fitness == current_fitness:\n current_x[i] = x_old\n elif new_fitness > current_fitness:\n current_x[i] = x_old + 0.5 * search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade = grade + bonus1\n best_x = current_x.copy()\n best_fitness = new_fitness\n if new_fitness >= current_fitness:\n current_x[i] = x_old\n else:\n grade = grade + bonus2\n improve = True\n current_fitness = new_fitness\n else:\n grade = grade + bonus2\n improve = True\n current_fitness = new_fitness\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def assignRanks(self):\r\n\t\trank = 0\r\n\t\tscores = list(self._playerScores)\r\n\t\tscores.reverse()\r\n\t\tfor playerScore in scores:\r\n\t\t\tif not playerScore.has(NOT_MET) or not playerScore.value(NOT_MET):\r\n\t\t\t\trank += 1\r\n\t\t\t\tplayerScore.set(RANK, smallText(BugUtil.colorText(u\"%d\" % rank, ScoreOpt.getRankColor())))\r\n\t\tif rank > 0:\r\n\t\t\tself._anyHas[RANK] = True", "def update_score(best_score: int, new_score: int) -> int:\n if new_score > best_score:\n return new_score\n else:\n return best_score", "def set_subhead_ranking(self):\n # remove dirty rows\n # TODO: determine why the \"Col8\" values appear in rows in the first place\n self.subhead = self.subhead[self.subhead.Col8 != \"Col8\"]\n # cast all to int so that we can take `.max`\n self.subhead[\"Col8\"] = self.subhead[\"Col8\"].astype(\"int32\")\n\n self.subhead[\"ranking_score\"] = (\n self.subhead[\"Col8\"].max() - self.subhead[\"Col8\"]\n )", "def calc_scores(self, epsilon=1e-4):\n epsilon_matrix = epsilon * np.ones(self.__n)\n if self.__is_sparse:\n while True:\n hubs_old = self.__hubs\n auths_old = self.__auths\n\n self.__auths = self.__link_matrix_tr * hubs_old\n max_score = self.__auths.max(axis=0)\n if max_score != 0:\n self.__auths = self.__auths / max_score\n self.all_auths.append(self.__auths)\n\n self.__hubs = self.__link_matrix * self.__auths\n max_score = self.__hubs.max(axis=0)\n if max_score != 0:\n self.__hubs = self.__hubs / max_score\n self.all_hubs.append(self.__hubs)\n\n if (((abs(self.__hubs - hubs_old)) < epsilon_matrix).all()) and (\n ((abs(self.__auths - auths_old)) < epsilon_matrix).all()):\n break\n\n else:\n while True:\n hubs_old = self.__hubs\n auths_old = self.__auths\n\n self.__auths = np.dot(self.__link_matrix_tr, hubs_old)\n max_score = self.__auths.max(axis=0)\n if max_score != 0:\n self.__auths = self.__auths / max_score\n self.all_auths.append(self.__auths)\n\n self.__hubs = np.dot(self.__link_matrix, self.__auths)\n max_score = self.__hubs.max(axis=0)\n if max_score != 0:\n self.__hubs = self.__hubs / max_score\n self.all_hubs.append(self.__hubs)\n\n if (((abs(self.__hubs - hubs_old)) < epsilon_matrix).all()) and (\n 
((abs(self.__auths - auths_old)) < epsilon_matrix).all()):\n break", "def update_ranking(self, session: Session, sensitivity: int, verbose: bool = False) -> None:\n\n if verbose is True:\n print(f\"Updating {self.uri}...\", end=\" \")\n\n best_vector = _Analysis.make_best_vector(self.levenshtein_vector, sensitivity)\n\n self.ranking = _Ranking()\n self.ranking.score_sum = _Analysis.make_score_sum(best_vector)\n self.ranking.score_average = _Analysis.make_score_average(best_vector)\n self.ranking.score_coverage = _Analysis.make_score_coverage(best_vector)\n self.ranking.recall = _Analysis.make_recall(len(session._scheme.concepts), self.ranking.score_coverage)\n\n if verbose is True:\n print(\"updated.\")", "def sort_nodes(self, min_return=5):\n nodes = self._all_nodes()\n sorted_nodes, sorted_scores = self.scorer.sort(nodes)\n\n if len(nodes) <= min_return:\n return sorted_nodes, sorted_scores\n\n seen_hashes = set()\n best_nodes = []\n best_scores = []\n last_score = 1e16\n for score, node in zip(sorted_scores, sorted_nodes):\n if len(best_nodes) >= min_return and score < last_score:\n break\n route_actions, _ = self.search_tree.route_to_node(node)\n route_hash = self._routehash(route_actions)\n\n if route_hash in seen_hashes:\n continue\n seen_hashes.add(route_hash)\n best_nodes.append(node)\n best_scores.append(score)\n last_score = score\n\n return best_nodes, best_scores", "def rank_teams_of_curr_run(curr_score, curr_ranking):\n for place in curr_ranking:\n curr_place = get_key_with_max_value(curr_score)\n curr_ranking[place] = curr_ranking[place].__add__([curr_place])\n curr_score.pop(curr_place)\n return curr_ranking", "def update(self):\r\n debug.write(\"[SourceRPG] Updating all ranked positions\", 1)\r\n database.execute(\"SELECT steamid FROM Player ORDER BY level DESC,xp DESC\")\r\n results = database.cursor.fetchall()\r\n self.ranks = []\r\n for index, steamid in enumerate(results):\r\n debug.write(\"Rank: %s Steamid: %s\" % (index, steamid), 5)\r\n self.ranks.append(steamid[0])\r\n debug.write(\"[SourceRPG] All ranked positions updated\", 1)", "def __score_by_iceberg_level(self, source_iceberg, iceberg_to_score, iceberg_owner_after_all_groups_arrived):\n if utils.is_me(self.__game, iceberg_owner_after_all_groups_arrived):\n if source_iceberg.level <= iceberg_to_score.level:\n return SOURCE_LEVEL_SMALL_THAN_DESTINATION_SCORE\n if iceberg_to_score.level < iceberg_to_score.upgrade_level_limit:\n return LEVEL_FACTOR_SCORE ** iceberg_to_score.penguins_per_turn\n elif utils.is_neutral(self.__game, iceberg_owner_after_all_groups_arrived):\n if MapChecker.get().is_tricky_map() or MapChecker.get().is_extra_far_treasure() or MapChecker.get().is_extra_far():\n return LEVEL_FACTOR_SCORE * iceberg_to_score.penguins_per_turn\n else:\n return LEVEL_FACTOR_SCORE ** iceberg_to_score.penguins_per_turn\n return iceberg_to_score.penguins_per_turn", "def update_score():\n pass", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. 
\n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def find_optimal_low_rank_matrix( self, orig_similarity_matrix, orig_rank, u, s, v, singular_reduction ):\n '''rank_list = list()\n sum_singular_values = list()\n for rank in range( 0, orig_rank ):\n compute_result = self.compute_low_rank_matrix( u, s, v, rank + 1 )\n rank_list.append( ( rank + 1 ) / float( orig_rank ) )\n sum_singular_values.append( compute_result[ 1 ] )\n utils._plot_singular_values_rank( rank_list, sum_singular_values )'''\n return self.compute_low_rank_matrix( u, s, v, int( singular_reduction * orig_rank ) )", "def list_of_scores(self):\n with open(Constant.this_score, \"r\") as f:\n final= f.read()\n final = int(final)\n with open(Constant.list_scores, \"r\") as li:\n points_file = json.load(li)\n points = points_file['HighScores']\n\n with open(Constant.list_scores,\"w\") as files:\n\n if len(points) == 5:\n if points[0] < final:\n for i in range(len(points)-1,0,-1):\n points[i] = points[i-1]\n points[0] = final\n json.dump(points_file,files)\n elif points[0] > final:\n if points[1] < final:\n for i in range(len(points)-1,1,-1):\n points[i] = points[i-1]\n points[1] = final\n json.dump(points_file,files)\n elif points[1] > final:\n if points[2] < final:\n for i in range(len(points)-1,2,-1):\n points[i] = points[i-1]\n points[2] = final\n json.dump(points_file,files)\n elif points[2] > final:\n if points[3] < final:\n for i in range(len(points)-1,3,-1):\n points[i] = points[i-1]\n points[3] = final\n json.dump(points_file,files)\n elif points[3] > final:\n if points[4] < final:\n points[4] = final\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)\n else:\n json.dump(points_file,files)", "def best_pairing(current_end, end_dict, inverse_dict, blast_hits, l_min_score, r_min_score):\n #this duplicates part of trio_hits - should try to rewrite that to use this function\n \n l_flange = int(end_dict[current_end][1])\n l_contig = end_dict[current_end][0]\n \n #first find blast hits for the 
target scaffold end\n left_matches = []\n for hit in blast_hits:\n if hit[0] == l_contig and int(hit[11]) >= l_min_score:\n left_matches.append(hit)\n \n link_count = {}\n \n #then find other ends with correctly oriented hits adjacent to the target hits\n for slink in end_dict:\n link = end_dict[slink][0]\n \n right_matches = []\n\n for hit in blast_hits:\n if hit[0] == link and int(hit[11]) >= r_min_score: \n right_matches.append(hit)\n \n for lhit in left_matches:\n for rhit in right_matches:\n srhit = inverse_dict[rhit[0]]\n r_flange = end_dict[srhit][1]\n joint_flange = l_flange + r_flange\n \n if lhit[1] == rhit[1]:\n lh_start = int(lhit[8])\n lh_end = int(lhit[9])\n rh_start = int(rhit[8])\n rh_end = int(rhit[9])\n\n if abs(lh_start - rh_start) < joint_flange + 3000:\n if (lh_end - lh_start)/(rh_end - rh_start) < 0:\n if abs(lh_end - rh_end) > abs(lh_start - rh_start):\n link_score = int(lhit[11]) * int(rhit[11])\n if not link in link_count: \n link_count[link] = link_score\n elif link_score > link_count[link]:\n link_count[link] = link_score\n return link_count", "def next_node_dfs(search_state, last_node_is_ok):\n log_T, initial_state, min_score, max_depth, maxtraversals, node, node_idx, it, order, score, sub_info = search_state\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n n_states = log_T.shape[0]\n if it == maxtraversals:\n assert False, \"Number of traversals exceeded\"\n while True:\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if last_node_is_ok and min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return [-1], score, search_state # end of the generator, can't increase even the root\n last_node_is_ok = True # We can now make progress again, regardless of whether we could at the beginning\n it += 1\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return list(node), score, search_state # the invocation to list here is to make a copy, don't remove!", "def update_score(self):\n td = self.created - datetime.datetime(1970, 1, 1)\n epoch_seconds = td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)\n order = math.log(max(abs(self.points), 1), 10)\n sign = 1 if self.points > 0 else -1 if self.points < 0 else 0\n seconds = epoch_seconds - 1134028003\n self.score = round(order + sign * seconds / 45000, 7)", "async def handle_rank_ups(self, user: discord.User, brawler: str):\n\n brawler_data = await 
self.get_player_stat(\n user, 'brawlers', is_iter=True, substat=brawler)\n\n pb = brawler_data['pb']\n rank = brawler_data['rank']\n\n rank_as_per_pb = self.get_rank(pb)\n\n if rank_as_per_pb <= rank:\n return False\n\n await self.update_player_stat(\n user, 'brawlers', rank_as_per_pb, brawler, 'rank')\n\n rank_up_tokens = self.RANKS[str(rank)][\"PrimaryLvlUpRewardCount\"]\n\n token_doubler = await self.get_player_stat(user, 'token_doubler')\n\n upd_td = token_doubler - rank_up_tokens\n if upd_td < 0:\n upd_td = 0\n\n if token_doubler > rank_up_tokens:\n rank_up_tokens *= 2\n else:\n rank_up_tokens += token_doubler\n\n rank_up_starpoints = self.RANKS[str(rank)][\"SecondaryLvlUpRewardCount\"]\n\n await self.update_player_stat(\n user, 'tokens', rank_up_tokens, add_self=True)\n await self.update_player_stat(\n user, 'starpoints', rank_up_starpoints, add_self=True)\n await self.update_player_stat(\n user, 'token_doubler', upd_td)\n\n embed = discord.Embed(\n color=EMBED_COLOR,\n title=f\"Brawler Rank Up! {rank} → {rank_as_per_pb}\"\n )\n embed.set_author(name=user.name, icon_url=user.avatar_url)\n embed.add_field(\n name=\"Brawler\", value=f\"{brawler_emojis[brawler]} {brawler}\")\n embed.add_field(\n name=\"Tokens\", value=f\"{emojis['token']} {rank_up_tokens}\")\n if rank_up_starpoints:\n embed.add_field(\n name=\"Star Points\",\n value=f\"{emojis['starpoints']} {rank_up_starpoints}\"\n )\n if token_doubler > 0:\n embed.add_field(\n name=\"Token Doubler\",\n value=f\"{emojis['tokendoubler']} x{upd_td} remaining!\",\n inline=False\n )\n return embed", "def get_chars_to_rank_up(chars, new_rank, score_for_rank, n, m, sa, s, sb): # pylint: disable=invalid-name, too-many-arguments\n target_score = None\n target_rank = new_rank - 1\n # There are gaps in rankings for ties. Find the next highest rank.\n while True:\n assert target_rank > 0\n if target_rank in score_for_rank:\n break\n else:\n target_rank -= 1\n\n target_score = score_for_rank[target_rank]\n assert chars > sa\n if chars == s:\n # This is the top score for the language. 
Improving it will affect Sb.\n to_rank_up = floor_with_tolerance(1000 * sa * m / (target_score * (n + m) - 1000 * n))\n else:\n to_rank_up = floor_with_tolerance(1000 * sb / target_score)\n\n # Check to make sure the results make sense.\n next_sb = (n / (n + m)) * min(s, to_rank_up) + (m / (n + m)) * min(sa, to_rank_up)\n next_score = 1000 * next_sb / to_rank_up\n assert next_score > target_score or isclose(next_score, target_score)\n return to_rank_up", "def mts_ls2(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve, grade = False, 0.0\n for _ in range(len(current_x)):\n d = -1 + rng.random(len(current_x)) * 2\n r = rng.choice([0, 1, 2, 3], len(current_x))\n new_x = task.repair(np.vectorize(move_x)(current_x, r, d, search_range, operator.sub), rng)\n new_fitness = task.eval(new_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, new_x.copy(), new_fitness\n elif new_fitness != current_fitness:\n if new_fitness > current_fitness:\n new_x = task.repair(np.vectorize(move_x)(current_x, r, d, search_range, operator.add), rng)\n new_fitness = task.eval(new_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, new_x.copy(), new_fitness\n elif new_fitness < current_fitness:\n grade, current_x, current_fitness, improve = grade + bonus2, new_x.copy(), new_fitness, True\n else:\n grade, current_x, current_fitness, improve = grade + bonus2, new_x.copy(), new_fitness, True\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def find_ranking(comparisons, equal_width=0.2, max_rank=-1, verbose=False):\n # remove unnecessary variables\n comparisons = {(i, j) if i < j else (j, i): value if i < j else 1 - value\n for (i, j), value in comparisons.items()}\n nodes = np.unique(\n [i for ij in comparisons.keys() for i in ij])\n\n # define variables\n model = Model('comparison')\n model.setParam('OutputFlag', verbose)\n values = np.fromiter(comparisons.values(), dtype=float)\n assert values.max() <= 1 and values.min() >= 0\n # variables to encode the error of comparisons\n E_ij = model.addVars(comparisons.keys(), name='e_ij', vtype=GRB.CONTINUOUS,\n ub=1.0-values, lb=-values)\n # variables to encode hard choice of >=, <=, ==\n Ge_ij = model.addVars(comparisons.keys(), name='ge_ij', vtype=GRB.BINARY)\n Le_ij = model.addVars(comparisons.keys(), name='le_ij', vtype=GRB.BINARY)\n Eq_ij = model.addVars(comparisons.keys(), name='eq_ij', vtype=GRB.BINARY)\n # variables to help with transitivity in non-fully connected graphs\n if max_rank < 1:\n max_rank = len(nodes)\n R_i = model.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=max_rank)\n # variables to emulate abs\n T_ij_pos = {}\n T_ij_neg = {}\n index = (values != 1) & (values != 0)\n T_ij_pos = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_pos', lb=0, ub=1-values[index])\n T_ij_neg = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_neg', lb=0, ub=values[index])\n model.update()\n\n # emulate abs for non-binary comparisons: E_ij = T_ij_pos - T_ij_neg\n model.addConstrs(\n (E_ij[ij] == T_ij_pos[ij] - T_ij_neg[ij] for ij in T_ij_pos),\n 'E_ij = T_ij_pos - T_ij_neg')\n\n # hard decision of >=, 
<=, and ==\n lower_bound = 0.5 - equal_width / 2.0\n upper_bound = 0.5 + equal_width / 2.0\n # <=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound <= ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound >= -1 + ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_upper_bound')\n # >=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound >= -le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound <= 1 - le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_upper_bound')\n # ==\n model.addConstrs(\n (le + eq + ge == 1 for le, eq, ge in zip(\n Le_ij.values(), Eq_ij.values(), Ge_ij.values())), 'eq_ij')\n\n # transitivity\n for (i, j), eq_a in Eq_ij.items():\n le_a = Le_ij[i, j]\n ge_a = Ge_ij[i, j]\n for k in nodes:\n j_, k_ = j, k\n if j > k:\n j_, k_ = k, j\n eq_b = Eq_ij.get((j_, k_), None)\n if eq_b is None:\n continue\n else:\n le_b = Le_ij[j_, k_]\n ge_b = Ge_ij[j_, k_]\n if j_ != j:\n le_b, ge_b = ge_b, le_b\n\n i_, k_ = i, k\n if i > k:\n i_, k_ = k, i\n eq_c = Eq_ij.get((i_, k_), None)\n if eq_c is None:\n continue\n else:\n le_c = Le_ij[i_, k_]\n ge_c = Ge_ij[i_, k_]\n if i_ != i:\n le_c, ge_c = ge_c, le_c\n\n # a <= b and b <= c -> a <= c\n model.addLConstr(\n ge_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_ge_{i},{j},{k}')\n # a >= b and b >= c -> a >= c\n model.addLConstr(\n le_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_le_{i},{j},{k}')\n # a <= b and b == c -> a <= c\n model.addLConstr(\n le_a + eq_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_leeq_{i},{j},{k}')\n # a == b and b <= c -> a <= c\n model.addLConstr(\n eq_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_eqle_{i},{j},{k}')\n # a >= b and b == c --> a >= c\n model.addLConstr(\n ge_a + eq_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_geeq_{i},{j},{k}')\n # a == b and b >= c --> a >= c\n model.addLConstr(\n eq_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_eqge_{i},{j},{k}')\n # a == b and b == c --> a == c\n model.addLConstr(\n eq_a + eq_b, GRB.LESS_EQUAL, 1 + eq_c,\n f'transitivity_eq_{i},{j},{k}')\n\n # transitivity helper (for not-fully connected graphs)\n # also provides a latent rank\n big_m = max_rank\n model.addConstrs(\n ((1 - ge_ij) * big_m + R_i[i] >= R_i[j] + 1 for (i, j), ge_ij in Ge_ij.items()),\n 'rank_transitivity_larger')\n model.addConstrs(\n ((1 - le_ij) * big_m + R_i[j] >= R_i[i] + 1 for (i, j), le_ij in Le_ij.items()),\n 'rank_transitivity_smaller')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[j] >= R_i[i] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal1')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[i] >= R_i[j] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal2')\n\n # objective function\n objective = LinExpr()\n for ij, value in comparisons.items():\n if value == 1.0:\n objective += -E_ij[ij]\n elif value == 0.0:\n objective += E_ij[ij]\n else:\n objective += T_ij_pos[ij] + T_ij_neg[ij]\n model.setObjective(objective, GRB.MINIMIZE)\n\n # solve\n model.optimize()\n\n # verify abs emulation: one T_ij has to be 0\n for ij, value in T_ij_pos.items():\n assert value.X == 0 or T_ij_neg[ij] == 0, \\\n f'T_{ij} pos {value.X} neg {T_ij_neg[ij]}'\n\n # find minimal Rs\n model_ = Model('comparison')\n model_.setParam('OutputFlag', verbose)\n R_i = model_.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=len(nodes))\n for ((i, j), ge_ij), le_ij in zip(Ge_ij.items(), 
Le_ij.values()):\n if ge_ij.x == 1:\n model_.addConstr(R_i[i] >= R_i[j] + 1)\n elif le_ij.x == 1:\n model_.addConstr(R_i[j] >= R_i[i] + 1)\n else:\n model_.addConstr(R_i[j] == R_i[i])\n model_.setObjective(R_i.sum(), GRB.MINIMIZE)\n model_.optimize()\n\n return [model_.getVarByName(f'r_i[{i}]').X for i in range(len(nodes))], \\\n model.objVal", "def fit_score(self, solution):\r\n illegal_neighbours = 0\r\n legal_neighbours = 0\r\n for polygon in solution.genetic_units.values():\r\n for neighbour_id in polygon.neighbours_ids:\r\n if polygon.color is solution.genetic_units[neighbour_id].color:\r\n illegal_neighbours += 1\r\n else:\r\n legal_neighbours += 1\r\n if self.sorting_order is ScoresSortingOrder.ASCENDING:\r\n return illegal_neighbours\r\n else:\r\n return legal_neighbours", "def problem5(self, s):\n points = 0\n\n points = self.neighbor( 10, 10, s.nearest_neighbor)*3\n points += self.neighbor(100, 10, s.nearest_neighbor)*3\n points += self.neighbor( 10, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n\n _testDriver.get_code(s.nearest_neighbor)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n \n return points", "def local_aligner_score(s1, s2, gap_penalty=-1, gap_opening_penalty=-10, edit_function=utils.sub_matrices_distance, matrix=MatrixInfo.pam120):\n\n n_row = len(s1) + 1\n n_col = len(s2) + 1\n # Creates a matrix where the partial scores are stored.\n S = np.zeros((n_row, n_col))\n # Creates a matrix (stored as DataFrame) where the optimal movements are\n # stored.\n backtrack_matrix = pd.DataFrame(\"\", index=np.arange(n_row), columns=np.arange(n_col))\n\n # Initialize the first column and row of the matrices.\n # In the local aligner, we stop when a 0 is encountered, which corresponds to an \"X\"\n for i in range(n_row):\n backtrack_matrix.set_value(i, 0, \"X\")\n\n for j in range(n_col):\n backtrack_matrix.set_value(0, j, \"X\")\n \n # small optimization: keep track of the maximum score encountered so far, and its indices.\n score_max = 0\n i_max = 0\n j_max = 0\n \n for i in range(1, n_row):\n for j in range(1, n_col):\n # Compute the possible movements, and then keeps the best.\n s1_gap = max([S[i - k, j] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, i+1)])\n s2_gap = max([S[i, j - k] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, j+1)])\n mut = S[i - 1, j - 1] + edit_function(s1[i - 1], s2[j - 1], matrix=matrix)\n # In the local aligner, don't accept negative scores!\n S[i, j] = max(s1_gap, s2_gap, mut, 0)\n\n if S[i, j] >= score_max:\n score_max = S[i, j]\n i_max = i\n j_max = j\n # Write in the matrix the movement that lead to that cell, as a string.\n # e.g. 
\"HV\" means that horizontal and vertical movements were the\n # best.\n # In local alignment, \"X\" means that 0 was the maximum value, and all the movements gave a negative score.\n # The backtracking will stop when an \"X\" is encountered.\n backtrack_matrix.set_value(i, j, \"\".join(check_argmax([s1_gap, s2_gap, mut, 0])))\n \n return [score_max, S, backtrack_matrix, i_max, j_max]", "def all_nodes_dfs(log_T, initial_state, min_score, sub_info, max_depth=1000000000000000000, maxtraversals=1000000000000000000):\n # default argument for sub_info: empty_sub_info = (np.array([], dtype=int), np.array([], dtype=int), 1000000000000000000)\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n order = np.zeros(log_T.shape, np.int64)\n for i in xrange(order.shape[1]):\n order[i] = (-log_T[i]).argsort()\n n_states = log_T.shape[0]\n node = [order[initial_state, 0]] # most likely first node\n node_idx = [0]\n lengths_dfs = [-1.0]\n nodes_dfs = [[-1, ]]\n for it in xrange(maxtraversals):\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n lengths_dfs.append(-score)\n nodes_dfs.append(list(node))\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n break # end of the generator, can't increase even the root\n else:\n assert False, \"Number of traversals exceeded\"\n\n return lengths_dfs[1:], nodes_dfs[1:]", "def getTopRank(ss_def, contacts_def):\n rank_seq = {}\n contacts_true = contacts_def.keys()\n for i in range(1, len(ss_def)):\n smotif = getSmotif(ss_def[i - 1], ss_def[i])\n no_of_contacts = 0\n for j in range(smotif[0][0], smotif[0][1] + 1):\n if j in contacts_true:\n contacts = contacts_def[j]\n for contact in contacts:\n if contact in range(smotif[1][0], smotif[1][1] + 1):\n no_of_contacts += 1\n\n rank_seq[i - 1] = no_of_contacts\n top_rank = getTopSmotif(rank_seq)\n\n return top_rank, top_rank + 1", "def _leaderboard_rank(df: pd.DataFrame, public_score: float, lower_is_better: bool, own_team_id: int=CONFIG['own_team_id']) -> LeaderboardRank:\n assert public_score and type(public_score) is float\n log.info(\"Calculating leaderboard rank\")\n df = df.sort_values('Score', ascending=lower_is_better)\n df = df[df['TeamId'] != own_team_id] # filter own team\n if lower_is_better:\n team_best = df.groupby(['TeamId'])['Score'].min().sort_values()\n rank = team_best.searchsorted(public_score, side='left') + 1\n else:\n team_best = df.groupby(['TeamId'])['Score'].max().sort_values()\n rank = len(team_best) + 1 - team_best.searchsorted(public_score, side='right')\n assert rank >= 1 and rank <= len(team_best) + 1\n return LeaderboardRank(rank, len(team_best))", "def 
ranking_loss(scores, targets):\n costs = targets[1]\n true_ants = targets[2]\n weights = targets[4] if len(targets) == 5 else None\n true_ant_score = torch.gather(scores, 1, true_ants)\n top_true, _ = true_ant_score.max(dim=1)\n tmp_loss = scores.add(1).add(\n top_true.unsqueeze(1).neg()\n ) # 1 + scores - top_true\n if weights is not None:\n tmp_loss = tmp_loss.mul(weights)\n tmp_loss = tmp_loss.mul(costs)\n loss, _ = tmp_loss.max(dim=1)\n out_score = torch.sum(loss)\n n = scores.size(0) # assumed normalizer: number of score rows in the batch\n return out_score / n", "def rank_and_assign(self, cutoff_matrix_element):\r\n\r\n L0 = (qt.liouvillian(self.rotating_frame_hamiltonian, self.jump_ops))\r\n \r\n relevance_table = [[self.calculate_first_order_correction(cutoff_matrix_element,L0,ket_index=n,bra_index=m) for m in range(self.dim)] for n in range(self.dim)]\r\n relevance_table = np.asarray(relevance_table)\r\n \r\n number_of_transitions = int(self.dim*(self.dim-1)/2)\r\n transition_rank = [None for i in range(number_of_transitions)]\r\n # This loop ranks drive terms according to relevance \r\n for rank in range(number_of_transitions):\r\n max_ranked_indices = np.where(relevance_table == relevance_table.max())\r\n indices = [max_ranked_indices[0][0], max_ranked_indices[1][0]]\r\n transition_rank[rank] = [relevance_table.max(), indices]\r\n relevance_table[indices[0]][indices[1]] = relevance_table[indices[1]][indices[0]] = 0\r\n \r\n # This graphical algorithm assigns an integer to each eigenstate of the Hamiltonian based on the ranking from above\r\n integer_list = [None for i in range(self.dim)]\r\n # START ALGORITHM\r\n # initialize first term into a graph\r\n first_index = transition_rank[0][1]\r\n graph_list = [[first_index[0],first_index[1]]]\r\n integer_list[max(first_index)] = 1\r\n integer_list[min(first_index)] = 0\r\n # assign subsequent terms\r\n for i in range(1,number_of_transitions):\r\n # if no more non-zero relevance parameters, then break \r\n if transition_rank[i][0] == 0.0:\r\n break\r\n else:\r\n index = transition_rank[i][1]\r\n # scenario (i) neither states have been incorporated into the graph \r\n if integer_list[index[0]]==integer_list[index[1]]==None: \r\n integer_list[max(index)] = 1\r\n integer_list[min(index)] = 0\r\n # place them in a new graph\r\n graph_list.append([index[0],index[1]])\r\n # scenario (ii) one of the states has been incorporated, but not the other\r\n elif integer_list[index[0]]==None:\r\n if index[0] > index[1]:\r\n integer_list[index[0]] = integer_list[index[1]] + 1\r\n else:\r\n integer_list[index[0]] = integer_list[index[1]] - 1\r\n # find which graph component to put the state in (the component the other state is in)\r\n for k,graph in enumerate(graph_list):\r\n if index[1] in graph:\r\n # place that state in that graph component\r\n graph_list[k].append(index[0]) \r\n break\r\n elif integer_list[index[1]]==None:\r\n if index[0] > index[1]:\r\n integer_list[index[1]] = integer_list[index[0]] - 1\r\n else:\r\n integer_list[index[1]] = integer_list[index[0]] + 1\r\n for k,graph in enumerate(graph_list):\r\n if index[0] in graph:\r\n graph_list[k].append(index[1])\r\n break\r\n # scenario (iii) both states have already been incorporated in the graph\r\n else:\r\n # find the graph components where these states have been placed\r\n for k,graph in enumerate(graph_list):\r\n overlap = list(set(index) & set(graph))\r\n # subscenario: the states are in the same graph component, hence a cycle, so nothing can be done\r\n if (len(overlap) == 2):\r\n break\r\n # subscenario: the states are in two disjoint graph components\r\n 
elif (len(overlap) == 1):\r\n fixed_index = overlap[0]\r\n shift_index = list(set(index) - set(graph))[0]\r\n old_integer = integer_list[shift_index]\r\n if shift_index > fixed_index:\r\n new_integer = integer_list[fixed_index] + 1\r\n else:\r\n new_integer = integer_list[fixed_index] - 1\r\n shift_amount = new_integer - old_integer\r\n # merge one graph component into the other\r\n for j,graph2 in enumerate(graph_list):\r\n if shift_index in graph2:\r\n for m,index2 in enumerate(graph2):\r\n integer_list[index2] = integer_list[index2] + shift_amount\r\n graph_list[k] = graph_list[k] + graph2\r\n graph_list.pop(j)\r\n break\r\n break\r\n else:\r\n continue\r\n continue\r\n # Just in case, if a state was not assigned an integer due to not participating in dynamics, set its integer to 0\r\n for i,integer in enumerate(integer_list):\r\n if integer == None:\r\n integer_list[i] = 0\r\n ## END algorithm\r\n return transition_rank, integer_list", "def ranks_from_scores(scores, rank_gap=1e-15):\n prev_score = None\n rank = 0\n for i, (key, score) in enumerate(scores):\n try:\n if abs(score - prev_score) > rank_gap:\n rank = i\n except TypeError:\n pass\n\n yield key, rank\n prev_score = score", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def check_high_score(self):\r\n if self.stats.score > self.stats.high_score:\r\n self.stats.high_score = self.stats.score\r\n self.prep_placar_score()", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def scoreR(self) :\n if self.leafR() :\n return self.leafScore(), self\n else :\n games = self.R()\n min_g = games[0]\n min_score = min_g.scoreL()\n for g in games[1:] :\n score = g.scoreL()\n if score[0] < min_score[0] :\n min_g = g\n min_score = score\n return (min_score+(min_g,))", "def _add_ranks(standings, key):\n prev_key = None\n current_rank = 0\n for i, team in enumerate(standings, start=1):\n this_key = key(team)\n if this_key != prev_key:\n current_rank = i\n prev_key = this_key\n team.rank = current_rank", "def ltiecorrect(rankvals):\r\n sorted,posn = shellsort(rankvals)\r\n n = len(sorted)\r\n T = 0.0\r\n i = 0\r\n while (i<n-1):\r\n if sorted[i] == sorted[i+1]:\r\n nties = 1\r\n while (i<n-1) and (sorted[i] == sorted[i+1]):\r\n nties = nties +1\r\n i = i +1\r\n T = T + nties**3 - nties\r\n i = i+1\r\n T = T / float(n**3-n)\r\n return 1.0 - T", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def get_overwrite_res(score, submit, known, min_thre, top_n=1):\n\n res = {}\n for i, k in enumerate(known):\n temp_score_index = np.argsort(score[:, i])[-top_n:][::-1]\n for ind in temp_score_index:\n if score[:, i][ind] < min_thre:\n break\n this_score = score[:, i][ind]\n this_test_image = submit[ind]\n if this_test_image in res:\n if tagged[k] != res[this_test_image]['label']:\n if this_score > res[this_test_image]['score']:\n res[this_test_image]['label'] = tagged[k]\n res[this_test_image]['score'] = this_score\n else:\n res[this_test_image] = {}\n res[this_test_image]['score'] = this_score\n res[this_test_image]['label'] = tagged[k]\n\n return res", "def partition(data, s, b, u, res, points, size, depth):\r\n\t# depth is just for demonstration purposes, terminating the recursion early\r\n\t\r\n\t# termination 
conditions\r\n\tif size > 1 and depth > 0:\r\n\r\n\t\t# variables that keep track of the scope of \"points\" for iteration purposes\r\n\t\trlen = []\r\n\t\tclen = len(points)\r\n\t\tfor i in range(clen):\r\n\t\t\trlen.append(len(points[i]))\r\n\t\t\r\n\t\t# keeps track of which point defines the maximal set\r\n\t\tmax = -10000\r\n\t\tmax_index = [0,0]\r\n\r\n\t\t# each point on the grid defines a potentially maximal set (including that point and the best \r\n\t\t# choice for higher rows) s[x][y] tracks the value of the set defined by (x, y)\r\n\t\tfor i in range(len(points)):\r\n\t\t\t# calculating s based on current row\r\n\t\t\ts[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]] = data[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]]\r\n\t\t\tfor j in range(rlen[i] - 2, -1, -1):\r\n\t\t\t\ts[points[i][j][0]][points[i][j][1]] = s[points[i][j + 1][0]][points[i][j + 1][1]] + data[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\r\n\t\t\t# if below the first row, factoring in the optimal set from above rows\r\n\t\t\tif i != 0:\r\n\t\t\t\tprev_end = points[i-1][rlen[i-1]-1]\r\n\t\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\t\tu[points[i][j][0]][points[i][j][1]] = b[prev_end[0]][np.minimum(prev_end[1], points[i][j][1])]\r\n\t\t\t\t\ts[points[i][j][0]][points[i][j][1]] += s[prev_end[0]][u[points[i][j][0]][points[i][j][1]]]\r\n\t\t\t\r\n\t\t\t# keeping track of the best sets from the new row for later use (what b and u are for)\r\n\t\t\trow_max = -10000\r\n\t\t\trow_max_index = -1\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tcurr = s[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\tif curr > row_max:\r\n\t\t\t\t\trow_max = curr\r\n\t\t\t\t\trow_max_index = points[i][j][1]\r\n\t\t\t\tb[points[i][j][0]][points[i][j][1]] = row_max_index\r\n\r\n\t\t\t# updating the global optimal set\r\n\t\t\tif row_max > max:\r\n\t\t\t\tmax = row_max\r\n\t\t\t\tmax_index[0] = i\r\n\t\t\t\tmax_index[1] = row_max_index\r\n\t\t\r\n\t\t# finding the set of points that generated the global optimum\r\n\t\tpointers = []\r\n\t\tpointers.append(max_index[1])\r\n\t\tfor i in range(max_index[0], 0, -1):\r\n\t\t\tpointers.append(u[points[i][0][0]][pointers[max_index[0]-i]])\r\n\t\tpointers = np.flip(pointers, axis=0)\r\n\t\t\r\n\t\t# finding the set of points of the upper and lower partitions defined by the optimal set\r\n\t\tupper_points = []\r\n\t\tlower_points = []\r\n\t\tup_num = 0\r\n\t\tlow_num = 0\r\n\t\tfor i in range(clen):\r\n\t\t\turow = []\r\n\t\t\tlrow = []\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tif i <= max_index[0] and points[i][j][1] >= pointers[i]:\r\n\t\t\t\t\turow.append(points[i][j])\r\n\t\t\t\t\tup_num += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tlrow.append(points[i][j])\r\n\t\t\t\t\tlow_num += 1\r\n\t\t\tif len(urow) > 0:\r\n\t\t\t\tupper_points.append(tuple(urow))\r\n\t\t\tif len(lrow) > 0:\r\n\t\t\t\tlower_points.append(tuple(lrow))\r\n\r\n\t\t# updating the final result and prepping the new datasets to have mean 0\r\n\t\tfor i in range(len(upper_points)):\r\n\t\t\tfor j in range(len(upper_points[i])):\r\n\t\t\t\tres[upper_points[i][j][0]][upper_points[i][j][1]] += max/up_num\r\n\t\t\t\tdata[upper_points[i][j][0]][upper_points[i][j][1]] -= max/up_num\r\n\t\tfor i in range(len(lower_points)):\r\n\t\t\tfor j in range(len(lower_points[i])):\r\n\t\t\t\tres[lower_points[i][j][0]][lower_points[i][j][1]] -= max/low_num\r\n\t\t\t\tdata[lower_points[i][j][0]][lower_points[i][j][1]] += max/low_num\r\n\t\t\r\n\t\t# recursion (if the optimal set is the current one, stop since at this point \r\n\t\t# the mean of the selected 
elements is optimal over them)\r\n\t\tif up_num != size:\r\n\t\t\tpartition(data, s, b, u, res, upper_points, up_num, depth-1)\r\n\t\tif low_num != size:\r\n\t\t\tpartition(data, s, b, u, res, lower_points, low_num, depth-1)\r\n\telse:\r\n\t\treturn", "def _compute_rank(self):\n# print(Card((self.ranks[0]),self.suits[0]))\n# print(Card((self.ranks[1]),self.suits[1]))\n# print(Card((self.ranks[2]),self.suits[2]))\n# print(Card.ranks[self.ranks[0]])\n# #print(Card.ranks[self.ranks[0]+1])\n# print(self.ranks[1])\n# print(Card.suits[self.suits[1]])\n a = ['Ace','2','3']\n newlist =[self.ranks[0],self.ranks[1],self.ranks[2]]\n newlist = sorted(newlist)\n if(Card.suits[self.suits[0]] == Card.suits[self.suits[1]] == Card.suits[self.suits[2]]):\n #a = ['Ace','2','3']\n if(Card.ranks[self.ranks[0]] in a) and (Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a):\n self.rank=5\n else:\n if(newlist[1] - newlist[0]) == 1 and (newlist[2]-newlist[1])==1:\n #StraightFlush\n self.rank=5\n else:\n #Flush\n self.rank=2\n \n #Threeofakind\n elif (Card.ranks[self.ranks[0]] == Card.ranks[self.ranks[1]] == Card.ranks[self.ranks[2]]):\n self.rank=4\n #Pair\n elif(Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[1]] or Card.ranks[self.ranks[0]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[1]]==Card.ranks[self.ranks[2]] or Card.ranks[self.ranks[2]]==Card.ranks[self.ranks[1]]):\n self.rank=1 \n #Straight\n elif(((newlist[1] - newlist[0]) == 1) and (newlist[2]-newlist[1])==1):\n self.rank=3\n \n elif((Card.ranks[self.ranks[0]] in a) and (Card.ranks[self.ranks[1]] in a) and (Card.ranks[self.ranks[2]] in a)):\n if(Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]] != Card.ranks[self.ranks[2]]):\n #if((Card.ranks[self.ranks[0]] != Card.ranks[self.ranks[1]]) and (Card.ranks[self.ranks[0]]!= Card.ranks[self.ranks[2]])and (Card.ranks[self.ranks[1]]!= Card.ranks[self.ranks[2]])):\n self.rank=3\n\n else:\n self.rank=0\n #pass", "def rerank(test_predicted_paraphrases, test_features, ranker, minimum_score):\n new_test_predicted_paraphrases = { (w1, w2) : [] for (w1, w2) in test_predicted_paraphrases.keys() }\n\n for ((w1, w2), curr_paraphrases), curr_paraphrase_features in tqdm.tqdm(zip(\n test_predicted_paraphrases.items(), test_features)):\n pars_and_vectors = zip(curr_paraphrases.items(), curr_paraphrase_features)\n\n # Sort the paraphrases according to the ranking\n def compare_paraphrases(p1, p2):\n return ranker.predict((p2[1] - p1[1]).reshape(1, -1))\n\n # Consider both the original score (for the specific noun-compound)\n # and the new rank (which paraphrases are more commonly ranked higher)\n sorted_paraphrases = [(paraphrase, (len(curr_paraphrases) - rank) * float(score))\n for rank, ((paraphrase, score), feature) in\n enumerate(sorted(pars_and_vectors,\n key=functools.cmp_to_key(compare_paraphrases)))]\n\n sorted_paraphrases = sorted(sorted_paraphrases, key=lambda x: x[1], reverse=True)\n\n # Keep only paraphrases with score above threshold. 
Best score = k * 1 = k,\n new_test_predicted_paraphrases[(w1, w2)] = \\\n [(paraphrase, score) for (paraphrase, score) in sorted_paraphrases if score >= minimum_score]\n\n return new_test_predicted_paraphrases", "def compute_scores(self, *scorers):\n if self.nodes[0]:\n list_ = self.nodes\n else:\n list_ = self.reaction_trees\n\n for idx, item in enumerate(list_):\n scores = {repr(scorer): scorer(item) for scorer in scorers}\n self.all_scores[idx].update(scores)\n self._update_route_dict(self.all_scores, \"all_score\")", "def mts_ls1v1(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n sr_fix=0.4, **_kwargs):\n if not improve:\n search_range /= 2\n i_fix = np.argwhere(search_range < 1e-15)\n search_range[i_fix] = task.range[i_fix] * sr_fix\n improve, d, grade = False, rng.uniform(-1, 1, task.dimension), 0.0\n for i in range(len(current_x)):\n x_old = current_x[i]\n current_x[i] = x_old - search_range[i] * d[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, current_x.copy(), new_fitness\n elif new_fitness == current_fitness:\n current_x[i] = x_old\n elif new_fitness > current_fitness:\n current_x[i] = x_old + 0.5 * search_range[i]\n current_x = task.repair(current_x, rng)\n new_fitness = task.eval(current_x)\n if new_fitness < best_fitness:\n grade, best_x, best_fitness = grade + bonus1, current_x.copy(), new_fitness\n elif new_fitness >= current_fitness:\n current_x[i] = x_old\n else:\n grade, improve, current_fitness = grade + bonus2, True, new_fitness\n else:\n grade, improve, current_fitness = grade + bonus2, True, new_fitness\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def score(self, candidate_holder, new_scores):\n cand_seqs = candidate_holder.cand_seqs\n cand_states = candidate_holder.cand_states\n cand_syms = cand_seqs[:, -1]\n\n cand_state_value = []\n cand_score_value = []\n for j in range(cand_states[self.state_index].shape[0]):\n cand_state_value.append(cand_states[self.state_index][j][cand_syms[j]])\n cand_score_value.append(cand_states[self.score_index][j][cand_syms[j]])\n ctc_score_result = []\n ctc_score_total = []\n new_states = []\n for i in tf.range(new_scores.shape[0]):\n num_sym_state = np.array([self.init_state] * self.num_classes)\n num_sym_score = np.array([0.0] * self.num_classes, dtype=np.float32)\n num_sym_score_minus = np.array([0.0] * self.num_classes, dtype=np.float32)\n cand_seq = cand_seqs[i]\n ctc_pre_state = cand_state_value[i]\n top_ctc_candidates = np.argsort(new_scores[i, :])\n top_ctc_candidates = sorted(top_ctc_candidates[-self.ctc_beam :].tolist())\n cand_seq = np.array(cand_seq)\n top_ctc_candidates = np.array(top_ctc_candidates)\n ctc_pre_state = ctc_pre_state.numpy()\n ctc_score, new_state = self.cand_score(\n cand_seq, top_ctc_candidates, ctc_pre_state\n )\n ctc_pre_score = tf.cast(cand_score_value[i], tf.float32)\n ctc_score_minus = self.ctc_weight * (ctc_score - ctc_pre_score) + 500\n\n for k in range(len(top_ctc_candidates)):\n num_sym_score[top_ctc_candidates[k]] = ctc_score[k]\n num_sym_score_minus[top_ctc_candidates[k]] = ctc_score_minus[k]\n num_sym_state[top_ctc_candidates[k]] = new_state[k]\n num_sym_score_minus -= 500\n ctc_score_result.append(num_sym_score_minus)\n ctc_score_total.append(num_sym_score)\n new_states.append(num_sym_state)\n cand_states[self.state_index] = tf.convert_to_tensor(np.array(new_states))\n 
ctc_score_result = tf.convert_to_tensor(np.array(ctc_score_result))\n ctc_score_total = tf.convert_to_tensor(np.array(ctc_score_total))\n cand_states[self.score_index] = ctc_score_total\n return ctc_score_result, cand_states", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def evaluate_ranks(articles, rank_tuples):\n rank_tp=defaultdict(int)\n rank_fn=defaultdict(int)\n rank_fp=defaultdict(int)\n \n for article in articles:\n for mention in article.entity_mentions:\n form=mention.mention\n meaning=mention.gold_link\n sys_meaning=mention.sys_link\n t_gold=(form, meaning)\n t_sys=(form, sys_meaning)\n for rank, r_tuples in rank_tuples.items():\n if t_gold in r_tuples and t_sys in r_tuples:\n rank_tp[rank]+=1\n break\n elif t_gold in r_tuples:\n rank_fn[rank]+=1\n elif t_sys in r_tuples:\n rank_fp[rank]+=1\n print('tp', rank_tp)\n print('fp', rank_fp)\n print('fn', rank_fn)\n \n rank_prec={}\n rank_recall={}\n rank_f1={}\n \n for rank in range(1,13):\n if rank_tp[rank]+rank_fp[rank]>0:\n rank_prec[rank]=rank_tp[rank]/(rank_tp[rank]+rank_fp[rank])\n else:\n rank_prec[rank]=0.0\n if rank_tp[rank]+rank_fn[rank]>0:\n rank_recall[rank]=rank_tp[rank]/(rank_tp[rank]+rank_fn[rank])\n else:\n rank_recall[rank]=0.0\n if rank_prec[rank]+rank_recall[rank]>0:\n rank_f1[rank]=2*rank_prec[rank]*rank_recall[rank]/(rank_prec[rank]+rank_recall[rank])\n else:\n rank_f1[rank]=0.0\n print('precision', rank_prec)\n print()\n print('recall', rank_recall)\n print()\n print('f1', rank_f1)\n print()\n return rank_prec, rank_recall, rank_f1", "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def updateScores(rankedLists):\n docToRank = {}\n for rankedList in rankedLists:\n\n f = open(rankedList, 'r')\n for line in f:\n documentID = line.split()[2]\n docno = documentID\n score = float(line.split()[4])\n position = int(line.split()[3])\n docToRank[docno] = (position,score)\n f.close()\n client = MongoClient('asr2.iem.technion.ac.il',27017)\n db = client.asr16\n documents = db.documents.find({})\n for document in documents:\n key = document[\"query_id\"]+\"-\"+document[\"username\"]\n document['position'] = docToRank[key][0]\n document['score'] = docToRank[key][1]\n document['posted_document'] = document['current_document']\n db.documents.save(document)", "def r_point(self):\n self.r_score += 1\n self.update_scoreboard()", "def get_node_scores(scores_file,G):\n scores = {}\n with open(scores_file) as f:\n for line in f:\n split = line.strip().split()\n scores[split[0]] = float(split[1])\n for nd in G.nodes():\n G.add_node(nd, score=scores[nd])", "def ConstrRank():\n with open(path.join(MAIN_PATH, RANK)) as f:\n ranked_data = []\n for line in f:\n ranked_data.append(line.strip().split()[0]) \n\n threshold = 5000\n global rank_less\n global rank_more\n rank_less = ranked_data[:threshold]\n rank_more = ranked_data[threshold:]\n\n with open(path.join(MAIN_PATH, INST)) as f:\n for line in f:\n line = line.strip().split(\",\")\n exists.append(line[0:2])", "def 
variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def cull(self):\r\n\r\n # From each node in population we get [node_index, node_score] in population_ranking\r\n population_ranking = [[x, self.score(self.population[x])] for x in \\\r\n range(len(self.population))]\r\n population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest to highest\r\n\r\n # The new population is the top population_size guys as ranked\r\n # x[0] is the index of the node\r\n self.population = [self.population[x[0]] for x in population_ranking[-self.population_size:]]\r\n # The actual scores, with the same indices as their node counterparts in population\r\n self.ranking = [x[1] for x in population_ranking[-self.population_size:]]\r\n\r\n #score keeping\r\n self.complete_scores.append(self.ranking)\r\n minimum = self.ranking[0]\r\n maximum = self.ranking[-1]\r\n mean = sum(self.ranking)/self.population_size\r\n median = self.ranking[math.ceil(self.population_size/2)]\r\n self.summary_scores.append([minimum, maximum, mean, median])", "def atiecorrect(rankvals):\r\n sorted,posn = ashellsort(N.array(rankvals))\r\n n = len(sorted)\r\n T = 0.0\r\n i = 0\r\n while (i<n-1):\r\n if sorted[i] == sorted[i+1]:\r\n nties = 1\r\n while (i<n-1) and (sorted[i] == sorted[i+1]):\r\n nties = nties +1\r\n i = i +1\r\n T = T + nties**3 - nties\r\n i = i+1\r\n T = T / float(n**3-n)\r\n return 1.0 - T", "def calc_score(pins_stats):\n count = 0\n new = pins_stats[:, :2] - ORIG_PINS_LOC\n for p in new:\n if np.linalg.norm(p) > R_PIN / 2:\n count += 1\n return count", "def get_rank(points: int, cutoffs: List[int]) -> int:\n rank = 0\n for i, cutoff in enumerate(cutoffs):\n if points < 
cutoff:\n if i == 0:\n break\n else:\n rank = i - 1\n break\n else:\n rank = RANK_COUNT - 1\n\n return rank", "def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod", "def rescore(self, scorer):\n if self.nodes[0]:\n self.nodes, self.scores, sortidx = scorer.sort(\n self.nodes, return_sort_indices=True\n )\n self.reaction_trees = [self.reaction_trees[idx] for idx in sortidx]\n else:\n self.reaction_trees, self.scores, sortidx = scorer.sort(\n self.reaction_trees, return_sort_indices=True\n )\n self._routes = [self._routes[idx] for idx in sortidx]\n self.all_scores = [self.all_scores[idx] for idx in sortidx]\n if self._dicts:\n self._dicts = [self._dicts[idx] for idx in sortidx]\n if self._images:\n self._images = [self._images[idx] for idx in sortidx]\n if self._jsons:\n self._jsons = [self._jsons[idx] for idx in sortidx]\n\n for idx, score in enumerate(self.scores):\n self.all_scores[idx][repr(scorer)] = score\n self._update_route_dict(self.all_scores, \"all_score\")", "def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)", "def adjust_fitness_scores(self):\n\n for species in self.species:\n species.adjust_fitness()", "def update_lowest_sell(self, limit):\n if limit.size == 0:\n #successor case\n limit = self.sell_tree.successor(limit)\n if limit is None:\n #no successor\n self.lowest_sell = None\n else: #have a successor, but dont know if it has orders or not\n if limit.size == 0:#limit has no orders but other limits in the tree might have orders\n if self.sell_tree.size == 0: #we know, no other limits have an order\n self.lowest_sell = None\n else: #other limits have an order\n while limit.size == 0:\n limit = self.sell_tree.successor(limit)\n # now our limit has a valid order, and we've found the first valid successor\n self.lowest_sell = limit.price\n else: #limit has an order, we found the valid successor!\n self.lowest_sell = limit.price", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def RScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 5\n elif x <= d[p][0.4]:\n return 4\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 2\n else:\n return 1", "def branchize(self):\n near_ones = self.get_near_ones()\n\n if self.current_branch[\"g_score\"] == 31:\n return\n\n for item in near_ones:\n\n if self.current_branch.get(\"move\") and self.current_branch[\"move\"] == item:\n continue\n\n self.change(item)\n\n if self.astar:\n serialized = self.serialize()\n if serialized in self.previous_branches:\n self.change(item)\n continue\n else:\n self.previous_branches.append(serialized)\n\n a_branch = {\n \"status\" : True,\n \"move\" : item,\n \"g_score\" : self.current_branch[\"g_score\"] + 1,\n \"h_score\" : 
self.calc_manhattan(self.goal),\n \"branches\" : [],\n \"parent\" : self.current_branch\n }\n a_branch[\"f_score\"] = a_branch[\"g_score\"] + a_branch[\"h_score\"]\n\n self.current_branch[\"branches\"].append(a_branch)\n self.score_scheme.append((a_branch[\"f_score\"], a_branch))\n self.change(item)\n\n self.score_scheme.sort(key=lambda x: x[0])", "def minimum_spanning_arborescence(sol):", "def new_evaluate(board):\n\n #Logic for new_evaluate function:\n #1)Traverse through each of the columns\n #2)For each of the columns, find the top most element.\n\t #If the topmost element = Current Player\n\t\t \t#3)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a POSITIVE value\n\t #Else\n\t\t \t#4)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a NEGATIVE value\n #5)Sort these Positive and Negative scores\n #6)IF the highest negative score is greater than the highest positive score, then it means that the opposition has MORE chances to WIN.\n #So, that has to be blocked and so we will return that HIGHEST NEGATIVE value as the score for that board\n #7)ELSE we go ahead and return the HIGHEST POSITIVE value as the score for that board\n #->This logic has increasing the AGGRESSION of the player a lot and it makes senses we hope.\n\n posdict = {}\n negdict = {}\n for col in range(7):\n if(board.get_top_elt_in_column(col)==board.get_current_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = board._max_length_from_cell(rowValue,col)\n posdict[col]=score\n elif(board.get_top_elt_in_column(col)==board.get_other_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = -(board._max_length_from_cell(rowValue,col))\n negdict[col]=score\n\n\n sorted(posdict.values(),reverse= True)\n sorted(negdict.values())\n if((bool(posdict))and (bool(negdict))):\n if(abs(negdict.values()[0]) >= ((posdict.values()[0]))):\n return negdict[negdict.keys()[0]]\n else:\n return posdict[posdict.keys()[0]]\n elif(bool(posdict)):\n return posdict[posdict.keys()[0]]\n elif(bool(negdict)):\n return negdict[negdict.keys()[0]]\n else:\n return 0", "def mts_ls3(current_x, current_fitness, best_x, best_fitness, improve, search_range, task, rng, bonus1=10, bonus2=1,\n **_kwargs):\n x_new, grade = np.copy(current_x), 0.0\n for i in range(len(current_x)):\n x1, x2, x3 = np.copy(x_new), np.copy(x_new), np.copy(x_new)\n x1[i], x2[i], x3[i] = x1[i] + 0.1, x2[i] - 0.1, x3[i] + 0.2\n x1, x2, x3 = task.repair(x1, rng), task.repair(x2, rng), task.repair(x3, rng)\n x1_fit, x2_fit, x3_fit = task.eval(x1), task.eval(x2), task.eval(x3)\n if x1_fit < best_fitness:\n grade, best_x, best_fitness, improve = grade + bonus1, x1.copy(), x1_fit, True\n if x2_fit < best_fitness:\n grade, best_x, best_fitness, improve = grade + bonus1, x2.copy(), x2_fit, True\n if x3_fit < best_fitness:\n grade, best_x, best_fitness, improve = grade + bonus1, x3.copy(), x3_fit, True\n d1, d2, d3 = current_fitness - x1_fit if np.abs(x1_fit) != np.inf else 0, current_fitness - x2_fit if np.abs(\n x2_fit) != np.inf else 0, current_fitness - x3_fit if np.abs(x3_fit) != np.inf else 0\n if d1 > 0:\n grade, improve = grade + bonus2, True\n if d2 > 0:\n grade, 
improve = grade + bonus2, True\n if d3 > 0:\n grade, improve = grade + bonus2, True\n a, b, c = 0.4 + rng.random() * 0.1, 0.1 + rng.random() * 0.2, rng.random()\n x_new[i] += a * (d1 - d2) + b * (d3 - 2 * d1) + c\n x_new = task.repair(x_new, rng)\n x_new_fitness = task.eval(x_new)\n if x_new_fitness < current_fitness:\n if x_new_fitness < best_fitness:\n best_x, best_fitness, grade = x_new.copy(), x_new_fitness, grade + bonus1\n else:\n grade += bonus2\n current_x, current_fitness, improve = x_new, x_new_fitness, True\n return current_x, current_fitness, best_x, best_fitness, improve, grade, search_range", "def node_assignment_score(edge_index: nb.int64[:,:],\n edge_scores: nb.float32[:,:],\n n: nb.int64) -> nb.int64[:]:\n return edge_assignment_score(edge_index, edge_scores, n)[1]", "def add_to_open(open, neighbour):\n for node in open:\n if neighbour == node and neighbour.f >= node.f:\n # Will not add if there already exists the same node in open that has lower f value\n return False\n\n return True", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def ranking(orig_data):\n data = np.copy(orig_data)\n values = np.sort(data)\n rank = np.zeros(data.shape)\n r = 0\n for i in range(values.shape[0]):\n for j in range(data.shape[0]):\n if data[j] == values[i]:\n rank[j] = r\n data[j] = 9223372036854775807 # MaxInt\n break\n if i < values.shape[0]-1 and values[i] < values[i+1]:\n r = i + 1\n return rank", "def calculate_power_ranking(wins, losses, is_winning_streak,\n streak_length, recent_wins, recent_losses):\n pred1 = 0\n pred2 = round( ( ((wins*1.0)/(wins+losses)) - 0.500 ) * 16 * 9 * 10/9 )\n pred3 = recent_wins - recent_losses\n streak_factor = 1 if is_winning_streak else -1\n pred4 = streak_factor * round( ( streak_length - 1 ) / 2.0 )\n print pred1, pred2, pred3, pred4\n return pred1 + pred2 + pred3 + pred4", "def _compute_ranks(df, lower_better=True):\n # return df.rank(axis=1, numeric_only=True, ascending=lower_better)\n return df.rank(axis=1, numeric_only=True, ascending=lower_better, method='min')", "def get_hit_rank(box_pred,\n boxes_truth,\n topn,\n iou_th=IOU_THRESHOLD,\n is_match=is_equal):\n # Go through all the boxes of ground truth.\n # Find the one with max iou as the candidate.\n max_iou = 0.0\n for box in boxes_truth:\n iou = box_pred.iou(box)\n if iou > max_iou:\n candidate = box\n max_iou = iou\n\n # If there is no box overlapped, it returns the result directly.\n if max_iou == 0.0:\n return {\n 'max_iou': 0.0,\n 'is_box_detected': False,\n 'rank': -1,\n 'label': '',\n }\n\n # Check the rank the labels predicted match the ground truth.\n # Note that we check the labels whether the max_iou\n # is greater than the threshold or not since we want to analyze\n # the results for the localization error case (right label, low iou)\n pred_labels = [x['label'] for x in box_pred.pred_labels()]\n truth_label = candidate.label()\n for i in range(0, min(topn, len(pred_labels))):\n print('>> Label to check: Predict: {}, Truth: {}'.format(\n pred_labels[i], truth_label))\n if is_match(pred_labels[i], truth_label):\n return {\n 'max_iou': max_iou,\n 'is_box_detected': (max_iou 
> iou_th),\n 'rank': i,\n 'label': pred_labels[i],\n }\n\n # If all the labels predicted are not matched to the ground truth,\n # it returns rank '-1' to identify that there is no match.\n return {\n 'max_iou': max_iou,\n 'is_box_detected': (max_iou > iou_th),\n 'rank': -1,\n 'label': '',\n }" ]
[ "0.5954974", "0.59247744", "0.5833818", "0.56625795", "0.56450963", "0.5566416", "0.55646116", "0.5558839", "0.5552098", "0.5540993", "0.55369794", "0.5497016", "0.546615", "0.54586864", "0.5453142", "0.54514194", "0.54451036", "0.5403085", "0.53968745", "0.53879005", "0.5386224", "0.5385344", "0.5380636", "0.53713304", "0.5365572", "0.53551066", "0.53168297", "0.5315478", "0.53066045", "0.52907884", "0.52872723", "0.5279639", "0.5272004", "0.5254786", "0.5250517", "0.52486205", "0.5245531", "0.52126855", "0.5200404", "0.5196778", "0.5195573", "0.51946086", "0.5188245", "0.5184223", "0.51716685", "0.51681364", "0.5162089", "0.516102", "0.5155067", "0.5146964", "0.51461315", "0.5142303", "0.51421", "0.5140759", "0.5139989", "0.51346266", "0.5129334", "0.5125113", "0.50988984", "0.50873697", "0.5082745", "0.50799334", "0.5076277", "0.5074855", "0.5065438", "0.5064226", "0.5061735", "0.5055844", "0.50411963", "0.5027537", "0.5027132", "0.5027132", "0.50091636", "0.5004842", "0.50040275", "0.49968305", "0.49966782", "0.49882954", "0.4986689", "0.49854583", "0.49710324", "0.4969437", "0.496802", "0.49660617", "0.4962803", "0.4950497", "0.49500903", "0.49500903", "0.49495393", "0.49427193", "0.49407062", "0.4931382", "0.49299517", "0.4928942", "0.49275288", "0.49242383", "0.4918987", "0.49174362", "0.4912278", "0.49067712" ]
0.6555181
0
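The three trailing fields of each record — negative_scores, document_score, and document_rank — appear to relate the positive document to its negatives: in the record ending here, the document's score of 0.6555181 exceeds every negative score, which is consistent with its rank of 0, while the -1 in the next record (whose score is 0.0) looks like a sentinel for an unscored pair. A minimal Python sketch of that reading follows; these field semantics are an assumption inferred from the visible values, not documented by the dataset.

# Assumed semantics (not documented): rank = number of negatives scoring at
# least as high as the document, with -1 as a sentinel for unscored pairs.
def infer_document_rank(document_score, negative_scores):
    if document_score == 0.0:  # unscored pair (assumption)
        return -1
    return sum(s >= document_score for s in negative_scores)

# Record above: 0.6555181 beats the top negative 0.5954974 -> rank 0.
print(infer_document_rank(0.6555181, [0.5954974, 0.59247744, 0.5833818]))  # 0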
Update the class variables after the algorithm execution
def _update_vars(self, axis, traj_s, traj_o, rank_s, rank_o, t): if axis == 0: self.x_traj = traj_s self.x_ranking = rank_s self.x_scores = traj_s[-1] self.inverse_y_traj = traj_o self.inverse_y_ranking = rank_o self.inverse_y_scores = traj_o[-1] if axis == 1: self.y_traj = traj_s self.y_ranking = rank_s self.y_scores = traj_s[-1] self.inverse_x_traj = traj_o self.inverse_x_ranking = rank_o self.inverse_x_scores = traj_o[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def algorithm_loop(self):", "def updateVariables(self) -> None:\n ...", "def _update(self):\n pass", "def update(self):", "def update(self):", "def update(self):", "def update(self):\r\n pass", "def update(self):\n\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n ## Initialize\n self.domain.update()\n self.var = self.domain.var.copy()\n self.out = []\n\n ## Construct var and out, respecting DAG properties\n for fun in self.functions:\n self.var = list(set(self.var).union(set(fun.var).difference(set(self.out))))\n\n self.out = list(set(self.out).union(set(fun.out)))\n\n try:\n self.var_rand = list(self.density.marginals.keys())\n except AttributeError:\n self.var_rand = []\n self.var_det = list(set(self.var).difference(self.var_rand))\n\n ## TODO parameters\n\n ## Convenience constants\n self.n_var = len(self.var)\n self.n_var_rand = len(self.var_rand)\n self.n_var_det = len(self.var_det)\n self.n_out = len(self.out)", "def update(self, iteration):\n pass", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def update(self):\n self.brain.update()", "def Update(self):\r\n\r\n # does nothing\r\n pass", "def process(self):", "def process(self):", "def process(self):", "def update(self):\n raise NotImplementedError", "def _update(self):\n self.cv.update()", "def prob_update(self):\n pass", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n 
self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def update(self):\n # default implementation is to do nothing.", "def on_iteration_start(self):\n\n self.Xprv = self.X.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yprv = self.Y.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn", "def _update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def processing(self):\n pass", "def update(self) -> None:\n ...", "def on_iteration_start(self):\n\n self.Xfprv = self.Xf.copy()\n if (not self.opt['FastSolve'] or 
isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yfprv = self.Yf.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn", "def _update_(self):\n self._update_distance_()\n self._check_literature_name_()", "def apply_algorithm(self):\n pass", "def update():", "def update():", "def update( ):\r\n pass", "def refresh(self):\n self.reward = 0\n self.score = 0", "def update(self):\n raise NotImplementedError()", "def update(self):\n\n SolidSolver.update(self)\n\n self.__nextStep()", "def _run(self):\n self._algorithm(self._list, self)", "def update(self):\r\n if self.games and all(game.result for game in self.games):\r\n self.rankings = self.compute_ranking()\r\n self.update_observers()\r\n\r\n if self.finals:\r\n for final in self.finals:\r\n final.update()", "def update(self)->None:\n pass", "def update(self):\n self.chromosome_list = self.next_population\n self.reset_mating_pool()\n self.reset_next_population()", "def __reset_variables(self):\r\n self.__running = True", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def initialize(self, *args, **kwargs): \n super().initialize(*args, **kwargs)\n self.updates_per_optimize = 1", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def update(cls) -> None:\n raise NotImplementedError", "def finished(self):\n self.update(self._total)", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def update(self):\r\n\r\n self.target.load_state_dict(self.model.state_dict())\r\n self.target.eval()", "def update(self):\n\n self._pre_calc_mb()", "def update(self, initial, follows):", "def __init__(self):\r\n self.unique_classes = []\r\n self.total_classes_number = 0\r\n self.class_number_dict = {}\r\n self.unique_word_number = 0\r\n self.class_word_number_dict = {}\r\n self.class_total_words_dict = {}", "def __init__(self):\n if cons.is_boa == True:\n algorithm = \"XCS/BOA\"\n else:\n algorithm = \"XCS\"\n print(algorithm + \": Running \" + str( cons.multiple_runs ) + \" times\")\n #Global Parameters-------------------------------------------------------------------------------------\n self.population = None # The rule population (the 'solution/model' evolved by XCS)\n self.learn_track = None # Output file that will store tracking information during learning\n #self.previousState = None # states received from environment from last time step\n\n for count in range( cons.multiple_runs ):\n print(\"Time \" + str( count ) + \": Initializing Algorithm...\")\n #-------------------------------------------------------\n # POPULATION REBOOT - Begin XCS learning from an existing saved rule population\n #-------------------------------------------------------\n if cons.do_population_reboot:\n self.populationReboot( count )\n #-------------------------------------------------------\n # NORMAL XCS - Run XCS from scratch on given data\n #-------------------------------------------------------\n else:\n try:\n self.learn_track = open(cons.out_file+str(count)+'_LearnTrack.txt','w')\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n print('cannot open', cons.out_file+str(count)+'_LearnTrack.txt')\n raise\n else:\n self.learn_track.write(\"Explore_Iteration\\tMacroPopSize\\tMicroPopSize\\tAccuracy_Estimate\\tAveGenerality\\tExpRules\\tTime(min)\\n\")\n\n # Instantiate Population---------\n self.population = 
ClassifierSet()\n self.explore_iter = 0\n self.tracked_results = [0] * cons.tracking_frequency\n self.exploit_iters = 0\n\n #Run the XCS/BOA algorithm--------------------------------------------------------------------------\n self.boa = None\n bcons.setCycleTime( int( cons.N/2 ) )\n self.run_XCS_BOA( count )", "def solve(self, regparam):\n self.regparam = regparam\n \n #Some counters for bookkeeping\n self.stepcounter = 0\n self.flipcounter = 0\n self.nochangecounter = 0\n \n #Cached results\n self.evals = np.multiply(self.svals, self.svals)\n self.newevals = 1. / (self.evals + self.regparam)\n newevalslamtilde = np.multiply(self.evals, self.newevals)\n self.D = np.sqrt(newevalslamtilde)\n #self.D = -newevalslamtilde\n \n self.VTY = self.svecs.T * self.Y\n DVTY = np.multiply(self.D.T, self.svecs.T * self.Y)\n \n #Using lists in order to avoid unnecessary matrix slicings\n self.DVTY_list = []\n self.YTVDDVTY_list = []\n self.classFitnessList = []\n for i in range(self.labelcount):\n DVTY_i = DVTY[:,i]\n self.DVTY_list.append(DVTY_i)\n YTVDDVTY_i = DVTY_i.T * DVTY_i\n self.YTVDDVTY_list.append(YTVDDVTY_i)\n fitness_i = self.size - DVTY_i.T * DVTY_i\n self.classFitnessList.append(fitness_i)\n \n self.Dsvecs_list = []\n self.svecsDDsvecs_list = []\n for i in range(self.size):\n Dsvec = np.multiply(self.D.T, self.svecs[i].T)\n self.Dsvecs_list.append(Dsvec)\n self.svecsDDsvecs_list.append(Dsvec.T*Dsvec)\n \n self.updateA()\n \n \n converged = False\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n while True:\n \n converged = self.roundRobin()\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n if converged: break\n \n if self.oneclass:\n self.Y = self.Y[:, 0]\n self.A = self.A[:, 0]\n self.results['predicted_clusters_for_training_data'] = self.Y\n self.predictor = self.svdad.createModel(self)", "def _do_compute(self, var_map):\n raise Exception(\"Not implemented. 
Subclass responsibility\")", "def _update_flowcellrun(self):\n logger.debug(\"updating `FlowcellRun` object attributes\")\n pass", "def update(self) -> None:\n self.faithful = self.is_faithful()\n if self.faithful:\n old_class_names = self.class_names\n old_training_image_names = self.training_image_names\n self.class_names = self.find_class_names()\n self.training_image_names = self.find_training_image_names()\n self.extracted_features = list()\n if old_class_names != self.class_names or old_training_image_names != self.training_image_names:\n self.generate_csv_dictionary()\n return", "def _update(self):\n num_new_evals = (self.metamodel.model_evaluations - self._last_rebuild)\n if num_new_evals >= self.rebuild_interval:\n self._built = True\n self._last_rebuild = self.metamodel.model_evaluations\n\n # Rebuild relevance function and make it usable on arrays.\n self._relevance_function = self._construct_relevance_function()\n rel_fun = np.vectorize(self._relevance_function)\n\n # Learn relevance prediction model\n data = self.metamodel.history.get_model_evaluations()\n relevance_values = rel_fun(data[:, -1])\n self._predictor.fit(data[:, :-1], relevance_values)\n return", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def update(self):\r\n self.g = self.create_graph()", "def updatestats(self):\n result = self.statsfromcounts(self.hypCountByScenario)\n self.pScenario = result[\"p\"]\n self.scenarioEntropy = result[\"entropy\"]", "def apply(self):", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n 
self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def __init__(self):\n self.param_names = []\n self.param_values = []\n self.param_settings = []\n self.result = []\n self.best_params = None\n self.best_score = None\n self.max_reps = 5\n self.num_values = False\n self.algorithm_done = False", "def refresh(self):\n self.active_member_count\n self.description\n self.lbmethod\n self.members\n self.minimum_active_member\n self.minimum_up_member\n self.slow_ramp_time\n self.statistics", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def _refresh(self):\n self.__init__(self.db, self.roster_set, self.constraint_fns)\n self.add_objective()\n for fns in self.constraint_fns.keys():\n fns(*self.constraint_fns[fns])", "def reInitAndRun(self):\n self.playlists = self.readPlaylistData()\n self.audioDF = self.readAudioData(shouldProcess=True)\n self.clusterLabels = []\n self.models = Clusterers(k=len(self.playlists))\n self.processAndCluster()\n self.analyzeResults()", "def update_attr_par(self):\n\n # Retrieve all current values\n self.all_values_temp = nx.get_node_attributes(self.G, 'value')\n\n # Calculate all new values\n new_values_list = Parallel(n_jobs=2)(delayed(self.single_node_update)(i) \\\n for i in range(self.n_v))\n\n # # Set list to dict as needed for node update\n # new_values_dict = {}\n # for i, value in enumerate(new_values_list):\n # new_values_dict[i] = {'value': value}\n #\n # # Update node value\n # nx.set_node_attributes(self.G, new_values_dict)", "def update_class(self):\n neighbors_set = list(set(self.neighbors))\n counts = np.array([self.neighbors.count(n) for n in neighbors_set])\n probs = (counts / counts.sum()) * (1-self.mutation_prob)\n probs = np.append(probs, self.mutation_prob)\n 
neighbors_set.append(np.random.choice(np.arange(1, self.board.classes))) \n\n self.cell_class = np.random.choice(neighbors_set, p=probs)", "def __init__(self):\n\n self.result = None # To store the result\n self.predictor = None # To store the fit predictor", "def update_E(self):", "def init_other_vars(self):\n self.endIndexOfClassConditionalProbability = self.numOfGridsIn1D \\\n * self.numOfGridsIn1D * self.numOfClasses\n self.endIndexOfObjectProbability \\\n = self.endIndexOfClassConditionalProbability \\\n + self.numOfGridsIn1D*self.numOfGridsIn1D*self.numOfBoxesPerGrid\n # Class Conditional Probability: P(class | object),\n self.classConditionalProbability = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfClasses\n ])\n # P(object): Object probability, i.e. the probability of an\n self.objectProbability = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid\n ])\n # Box data (x, y, w, h)\n self.boxData = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid, 4\n ])\n # Offset to add to x and y values to convert from within-grid\n # coordinates to image coordinates\n self.offsetY = np.tile(\n np.arange(self.numOfGridsIn1D)[:, np.newaxis, np.newaxis],\n (1, self.numOfGridsIn1D, self.numOfBoxesPerGrid)\n )\n self.offsetX = np.transpose(self.offsetY, (1, 0, 2))\n # Most probable classes per grid\n self.maxProbableClasses = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid\n ])\n # Probabilities of most probable classes per grid\n self.maxProbableClassProbabilities = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid\n ])\n # The probability of an object present, and it being each class\n self.objectClassProbability = np.zeros([\n self.numOfGridsIn1D, self.numOfGridsIn1D, self.numOfBoxesPerGrid,\n self.numOfClasses\n ])", "def update(self):\n pass", "def finalise(self):", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def class_adjustment(self):\n self.ABILITIES_ORDER = fetch_data.get_abilities_order(self.ch_class)\n\n for key in self.SAVES_LVLS:\n self.SAVES_LVLS[key] = fetch_data.get_class_saves(key, self.ch_class)\n\n self.CLASS_SKILLS = fetch_data.get_class_skills(self.ch_class)\n self.SKILL_POINTS_MOD = fetch_data.get_skillp_modifier(self.ch_class)\n self.HIT_DIE = fetch_data.get_hit_die(self.ch_class)\n self.BASE_ATTACK_LVLS = fetch_data.get_class_base_attack(self.ch_class)", "def flush(self):\n super().flush()\n self._targetEvaluation = None\n self._solutionExport = None", "def _update_objective(self):\n # rewrap the cost if the solver has been run\n self.Finalize()\n return", "def process(self):\n self.output_info = self.attributes.copy()", "def _analyze(self):\r\n if self.value is None or self.value == self.previous:\r\n pass\r\n elif self._operation == \"add\":\r\n self._additions = self.value\r\n elif self._operation == \"remove\":\r\n self._removals = self.value\r\n elif self.previous is None:\r\n self._assignments = self.value\r\n else:\r\n # partial update time\r\n self._additions = (self.value - self.previous) or None\r\n self._removals = (self.previous - self.value) or None\r\n self._analyzed = True", "def __update(self):\n\n # Make sure loads have been assigned to group\n if type(self.appliedLoad) == Load:\n self.appliedLoad = LoadSet(self.appliedLoad)\n elif type(self.appliedLoad) != LoadSet:\n raise TypeError(\"Applied load must be a Load or LoadSet\")\n\n # Begin Calculations\n _cg = self.cg # calculate 
the cg once to save computation time\n _appLoad = self.appliedLoad.totalForce\n _appMoment = self.appliedLoad.totalMoment\n\n coef_mat = np.zeros((len(self) * 3, len(self) * 3)) # coeff matrix\n soln_mat = np.zeros(len(self) * 3) # solution matrix\n\n cSet = [[i, i+1, i+2] for i in range(0, 3 * len(self), 3)]\n rSet = [[i+6, i+7, i+8] for i in range(0, 3 * (len(self) - 2), 3)]\n\n for i, j in enumerate(cSet):\n # i = column fastener ID\n # j = column fastener set\n # Mx = yFz - zFy\n # My = zFx - xFz\n # Mz = xFy - yFx\n\n Fx = j[0]\n Fy = j[1]\n Fz = j[2]\n\n # fill in first three rows\n coef_mat[0][Fx] = 1 # sum of Fx\n coef_mat[1][Fy] = 1 # sum of Fy\n coef_mat[2][Fz] = 1 # sum of Fz\n\n # fill in fourth row (sum of Mx at CG)\n coef_mat[3][Fy] = -(F[i].xyz[2] - _cg[2]) # -zFy\n coef_mat[3][Fz] = +(F[i].xyz[1] - _cg[1]) # +yFz\n\n # fill in fifth row (sum of My at CG)\n coef_mat[4][Fx] = +(F[i].xyz[2] - _cg[2]) # +zFx\n coef_mat[4][Fz] = -(F[i].xyz[0] - _cg[0]) # -xFz\n\n # fill in sixth row (sum of Mz at CG)\n coef_mat[5][Fx] = -(F[i].xyz[1] - _cg[1]) # -yFx\n coef_mat[5][Fy] = +(F[i].xyz[0] - _cg[0]) # +xFy\n\n for u, w in enumerate(rSet):\n # u = row fastener ID\n # w = row fastener set\n\n rX = w[0]\n rY = w[1]\n rZ = w[2]\n\n coef_mat[rX][Fy] = -(F[i].xyz[2] - F[u].xyz[2]) # -zFy\n coef_mat[rX][Fz] = +(F[i].xyz[1] - F[u].xyz[1]) # +yFz\n\n coef_mat[rY][Fx] = +(F[i].xyz[2] - F[u].xyz[2]) # +zFx\n coef_mat[rY][Fz] = -(F[i].xyz[0] - F[u].xyz[0]) # -xFz\n\n coef_mat[rZ][Fx] = -(F[i].xyz[1] - F[u].xyz[1]) # -yFx\n coef_mat[rZ][Fy] = +(F[i].xyz[0] - F[u].xyz[0]) # +xFy\n\n # fill in the solution matrix (soln_mat)\n for i in range(3):\n soln_mat[i] = -_netLoad.force[i]\n soln_mat[i+3] = -_netLoad.moment[i]\n\n # fill in the remaining rows\n for i, j in enumerate(rSet):\n # i = fastener\n # j = row\n\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n # Mx = (y_cg - y_i)F_znet - (z_cg - z_i)F_ynet + M_xnet\n soln_mat[rX] = - ((_cg[1] - F[i].xyz[1]) * _netLoad.force[2]\n - (_cg[2] - F[i].xyz[2]) * _netLoad.force[1]\n + _netLoad.moment[0])\n\n # My = (z_cg - z_i)F_xnet - (x_cg - x_i)F_znet + M_ynet\n soln_mat[rY] = -((_cg[2] - F[i].xyz[2]) * _netLoad.force[0]\n - (_cg[0] - F[i].xyz[0]) * _netLoad.force[2]\n + _netLoad.moment[1])\n\n # Mz = (x_cg - x_i)F_ynet - (y_cg - y_i)F_xnet + M_znet\n soln_mat[rZ] = -((_cg[0] - F[i].xyz[0]) * _netLoad.force[1]\n - (_cg[1] - F[i].xyz[1]) * _netLoad.force[0]\n + _netLoad.moment[2])\n\n # Solve system of equations\n matSol = np.linalg.lstsq(coef_mat, soln_mat)[0]\n\n # Add resulting fastener loads to fastener objects\n for i, j in enumerate(cSet):\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n F[i].force[0] = matSol[rX]\n F[i].force[1] = matSol[rY]\n F[i].force[2] = matSol[rZ]" ]
[ "0.6898461", "0.6669482", "0.6625015", "0.6544939", "0.6408313", "0.6408313", "0.6408313", "0.6405048", "0.6306032", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6305308", "0.6245735", "0.6221661", "0.61755955", "0.6161531", "0.6159562", "0.6147771", "0.61441797", "0.61441797", "0.61441797", "0.61391866", "0.61382324", "0.61229056", "0.6118686", "0.6118686", "0.6101417", "0.6089138", "0.60773826", "0.60621005", "0.605453", "0.60409445", "0.60376054", "0.6031777", "0.6030784", "0.6030784", "0.6009723", "0.60058737", "0.6005736", "0.59963", "0.59749013", "0.59653884", "0.5963407", "0.59612256", "0.59602094", "0.5957308", "0.5934423", "0.59231067", "0.59231067", "0.59169024", "0.591241", "0.59071124", "0.59040797", "0.59030336", "0.5900432", "0.5898363", "0.58980864", "0.5895561", "0.5894548", "0.58883584", "0.588141", "0.5877988", "0.58609253", "0.58469546", "0.5845451", "0.58433306", "0.5838448", "0.5838448", "0.5838448", "0.5838448", "0.5837226", "0.58260494", "0.5825046", "0.5808003", "0.5790254", "0.5787018", "0.57803345", "0.5776047", "0.57726896", "0.57647705", "0.5763285", "0.57627714", "0.57577175", "0.5751831", "0.5748521", "0.5748486", "0.57465076", "0.57459956", "0.57357883" ]
0.0
-1
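The positive document of the record above stores per-axis trajectories, rankings, and final scores on mirrored attribute pairs (x_* against inverse_y_*, and y_* against inverse_x_*). Below is a self-contained sketch of that pattern; the Algo stand-in class and the toy arrays are assumptions added so it runs outside the original codebase.

import numpy as np

class Algo:
    """Hypothetical stand-in exposing only the _update_vars pattern above."""
    def _update_vars(self, axis, traj_s, traj_o, rank_s, rank_o, t):
        # axis 0: traj_s holds the x quantities, traj_o the inverse-y ones
        if axis == 0:
            self.x_traj, self.x_ranking, self.x_scores = traj_s, rank_s, traj_s[-1]
            self.inverse_y_traj, self.inverse_y_ranking = traj_o, rank_o
            self.inverse_y_scores = traj_o[-1]
        # axis 1: the roles are mirrored
        if axis == 1:
            self.y_traj, self.y_ranking, self.y_scores = traj_s, rank_s, traj_s[-1]
            self.inverse_x_traj, self.inverse_x_ranking = traj_o, rank_o
            self.inverse_x_scores = traj_o[-1]

a = Algo()
a._update_vars(0, np.ones((4, 3)), np.zeros((4, 2)), np.arange(3), np.arange(2), t=4)
print(a.x_scores)          # last row of the main trajectory: [1. 1. 1.]
print(a.inverse_y_scores)  # last row of the opposite trajectory: [0. 0.]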
Get two lists (for rows and columns) where the row/col index gives the list of col/row indexes of the nonzero elements.
def _get_index_lists(self, mat): n_row, n_col = mat.shape col_ind_at_row, row_ind_at_col = [],[] for i in range(n_row): aux_ind = _np.where(mat[i]>0)[0] if len(aux_ind) == 0: raise Exception('Row {} is composed of zeros'.format(i)) col_ind_at_row.append(aux_ind) for j in range(n_col): aux_ind = _np.where(mat[:,j]>0)[0] if len(aux_ind) == 0: raise Exception('Column {} is composed of zeros'.format(j)) row_ind_at_col.append(aux_ind) return col_ind_at_row, row_ind_at_col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_empty_cells(state):\n cells = []\n for row_index, row in enumerate(state.board):\n for col_index, cell in enumerate(row):\n if cell == 0:\n cells.append([row_index, col_index])\n return cells", "def empty_cells(state):\n cells = []\n\n for i, row in enumerate(state):\n for j, col in enumerate(row):\n if state[i][j] == 0:\n cells.append([i, j])\n\n return cells", "def empty_cells(state):\r\n cells = []\r\n for x, row in enumerate(state):\r\n for y, cell in enumerate(row):\r\n if cell == 0:\r\n cells.append([x, y])\r\n\r\n return cells", "def row_col_make_zero(two_dimensional_array):\n columns = []\n rows = []\n for row, rvalue in enumerate(two_dimensional_array):\n check = [column for column, cvalue in enumerate(rvalue) if cvalue == 0]\n if check:\n columns.extend(check)\n rows.append(row)\n for row in two_dimensional_array:\n for column in columns:\n row[column] = 0\n for row in rows:\n two_dimensional_array[row] = [0]*len(two_dimensional_array[row])\n print(two_dimensional_array)", "def find_empty_cells(gr):\n l = list()\n for i in range(0,9):\n for j in range(0,9):\n if(gr[i][j] == 0):\n l.append([i, j])\n return l", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def matZeros(shape):\n return [[0 for y in range(shape[1])] \\\n for x in range(shape[0])]", "def _find_zero(board):\n for r_index, row in enumerate(board):\n for c_index, num in enumerate(row):\n if num == 0:\n return r_index, c_index", "def nonzeros(m, row):\n for index in range(m.indptr[row], m.indptr[row+1]):\n yield m.indices[index], m.data[index]", "def get_empty_cells(grid):\n empty = []\n for j,row in enumerate(grid):\n for i,val in enumerate(row):\n if not val:\n empty.append((j,i))\n return empty", "def get_0_pos(grid):\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 0:\n return i, j\n return -1, -1", "def positions_to_play(self):\r\n\r\n positions = []\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"0\":\r\n # Add [row, column] to the list\r\n positions.append([i, j])\r\n \r\n return positions", "def empty_matrix(self):\r\n\r\n return [[0 for i in range(len(self.s2)+1)] for j in range(len(self.s1)+1)]", "def nonzero_indices(a):\n return (np.nonzero(a)[0])", "def non_zero_idx_val(seq):\n return [(i, v) for i, v in enumerate(seq) if v > 0]", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty", "def index_col(self, i0, i1, j0, j1):\n edges = self.h5['indexes']['bin1_offset'][i0:i1 + 1]\n index = []\n for lo1, hi1 in zip(edges[:-1], edges[1:]):\n if hi1 - lo1 > 0:\n bin2 = self.h5['pixels']['bin2_id'][lo1:hi1]\n mask = (bin2 >= j0) & (bin2 < j1)\n index.append(lo1 + np.flatnonzero(mask))\n if not index:\n return np.array([], dtype=int)\n else:\n return np.concatenate(index, axis=0)", "def slice0(A,rowrange,colrange):\n\treturn [[A[i][j] for j in range(rowrange[0],rowrange[1])] for i in range(colrange[0],colrange[1])]", "def row_to_indices(row):\r\n return [(row, col) for col in range(0, 9)]", "def coordinates(self, mask):\n y,x = mask.nonzero()\n return list(zip(x,y))", "def init_zero_matrix(self,rows,cols):\n\t\ttmpMatrix = []\n\t\tfor i in range(rows):\n\t\t\ttmp = [0 for j in range(cols)]\n\t\t\ttmpMatrix.append(tmp)\n\t\treturn tmpMatrix", "def find_empty(self):\n num_rows = len(self.board)\n num_cols 
= len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)", "def printNonZeroMatrixElements(A):\n for i, row in enumerate(A):\n for j, element in enumerate(row):\n if(element != 0):\n print(f\"[{i}, {j}] = {element}\")", "def cell_list(self):\n lst_of_idx = []\n height = self.__height\n width = self.__width\n for i in range(width):\n for j in range(height):\n lst_of_idx.append((i,j))\n lst_of_idx.append((3,7))\n return lst_of_idx", "def findEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return [x,y]", "def indices(self):\n\n # We used lookup tables here. Read more about other methods here:\n # https://chessprogramming.wikispaces.com/Bitboard+Serialization\n\n if self.num == 0:\n return []\n\n bits = []\n\n for i in [0, 1, 2, 3, 4, 5, 6, 7]:\n row = (self.num >> UINT64_PADDING[i]) & EIGHT_ONES\n indices = row_to_indices[row]\n for index in indices:\n bits.append(index + i*8)\n\n return bits", "def col_to_indices(col):\r\n return [(row, col) for row in range(0, 9)]", "def make_column(pixel_counts):\n column = []\n for i, count in enumerate(pixel_counts):\n column += [i % 2]*count\n column = np.array(column)\n return list(np.where(column != 0)[0])", "def get_empty_positions(self):\n\n empty_positions = []\n\n for i in range(self._dimension):\n for j in range(self._dimension):\n if self._board[i][j] == ' ':\n empty_positions.append((i, j))\n\n return empty_positions", "def get_nonzero_vectors(motion_vectors):\n if np.shape(motion_vectors)[0] == 0:\n return motion_vectors\n else:\n idx = np.where(np.logical_or(motion_vectors[:, 7] != 0, motion_vectors[:, 8] != 0))[0]\n return motion_vectors[idx, :]", "def columnIndexes(a):\n nrows = (a.size-2)+1\n return a[1*np.arange(nrows)[:,None] + np.arange(2)]", "def zero_matrix(matrix):\n rows = set()\n columns = set()\n m = len(matrix)\n n = len(matrix[0])\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\n\n for i in range(m):\n for j in range(n):\n if i in rows or j in columns:\n matrix[i][j] = 0\n return matrix", "def zeros_matrix(rows, cols):\n M = []\n while len(M) < rows:\n M.append([])\n while len(M[-1]) < cols:\n M[-1].append(0.0)\n\n return M", "def zeros_matrix(rows, cols):\n M = []\n while len(M) < rows:\n M.append([])\n while len(M[-1]) < cols:\n M[-1].append(0.0)\n\n return M", "def cells(self):\n return ((row, col) for row in self.rows for col in self.cols)", "def possibilities(board):\n return board[np.where(board == 0)]", "def zeros_matrix(rows, cols):\n M = []\n while len(M) < rows:\n M.append([])\n while len(M[-1]) < cols:\n M[-1].append(0.0)\n \n return M", "def zeros_matrix(self, rows, cols):\r\n M = []\r\n while len(M) < rows:\r\n M.append([])\r\n while len(M[-1]) < cols:\r\n M[-1].append(0.0)\r\n\r\n return M", "def _xy_locs(mask):\n y, x = mask.nonzero()\n return list(zip(x, y))", "def to_ijv(self):\n row_indices = []\n column_indices = []\n nonzero_elements = []\n k = 0\n for key, value in self.data.items():\n if value == 0:\n continue\n row, col = key[:2]\n row_indices.append(row)\n column_indices.append(col)\n nonzero_elements.append(value)\n k += 1\n return row_indices, column_indices, nonzero_elements", "def zero_matrix(matrix):\n cols = []\n for row in range(0, len(matrix)):\n for col in range(0, len(matrix[row])):\n if matrix[row][col] == 0:\n cols.append(col)\n matrix[row] = [0 for _ in range(0, len(matrix[row]))]\n break\n for col in cols:\n for 
row in range(0, len(matrix)):\n matrix[row][col] = 0\n return matrix", "def cardinal_indices(self, index):\n cardinals = [\n self.north_index(index),\n self.east_index(index),\n self.south_index(index),\n self.west_index(index)\n ]\n return [i for i in cardinals if 0 < i < (self.size * self.size)]", "def getNeighbors(self, row, col):\n neighbors = []\n for deltaRow in range(-1, 2):\n for deltaCol in range(-1, 2):\n if not (deltaRow == 0 and deltaCol == 0) and self.inBoard(row + deltaRow, col + deltaCol):\n neighbors += [(row + deltaRow, col + deltaCol)]\n return neighbors", "def column(self, index: int) -> List[int]:\n return [x[index - 1] for x in self.matrix]", "def get_zero_nonzero_idx(spins):\n idx_zero = spins.index(0)\n idx_nonzero = tuple([idx for idx, spin in enumerate(spins) if spin != 0])\n if len(idx_nonzero) + 1 != len(spins):\n raise ValueError(\"`spins` must contain exactly one zero.\")\n return idx_zero, idx_nonzero", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def non_masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask-1,order='C'))[0]", "def find_empty(grid):\n for i in range(LEN_GRID):\n for j in range(LEN_GRID):\n if grid[i][j] == 0:\n return (i, j) # row, col\n return None", "def _find_nonzero_runs(values):\n\n error_checking.assert_is_numpy_array_without_nan(values)\n error_checking.assert_is_numpy_array(values, num_dimensions=1)\n\n zero_flags = numpy.concatenate((\n [True], numpy.equal(values, 0), [True]\n ))\n\n nonzero_flags = numpy.invert(zero_flags)\n differences = numpy.abs(numpy.diff(nonzero_flags))\n index_matrix = numpy.where(differences == 1)[0].reshape(-1, 2)\n\n return index_matrix[:, 0], index_matrix[:, 1] - 1", "def get_empty_squares(self):\n empty = []\n for row in range(self._dim):\n for col in range(self._dim):\n if self._board[row][col] == EMPTY:\n empty.append((row, col))\n return empty", "def masked_indices(mask):\n\treturn np.nonzero(np.ravel(mask,order='C'))[0]", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def get_column_offsets(self):\n offsets = [x + self.bitcell_array_inst.lx() for x in self.bitcell_array.get_column_offsets()]\n return offsets", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def _non_zero_columns_search(array):\n col_num = array.shape[1]\n non_zero_col = CArray([], dtype=int)\n for c in range(col_num):\n col = array[:, c]\n if col.any() == True:\n non_zero_col = non_zero_col.append(c)\n\n return non_zero_col", "def get_neighbour_squares_idx(self, pos):\n if pos:\n possible_values = {0, 1, 2}\n col_variation = zip( [pos[0], pos[0]], possible_values - {pos[1]} )\n row_variation = zip( possible_values - {pos[0]}, [pos[1], pos[1]] )\n return list(col_variation), list(row_variation)", "def get_empty_cells(self):\n empty_cells = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n if current_cell.get_cell_state() == 0:\n empty_cells.append(current_cell)\n return empty_cells", "def nonzero_values(a):\r\n return a.flatten()[flatnonzero(a)]", "def find_empty(bo):\n for i in range(len(bo)):\n for j in range(len(bo[0])):\n if bo[i][j] == 0:\n return (i, j)\n\n return None", "def vacantPoint(L):\n pliste = []\n for i in range(0, len(L)):\n for j in range(0, 
len(L[i])):\n if L[i][j]==0:\n pliste.append([i, j])\n return pliste", "def find_empty_space(board: list) -> tuple:\n board_length = len(board)\n for i in range(board_length):\n for j in range(board_length):\n if board[i][j] == 0:\n return (i,j)", "def sudoku_getcol(A, j):\r\n n = len(A)\r\n B = [0 for i in range(n)]\r\n for i in range(n):\r\n B[i] = A[i][j]\r\n return B", "def Indexes(self, latitudes, longitudes):\n res = self._transform.TransformPoints(\n np.column_stack((longitudes, latitudes)))\n res = list(zip(*res))\n x, y = np.array(res[0]), np.array(res[1])\n idx_col = self._inv_txf[0] + self._inv_txf[1] * x + self._inv_txf[2] * y\n idx_row = self._inv_txf[3] + self._inv_txf[4] * x + self._inv_txf[5] * y\n return idx_row.astype(int), idx_col.astype(int)", "def get_empty_cells(board):\n empty_cells = [idx for idx, e in enumerate(board) if e == ' ']\n return empty_cells", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def extract_pixels(img_):\n non_zero_pixels = np.argwhere(0 < img_)\n x = non_zero_pixels.T[0].astype(np.float32)\n y = non_zero_pixels.T[1].astype(np.float32)\n return x, y", "def get_pos(self):\n return [self.row, self.col]", "def getNonEmptyCells(self):\n nonemptys = []\n for ri in range(self.nRow):\n for ci in range(self.nCol):\n val = self.vals[ri][ci]\n if not self.isEmpty(val):\n row = ri+1\n col = ci+1\n nonemptys.append(CellDesc(row=row, col=col, val=val))\n return nonemptys", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def get_empty_square(self) -> list:\n empty_square = []\n for line_index in range(len(self.grid)):\n for col_index in range(len(self.grid[line_index])):\n if self.grid[line_index][col_index].color is None:\n empty_square.append((line_index, col_index))\n\n return empty_square", "def get_zero_list(line):\n zero_list = []\n for dummy_i in range(0, len(line)):\n if line[dummy_i] == 0:\n zero_list.append(0)\n return zero_list", "def test_find_row_col_indices(self):\r\n obs = self.mc._find_row_col_indices(0)\r\n self.assertEqual(obs, (1, 0))\r\n\r\n obs = self.mc._find_row_col_indices(1)\r\n self.assertEqual(obs, (2, 0))\r\n\r\n obs = self.mc._find_row_col_indices(2)\r\n self.assertEqual(obs, (2, 1))\r\n\r\n obs = self.mc._find_row_col_indices(3)\r\n self.assertEqual(obs, (3, 0))\r\n\r\n obs = self.mc._find_row_col_indices(4)\r\n self.assertEqual(obs, (3, 1))\r\n\r\n obs = self.mc._find_row_col_indices(5)\r\n self.assertEqual(obs, (3, 2))\r\n\r\n obs = self.mc._find_row_col_indices(6)\r\n self.assertEqual(obs, (4, 0))\r\n\r\n self.assertRaises(IndexError, self.mc._find_row_col_indices, -1)", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)", "def get_neighbours(self, row, col):\n neighbour_location_diffs = [(-1, -1),\n ( 0, -1),\n ( 1, -1),\n ( 1, 0),\n ( 1, 1),\n ( 0, 1),\n (-1, 1),\n (-1, 0)]\n neighbours = []\n for diff in neighbour_location_diffs:\n if (row + diff[0] >= 0 and\n row + diff[0] < self.height and\n col + diff[1] >= 0 and\n col + diff[1] < 
self.width):\n neighbours.append(self.cells[row + diff[0]][col + diff[1]])\n return neighbours", "def _low_tri_indices(rowCount):\n for col in range(rowCount):\n for row in range(col, rowCount):\n yield (row, col)", "def blank_board(self):\n return [[False for x in range(self._dim)] for y in range(self._dim)]", "def find_empty_squares(board):\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 0:\n return (i,j) #row , column\n\n #if there are no blank squres\n return None", "def setZeroes(self, matrix: List[List[int]]) -> None:\n zeros = []\n m, n = len(matrix), len(matrix[0])\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n zeros.append([i, j])\n for index in zeros:\n row = index[0]\n col = index[1]\n for r in range(m):\n matrix[r][col] = 0\n for c in range(n):\n matrix[row][c] = 0", "def list_of_neighbors(self):\n return self.to_coo_matrix().tolil().rows.tolist()", "def get_numbered_neighbours(self, row, col):\n return [cell for cell in self.get_neighbours(row, col) if type(cell.state) is int]", "def setZeroes(self, matrix: List[List[int]]) -> None:\n row = []\n col = []\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 0:\n col.append(j)\n row.append(i)\n\n for i in range(len(row)):\n self.col(matrix, col[i])\n for i in range(len(col)):\n self.row(matrix, row[i])", "def image_to_points(numpy_image):\r\n res = []\r\n for i in range(numpy_image.shape[0]):\r\n for j in range(numpy_image.shape[1]):\r\n if numpy_image[i,j]==0:\r\n res.append([i,j])\r\n return res", "def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n white_pos.add(pos)\r\n return null_pos, black_pos, white_pos", "def get_block_positions(self, fig):\n block_positions = []\n\n # Iterates through y + active_piece.y and x + active_piece.x\n for y, row in enumerate(fig, start=self.active_piece.y):\n for x, val in enumerate(row, start=self.active_piece.x):\n if val != 0:\n block_positions.append((x, y))\n\n return block_positions", "def mainIndices(self):\n return self.i1, self.i2", "def emptyNeighborsList(board, row, column):\n\n neighboursList = list(BoardUtils.neighborsList(board, 1, row, column))\n emptyNeighbours = []\n for row, column in neighboursList:\n if BoardUtils.isEmpty(board, row, column):\n emptyNeighbours.append((row, column))\n return emptyNeighbours", "def get_empty_board_indecies(self):\n empty_indecies = []\n for row_num in range(len(self.board)):\n for col_num in range(len(self.board)):\n if self.board[row_num][col_num] and self.board[row_num][col_num].state == PegState.EMPTY:\n empty_indecies.append((row_num, col_num))\n return empty_indecies", "def __around_short_row(self):\n list_of_cord = []\n for i in range(-self.rad, self.rad + 1):\n for j in range(-self.rad, self.rad + 1):\n if (i == 0 and j == 0) or (i != 0 and j == -1):\n continue\n list_of_cord.append((i, j))\n return list_of_cord", "def available_combinations(self):\n result = []\n\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 0:\n result.append((i, j))\n\n return result", "def get_tile_indices(rows, cols, row_tile_size, col_tile_size):\n indices = list()\n num_row_tiles, num_col_tiles = get_num_tiles(rows, cols, row_tile_size, col_tile_size)\n for r in range(0, num_row_tiles):\n start_r = r * row_tile_size\n end_r = ((r + 1) * row_tile_size) if 
(r < num_row_tiles - 1) else rows\n for c in range(0, num_col_tiles):\n start_c = c * col_tile_size\n end_c = ((c + 1) * col_tile_size) if (c < num_col_tiles - 1) else cols\n indices.append((start_r, end_r, start_c, end_c, r + 1, c + 1))\n return indices", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def create_board(rows, columns):\n res = [[0 for i in range(columns)] for j in range(rows)]\n return res", "def diagonalsPos (matrix, cols, rows):\r\n for diagonal in ([(j, i - j) for j in range(cols)] for i in range(cols + rows -1)):\r\n yield [matrix[i][j] for i, j in diagonal if i >= 0 and j >= 0 and i < cols and j < rows]", "def diagonalsPos(board, cols, rows):\n for di in ([(j, i - j) for j in range(cols)] for i in range(cols + rows - 1)):\n yield [board[i][j] for i, j in di if i >= 0 and j >= 0 and i < cols and j < rows]", "def indices(self):\n slice_list = []\n for axis in range(self.ndim):\n if axis in self.displayed:\n slice_list.append(slice(None))\n else:\n if self.clip:\n p = np.clip(\n self.point[axis],\n np.round(self.range[axis][0]),\n np.round(self.range[axis][1]) - 1,\n )\n else:\n p = self.point[axis]\n p = np.round(p / self.range[axis][2]).astype(int)\n slice_list.append(p)\n return tuple(slice_list)", "def nonzero(self):\n\t\t_x = self.__seqvector.vec.nonzero()[1]\n\t\t_x = list(set(_x)) # uniquify them\n\t\t_x.sort() # sort positions\n\t\treturn _x", "def setZeroes(self, matrix: List[List[int]]) -> None:\n row = set()\n column = set()\n \n m = len(matrix)\n n = len(matrix[0])\n \n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n row.add(i)\n column.add(j)\n \n for i in range(m):\n for j in range(n):\n if i in row or j in column:\n matrix[i][j] = 0", "def get_idxes_from_mask(mask):\n if len(mask) > 1:\n return torch.nonzero(mask.squeeze(), as_tuple=False).reshape(1, -1)[0]\n elif len(mask) == 1:\n return torch.tensor([0], dtype=torch.int64) if mask.sum() == 1 else torch.tensor([], dtype=torch.int64)\n return torch.tensor([], dtype=torch.int64)" ]
[ "0.70034736", "0.6999915", "0.6924702", "0.6525192", "0.6505699", "0.6490474", "0.64878106", "0.6456846", "0.6356905", "0.63506573", "0.6348926", "0.62902606", "0.6273074", "0.62722355", "0.6267735", "0.62005794", "0.6167228", "0.61156595", "0.6083813", "0.6070011", "0.60664994", "0.60655284", "0.6058561", "0.60565144", "0.60366917", "0.5976341", "0.59707403", "0.59656096", "0.5953343", "0.5950113", "0.5923537", "0.59193087", "0.5907074", "0.5907074", "0.58990335", "0.58965474", "0.5882107", "0.58802575", "0.5876494", "0.5876369", "0.5876068", "0.58593583", "0.5847763", "0.5839047", "0.5837554", "0.5832377", "0.5802944", "0.57843006", "0.578352", "0.5774372", "0.57743436", "0.57609683", "0.5742309", "0.57321316", "0.5725172", "0.57193094", "0.5718415", "0.5714817", "0.5713816", "0.57132185", "0.57107687", "0.5702827", "0.5701226", "0.5694848", "0.5687909", "0.56775635", "0.56769013", "0.5675274", "0.5673023", "0.56673455", "0.5647554", "0.563272", "0.5627333", "0.56210154", "0.56162626", "0.56056786", "0.55954045", "0.55949247", "0.5563114", "0.55619735", "0.5557886", "0.5551612", "0.5546261", "0.55425936", "0.5531332", "0.5525966", "0.5525646", "0.55236924", "0.55138713", "0.5511264", "0.55050546", "0.5501051", "0.54961574", "0.54881793", "0.54861724", "0.54795825", "0.5477302", "0.54752517", "0.54738384", "0.5472609" ]
0.72458094
0
Check if the algorithm has been run. It also returns the trajectories and the ranking of the associated axis
def _check_run(self, axis):
    # Prefer the trajectory computed directly for this axis.
    if (self.x_traj, self.y_traj)[axis] is None:
        # Fall back to the result obtained while running the opposite axis.
        if (self.inverse_x_traj, self.inverse_y_traj)[axis] is None:
            raise Exception('The algorithm has not been run.')
        else:
            if self.params['print_info']:
                print('Warning: you are using the opposite score. It can contain errors if any score is a zero below threshold.')
            return (self.inverse_x_traj, self.inverse_y_traj)[axis], (self.inverse_x_ranking, self.inverse_y_ranking)[axis]
    return (self.x_traj, self.y_traj)[axis], (self.x_ranking, self.y_ranking)[axis]
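A minimal sketch of the fallback logic above, assuming `_check_run` is in scope as a plain function; the `SimpleNamespace` object below is an illustrative stand-in for the host class (its attribute values are made up, only the attribute names come from the snippet):

from types import SimpleNamespace
import numpy as _np

state = SimpleNamespace(
    params={'print_info': True},
    # axis 0 (x) was never run directly ...
    x_traj=None, x_ranking=None,
    # ... but an inverse run produced usable x results
    inverse_x_traj=[_np.ones(4)], inverse_x_ranking=_np.array([3, 1, 0, 2]),
    # axis 1 (y) was run directly
    y_traj=[_np.ones(3)], y_ranking=_np.array([2, 0, 1]),
    inverse_y_traj=None, inverse_y_ranking=None,
)

traj, rank = _check_run(state, 1)  # direct result for axis 1, no warning
traj, rank = _check_run(state, 0)  # axis 0 falls back to the inverse run and prints the warning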
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, axis, gamma):\n \n # Trajectories of the main score to compute and the opposite one\n traj_s, traj_o = [_np.ones(self.d[axis])], [_np.ones(self.d[1-axis])]\n # Ranked indices of the scores\n rank_s, rank_o = _np.array([], dtype=int), _np.array([], dtype=int)\n # List of node indices that have reached the zero threshold\n zeros_s, zeros_o = _np.array([], dtype=int), _np.array([], dtype=int)\n \n # Main loop\n for t in range(int(self.params['t_max'])):\n \n # Computing the opposite score without approx\n o = self._one_step(gamma, 1-axis, traj_s[-1])\n rank_o, zeros_o = self._update_zero_rank(o, zeros_o, rank_o)\n traj_o = _np.concatenate((traj_o, [o]))\n \n # Computing the main score (given the opposite one) without approx\n s = self._one_step(gamma, axis, o)\n rank_s, zeros_s = self._update_zero_rank(s, zeros_s, rank_s)\n # Imposing the threshold to the score\n s[zeros_s] = self.params['low_bound']\n traj_s = _np.concatenate((traj_s, [s]))\n \n # Checking the convergence\n if self._converg_check(axis, t, traj_s):\n break\n\n # Finalize the ranking of the positive scores\n rank_s = _np.append(rank_s, _np.argsort(s)[len(zeros_s):])[::-1]\n rank_o = _np.append(rank_o, _np.argsort(o)[len(zeros_o):])[::-1]\n\n # Update the class variables\n self._update_vars(axis, traj_s, traj_o, rank_s, rank_o, t)\n \n if self.params['print_info']:\n print (\"Convergence in \" + str(t) + \" time steps.\")\n if t >= self.params['t_max']:\n print(\"Warning. Stationary state not reached.\")", "def evaluate(self, algo):\n raise NotImplementedError()", "def analyze(self):\n self.__data = Ranking.ranks()\n self.__motion, self.__blur, self.__text, self.__audio = self.__data\n self.__rank_length = len(self.__motion)\n\n self.__ranks = [self.__motion[i] + self.__blur[i] +\n self.__text[i] + self.__audio[i] for i in range(self.__rank_length)]\n\n try:\n self.__timestamps = Ranking.get_timestamps()\n except RankingOfFeatureMissing:\n Log.e(RankingOfFeatureMissing.cause)\n return\n\n self.__output_length = Ranking.get_video_length()\n self.__actual_length = abs(self.__cache.read_data(CACHE_FRAME_COUNT) /\n self.__cache.read_data(CACHE_FPS))\n\n self.__plot_rank_line()\n self.__analytics()", "def algorithm_loop(self):", "def get_predictions():\n\n print(\"OK1\");\n print(\"OK2\");\n return;", "def _compute_is_terminal(self):\n # self.n_actions contains a number of unlabelled datapoints that is left\n if self.n_actions==1:\n # print('We ran out of samples!')\n done = True\n else:\n done = False\n return done", "def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)\n # # what timestep a2c or ppo2 learn() is on \n # print(\"a2c/ppo2 num timestep\",self.num_timesteps)\n \n # TODO: add flag to save screenshots or not\n subfolder = os.path.join(self.directory, 'screen/')\n filepath = os.path.join(subfolder)\n img_name = '_screenshot_' + str(self.num_timesteps)\n \n if(self.algo == \"A2C\" or self.algo == \"PPO2\"):\n # self.locals['obs'] gives black and white imgs\n obs = self.env.get_images()\n for i in range(self.num_envs):\n mpl.image.imsave(subfolder+\"env_\" + str(i) + img_name + \"_.png\", obs[i])\n elif (self.algo == \"DQN\"):\n self.env.ale.saveScreenPNG(subfolder+\"env_\" + str(0) + img_name + \"_.png\")\n\n step_stats = {self.num_timesteps: {\n 'num_timesteps': self.num_timesteps,\n 'state': self.num_timesteps/self.num_envs,\n }\n }\n # add step to dict\n CustomCallback.main_data_dict.update(step_stats)\n key = 
self.num_timesteps\n\n # collection of minimum data: action, reward, lives\n if(self.algo == \"DQN\"):\n CustomCallback.main_data_dict[key]['action_env_0'] = self.locals['action']\n CustomCallback.main_data_dict[key]['action_name_env_0'] = self.actions[self.locals['env_action']]\n if(self.game == \"Pong\"):\n CustomCallback.main_data_dict[key]['curr_score_env_0'] = self.locals['episode_rewards'][-1]\n else:\n CustomCallback.main_data_dict[key]['cumulative_life_reward'] = self.locals['episode_rewards'][-1]\n if(self.isLives == True):\n CustomCallback.main_data_dict[CustomCallback.step]['lives'] = self.locals['info']['ale.lives']\n else:\n for i in range(self.num_envs):\n CustomCallback.main_data_dict[key]['action_env_'+str(i)] = self.locals['actions'][i]\n CustomCallback.main_data_dict[key]['action_name_env_'+str(i)] = self.actions[self.locals['actions'][i]]\n CustomCallback.main_data_dict[key]['step_reward_env_'+str(i)] = self.locals['rewards'][i]\n if(self.isLives == True):\n if(CustomCallback.step == 1):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = 3\n if(CustomCallback.step >= 2):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = self.locals['infos'][i]['ale.lives']\n\n if(self.game == \"Pong\" and self.algo != \"DQN\"):\n # extra processing for Pong scores\n self.find_life_game_info_a2c_ppo2_pong()\n\n # at the last step, write data into csv files\n if(CustomCallback.step == (self.num_steps/self.num_envs)):\n self.make_dataframes(self.df_list)\n # save minimal data\n self.df_to_csv(\"df_og.csv\", self.df_list)\n self.df_to_parquet()\n CustomCallback.step = CustomCallback.step + 1\n return True", "def analyse(self):\n self.__try_fitting()\n self.second.rotate()\n self.__try_fitting()", "def _check_order(self, handles_key: _HandlesKey, is_training: bool) -> None:\n # Do not check order in eval mode since the post-backward callback does\n # not run so it cannot be used to mark the end of an iteration\n if not is_training:\n return\n if self.is_first_iter:\n msg_prefix = \"Forward order differs across ranks:\"\n local_indices: Optional[Tuple[int, ...]] = self._get_handle_indices(\n handles_key\n )\n device = handles_key[0].device # guaranteed to be non-CPU\n num_valid_indices = sum((index is not None) for index in local_indices)\n tensor_kwargs = {\"dtype\": torch.int32, \"device\": device}\n world_num_valid_indices = torch.zeros(self.world_size, **tensor_kwargs)\n local_num_valid_indices = torch.tensor([num_valid_indices], **tensor_kwargs)\n dist._all_gather_base(\n world_num_valid_indices,\n local_num_valid_indices,\n group=self.process_group,\n )\n # Check that all ranks plan to all-gather the same number of\n # parameters\n # TODO (awgu): Since every module has at most one handle in the\n # current implementation, this should never raise the error.\n for (r1, n1), (r2, n2) in itertools.combinations(\n (\n (rank, world_num_valid_indices[rank])\n for rank in range(self.world_size)\n ),\n 2,\n ):\n if n1 != n2:\n raise RuntimeError(\n f\"{msg_prefix} rank {r1} is all-gathering {n1} parameters \"\n f\"while rank {r2} is all-gathering {n2} parameters\"\n )\n world_indices = torch.zeros(\n self.world_size * num_valid_indices, **tensor_kwargs\n )\n local_indices = torch.tensor(local_indices, **tensor_kwargs)\n dist._all_gather_base(\n world_indices, local_indices, group=self.process_group\n )\n # Check that all ranks plan to all-gather the same index parameters\n for (r1, i1), (r2, i2) in itertools.combinations(\n (\n (\n rank,\n world_indices[\n rank * 
num_valid_indices : (rank + 1) * num_valid_indices\n ],\n )\n for rank in range(self.world_size)\n ),\n 2,\n ):\n if i1 != i2:\n r1_param_names = self._get_names_from_handle_indices(i1)\n r2_param_names = self._get_names_from_handle_indices(i2)\n raise RuntimeError(\n f\"{msg_prefix} rank {r1} is all-gathering parameters \"\n f\"for {r1_param_names} while rank {r2} is all-gathering \"\n f\"parameters for {r2_param_names}\"\n )\n elif self._checking_order:\n # Only issue warnings on the first deviating iteration and stop\n # checking thereafter to avoid flooding the console\n if self.warn_status == _ExecOrderWarnStatus.WARNED:\n return\n msg_prefix = None # non-`None` means we should warn\n if self.current_order_index >= len(self.handles_pre_forward_order):\n # This iteration sees extra all-gather(s) compared to the first\n msg_prefix = (\n \"Expected to not all-gather any more parameters in the \"\n \"forward but trying to all-gather parameters for \"\n )\n else:\n expected_handles_key = self.handles_pre_forward_order[\n self.current_order_index\n ]\n if expected_handles_key != handles_key:\n expected_param_names = self._get_names_from_handles(\n expected_handles_key\n )\n msg_prefix = (\n f\"Expected to all-gather for {expected_param_names} \"\n \"but trying to all-gather parameters for \"\n )\n if msg_prefix is not None:\n param_names = self._get_names_from_handles(handles_key)\n msg_suffix = (\n f\"{param_names}\"\n if param_names\n else \"a newly-added parameter since construction time\"\n )\n warnings.warn(\n \"Forward order differs from that of the first iteration \"\n f\"on rank {self.rank}. Collectives are unchecked and may \"\n f\"give incorrect results or hang.\\n{msg_prefix}{msg_suffix}\"\n )\n self.warn_status = _ExecOrderWarnStatus.WARNING\n self.current_order_index += 1", "def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done", "def compute_trajectory():\n pass", "def run_algorithm(self):\n print(f\"Checking all possible configurations with {self.algorithm}...\")\n\n if self.algorithm == \"test\" or (self.algorithm == \"greedy\" and\n self.iterations == 1000):\n\n # Test each configuration found with greedy (1000 iterations)\n while True:\n try:\n self.index += 1\n self.batteries = self.load_batteries(self.index)\n\n # Break if all configurations are checked\n except FileNotFoundError:\n break\n self.calculate_cable()\n self.link_houses()\n greedy(self, 1000)\n\n # Load best solution if user wanted to run greedy\n if self.algorithm == \"greedy\":\n self.load()\n self.plot_houses()\n\n # Call correct algorithm\n else:\n self.load()\n if self.algorithm == \"stepdown\":\n stepdown(self)\n elif self.algorithm == \"greedy\":\n greedy(self, self.iterations)\n elif self.algorithm == \"hill\":\n hill_climber(self, self.iterations)\n elif self.algorithm == \"dfs\":\n dfs(self)\n elif self.algorithm == \"random\":\n random_algorithm(self, self.iterations)\n elif self.algorithm == \"bnb\":\n bnb(self)\n\n self.load()\n self.plot_houses()", "def test_execution(self):\n self.result = self.plot(tree=self.tree, feature_table=self.table,\n sample_metadata=self.md,\n feature_metadata=self.fmd)\n self.assertIsInstance(self.result, Results)\n self.assertIsInstance(self.result.visualization, Visualization)\n # TODO check details of viz more carefully (likely 
by digging into the\n # index HTML of self.result.visualization, etc.)", "def run(self):\n for i in range(len(self.edges)):\n if self.tracks[i] == -1:\n self._dfs(i)\n self.cnt += 1\n return self.tracks", "def is_done(self):\n return_val = False\n for name in os.listdir(self.results_dir_path):\n if name.startswith('top_genes_per_phenotype'):\n return_val = True\n return return_val", "def on_iteration_start(self):\n\n self.Xprv = self.X.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yprv = self.Y.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn", "def compute_session_status(self):\n if self.passed is None:\n raise AttributeError('passed is None; compute QC first')\n # Get mean passed of each check, or None if passed is None or all NaN\n results = {k: None if v is None or np.isnan(v).all() else np.nanmean(v)\n for k, v in self.passed.items()}\n session_outcome, outcomes = self.compute_session_status_from_dict(results)\n return session_outcome, results, outcomes", "def _compute_is_terminal(self):\n new_score = self.episode_qualities[-1]\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when a quality reaches a predefined level\n if new_score >= self.target_quality:\n done = True\n return done", "def get_predictions_for_trace(self, trace, tuning=True):\n pass", "def evaluate(self):\n eval_list = nx.topological_sort(self.graph)\n for n in eval_list:\n n.evaluate()\n print(\"evaluating type\", type(n))\n\n # Notify observers of finished calculation\n self.notifyObservers(\"EVALUATION DONE\")\n return \"FINISHED\"", "def _run_one_iteration(self, iteration):\n statistics = iteration_statistics.IterationStatistics()\n logging.info('Starting iteration %d', iteration)\n num_episodes_train, average_reward_train, average_steps_per_second = (\n self._run_train_phase(statistics))\n active_num_episodes_eval, active_average_reward_eval = self._run_eval_phase(\n statistics, 'active')\n passive_num_episodes_eval, passive_average_reward_eval = (\n self._run_eval_phase(statistics, 'passive'))\n\n self._save_tensorboard_summaries(iteration, num_episodes_train,\n average_reward_train,\n active_num_episodes_eval,\n active_average_reward_eval,\n passive_num_episodes_eval,\n passive_average_reward_eval,\n average_steps_per_second)\n return statistics.data_lists", "def is_done(self, observations):\n ####################################################################\n # Plan0: init #\n ####################################################################\n # done = False\n # done_reward = 0\n # reward_reached_goal = 2000\n # reward_crashing = -200\n # reward_no_motion_plan = -50\n # reward_joint_range = -150\n\n ####################################################################################\n # Plan1: Reach a point in 3D space (usually right above the target object) #\n # Reward only dependent on distance. 
Nu punishment for crashing or joint_limits #\n ####################################################################################\n done = False\n done_reward = 0\n reward_reached_goal = 100\n reward_crashing = 0\n reward_no_motion_plan = 0\n reward_joint_range = 0\n\n\n # Check if there are invalid collisions\n invalid_collision = self.get_collisions()\n\n # print(\"##################{}: {}\".format(self.moveit_action_feedback.header.seq, self.moveit_action_feedback.status.text))\n if self.moveit_action_feedback.status.text == \"No motion plan found. No execution attempted.\" or \\\n self.moveit_action_feedback.status.text == \"Solution found but controller failed during execution\" or \\\n self.moveit_action_feedback.status.text == \"Motion plan was found but it seems to be invalid (possibly due to postprocessing).Not executing.\":\n\n print(\">>>>>>>>>>>> NO MOTION PLAN!!! <<<<<<<<<<<<<<<\")\n done = True\n done_reward = reward_no_motion_plan\n\n # Successfully reached goal: Contact with at least one contact sensor and there is no invalid contact\n if observations[7] != 0 and observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get two contacts <<<<<<<<<<<<<<<<<<')\n done_reward = reward_reached_goal\n # save state in csv file\n U.append_to_csv(self.csv_success_exp, observations)\n self.success_2_contacts += 1\n print(\"Successful 2 contacts so far: {} attempts\".format(self.success_2_contacts))\n\n if observations[7] != 0 or observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get one contacts <<<<<<<<<<<<<<<<<<')\n self.success_1_contact += 1\n print(\"Successful 1 contact so far: {} attempts\".format(self.success_1_contact))\n\n # Check if the box has been moved compared to the last observation\n target_pos = U.get_target_position()\n if not np.allclose(self.object_position, target_pos, rtol=0.0, atol=0.0001):\n print(\">>>>>>>>>>>>>>>>>>> Target moved <<<<<<<<<<<<<<<<<<<<<<<\")\n done = True\n\n # Crashing with itself, shelf, base\n if invalid_collision:\n done = True\n print('>>>>>>>>>>>>>>>>>>>> crashing <<<<<<<<<<<<<<<<<<<<<<<')\n done_reward = reward_crashing\n\n joint_exceeds_limits = False\n for joint_pos in self.joints_state.position:\n joint_correction = []\n if joint_pos < -math.pi or joint_pos > math.pi:\n joint_exceeds_limits = True\n done = True\n done_reward = reward_joint_range\n print('>>>>>>>>>>>>>>>>>>>> joint exceeds limit <<<<<<<<<<<<<<<<<<<<<<<')\n joint_correction.append(-joint_pos)\n else:\n joint_correction.append(0.0)\n\n if joint_exceeds_limits:\n print(\"is_done: Joints: {}\".format(np.round(self.joints_state.position, decimals=3)))\n self.publisher_to_moveit_object.pub_joints_to_moveit([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n while not self.movement_complete.data:\n pass\n self.publisher_to_moveit_object.pub_relative_joints_to_moveit(joint_correction)\n while not self.movement_complete.data:\n pass\n print('>>>>>>>>>>>>>>>> joint corrected <<<<<<<<<<<<<<<<<')\n\n return done, done_reward, invalid_collision", "def main():\n graph_alg_eq()\n graph_points()\n graph_smooth_from_pts()\n\n return GOOD_RET # success", "def check(self, algorithm_data: AlgorithmData) -> Tuple[bool, str, bool]:\n\n return False, False, \"The check method of the training class has to be implemented\"", "def evaluate(self, test_data):\n test_results = [(self.decide(self.feedforward(x)), y)\n for (x, y) in test_data]\n\n # Check for and keep track of TP's FP's and FN's\n # Write FP's and FN's to a special directories\n TP = FP 
= FN = count = 0\n TN = 0\n # what you got = x, what should be = y\n for ((x, y), (image, gt)) in zip(test_results, test_data):\n count += 1\n if x == 1 and y == 1:\n TP += 1\n elif x == 1 and y == 0:\n FP += 1\n if not self.validating: self.save_Image(FP_PATH, count, image)\n elif x == 0 and y == 1:\n FN += 1\n if not self.validating: self.save_Image(FN_PATH, count, image)\n \n if (TP + FP ) == 0:\n return TP, FP, FN, '---------', '---------','---------'\n else:\n false_rate = float(FP) / float(TP + FP)\n\n if (TP + FN ) == 0:\n return TP, FP, FN, '---------', '---------', false_rate\n else:\n detect_rate = float(TP) / float(TP + FN)\n\n if (TP + FP + FN ) == 0:\n return TP, FP, FN, '---------', detect_rate, false_rate\n else:\n quality_rate = float(TP) / float(TP+FP+FN)\n \n return TP, FP, FN, quality_rate, detect_rate, false_rate", "def iter_fun(self):\n\n run_id = self._run_id\n etopo_dir = driver_home\n topodir = driver_home\n\n # load input info\n if self._input_info == None:\n scn_fname = os.path.join(self._run_home,'scenario_pts.txt') \n scn = np.loadtxt(scn_fname)\n scn_list = scn.tolist()\n else:\n scn_list = self._input_info\n \n # total number of runs\n M = len(scn_list)\n N = 8*M + 2 # 8*M runs plus two empty bathymetry runs\n\n if run_id == N:\n raise StopIteration()\n\n else:\n \n #=========================\n # set coarse and fine grids\n #\n t_shelf = 0. # time approaching continental slope\n t_harbor = 0. # time approaching harbor\n\n if ((run_id >= 0) and (run_id < 4*M)) or (run_id == 8*M):\n #------------------\n # setrun for coarse\n #\n grid = 'coarse'\n \n self._rundata.amrdata.amr_levels_max = 4\n # coarse grid run = 10\"\n # dx = 30', 5', 1', 10\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6]\n\n\n # add topography (coarse)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 4, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 3, 4, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n\n # add regions\n regions = self._rundata.regiondata.regions \n # between shelf and CC \n regions = []\n regions.append(\\\n [2, 3, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [3, 4, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [4, 4, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_coarse.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_coarse.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_coarse.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n \n elif ((run_id >= 4*M) and (run_id < 8*M)) or (run_id == 8*M+1):\n #----------------\n # setrun for fine\n #\n grid = 'fine'\n \n self._rundata.amrdata.amr_levels_max = 6\n\n ## fine grid run = 2/3\"\n ## dx = 30', 5', 1', 10\", 2\", 2/3\"\n self._rundata.amrdata.refinement_ratios_x = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_y = [6, 5, 6, 5, 3]\n self._rundata.amrdata.refinement_ratios_t = [6, 5, 6, 5, 3]\n\n regions = 
self._rundata.regiondata.regions \n regions = []\n # between shelf and CC\n regions.append(\\\n [2, 4, t_shelf, 1e9, -125, -124.05, 40.5, 43]) \n regions.append(\\\n [4, 5, t_harbor, 1e9, -124.26, -124.14, 41.67, 41.79])\n regions.append(\\\n [6, 6, t_harbor, 1e9, -124.218,-124.17, 41.7345, 41.77])\n\n # add topography (fine)\n topofiles = self._rundata.topo_data.topofiles\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n topofiles = []\n\n topofiles.append([3, 1, 6, 0., 1.e10, \\\n os.path.join(etopo_dir, 'etopo1_-130_-124_38_45_1min.asc')])\n topofiles.append([-3, 4, 6, 0., 1.e10, \\\n os.path.join(topodir, 'cc-1sec.asc')])\n topofiles.append([3, 6, 6, 0., 1.e10, \\\n os.path.join(topodir,'cc-1_3sec-c_pierless.asc')])\n \n # == fgmax.data values ==\n fgmax_files = self._rundata.fgmax_data.fgmax_files\n fgmax_files = []\n \n # for fixed grids append to this list names of any fgmax input files\n fgmax1_fname = os.path.join(driver_home,'fgmax1_fine.txt')\n fgmax2_fname = os.path.join(driver_home,'fgmax2_fine.txt')\n fgmax3_fname = os.path.join(driver_home,'fgmax3_fine.txt')\n\n fgmax_files.append(fgmax1_fname) \n fgmax_files.append(fgmax2_fname) \n fgmax_files.append(fgmax3_fname) \n \n self._rundata.fgmax_data.num_fgmax_val = 2\n \n\n\n #\n # set desired magnitude\n #\n if ((run_id >= 0) and (run_id < M)) \\\n or ((run_id >= 4*M) and (run_id < 5*M)):\n self.KL_Mw_desired = 8.6\n elif ((run_id >= M) and (run_id < 2*M)) \\\n or ((run_id >= 5*M) and (run_id < 6*M)):\n self.KL_Mw_desired = 8.8\n elif ((run_id >= 2*M) and (run_id < 3*M)) \\\n or ((run_id >= 6*M) and (run_id < 7*M)):\n self.KL_Mw_desired = 9.0\n elif ((run_id >= 3*M) and (run_id < 4*M)) \\\n or ((run_id >= 7*M) and (run_id < 8*M)):\n self.KL_Mw_desired = 9.2\n \n #\n # set slip distribution\n #\n run_id_mod = run_id - 100*(run_id/100)\n m = scn_list[run_id_mod]\n self.set_KL_slip(m)\n \n if run_id < 8*M:\n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_' + str(self.KL_Mw_desired)\n self._rundir = os.path.join(dir_grid_Mw, 'run_' + str(run_id_mod))\n else:\n # empty runs to obtain bathymetry\n \n dir_grid_Mw = '../geoclaw_output/' + str(grid) + '_B0'\n self._rundir = dir_grid_Mw\n self.KL_Mw_desired = 0.0\n self.set_KL_slip([0.]*len(m)) # set output\n self._rundata.clawdata.output_times = [1.0, 3.0]\n \n self._run_id += 1\n \n return self", "def run(graph, time_axis, initial, element2edge, var, element_component_clause_literal_node, ts, type_num,\n type_robot_label, buchi, show, last_subtask=None, loop=False):\n\n frontier = [[initial, -1, []]]\n # iterate until the accepting state is reached\n while True:\n if show:\n print([f[0] for f in frontier])\n node, clock, acpt_run_ = frontier.pop()\n\n # Determine the set of identical time instants\n instant_element = time_axis[clock + 1]\n if acpt_run_:\n pre_neg_edge = acpt_run_[-1]['neg_edge']\n else:\n pre_neg_edge = []\n # loop over each successor to see whether progress can be made\n for succ in graph.succ[node]:\n # equivalent subtask\n if graph.edges[element2edge[instant_element[1]]]['formula'] == graph.edges[(node, succ)]['formula'] and \\\n graph.nodes[element2edge[instant_element[1]][0]]['formula'] == graph.nodes[node]['formula']:\n # if isEquivalent(graph.edges[element2edge[instant_element[1]]]['formula'], graph.edges[(node, succ)]['formula']) and \\\n # isEquivalent(graph.nodes[element2edge[instant_element[1]][0]]['formula'], graph.nodes[node]['formula']):\n\n # print((node, succ), graph.edges[(node, succ)]['formula'])\n # 
whether the collection of paths at clock satisfies the edge label\n # neg_literal: negative clause that needs to be addressed\n # exe_robot: set of robots that takes the subtask with nonzero id\n\n essential_clause_edge, neg_clause_edge, exe_robots_edge \\\n = determine_essentials(instant_element, var, graph.edges[(node, succ)]['label'],\n graph.edges[(node, succ)]['neg_label'], 1,\n element_component_clause_literal_node, ts, type_num,\n type_robot_label, last_subtask, buchi, [], loop)\n\n essential_clause_vertex, neg_clause_vertex, exe_robots_vertex \\\n = determine_essentials(instant_element, var, graph.nodes[node]['label'],\n graph.nodes[node]['neg_label'], 0,\n element_component_clause_literal_node, ts, type_num, dict(),\n last_subtask, buchi,\n pre_neg_edge, loop)\n\n # clock, the exact time when transition occurs\n acpt_run = acpt_run_.copy() # copy the history\n acpt_run.append({'subtask': (node, succ), 'time_element': time_axis[clock + 1],\n 'essential_robot_edge': exe_robots_edge,\n 'essential_clause_edge': essential_clause_edge, 'neg_edge': neg_clause_edge,\n 'essential_robot_vertex': exe_robots_vertex,\n 'neg_vertex': neg_clause_vertex})\n\n # stop when accept is reached\n if 'accept' in succ:\n return acpt_run\n # clock + 1, after reaching succ, the immediate time clock that should be verified\n frontier.append([succ, clock + 1, acpt_run])", "def has_result(self):\n return len(self.__analysis_items) > 0", "def _compute_results(self):\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n self.distance = self._compute_distance_betw_consecutive_x()", "def compute_statistics(self):", "def _find_similar_experiments(self):\n super()._find_similar_experiments()\n\n # TODO: Remove below reversal of `similar_experiments` when `result_reader.ResultFinder.sort` finished\n for _i, _experiment in enumerate(self.similar_experiments[::-1]):\n _hyperparameters = dimension_subset(_experiment[0], self.space.names())\n _evaluation = _experiment[1]\n _experiment_id = _experiment[2] if len(_experiment) > 2 else None\n self.logger.print_result(_hyperparameters, _evaluation, experiment_id=_experiment_id)\n self._update_optimizer(_hyperparameters, _evaluation)\n\n # self.optimizer_result = self.optimizer.tell(\n # _hyperparameters, _evaluation, fit=(_i == len(self.similar_experiments) - 1))\n\n if eval_callbacks(self.callbacks, self.optimizer_result):\n return self.optimizer_result\n # FLAG: Could wrap above `tell` call in try/except, then attempt `_tell` with improper dimensions", "def _check_ties(self,rank):\n for k in self._run:\n curr_dict = defaultdict(list)\n qid = self._run[k][0].get_qid()\n for t in self._run[k]:\n # print t.get_str()\n curr_dict[t.get_score()].append(t)\n curr_dict = OrderedDict(sorted(curr_dict.iteritems(),reverse=True))\n cnt = 0\n for score in curr_dict:\n cnt = cnt + 1\n if len(curr_dict[score]) > 1 and cnt<rank:\n self._ties[qid] = 1", "def evaluate(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)", "def testResults(self):\n problem = problems.simple()\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options={\n \"layers\": (),\n \"initializer\": \"zeros\"\n }))\n minimize_ops = optimizer.meta_minimize(problem, 5)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n cost, 
final_x = train(sess, minimize_ops, 1, 2)\n\n # Torch results\n torch_cost = 0.7325327\n torch_final_x = 0.8559\n\n self.assertAlmostEqual(cost, torch_cost, places=4)\n self.assertAlmostEqual(final_x[0], torch_final_x, places=4)", "def findphase(self):\n debug('ControllerStartup.findphase()')\n if not self.pidevice.HasFPH() or self.prop['skipfph']:\n return\n if not self._databuf['cstdone']:\n debug('no need to do find phase for axes %r', self.pidevice.axes)\n return\n for axis in self._databuf['cstdone']:\n if self.pidevice.qFRF(axis)[axis]:\n self.pidevice.FPH(axis)\n waitonphase(self.pidevice, **self._kwargs)\n self.pidevice.WPA()\n else:\n info('skip find phase for axis while axis %s is not referenced' % axis)", "def test_step(self, x_test, y_test):\n\n print(\"Evaluation:\")\n\n input_x_op = self.session.graph.get_operation_by_name(\"input_x\").outputs[0]\n input_y_op = self.session.graph.get_operation_by_name(\"input_y\").outputs[0]\n global_step_op = self.session.graph.get_operation_by_name(\"global_step\").outputs[0]\n\n loss_op = self.session.graph.get_operation_by_name(\"loss/loss\").outputs[0]\n\n predictions_op = self.session.graph.get_operation_by_name(\"output/predictions\").outputs[0] \n\n accuracy_op = self.session.graph.get_operation_by_name(\"accuracy/accuracy\").outputs[0]\n confusion_update_op = self.session.graph.get_operation_by_name(\"accuracy/confusion_update\").outputs[0]\n\n d_ = {\n input_x_op: x_test,\n input_y_op: y_test\n }\n\n self.init_dataset(d_)\n\n valid_batches_per_epoch = (len(x_test) - 1) // self.FLAGS.batch_size + 1\n\n sum_accuracy = 0\n \n confusion_variable = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"accuracy/confusion\")[0]\n self.session.run([confusion_variable.initializer])\n\n for current_step in range(valid_batches_per_epoch):\n\n if self.FLAGS.summary:\n step, summaries, loss, accuracy, cnf_matrix, predictions = self.session.run(\n [global_step_op, self.dev_summary_op, loss_op, accuracy_op, confusion_update_op, predictions_op])\n\n self.writer.add_summary(summaries, step)\n else:\n step, loss, accuracy, cnf_matrix, predictions = self.session.run(\n [global_step_op, loss_op, accuracy_op, confusion_update_op, predictions_op]) \n\n sum_accuracy += accuracy\n\n try:\n all_predictions = np.concatenate((all_predictions, predictions), axis=0)\n except NameError:\n all_predictions = predictions\n\n\n valid_accuracy = sum_accuracy / valid_batches_per_epoch\n\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: valid_accuracy {:g}\".format(time_str, valid_accuracy))\n print(\"Confusion matrix:\")\n print(cnf_matrix)\n\n return valid_accuracy, all_predictions", "def fit_and_get_test_predictions(self, trace, tuning=True):\n pass", "def is_action_applied(instance: Algorithm) -> bool:\n if len(get_results(instance)) == 0:\n return False\n return True", "def backtrack(self):\n prediction = list()\n\n # import ipdb\n # ipdb.set_trace()\n # Initialize for length of top-k sequences\n length = [[self.max_unroll] * self.beam_size for _ in range(self.batch_size)]\n\n # Last step output of the beam are not sorted => sort here!\n # Size not changed [batch size, beam_size]\n top_k_score, top_k_idx = self.scores[-1].topk(self.beam_size, dim=1)\n\n # Initialize sequence scores\n top_k_score = top_k_score.clone()\n\n n_eos_in_batch = [0] * self.batch_size\n\n # Initialize Back-pointer from the last step\n # Add self.position_idx for indexing variable with batch x beam as the first dimension\n # [batch x beam]\n back_pointer = (top_k_idx + 
self.batch_position.unsqueeze(1)).view(-1)\n\n for t in reversed(range(self.max_unroll)):\n # Reorder variables with the Back-pointer\n # [batch x beam]\n token_id = self.token_ids[t].index_select(0, back_pointer)\n\n # Reorder the Back-pointer\n # [batch x beam]\n back_pointer = self.back_pointers[t].index_select(0, back_pointer)\n\n # Indices of ended sequences\n # [< batch x beam]\n eos_indices = self.token_ids[t].data.eq(EOS_ID).nonzero()\n\n # For each batch, every time we see an EOS in the backtracking process,\n # If not all sequences are ended\n # lowest scored survived sequence <- detected ended sequence\n # if all sequences are ended\n # lowest scored ended sequence <- detected ended sequence\n if eos_indices.dim() > 0:\n # Loop over all EOS at current step\n for i in range(eos_indices.size(0) - 1, -1, -1):\n # absolute index of detected ended sequence\n eos_idx = eos_indices[i, 0].item()\n\n # At which batch EOS is located\n batch_idx = eos_idx // self.beam_size\n batch_start_idx = batch_idx * self.beam_size\n\n # if n_eos_in_batch[batch_idx] > self.beam_size:\n\n # Index of sequence with lowest score\n _n_eos_in_batch = n_eos_in_batch[batch_idx] % self.beam_size\n beam_idx_to_be_replaced = self.beam_size - _n_eos_in_batch - 1\n idx_to_be_replaced = batch_start_idx + beam_idx_to_be_replaced\n\n # Replace old information with new sequence information\n back_pointer[idx_to_be_replaced] = self.back_pointers[t][eos_idx].item()\n token_id[idx_to_be_replaced] = self.token_ids[t][eos_idx].item()\n top_k_score[batch_idx,\n beam_idx_to_be_replaced] = self.scores[t].view(-1)[eos_idx].item()\n length[batch_idx][beam_idx_to_be_replaced] = t + 1\n\n n_eos_in_batch[batch_idx] += 1\n\n # max_unroll * [batch x beam]\n prediction.append(token_id)\n\n # Sort and re-order again as the added ended sequences may change the order\n # [batch, beam]\n top_k_score, top_k_idx = top_k_score.topk(self.beam_size, dim=1)\n final_score = top_k_score.data\n\n for batch_idx in range(self.batch_size):\n length[batch_idx] = [length[batch_idx][beam_idx.item()]\n for beam_idx in top_k_idx[batch_idx]]\n\n # [batch x beam]\n top_k_idx = (top_k_idx + self.batch_position.unsqueeze(1)).view(-1)\n\n # Reverse the sequences and re-order at the same time\n # It is reversed because the backtracking happens in the reverse order\n # [batch, beam]\n\n prediction = [step.index_select(0, top_k_idx).view(\n self.batch_size, self.beam_size) for step in reversed(prediction)]\n\n # [batch, beam, max_unroll]\n prediction = torch.stack(prediction, 2)\n\n return prediction, final_score, length", "def getSteps():", "def run(self):\n print(' strategies...')\n matrix_file = ''\n matrix_s, matrix_c = None, None\n # run for all but the optimal version\n item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n gt_graph = load_graph(graph)\n for strategy in Strategy.strategies:\n if strategy == 'optimal':\n continue\n print(' ', strategy)\n m_new = self.data_set.matrices[rec_type][graph][strategy][0]\n m_newc = self.data_set.matrices[rec_type][graph][strategy][1]\n debug(' ----', m_new)\n debug(' ----', m_newc)\n if not m_new:\n debug(' ---- not m_new')\n matrix_s, matrix_c, matrix_file = None, None, None\n elif matrix_file != m_new:\n matrix_s = SimilarityMatrix(item2matrix, m_new)\n matrix_c = SimilarityMatrix(item2matrix, m_newc)\n matrix_file = m_new\n debug(' ---- matrix_file != m_new')\n # for miss in 
self.data_set.missions[rec_type][graph][strategy]:\n for miss in Mission.missions:\n print(' ', miss)\n if 'Information Foraging' in miss or 'Berrypicking' in miss:\n matrix = matrix_c\n else:\n matrix = matrix_s\n for m in self.data_set.missions[rec_type][graph][strategy][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/',\n len(m.targets_original))\n debug(m.targets_original[ti])\n self.navigate(gt_graph, strategy, m, start,\n None, matrix)\n if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):\n # print('breaking...')\n m.reset()\n break\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # run the simulations for the optimal solution\n print(' optimal...')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n sp_file = graph.rsplit('.', 1)[0] + '.npy'\n with open(sp_file, 'rb') as infile:\n sp = pickle.load(infile)\n for miss in self.data_set.missions[rec_type][graph]['optimal']:\n for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))\n debug(m.targets_original[ti])\n self.optimal_path(m, start, sp)\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # # DEBUG\n # item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n # for rec_type in ['rbar']:\n # for graph in self.data_set.graphs[rec_type]:\n # print(' ', graph)\n # gt_graph = load_graph(graph)\n # sp_file = graph.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file, 'rb') as infile:\n # sp = pickle.load(infile)\n # m_newc = self.data_set.matrices[rec_type][graph]['title'][1]\n # matrix = SimilarityMatrix(item2matrix, m_newc)\n # sc = 'Berrypicking'\n # mc1 = self.data_set.missions[rec_type][graph]['title'][sc]\n # mc2 = self.data_set.missions[rec_type][graph]['optimal'][sc]\n # mc3 = self.data_set.missions[rec_type][graph]['random'][sc]\n # for m1, m2, m3 in zip(\n # mc1,\n # mc2,\n # mc3\n # ):\n # # evalute with title strategy\n # for ti in xrange(len(m1.targets_original)):\n # start = m1.path[-2] if m1.path else m1.start\n # debug('++++' * 16, 'mission', ti, '/', len(m1.targets_original))\n # # debug(m1.targets_original[ti])\n # self.navigate(gt_graph, 'title', m1, start, None, matrix)\n # # print(m1.path, ti, len(m1.targets_original[ti]), len(m1.targets[0]))\n # if ti > 0 and len(m1.targets_original[ti]) == len(m1.targets[0]):\n # # print('breaking...')\n # m1.reset()\n # break\n # if not (ti + 1) == len(m1.targets_original):\n # m1.path.append(u'*')\n # m1.reset()\n #\n # # evaluate with optimal strategy\n # for ti in xrange(len(m2.targets_original)):\n # start = m2.path[-2] if m2.path else m2.start\n # # debug('++++' * 16, 'mission', ti, '/', len(m2.targets_original))\n # # debug(m2.targets_original[ti])\n # self.optimal_path(m2, start, sp)\n # if not (ti + 1) == len(m2.targets_original):\n # m2.path.append(u'*')\n # m2.reset()\n # # pdb.set_trace()\n #\n # # if len(m1.path) < len(m2.path):\n # # print(len(m1.path), len(m2.path))\n # # pdb.set_trace()\n # # m1.compute_stats()\n # # m2.compute_stats()\n # # if m1.stats[-1] > m2.stats[-1]:\n # # print(m1.stats)\n # # print(m2.stats)\n # # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc1.compute_stats()\n # mc2.compute_stats()\n # print(mc1.stats[-1], mc2.stats[-1])\n # 
pdb.set_trace()\n\n # fname_5 = u'../data/bookcrossing/graphs/rbar_5.gt'\n # fname_20 = u'../data/bookcrossing/graphs/rbar_20.gt'\n # sp_file_5 = fname_5.rsplit('.', 1)[0] + '.npy'\n # sp_file_20 = fname_20.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file_5, 'rb') as infile:\n # sp_5 = pickle.load(infile)\n # with open(sp_file_20, 'rb') as infile:\n # sp_20 = pickle.load(infile)\n # sc = 'Berrypicking'\n # mc_5 = self.data_set.missions['rbar'][fname_5]['optimal'][sc]\n # mc_52 = self.data_set.missions['rbar'][fname_5]['title'][sc]\n # mc_20 = self.data_set.missions['rbar'][fname_20]['optimal'][sc]\n # mc_202 = self.data_set.missions['rbar'][fname_20]['title'][sc]\n # for m5, m20, m52, m202 in zip(\n # mc_5,\n # mc_20,\n # mc_52,\n # mc_202\n # ):\n # # evaluate 5 with optimal strategy\n # for ti in xrange(len(m5.targets_original)):\n # start = m5.path[-2] if m5.path else m5.start\n # self.optimal_path(m5, start, sp_5)\n # if not (ti + 1) == len(m5.targets_original):\n # m5.path.append(u'*')\n # m5.reset()\n #\n # # evaluate 20 with optimal strategy\n # for ti in xrange(len(m20.targets_original)):\n # start = m20.path[-2] if m20.path else m20.start\n # self.optimal_path(m20, start, sp_20)\n # if not (ti + 1) == len(m20.targets_original):\n # m20.path.append(u'*')\n # m20.reset()\n #\n # # if len(m5.path) < len(m20.path) or \\\n # if m5.path.count('*') > m20.path.count('*'):\n # print(len(m5.path))\n # for part in ' '.join(m5.path[2:]).split('*'):\n # print(' ', part)\n # print(len(m20.path))\n # for part in ' '.join(m20.path[2:]).split('*'):\n # print(' ', part)\n # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc_5.compute_stats()\n # mc_20.compute_stats()\n # print(mc_5.stats[-1], mc_20.stats[-1])\n #\n # for m5, m20 in zip(mc_5.missions, mc_20.missions):\n # if m5.stats[-1] > m20.stats[-1]:\n # print(m5.stats)\n # print(m20.stats)\n # pdb.set_trace()\n # pdb.set_trace()\n\n # write the results to a file\n # self.write_paths()\n self.save()", "def run(self, market):\n datapoints = self.fetch_datapoints(market)\n logging.debug(\"Strategy datapoints: {}\".format(datapoints))\n if datapoints is None:\n logging.debug(\"Unable to fetch market datapoints\")\n return TradeDirection.NONE, None, None\n return self.find_trade_signal(market, datapoints)", "def run(self):\n for direction in self.directions:\n rotation = direction[0]\n steps = direction[1]\n\n self.make_rotation(rotation)\n hq_found = self.travel(steps)\n\n if hq_found:\n return (abs(self.new_loc[0] + self.new_loc[1]))", "def check(self, algorithm_data: AlgorithmData) -> Tuple[bool, str, bool]:\n\n num_miss = np.sum(self.algorithm_data[:,FieldRolls.StepResult] != self.algorithm_data[:,FieldRolls.ResultPresentation])\n num_miss_perc = num_miss * 100/algorithm_data.shape[0]\n finish_level = self.parameters[\"max miss\"][\"value\"]\n result_str = str(num_miss) + \" out of \" + str(algorithm_data.shape[0]) + \" (\" + str(num_miss_perc) + \"% < \" + str(finish_level) + \"%)\"\n return True, result_str, num_miss_perc < finish_level", "def processAlgorithm(self, parameters, context, feedback):\n NO2_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTNP, context)\n NO2_present_data_source = gdal.Open(NO2_present_raster.dataProvider().dataSourceUri())\n arr_NO2_present = NO2_present_data_source.GetRasterBand(1).ReadAsArray()\n\n PM10_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTPP, context)\n PM10_present_data_source = gdal.Open(PM10_present_raster.dataProvider().dataSourceUri())\n 
arr_PM10_present = PM10_present_data_source.GetRasterBand(1).ReadAsArray()\n\n ozono_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTOP, context)\n ozono_present_data_source = gdal.Open(ozono_present_raster.dataProvider().dataSourceUri())\n arr_ozono_present = ozono_present_data_source.GetRasterBand(1).ReadAsArray()\n\n arr_present = arr_ozono_present + arr_PM10_present + arr_NO2_present\n\n NO2_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTNF, context)\n NO2_future_data_source = gdal.Open(NO2_future_raster.dataProvider().dataSourceUri())\n arr_NO2_future = NO2_future_data_source.GetRasterBand(1).ReadAsArray()\n\n PM10_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTPF, context)\n PM10_future_data_source = gdal.Open(PM10_future_raster.dataProvider().dataSourceUri())\n arr_PM10_future = PM10_future_data_source.GetRasterBand(1).ReadAsArray()\n\n ozono_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTOF, context)\n ozono_future_data_source = gdal.Open(ozono_future_raster.dataProvider().dataSourceUri())\n arr_ozono_future = ozono_future_data_source.GetRasterBand(1).ReadAsArray()\n\n arr_future = arr_ozono_future + arr_PM10_future + arr_NO2_future\n\n area_pixel = self.parameterAsInt(parameters, self.PIXEL_RES, context) * self.parameterAsInt(\n parameters, self.PIXEL_RES, context)\n\n NO2_euro_coeff = 77641.89\n ozono_euro_coeff = 14658.11\n PM10_euro_coeff = 17132.56\n\n arr_euro_present_NO2 = arr_NO2_present * NO2_euro_coeff\n arr_euro_present_ozono = arr_ozono_present * ozono_euro_coeff\n arr_euro_present_PM10 = arr_PM10_present * PM10_euro_coeff\n arr_value_present = arr_euro_present_PM10 + arr_euro_present_ozono + arr_euro_present_NO2\n\n arr_euro_future_NO2 = arr_NO2_future * NO2_euro_coeff\n arr_euro_future_ozono = arr_ozono_future * ozono_euro_coeff\n arr_euro_future_PM10 = arr_PM10_future * PM10_euro_coeff\n arr_value_future = arr_euro_future_PM10 + arr_euro_future_ozono + arr_euro_future_NO2\n\n arr_diff_NO2 = arr_euro_future_NO2 - arr_euro_present_NO2\n arr_diff_PM10 = arr_euro_future_PM10 - arr_euro_present_PM10\n arr_diff_ozono = arr_euro_future_ozono - arr_euro_present_ozono\n\n arr_diff_tot = arr_diff_NO2 + arr_diff_PM10 + arr_diff_ozono\n\n # Initialize and write on output raster\n path_output = self.parameterAsString(parameters, self.OUTPUT, context)\n file_output = path_output + '/SE_02_rimozione_inquinanti_delta_euro.tiff'\n driver = gdal.GetDriverByName(\"GTiff\")\n [cols, rows] = arr_NO2_present.shape\n diff_tot = np.sum(arr_diff_tot) / (cols * rows )\n outdata = driver.Create(file_output, rows, cols, 1, gdal.GDT_Float64)\n outdata.SetGeoTransform(NO2_present_data_source.GetGeoTransform()) ##sets same geotransform as input\n outdata.SetProjection(NO2_present_data_source.GetProjection()) ##sets same projection as input\n outdata.GetRasterBand(1).WriteArray(arr_diff_tot)\n print(np.max(outdata.GetRasterBand(1).ReadAsArray()))\n outdata.FlushCache()\n\n # Years\n present = self.parameterAsInt(parameters, self.INPUTPRE, context)\n future = self.parameterAsInt(parameters, self.INPUTFUT, context)\n report_output = path_output + '/SE_rimozione_inquinanti.txt'\n f = open(report_output, \"w+\")\n today = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')\n f.write(\"Sommario dell'analisi della rimozione inquinanti\\n\")\n f.write(\"Data: \" + today +\"\\n\\n\\n\")\n f.write(\"Analisi stato di fatto\\n\\n\")\n f.write(\"Anno corrente: %i \\n\" % (present))\n f.write(\"Rimozione NO2 Stato attuale (ton): %f \\n\" % 
(np.sum(arr_NO2_present)))\n f.write(\"Rimozione PM10 Stato attuale (ton): %f \\n\" % (np.sum(arr_PM10_present)))\n f.write(\"Rimozione ozono Stato attuale (ton): %f \\n\" % (np.sum(arr_ozono_present)))\n f.write(\"Valore totale della rimozione inquinanti (€): %f \\n\\n\\n\" % (np.sum(arr_value_present)))\n f.write(\"Analisi stato di progetto\\n\\n\")\n f.write(\"Anno progetto: %i \\n\" % (future))\n f.write(\"Rimozione NO2 Stato di progetto (ton): %f \\n\" % (np.sum(arr_NO2_future)))\n f.write(\"Rimozione PM10 Stato di progetto (ton): %f \\n\" % (np.sum(arr_PM10_future)))\n f.write(\"Rimozione ozono Stato di progetto (ton): %f \\n\" % (np.sum(arr_ozono_future)))\n f.write(\"Valore totale della rimozione inquinanti (€): %f \\n\\n\\n\" % (np.sum(arr_value_future)))\n f.write(\"Differenze tra stato di progetto e stato attuale\\n\\n\")\n f.write(\"Anno progetto: %i - %i\\n\" % (present, future))\n f.write(\"Differenza della rimozione inquinanti (ton):: %f \\n\" % (np.sum(arr_future - arr_present)))\n f.write(\"Differenza sequestro inquinanti per unità di superficie (ton/ha): %f \\n\" % (\n np.sum(arr_future - arr_present) / (cols * rows * area_pixel) * 10000))\n f.write(\"Differenza in termini economici del SE Rimozione inquinanti (stato di progetto – stato attuale) (€):%d \\n\" % (\n np.sum(arr_diff_tot))) \n return {self.OUTPUT: 'Completed'}\n\n \n # ----------------------------------------------------------------------------------- \n # Copyright (c) 2021 Città di Torino.\n # \n # This material is free software: you can redistribute it and/or modify\n # it under the terms of the GNU General Public License as published by\n # the Free Software Foundation, either version 2 of the License, or\n # (at your option) any later version.\n # \n # This program is distributed in the hope that it will be useful,\n # but WITHOUT ANY WARRANTY; without even the implied warranty of\n # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n # GNU General Public License for more details.\n # \n # You should have received a copy of the GNU General Public License\n # along with this program. 
If not, see http://www.gnu.org/licenses.\n # ----------------------------------------------------------------------------------- ", "def positive_scores(self, axis, th_stat=10**(-2)):\n \n traj, rank = self._check_run(axis)\n \n low_boundary = _np.exp(_np.log(self.params['low_bound'])/2)\n t = len(traj)-1\n stat = 0\n \n # Iteration over the trajectories\n for i in range(len(traj[0])):\n # above the low boundary\n if traj[t][i] > low_boundary:\n # stationary (change less than stat_th)\n if abs(_np.log(traj[t][i]) - _np.log(traj[t-1][i])) < th_stat:\n stat += 1\n return stat", "def run():\n if am_i_root():\n\n print(\"*** initializing...\")\n\n # Print parameters\n print(\"N_DIMS = \" + str(N_DIMS))\n print(\"LAMBDA_OVER_DX = \" + str(LAMBDA_OVER_DX))\n print(\"R_DT = \" + str(R_DT))\n print(\"MU0_POISSON = \" + str(MU0_POISSON))\n print(\"NORM_POISSON = \" + NORM_POISSON)\n print(\"N_GRID = \" + str(N_GRID))\n print(\"N_HITS = \" + str(N_HITS))\n print(\"POLICY = \" + str(POLICY))\n if POLICY == -1:\n print(\"MODEL_PATH = \" + str(MODEL_PATH))\n else:\n print(\"STEPS_AHEAD = \" + str(STEPS_AHEAD))\n print(\"EPSILON = \" + str(EPSILON))\n print(\"STOP_t = \" + str(STOP_t))\n print(\"STOP_p = \" + str(STOP_p))\n print(\"N_PARALLEL = \" + str(N_PARALLEL))\n print(\"WITH_MPI = \" + str(WITH_MPI))\n print(\"ADAPTIVE_N_RUNS = \" + str(ADAPTIVE_N_RUNS))\n print(\"REL_TOL = \" + str(REL_TOL))\n print(\"MAX_N_RUNS = \" + str(MAX_N_RUNS))\n print(\"N_RUNS(input) = \" + str(N_RUNS))\n sys.stdout.flush()\n\n # Perform runs\n if am_i_root():\n print(\"*** generating episodes...\")\n\n N_runs = N_RUNS\n if ADAPTIVE_N_RUNS or WITH_MPI:\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n N_runso = 0\n\n if WITH_MPI:\n cdf_t_tot_loc = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot_loc = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_loc = np.nan * np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n failed_loc = - np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n else:\n cdf_t_tot = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_episodes = np.nan * np.ones(MAX_N_RUNS, dtype=float)\n failed_episodes = - np.ones(MAX_N_RUNS, dtype=float)\n\n while True:\n if WITH_MPI: # MPI\n if N_runs % N_PARALLEL != 0:\n raise Exception(\"N_runs must be multiple of N_PARALLEL with MPI\")\n COMM.Barrier()\n # Decomposition\n Nepisodes = N_runs // N_PARALLEL\n episode_list = range(N_runso + ME, N_runs, N_PARALLEL)\n # Run episodes and reduce locally\n ind = N_runso // N_PARALLEL\n for episode in episode_list:\n cdf_t, cdf_h, mean_t_loc[ind], failed_loc[ind] = Worker(episode)\n cdf_t_tot_loc += cdf_t\n cdf_h_tot_loc += cdf_h\n ind += 1\n\n # Reduce globally the mean_t and failed\n mean_t_episodes = np.empty([N_runs], dtype=float)\n failed_episodes = np.empty([N_runs], dtype=float)\n COMM.Barrier()\n COMM.Allgather([mean_t_loc[:ind], Nepisodes, MPI.DOUBLE], [mean_t_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Allgather([failed_loc[:ind], Nepisodes, MPI.DOUBLE], [failed_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Barrier()\n elif N_PARALLEL > 1: # multiprocessing\n # Run episodes in parallel\n pool = multiprocessing.Pool(N_PARALLEL)\n result = pool.map(Worker, range(N_runso, N_runs))\n pool.close()\n pool.join()\n # Reduce\n ind = N_runso\n for cdf_t, cdf_h, mean_t, failed in result:\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] 
= failed\n ind += 1\n elif N_PARALLEL == 1: # sequential\n ind = N_runso\n for episode in range(N_runso, N_runs):\n cdf_t, cdf_h, mean_t, failed = Worker(episode)\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n else:\n raise Exception(\"Problem with N_PARALLEL: must be an int >= 1\")\n\n # estimate of the error\n mean_ep = np.mean(mean_t_episodes[:N_runs])\n sigma_ep = np.std(mean_t_episodes[:N_runs])\n std_error_mean = sigma_ep / np.sqrt(N_runs)\n rel_std_error_mean = std_error_mean / mean_ep\n\n # break clause\n if not ADAPTIVE_N_RUNS:\n break\n else:\n if rel_std_error_mean < REL_TOL:\n break\n elif N_runs >= MAX_N_RUNS:\n break\n else:\n N_runso = N_runs\n N_runs = int(np.ceil(1.05 * (sigma_ep / mean_ep / REL_TOL) ** 2))\n N_runs = min(N_runs, MAX_N_RUNS)\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n if am_i_root():\n print(\"N_RUNS(performed) = \" + str(N_runs))\n sys.stdout.flush()\n\n # Reduce\n if am_i_root():\n print(\"*** post-processing...\")\n if WITH_MPI:\n # locally\n cdf_t_tot_loc /= N_runs\n cdf_h_tot_loc /= N_runs\n # Reduce globally\n cdf_t_tot = np.empty([LEN_CDF_T], dtype=float)\n cdf_h_tot = np.empty([LEN_CDF_H], dtype=float)\n COMM.Barrier()\n COMM.Allreduce(cdf_t_tot_loc, cdf_t_tot, op=MPI.SUM)\n COMM.Allreduce(cdf_h_tot_loc, cdf_h_tot, op=MPI.SUM)\n COMM.Barrier()\n else:\n cdf_t_tot /= N_runs\n cdf_h_tot /= N_runs\n mean_t_episodes = mean_t_episodes[:N_runs]\n failed_episodes = failed_episodes[:N_runs]\n\n # Further post-processing, save and plot\n if am_i_root():\n\n # from cdf to pdf\n pdf_t_tot = cdf_to_pdf(cdf_t_tot)\n pdf_h_tot = cdf_to_pdf(cdf_h_tot)\n\n # compute stats of number of steps and number of hits\n t_bins = np.arange(BIN_START_T, BIN_END_T, BIN_SIZE_T) + 0.5 * BIN_SIZE_T\n mean_t, sigma_t, skew_t, kurt_t, p_found = stats_from_pdf(t_bins, pdf_t_tot)\n p25_t, p50_t, p75_t, p90_t, p95_t, p99_t, _ = stats_from_cdf(t_bins, cdf_t_tot)\n\n h_bins = np.arange(BIN_START_H, BIN_END_H, BIN_SIZE_H) + 0.5 * BIN_SIZE_H\n mean_h, sigma_h, skew_h, kurt_h, _ = stats_from_pdf(h_bins, pdf_h_tot)\n p25_h, p50_h, p75_h, p90_h, p95_h, p99_h, _ = stats_from_cdf(h_bins, cdf_h_tot)\n\n print(\"probability that the source is never found : %.10f\" % (1.0 - p_found, ))\n print(\"mean number of steps to find the source : %.3f +/- %.3f\" % (mean_t, 1.96 * std_error_mean))\n print(\"number of steps to find the source with 50 %% probability: %.3f\" % p50_t)\n print(\"number of steps to find the source with 99 %% probability: %.3f\" % p99_t)\n nb_failed = np.sum(failed_episodes)\n if np.any(failed_episodes < 0):\n nb_failed = -1\n print(\"problem while recording failures\")\n else:\n print(\"number of failed episodes : %d / %d (%f %%)\"\n % (nb_failed, N_runs, nb_failed / N_runs * 100))\n sys.stdout.flush()\n\n # save all parameters to txt file\n inputs = {\n \"N_DIMS\": N_DIMS,\n \"LAMBDA_OVER_DX\": LAMBDA_OVER_DX,\n \"R_DT\": R_DT,\n \"MU0_POISSON\": MU0_POISSON,\n \"NORM_POISSON\": NORM_POISSON,\n \"N_GRID\": N_GRID,\n \"N_HITS\": N_HITS,\n \"POLICY\": POLICY,\n \"STEPS_AHEAD\": STEPS_AHEAD,\n \"MODEL_PATH\": MODEL_PATH,\n \"STOP_t\": STOP_t,\n \"STOP_p\": STOP_p,\n \"ADAPTIVE_N_RUNS\": ADAPTIVE_N_RUNS,\n \"REL_TOL\": REL_TOL,\n \"MAX_N_RUNS\": MAX_N_RUNS,\n \"N_RUNS_PERFORMED\": N_runs,\n \"BIN_START_T\": BIN_START_T,\n \"BIN_END_T\": BIN_END_T,\n \"BIN_SIZE_T\": 
BIN_SIZE_T,\n \"BIN_START_H\": BIN_START_H,\n \"BIN_END_H\": BIN_END_H,\n \"BIN_SIZE_H\": BIN_SIZE_H,\n \"EPSILON\": EPSILON,\n }\n param_txt_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_parameters\" + \".txt\"))\n with open(param_txt_file, 'w') as out:\n for key, val in inputs.items():\n print(key + \" = \" + str(val), file=out)\n\n # save stats\n stats_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_statistics\" + \".txt\"))\n with open(stats_file, \"w\") as sfile:\n sfile.write(\"p_not_found\\t%+.4e\\n\" % (1 - p_found,))\n for varname in \\\n ('mean_t', 'sigma_t', 'skew_t', 'kurt_t', 'p25_t', 'p50_t', 'p75_t', 'p90_t', 'p95_t', 'p99_t'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n for varname in \\\n ('mean_h', 'sigma_h', 'skew_h', 'kurt_h', 'p25_h', 'p50_h', 'p75_h', 'p90_h', 'p95_h', 'p99_h'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n\n # save CDF of number of steps\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nsteps\" + \".npy\"))\n np.save(table_file, np.vstack((t_bins, cdf_t_tot)))\n\n # save CDF of number of hits\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nhits\" + \".npy\"))\n np.save(table_file, np.vstack((h_bins, cdf_h_tot)))\n\n # create and save figures\n if POLICY == -1:\n specifics = \"MODEL = \" + os.path.basename(MODEL_PATH)\n else:\n specifics = \"STEPS_AHEAD = \" + str(STEPS_AHEAD)\n subtitle = (\n \"N_DIMS = \"\n + str(N_DIMS)\n + \", \"\n + \"LAMBDA_OVER_DX = \"\n + str(LAMBDA_OVER_DX)\n + \", \"\n + \"R_DT = \"\n + str(R_DT)\n + \", \"\n + \"POLICY = \"\n + str(POLICY)\n + \", \"\n + specifics\n + \", \"\n + \"N_GRID = \"\n + str(N_GRID)\n + \", \"\n + \"N_HITS = \"\n + str(N_HITS)\n + \", \"\n + \"N_RUNS = \"\n + str(N_runs)\n + \"\\n\"\n )\n\n # plot PDF(nsteps), CDF(nsteps), PDF(nhits), CDF(nhits)\n fig, ax = plt.subplots(2, 2, figsize=(12, 10))\n plt.subplots_adjust(left=0.08, bottom=0.06, right=0.96, top=0.92, hspace=0.35, wspace=0.30)\n kwargs = {'xycoords': 'axes fraction', 'fontsize': 8, 'ha': \"right\"}\n for row, varname in enumerate([\"number of steps\", \"number of hits\"]):\n if varname == \"number of steps\":\n bins = t_bins\n cdf_tot = cdf_t_tot\n pdf_tot = pdf_t_tot\n mean = mean_t\n sigma = sigma_t\n skew = skew_t\n kurt = kurt_t\n p50 = p50_t\n p75 = p75_t\n p90 = p90_t\n p99 = p99_t\n filesuffix = 'nsteps'\n color = \"tab:blue\"\n else:\n bins = h_bins\n cdf_tot = cdf_h_tot\n pdf_tot = pdf_h_tot\n mean = mean_h\n sigma = sigma_h\n skew = skew_h\n kurt = kurt_h\n p50 = p50_h\n p75 = p75_h\n p90 = p90_h\n p99 = p99_h\n filesuffix = 'nhits'\n color = \"tab:orange\"\n max_x = bins[np.nonzero(pdf_tot)[0][-1]]\n for col, fct in enumerate([\"PDF\", \"CDF\"]):\n if fct == \"PDF\":\n ydata = pdf_tot\n ylim = (0.0, 1.02 * np.max(pdf_tot))\n elif fct == \"CDF\":\n ydata = cdf_tot\n ylim = (0.0, 1.0)\n\n ax[row, col].plot(bins, ydata, \"-o\", color=color, markersize=2, linewidth=1)\n ax[row, col].set_title(fct + \" of \" + varname)\n ax[row, col].set_xlabel(varname + \" to find the source\")\n ax[row, col].set_xlim((0, max_x))\n ax[row, col].set_ylim(ylim)\n\n if fct == \"PDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"mean = \" + \"{:.3e}\".format(mean), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"std = \" + \"{:.3e}\".format(sigma), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"skew = \" + \"{:.3e}\".format(skew), xy=(0.98, 0.48), **kwargs)\n 
ax[row, col].annotate(\"ex. kurt = \" + \"{:.3e}\".format(kurt), xy=(0.98, 0.44), **kwargs)\n elif fct == \"CDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"P50 = \" + \"{:.3e}\".format(p50), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"P75 = \" + \"{:.3e}\".format(p75), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"P90 = \" + \"{:.3e}\".format(p90), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"P99 = \" + \"{:.3e}\".format(p99), xy=(0.98, 0.44), **kwargs)\n plt.grid(False)\n plt.figtext(0.5, 0.985, subtitle, fontsize=7, ha=\"center\", va=\"top\")\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_distributions.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # plot mean nb steps vs number of episodes\n number_episodes = range(1, N_runs + 1)\n cum_mean_t_episodes = np.cumsum(mean_t_episodes) / number_episodes\n if N_runs >= 100:\n number_episodes = number_episodes[20:]\n cum_mean_t_episodes = cum_mean_t_episodes[20:]\n fig, ax = plt.subplots()\n ax.plot(number_episodes, cum_mean_t_episodes, color=\"r\")\n ax.set_title(\"Convergence of the mean number of steps\")\n ax.set_xlabel(\"number of episodes\")\n ax.set_ylabel(\"mean number of steps\")\n plt.figtext(0.5, 0.985, subtitle, fontsize=5, ha=\"center\", va=\"top\")\n plt.grid(False)\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_convergence.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # save monitoring information (concatenate episodes files)\n monitoring_episodes_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_episodes.txt\"))\n filenames = [os.path.join(DIR_TMP, str(\"monitoring_episode_\" + str(episode) + \".txt\")) for episode in range(N_runs)]\n with open(monitoring_episodes_file, \"w\") as mfile:\n mfile.write(\"# episode\\thit_init\\tstop_flag\\tboundary_flag\\t\"\n \"p_not_found\\t\\tmean_nsteps\\t\\ttime_elapsed(sec)\\n\")\n for fname in filenames:\n if os.path.isfile(fname):\n with open(fname) as infile:\n mfile.write(infile.read())\n os.remove(fname)\n else:\n print(\"Unexpected: Missing episode file: \" + str(fname))\n\n # clean up tmp dirs\n if len(os.listdir(DIR_TMP)) != 0:\n print(\"Unexpected: The directory '\" + DIR_TMP\n + \"' is not removed, because it should be empty but is not.\")\n else:\n os.rmdir(DIR_TMP)\n if len(os.listdir(PARENT_DIR_TMP)) == 0:\n os.rmdir(PARENT_DIR_TMP)\n\n # summary\n monitoring_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_summary\" + \".txt\"))\n with open(monitoring_file, \"w\") as mfile:\n mfile.write(\"*** initial hit ***\\n\")\n first_hit = np.loadtxt(monitoring_episodes_file, usecols=1, dtype='int')\n hit_max = np.max(first_hit)\n hit_hist, _ = np.histogram(first_hit, bins=np.arange(0.5, hit_max + 1.5), density=True)\n for h in range(1, hit_max + 1):\n mfile.write(\"hit=%1d: %6.2f %% \\n\" % (h, hit_hist[h - 1] * 100))\n\n mfile.write(\"\\n*** stats convergence ***\\n\")\n mfile.write(\"number of episodes simulated : %d\\n\" % N_runs)\n mfile.write(\"standard error of the mean (estimate): %.4e = %5.2f %%\\n\"\n % (std_error_mean, rel_std_error_mean * 100))\n\n stopping_reason = np.loadtxt(monitoring_episodes_file, usecols=2, dtype='int')\n stop_max = np.max(stopping_reason)\n stopping_hist, _ = np.histogram(stopping_reason, bins=np.arange(0.5, stop_max + 1.5), density=True)\n mfile.write(\"\\n*** reason for stopping (1 is success, anything else is failure) ***\\n\")\n 
for stop in range(1, stop_max + 1):\n mfile.write(\"stop=%1d: %6.2f %% \\n\" % (stop, stopping_hist[stop - 1] * 100))\n\n mfile.write(\"\\n*** probability that the source is not found at the end of the episodes ***\\n\")\n p_not_found = np.loadtxt(monitoring_episodes_file, usecols=4)\n p_gtr_stop = p_not_found[p_not_found > STOP_p]\n p_not_found_max = np.max(p_not_found)\n mfile.write(\"criteria (STOP_p): %.5e\\n\" % STOP_p)\n mfile.write(\"max(p) : %.5e\\n\" % p_not_found_max)\n mfile.write(\"number of episodes where p > STOP_p: %7d (%8.4f %%)\\n\"\n % (len(p_gtr_stop), len(p_gtr_stop) / N_runs * 100))\n\n near_boundaries = np.loadtxt(monitoring_episodes_file, usecols=3, dtype='int')\n near_boundaries = np.count_nonzero(near_boundaries)\n mfile.write(\"\\n*** agent near boundaries ***\\n\")\n mfile.write(\"number of episodes where it happened: %7d (%8.4f %%)\\n\"\n % (near_boundaries, near_boundaries / N_runs * 100))\n\n episode_elapsed = np.loadtxt(monitoring_episodes_file, usecols=5)\n mfile.write(\"\\n*** computational cost per episode ***\\n\")\n mfile.write(\"avg elapsed seconds per episode: %.5e\\n\" % (np.mean(episode_elapsed)))\n mfile.write(\"max elapsed seconds per episode: %.5e\\n\" % (np.max(episode_elapsed)))\n\n elapsed_time_0 = (time.monotonic() - start_time_0) / 3600.0\n mfile.write(\"\\n*** computational cost ***\\n\")\n mfile.write(\"N_PARALLEL = %d\\n\" % N_PARALLEL)\n mfile.write(\"total elapsed hours : %.5e\\n\" % elapsed_time_0)\n mfile.write(\"cost in hours = total elapsed time * N_PARALLEL: %.5e\\n\" % (elapsed_time_0 * N_PARALLEL))\n\n print(\">>> Results saved in the directory: \" + DIR_OUTPUTS)\n\n sys.stdout.flush()", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()", "def solve(self):\n start = datetime.now()\n f = self.function\n while not self.converged():\n self.history.append(self.vertices)\n\n #step 1: sort\n self.order_vertices()\n\n #step 3: reflect\n reflected = self.get_reflected_point()\n if f(*self.vertices[0]) < f(*reflected) < f(*self.vertices[-1]):\n self.reflect()\n continue\n\n #step 4: expand\n if self.reflected_is_best():\n expanded = self.get_expanded_point()\n if f(*expanded) < f(*reflected):\n self.expand()\n else:\n self.reflect()\n continue\n\n #step 5: contract\n contracted = self.get_contracted_point()\n if f(*contracted) < f(*self.vertices[-1]):\n self.contract()\n continue\n\n #step 6: shrink\n self.shrink()\n print(\"optimization took {0}\".format(datetime.now()-start))\n return self.history, self.cache.history", "def static_evaluation(self):\n\n # check the main diagonals first\n value_i, token_i = self._check_main_diagonal(self.__human)\n value_j, token_j = self._check_main_diagonal(self.__machine)\n if value_i and token_i == self.__human:\n return 1\n elif value_j and token_j == self.__machine:\n return 3\n\n # check the secondary diagonals\n value_i, token_i = self._check_secondary_diagonal(self.__human)\n value_j, token_j = self._check_secondary_diagonal(self.__machine)\n if value_i and token_i == self.__human:\n return 1\n elif value_j and token_j == self.__machine:\n return 3\n\n # secondly check the columns\n value_i, token_i = self._check_rows(self.__human)\n value_j, token_j = self._check_rows(self.__machine)\n if value_i and token_i == self.__human:\n return 1\n elif value_j and token_j == self.__machine:\n return 3\n\n # lastly check the rows\n value_i, token_i = self._check_cols(self.__human)\n value_j, token_j = self._check_cols(self.__machine)\n if value_i and token_i == 
self.__human:\n return 1\n elif value_j and token_j == self.__machine:\n return 3\n\n # if no solution has been found, 2 Tie\n return 2", "def test_algorithm_info_fine(self):\n\t\ti = self.algo.algorithmInfo()\n\t\tself.assertIsNotNone(i)", "def main_pipeline(self, image):\n # detection\n t0 = datetime.now()\n bbox_list, score_list, label_list = self.det.inference(image)\n t1 = datetime.now()\n logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1)))\n \n # estimation\n t0 = datetime.now()\n disp = self.est.inference(image)\n depth_list = self.est.calc_depth(bbox_list)\n t1 = datetime.now()\n logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1)))\n \n # tracker predict\n t0 = datetime.now()\n for t in self.t_list:\n t.predict()\n t1 = datetime.now()\n logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1)))\n \n # associate\n t0 = datetime.now()\n matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list)\n t1 = datetime.now()\n logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1)))\n \n t0 = datetime.now()\n # update trackers for matched_pair\n for m in matched_pair:\n t = self.t_list[m[1]]\n bbox = bbox_list[m[0]]\n depth = depth_list[m[0]]\n est_dict = {\n 'label': label_list[m[0]],\n 'score': score_list[m[0]]}\n t.update(self.frame_idx, bbox, depth, est_dict)\n \n # update in-track status of all trackers\n for t in self.t_list:\n t.update_status(self.frame_idx)\n \n # purge out dead trackers\n self.t_list = [t for t in self.t_list if t.get_status()]\n\n # create new trackers for unmatched_bbox_list\n for b_idx in unmatched_bbox_list:\n bbox = bbox_list[b_idx]\n depth = depth_list[b_idx]\n est_dict = {\n 'label': label_list[b_idx],\n 'score': score_list[b_idx]}\n self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict))\n self.tid_new += 1\n\n t1 = datetime.now()\n logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1)))\n\n # disparity map for display\n return disp", "def proz(): \r\n print(\"processing: \",CURDATA()[0]) \r\n Check_180turn(left_boundary,right_boundary)\r\n EF() #exponential window multiplication + fourier\r\n APK0() #1. Phase correction 0th Ordnung\r\n APK1() #1. 
Phase correction 1st Ordnung\r\n ABS() #Baseline correction\r\n APK()\r\n ABS() #Baseline correction\r\n Check_180turn(left_boundary,right_boundary)", "def update(self):\r\n if self.games and all(game.result for game in self.games):\r\n self.rankings = self.compute_ranking()\r\n self.update_observers()\r\n\r\n if self.finals:\r\n for final in self.finals:\r\n final.update()", "def find_trajectory(self):\n\n translation,_ = self.trans_listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.x = translation[0]\n self.y = translation[1]\n \n cell_x = int(np.floor(self.x / self.metadata.resolution) + self.w / 2) - self.x_offset\n cell_y = int(np.floor(self.y / self.metadata.resolution) + self.h / 2) - self.y_offset\n\n visited = np.zeros(self.costmap.shape)\n visited[cell_y,cell_x] = 1\n\n to_explore = self.add_neighbors(visited, Node(cell_x,cell_y,0,None))\n to_explore.sort(key=operator.attrgetter('cost'))\n\n # Run modified Dijkstra algorithm\n while to_explore: \n next_node = to_explore.pop(0)\n if next_node.cost == -1:\n print(\"Found goal!\")\n\t\tself.send_final_pose(next_node)\n self.number_of_fails = 0\n self.get_trajectory(next_node)\n return\n \n to_explore = to_explore + self.add_neighbors(visited, next_node)\n to_explore.sort(key=operator.attrgetter('cost'))\n\n self.number_of_fails += 1\n print(\"Failed: %d times % self.number_of_fails\")\n\n if self.number_of_fails >= NUMBER_OF_FAILS:\n print(\"Exiting!\")\n msg = Bool()\n msg.data = True\n self.exp_complete_pub.publish(msg)", "def before_run(self, run_context):\n logging.info('Before creating the session...')\n\n self._global_step_value = run_context.session.run(self._global_step)\n if self._global_step_value % self._iterations_per_loop == 0:\n\n # Calling `play` the environment roll out a trajectory of length\n # `self._max_horizon`. 
Currently, we support two modes for play:\n # (1) stochastic play (similar to PPO)\n # (2) Monte-Carlo Tree Search (MCTS) play\n self._env_wrapper.play(self._max_horizon)\n\n # Computes explained variance between predicted values (from network)\n # and computed return values from environment.\n ev = math_utils.explained_variance(\n np.asarray(self._env_wrapper.trajectory_values),\n np.asarray(self._env_wrapper.trajectory_returns))\n tf_utils.add_summary(\n float(ev), 'Variation/explained_variance', self._global_step_value,\n self.summary_writer)\n\n if type(self._env_wrapper).__name__ == 'Env':\n # Update queues for episode data\n # (length of episodes and episode rewards)\n self._episode_reward_buf.extend(\n self._env_wrapper.trajectory_per_episode_rewards)\n self._episode_length_buf.extend(\n self._env_wrapper.trajectory_per_episode_lengths)\n else:\n self._episode_reward_buf.extend(\n self._env_wrapper.master_game.trajectory_per_episode_rewards)\n self._episode_length_buf.extend(\n self._env_wrapper.master_game.trajectory_per_episode_lengths)\n\n # Summaries for the current trajectory\n tf_utils.summary_stats(self._episode_reward_buf, 'Reward',\n 'Episode Rewards', self._global_step_value,\n self.summary_writer, False)\n tf_utils.summary_stats(self._episode_length_buf, 'Reward',\n 'Episode Length', self._global_step_value,\n self.summary_writer, False)\n\n mcts_tensor = np.full(\n np.asarray(self._env_wrapper.trajectory_values).shape,\n self._env_wrapper.mcts_sampling)\n\n run_context.session.run(\n self._iterator.initializer,\n feed_dict={\n self.features_ph['mcts_features']:\n self._env_wrapper.trajectory_states,\n self.features_ph['policy_features']:\n self._env_wrapper.policy_trajectory_states,\n self.labels_ph['action_tensor']:\n self._env_wrapper.trajectory_actions,\n self.labels_ph['value_tensor']:\n self._env_wrapper.trajectory_values,\n self.labels_ph['return_tensor']:\n self._env_wrapper.trajectory_returns,\n self.labels_ph['old_neg_logprob_tensor']:\n self._env_wrapper.trajectory_neg_logprobs,\n self.labels_ph['mean_tensor']:\n self._env_wrapper.trajectory_means,\n self.labels_ph['logstd_tensor']:\n self._env_wrapper.trajectory_logstds,\n self.labels_ph['mcts_enable_tensor']:\n mcts_tensor,\n self.labels_ph['policy_action_tensor']:\n self._env_wrapper.policy_trajectory_actions,\n self.labels_ph['policy_value_tensor']:\n self._env_wrapper.policy_trajectory_values,\n self.labels_ph['policy_return_tensor']:\n self._env_wrapper.policy_trajectory_returns,\n self.labels_ph['policy_old_neg_logprob_tensor']:\n self._env_wrapper.policy_trajectory_neg_logprobs,\n })", "def run(self):\n\n # init\n base_value = self._problem.evaluate()\n self._problem.set_as_best(base_value)\n\n # init iteration (used to count the amount of iterations)\n iteration = 0\n\n # add to data\n self._data_append(self.data, iteration, base_value, base_value)\n\n # init termination criterion\n self._termination_criterion.check_first_value(base_value)\n self._termination_criterion.start_timing()\n\n # main loop\n while self._termination_criterion.keep_running():\n\n # search the neighbourhood for the best move\n best_found_delta = self._best_found_delta_base_value\n best_found_move = None\n\n for move in self._problem.get_moves():\n\n # check quality move\n delta = self._problem.evaluate_move(move)\n\n # checks how the move alters the current state\n diff = self._diff(move)\n\n # if not in tabu list --> not similar to earlier performed\n # moves --> if delta better than old best move\n # --> becomes the best 
move\n\n if not self._tabu_list.contains(diff) and \\\n self._is_better(best_found_delta, delta):\n best_found_delta = delta\n best_found_move = move\n best_found_diff = diff\n\n # the best found move will be used as the next move\n # alter state problem\n base_value = base_value + best_found_delta\n\n # check if a move was found\n if best_found_move is not None:\n\n self._problem.move(best_found_move)\n\n # if better than best found --> new best_found\n if self._is_better(self._problem.best_order_value,\n base_value):\n self._problem.set_as_best(base_value)\n # log the better solution\n self._log_improvement(base_value)\n\n # add diff to tabu list\n self._tabu_list.add(best_found_diff)\n\n # add to data\n self._data_append(self.data, iteration,\n base_value, self._problem.best_order_value)\n\n self._termination_criterion.check_new_value(base_value)\n\n # functions _termination_criterion called\n self._termination_criterion.check_new_value(base_value)\n\n else:\n # no move found --> we're stuck --> break loop\n break\n\n iteration += 1\n self._termination_criterion.iteration_done()\n\n # last data point\n self._data_append(self.data, iteration, base_value,\n self._problem.best_order_value)\n\n # if we have data:\n # convert data to something easier to plot\n if self.data is not None:\n\n # convert to tuple of list\n data = convert_data(self.data)\n\n # make namedtuple\n DataAsLists = namedtuple(\n 'Data', ['time', 'iteration', 'value', 'best_value'])\n\n data = DataAsLists(data[0], data[1], data[2], data[3])\n\n else:\n data = None\n\n # return results\n\n Results = namedtuple('Results', ['best_order', 'best_value', 'data'])\n\n return Results(self._problem.best_order,\n self._problem.best_order_value,\n data)", "def check(self):\n basic_recognized = 1\n # Scan the array, in order to check if the primitives are recognized correctly\n for frame in self.data_array:\n hmm_name = frame.best_log_probability[0]\n\n if str(basic_recognized) in hmm_name or str(basic_recognized+1) in hmm_name:\n if str(basic_recognized+1) in hmm_name:\n basic_recognized+=1\n else:\n return False\n # Has been recognized the complete gesture? 
If yes return true else false\n if basic_recognized == self.n_primitives+1:\n return True", "def has_been_fit(self):\n return self.predictor.has_been_fit", "def fit(self):\n N = self.data.shape\n prev_k = 1\n k_lst = []\n k_list = self.tt_ranks.copy()\n if self.model=='tt': depth = len(N)-1\n elif self.model=='tk': depth = len(N)\n\n for axis in range(depth):\n if self.global_rank==0: print('Decomposing for stage=',axis+1)\n if self.model=='tt':\n self.data = tn_data_operations(self.comm1,self.fpath,self.data).dist_reshape([prev_k * N[axis], -1])\n elif self.model=='tk':\n k_lst.append(prev_k)\n self.data = tn_data_operations(self.comm1,self.fpath,self.data).dist_reshape([N[axis], np.product(k_lst)*np.product(N[axis+1:])])\n\n if np.any(k_list) != False:\n self.this_k = k_list[axis]\n else:\n if self.global_rank==0: print('Estimating rank now with SVD...')\n self.this_k = self.determine_rank(self.data, self.err)\n\n if self.routine=='nmf':\n self.params.k = self.this_k\n if self.global_rank==0: print('Performing NMF for TN stage=',axis+1)\n W, H, rel_error = PyNMF(self.data, factors=None, params=self.params).fit()\n\n elif self.routine=='svd':\n if self.global_rank == 0: print('Performing SVD for TN stage=', axis + 1)\n singularValues, U, V,rel_error = self.compute_svd(self.data,self.this_k)\n S = np.diag(singularValues)\n W = U\n H = S @ V\n\n if self.model=='tt':\n self.factors.append(np.reshape(W, (prev_k, N[axis], self.this_k)))\n elif self.model=='tk':\n self.factors.append(W)\n if axis==depth-1:\n H_final = np.hstack((self.comm.allgather(H)))\n if self.model=='tk':\n k_lst.append(self.this_k)\n H_final = np.reshape(H_final,k_lst[1:])\n # H_final = np.hstack((block_idx_rank_H_tran(H_final,1, self.size)))\n self.factors.append(H_final)\n self.rel_error.append(rel_error)\n prev_k = self.this_k\n fpath = 'data/H_factor.zarr'\n self.data = tn_data_operations(self.comm1,fpath,H).lazy_store_file()", "def arrived(self):\n \"\"\" Responsible for transformations \"\"\"\n \n if self.phase == 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, array([0,0]))\n else: \n return array_equal(self.destination, array([0,0]))\n elif self.phase > 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, self.position)\n else: \n return array_equal(self.destination, self.position)", "def run(self):\r\n if not self.s or not self.t:\r\n return False\r\n while self.unvisited: # not empty\r\n self.search_space_size += 1\r\n _, v = self.getHighestPriorityNode()\r\n self.search_space.append((self.preds[v], [v]))\r\n if v in self.closed_set:\r\n continue\r\n elif v == self.t: # or self.graph.getGeoCoords(v) == self.graph.getGeoCoords(self.t):\r\n return True\r\n self.closed_set.add(v)\r\n self.relaxVertex(v)\r\n return False # if no valid path has been found (some node inaccessible before t\r", "def algo(self):\n audio = np.array([self.audio.popleft() for _ in range(self.count)])\n # Run Classifier\n wav_data = np.abs(np.fft.rfft(audio.flatten()))\n if len(wav_data) > 0:\n pred = self.clf.predict(np.expand_dims(wav_data, 0))\n if self.verbose > 1:\n print('The prediction is : ' + str(pred))\n self.finished.emit(int(pred[-1]))\n else:\n self.finished.emit(0)", "def hr_game(t0, tf, n, A, B, R, x0):\n # t0 - Initial time\n # tf - Final time\n # n - Number of steps\n # A - Adjacency matrix, np.ndarray (N,N)\n # B - A 2D or 3D matrix with all payoff matrices, np.ndarray (S,S,N)\n # R - Relationship or preference matrix, np.ndarray (N,N)\n # 
x0 - Initial state of our system, np.ndarray (N,S), must be double\n\n # Number of players\n N = A[:, 0].size\n # Number of strategies\n S = x0[0, :].size\n # Step in each iteration\n h = (tf - t0) / n\n # Result of each step, np.ndarray (N, S, n+1)\n y = np.zeros([N, S, n+1], dtype='double')\n y[:, :, 0] = x0\n k = np.zeros([N, S])\n # I still don't know why, but theres a problem with negative payoffs\n B = matrixTranslate(B)\n\n # Fourth order Runge-Kutta\n for t in range(n):\n k1 = np.multiply(h, hr_egn(A, B, R, y[:, :, t]))\n k2 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k1, 2))))\n k3 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], np.divide(k2, 2))))\n k4 = np.multiply(h, hr_egn(A, B, R, np.add(y[:, :, t], k3)))\n # k = (k1 + 2*k2 + 2*k3 + k4)/6\n k = np.divide(np.add(np.add(k1, np.multiply(2, k2)), np.add(np.multiply(2, k3), k4)), 6)\n\n y[:, :, t+1] = np.add(y[:, :, t], k)\n\n # Filter results with machine epsilon\n for v in range(N):\n for s in range(S):\n if y[v, s, t+1] < np.sqrt(np.finfo('double').eps):\n y[v, s, t+1] = 0\n elif y[v, s, t+1] > np.subtract(1, np.sqrt(np.finfo('double').eps)):\n y[v, s, t + 1] = 1\n\n return y", "def analyse(self):\n pass", "def _decisionFunction(self):\n #THIS IS WHERE THE INTELLIGENT AGENT CODE MAKES DECISION\n #since this is the hand coded extension, I'm just going to hard code some stuff\n #q learning and Sarsa should hopefully do better\n \n #this is some other hand coded stuff that you read in stone's paper\n c1 = 64 #c1 = distance in pixels\n c2 = 2.5#c2 = something to multiply angle by\n c3 = 77 #c3 is the number of pixels you assume are in 5 meteres\n #state variable 7 is distance in pixels from K1 to T1\n if self.stateVariables[7] > c1:\n self._holdBall()\n else:\n passMax = float(\"-Inf\")\n passMaxArg = None\n for i in range(1,3):\n var = (c2 * arccos(self.stateVariables[10+i])) + (self.stateVariables[8+i] / c3)\n \"\"\"\n print(\"var = \", var)\n print(\"stateVariable[\", 10 + i, \"]=\", self.stateVariables[10+i] )\n print(\"arccos of stateVariable[\", 10 + i, \"]=\", arccos(self.stateVariables[10+i]) )\n print(\"stateVariable[\", 8 + i, \"]=\", self.stateVariables[8+i] )\n \"\"\"\n \n if var > passMax:\n passMax = var\n passMaxArg = i\n self._passBall(passMaxArg)", "def search_possible_steps(self):\n if self.ended:\n return False\n possible_steps_turple = (self.board == 0)\n possible_steps = np.transpose(possible_steps_turple.nonzero())\n return possible_steps", "def _after_run_finished(self):\n\n # append lists for this run\n self._mutual_info.append([])\n self._baseline_mutual_info.append([])\n\n self._classifier_accuracy.append([])\n self._baseline_classifier_accuracy.append([])\n\n self._steps.append([])\n\n self._average_boosting_dur.append([])\n self._average_delta.append([])\n self._different_steps.append([])\n\n # get all the measurements from the last run\n last_run_measurements = self._measurement_manager.run_measurements[-1]\n\n # temporary sliding window for computation of one value of the mutual information\n labels_window = []\n outputs_window = []\n baseline_outputs_window = []\n\n print('computing statistics after run...')\n\n # go step-by-step through the last run (single_measurement contains all the values taken in that time-step)\n for single_measurement in last_run_measurements:\n\n # these two measurements have to run with different (lower) frequency\n if 'average_boosting_dur' in single_measurement.keys():\n 
self._average_boosting_dur[-1].append(single_measurement['average_boosting_dur'])\n self._average_delta[-1].append(single_measurement['average_delta'])\n self._different_steps[-1].append(single_measurement['current_step'])\n\n # pick \"dataset_labels\" (see the init()) from the single_measurement and append one value to the separate list\n labels_window.append(single_measurement['dataset_labels'])\n outputs_window.append(single_measurement['model_outputs'])\n baseline_outputs_window.append(single_measurement['baseline_outputs'])\n\n # wait until the window has enough values\n if len(labels_window) < self._evaluation_period:\n continue\n\n # compute stats in the window and store to the last run (that's the [-1]) at the end (that's the append)\n self._mutual_info[-1].append(\n compute_mutual_information(\n np.array(labels_window),\n np.array(outputs_window),\n self._num_classes,\n data_contains_id=True)\n )\n\n if self._debug_mi:\n self._debug_mutual_info(np.array(labels_window), np.array(outputs_window), self._mutual_info[-1][-1])\n\n self._baseline_mutual_info[-1].append(\n compute_mutual_information(\n np.array(labels_window),\n np.array(baseline_outputs_window),\n self._num_classes,\n data_contains_id=True)\n )\n\n # compute the classifier accuracies (for model and baseline)\n dev = self._topology_adapter.get_device()\n output_dim = self._topology_adapter.get_model_output_size()\n\n labels_tensor = torch.tensor(labels_window, dtype=torch.long, device=dev)\n outputs_tensor = torch.tensor(outputs_window, dtype=torch.long, device=dev)\n baseline_outputs_tensor = torch.tensor(baseline_outputs_window, dtype=torch.long, device=dev)\n\n acc = self._compute_classifier_acc(outputs_tensor, labels_tensor, output_dim)\n self._classifier_accuracy[-1].append(acc)\n\n baseline_acc = self._compute_classifier_acc(baseline_outputs_tensor, labels_tensor, output_dim)\n self._baseline_classifier_accuracy[-1].append(baseline_acc)\n\n # store also step (for the x-axis)\n self._steps[-1].append(single_measurement['current_step'])\n\n # remove the self._sliding_window_stride items from the sliding windows.. (then fill the same amount..)\n for i in range(0, self._sliding_window_stride):\n if len(labels_window) > 0:\n labels_window.pop(0)\n outputs_window.pop(0)\n baseline_outputs_window.pop(0)", "def check(self):\n # check forward\n self._check_impl(self.key_to_stat_fwd, \"forward\")\n\n # check backward\n self._check_impl(self.key_to_stat_bwd, \"backward\")", "def convergence_on_track(self):\n\n on_track = True\n threshold = 5. 
# used to check condition if at least one of charnge_neutrality, rms-error goes down fast enough\n\n # first check if previous calculation was stopped due to reaching the QBOUND limit\n try:\n calc_reached_qbound = self.ctx.last_calc.outputs.output_parameters.get_dict(\n )['convergence_group']['calculation_converged']\n except AttributeError: # captures error when last_calc dies not have an output node\n calc_reached_qbound = False\n except KeyError: # captures\n calc_reached_qbound = False\n\n if self.ctx.kkrimp_step_success and not calc_reached_qbound:\n first_rms = self.ctx.last_rms_all[0]\n # skip first if this is the initial LDA+U iteration because there we see the original non-LDAU convergence value\n if 'settings_LDAU' in self.inputs and self.ctx.loop_count < 2 and len(self.ctx.last_rms_all) > 1:\n first_rms = self.ctx.last_rms_all[1]\n last_rms = self.ctx.last_rms_all[-1]\n # use this trick to avoid division by zero\n if last_rms == 0:\n last_rms = 10**-16\n r = last_rms / first_rms\n message = f'INFO: convergence check: first/last rms {first_rms}, {last_rms}'\n self.report(message)\n if r < 1:\n message = 'INFO: convergence check: rms goes down'\n self.report(message)\n on_track = True\n elif r > threshold:\n message = 'INFO: convergence check: rms goes up too fast, convergence is not expected'\n self.report(message)\n on_track = False\n elif len(self.ctx.last_rms_all) == 1:\n message = 'INFO: convergence check: already converged after single iteration'\n self.report(message)\n on_track = True\n else:\n message = 'INFO: convergence check: rms does not shrink fast enough, convergence is not expected'\n self.report(message)\n on_track = False\n elif calc_reached_qbound:\n message = 'INFO: convergence check: calculation reached QBOUND'\n self.report(message)\n on_track = True\n else:\n message = 'INFO: convergence check: calculation unsuccessful'\n self.report(message)\n on_track = False\n\n message = f'INFO: convergence check result: {on_track}'\n self.report(message)\n\n return on_track", "def runAlg_noPrints(self):\n alpha = self.__problem.getAlpha()\n beta = self.__problem.getBeta()\n q0 = self.__problem.getQ0()\n rho = self.__problem.getRho()\n \n bestSol= Ant(self.__n)\n \n for i in range(self.__noEpoch):\n antSol = self.iteration(alpha, beta, q0, rho)\n if antSol.evaluate() < bestSol.evaluate():\n bestSol.setSolution ( deepcopy(antSol.getSolution()) )\n if bestSol.evaluate() == 1 :\n return bestSol\n\n return bestSol", "def analyze(self):\n self.grayscale = (input(\"[G]rayscale or [C]olor? 
\").lower()[0] == \"g\")\n for i in range(1, 6):\n for j in range(1, 10):\n network_name = \"acas_%d_%d\" % (i, j)\n try:\n distance_classified = self.read_artifact(\n \"%s/distance\" % network_name)\n theta_classified = self.read_artifact(\n \"%s/theta\" % network_name)\n sample_pre, sample_post = self.read_artifact(\n \"%s/sample\" % network_name)\n single_line_data = self.read_artifact(\n \"%s/single_lines\" % network_name)\n except KeyError:\n # Skip due to missing data.\n continue\n print(\"Analyzing network:\", network_name)\n self.distance_plot(distance_classified)\n self.finalize_plot(\"%s/distance\" % network_name)\n self.theta_plot(theta_classified)\n self.finalize_plot(\"%s/theta\" % network_name)\n self.overlapping_plot(distance_classified, theta_classified)\n self.finalize_plot(\"%s/overlapping\" % network_name)\n self.sample_plot(sample_pre, sample_post)\n self.finalize_plot(\"%s/sample\" % network_name)\n\n self.single_line_plots(network_name, single_line_data)\n return True", "def test(self):\n\t\ttest_rewards = []\n\t\tobs = self.env.reset()\n\t\tdone_test = False\n\t\tfor t in range(self.test_cycles):\n\t\t\t# to render or not to render\n\t\t\tif self.render_test:\n\t\t\t\tenv.render()\n\n\t\t\tcycle_rewards = 0\n\t\t\twhile not done_test:\n\t\t\t\tfeed_dict_test = {self.va_input: obs}\n\t\t\t\taction_test = self.sess.run(self.va_out, feed_dict = feed_dict_test)\n\t\t\t\taction_test = self.sess.run(tf.argmax(action_test))\n\t\t\t\tobs_test, r_test, done_test,_ = env.step(action_test)\n\t\t\t\tcycle_rewards += r_test\n\n\t\t\ttest_rewards.append(cycle_rewards)\n\n\t\treturn test_rewards", "def __trajectories(self):\n\n\t\tXPi = np.copy(self.__XPinit)\n\n\t\tXPEnsembleBefore = []\n\t\tXPEnsembleAfter = []\n\n\t\tfor ki in range(self.__Nkicks):\n\n\t\t\tXPdamped = self.__kick(XPi)\n\t\t\tXPEnsembleBefore.append(XPdamped)\n\t\t\tXPkicked = self.__damping(XPdamped)\n\t\t\tXPEnsembleAfter.append(XPkicked)\n\t\t\tXPi = XPkicked\n\n\t\treturn XPEnsembleBefore, XPEnsembleAfter", "def solve(self):\n measure = None\n while not self.step():\n self._nbSteps += 1\n measure = self.measure(lastMeasure=measure)", "def dynamic_programming_on_trellis(self, instance, run_forward_alg=True):\n \"\"\"Build a trellis of length of all tags +1 for tag|START x length of the instance sequence of probabilities.\n Backtrace matrix has the same dimensions but stores corresponding codes for states.\n First, I initialize the probabilities for each state at t=0 as simply emission probability*transition probability.\n backtrace_pointers at t=0 are already initialized to 0.\n Loop through time step starting from t=1 and through states and either sum up (Forward) or get max (Viterbi) of the product of the previous step and emission probability and transition probability for all states (for each another loop through states is needed).\n In Viterbi, the code(index) to the most probable state is added at the same time.\n \"\"\"\n #TODO:Initialize trellis and backtrace pointers\n trellis = numpy.zeros((len(self.labels)+1,len(instance.data)))\n backtrace_pointers = numpy.zeros(shape=(len(self.labels)+1,len(instance.data)), dtype=int)\n #TODO:Traverse through the trellis here\n if run_forward_alg == True:\n for i in xrange(len(self.labels)):\n if instance.data[0] in self.V:\n trellis[i][0] = self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index(instance.data[0])][i]\n else:\n trellis[i][0] = self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index('<UNK>')][i]\n for i 
in xrange(len(self.labels)):\n for j in xrange(1,len(instance.data)):\n emission_prob = 0\n if instance.data[j] in self.V:\n emission_prob = self.emission_matrix[self.V.index(instance.data[j])][i]\n else:\n emission_prob = self.emission_matrix[self.V.index('<UNK>')][i]\n for k in xrange(1, len(self.labels)):\n trellis[i][j] += trellis[k][j-1] * self.transition_matrix[k][i] * emission_prob\n else:\n for i in xrange(len(self.labels)):\n if instance.data[0] in self.V:\n trellis[i][0] = self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index(instance.data[0])][i]\n else:\n trellis[i][0] = self.transition_matrix[len(self.labels)][i]*self.emission_matrix[self.V.index('<UNK>')][i]\n for j in xrange(1,len(instance.data)):\n for i in xrange(len(self.labels)):\n emission_prob = 0\n if instance.data[j] in self.V:\n emission_prob = self.emission_matrix[self.V.index(instance.data[j])][i]\n else:\n emission_prob = self.emission_matrix[self.V.index('<UNK>')][i]\n max_prob = trellis[0][j-1] * self.transition_matrix[0][i] * emission_prob\n max_index = 0\n for k in xrange(1, len(self.labels)):\n prob = trellis[k][j-1] * self.transition_matrix[k][i] * emission_prob\n if prob > max_prob:\n max_prob = prob\n max_index = k\n trellis[i][j] = max_prob\n backtrace_pointers[i][j] = max_index\n return (trellis, backtrace_pointers)", "def eval_pos():\n annotations_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/annotations\"\n all_iou = []\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n print(seq)\n bbox, frame_id = get_frame_bbox(annotations_dir, seq + '.txt')\n predict_bbox = []\n for idx in range(len(bbox)):\n kalman_filter = KalmanFilter()\n trace_bbox = bbox[idx]\n trace_predict_bbox = []\n mean, covariance = kalman_filter.initiate(tlwh_to_xyah(trace_bbox[0]))\n for i in range(1, trace_bbox.shape[0]):\n mean, covariance = kalman_filter.predict(mean, covariance)\n trace_predict_bbox.append(tlwh(mean))\n mean, covariance = kalman_filter.update(mean, covariance, tlwh_to_xyah(trace_bbox[i]))\n\n trace_predict_bbox = np.array(trace_predict_bbox)\n for i in range(trace_predict_bbox.shape[0]):\n trace_predict_bbox[i] = tlwh_to_tlbr(trace_predict_bbox[i])\n for i in range(trace_bbox.shape[0]):\n trace_bbox[i] = tlwh_to_tlbr(trace_bbox[i])\n\n predict_bbox.append(trace_predict_bbox)\n bbox[idx] = bbox[idx][1:]\n frame_id[idx] = frame_id[idx][1:]\n assert bbox[idx].shape[0] == predict_bbox[idx].shape[0]\n iou = []\n for i in range(len(bbox)):\n trace_iou = []\n trace_bbox = bbox[i]\n trace_predict_bbx = predict_bbox[i]\n for j in range(trace_bbox.shape[0]):\n iou_val = bbox_ious(np.ascontiguousarray(trace_bbox[j][np.newaxis, :], dtype=np.float),\n np.ascontiguousarray(trace_predict_bbx[j][np.newaxis, :], dtype=np.float))\n trace_iou.append(iou_val)\n iou.append(np.array(trace_iou))\n iou = [int(np.mean(i)*100) for i in iou]\n all_iou += iou\n bins = np.zeros(101)\n for i in all_iou:\n bins[i] += 1\n plt.bar(np.arange(101), bins)\n plt.ylabel('num')\n plt.xlabel('IoU*100')\n plt.show()", "def testInThread(self):\n\n self.collectSensorData()\n self.moveHome()\n self.requestGrasp()\n result = self.waitForGenerateGraspsResult()\n graspFound = self.processGenerateGraspsResult(result)\n return graspFound", "def start_algorithm(self):\r\n pass", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n 
allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def test(self, plot=False):\n accuracy_list = []\n fobj_avg = self.load_stats()\n\n for ii in range(settings.PARS['maxIters']):\n model = self.load_model(ii)\n D1 = model['D']\n W1 = model['W']\n\n # classification\n tic = time.time()\n accuracy_list.append(self.classification(D1, W1)[1])\n toc = time.time()\n print(\n 'Final recognition rate for OnlineDL is : {} , objective function value: {}, time: {}'\n .format(accuracy_list[ii], fobj_avg[ii], toc-tic)\n )\n\n accuracy_list = np.asarray(accuracy_list)\n\n print('Best recognition rate for OnlineDL is {} at iteration {}'.format(\n accuracy_list.max(), accuracy_list.argmax()))\n\n if plot:\n # plot the objective function values for all iterations\n plt.clf()\n plt.plot(list(fobj_avg.keys()), list(fobj_avg.values()), 'mo--', linewidth=2)\n plt.xlabel('Iterations')\n plt.ylabel('Average objective function value')\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.show()\n\n plt.clf()\n plt.plot(accuracy_list, 'rs--', linewidth=2)\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.xlabel('Iterations')\n plt.ylabel('Accuracy')\n plt.show()", "def _on_step(self, plot=True) -> bool:\n mean_rewards, std_rewards = evaluate_policy(\n self.model, self.env, n_eval_episodes=self.eval_episodes)\n \n images = []\n rewards = []\n actions = []\n obses = []\n step_cnt = 0\n done, state = False, None\n obs = self.env.reset()\n while not done:\n if step_cnt % self.render_freq == 0:\n images.append(self.env.render(mode='rgb_array'))\n\n action, state = self.model.predict(obs, state=state, deterministic=True)\n obs, reward, done, _ = self.env.step(action)\n\n rewards.append(reward)\n actions.append(action)\n obses.append(obs)\n step_cnt += 1\n\n render = np.array(images)\n render = np.transpose(render, (0, 3, 1, 2))\n\n actions = np.array(actions).flatten()\n observes = np.array(obses).flatten()\n\n rewards = np.array(rewards)\n if plot:\n plt.clf()\n 
plt.plot(np.arange(len(rewards)), rewards)\n plt.xlabel('timesteps')\n plt.ylabel('rewards')\n plt.title('Timestep {}'.format(self.num_timesteps))\n\n wandb.log({\n 'test_reward_mean': mean_rewards, \n 'test_reward_std': std_rewards,\n 'render': wandb.Video(render, format='gif', fps=self.fps),\n 'global_step': self.num_timesteps,\n 'evaluations': self.n_calls,\n 'reward_distribution': wandb.Histogram(rewards),\n 'action_distribution': wandb.Histogram(actions),\n 'observation_distribution': wandb.Histogram(observes),\n 'reward_vs_time': plot and wandb.Image(plt),\n }, step=self.num_timesteps)\n\n return True", "def run(self):\r\n history = self.extracter.load_user_history()\r\n self.plot_history(history)\r\n \r\n pp_history = self.analyser.preprocess_history(history)\r\n part_worths, attribute_importance, relative_importance = self.analyser.conjoint_analysis(pp_history)\r\n self.plot_analysis(part_worths, relative_importance)\r\n \r\n return history, pp_history, part_worths, relative_importance", "def is_sorting_step_complete(sorting_algos):\n for algo in sorting_algos:\n if not algo.is_thread_locked() and algo.thread.is_alive():\n # At least one algorithm is still running and is not locked, ie not done\n return False\n return True", "def evaluateTeams(self):\n # Get base data for mission\n fly_zones = FlyZone.objects.all()\n stationary_obstacles = StationaryObstacle.objects.all()\n moving_obstacles = MovingObstacle.objects.all()\n # Start a results map from user to evaluation data\n results = dict()\n # Fill in evaluation data for each user except admins\n users = User.objects.all()\n logging.info('Starting team evaluations.')\n for user in users:\n # Ignore admins\n if user.is_superuser:\n continue\n logging.info('Evaluation starting for user: %s.' 
% user.username)\n # Start the evaluation data structure\n eval_data = results.setdefault(user, dict())\n # Get the relevant logs for the user\n server_info_logs = ServerInfoAccessLog.getAccessLogForUser(user)\n obstacle_logs = ObstacleAccessLog.getAccessLogForUser(user)\n uas_telemetry_logs = UasTelemetry.getAccessLogForUser(user)\n flight_periods = TakeoffOrLandingEvent.getFlightPeriodsForUser(\n user)\n # Determine if the uas hit the waypoints\n waypoints = self.evaluateUasSatisfiedWaypoints(uas_telemetry_logs)\n waypoints_keyed = dict()\n for wpt_id in xrange(len(waypoints)):\n waypoints_keyed[wpt_id+1] = waypoints[wpt_id]\n eval_data['waypoints_satisfied'] = waypoints_keyed\n # Determine if the uas went out of bounds \n out_of_bounds_time = FlyZone.evaluateUasOutOfBounds(\n fly_zones, uas_telemetry_logs)\n eval_data['out_of_bounds_time'] = out_of_bounds_time\n # Determine interop rates\n interop_times = eval_data.setdefault('interop_times', dict())\n server_info_times = ServerInfoAccessLog.getAccessLogRates(\n flight_periods,\n ServerInfoAccessLog.getAccessLogForUserByTimePeriod(\n server_info_logs, flight_periods)\n )\n obstacle_times = ObstacleAccessLog.getAccessLogRates(\n flight_periods,\n ObstacleAccessLog.getAccessLogForUserByTimePeriod(\n obstacle_logs, flight_periods)\n )\n uas_telemetry_times = UasTelemetry.getAccessLogRates(\n flight_periods,\n UasTelemetry.getAccessLogForUserByTimePeriod(\n uas_telemetry_logs, flight_periods)\n )\n interop_times['server_info'] = {\n 'min': server_info_times[0],\n 'max': server_info_times[1],\n 'avg': server_info_times[2]\n }\n interop_times['obst_info'] = {\n 'min': obstacle_times[0],\n 'max': obstacle_times[1],\n 'avg': obstacle_times[2]\n }\n interop_times['uas_telem'] = {\n 'min': uas_telemetry_times[0],\n 'max': uas_telemetry_times[1],\n 'avg': uas_telemetry_times[2]\n }\n # Determine collisions with stationary and moving obstacles\n stationary_collisions = eval_data.setdefault(\n 'stationary_obst_collision', dict())\n for obst in stationary_obstacles:\n collision = obst.evaluateCollisionWithUas(uas_telemetry_logs)\n stationary_collisions[obst.pk] = collision\n moving_collisions = eval_data.setdefault(\n 'moving_obst_collision', dict())\n for obst in moving_obstacles:\n collision = obst.evaluateCollisionWithUas(uas_telemetry_logs)\n moving_collisions[obst.pk] = collision\n return results", "def getTrajectories(self):\n\n\t\t\treturn self.__XPEnsembleBefore, self.__XPEnsembleAfter", "def _do_iteration(self):\n try:\n results = self._compare()\n except Exception as e:\n print(str(e))\n return False\n\n if results[\"action\"] == \"trade\":\n self._trade(results[\"price1\"] > results[\"price2\"])\n elif results[\"action\"] == \"close\":\n self._close()\n\n self.last_results = results\n if results[\"action\"] == \"trade\" or results[\"action\"] == \"close\":\n self._log_results(results)\n\n return True", "def evaluate(self):\n self.training = False", "def converged(self):\n if len(self.rundir) >= 2:\n if io.ionic_steps(self.rundir[-1]) <= 3:\n return True\n if self.settings[\"nrg_convergence\"] != None:\n if io.job_complete(self.rundir[-1]) and io.job_complete(self.rundir[-2]):\n o1 = io.Oszicar(os.path.join(self.rundir[-1],\"OSZICAR\"))\n o2 = io.Oszicar(os.path.join(self.rundir[-2],\"OSZICAR\"))\n if abs( o1.E[-1] - o2.E[-1]) < self.settings[\"nrg_convergence\"]:\n return True\n\n return False", "def run():\n \n start_time = time.time()\n \n args = parse_args_plotting()\n config = ConfigParser()\n config.read(args.config_file)\n \n # 
initialize the OP class object\n OPs = initialize_plot_options(config)\n \n # which plot\n plot_settings = {}\n burnin = config.getint('plotting', 'burnin', fallback=0)\n plot_astr = config.getboolean('plotting', 'Astrometry_orbits_plot', fallback=False)\n plot_astr_pred = config.getboolean('plotting', 'Astrometric_prediction_plot', fallback=False)\n plot_rv_full = config.getboolean('plotting', 'RV_orbits_plot', fallback=False)\n plot_rv = config.getboolean('plotting', 'RV_plot', fallback=False)\n plot_rel_sep = config.getboolean('plotting', 'Relative_separation_plot', fallback=False)\n plot_position_angle = config.getboolean('plotting', 'Position_angle_plot', fallback=False)\n plot_proper_motions = config.getboolean('plotting', 'Proper_motion_plot', fallback=False)\n plot_corner = config.getboolean('plotting', 'Corner_plot', fallback=False)\n save_params = config.getboolean('save_results', 'save_params', fallback=True)\n checkconv = config.getboolean('plotting', 'check_convergence', fallback=False)\n \n if checkconv:\n OPs.plot_chains()\n if plot_astr:\n OPs.astrometry()\n if plot_astr_pred:\n OPs.astrometric_prediction_plot()\n if plot_rv_full:\n OPs.RV_fullorbit()\n if plot_rv:\n OPs.RV()\n if plot_rel_sep:\n OPs.relsep()\n if plot_position_angle:\n OPs.PA()\n if plot_proper_motions:\n OPs.proper_motions()\n if plot_corner:\n OPs.plot_corner()\n if save_params:\n OPs.save_data()", "def evaluate(env: AlfEnvironment, algorithm: RLAlgorithm,\n num_episodes: int) -> List[alf.metrics.StepMetric]:\n batch_size = env.batch_size\n env.reset()\n time_step = common.get_initial_time_step(env)\n algorithm.eval()\n policy_state = algorithm.get_initial_predict_state(env.batch_size)\n trans_state = algorithm.get_initial_transform_state(env.batch_size)\n episodes_per_env = (num_episodes + batch_size - 1) // batch_size\n env_episodes = torch.zeros(batch_size, dtype=torch.int32)\n episodes = 0\n metrics = [\n alf.metrics.AverageReturnMetric(\n buffer_size=num_episodes, example_time_step=time_step),\n alf.metrics.AverageEpisodeLengthMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n alf.metrics.AverageEnvInfoMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n alf.metrics.AverageDiscountedReturnMetric(\n buffer_size=num_episodes, example_time_step=time_step),\n alf.metrics.EpisodicStartAverageDiscountedReturnMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n alf.metrics.AverageRewardMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n ]\n time_step = common.get_initial_time_step(env)\n while episodes < num_episodes:\n # For parallel play, we cannot naively pick the first finished `num_episodes`\n # episodes to estimate the average return (or other statitics) as it can be\n # biased towards short episodes. Instead, we stick to using the first\n # episodes_per_env episodes from each environment to calculate the\n # statistics and ignore the potentially extra episodes from each environment.\n invalid = env_episodes >= episodes_per_env\n # Force the step_type of the extra episodes to be StepType.FIRST so that\n # these time steps do not affect metrics as the metrics are only updated\n # at StepType.LAST. 
The metric computation uses cpu version of time_step.\n time_step.cpu().step_type[invalid] = StepType.FIRST\n\n next_time_step, policy_step, trans_state = policy_trainer._step(\n algorithm=algorithm,\n env=env,\n time_step=time_step,\n policy_state=policy_state,\n trans_state=trans_state,\n metrics=metrics)\n\n time_step.step_type[invalid] = StepType.FIRST\n\n for i in range(batch_size):\n if time_step.step_type[i] == StepType.LAST:\n env_episodes[i] += 1\n episodes += 1\n\n policy_state = policy_step.state\n time_step = next_time_step\n\n env.reset()\n return metrics", "def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')", "def train(self)->None:", "def iterate(self):\n ret = super(ExpandableAlgorithm, self).pre_iteration()\n if ret is None:\n return None\n active, passive, neighbors, features_active, features_passive = ret\n params = [features_active, features_passive]\n if self._post_args:\n params += self._post_args\n s = self._overlap_function(*params)\n if self.condition_axelrod:\n if self.__condition_axelrod(s, features_active, features_passive):\n return True\n if self.condition_centola:\n if self.__condition_centola(s, active, passive, neighbors):\n return True", "def isGoal(self):\n for index in range(self.DIM):\n if not self.values('r',index).count(0) is 0:\n return False\n if not self.isValid():\n return False\n return True", "def evaluate_tracking(self) -> Dict[str, Any]:\n eval_scenes = create_splits_scenes(verbose=False)[self.eval_set]\n for scene in tqdm(eval_scenes, disable=not self.verbose):\n scene = self.nusc.get('scene', self.scene_name2tok[scene])\n cur_token, last_token = scene['first_sample_token'], scene['last_sample_token']\n pred_sem, pred_inst, label_sem, label_inst = [None], [None], [None], [None]\n\n while True:\n cur_sample = self.nusc.get('sample', cur_token)\n sd_token = cur_sample['data']['LIDAR_TOP']\n\n # Load the ground truth labels for the point cloud, filter evaluation classes.\n gt_label_file = os.path.join(self.nusc.dataroot, self.nusc.get('panoptic', sd_token)['filename'])\n panoptic_label = load_bin_file(gt_label_file, type='panoptic')\n label_sem.append(self.mapper.convert_label(panoptic_label // 1000))\n label_sem = label_sem[-2:]\n label_inst.append(panoptic_label)\n label_inst = label_inst[-2:]\n\n # Load predictions for the point cloud, filter evaluation classes.\n pred_file = os.path.join(self.results_folder, 'panoptic', self.eval_set, sd_token + '_panoptic.npz')\n panoptic_pred = load_bin_file(pred_file, type='panoptic')\n pred_sem.append(panoptic_pred // 1000)\n pred_sem = pred_sem[-2:]\n pred_inst.append(panoptic_pred)\n pred_inst = pred_inst[-2:]\n\n # Get the confusion matrix between the ground truth and predictions. 
Update the confusion matrix for\n # the sample data into the confusion matrix for the eval set.\n self.evaluator['tracking'].add_batch(scene['name'], pred_sem, pred_inst, label_sem, label_inst)\n if cur_token == last_token:\n break\n cur_token = cur_sample['next']\n\n pat, mean_pq, mean_tq = self.evaluator['tracking'].get_pat()\n mean_ptq, class_all_ptq, mean_sptq, class_all_sptq = self.evaluator['tracking'].get_ptq()\n mean_iou, class_all_iou = self.evaluator['tracking'].getSemIoU()\n lstq, s_assoc = self.evaluator['tracking'].get_lstq()\n mean_motsa, mean_s_motsa, mean_motsp = self.evaluator['tracking'].get_motsa()\n\n results = self.wrap_result_mopt(pat=pat,\n mean_pq=mean_pq,\n mean_tq=mean_tq,\n mean_ptq=mean_ptq,\n class_all_ptq=class_all_ptq,\n mean_sptq=mean_sptq,\n class_all_sptq=class_all_sptq,\n mean_iou=mean_iou,\n class_all_iou=class_all_iou,\n lstq=lstq,\n s_assoc=s_assoc,\n mean_motsa=mean_motsa,\n mean_s_motsa=mean_s_motsa,\n mean_motsp=mean_motsp)\n\n return results", "def test(self, test_algorithm_data: AlgorithmData) -> Tuple[bool, str, bool]:\n success, message = self.algorithm.step(test_algorithm_data)\n if not success:\n return False, message, False, False\n\n success, message, result = self.check(test_algorithm_data)\n if not success:\n return False, message, False, False\n\n return True, message, result", "def has_results(self):\n pass", "def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")", "def solveOneStep(self):\n ### Student code goes here\n\n if (self.currentState.state == self.victoryCondition):\n self.visited[self.currentState] = True\n win_or_not = (self.currentState.state == self.victoryCondition)\n return win_or_not\n\n game_depth = self.currentState.depth\n continue_game = True\n test_its = 0\n while continue_game:\n test_its += 1\n # too long \n # time test\n if test_its == \"too long\":\n return \"too long\"\n result = self.solveOneStep_helper(game_depth)\n if result:\n victory_satisfied = (self.currentState.state == self.victoryCondition)\n if victory_satisfied:\n result_bool = True\n return result_bool\n else:\n game_depth = game_depth + 1\n else:\n return False" ]
[ "0.57092583", "0.5609589", "0.5562583", "0.5543754", "0.54045916", "0.5378096", "0.5376567", "0.5344754", "0.53006", "0.52756476", "0.5219423", "0.51986915", "0.51444024", "0.51367295", "0.51324886", "0.50954634", "0.50765365", "0.506339", "0.50485265", "0.50454706", "0.5031855", "0.5024703", "0.5022633", "0.5012631", "0.50119144", "0.5008587", "0.49987626", "0.49934945", "0.49846604", "0.4965836", "0.49620834", "0.49553835", "0.49542126", "0.49539164", "0.49526626", "0.4949129", "0.49460033", "0.49406683", "0.49353606", "0.49265364", "0.49244982", "0.49179453", "0.49094757", "0.49015215", "0.48980242", "0.48962396", "0.48918802", "0.489039", "0.4888225", "0.48864314", "0.48763475", "0.48731053", "0.48728395", "0.48699203", "0.486487", "0.48479986", "0.48436752", "0.4842713", "0.48403898", "0.48400685", "0.48397487", "0.48396912", "0.4837174", "0.4832275", "0.48322496", "0.4824647", "0.48202527", "0.48195252", "0.48099348", "0.48089343", "0.4807892", "0.48015782", "0.47999427", "0.47980398", "0.47960508", "0.478903", "0.47881338", "0.47874367", "0.4787291", "0.47850958", "0.47807473", "0.4779516", "0.47792283", "0.47755635", "0.47747955", "0.47745556", "0.4773975", "0.47728592", "0.4769634", "0.47680566", "0.4765975", "0.4765568", "0.475949", "0.47591713", "0.47590286", "0.47587872", "0.47558177", "0.47506493", "0.47489133", "0.47480285" ]
0.7411444
0
Compute the extinction area of a binary matrix formatted as an "indexes_lists" (see "_get_index_lists"), given a ranking of the nodes and the axis (0 for removing rows, 1 for removing columns).
def _ext_area(axis, ranking, row_ind_at_col, col_ind_at_row):
    if axis == 0:
        indexes_lists = _np.array([col_ind_at_row[:], row_ind_at_col[:]])
    else:
        indexes_lists = _np.array([row_ind_at_col[:], col_ind_at_row[:]])
    if len(ranking) != len(indexes_lists[0]):
        print('Dimensions do not match')
        return
    # Counting the already extinct columns. They are the ones whose list of
    # associated row indexes is empty. In that case the extinction counter is
    # increased and a -1 is added to the indexes list.
    ext_nodes = 0
    for c in range(len(indexes_lists[1])):
        if len(indexes_lists[1][c]) == 0:
            ext_nodes += 1
            indexes_lists[1][c] = _np.append(indexes_lists[1][c], -1)
    ext_curve = [ext_nodes]
    # Iterating over the ranked nodes to remove, r
    for r in ranking[:-1]:
        # Iterating over the connected nodes in the other layer, c
        for c in indexes_lists[0][r]:
            # Removing the ranked node from the neighbours of c
            indexes_lists[1][c] = indexes_lists[1][c][indexes_lists[1][c] != r]
            # If the neighbour list of c is empty, then c goes extinct
            if len(indexes_lists[1][c]) == 0:
                ext_nodes += 1
                indexes_lists[1][c] = _np.append(indexes_lists[1][c], -1)
        ext_curve.append(ext_nodes)
    # Returning the area below the extinction curve
    return sum(ext_curve) / float(len(indexes_lists[0]) * len(indexes_lists[1]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def independendentColumns(matrix, rank, threshold):\n\n index = [None]*rank\n norms = [None]*rank\n\n if rank == 0:\n return index\n\n # select the first column\n index[0] = 0\n norms[0] = np.linalg.norm(matrix[:, 0])\n idx = 1\n\n if rank == 1:\n return index\n\n for i in range(1, matrix.shape[1]):\n li = True\n i2 = np.linalg.norm(matrix[:, i])\n for j in range(idx):\n ij = np.dot(matrix[:, i], matrix[:, index[j]])\n if abs(ij - i2*norms[j]) < threshold:\n li = False\n break\n if li:\n index[idx] = i\n norms[idx] = i2\n idx += 1\n if idx >= rank:\n break\n return index", "def eligible_edges_with_indexes(self):\n return enumerate(self.edges)", "def extents(nodes):\n from numpy import min, max\n return ( min(nodes[:,0]), max(nodes[:,0]),\n min(nodes[:,1]), max(nodes[:,1]),\n min(nodes[:,2]), max(nodes[:,2]) )", "def compute_cost(indexes, matrix):\n total = 0\n for row, columns in indexes:\n total += matrix[row][columns]\n return total", "def eligible_edges_with_indexes(self):\n return list(map(lambda e: (self.edges.index(e), e), self.eligible_edges))", "def gf2_rank(rows):\r\n rank = 0\r\n while rows:\r\n pivot_row = rows.pop()\r\n if pivot_row:\r\n rank += 1\r\n lsb = pivot_row & -pivot_row\r\n for index, row in enumerate(rows):\r\n if row & lsb:\r\n rows[index] = row ^ pivot_row\r\n return rank", "def node_equivalent(self,idx=None):\n if idx is not None:\n pert = self.run(idx)+0.000001\n tmp = np.zeros(self.shape)\n for i in range(len(pert)):\n tmp[idx[i,0],idx[i,1]] = pert[i]\n row_nz = np.count_nonzero(tmp,axis=1)\n row_sum = np.sum(tmp,axis=1)\n row_nodes = np.nan_to_num(row_sum/row_nz, nan=0.0)\n if self.bipartite_network:\n col_sum = np.sum(tmp,axis=0)\n col_nz = np.count_nonzero(tmp,axis=0)\n col_nodes = np.nan_to_num(col_sum/col_nz, nan=0.0)\n return row_nodes,col_nodes\n else:\n return row_nodes\n else:\n pert = self.run(idx).reshape(self.shape)\n if self.bipartite_network:\n row_nodes = np.mean(pert,axis=1)\n col_nodes = np.mean(pert,axis=0)\n return row_nodes,col_nodes\n else:\n return np.mean(pert,axis=0)", "def run_adding_edges(self):\n indices = np.where(self.X==0)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def ext_area(self, axis):\n traj, rank = self._check_run(axis)\n return _ext_area(axis, rank, self._neighb[1], self._neighb[0])", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def ravel_index(x, dims):\n i = 0\n for dim, j in zip(dims, x):\n i *= dim\n i += j\n return i", "def absolute_sum_of_elements(self) -> int:\n absolute_sum = abs(self._A[0][0])\n for y in range(0, self.num_of_rows):\n for x in range(0, self.num_of_cols):\n absolute_sum += abs(self._A[y][x])\n return absolute_sum", "def create_logits_mask_by_second_edge_graph(edge_indexes, num_edges, nvec, last_actions):\n bs = edge_indexes.shape[0]\n\n max_edge = edge_indexes.shape[1]\n\n\n total_mask = np.zeros(shape=(bs, max_edge), dtype=np.int8)\n\n for i in range(bs):\n num_edge = num_edges[i]\n edges = edge_indexes[i]\n all_valid_edges = edges[:num_edge]\n\n adj_mat = np.zeros(shape=(nvec, nvec), dtype=np.int8)\n for edge in all_valid_edges:\n adj_mat[edge[0], edge[1]] = 1\n\n mask = np.zeros(shape=(max_edge,), dtype=np.int8)\n\n last_action = last_actions[i]\n edge_1 = edges[last_action]\n\n for idx_2, 
edge_2 in enumerate(all_valid_edges):\n fail_cond = edge_2[0] in edge_1 or edge_2[1] in edge_1 or \\\n int(adj_mat[edge_2[0], edge_1[0]]) + int(adj_mat[edge_2[0], edge_1[1]]) + \\\n int(adj_mat[edge_2[1], edge_1[0]]) + int(adj_mat[edge_2[1], edge_1[1]]) > 0\n\n mask[idx_2] = not fail_cond\n total_mask[i, :] = mask\n\n return total_mask", "def xy_to_idx(self, xs, ys, mask=None, mask_outside=False, nodata=-1):\n _, ncol = self.shape\n r, c = self.rowcol(xs, ys, mask=mask, mask_outside=mask_outside, nodata=nodata)\n mask = r != nodata\n idx = np.full(r.shape, nodata, dtype=int)\n idx[mask] = r[mask] * ncol + c[mask]\n return idx", "def indexes_edge_removal_order(self):\n return self._indexes_edge_removal_order", "def get_neighbours(self):\n shape=self.cubeshape[1:]\n neighboursx=np.arange(self.xpos-(self.blocksize-1)/2,(self.xpos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursx=[x if (x>=0) & (x<=shape[1]-1) else np.nan for x in neighboursx ]\n neighboursy=np.arange(self.ypos-(self.blocksize-1)/2,(self.ypos+(self.blocksize-1)/2)+1,dtype='int' )\n neighboursy=[y if (y>=0) & (y<=shape[0]-1) else np.nan for y in neighboursy ]\n keys=[np.ravel_multi_index([y,x], shape) if np.all(np.isfinite(np.asarray([y,x]))) else np.nan for y in neighboursy for x in neighboursx]\n\n return keys", "def _assess_dimension_(spectrum, rank, n_samples, n_features):\n if rank > len(spectrum):\n raise ValueError(\"The tested rank cannot exceed the rank of the\"\n \" dataset\")\n\n pu = -rank * log(2.)\n for i in range(rank):\n pu += (gammaln((n_features - i) / 2.) -\n log(np.pi) * (n_features - i) / 2.)\n\n pl = np.sum(np.log(spectrum[:rank]))\n pl = -pl * n_samples / 2.\n\n if rank == n_features:\n pv = 0\n v = 1\n else:\n v = np.sum(spectrum[rank:]) / (n_features - rank)\n pv = -np.log(v) * n_samples * (n_features - rank) / 2.\n\n m = n_features * rank - rank * (rank + 1.) / 2.\n pp = log(2. * np.pi) * (m + rank + 1.) / 2.\n\n pa = 0.\n spectrum_ = spectrum.copy()\n spectrum_[rank:n_features] = v\n for i in range(rank):\n for j in range(i + 1, len(spectrum)):\n pa += log((spectrum[i] - spectrum[j]) *\n (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)\n\n ll = pu + pl + pv + pp - pa / 2. 
- rank * log(n_samples) / 2.\n\n return ll", "def fitnessFunction(individual, board_size, pos_bits_size): \r\n right_diag = [0] * (2 * board_size - 1)\r\n left_diag = [0] * (2 * board_size - 1)\r\n vertical = [0] * board_size\r\n conflicts = 0\r\n idx = 0\r\n while idx < board_size:\r\n # print \"idx: \",idx,individual[idx * pos_bits_size : idx * pos_bits_size + pos_bits_size]\r\n vpos = fromBitArray(individual[idx * pos_bits_size : idx * pos_bits_size + pos_bits_size])\r\n # print \"vpos: \", vpos + 1\r\n if vertical[vpos] != 0:\r\n conflicts = conflicts + vertical[vpos]\r\n vertical[vpos] = vertical[vpos] + 1\r\n if left_diag[vpos + idx] != 0:\r\n conflicts = conflicts + left_diag[vpos + idx]\r\n left_diag[vpos + idx] = left_diag[vpos + idx] + 1\r\n if right_diag[vpos + board_size - idx - 1] != 0:\r\n conflicts = conflicts + right_diag[vpos + board_size - idx - 1]\r\n right_diag[vpos + board_size - idx - 1] = right_diag[vpos + board_size - idx - 1] + 1\r\n idx = idx + 1\r\n return (board_size * (board_size - 1))/2 - conflicts", "def Areml_eigh(self):\n s,U = LA.eigh(self.Areml(),lower=True)\n i_pos = (s>1e-10)\n s = s[i_pos]\n U = U[:,i_pos]\n return s,U", "def matrix_to_ij_indices(fock, frame, orbs):\n # maps atom types to different n indices\n io_base, _ = orbs_base(orbs)\n\n # prepares storage\n diaglist = {}\n offdlist_p = {}\n offdlist_m = {}\n heterolist = {}\n\n # creates storage. these are the blocks of the matrix we'll have to fill up later\n lorbs = []\n for el_a in orbs.keys():\n for ia, a in enumerate(orbs[el_a]):\n na, la, ma = a\n na += io_base[el_a] # adds element offset\n for el_b in orbs.keys():\n for ib, b in enumerate(orbs[el_b]):\n nb, lb, mb = b\n nb += io_base[el_b] # adds element offset\n if ( (nb>na or (nb==na and lb>=la)) and\n not (na,la,nb,lb) in lorbs ):\n orb = (na,la,nb,lb)\n lorbs.append(orb)\n if el_a == el_b:\n diaglist[orb] = []\n offdlist_p[orb] = []\n offdlist_m[orb] = []\n else:\n heterolist[orb] = []\n\n\n # reads in and partitions into blocks\n ki = 0\n nat = len(frame.numbers)\n for i in range(nat):\n el_a = frame.symbols[i]\n cur_a = ()\n for ia, oa in enumerate(orbs[el_a]):\n na, la, ma = oa\n na += io_base[el_a]\n # we read the Hamiltonian in blocks\n if (cur_a == (na,la)): continue\n cur_a = (na,la)\n kj = 0\n for j in range(nat):\n el_b = frame.symbols[j]\n cur_b = ()\n for ib, ob in enumerate(orbs[el_b]):\n nb, lb, mb = ob\n nb += io_base[el_b] # adds element offset\n if (cur_b == (nb,lb)): continue # only read at the beginning of each m block\n cur_b = (nb,lb)\n if (nb<na or (nb==na and lb<la)): continue\n orb = (na,la,nb,lb)\n blockij = (i,j)\n if (i==j):\n diaglist[orb].append(blockij)\n elif (i<j and el_a == el_b):\n offdlist_p[orb].append(blockij)\n offdlist_m[orb].append(blockij)\n elif(el_a != el_b):\n heterolist[orb].append(blockij)\n kj += len(orbs[el_b])\n ki += len(orbs[el_a])\n\n # stores as ndarray for more flexible indexing\n for orb in lorbs:\n for d in [diaglist, offdlist_p, offdlist_m, heterolist]:\n if orb in d:\n d[orb] = np.asarray(d[orb])\n\n return dict( diag=diaglist, offd_p=offdlist_p, offd_m=offdlist_m, hete=heterolist)", "def leaves(node, res):\n leaf = True\n if node.lesser:\n leaf = False\n leaves(node.lesser, res)\n if node.greater:\n leaf = False\n leaves(node.greater, res)\n if leaf:\n res.append(node.indices)", "def internal_adjacency(self, node_list):\n # Create igraph Graph object describing the subgraph\n subgraph = self.graph.subgraph(node_list)\n # Get adjacency matrix\n return 
np.array(subgraph.get_adjacency(type=2).data).astype(np.int8)", "def get_adjactent(r,c, num_rows, num_columns): \n return [(adj_row, adj_col)\n for adj_row in [r+x for x in range(-1,2) if r+x >= 0 and r+x < num_rows]\n for adj_col in [c+x for x in range(-1,2) if c+x >= 0 and c+x < num_columns]\n if (adj_row, adj_col) != (r,c)]", "def neighbors_magnitude(self):\n return sum((n.weight for n in self.neighbors))", "def calculate_E(self):\n \n E = 0\n for i in xrange(self.size):\n Ei = self.h[i]\n Ei += 0.5*sum((1 if self.spins[j] else -1)*self.J[i,j] for j in self.adjacency[i])\n if not self.spins[i]:\n Ei *= -1\n E += Ei\n \n return E", "def find_ranking(comparisons, equal_width=0.2, max_rank=-1, verbose=False):\n # remove unnecessary variables\n comparisons = {(i, j) if i < j else (j, i): value if i < j else 1 - value\n for (i, j), value in comparisons.items()}\n nodes = np.unique(\n [i for ij in comparisons.keys() for i in ij])\n\n # define variables\n model = Model('comparison')\n model.setParam('OutputFlag', verbose)\n values = np.fromiter(comparisons.values(), dtype=float)\n assert values.max() <= 1 and values.min() >= 0\n # variables to encode the error of comparisons\n E_ij = model.addVars(comparisons.keys(), name='e_ij', vtype=GRB.CONTINUOUS,\n ub=1.0-values, lb=-values)\n # variables to encode hard choice of >=, <=, ==\n Ge_ij = model.addVars(comparisons.keys(), name='ge_ij', vtype=GRB.BINARY)\n Le_ij = model.addVars(comparisons.keys(), name='le_ij', vtype=GRB.BINARY)\n Eq_ij = model.addVars(comparisons.keys(), name='eq_ij', vtype=GRB.BINARY)\n # variables to help with transitivity in non-fully connected graphs\n if max_rank < 1:\n max_rank = len(nodes)\n R_i = model.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=max_rank)\n # variables to emulate abs\n T_ij_pos = {}\n T_ij_neg = {}\n index = (values != 1) & (values != 0)\n T_ij_pos = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_pos', lb=0, ub=1-values[index])\n T_ij_neg = model.addVars(\n (ij for ij, value in comparisons.items() if value not in [0.0, 1.0]),\n vtype=GRB.CONTINUOUS, name='T_ij_neg', lb=0, ub=values[index])\n model.update()\n\n # emulate abs for non-binary comparisons: E_ij = T_ij_pos - T_ij_neg\n model.addConstrs(\n (E_ij[ij] == T_ij_pos[ij] - T_ij_neg[ij] for ij in T_ij_pos),\n 'E_ij = T_ij_pos - T_ij_neg')\n\n # hard decision of >=, <=, and ==\n lower_bound = 0.5 - equal_width / 2.0\n upper_bound = 0.5 + equal_width / 2.0\n # <=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound <= ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - upper_bound >= -1 + ge_ij\n for ij, ge_ij in Ge_ij.items()), 'ge_ij_upper_bound')\n # >=\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound >= -le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_lower_bound')\n model.addConstrs(\n (E_ij[ij] + comparisons[ij] - lower_bound <= 1 - le_ij\n for ij, le_ij in Le_ij.items()), 'le_ij_upper_bound')\n # ==\n model.addConstrs(\n (le + eq + ge == 1 for le, eq, ge in zip(\n Le_ij.values(), Eq_ij.values(), Ge_ij.values())), 'eq_ij')\n\n # transitivity\n for (i, j), eq_a in Eq_ij.items():\n le_a = Le_ij[i, j]\n ge_a = Ge_ij[i, j]\n for k in nodes:\n j_, k_ = j, k\n if j > k:\n j_, k_ = k, j\n eq_b = Eq_ij.get((j_, k_), None)\n if eq_b is None:\n continue\n else:\n le_b = Le_ij[j_, k_]\n ge_b = Ge_ij[j_, k_]\n if j_ != j:\n le_b, ge_b = ge_b, le_b\n\n i_, k_ = i, k\n if i > k:\n 
i_, k_ = k, i\n eq_c = Eq_ij.get((i_, k_), None)\n if eq_c is None:\n continue\n else:\n le_c = Le_ij[i_, k_]\n ge_c = Ge_ij[i_, k_]\n if i_ != i:\n le_c, ge_c = ge_c, le_c\n\n # a <= b and b <= c -> a <= c\n model.addLConstr(\n ge_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_ge_{i},{j},{k}')\n # a >= b and b >= c -> a >= c\n model.addLConstr(\n le_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_le_{i},{j},{k}')\n # a <= b and b == c -> a <= c\n model.addLConstr(\n le_a + eq_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_leeq_{i},{j},{k}')\n # a == b and b <= c -> a <= c\n model.addLConstr(\n eq_a + le_b, GRB.LESS_EQUAL, 1 + le_c,\n f'transitivity_eqle_{i},{j},{k}')\n # a >= b and b == c --> a >= c\n model.addLConstr(\n ge_a + eq_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_geeq_{i},{j},{k}')\n # a == b and b >= c --> a >= c\n model.addLConstr(\n eq_a + ge_b, GRB.LESS_EQUAL, 1 + ge_c,\n f'transitivity_eqge_{i},{j},{k}')\n # a == b and b == c --> a == c\n model.addLConstr(\n eq_a + eq_b, GRB.LESS_EQUAL, 1 + eq_c,\n f'transitivity_eq_{i},{j},{k}')\n\n # transitivity helper (for not-fully connected graphs)\n # also provides a latent rank\n big_m = max_rank\n model.addConstrs(\n ((1 - ge_ij) * big_m + R_i[i] >= R_i[j] + 1 for (i, j), ge_ij in Ge_ij.items()),\n 'rank_transitivity_larger')\n model.addConstrs(\n ((1 - le_ij) * big_m + R_i[j] >= R_i[i] + 1 for (i, j), le_ij in Le_ij.items()),\n 'rank_transitivity_smaller')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[j] >= R_i[i] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal1')\n model.addConstrs(\n ((1 - eq_ij) * big_m + R_i[i] >= R_i[j] for (i, j), eq_ij in Eq_ij.items()),\n 'rank_transitivity_equal2')\n\n # objective function\n objective = LinExpr()\n for ij, value in comparisons.items():\n if value == 1.0:\n objective += -E_ij[ij]\n elif value == 0.0:\n objective += E_ij[ij]\n else:\n objective += T_ij_pos[ij] + T_ij_neg[ij]\n model.setObjective(objective, GRB.MINIMIZE)\n\n # solve\n model.optimize()\n\n # verify abs emulation: one T_ij has to be 0\n for ij, value in T_ij_pos.items():\n assert value.X == 0 or T_ij_neg[ij] == 0, \\\n f'T_{ij} pos {value.X} neg {T_ij_neg[ij]}'\n\n # find minimal Rs\n model_ = Model('comparison')\n model_.setParam('OutputFlag', verbose)\n R_i = model_.addVars(nodes, name='r_i', vtype=GRB.CONTINUOUS, lb=0,\n ub=len(nodes))\n for ((i, j), ge_ij), le_ij in zip(Ge_ij.items(), Le_ij.values()):\n if ge_ij.x == 1:\n model_.addConstr(R_i[i] >= R_i[j] + 1)\n elif le_ij.x == 1:\n model_.addConstr(R_i[j] >= R_i[i] + 1)\n else:\n model_.addConstr(R_i[j] == R_i[i])\n model_.setObjective(R_i.sum(), GRB.MINIMIZE)\n model_.optimize()\n\n return [model_.getVarByName(f'r_i[{i}]').X for i in range(len(nodes))], \\\n model.objVal", "def __getMaxUpperLeftCoordinate(self, entityNodeList):\r\n minX = sys.maxint\r\n minY = sys.maxint\r\n for node in entityNodeList:\r\n if(node.graphObject_.y < minY):\r\n minY = node.graphObject_.y\r\n if(node.graphObject_.x < minX):\r\n minX = node.graphObject_.x \r\n return (minX, minY)", "def east_index(self, index):\n # Add a check for the edge of the board\n if index % self.size == self.size - 1:\n # Indices of < 0 are filtered out in cardinal_indices()\n return -1\n else:\n return index + 1", "def index_col(self, i0, i1, j0, j1):\n edges = self.h5['indexes']['bin1_offset'][i0:i1 + 1]\n index = []\n for lo1, hi1 in zip(edges[:-1], edges[1:]):\n if hi1 - lo1 > 0:\n bin2 = self.h5['pixels']['bin2_id'][lo1:hi1]\n mask = (bin2 >= j0) & (bin2 < j1)\n index.append(lo1 + 
np.flatnonzero(mask))\n if not index:\n return np.array([], dtype=int)\n else:\n return np.concatenate(index, axis=0)", "def cell_edges(self):", "def adj(self):\n\t\tres = SquareMatrix(self._rows)\n\t\tfor i in range(self._rows):\n\t\t\tfor j in range(self._rows):\n\t\t\t\tres[i][j] = ((-1) ** (i + j)) * self.minor(j, i)\n\t\treturn res", "def max_dim(elements, coordinates):\n atom_vdw_vertical = np.matrix(\n [[atomic_vdw_radius[i.upper()]] for i in elements])\n atom_vdw_horizontal = np.matrix(\n [atomic_vdw_radius[i.upper()] for i in elements])\n dist_matrix = euclidean_distances(coordinates, coordinates)\n vdw_matrix = atom_vdw_vertical + atom_vdw_horizontal\n re_dist_matrix = dist_matrix + vdw_matrix\n final_matrix = np.triu(re_dist_matrix)\n i1, i2 = np.unravel_index(final_matrix.argmax(), final_matrix.shape)\n maxdim = final_matrix[i1, i2]\n return i1, i2, maxdim", "def score(encoders, index, rows, cols):\n i = index % cols # find the 2d location of the indexth element\n j = index / cols\n \n sim = 0 # total of dot products\n count = 0 # number of neighbours\n if i>0: # if we're not at the left edge, do the WEST comparison\n sim += np.dot(encoders[j*cols+i], encoders[j*cols+i-1])\n count += 1\n if i<cols-1: # if we're not at the right edge, do EAST\n sim += np.dot(encoders[j*cols+i], encoders[j*cols+i+1])\n count += 1\n if j>0: # if we're not at the top edge, do NORTH\n sim += np.dot(encoders[j*cols+i], encoders[(j-1)*cols+i])\n count += 1\n if j<rows-1: # if we're not at the bottom edge, do SOUTH \n sim += np.dot(encoders[j*cols+i], encoders[(j+1)*cols+i])\n count += 1\n return sim/count", "def analytical_eigenvalues_2d(Ne, lx, ly):\n ev = [(m * np.pi / lx) ** 2 + (n * np.pi / ly) ** 2 for m in range(1, Ne + 1)\n for n in range(1, Ne + 1)]\n ev = np.array(ev)\n\n return ev[:Ne]", "def zernike_Double_Index(nlevels):\n \n\t \n if not (nlevels>=0):\n print('Input parameter nlevels must be >= 0')\n raise AssertionError()\n \n if (nlevels == 0):\n \n m = 0\n n = 0\n \n return n, m\n \n else:\n \n # ++++ Defining layout for row number n and colunmn number m ++++++++\n\n row_n = nlevels+1\n col_m = 2*nlevels +1\n x = np.arange(row_n)\n y = np.arange(-(col_m-1)//2, (col_m+1)//2,1)\n Q = [(i,j) for i in x for j in y]\n #\n\n\n nm_index = []\n \n top = (col_m + 1)/2\n leftside = row_n*col_m - col_m + 1\n rightside = row_n*col_m \n\n k1 = 0; k2 = 0\n\n for i in xrange(top,row_n*col_m+1, 2*col_m):\n\n nm_index.append(Q[i-1])\n s1 = i + col_m + 1\n s2 = i + col_m - 1 \n jj1 = k1\n jj2 = k2\n\n\n while (s2 <= leftside): \n\n nm_index.append(Q[s2-1])\n s2 +=col_m - 1\n jj1 += 1\n jj2 -= 1\n\n leftside +=2\n\n jj1 = k1\n jj2 = k2\n\n while (s1 <= rightside): \n\n # \n nm_index.append(Q[s1-1])\n s1 +=col_m + 1\n jj1 += 1\n jj2 += 1\n\n rightside -=2\n k1 = 0; k2 += 2\n\n n = np.array(nm_index)[:,0]\n m = np.array(nm_index)[:,1]\n\n return n, m", "def get_indexes(self,\r\n childrentoo=True,\r\n levels=0):\r\n\r\n if childrentoo:\r\n if levels==0:\r\n entrylist = self.apply_limit(self.find_within(indexfrom=Index(0),orequal=True))\r\n # if entrylist is , default to all notes, with limit applied.\r\n\r\n else:\r\n entrylist = self.apply_limit([a_temp for a_temp in self.indexes()\r\n if Index(a_temp) > Index(str(0))\r\n and Index(a_temp).level()<=levels])\r\n\r\n else:\r\n entrylist = self.apply_limit([a_temp for a_temp in self.indexes()\r\n if Index(a_temp) > Index(str(0))\r\n and Index(a_temp).is_top()])\r\n\r\n return entrylist", "def compute_ensemble(fopen_list, var_list, range_list, indicesOnCluster, 
maxIndices, indicesToParticle): #{{{\n\n rlzn_ensmbl = compute_rlzn_ensemble(fopen_list, var_list, range_list)\n\n # average is over 2nd dimension (if it exists), since first is time\n #ensmbl = compute_cluster_ensemble(rlzn_ensmbl, indicesOnCluster, maxIndices, indicesToParticle)\n\n ensmbl = rlzn_ensmbl\n\n return ensmbl #}}}", "def compact_neighb(self):\n order = np.argsort(self.edges[:, 0] * float(self.V) + self.edges[:, 1])\n neighb = self.edges[order, 1].astype(np.int_)\n weights = self.weights[order]\n degree, _ = self.degrees()\n idx = np.hstack((0, np.cumsum(degree))).astype(np.int_)\n return idx, neighb, weights", "def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = (x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. 
Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges", "def E_0_o(x_r, occlusion):\n temp = sum(lambda_(s, x_r) * abs(occlusion[s]) ** 2.\n for s in neighborhood(x_r, occlusion.shape[:2]))\n\n return temp", "def bounds(self, resids: NDArray) -> List[Tuple[float, float]]:", "def _index_list(self, level, node):\n if level >= self._max_level:\n raise ValueError(\"Invalid level: greater than `max_level`\")\n\n if node >= 2**level:\n raise ValueError(\"Invalid node\")\n\n return 2**level + node - 1", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def extent(self):\n return self.index.max() - self.index.min(), self.columns.max() - self.columns.min()", "def lowest_rank_approx(A,e):", "def eta_l(self, children, coef_t, coef_r):\n with tf.compat.v1.name_scope('coef_l'):\n children = tf.cast(children, tf.float32)\n batch_size = tf.shape(input=children)[0]\n max_tree_size = tf.shape(input=children)[1]\n # creates a mask of 1's and 0's where 1 means there is a child there\n # has shape (batch_size x max_tree_size x max_children + 1)\n mask = tf.concat(\n [tf.zeros((batch_size, max_tree_size, 1)),\n tf.minimum(children, tf.ones(tf.shape(input=children)))],\n axis=2,\n name='mask'\n )\n\n # eta_l is shape (batch_size x max_tree_size x max_children + 1)\n return tf.multiply(\n tf.multiply((1.0 - coef_t), (1.0 - coef_r)), mask, name='coef_l'\n )\n\n # def aggregation_layer(self, nodes_representation, w_attention):\n # nodes_representation is (batch_size, max_graph_size, self.node_dim)\n \n # with tf.compat.v1.name_scope(\"global_attention\"):\n # batch_size = tf.shape(input=nodes_representation)[0]\n # max_tree_size = tf.shape(input=nodes_representation)[1]\n\n # (batch_size * max_graph_size, self.node_dim)\n # flat_nodes_representation = tf.reshape(nodes_representation, [-1, self.node_dim])\n # aggregated_vector = tf.matmul(flat_nodes_representation, w_attention)\n\n # attention_score = tf.reshape(aggregated_vector, [-1, max_tree_size, 1])\n\n \"\"\"A note here: softmax will distributed the weights to all of the nodes (sum of node weghts = 1),\n an interesting finding is that for some nodes, the attention score will be very very small, i.e e-12, \n thus making parts of aggregated vector becomes near zero and affect on the learning (very slow nodes_representationergence\n - Better to use sigmoid\"\"\"\n\n \n # attention_weights = tf.nn.softmax(attention_score, axis=1)\n \n # attention_weights = tf.nn.sigmoid(attention_score)\n\n # TODO: reduce_max vs reduce_sum vs reduce_mean\n # if aggregation_type == 1:\n # print(\"Using tf.reduce_sum...........\")\n # weighted_average_nodes = tf.reduce_sum(input_tensor=tf.multiply(nodes_representation, attention_weights), axis=1)\n # if aggregation_type == 2:\n # print(\"Using tf.reduce_max...........\")\n # weighted_average_nodes = tf.reduce_max(tf.multiply(nodes_representation, attention_weights), axis=1)\n # if aggregation_type == 3:\n # print(\"Using tf.reduce_mean...........\")\n # weighted_average_nodes = tf.reduce_mean(tf.multiply(nodes_representation, attention_weights), axis=1)\n\n # return weighted_average_nodes, attention_weights", "def rank_of_matrix(matrix: list[list[int | float]]) -> int:\n\n rows = len(matrix)\n columns = len(matrix[0])\n rank = min(rows, columns)\n\n for row in range(rank):\n # Check if diagonal element is not zero\n if matrix[row][row] != 0:\n # 
Eliminate all the elements below the diagonal\n for col in range(row + 1, rows):\n multiplier = matrix[col][row] / matrix[row][row]\n for i in range(row, columns):\n matrix[col][i] -= multiplier * matrix[row][i]\n else:\n # Find a non-zero diagonal element to swap rows\n reduce = True\n for i in range(row + 1, rows):\n if matrix[i][row] != 0:\n matrix[row], matrix[i] = matrix[i], matrix[row]\n reduce = False\n break\n if reduce:\n rank -= 1\n for i in range(rows):\n matrix[i][row] = matrix[i][rank]\n\n # Reduce the row pointer by one to stay on the same row\n row -= 1\n\n return rank", "def exterior_interior_points_eval(grid, points, solid_angle_tolerance, verbose=False):\n\n elements = grid.leaf_view.elements\n vertices = grid.leaf_view.vertices\n number_of_elements = grid.leaf_view.entity_count(0)\n elem = list(grid.leaf_view.entity_iterator(0))\n\n element_property = _np.zeros(number_of_elements, dtype=_np.int)\n element_groups = _np.zeros(shape=(4, number_of_elements), dtype=_np.int)\n element_groups[1:4, :] = elements\n for i in range(number_of_elements):\n property_number = elem[i].domain\n element_property[i] = property_number\n element_groups[0, i] = property_number\n\n element_properties = _np.array(list(set(element_property)), dtype=_np.int)\n if verbose:\n print(\"Element groups are:\")\n print(element_properties)\n\n points_interior = []\n points_exterior = []\n points_boundary = []\n index_interior = []\n index_exterior = _np.full(points.shape[1], True, dtype=bool)\n index_boundary = []\n\n for i in range(element_properties.size):\n\n elements_trunc = elements[:, element_groups[0, :] == element_properties[i]]\n num_elem = elements_trunc.shape[1]\n\n elements_x_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_y_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_z_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n # Populate grid vertices matrices\n for k in range(3):\n elements_x_coordinate[k, :] = vertices[0, elements_trunc[k, :]]\n elements_y_coordinate[k, :] = vertices[1, elements_trunc[k, :]]\n elements_z_coordinate[k, :] = vertices[2, elements_trunc[k, :]]\n # Obtain coordinates of triangular elements centroielements_surface_area\n # through barycentric method.\n elements_barycent_x_coordinate = _np.mean(elements_x_coordinate, axis=0)\n elements_barycent_y_coordinate = _np.mean(elements_y_coordinate, axis=0)\n elements_barycent_z_coordinate = _np.mean(elements_z_coordinate, axis=0)\n\n # Preallocate matrix of vectors for triangular elementses\n elements_u_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_v_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n # Compute matrix of vectors defining each triangular elements\n elements_u_coordinate = _np.array(\n [\n elements_x_coordinate[1, :] - elements_x_coordinate[0, :],\n elements_y_coordinate[1, :] - elements_y_coordinate[0, :],\n elements_z_coordinate[1, :] - elements_z_coordinate[0, :],\n ]\n )\n elements_v_coordinate = _np.array(\n [\n elements_x_coordinate[2, :] - elements_x_coordinate[0, :],\n elements_y_coordinate[2, :] - elements_y_coordinate[0, :],\n elements_z_coordinate[2, :] - elements_z_coordinate[0, :],\n ]\n )\n elements_u_cross_v = _np.cross(\n elements_u_coordinate, elements_v_coordinate, axisa=0, axisb=0, axisc=0\n )\n elements_u_cross_v_norm = _np.linalg.norm(elements_u_cross_v, axis=0)\n # Obtain outward pointing unit normal vectors for each elements\n normals = _np.divide(elements_u_cross_v, elements_u_cross_v_norm)\n # Obtain surface 
area of each elements\n elements_surface_area = 0.5 * elements_u_cross_v_norm\n\n start_time = _time.time()\n N_workers = _mp.cpu_count()\n parallelised_compute_solid_angle = _partial(\n compute_solid_angle,\n elements_barycent_x_coordinate,\n elements_barycent_y_coordinate,\n elements_barycent_z_coordinate,\n points,\n normals,\n elements_surface_area,\n )\n pool = _mp.Pool(N_workers)\n result = pool.starmap(\n parallelised_compute_solid_angle, zip(_np.arange(0, points.shape[1]))\n )\n pool.close()\n end_time = _time.time() - start_time\n if verbose:\n print(\"Time to complete solid angle field parallelisation: \", end_time)\n solid_angle = _np.hstack(result)\n if solid_angle_tolerance:\n index_interior_tmp = solid_angle > 0.5 + solid_angle_tolerance\n index_boundary_tmp = (solid_angle > 0.5 - solid_angle_tolerance) & (\n solid_angle < 0.5 + solid_angle_tolerance\n )\n points_boundary.append(points[:, index_boundary_tmp])\n index_boundary.append(index_boundary_tmp)\n index_exterior = index_exterior & (\n (index_interior_tmp == False) & (index_boundary_tmp == False)\n )\n else:\n index_interior_tmp = solid_angle > 0.5\n index_exterior = index_exterior & (index_interior_tmp == False)\n\n points_interior.append(points[:, index_interior_tmp])\n index_interior.append(index_interior_tmp)\n\n points_exterior = points[:, index_exterior]\n\n return (\n points_interior,\n points_exterior,\n points_boundary,\n index_interior,\n index_exterior,\n index_boundary,\n )", "def filterBankEdges(img):\n imgE = Views.extendBorder(img)\n opTop = as2DKernel(imgE, [-1]*3 + [0]*3 + [1]*3)\n opBottom = as2DKernel(imgE, [1]*3 + [0]*3 + [-1]*3)\n opLeft = as2DKernel(imgE, [-1, 0, 1] * 3)\n opRight = as2DKernel(imgE, [1, 0, -1] * 3)\n return [opTop, opBottom, opLeft, opRight]", "def voxel_efficiency_bipartite(clusts, node_assn, node_pred, primaries):\n others = [i for i in range(n) if i not in primaries]\n tot_vox = np.sum([len(clusts[i]) for i in others])\n int_vox = np.sum([len(clusts[i]) for i in others if node_pred[i] == node_assn[i]])\n return int_vox * 1.0 / tot_vox", "def compute_neighbours(index, matrix):\n row, col = decode_to_matrix_cell(index, matrix)\n n1 = index + 1\n if n1 >= matrix.size or col == matrix.cols - 1:\n n1 = None\n\n n2 = index + matrix.cols\n if n2 >= matrix.size or row == matrix.rows - 1:\n n2 = None\n return n1, n2,", "def calculate_area(boxes):\n box_dimension = len(boxes.size())\n if (box_dimension == 1) and (boxes.size()[0] != 0):\n return (boxes[3] - boxes[1] + 1) * (boxes[2] - boxes[0] + 1)\n elif box_dimension == 2:\n return (boxes[:, 3] - boxes[:, 1] + 1) * (boxes[:, 2] - boxes[:, 0] + 1)\n else:\n return torch.tensor([])", "def get_element_index(el, elements):\n for idx, element in enumerate(elements):\n diff = torch.sum(torch.abs(el - element))\n if diff.item() < 1e-8:\n return idx\n return None", "def k_rank_approximate(doc_matrix, k):\n return []", "def rank(self):\n\n if self._rank >= 0:\n return self._rank\n\n reduced, operations = self.to_row_echelon()\n non_leading_rows = 0\n for i in range(self.rows, 0, -1):\n if not reduce(lambda x,y: x or y, reduced.row(i)):\n non_leading_rows += 1\n else:\n break\n\n self._rank = self.rows - non_leading_rows\n return self._rank", "def GetColumnIndexOfRank(\n self: \"HereditaryStratumOrderedStoreList\",\n rank: int,\n ) -> typing.Optional[int]:\n if self.GetNumStrataRetained() == 0:\n return None\n else:\n res_idx = binary_search(\n lambda idx: self.GetRankAtColumnIndex(idx) >= rank,\n 0,\n self.GetNumStrataRetained() - 1,\n )\n if 
res_idx is None:\n return None\n elif self.GetRankAtColumnIndex(res_idx) == rank:\n return res_idx\n else:\n return None", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def compute_bin_indices(X_part, bin_limits=None, n_bins=20):\n if bin_limits is None:\n bin_limits = []\n for variable_data in range(X_part.shape[1]):\n bin_limits.append(numpy.linspace(numpy.min(variable_data), numpy.max(variable_data), n_bins + 1)[1: -1])\n\n bin_indices = numpy.zeros(len(X_part), dtype=numpy.int)\n for axis, bin_limits_axis in enumerate(bin_limits):\n bin_indices *= (len(bin_limits_axis) + 1)\n bin_indices += numpy.searchsorted(bin_limits_axis, X_part[:, axis])\n\n return bin_indices", "def compute_sorted_index(leaves, ncnt, nodes):\n leaves= np.array(leaves).astype(int)\n idx = []\n for i in range(0, len(leaves)):\n idx.append(int(leaves[i]))\n nidx = []\n for i in range(0, len(nodes)):\n if i not in leaves:\n nidx.append(i)\n\n nidx = np.array(nidx)[nodes[nidx][:,0].argsort()]\n j = 0\n for i in range(0, ncnt):\n if i not in leaves:\n idx.append(nidx[j])\n j+=1\n return idx", "def upper_triangular_indices(dimension):\n assert dimension > 0, \"tensor_utils.upper_triangular_indices: Dimension must be positive integer!\"\n\n for row in range(dimension):\n for column in range(row + 1, dimension):\n element_index = dimension * row + column\n yield element_index", "def mean_rank(rs):\n _rs = []\n for r in rs:\n ids = np.asarray(r).nonzero()[0]\n if len(ids) == 0:\n _rs.append(0)\n else:\n _rs.append(ids[0] + 1)\n return np.mean(_rs)", "def Indexes(self, latitudes, longitudes):\n res = self._transform.TransformPoints(\n np.column_stack((longitudes, latitudes)))\n res = list(zip(*res))\n x, y = np.array(res[0]), np.array(res[1])\n idx_col = self._inv_txf[0] + self._inv_txf[1] * x + self._inv_txf[2] * y\n idx_row = self._inv_txf[3] + self._inv_txf[4] * x + self._inv_txf[5] * y\n return idx_row.astype(int), idx_col.astype(int)", "def matrix_to_edges(matrix: numpy.ndarray, include_reverse_edges: bool=True):\n sparse = scipy.sparse.coo_matrix(matrix)\n edges = zip(sparse.row, sparse.col)\n\n if not include_reverse_edges:\n edges = filter(lambda edge: edge[0] <= edge[1], edges)\n return list(edges)", "def E_1_o(x_r, occlusion):\n temp = sum(lambda_(s, x_r) * abs(1. 
- occlusion[s]) ** 2.\n for s in neighborhood(x_r, occlusion.shape[:2]))\n\n return temp", "def ranged_axes(shape):\n return (-np.arange(1, len(shape) + 1)[::-1]).tolist() or -1", "def get_axes_by_nodes(self, model, pid_ref, node1, node2, xyz1, xyz2, log):\n #TODO: not integrated with CBAR yet...\n\n is_failed = True\n eid = self.eid\n #centroid = (n1 + n2) / 2.\n #i = n2 - n1\n #Li = norm(i)\n #ihat = i / Li\n\n elem = self\n #(nid1, nid2) = elem.node_ids\n #node1 = model.nodes[nid1]\n #node2 = model.nodes[nid2]\n #xyz1 = node1.get_position()\n #xyz2 = node2.get_position()\n\n # wa/wb are not considered in i_offset\n # they are considered in ihat\n i = xyz2 - xyz1\n Li = norm(i)\n if Li == 0.:\n msg = 'xyz1=%s xyz2=%s\\n%s' % (xyz1, xyz2, self)\n raise ValueError(msg)\n i_offset = i / Li\n\n unused_v, wa, wb, xform = rotate_v_wa_wb(\n model, elem,\n xyz1, xyz2, node1, node2,\n i_offset, i, eid, Li, log)\n if wb is None:\n # one or more of v, wa, wb are bad\n\n # xform is xform_offset...assuming None\n ihat = None\n yhat = None\n zhat = None\n return is_failed, (wa, wb, ihat, yhat, zhat)\n\n ihat = xform[0, :]\n yhat = xform[1, :]\n zhat = xform[2, :]\n\n is_failed = False\n return is_failed, (wa, wb, ihat, yhat, zhat)", "def run_removing_edges(self):\n indices = np.where(self.X==1)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def tot_neg_elbo(self):\n\n\n self.neg_elbo = tf.reduce_sum([-self.experts[i].elbo((self.X[self.partition[i]],self.Y[self.partition[i]])) for i in range(self.M)])\n \n return self.neg_elbo", "def get_arr_edge_indices(arr, res='4x5', extra_points_point_on_edge=None,\n verbose=True, debug=False):\n if verbose:\n print(('get_arr_edge_indices for arr of shape: ', arr.shape))\n\n # initialise variables\n lon_c, lat_c, NIU = get_latlonalt4res(res=res, centre=True)\n lon_e, lat_e, NIU = get_latlonalt4res(res=res, centre=False)\n lon_diff = lon_e[-5]-lon_e[-6]\n lat_diff = lat_e[-5]-lat_e[-6]\n nn, n, = 0, 0\n last_lat_box = arr[nn, n]\n coords = []\n last_lon_box = arr[nn, n]\n need_lon_outer_edge, need_lat_outer_edge = False, False\n if debug:\n print((lon_e, lat_e))\n\n # ---- Loop X dimension ( lon )\n for nn, lon_ in enumerate(lon_c):\n\n # Loop Y dimension ( lat ) and store edges\n for n, lat_ in enumerate(lat_c):\n\n if debug:\n print((arr[nn, n], last_lat_box, last_lon_box,\n arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))\n\n if arr[nn, n] != last_lat_box:\n\n # If 1st lat, selct bottom of box\n point_lon = lon_e[nn]+lon_diff/2\n if need_lat_outer_edge:\n point_lat = lat_e[n+1]\n else:\n point_lat = lat_e[n]\n need_lat_outer_edge = True\n need_lat_outer_edge = False\n\n # Add mid point to cordinates list\n if isinstance(extra_points_point_on_edge, type(None)):\n mid_point = [point_lon, point_lat]\n coords += [mid_point]\n\n # Add given number of points along edge\n else:\n coords += [[lon_e[nn]+(lon_diff*i), point_lat] for i in\n np.linspace(0, 1, extra_points_point_on_edge,\n endpoint=True)]\n\n # temporally save the previous box's value\n last_lat_box = arr[nn, n]\n\n # ---- Loop Y dimension ( lat )\n for n, lat_ in enumerate(lat_c):\n\n if debug:\n print((arr[nn, n], last_lat_box, last_lon_box,\n arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))\n # Loop X dimension ( lon ) and store edges\n for nn, lon_ in enumerate(lon_c):\n\n # If change in value at to list\n if arr[nn, n] != last_lon_box:\n point_lat = lat_e[n]+lat_diff/2\n\n # Make sure we 
select the edge lon\n if need_lon_outer_edge:\n point_lon = lon_e[nn+1]\n else:\n point_lon = lon_e[nn]\n need_lon_outer_edge = True\n need_lon_outer_edge = False\n\n # Add mid point to coordinates list\n if isinstance(extra_points_point_on_edge, type(None)):\n mid_point = [point_lon, point_lat]\n coords += [mid_point]\n\n # Add given number of points along edge\n else:\n coords += [[point_lon, lat_e[n]+(lat_diff*i)] for i in\n np.linspace(0, 1, extra_points_point_on_edge,\n endpoint=True)]\n\n # temporally save the previous box's value\n last_lon_box = arr[nn, n]\n\n return coords", "def compute_node_impedances(self,freq):\n\n Zarr=[self.ZP2(freq)]\n for elem in self.element_array:\n if elem.orientation==0: #series\n Zarr.append(Zarr[-1]+elem.Z(freq))\n elif elem.orientation==1: #shunt\n Zarr.append(1.0/(1.0/Zarr[-1]+elem.Y(freq)))\n\n #Zarr.reverse()\n return Zarr", "def rank(self,others):\n self.__verify(others)\n \n #construct the n evaluation criteria + classes in an extensible way\n #evalFn = [AP,R] in the standard format -> column with as many rows as replicates\n numClasses = others[0].eval['APBCI'].shape[2]\n\n iouType = others[0].params.iouType\n if iouType in [\"segm\",\"bbox\"]:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,-1],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,3,-1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,1],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,1,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,2],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,3,2],axis=(0,2))]\n\n evfAP = lambda c: (lambda AP,R: np.nanmean(AP[:,:,c,0,-1],axis=0))\n for i in range(numClasses):\n evalFunctions.append(evfAP(i))\n\n else:\n evalFunctions = [ \\\n lambda AP,R: np.nanmean(AP[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(AP[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(AP[:,:,:,2,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,0,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[0,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[5,:,:,0,0],axis=(1)),\n lambda AP,R: np.nanmean(R[:,:,:,1,0],axis=(0,2)),\n lambda AP,R: np.nanmean(R[:,:,:,2,0],axis=(0,2))]\n\n numReplicates = others[0].eval['APBCI'].shape[1]\n numInstances = len(others)\n numEvals = len(evalFunctions)\n\n replicateStats = np.zeros((numReplicates,numInstances))\n\n outperformMatrix = np.zeros((numInstances,numInstances,numEvals))\n rankCI = np.zeros((numInstances,3,numEvals))\n ranks = np.zeros((numInstances,numEvals,numReplicates))\n\n for evi,evf in enumerate(evalFunctions):\n for oi,o in enumerate(others):\n replicateStats[:,oi] = evf(o.eval['APBCI'],o.eval['RBCI'])\n\n for oi in range(len(others)):\n for oj in range(len(others)):\n outperformMatrix[oi,oj,evi] = np.mean(replicateStats[:,oi]>replicateStats[:,oj])\n\n for bci in range(numReplicates):\n ranks[:,evi,bci] = stats.rankdata(-replicateStats[bci,:],method='min')\n\n for oi in range(len(others)): \n rankCI[oi,0,evi] = np.mean(ranks[oi,evi,:])\n #use simple percentile method; the bias correction misbehaves \n rankCI[oi,1:,evi] = 
np.percentile(ranks[oi,evi,:],[100*(self.params.bootstrapAlpha/2),100*(1-self.params.bootstrapAlpha/2)])\n\n return rankCI, outperformMatrix, ranks", "def get_neighbour_squares_idx(self, pos):\n if pos:\n possible_values = {0, 1, 2}\n col_variation = zip( [pos[0], pos[0]], possible_values - {pos[1]} )\n row_variation = zip( possible_values - {pos[0]}, [pos[1], pos[1]] )\n return list(col_variation), list(row_variation)", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def rank(m, axis=0, method='average', ascending=False, reverse=False):\n if isinstance(m, list):\n m = np.array(m)\n if ascending == reverse: # greater is better (descending order)\n m = -m # take the opposite to inverse rank\n r = np.apply_along_axis(rankdata, axis, m, method=method) # convert values to ranking in all rows or columns\n return process_vote(m, r)", "def area(boxes: Union[np.array, torch.Tensor]) -> Union[np.array, torch.Tensor]:\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def dimension_along(self, axis):\n l, u = self._range_along(axis)\n return u - l", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def remove_trivial_edges(self):\n if self.E > 0:\n valid = self.edges[:, 0] != self.edges[:, 1]\n self.edges = self.edges[valid]\n self.weights = self.weights[valid]\n self.E = np.sum(valid)\n return self.E", "def _valndxFunc2(a, shape, ndim, indexes, minVal, maxVal, valLU, currentIndex, curridx):\n done = False\n lastidx = ndim - 1\n maxuint32 = 4294967295 # 2^32 - 1\n\n while not done:\n # use the specially chosen function for indexing the array\n arrVal = a[curridx[0], curridx[1]]\n \n found = False\n j = 0\n if arrVal >= minVal and arrVal <= maxVal:\n j = valLU[arrVal - minVal]\n found = j < maxuint32\n\n if found:\n m = currentIndex[j]\n for i in range(ndim):\n indexes[m, i] = curridx[i]\n currentIndex[j] = m + 1 \n \n # code that updates curridx - incs the next dim\n # if we have done all the elements in the current \n # dim\n idx = lastidx\n while idx >= 0:\n curridx[idx] = curridx[idx] + 1\n if curridx[idx] >= shape[idx]:\n curridx[idx] = 0\n idx -= 1\n else:\n break\n\n # if we are done we have run out of dims\n done = idx < 0", "def leftmost_leaf_descendant_indices(self, node_list):\r\n # Cf. Zhang & Shasha:p.1249:\r\n # \"l(i) is the number of the leftmost leaf descendant of the subtree\r\n # rooted at T[i]. 
When T[i] is a leaf, l(i)=i.\"\r\n def get_leftmost_leaf(node):\r\n if not node.is_leaf():\r\n return get_leftmost_leaf(node.left_child())\r\n else:\r\n return node\r\n \r\n indices = []\r\n for node in node_list:\r\n leftmost_node = get_leftmost_leaf(node)\r\n for i in range(len(node_list)):\r\n if id(node_list[i]) == id(leftmost_node):\r\n indices.append(i)\r\n break\r\n return indices", "def _extract_shape(idx, x, j, cur_center, normalize=True):\r\n _a = []\r\n for i in range(len(idx)):\r\n if idx[i] == j:\r\n if cur_center.sum() == 0:\r\n opt_x = x[i]\r\n else:\r\n _, opt_x = _sbd(cur_center, x[i])\r\n _a.append(opt_x)\r\n a = np.array(_a)\r\n\r\n if len(a) == 0:\r\n return np.zeros((1, x.shape[1]))\r\n columns = a.shape[1]\r\n if normalize:\r\n y = zscore(a, axis=1, ddof=1)\r\n s = np.dot(y.transpose(), y)\r\n\r\n p = np.empty((columns, columns))\r\n p.fill(1.0/columns)\r\n p = np.eye(columns) - p\r\n\r\n m = np.dot(np.dot(p, s), p)\r\n _, vec = eigh(m)\r\n else: \r\n s = np.dot(a.transpose(), a)\r\n m = s \r\n _, vec = eigh(m)\r\n \r\n centroid = vec[:, -1]\r\n finddistance1 = math.sqrt(((a[0] - centroid) ** 2).sum())\r\n finddistance2 = math.sqrt(((a[0] + centroid) ** 2).sum())\r\n\r\n if finddistance1 >= finddistance2:\r\n centroid *= -1\r\n \r\n return zscore(centroid, ddof=1) if normalize else centroid", "def bisect_anyaxis(counts, ndomains, split_fac):\n # split along any axis \n splits = {}\n pvals = []\n for axis in range(len(counts.shape)):\n # Sum over other axes\n sum_axes = list(np.arange(len(counts.shape)))\n sum_axes.pop(axis)\n sum_axes = tuple(sum_axes)\n\n # split into left and right \n counts1d = np.sum(counts, axis=sum_axes, dtype=np.int64)\n \n split_idx, n_L, pval = load_partition_1d(counts1d, ndomains, split_fac)\n\n splits[axis] = (split_idx, n_L)\n\n pvals.append(pval)\n\n axis = int(np.argmin(pvals))\n split_idx, n_L = splits[axis]\n return axis, split_idx, n_L", "def create_logits_mask_by_first_edge_graph(edge_indexes, num_edge, nvec):\n # find first edge\n # adj matrix of graphs\n # adj_mat = nx.to_numpy_matrix(graph, nodelist=range(nvec))[None]\n\n # bs = adj_mats.shape[0]\n # total_mask = []\n max_edge = edge_indexes.shape[0]\n total_mask = np.zeros(shape=(1, max_edge), dtype=np.int8)\n\n\n # edges = edge_indexes[:num_edge]\n # edges = np.where(adj_mats[i, :, :] > 0)\n\n # max_edge = max(max_edge, num_edges)\n # mask = np.zeros(shape=(1, max_edge), dtype=np.int8)\n all_half_edges = edge_indexes[:num_edge // 2] # only use the directed edge (a->b), not (b->a) # location[np.where(location[:, 0] < location[:, 1])[0]]\n all_valid_edges = edge_indexes[:num_edge] # only use the directed edge (a->b), not (b->a) # location[np.where(location[:, 0] < location[:, 1])[0]]\n\n # restore adj matrix\n rawobs = np.zeros(shape=(nvec, nvec), dtype=np.int8)\n for edge in all_valid_edges:\n rawobs[edge[0], edge[1]] = 1 # edge is an ndarray (2,), we cannot index using rawobs[edge] (is array with shape (2, 15))\n#\n for idx_1, edge_1 in enumerate(all_half_edges):\n encoded_edge_1 = idx_1\n # check if they are one-hop connected\n\n mask = np.zeros(shape=(max_edge,), dtype=np.int8)\n for idx_2, edge_2 in enumerate(all_valid_edges):\n\n fail_cond = edge_2[0] in edge_1 or edge_2[1] in edge_1 or\\\n int(rawobs[edge_2[0], edge_1[0]]) + int(rawobs[edge_2[0], edge_1[1]]) + \\\n int(rawobs[edge_2[1], edge_1[0]]) + int(rawobs[edge_2[1], edge_1[1]]) > 0\n\n mask[idx_2] = not fail_cond\n\n total_mask[0, encoded_edge_1] = mask.any()\n\n\n return total_mask", "def 
get_nullify_idxs(original_size, border_size):\n\tassert border_size < original_size/2, \"Border too large to be removed from image of this size\"\n\ttmp = np.zeros((original_size, original_size), dtype=int)\n\ttmp[:border_size,:] = 1\n\ttmp[-border_size:,:] = 1\n\ttmp[:,-border_size:] = 1\n\ttmp[:,:border_size] = 1\n\ttmp = tmp.reshape(tmp.shape[0]*tmp.shape[1])\n\treturn np.where(tmp==1)[0]", "def ftp(self, pre_node, post_node):\n pre_id, post_id = pre_node.id, post_node.id\n this_node_collocations = self.collocation_matrix[pre_id, post_id]\n assert isinstance(this_node_collocations, float)\n all_collocations = np.sum(self.collocation_matrix[pre_id])\n if all_collocations == 0:\n return all_collocations\n return this_node_collocations / all_collocations", "def _row_echelon_F2(matrix_in) -> np.ndarray: # pylint: disable=invalid-name\n size = matrix_in.shape\n\n for i in range(size[0]):\n pivot_index = 0\n for j in range(size[1]):\n if matrix_in[i, j] == 1:\n pivot_index = j\n break\n for k in range(size[0]):\n if k != i and matrix_in[k, pivot_index] == 1:\n matrix_in[k, :] = np.mod(matrix_in[k, :] + matrix_in[i, :], 2)\n\n matrix_out_temp = deepcopy(matrix_in)\n indices = []\n matrix_out = np.zeros(size)\n\n for i in range(size[0] - 1):\n if np.array_equal(matrix_out_temp[i, :], np.zeros(size[1])):\n indices.append(i)\n for row in np.sort(indices)[::-1]:\n matrix_out_temp = np.delete(matrix_out_temp, (row), axis=0)\n\n matrix_out[0 : size[0] - len(indices), :] = matrix_out_temp\n matrix_out = matrix_out.astype(int)\n\n return matrix_out", "def upper_binary_tree(self):\n return self.max_linear_extension().binary_search_tree_shape(left_to_right=False)", "def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)", "def occupiedElements(self):\n occupiedMatrix = [[0 for x in range(self.gridSize)] for y in range(self.gridSize)]\n\n for i in range(len(self.changeable)):\n for j in range(self.length[i]):\n if self.direction[i] == \"h\":\n occupiedMatrix[self.fixed[i]][self.changeable[i] + j] = 1\n else:\n occupiedMatrix[self.changeable[i] + j][self.fixed[i]] = 1\n\n return occupiedMatrix", "def test_reduced_row_echelon(binary_matrix, result):\n\n # build row echelon form of the matrix\n shape = binary_matrix.shape\n for irow in range(shape[0]):\n pivot_index = 0\n if np.count_nonzero(binary_matrix[irow, :]):\n pivot_index = np.nonzero(binary_matrix[irow, :])[0][0]\n\n for jrow in range(shape[0]):\n if jrow != irow and binary_matrix[jrow, pivot_index]:\n binary_matrix[jrow, :] = (binary_matrix[jrow, :] + binary_matrix[irow, :]) % 2\n\n indices = [\n irow\n for irow in range(shape[0] - 1)\n if np.array_equal(binary_matrix[irow, :], np.zeros(shape[1]))\n ]\n\n temp_row_echelon_matrix = binary_matrix.copy()\n for row in indices[::-1]:\n temp_row_echelon_matrix = np.delete(temp_row_echelon_matrix, row, axis=0)\n\n row_echelon_matrix = np.zeros(shape, dtype=int)\n row_echelon_matrix[: shape[0] - len(indices), :] = temp_row_echelon_matrix\n\n # build reduced row echelon form of the matrix from row echelon form\n for idx in range(len(row_echelon_matrix))[:0:-1]:\n nonzeros = np.nonzero(row_echelon_matrix[idx])[0]\n if len(nonzeros) > 0:\n redrow = 
(row_echelon_matrix[idx, :] % 2).reshape(1, -1)\n coeffs = (\n (-row_echelon_matrix[:idx, nonzeros[0]] / row_echelon_matrix[idx, nonzeros[0]]) % 2\n ).reshape(1, -1)\n row_echelon_matrix[:idx, :] = (\n row_echelon_matrix[:idx, :] + (coeffs.T * redrow) % 2\n ) % 2\n\n # get reduced row echelon form from the _reduced_row_echelon function\n rref_bin_mat = _reduced_row_echelon(binary_matrix)\n\n assert (rref_bin_mat == row_echelon_matrix).all()\n assert (rref_bin_mat == result).all()", "def get_node_indices_and_levels(nd: np.ndarray):\n indices = []\n lvs = []\n for j in range(1, nd.shape[0]):\n if j == 1:\n indices = nd[j]\n lvs = nd[j + 1]\n elif j % 2 != 0 and j > 1:\n indices = np.append(indices, nd[j])\n elif j % 2 == 0 and j > 2:\n lvs = np.append(lvs, nd[j])\n return indices, lvs", "def getAreas(self, idx = None, cell = 1, base_1 = None, base_2 = None):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n uCell = self.getCell(idx = idx, cell = cell, base_1 = base_1, base_2 = base_2)\n return np.abs(np.linalg.det(uCell))", "def feasible(leaf, x):\n feasibleDim =[]\n try:\n atom = (leaf.root.ub - leaf.root.lb) / leaf.problem.discreteLevel\n except:\n atom = 0\n for i in range(len(leaf.lb)):\n if leaf.ub[i] - leaf.lb[i] >= x * atom[i]:\n feasibleDim.append(i)\n return feasibleDim", "def ranks(inputs, axis=-1):\n return 1 + tf.cast(\n tf.argsort(tf.argsort(inputs, axis=axis), axis=axis), dtype=inputs.dtype)", "def eleven():\r\n \r\n matrix = [[8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 5, 77, 91, 8],\r\n [49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0],\r\n [81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65],\r\n [52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91],\r\n [22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],\r\n [24, 47, 32, 60, 99, 03, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],\r\n [32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],\r\n [67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21],\r\n [24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],\r\n [21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95],\r\n [78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92],\r\n [16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57],\r\n [86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],\r\n [19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40],\r\n [4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],\r\n [88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],\r\n [4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36],\r\n [20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16],\r\n [20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54],\r\n [1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48]]\r\n \r\n greatest = 0\r\n product = 1\r\n \r\n # Find highest product of four left to right numbers\r\n for i in range(20):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product *= matrix[i][j + k]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n # Find highest product of four up and down numbers\r\n for i 
in range(20):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product += matrix[j + k][i]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n # Find highest product of four diagonal up/down numbers\r\n for i in range(17):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product *= matrix[i + k][j + k]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n # Find highest product of four diagonal down/up numbers\r\n for i in range(3, 20):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product *= matrix[i - k][j + k]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n \r\n #for i in range(17):\r\n # for j in range(4):\r\n # print matrix[i + j][0],\r\n # print\r\n \r\n return greatest", "def _leaves_in_view(self, analysis_width) -> list[tuple[int, int, int]]:\n range = (\n self.image.shape[0] / 2\n if self.orientation == Orientation.UP_DOWN\n else self.image.shape[1] / 2\n )\n # cut off the edge so that we're not halfway through a leaf.\n range -= (\n max(\n self.mlc.widths[0] * analysis_width,\n self.mlc.widths[-1] * analysis_width,\n )\n * self.image.dpmm\n )\n leaves = [\n i\n for i, c in enumerate(self.mlc.centers)\n if abs(c) < (range / self.image.dpmm)\n ]\n return [\n (leaf_num, center, width)\n for leaf_num, center, width in zip(\n leaves,\n self.mlc.centers[leaves[0] : leaves[-1] + 1],\n self.mlc.widths[leaves[0] : leaves[-1] + 1],\n )\n ]", "def group_into_descendents (self,from_indexes=None):\r\n\r\n if not from_indexes:\r\n from_indexes = self.indexes()\r\n last_up = None\r\n last_top = None\r\n returnlist = []\r\n for x_temp in from_indexes:\r\n if Index(x_temp) > Index(0):\r\n if last_up and last_up.is_top():\r\n if Index(x_temp).is_top():\r\n returnlist[-1].append(x_temp)\r\n else:\r\n last_list = returnlist[-1]\r\n returnlist = returnlist[0:-1]\r\n if len(last_list)>1:\r\n new_list = [last_list[-1],x_temp]\r\n last_list = last_list[0:-1]\r\n returnlist.append(last_list)\r\n returnlist.append(new_list)\r\n else:\r\n last_list.append(x_temp)\r\n returnlist.append(last_list)\r\n elif Index(x_temp).is_top() or not last_top:\r\n last_top = Index(x_temp)\r\n returnlist.append([x_temp])\r\n else:\r\n if Index(x_temp).is_descendent(last_top):\r\n returnlist[-1].append(x_temp)\r\n else:\r\n last_top = Index(x_temp)\r\n last_up = Index(x_temp)\r\n\r\n return returnlist", "def multiple_negatives_ranking_loss(self, embeddings_a: Tensor, embeddings_b: Tensor):\n scores = torch.matmul(embeddings_a, embeddings_b.t())\n diagonal_mean = torch.mean(torch.diag(scores))\n mean_log_row_sum_exp = torch.mean(torch.logsumexp(scores, dim=1))\n return -diagonal_mean + mean_log_row_sum_exp", "def __beinflumatgrid(axis):\n len_axis = len(axis)\n vec = np.zeros((1, len_axis))\n vec[0, :] = axis\n vertical_ax = np.zeros((len_axis, 1))\n vertical_ax[:, 0] = axis\n grid = np.repeat(vec, len_axis, axis=0)\n return np.absolute(np.subtract(grid, vertical_ax))", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost 
of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass" ]
[ "0.5215772", "0.4802194", "0.47601265", "0.47544992", "0.47510993", "0.4719506", "0.46831623", "0.46391895", "0.46330476", "0.46060845", "0.4589925", "0.4584294", "0.45521936", "0.45242146", "0.45011955", "0.4497215", "0.44914553", "0.44876465", "0.44790936", "0.44733527", "0.44724473", "0.44718", "0.447177", "0.44681382", "0.44667518", "0.4460493", "0.44525388", "0.4452108", "0.44512203", "0.4449437", "0.44447398", "0.4434904", "0.44254842", "0.4422017", "0.44178098", "0.4386434", "0.43863583", "0.43830106", "0.43812746", "0.43802217", "0.43738174", "0.4371431", "0.43676838", "0.43625656", "0.43613765", "0.43377045", "0.43286029", "0.43263406", "0.43257987", "0.43192485", "0.43191198", "0.43088904", "0.4307713", "0.42993677", "0.42962527", "0.42851076", "0.42849353", "0.42820767", "0.42808813", "0.42805707", "0.42790177", "0.42772037", "0.42697147", "0.42654935", "0.42624795", "0.42615294", "0.4261228", "0.42591935", "0.42581943", "0.4254508", "0.42506516", "0.42504933", "0.42458674", "0.4244851", "0.42438987", "0.42382035", "0.42370203", "0.42366463", "0.42341003", "0.42340982", "0.42314708", "0.4226365", "0.4220545", "0.42175764", "0.42169806", "0.42168635", "0.42110598", "0.42092818", "0.42039788", "0.42030573", "0.4192564", "0.41899166", "0.41850004", "0.4181774", "0.41791758", "0.41773102", "0.41757828", "0.41728085", "0.41690174", "0.41687176" ]
0.75243336
0
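The truncated grid-scan negative above mixes its accumulators: the vertical pass sums (product +=) while both diagonal passes multiply. A minimal self-contained sketch of the multiplicative scan, assuming matrix is a 20x20 grid of ints (the horizontal pass, absent from the fragment, is omitted here as well):

def greatest_product(matrix, run=4):
    # Scan down, down-right, and down-left runs of `run` cells,
    # multiplying (not summing) along each run.
    n = len(matrix)
    best = 0
    for di, dj in ((1, 0), (1, 1), (1, -1)):
        for i in range(n):
            for j in range(n):
                ei, ej = i + di * (run - 1), j + dj * (run - 1)
                if 0 <= ei < n and 0 <= ej < n:
                    product = 1
                    for k in range(run):
                        product *= matrix[i + di * k][j + dj * k]
                    best = max(best, product)
    return best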
Return an ndarray having one row per element in data and one column
def make_data(self, data): return array(data, dtype=float32)
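The positive document above is a bare one-liner: it neither imports array/float32 nor produces the single column its query asks for. A hedged, self-contained sketch of the described behavior (the numpy import and the reshape are my additions, not part of the dataset's snippet):

import numpy as np

def make_data(data):
    # One row per element of `data`, one column; the reshape is an
    # assumption inferred from the query, absent from the original.
    return np.array(data, dtype=np.float32).reshape(-1, 1)

# e.g. make_data([1, 2, 3]) -> array([[1.], [2.], [3.]], dtype=float32)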
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def process(self, data):\n data = np.atleast_2d(data)\n\n if self.orientation == 'row':\n return data\n else:\n return data.T", "def get_data():\n data = [np.array([32.,595.]),\n np.array([30.,599.]),\n np.array([18.,622.]),\n np.array([51.,606.]),\n np.array([38.,578.])]\n return data", "def rows_from_data (data):\n return data.tolist() # use numpy.ndarray conversion function", "def augment_data(self,data):\n d = np.empty([data.shape[0]+2,data.shape[1]+2])\n d[1:-1,1:-1] = data\n d[0,1:-1] = data[0,:]\n d[1:-1,0] = data[:,0]\n d[-1,1:-1] = data[-1,:]\n d[1:-1,-1] = data[:,-1]\n d[0,0] = data[0,0]\n d[-1,-1] = data[-1,-1]\n d[0,-1] = data[0,-1]\n d[-1,0] = data[-1,0]\n return d", "def data():\n return RaggedArray(\n [[0, 1], [1, 2, 3, 4], [], [-1, -2], []]*20, dtype='float64')", "def make_2d(x):\n return x.reshape((1, len(x)))", "def vertify(data: list):\n assert len(data) == 4\n n = [float(d) for d in data]\n return np.array([[n[0], n[1]], [n[2], n[1]], [n[2], n[3]], [n[0], n[3]], [n[0], n[1]]])", "def provide_data(self):\r\n # import pdb; pdb.set_trace()\r\n # for k, v in self.data:\r\n # print k,v\r\n return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.data]", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))", "def to_numpy(data):\n fields = [\n \"x\", \"y\", \"z\",\n \"proximity\"\n ]\n return np.array([[row[field] for field in fields] for row in data])", "def flatten_data(data):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements.flatten())\r\n return np.array(result)", "def data_for_sorting():\n return RaggedArray([[1, 0], [2, 0], [0, 0]])", "def _rowvec(x):\n return _colvec(x).transpose()", "def _handle_input_data(data):\n data = np.asarray(data)\n if np.ndim(data) == 1:\n d_rows = 1\n d_cols = len(data)\n data = data.reshape((1, data.shape[0]))\n elif np.ndim(data) == 2:\n d_rows = data.shape[0]\n d_cols = data.shape[1]\n else:\n raise ValueError(\"Incorrect dimensionality of data. 
Must be <= 2\")\n return data, d_rows, d_cols", "def concatonate(data):\n tmp = np.array(data)\n tmp = np.reshape(tmp, (tmp.shape[0] * tmp.shape[1], -1))\n return tmp", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def make_col_vector(array):\n return array.reshape(-1, 1)", "def config_to_array(data):\n return np.array(data[\"data\"]).reshape(data[\"rows\"], data[\"cols\"])", "def atleast_2d(x):\n return np.atleast_2d(x).T if x.ndim < 2 else x", "def get_data(data_file_path):\n data_file = open(data_file_path, 'r').readlines()\n data = []\n n = -1\n dim = -1\n for i in range(len(data_file)):\n line_elems = [float(x) for x in data_file[i].split()]\n if i == 0:\n n = int(line_elems[0])\n dim = int(line_elems[1])\n else:\n data.append(np.array(line_elems))\n return data, n, dim", "def _colvec(x):\n x = np.atleast_1d(x)\n return x[:, None]", "def to_row_vec(x):\n assert x.ndim == 1\n return jnp.expand_dims(x, 0)", "def matrix_to_array(x, nodata=None):\n\n s = np.shape(x)\n if nodata is None: # Nan\n ix = np.where(np.isfinite(x))\n else:\n ix = np.where(x != nodata)\n y = x[ix].copy()\n return y, ix, s", "def _data_reshape(self, data):\n data_offset = [int(size / 2) for size in data.shape[1:]]\n data_diff = [int(size / 2) for size in self.shape]\n data_diff_min = data_diff\n data_diff_max = []\n for i, elem in enumerate(data_diff):\n if self.shape[i] % 2 == 0:\n data_diff_max.append(elem)\n else:\n data_diff_max.append(elem + 1)\n data = data[:, (data_offset[0] - data_diff_min[0]):(data_offset[0] + data_diff_max[0]),\n (data_offset[1] - data_diff_min[1]):(data_offset[1] + data_diff_max[1]),\n (data_offset[2] - data_diff_min[2]):(data_offset[2] + data_diff_max[2])]\n\n if data.shape[1] == 1:\n data = data.reshape(data.shape[0], data.shape[2], data.shape[3])\n return data", "def get(self):\n return np.hstack((self.data[:, self.cur:], self.data[:, :self.cur])) #Concatena los datos en horizontal", "def to_col_vec(x):\n assert x.ndim == 1\n return jnp.expand_dims(x, 1)", "def dataset(self, timestep, data):\n dataX, dataY = [], []\n for i in range(len(data) - timestep):\n a = data[i:i+timestep]\n dataX.append(a)\n dataY.append(data[i + timestep])\n return np.array(dataX), np.array(dataY)", "def get_2Darray(file,cols='all',nrows='all',verbose='no'):\n if cols=='all':\n #Get the number of columns in the file\n for line in open(file).readlines():\n pieces=split(line)\n if len(pieces)==0: continue\n if line[0]=='#':continue\n nc=len(pieces)\n cols=list(range(nc))\n if verbose=='yes': print('cols=',cols)\n break\n else:\n nc=len(cols)\n \n lista=get_data(file,cols,nrows)\n nl=len(lista[0])\n x=zeros((nl,nc),float)\n for i in range(nc):x[:,i]=lista[i]\n return x", "def dataset_as_arrays(dataset):\r\n scores = []\r\n lenghts = []\r\n embeddings = []\r\n for row in dataset:\r\n embeddings += [vec for vec in row[0]]\r\n scores.append(float(row[1]))\r\n lenghts.append(row[0].shape[0])\r\n \r\n embeddings = numpy.array(embeddings)\r\n scores = numpy.array(scores)\r\n lenghts = numpy.array(lenghts)\r\n return embeddings, scores, lenghts", "def fromCols(cls, data):\n m = len(data[0])\n # check that list of data is valid\n if any([len(col) != m for col in data[1:]]):\n raise ValueError(\"inconsistent column lengths\")\n return 
Matrix.fromRows(data).transpose()", "def as_matrix(self):\n return self._data", "def data_for_grouping():\n return RaggedArray(\n [[1, 0], [1, 0], [], [], [0, 0], [0, 0], [1, 0], [2, 0]])", "def make_data(self): \n s = numpy.arange(0.0, 10.0, 0.01)\n s = numpy.reshape(s, (10,10,10))\n s = numpy.transpose(s)\n\n v = numpy.zeros(3000, 'd')\n v[1::3] = 1.0\n v = numpy.reshape(v, (10,10,10,3))\n return s, v", "def as_list(self):\n data = []\n for row in self._matrix_data:\n for column in row:\n data.append(column)\n return data", "def column(self):\n return self.reshape((self.size, 1))", "def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.float)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, newshape=(1, arr.shape[0]))\n return arr", "def split_data(data):\n data_values = []\n target_drugs = []\n cell_lines = []\n states = []\n replicates = []\n \n for i in data.columns:\n target_drug = i.split()[-2]\n cell_line = i.split()[-1].split(\"_\")[0]\n state = i.split()[-1].split(\"_\")[1]\n replicate = i.split()[-1].split(\"_\")[2]\n \n data_values.append(list(data[i]))\n target_drugs.append(int(target_drug))\n cell_lines.append(cell_line)\n states.append(state)\n replicates.append(replicate)\n \n data_values = np.array(data_values)\n target_drugs = np.array(target_drugs)\n cell_lines = np.array(cell_lines)\n states = np.array(states)\n replicates = np.array(replicates)\n \n return data_values, target_drugs, cell_lines, states, replicates", "def provide_data(self):\n return [(k, v.shape) for k, v in self.data]", "def to_image_space(data):\n return np.swapaxes(np.flip(data, 1), 0, 1)", "def toarray(self, order=None, out=None):\n d = self._process_toarray_args(order, out)\n for i, row in enumerate(self.rows):\n for pos, j in enumerate(row):\n d[i, j] = self.data[i][pos]\n return d", "def row(v):\n return v.reshape((1, v.size))", "def split_data(data, squeeze=False):\n vdata = np.atleast_2d(data)\n nr_freqs = int(vdata.shape[1] / 2)\n part1 = vdata[:, 0:nr_freqs]\n part2 = vdata[:, nr_freqs:]\n if(squeeze):\n part1 = part1.squeeze()\n part2 = part2.squeeze()\n return part1, part2", "def reshape_mat(self, data):\n if len(data['x'][0]) < self.num_ofdim:\n x = data['x']\n x_plus = np.zeros([len(x), self.num_ofdim - len(x[0])])\n data['x'] = np.append(x, x_plus, axis=1)\n return data", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def flatten_data(X):\n\n return X.reshape((-1, X.shape[-1]))", "def get_value_array(data, dimension, expanded, keep_index, geom_col,\n is_points, geom_length=geom_length):\n column = data[dimension.name]\n if keep_index:\n return column\n all_scalar = True\n arrays, scalars = [], []\n for i, geom in enumerate(data[geom_col]):\n length = 1 if is_points else geom_length(geom)\n val = column.iloc[i]\n scalar = isscalar(val)\n if scalar:\n val = np.array([val])\n if not scalar and len(unique_array(val)) == 1:\n val = val[:1]\n scalar = True\n all_scalar &= scalar\n scalars.append(scalar)\n if not expanded or not scalar:\n arrays.append(val)\n elif scalar:\n arrays.append(np.full(length, val))\n if expanded and not is_points and not i == (len(data[geom_col])-1):\n arrays.append(np.array([np.NaN]))\n\n if not len(data):\n return np.array([])\n if expanded:\n return np.concatenate(arrays) if 
len(arrays) > 1 else arrays[0]\n elif (all_scalar and arrays):\n return np.array([a[0] for a in arrays])\n else:\n array = np.empty(len(arrays), dtype=object)\n array[:] = [a[0] if s else a for s, a in zip(scalars, arrays)]\n return array", "def row_to_array(r):\n a = np.ma.array([i for i in r.as_void()])\n return a", "def getDatumImg(row):\n width, height = 20, 20\n square = row[1:].reshape(width,height)\n return square.T", "def transform(self, data):\n unflattened = [unflatten_vec(d) for d in data]\n return numpy.array([self.signature(uf)\n for uf in unflattened])", "def as_column_vector(array):\n if array.ndim != 1:\n raise ValueError(\"Array must be 1D\")\n\n idx = _new_attribute_label('idx', array)\n ds = array.datashape.copy()\n ds.dim_low = list(ds.dim_low) + [0]\n ds.dim_high = list(ds.dim_high) + [0]\n ds.chunk_size = list(ds.chunk_size) * 2\n ds.chunk_overlap = list(ds.chunk_overlap) * 2\n ds.dim_names = list(ds.dim_names) + [idx]\n return array.redimension(ds.schema)", "def prepare_data(data, lags=1):\n X, y = [], []\n for row in range(len(data) - lags - 1):\n a = data[row:(row + lags), 0]\n X.append(a)\n y.append(data[row + lags, 0])\n return np.array(X), np.array(y)", "def prepare_data(data, lags=1):\n X, y = [], []\n for row in range(len(data) - lags - 1):\n a = data[row:(row + lags), 0]\n X.append(a)\n y.append(data[row + lags, 0])\n return np.array(X), np.array(y)", "def _format_column(self, row_data):\n return [[row[i] for row in row_data] for i in range(self.row_length)]", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def get_time_series_1d(data, bin_width):\n return np.array([x for x in zip(*(data[i:] for i in range(bin_width)))])", "def create2d(row_count, col_count, value=None):\n a = [None] * row_count\n for row in range(row_count):\n a[row] = [value] * col_count\n return a", "def np_col_vec (init_list):\n return np_row_vec (init_list).T", "def provide_data(self):\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._data]", "def copy_to_matrix(data):\n spectrogram_shape = data[0].shape\n fit_data = np.zeros(shape=(len(data), spectrogram_shape[0], spectrogram_shape[1]))\n for i in range(len(data)):\n for j in range(spectrogram_shape[0]):\n for k in range(spectrogram_shape[1]):\n fit_data[i][j][k] = data[i][j][k]\n return fit_data", "def flatten(self):\n xv, yv = np.meshgrid(self.columns, self.index, indexing='xy')\n return np.array([xv.ravel(), yv.ravel(), self.values.ravel()])", "def get_2Darray_hdf5(file,cols='Null',nrows='Null',verbose=False):\n if verbose:\n print (\"reading data from hdf5 file {} for filters:\".format(file))\n for col in cols:\n print(col)\n df = pd.read_hdf(file,\"df\")\n smalldf = df.loc[:,cols]\n outarray = smalldf.values #if we switch to pandas 0.24 or higher\n #this could be replaced with smalldf.to_numpy()\n return outarray", "def n2m(a):\n if not isinstance(a, np.ndarray): a = np.array(a)\n return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)", "def row(self):\n return self.reshape((1, self.size))", "def __array__(self):\n return np.asarray(self.data)", "def _data_with_axis(self, axis):\n shpl = list(self.data.shape)\n \n if len(shpl) == 2:\n shpl[1] += 1\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1:] = self.data\n data[:,0] = axis.data \n elif len(shpl) == 1:\n shpl.append(2)\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1] = self.data\n data[:,0] = 
axis.data\n else:\n raise Exception(\"Other shapes than (N,) and (N,M) not implemented\")\n return data", "def split_data_into_input_and_output(data):\n data_in, data_out = list(zip(*[((x[\"synopsis\"]), x[\"gross\"]) for x in data]))\n return np.array(data_in), np.array(data_out)", "def col(self, i):\n return Vector([row[i] for row in self.data])", "def column_or_row_or_1d(y: npt.NDArray) -> npt.NDArray:\n shape = np.shape(y)\n if (len(shape) == 1) or (len(shape) == 2 and (shape[1] == 1 or shape[0] == 1)):\n return np.ravel(y)\n else:\n raise ValueError(\"bad input shape {0}\".format(shape))", "def getArray2d(self):\n\t\treturn self.array2d", "def create_dataset(dataset,time_step=1):\n dataX,dataY=[],[]\n for i in range(len(dataset)-time_step):\n a=dataset[i:i+time_step]\n dataX.append(a)\n dataY.append(dataset[i+time_step])\n return np.asarray(dataX),np.asarray(dataY)", "def make_multi_output_data(self, data):\n confirmed, deceased, recovered = [], [], []\n for sample in data:\n confirmed.append(sample[:,0])\n deceased.append(sample[:,1])\n recovered.append(sample[:,2])\n confirmed = np.stack(confirmed)\n deceased = np.stack(deceased)\n recovered = np.stack(recovered)\n return np.stack([confirmed, deceased, recovered])", "def _split_inputs_outputs(self, data):\n\n\t\tinputs = []\n\t\toutputs = []\n\n\t\tfor point in data:\n\t\t\tinputs.append(point[0])\n\t\t\toutputs.append(point[1])\n\n\t\treturn np.array(inputs), np.array(outputs)", "def data(self) -> List[List[Any]]:\n\n column_wise = [column.values for column in self.plaincolumns]\n row_wise = [list(row) for row in zip(*column_wise)]\n\n return row_wise", "def connect_array_by_last_dim(*data):\n\n aax=list(map(lambda x: x.shape[:-1], data))\n if (not all_equal(aax)):\n raise ValueError('All inputs must have the same shape except the last dimension!')\n\n ax=list(map(lambda x: x.shape[-1], data))\n bx=np.concatenate([[0],np.cumsum(ax)])\n\n atype=list(map(lambda x: x.dtype, data))\n\n adata=np.concatenate(data,axis=-1)\n\n return adata,bx,atype", "def construct_second_order_data(X):\n X_second_order = []\n m = X.shape[1]\n for i in range(m):\n for j in range(m):\n if j <= i:\n X_second_order.append(X[:,i] * X[:,j])\n X_second_order = np.array(X_second_order).T\n return np.concatenate((X,X_second_order), axis = 1)", "def as_row_vector(array):\n if array.ndim != 1:\n raise ValueError(\"Array must be 1D\")\n idx = _new_attribute_label('idx', array)\n ds = array.datashape.copy()\n ds.dim_low = [0] + list(ds.dim_low)\n ds.dim_high = [0] + list(ds.dim_high)\n ds.chunk_size = list(ds.chunk_size) * 2\n ds.chunk_overlap = list(ds.chunk_overlap) * 2\n ds.dim_names = [idx] + list(ds.dim_names)\n return array.redimension(ds.schema)", "def prepare_data(data):\n\n image_array = np.zeros(shape=(len(data), 48, 48))\n image_label = np.array(list(map(int, data['emotion'])))\n\n for i, row in enumerate(data.index):\n image = np.fromstring(data.loc[row, 'pixels'], dtype=int, sep=' ')\n image = np.reshape(image, (48, 48))\n\n image = face_detection(image.astype(np.uint8))\n\n image_array[i] = image\n\n return image_array, image_label", "def ndarray(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n data = self.clear().data.collect()\n\n result = np.zeros(self._shape, dtype=self._dtype)\n\n for e in data:\n result[e[0], e[1]] = e[2]\n\n return result", "def getDataCoordinates(self):\n coord = np.zeros((self.dataset.shape[0], 
2))\n for i in range(len(self.dataset)):\n coord[i, 0] = self.dataset[i][0]\n coord[i, 1] = self.dataset[i][1]\n return coord", "def get_X_y(data, column):\n if not is_string_type(data[0, :]):\n columns_titles = [column_title.decode('UTF-8') for column_title in data[0, :]]\n else:\n columns_titles = [column_title for column_title in data[0, :]]\n X = numpy.empty((data.shape[0] - 1, data.shape[1] - 1), dtype=object)\n y = numpy.empty((data.shape[0] - 1, 1), dtype=object)\n X_index = 0\n for index, column_title in enumerate(columns_titles):\n if column_title == column:\n y = numpy.asarray(data[1:len(data), index], dtype=numpy.float32)\n else:\n if is_numerical_type(data[1:len(data), index]):\n X[:, X_index] = numpy.asarray(data[1:len(data), index], dtype=numpy.float32)\n X_index += 1\n else:\n raise AttributeError(f'Error! Need one-hot encoding before getting X and y data.')\n return numpy.asarray(X, dtype=numpy.float32), y.reshape(-1, 1)", "def _load_data(data, n_prev=max_length):\n docX, docY = [], []\n for i in range(len(data) - n_prev):\n docX.append(data.iloc[i:i + n_prev].as_matrix())\n docY.append(data.iloc[i + n_prev].as_matrix())\n alsX = np.array(docX)\n alsY = np.array(docY)\n\n return alsX, alsY", "def unstack(a, axis=0):\n shape = a.shape\n return [jnp.squeeze(b, axis=axis) for b in \\\n jnp.split(a, shape[axis], axis=axis)]", "def processData(data):\n ids, instances, labels = [], [], []\n for i in data:\n idField = int(i[0])\n instance = i[1:-1]\n label = i[-1]\n ids.append(idField)\n instances.append(instance)\n labels.append(label)\n\n ids = np.array(ids)\n instances = np.array(instances)\n labels = np.array(labels)\n\n return (ids, instances, labels)", "def values(self) -> ndarray:\n if len(self._data) == 1:\n kind: str = next(iter(self._data))\n order: List[int] = [self._column_info[col].loc for col in self._columns]\n arr = self._data[kind][:, order]\n if kind == 'b':\n return arr == 1\n else:\n return arr\n\n if {'b', 'S', 'm', 'M'} & self._data.keys():\n arr_dtype: str = 'O'\n else:\n arr_dtype = 'float64'\n\n v: ndarray = np.empty(self.shape, dtype=arr_dtype, order='F')\n\n for col, dtype, loc, order, col_arr in self._col_info_iter(with_order=True, with_arr=True):\n if dtype == 'S':\n cur_list_map = self._str_reverse_map[loc]\n _va.make_object_str_array(cur_list_map, v, col_arr, order)\n elif dtype == 'M':\n unit = col_arr.dtype.name.replace(']', '').split('[')[1]\n # changes array in place\n _va.make_object_datetime_array(v, col_arr.view('uint64'), order, unit)\n elif dtype == 'm':\n unit = col_arr.dtype.name.replace(']', '').split('[')[1]\n _va.make_object_timedelta_array(v, col_arr.view('uint64'), order, unit)\n else:\n v[:, order] = col_arr\n return v", "def _make_2D_array(df, data_col='Sample DNA Concentration',\n well_col='Well', rows=8, cols=12):\n # initialize empty Cp array\n cp_array = np.empty((rows, cols), dtype=object)\n\n # fill Cp array with the post-cleaned values from the right half of the\n # plate\n for record in df.iterrows():\n row = ord(str.upper(record[1][well_col][0])) - ord('A')\n col = int(record[1][well_col][1:]) - 1\n cp_array[row, col] = record[1][data_col]\n\n return cp_array", "def _serialize(self, data):\n data = [np.array(j) for j in data]\n self._data_shape_list = [j.shape for j in data]\n serialized_data = [j.ravel() for j in data]\n serialized_data = np.hstack(serialized_data)\n return serialized_data", "def transpose(x):\n return x[:, np.newaxis]", "def parse_structured_grid(data):\n # init dimesion fo array\n rows = 0\n columns = 
0\n # set initial value arra\n initial_value_x = data[0, 0]\n # find # dof in y and x dimension of data set\n for i, element in enumerate(data[:, 0]):\n if element == initial_value_x:\n if columns == 0:\n columns = i - columns\n rows += 1\n # fist row is missing replace it with fictional value\n fict_value = np.array((data[columns - 1, 0], data[0, 1]))\n data = np.vstack((fict_value, data))\n # assumed structured grid\n x = data[:columns, 0]\n y = data[::columns, 1]\n return x, y, (rows, columns)", "def transform(array):\n assert array.shape == (10, 2)\n new = Array(columns=\"abcd\")\n for x, y in array:\n new.append([x, y, x + y, x * y])\n return new", "def transpose(self):\n data = [list(col) for col in zip(*self.data)]\n return self.__class__(self.n, self.m, data)", "def numpy(self):\n return self.data", "def slice_data_to_2D(x, y):\n if(x.shape != y.shape):\n print(\"Error: Images and Labels do not have the same shape\")\n else:\n x = np.array([(x[i, :, :, z]) for i in range(x.shape[0]) for z in range(x.shape[3])])\n y = np.array([(y[i, :, :, z]) for i in range(y.shape[0]) for z in range(y.shape[3])])\n return x,y", "def _read_data(self):\n return [np.array([]), np.array([])]", "def Phase2D(data):\r\n ph = np.zeros_like(data)\r\n for u, i in enumerate(data):\r\n ph[u] = Phase(i)\r\n return ph", "def read(self, *args):\n return_values = [[] for _ in range(len(args)+2)]\n for row in self.iter_rows(*args):\n for return_array, value in zip(return_values, row):\n return_array.append(value)\n\n return [np.array(x) for x in return_values]", "def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))", "def IterRows(a: numpy.ndarray) -> t.Iterable[numpy.ndarray]:\n for row in a:\n return row[:, numpy.newaxis]" ]
[ "0.7534442", "0.7534442", "0.70013255", "0.6785662", "0.6688916", "0.6645851", "0.6567787", "0.65319115", "0.6462152", "0.6382598", "0.63573694", "0.62963057", "0.6281688", "0.627003", "0.623539", "0.62131137", "0.61992896", "0.6181052", "0.6180269", "0.616434", "0.6149688", "0.6149343", "0.6074828", "0.6013281", "0.5989657", "0.5979515", "0.5975817", "0.5973645", "0.59426796", "0.59362686", "0.59251505", "0.5911688", "0.590988", "0.589797", "0.5885246", "0.5867916", "0.58611244", "0.5845575", "0.58167917", "0.58060324", "0.5761761", "0.57234603", "0.5718916", "0.57176197", "0.57150173", "0.57078934", "0.5687659", "0.56850505", "0.5671085", "0.56691486", "0.5659148", "0.565352", "0.5646856", "0.5638969", "0.5638969", "0.56309384", "0.5618879", "0.56058085", "0.5604726", "0.5603436", "0.5580791", "0.55759", "0.5567573", "0.55633765", "0.5556218", "0.55430734", "0.55406076", "0.5538545", "0.5531347", "0.5522804", "0.55210465", "0.5519643", "0.5518412", "0.55157584", "0.5514282", "0.55118656", "0.5508513", "0.550747", "0.55004966", "0.5490204", "0.548612", "0.5479746", "0.54773605", "0.54763687", "0.5464295", "0.5460018", "0.5458705", "0.54566914", "0.54452074", "0.5441373", "0.543618", "0.5426036", "0.541995", "0.54147154", "0.5411242", "0.53998905", "0.5399014", "0.53968257", "0.5396561", "0.53882223" ]
0.573665
41
VGG16 constructor for a trainable backbone
def __init__(self, num_classes): super(VGG16, self).__init__() self.vgg16_feature_extractor = VGG16FeatureExtraction(weights_update=True) self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2) self.classifier = VGG16Classfier() self.fc3 = _fc(in_channels=4096, out_channels=num_classes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_vgg():\n input_shape = (256, 256, 3)\n\n vgg = keras.applications.VGG19(include_top = False , input_shape = input_shape , weights=\"imagenet\")\n features = vgg.get_layer(index = 9).output\n\n model = keras.Model(inputs=[vgg.inputs], outputs=[features])\n return model", "def vgg16(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16.paddle')\n model.set_state_dict(model_dict)\n return model", "def vgg16(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']), strict=False)\n return model", "def build_cnn_vgg16(num_classes):\n\n inputs = tf.keras.layers.Input(\n shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)\n )\n\n x = inputs\n x = tf.keras.applications.vgg16.preprocess_input(x)\n vgg16 = tf.keras.applications.VGG16(\n input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS),\n weights=\"imagenet\",\n include_top=False\n )\n\n vgg16.trainable = False\n x = vgg16(x, training=False)\n\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dense(\n units=num_classes,\n activation=tf.keras.activations.softmax\n )(x)\n\n outputs = x\n\n model = tf.keras.Model(\n inputs=inputs,\n outputs=outputs\n )\n\n return model", "def vgg16_bn(pretrained,**kwargs):\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16_bn.paddle')\n model.set_state_dict(model_dict)\n return model", "def vgg16(**kwargs):\r\n return VGG16(**kwargs)", "def __init__(self):\n\t\tsuper(Vgg16, self).__init__()\n\t\tv = vgg16(pretrained= True)\n\t\t# Copy modules of vgg16\n\t\tfeatures = list(v.features)\n\t\tavgpool = v.avgpool\n\t\tsequentials = list(v.classifier)\n\t\tself.features = nn.ModuleList(features).eval() \n\t\tself.avgpool = avgpool\n\t\tself.sequentials = nn.ModuleList(sequentials).eval()", "def vgg16_bn(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGGBase(make_layers(), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))\n return model", "def build_vgg(self):\n vgg = VGG19(weights=\"imagenet\")\n # Set outputs to outputs of last conv. 
layer in block 3\n # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py\n vgg.outputs = [vgg.layers[9].output]\n\n img = Input(shape=self.hr_shape)\n\n # Extract image features\n img_features = vgg(img)\n\n return Model(img, img_features)", "def vgg16_bn(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']), strict=False)\n return model", "def __init__(self, img_size, multi_layers=False):\n super().__init__()\n\n vgg = VGG16(include_top=False, input_shape=(img_size[0], img_size[1], 3))\n if multi_layers:\n layer_ids = [2, 5, 9, 13, 17]\n layer_outputs = [\n Flatten()(vgg.layers[layer_id].output) for layer_id in layer_ids]\n features = Concatenate(axis=-1)(layer_outputs)\n else:\n layer_ids = [13] # 13 -> conv4_3\n features = [\n Flatten()(vgg.layers[layer_id].output) for layer_id in layer_ids]\n\n self._model = Model(inputs=vgg.input, outputs=features)", "def vgg16(*args):\n return _VGGWrapper(models.vgg16(*args))", "def build_vgg(self, weights=\"imagenet\"): \n \n # Input image to extract features from\n img = Input(shape=(self.img_rows, self.img_cols, 3))\n\n # Mean center and rescale by variance as in PyTorch\n processed = Lambda(lambda x: (x-self.mean) / self.std)(img)\n \n # If inference only, just return empty model \n if self.inference_only:\n model = Model(inputs=img, outputs=[img for _ in range(len(self.vgg_layers))])\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n return model\n \n # Get the vgg network from Keras applications\n if weights in ['imagenet', None]:\n vgg = VGG16(weights=weights, include_top=False)\n else:\n vgg = VGG16(weights=None, include_top=False)\n vgg.load_weights(weights, by_name=True)\n\n # Output the first three pooling layers\n vgg.outputs = [vgg.layers[i].output for i in self.vgg_layers] \n \n # Create model and compile\n model = Model(inputs=img, outputs=vgg(processed))\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n\n return model", "def vgg16_1d(**kwargs):\r\n return VGG16_1d(**kwargs)", "def build_model():\n pretrained_model = VGG16(input_shape=(fixed_size[0], fixed_size[1], 3), weights='imagenet', include_top=False)\n # We will not train the layers imported.\n for layer in pretrained_model.layers:\n layer.trainable = False\n transfer_learning_model = Sequential()\n transfer_learning_model.add(pretrained_model)\n transfer_learning_model.add(Flatten())\n transfer_learning_model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n transfer_learning_model.add(Dropout(0.5))\n transfer_learning_model.add(Dense(3, activation='softmax'))\n transfer_learning_model.summary()\n opt = Adam(learning_rate=.0003)\n transfer_learning_model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n return transfer_learning_model", "def vgg16_bn(*args):\n return _VGGWrapper(models.vgg16_bn(*args))", "def instantiate_VGG_model(img_input_shape):\r\n # Load the VGG model\r\n vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=img_input_shape)\r\n \r\n # Freeze the layers except the last 4 layers\r\n for layer in vgg_conv.layers[:-4]:\r\n layer.trainable = False\r\n \r\n # Create the model\r\n model = models.Sequential()\r\n model.add(vgg_conv)\r\n \r\n # Add new layers\r\n model.add(layers.Flatten())\r\n model.add(layers.Dense(256, 
activation='relu'))\r\n model.add(layers.Dropout(0.25))\r\n model.add(layers.Dense(nb_class, activation='softmax'))\r\n \r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=optimizers.RMSprop(lr=1e-4),\r\n metrics=['acc'])\r\n return model", "def build_vgg16_notop(image_dimensions, pooling, size_final_dense, num_classes, trainable=False, weights='imagenet'):\n\n vgg16_base = VGG16(weights=weights\n , include_top=False # Ignore the final dense layers, we'll train our own\n , input_shape=image_dimensions\n , pooling=pooling)\n vgg16_base.trainable=trainable\n\n image_input = Input(shape=image_dimensions)\n\n x = vgg16_base(image_input)\n x = Flatten()(x)\n x = Dense(size_final_dense,activation='relu')(x)\n out = Dense(num_classes,activation='softmax')(x) # Task is classification\n\n model = Model(image_input, out)\n return(model)", "def vgg_16(input_shape=(224, 224, 3), output_shape=1000):\n model = Sequential()\n \n # layer 1 ~ 2 (filter: 64)\n model.add(Input(shape=input_shape))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 112 x 112 x 64\n \n # layer 3 ~ 4 (filter: 128)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 56 x 56 x 128\n \n # layer 5 ~ 7 (filter: 256)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 28 x 28 x 256\n \n # layer 8 ~ 10 (filter: 512)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 14 x 14 x 512\n \n # layer 11 ~ 13 (filter: 512)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 7 x 7 x 512\n \n # layer 14 ~ 16 (Fully Connected)\n model.add(Flatten())\n # flatten: 7 x 7 x 512 = 25,088\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(output_shape, activation='softmax'))\n # categorized by output shape\n \n return model", "def build_vgg16(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_1_feats = convolution(imgs, 3, 3, 64, 1, 1, 'conv1_1')\n conv1_1_feats = nonlinear(conv1_1_feats, 'relu')\n conv1_2_feats = convolution(conv1_1_feats, 3, 3, 64, 1, 1, 'conv1_2')\n conv1_2_feats = nonlinear(conv1_2_feats, 'relu')\n pool1_feats = max_pool(conv1_2_feats, 2, 2, 2, 2, 'pool1')\n\n conv2_1_feats = convolution(pool1_feats, 3, 3, 128, 1, 1, 'conv2_1')\n 
conv2_1_feats = nonlinear(conv2_1_feats, 'relu')\n conv2_2_feats = convolution(conv2_1_feats, 3, 3, 128, 1, 1, 'conv2_2')\n conv2_2_feats = nonlinear(conv2_2_feats, 'relu')\n pool2_feats = max_pool(conv2_2_feats, 2, 2, 2, 2, 'pool2')\n\n conv3_1_feats = convolution(pool2_feats, 3, 3, 256, 1, 1, 'conv3_1')\n conv3_1_feats = nonlinear(conv3_1_feats, 'relu')\n conv3_2_feats = convolution(conv3_1_feats, 3, 3, 256, 1, 1, 'conv3_2')\n conv3_2_feats = nonlinear(conv3_2_feats, 'relu')\n conv3_3_feats = convolution(conv3_2_feats, 3, 3, 256, 1, 1, 'conv3_3')\n conv3_3_feats = nonlinear(conv3_3_feats, 'relu')\n pool3_feats = max_pool(conv3_3_feats, 2, 2, 2, 2, 'pool3')\n\n conv4_1_feats = convolution(pool3_feats, 3, 3, 512, 1, 1, 'conv4_1')\n conv4_1_feats = nonlinear(conv4_1_feats, 'relu')\n conv4_2_feats = convolution(conv4_1_feats, 3, 3, 512, 1, 1, 'conv4_2')\n conv4_2_feats = nonlinear(conv4_2_feats, 'relu')\n conv4_3_feats = convolution(conv4_2_feats, 3, 3, 512, 1, 1, 'conv4_3')\n conv4_3_feats = nonlinear(conv4_3_feats, 'relu')\n pool4_feats = max_pool(conv4_3_feats, 2, 2, 2, 2, 'pool4')\n\n conv5_1_feats = convolution(pool4_feats, 3, 3, 512, 1, 1, 'conv5_1')\n conv5_1_feats = nonlinear(conv5_1_feats, 'relu')\n conv5_2_feats = convolution(conv5_1_feats, 3, 3, 512, 1, 1, 'conv5_2')\n conv5_2_feats = nonlinear(conv5_2_feats, 'relu')\n conv5_3_feats = convolution(conv5_2_feats, 3, 3, 512, 1, 1, 'conv5_3')\n conv5_3_feats = nonlinear(conv5_3_feats, 'relu')\n\n conv5_3_feats_flat = tf.reshape(conv5_3_feats, [self.batch_size, 196, 512])\n self.conv_feats = conv5_3_feats_flat\n self.conv_feat_shape = [196, 512]\n self.num_ctx = 196 \n self.dim_ctx = 512\n\n self.imgs = imgs\n self.is_train = is_train", "def get_model_vgg16():\n # base_model.summary():\n # ....\n # block5_conv4 (Conv2D) (None, 15, 15, 512) 2359808\n # _________________________________________________________________\n # block5_pool (MaxPooling2D) (None, 7, 7, 512) 0\n # _________________________________________________________________\n # flatten (Flatten) (None, 25088) 0\n # _________________________________________________________________\n # fc1 (Dense) (None, 4096) 102764544\n # _________________________________________________________________\n # fc2 (Dense) (None, 4096) 16781312\n # _________________________________________________________________\n # predictions (Dense) (None, 1000) 4097000\n #\n base_model = VGG16(weights='imagenet', include_top=True)\n model = Model(inputs=base_model.input,\n outputs=base_model.get_layer('fc2').output)\n return model", "def Yolov1_vgg16bn(pretrained=False, **kwargs):\n yolo = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n vgg_state_dict = model_zoo.load_url(model_urls['vgg16_bn'])\n yolo_state_dict = yolo.state_dict()\n for k in vgg_state_dict.keys():\n if k in yolo_state_dict.keys() and k.startswith('features'):\n yolo_state_dict[k] = vgg_state_dict[k]\n yolo.load_state_dict(yolo_state_dict)\n return yolo", "def vgg16_bn(model_dir, tag, num_classes, use_cls, batchnorm=True):\n cfg = [[64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 'M'], [512, 512, 512, 512, 512, 512], [1024, 'D', 1024, 'D']]\n net = VGG(model_dir, tag, cfg, num_classes, use_cls, batchnorm=batchnorm)\n\n return net", "def train_vgg16(nameOverride = None, layers_to_train = None):\n\n if nameOverride is None:\n nameOverride = \"block4and5\"\n # load data\n training_sets = load_augmented_dataset()\n\n # build models\n model_vgg = build_vgg_trainable(fine_tune = layers_to_train)\n\n baseWeights_t = 
model_vgg.get_weights()\n\n # NOTE: You can still leave this alone if you've only downloaded the fully augmented set.\n for training_set in training_sets:\n print(\" Starting training for set {}\".format(str(training_set)))\n model_vgg.set_weights(baseWeights_t) # Resets model\n train_x = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][0]))\n train_y = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][1]))\n\n early_stopping_monitor = EarlyStopping(patience=2)\n history = model_vgg.fit(train_x, train_y, batch_size=32, epochs=20, verbose=1, validation_split=0.2,\n shuffle=True,\n callbacks=[early_stopping_monitor])\n\n mpu.plot_accuracy_loss(history,\n \"./model_cache/train_data/{}_{}_vgg16_plots.png\".format(str(training_set), nameOverride))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_{}_vgg16_plots.png\".format(str(training_set), nameOverride),\n \"model_charts/{}_{}_vgg16_plots.png\".format(str(training_set), nameOverride))\n\n model_vgg.save(\"./model_cache/train_data/{}_{}_vgg16.h5\".format(str(training_set), nameOverride))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_{}_vgg16.h5\".format(str(training_set), nameOverride),\n \"saved_models/{}_{}_vgg16.h5\".format(str(training_set), nameOverride))", "def __init__(self, pretrained=True, freeze_weights=True):\n super(Veggie16, self).__init__()\n # Define the model's name for it's output files\n # Load a pre-trained VGG-16 model and turn off autograd\n # so its weights won't change.\n architecture = vgg16(pretrained=pretrained)\n if freeze_weights:\n for layer in architecture.parameters():\n layer.requires_grad = False\n # Copy the convolutional layers of the model.\n self.features = architecture.features\n # Copy the average pooling layer of the model.\n self.avgpool = architecture.avgpool\n # Redefine the classification block of VGG-16.\n # Use LeakyReLU units instead of ReLU units.\n # Output layer has 2 nodes only for the 2 classes in the PCam dataset.\n in_ftrs = architecture.classifier[0].in_features\n self.classifier = nn.Sequential(\n nn.Linear(in_features=in_ftrs, out_features=4096, bias=True),\n nn.LeakyReLU(inplace=True),\n nn.Dropout(p=0.5, inplace=False),\n nn.Linear(in_features=4096, out_features=4096, bias=True),\n nn.LeakyReLU(inplace=True),\n nn.Dropout(p=0.5, inplace=False),\n nn.Linear(in_features=4096, out_features=2, bias=True)\n )\n # Define a LogSoftmax layer for converting outputs to probabilities\n # Not needed in `forward()` because included in nn.CrossEntropyLoss\n self.log_softmax = nn.LogSoftmax(dim=1)", "def build_vgg(hr_shape):\n \n vgg = VGG19(weights=\"imagenet\")\n # Set outputs to outputs of last conv. layer in block 3\n # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py\n vgg.outputs = [vgg.layers[9].output]\n\n img = Input(hr_shape)\n\n # Extract image features\n img_features = vgg(img)\n\n return Model(img, img_features)", "def build_vgg(self):\n # Get the vgg network. 
Extract features from Block 5, last convolution.\n vgg = tf.keras.applications.VGG19(weights=\"imagenet\", input_shape=self.hr_shape, include_top=False)\n vgg.trainable = False\n for layer in vgg.layers:\n layer.trainable = False\n\n # Create model and compile\n model = tf.keras.models.Model(inputs=vgg.input, outputs=vgg.get_layer(\"block5_conv4\").output)\n\n return model", "def vgg(backbone='vgg16', inputs=None, modifier=None, **kwargs):\r\n # choose default input\r\n if inputs is None:\r\n if tf.compat.v1.keras.backend.image_data_format() == 'channels_first':\r\n inputs = tf.keras.layers.Input(shape=(3, 256, 256))\r\n else:\r\n inputs = tf.keras.layers.Input(shape=(256, 256, 3))\r\n # create the vgg backbone\r\n if backbone == 'vgg16':\r\n vgg = VGG16(input_tensor=inputs, include_top=False, weights='imagenet', classes=1)\r\n elif backbone == 'vgg19':\r\n vgg = VGG19(input_tensor=inputs, include_top=False, weights='imagenet', classes=1)\r\n else:\r\n raise ValueError(\"Backbone '{}' not recognized.\".format(backbone))\r\n\r\n if modifier:\r\n vgg = modifier(vgg)\r\n x = vgg.output\r\n\r\n vgg = AveragePooling2D(pool_size=8)(x)\r\n vgg = Flatten()(vgg)\r\n\r\n outputs = Dense(1, activation='sigmoid', kernel_initializer='he_normal')(vgg)\r\n model = Model(inputs=inputs, outputs=outputs)\r\n\r\n # create the full model\r\n return model", "def vgg19(**kwargs):\r\n return VGG19(**kwargs)", "def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = 
self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))", "def VGGModel(input_shape):\n \n\n X_input = Input(input_shape)\n \n # Creating a Neural Network (VGG-16)\n\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(X_input)\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(X)\n\n # Block 2\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(X)\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(X)\n\n # Block 3\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(X)\n X = Conv2D(256, 
(3, 3), activation='relu', padding='same', name='block3_conv2')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(X)\n\n # Block 4\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(X)\n\n # Block 5\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(X)\n \n X = Flatten()(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc')(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc2')(X)\n X = Dense(2048, activation='relu', kernel_initializer = 'he_normal', name='fc3')(X)\n X = Dense(1024, activation='relu', kernel_initializer = 'he_normal', name='fc4')(X)\n X = Dense(512, activation='relu', kernel_initializer = 'he_normal', name='fc5')(X)\n X = Dense(256, activation='relu', kernel_initializer = 'he_normal', name='fc6')(X)\n X = Dense(2, activation='linear', name='regression')(X)\n model = Model(inputs=X_input, outputs = X, name='HappyModel')\n print(model.summary())\n \n return model", "def vgg13_bn(*args):\n return _VGGWrapper(models.vgg13_bn(*args))", "def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)", "def vgg19(pretrained=False,SEED=0,Ratio=0,**kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = MyVGG(make_layers(cfg['E'],SEED=SEED,Ratio=Ratio),**kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))\n return model", "def vgg19(*args):\n return _VGGWrapper(models.vgg19(*args))", "def vgg_based_model(num_cls, input_shape=(150, 150, 3), weights=None):\n # Create a base model\n base_model = VGG16(include_top=False, weights='imagenet', input_shape=input_shape)\n\n # Add new classifier\n last = base_model.output\n x = Flatten()(last)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.5)(x)\n predictions = Dense(num_cls, activation='softmax')(x)\n model = Model(input=base_model.input, output=predictions)\n\n if weights:\n model.load_weights(weights)\n\n return model, base_model", "def model(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D1'], dilation=dilation['D1']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model", "def __init__(self, dropout_rate, num_classes, include_top, layer):\r\n super(VGG16_Rand, self).__init__()\r\n print(\"CIFAR VGG16_Rand is used\")\r\n self.dropout_rate = dropout_rate\r\n self.num_classes = num_classes\r\n self.include_top = include_top\r\n self.layer = layer\r\n self.bias = True\r\n\r\n # Define the building blocks\r\n if layer <= 11:\r\n self.conv11 = CONV_3x3rand(3, 64, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv11 = CONV_3x3(3, 64, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 12:\r\n self.conv12 = nn.Sequential(CONV_3x3rand(64, 64, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n 
nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv12 = nn.Sequential(CONV_3x3(64, 64, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 21:\r\n self.conv21 = CONV_3x3rand(64, 128, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv21 = CONV_3x3(64, 128, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 22:\r\n self.conv22 = nn.Sequential(CONV_3x3rand(128, 128, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv22 = nn.Sequential(CONV_3x3(128, 128, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 31:\r\n self.conv31 = CONV_3x3rand(128, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv31 = CONV_3x3(128, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 32:\r\n self.conv32 = CONV_3x3rand(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv32 = CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 33:\r\n self.conv33 = nn.Sequential(CONV_3x3rand(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv33 = nn.Sequential(CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 41:\r\n self.conv41 = CONV_3x3rand(256, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv41 = CONV_3x3(256, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 42:\r\n self.conv42 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv42 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 43:\r\n self.conv43 = nn.Sequential(CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv43 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 51:\r\n self.conv51 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv51 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 52:\r\n self.conv52 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv52 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 53:\r\n self.conv53 = nn.Sequential(CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv53 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.fc = nn.Sequential(nn.Linear(512, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, num_classes))\r\n\r\n # Initialize the weights\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n # raise Exception('You are using a model without BN!!!')\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)", "def 
__init__(self, input_shape=(3, 244, 244), include_classifier=True,\n **kwargs):\n self._name = \"VGG16\"\n self._weightsUrl = {\n 'th': 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5',\n 'tf': 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'\n }\n\n # If using the Tensor Flow backend we have to transform input shape\n if K.image_dim_ordering() == \"tf\":\n input_shape = (input_shape[1], input_shape[2], input_shape[0])\n\n self._inLayer = Input(shape=input_shape)\n\n self._net = Convolution2D(64, 3, 3, activation='relu',\n border_mode='same',\n name='block1_conv1')(self._inLayer)\n self._net = Convolution2D(64, 3, 3, activation='relu',\n border_mode='same',\n name='block1_conv2')(self._net)\n self._net = MaxPooling2D((2, 2), strides=(2, 2),\n name='block1_pool')(self._net)\n\n self._net = Convolution2D(128, 3, 3, activation='relu',\n border_mode='same',\n name='block2_conv1')(self._net)\n self._net = Convolution2D(128, 3, 3, activation='relu',\n border_mode='same',\n name='block2_conv2')(self._net)\n self._net = MaxPooling2D((2, 2), strides=(2, 2),\n name='block2_pool')(self._net)\n\n self._net = Convolution2D(256, 3, 3, activation='relu',\n border_mode='same',\n name='block3_conv1')(self._net)\n self._net = Convolution2D(256, 3, 3, activation='relu',\n border_mode='same',\n name='block3_conv2')(self._net)\n self._net = Convolution2D(256, 3, 3, activation='relu',\n border_mode='same',\n name='block3_conv3')(self._net)\n self._net = MaxPooling2D((2, 2), strides=(2, 2),\n name='block3_pool')(self._net)\n\n self._net = Convolution2D(512, 3, 3, activation='relu',\n border_mode='same',\n name='block4_conv1')(self._net)\n self._net = Convolution2D(512, 3, 3, activation='relu',\n border_mode='same',\n name='block4_conv2')(self._net)\n self._net = Convolution2D(512, 3, 3, activation='relu',\n border_mode='same',\n name='block4_conv3')(self._net)\n self._net = MaxPooling2D((2, 2), strides=(2, 2),\n name='block4_pool')(self._net)\n\n self._net = Convolution2D(512, 3, 3, activation='relu',\n border_mode='same',\n name='block5_conv1')(self._net)\n self._net = Convolution2D(512, 3, 3, activation='relu',\n border_mode='same',\n name='block5_conv2')(self._net)\n self._net = Convolution2D(512, 3, 3, activation='relu',\n border_mode='same',\n name='block5_conv3')(self._net)\n self._net = MaxPooling2D((2, 2), strides=(2, 2),\n name='block5_pool')(self._net)\n\n # Classification block\n if include_classifier:\n self._net = Flatten(name='flatten')(self._net)\n self._net = Dense(4096, activation='relu', name='fc1')(self._net)\n self._net = Dense(4096, activation='relu', name='fc2')(self._net)\n self._net = Dense(1000, activation='softmax',\n name='predictions')(self._net)\n\n # Create the model\n self._model = Model(self._inLayer, self._net)\n super(VGG16, self).__init__(**kwargs)", "def vgg11_bn(*args):\n return _VGGWrapper(models.vgg11_bn(*args))", "def vgg_16(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_16'):\n with tf.name_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n outputs_collections=end_points_collection):\n net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], 
scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout6')\n net = slim.conv2d(net, 4096, [1, 1], scope='fc7')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into an end_points dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc + '/fc8'] = net\n return net, end_points", "def modified_vgg16(state_dict: Dict[str, torch.Tensor] = None, *,\n pretrained: bool = False,\n num_classes: int = NUM_CLASSES, hidden_dim: int = HIDDEN_DIM) -> VGG:\n vgg: VGG = vgg16(pretrained=pretrained)\n # Add fine-tuning/transfer learning modules\n vgg.classifier[0] = torch.nn.Linear(7 * 7 * 512, hidden_dim)\n vgg.classifier[3] = torch.nn.Linear(hidden_dim, hidden_dim)\n vgg.classifier[6] = torch.nn.Linear(hidden_dim, num_classes)\n if state_dict is not None:\n vgg.load_state_dict(state_dict)\n return vgg", "def build(self):\n\n # bgr_ = bgr*255.0\n bgr_ = self.X\n start_time = time.time()\n print(\"build model started\")\n\n # blue ,green, red = tf.split(axis=3, num_or_size_splits=3, value= bgr)\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=bgr_)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n # blue - VGG_MEAN[0],\n # green - VGG_MEAN[1],\n # red - VGG_MEAN[2],\n\n red - VGG_MEAN[0],\n green - VGG_MEAN[1],\n blue - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n\n\n print(bgr.shape)\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n\n\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n\n\n\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert 
self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n\n self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.fc9 = self.fc_layer(self.fc8,'fc9')\n # self.relu9 = tf.nn.relu(self.fc9)\n\n\n\n\n relu8 = tf.nn.relu(self.fc8)\n fc9 = self.fc_layer(relu8, 'fc9')\n print((\"build model finished: %ds\" % (time.time() - start_time)))\n return fc9\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")", "def vgg13(*args):\n return _VGGWrapper(models.vgg13(*args))", "def vgg19_bn(*args):\n return _VGGWrapper(models.vgg19_bn(*args))", "def vgg11(*args):\n return _VGGWrapper(models.vgg11(*args))", "def vgg19(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['E']), **kwargs)\n\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19'],\n model_dir='../'))\n return model", "def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)", "def __init__(self, dropout_rate, num_classes, include_top, layer):\r\n super(VGG16_Shuffle, self).__init__()\r\n print(\"CIFAR VGG16_Shuffle is used\")\r\n self.dropout_rate = dropout_rate\r\n self.num_classes = num_classes\r\n self.include_top = include_top\r\n self.layer = layer\r\n\r\n # Define the building blocks\r\n if layer == 11:\r\n self.conv11 = CONV_3x3shuffle(3, 64, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv11 = CONV_3x3(3, 64, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 12:\r\n self.conv12 = nn.Sequential(CONV_3x3shuffle(64, 64, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv12 = nn.Sequential(CONV_3x3(64, 64, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv12 = CONV_3x3(64, 64, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 21:\r\n self.conv21 = CONV_3x3shuffle(64, 128, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv21 = CONV_3x3(64, 128, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 22:\r\n self.conv22 = nn.Sequential(CONV_3x3shuffle(128, 128, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv22 = nn.Sequential(CONV_3x3(128, 128, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv22 = CONV_3x3(128, 128, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 31:\r\n self.conv31 = CONV_3x3shuffle(128, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv31 = CONV_3x3(128, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 32:\r\n self.conv32 = CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv32 = CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 33:\r\n self.conv33 = nn.Sequential(CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv33 = nn.Sequential(CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv33 = CONV_3x3(256, 256, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 41:\r\n self.conv41 = CONV_3x3shuffle(256, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv41 = 
CONV_3x3(256, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 42:\r\n self.conv42 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv42 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 43:\r\n self.conv43 = nn.Sequential(CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv43 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv43 = CONV_3x3(512, 512, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 51:\r\n self.conv51 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv51 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 52:\r\n self.conv52 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv52 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 53:\r\n self.conv53 = nn.Sequential(CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv53 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.fc = nn.Sequential(nn.Linear(512, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, num_classes))\r\n\r\n # Initialize the weights\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n # raise Exception('You are using a model without BN!!!')\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)", "def build_model(img_width,\n\timg_height,\n\tchannels,\n\tnum_classes,\n\tlr=1e-5,\n\tfreeze=False):\n\n\tvgg_model = VGG16(include_top=False, weights='imagenet', input_shape=(img_width, img_height, channels))\n\n\tvgg_output = vgg_model.output\n\tdrop0 = Dropout(0.5)(vgg_output)\n\tflat = Flatten()(drop0)\n\tdense1 = Dense(512, activation='relu')(flat)\n\tdrop1 = Dropout(0.5)(dense1)\n\tpredictions = Dense(num_classes, activation='softmax')(drop1)\n\n\tmodel = Model(inputs=vgg_model.input, outputs=predictions)\n\t\n\tif freeze:\n\t\tfor layer in vgg_model.layers:\n\t\t\tlayer.trainable = False\n\n\tmodel.summary()\n\tadam = Adam(lr=lr, decay=1e-6)\n\tmodel.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n\treturn model", "def vgg16_bn(tensorized, **kwargs):\n return _vgg('vgg16_bn', 'D', True, tensorized, **kwargs)", "def __init__(self):\r\n torch.nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-19.\r\n self.features = torchvision.models.vgg19_bn(pretrained=False).features\r\n self.features = torch.nn.Sequential(*list(self.features.children())\r\n [:-1]) # Remove pool5.\r\n # Linear classifier.\r\n self.fc = torch.nn.Linear(512**2, 11)", "def __init__(self, output_index: int = 26):\n super().__init__()\n vgg = torchvision.models.vgg19(pretrained=True)\n self.features = vgg.features\n self.output_index = output_index\n del vgg", "def __init__(self, feature_type={'model': 'vgg16', 'input_layer': 'default', 'output_layer': 'flatten'}):\n if feature_type['model'] == 'vgg16':\n self.feature_model = keras.applications.vgg16.VGG16(include_top=True, weights='imagenet',\n 
input_tensor=None, input_shape=None, pooling=None, classes=1000)\n if feature_type['model'] == 'custom':\n self.load_custom_model(os.getcwd())\n self.graph = tf.get_default_graph()\n self.load_intermediate_model(feature_type['output_layer'])", "def __init__(self, embed_size):\n super(ImgAttentionEncoder, self).__init__()\n vggnet_feat = models.vgg19(pretrained=True).features\n modules = list(vggnet_feat.children())[:-2]\n self.cnn = nn.Sequential(*modules)\n self.fc = nn.Sequential(nn.Linear(self.cnn[-3].out_channels, embed_size),\n nn.Tanh()) # feature vector of image", "def create_vgg(input_shape):\n\n # Load a pre-trained VGG19 model trained on 'Imagenet' dataset\n vgg = VGG19(weights=\"imagenet\")\n vgg.outputs = [vgg.layers[9].output]\n\n input_layer = Input(shape=input_shape)\n\n # Extract features\n features = vgg(input_layer)\n\n # Create a Keras model\n model = Model(inputs=[input_layer], outputs=[features])\n return model", "def vgg_16(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_16',\n fc_conv_padding='VALID',\n global_pool=False):\n with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n outputs_collections=end_points_collection):\n net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n\n # if num_classes == 0:\n # return net\n\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout6')\n net = slim.conv2d(net, 4096, [1, 1], scope='fc7')\n # Convert end_points_collection into an end_points dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if global_pool:\n net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')\n end_points['global_pool'] = net\n if num_classes:\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points", "def vgg16(tensorized, **kwargs):\n return _vgg('vgg16', 'D', False, tensorized, **kwargs)", "def __init__(self):\n torch.nn.Module.__init__(self)\n ######################### Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features # fine tune?\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-22]) # Remove pool2 and rest, lack of computational resource\n # No grad for convVGG\n # for param in self.features.parameters():\n # param.requires_grad = False\n\n #################### Channel Grouping Net\n # 
self.fc1_ = torch.nn.Linear(128, 128*16) # lack of resource\n # self.fc2_ = torch.nn.Linear(128, 128*16)\n # self.fc3_ = torch.nn.Linear(128, 128*16)\n #\n # torch.nn.init.kaiming_normal_(self.fc1_.weight.data, nonlinearity='relu')\n # if self.fc1_.bias is not None:\n # torch.nn.init.constant_(self.fc1_.bias.data, val=0) # initialize the fc layer bias to a constant\n # torch.nn.init.kaiming_normal_(self.fc2_.weight.data, nonlinearity='relu')\n # if self.fc2_.bias is not None:\n # torch.nn.init.constant_(self.fc2_.bias.data, val=0) # initialize the fc layer bias to a constant\n # torch.nn.init.kaiming_normal_(self.fc3_.weight.data, nonlinearity='relu')\n # if self.fc3_.bias is not None:\n # torch.nn.init.constant_(self.fc3_.bias.data, val=0) # initialize the fc layer bias to a constant\n\n self.fc1 = torch.nn.Linear(128*28*28, 128)\n self.fc2 = torch.nn.Linear(128*28*28, 128)\n self.fc3 = torch.nn.Linear(128*28*28, 128)\n\n\n torch.nn.init.kaiming_normal_(self.fc1.weight.data, nonlinearity='relu')\n if self.fc1.bias is not None:\n torch.nn.init.constant_(self.fc1.bias.data, val=0) # initialize the fc layer bias to a constant\n torch.nn.init.kaiming_normal_(self.fc2.weight.data, nonlinearity='relu')\n if self.fc2.bias is not None:\n torch.nn.init.constant_(self.fc2.bias.data, val=0) # initialize the fc layer bias to a constant\n torch.nn.init.kaiming_normal_(self.fc3.weight.data, nonlinearity='relu')\n if self.fc3.bias is not None:\n torch.nn.init.constant_(self.fc3.bias.data, val=0) # initialize the fc layer bias to a constant\n\n self.layerNorm = nn.LayerNorm([224,224])\n\n # global grad for hook\n self.image_reconstruction = None\n self.register_hooks()\n self.GradWeight = 1e-1\n\n # ################### STN input N*3*448*448\n # self.localization = [\n # nn.Sequential(\n # nn.MaxPool2d(4,stride=4),#112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5,stride=1,padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3,stride=1,padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) #output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda(),\n # nn.Sequential(\n # nn.MaxPool2d(4, stride=4), # 112\n # nn.ReLU(True),\n #\n # nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2), # 112\n # nn.MaxPool2d(2, stride=2), # 56\n # nn.ReLU(True),\n #\n # nn.Conv2d(32, 48, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 56/2=28\n # nn.ReLU(True),\n #\n # nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=1),\n # nn.MaxPool2d(2, stride=2), # 28/2=14\n # nn.ReLU(True) # output 64*14*14\n # ).cuda()\n # ]\n # # Regressor for the 3 * 2 affine matrix\n # self.fc_loc = [\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda(),\n # nn.Sequential(\n # nn.Linear(64 * 14 * 14, 32),\n # nn.ReLU(True),\n # nn.Linear(32, 3 * 2)\n # ).cuda()\n # ]\n # # Initialize the weights/bias with identity transformation\n # for fc_locx in 
self.fc_loc:\n # fc_locx[2].weight.data.zero_()\n # fc_locx[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n ######################## Bilinear CNN output 256 channels\n self.bcnnConv_1 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_2 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n self.bcnnConv_3 = torch.nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features.children())\n [:-1]) # Remove pool3 and rest.\n # BCNN Linear classifier.\n self.bfc1 = torch.nn.Linear(512*512, 200)\n self.bfc2 = torch.nn.Linear(512*512, 200)\n self.bfc3 = torch.nn.Linear(512*512, 200)\n torch.nn.init.kaiming_normal_(self.bfc1.weight.data) # Kaiming (He) initialization\n if self.bfc1.bias is not None:\n torch.nn.init.constant_(self.bfc1.bias.data, val=0) # initialize the fc layer bias to a constant\n torch.nn.init.kaiming_normal_(self.bfc2.weight.data) # Kaiming (He) initialization\n if self.bfc2.bias is not None:\n torch.nn.init.constant_(self.bfc2.bias.data, val=0) # initialize the fc layer bias to a constant\n torch.nn.init.kaiming_normal_(self.bfc3.weight.data) # Kaiming (He) initialization\n if self.bfc3.bias is not None:\n torch.nn.init.constant_(self.bfc3.bias.data, val=0) # initialize the fc layer bias to a constant\n\n # self.CBP1 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP2 = CompactBilinearPooling(512, 512, 50000)\n # self.CBP3 = CompactBilinearPooling(512, 512, 50000)", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def test_model(self):\n\n vgg16 = VGG16()\n data_dir = os.path.dirname(os.path.abspath(__file__))\n data = os.path.join(data_dir, 'data/elephant.jpg')\n\n features = vgg16.produce(data)\n self.assertEqual(len(features[-1]), 4096)", "def create_vggvox(embedding_dims, name=\"vggvox\"):\n model = tf.keras.Sequential(name=name)\n model.add(tf.keras.layers.Conv2D(96, (7,7), strides=(2,2), padding=\"valid\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv1\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), name=\"mpool1\"))\n model.add(tf.keras.layers.Conv2D(256, (5,5), strides=(2,2), padding=\"valid\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv2\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((3,3), strides=(2,2), name=\"mpool2\"))\n model.add(tf.keras.layers.Conv2D(384, (3,3), 
strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv3\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Conv2D(256, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv4\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Conv2D(256, (3,3), strides=(1,1), padding=\"same\", kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_conv5\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPooling2D((5,3), strides=(3,2), name=f\"{name}_mpool5\"))\n model.add(tf.keras.layers.Conv2D(4096, (9,1), strides=1, kernel_regularizer=tf.keras.regularizers.L2(5e-4), padding=\"valid\", name=f\"{name}_fc6\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=[1,2], name=f\"{name}_apool6\")))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(embedding_dims, kernel_regularizer=tf.keras.regularizers.L2(5e-4), name=f\"{name}_embeddings\"))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.ReLU())\n return model", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n\n blocks = [self.layer_11,\n self.layer_12,\n self.layer_13,\n self.layer_14,\n self.layer_15]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def __init__(self):\n torch.nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = 
torchvision.models.vgg16(pretrained=True).features\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-1]) # Remove pool5.\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 36)", "def VGG_create(desc=None, isigma=None, img_normalize=None, use_scale_orientation=None, scale_factor=None, dsc_normalize=None): # real signature unknown; restored from __doc__\n pass", "def __init__(self, img_rows=400, img_cols=400, vgg_weights=\"imagenet\", inference_only=False, net_name='default', gpus=1, vgg_device=None):\n \n # Settings\n self.img_rows = img_rows\n self.img_cols = img_cols\n self.img_overlap = 30\n self.inference_only = inference_only\n self.net_name = net_name\n self.gpus = gpus\n self.vgg_device = vgg_device\n\n # Scaling for VGG input\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n \n # get PowerSpect_CMB\n reader = np.zeros((2507,))\n fp = open('./data/COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt')\n \n for i,line in enumerate(fp):\n if i >= 1:\n reader[i-1] = line.split()[1]\n \n fp.close() \n readers = np.log(reader)\n self.cl = K.constant(readers)\n # Assertions\n assert self.img_rows >= 256, 'Height must be >=256 pixels'\n assert self.img_cols >= 256, 'Width must be >=256 pixels'\n\n # Set current epoch\n self.current_epoch = 0\n \n # VGG layers to extract features from (first maxpooling layers, see pp. 7 of paper)\n self.vgg_layers = [3, 6, 10]\n\n # Instantiate the vgg network\n if self.vgg_device:\n with tf.device(self.vgg_device):\n self.vgg = self.build_vgg(vgg_weights)\n else:\n self.vgg = self.build_vgg(vgg_weights)\n \n # Create UNet-like model\n if self.gpus <= 1:\n self.model, inputs_mask = self.build_pconv_unet()\n self.compile_pconv_unet(self.model, inputs_mask) \n else:\n with tf.device(\"/cpu:0\"):\n self.model, inputs_mask = self.build_pconv_unet()\n self.model = multi_gpu_model(self.model, gpus=self.gpus)\n self.compile_pconv_unet(self.model, inputs_mask)", "def __init__(\n self, \n dim_feat_raw, \n dim_feat_smooth, \n dim_label_raw, \n dim_label_smooth, \n arch_gnn, \n aug_feat,\n num_ensemble, \n train_params\n ):\n super().__init__()\n self.mulhead = 1\n self.num_layers = arch_gnn[\"num_layers\"]\n self.dropout, self.dropedge = train_params[\"dropout\"], train_params['dropedge']\n self.mulhead = int(arch_gnn[\"heads\"]) # only useful for GAT\n\n self.branch_sharing = arch_gnn['branch_sharing'] # only for ensemble\n\n self.type_feature_augment = aug_feat\n assert dim_feat_raw <= dim_feat_smooth, \"smoothed feature cannot have a smaller shape than the original one\"\n # NOTE: dim_label_raw may be larger than dim_label_smooth ==> label is not used as input\n self.num_classes = dim_label_raw\n self.dim_label_in = dim_label_smooth\n self.dim_feat_in = dim_feat_smooth\n self.dim_hidden = arch_gnn['dim']\n # build the model below\n dim, act = arch_gnn['dim'], arch_gnn['act']\n self.aug_layers, self.conv_layers, self.res_pool_layers = [], [], []\n for i in range(num_ensemble):\n # feat aug\n if len(self.type_feature_augment) > 0:\n self.aug_layers.append(nn.ModuleList(\n nn.Linear(_dim, self.dim_feat_in) for _, _dim in self.type_feature_augment\n ))\n # graph convs\n convs = []\n if i == 0 or not self.branch_sharing:\n for j in range(arch_gnn['num_layers']):\n cls_gconv = DeepGNN.NAME2CLS[arch_gnn['aggr']]\n dim_in = (self.dim_feat_in + self.dim_label_in) if j == 0 else dim\n convs.append(cls_gconv(dim_in, dim, dropout=self.dropout, act=act, mulhead=self.mulhead))\n 
self.conv_layers.append(nn.Sequential(*convs))\n else: # i > 0 and branch_sharing\n self.conv_layers.append(self.conv_layers[-1])\n # skip-pooling layer\n type_res = arch_gnn['residue'].lower()\n type_pool = arch_gnn['pooling'].split('-')[0].lower()\n cls_res_pool = layers.ResPool\n args_pool = {}\n if type_pool == 'sort':\n args_pool['k'] = int(arch_gnn['pooling'].split('-')[1])\n self.res_pool_layers.append(\n cls_res_pool(dim, dim, arch_gnn['num_layers'], type_res, type_pool,\n dropout=self.dropout, act=act, args_pool=args_pool\n ))\n if len(self.aug_layers) > 0:\n self.aug_layers = nn.ModuleList(self.aug_layers)\n self.conv_layers = nn.ModuleList(self.conv_layers)\n self.res_pool_layers = nn.ModuleList(self.res_pool_layers)\n # ------- ensembler + classifier -------\n if num_ensemble == 1:\n self.ensembler = layers.EnsembleDummy()\n else:\n self.ensembler = layers.EnsembleAggregator(dim, dim, num_ensemble, dropout=self.dropout, \n type_dropout=train_params[\"ensemble_dropout\"], act=arch_gnn[\"ensemble_act\"])\n self.classifier = DeepGNN.NAME2CLS['mlp'](dim, self.num_classes, act='I', dropout=0.)\n # ---- optimizer, etc. ----\n self.lr = train_params[\"lr\"]\n self.sigmoid_loss = arch_gnn[\"loss\"] == \"sigmoid\"\n self.loss, self.opt_op = 0, None\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n self.num_ensemble = num_ensemble", "def vgg_fcn(images,\n num_classes=FLAGS.num_classes,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=False, #True,\n scope='vgg_16',\n fc_conv_padding='VALID'):\n with tf.variable_scope(scope, 'vgg_16', [images]) as sc:\n end_points_collection = sc.name + '_end_points'\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose, slim.fully_connected, slim.max_pool2d], outputs_collections=end_points_collection):\n conv1 = slim.repeat(images, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n pool1 = slim.max_pool2d(conv1, [2, 2], scope='pool1')\n conv2 = slim.repeat(pool1, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n pool2 = slim.max_pool2d(conv2, [2, 2], scope='pool2')\n conv3 = slim.repeat(pool2, 3, slim.conv2d, 256, [3, 3], scope='conv3')\n pool3 = slim.max_pool2d(conv3, [2, 2], scope='pool3')\n conv4 = slim.repeat(pool3, 3, slim.conv2d, 512, [3, 3], scope='conv4')\n pool4 = slim.max_pool2d(conv4, [2, 2], scope='pool4')\n conv5 = slim.repeat(pool4, 3, slim.conv2d, 512, [3, 3], scope='conv5')\n pool5 = slim.max_pool2d(conv5, [2, 2], scope='pool5')\n fc6 = slim.conv2d(pool5, 4096, [7, 7], stride=1, scope='fc6')\n drop6 = slim.dropout(fc6, dropout_keep_prob, is_training=is_training, scope='dropout6')\n fc7 = slim.conv2d(drop6, 4096, [1, 1], scope='fc7')\n drop7 = slim.dropout(fc7, dropout_keep_prob, is_training=is_training, scope='dropout7')\n fc8 = slim.conv2d(drop7, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8')\n #end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n\n if spatial_squeeze:\n fc8 = tf.squeeze(fc8, [1, 2], name='fc8/squeezed')\n msk_shape = images.shape\n # it is initialized as bilinear interpolation (calculation predefined)\n score_fr = slim.conv2d(fc7, num_classes, [1, 1], scope='score_fr')\n tf.logging.info(\"score_fr size: %s\" % tf.shape(score_fr))\n fc32s = slim.conv2d_transpose(inputs=score_fr, num_outputs=num_classes, kernel_size=[64, 64], stride=32, scope='fc32s')\n tf.logging.info(\"fc32s: %s\" % tf.shape(fc32s))\n\n #fc32s = tf.image.crop_to_bounding_box(fc32s, 19, 19, msk_shape[1], msk_shape[2])\n \n upscore2 = slim.conv2d_transpose(inputs=score_fr, 
num_outputs=num_classes, kernel_size=[4, 4], stride=2, scope='upscore2')\n\n deconv_shape1 = upscore2.get_shape() \n score_pool4 = slim.conv2d(pool4, num_classes, [1, 1], stride=1, scope='score_pool4')\n\n #score_pool4 = tf.image.crop_to_bounding_box(score_pool4, 5, 5, deconv_shape1[1], deconv_shape1[2])\n \n fuse_pool4 = tf.add(upscore2, score_pool4)\n\n fc16s = slim.conv2d_transpose(inputs=fuse_pool4, num_outputs=num_classes, kernel_size=[32, 32], stride=16, scope='fc16s')\n\n tf.logging.info(\"fc16s size: %s\" % tf.shape(fc16s))\n #fc16s = tf.image.crop_to_bounding_box(fc16s, 27, 27, msk_shape[1], msk_shape[2])\n \n upscore_pool4 = slim.conv2d_transpose(inputs=fuse_pool4, num_outputs=num_classes, kernel_size=[4, 4], stride=2, scope='upscore_pool4')\n\n upscore_pool3 = slim.conv2d(pool3, num_classes, [1, 1], stride=1, scope='upscore_pool3')\n deconv_shape2 = upscore_pool4.get_shape()\n #upscore_pool3 = tf.image.crop_to_bounding_box(upscore_pool3, 9, 9, deconv_shape2[1], deconv_shape[2])\n fuse_pool3 = tf.add(upscore_pool4, upscore_pool3)\n\n fc8s = slim.conv2d_transpose(inputs=fuse_pool3, num_outputs=num_classes, kernel_size=[16, 16], stride=8, scope='fc8s')\n tf.logging.info(\"fc8s size: %s\" % tf.shape(fc8s))\n #fc8s = tf.image.crop_to_bounding_box(fc8s, 31, 31, msk_shape[1], msk_shape[2])\n\n\n annotation_pred = tf.argmax(fc8s, dimension=3, name=\"prediction\")\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n\n tf.get_variable_scope().reuse_variables()\n\n return tf.expand_dims(annotation_pred, dim=3), fc8s, end_points", "def __init__(self, num_classes=200):\n nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = vgg16(pretrained=True).features\n self.features_conv5_1 = nn.Sequential(*list(self.features.children())[:-5])\n self.features_conv5_2 = nn.Sequential(*list(self.features.children())[-5:-3])\n self.features_conv5_3 = nn.Sequential(*list(self.features.children())[-3:-1])\n self.bilinear_proj = nn.Sequential(nn.Conv2d(512, 8192, kernel_size=1, bias=False),\n nn.BatchNorm2d(8192),\n nn.ReLU(inplace=True))\n # Linear classifier.\n self.fc = torch.nn.Linear(8192 * 3, num_classes)", "def model(pretrained=False, **kwargs):\r\n\r\n layers = make_layers(cfg['O'], dilation=dilation['D1'])\r\n cnv = np.cumsum(cnvs['OI']) if kwargs['args'].IN or kwargs['args'].INL else np.cumsum(cnvs['O'])\r\n model = VGG(layers, cnvs=cnv, **kwargs)\r\n if pretrained:\r\n pre2local_keymap = [('features.{}.weight'.format(i), 'conv1_2.{}.weight'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.bias'.format(i), 'conv1_2.{}.bias'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 10), 'conv3.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 10), 'conv3.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 17), 'conv4.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 17), 'conv4.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 24), 'conv5.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 24), 'conv5.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap = dict(pre2local_keymap)\r\n\r\n\r\n model_dict = model.state_dict()\r\n pretrained_file = os.path.join(kwargs['args'].pretrained_model_dir, kwargs['args'].pretrained_model)\r\n if 
os.path.isfile(pretrained_file):\r\n pretrained_dict = torch.load(pretrained_file)\r\n print('load pretrained model from {}'.format(pretrained_file))\r\n else:\r\n pretrained_dict = model_zoo.load_url(model_urls['vgg16'])\r\n print('load pretrained model from {}'.format(model_urls['vgg16']))\r\n # 0. replace the key\r\n pretrained_dict = {pre2local_keymap[k] if k in pre2local_keymap.keys() else k: v for k, v in\r\n pretrained_dict.items()}\r\n # *. show the loading information\r\n for k in pretrained_dict.keys():\r\n if k not in model_dict:\r\n print('Key {} is removed from vgg16'.format(k))\r\n print(' ')\r\n for k in model_dict.keys():\r\n if k not in pretrained_dict:\r\n print('Key {} is newly added for DA Net'.format(k))\r\n # 1. filter out unnecessary keys\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n # 2. overwrite entries in the existing state dict\r\n model_dict.update(pretrained_dict)\r\n # 3. load the new state dict\r\n model.load_state_dict(model_dict)\r\n return model", "def __init__(self, requires_grad=False):\n super().__init__()\n vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features\n # ImageNet mean and std\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(2):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(2, 7):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(7, 12):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(12, 21):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(21, 30):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n # disable gradient on VGG layers\n for param in self.parameters():\n param.requires_grad = False", "def get_vgg_model(self):\n # Load our model. 
We load pretrained VGG, trained on imagenet data\n self.vgg_model = tf.keras.applications.vgg19.VGG19(\n include_top=False, weights='imagenet')\n self.vgg_model.trainable = False\n # Get output layers corresponding to style and content layers\n self.style_outputs = [self.vgg_model.get_layer(\n name).output for name in self.style_layers]\n self.content_outputs = [self.vgg_model.get_layer(\n name).output for name in self.content_layers]\n self.model_outputs = self.style_outputs + self.content_outputs\n # Build model\n self.model = models.Model(self.vgg_model.input, self.model_outputs)", "def train_VGG_model(X_train_input, y_train_input, model, epochs=30, batch_size=32, patience=10):\r\n from sklearn.preprocessing import LabelBinarizer \r\n # One-hot encode target\r\n lb = LabelBinarizer()\r\n lb.fit(y_train_input)\r\n \r\n y_train_onehot = lb.transform(y_train_input)\r\n \r\n # Augmentation step\r\n train_batches = ImageDataGenerator().flow(X_train_input, y_train_onehot,\r\n batch_size=batch_size,shuffle=True)\r\n \r\n early_stopping_callback = EarlyStopping(monitor='acc', patience=patience, restore_best_weights=True)\r\n \r\n model.fit_generator(train_batches, steps_per_epoch=len(X_train_input)//batch_size,\r\n epochs=epochs,verbose=2, callbacks=[early_stopping_callback])\r\n return model, lb", "def Vgg19_simple_api(input, reuse, nchannels, rgb=False):\n VGG_MEAN = [103.939, 116.779, 123.68]\n with tf.variable_scope(\"VGG19\", reuse=reuse) as vs:\n start_time = time.time()\n print(\"build model started\")\n\n if rgb == True:\n rgb_scaled = input * 255.0\n # Convert RGB to BGR\n if tf.__version__ <= '0.11':\n red, green, blue = tf.split(3, 3, rgb_scaled)\n else: # TF 1.0\n # print(rgb_scaled)\n red, green, blue = tf.split(rgb_scaled, 3, 3)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n if tf.__version__ <= '0.11':\n bgr = tf.concat(3, [\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n else:\n bgr = tf.concat(\n [\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ], axis=3)\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n \"\"\" input layer \"\"\"\n net_in = InputLayer(bgr, name='input')\n\n else:\n assert input.get_shape().as_list()[1:] == [224, 224, nchannels]\n net_in = InputLayer(input, name = 'input')\n\n\n \"\"\" conv1 \"\"\"\n network = Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_1')\n network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2')\n network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')\n \"\"\" conv2 \"\"\"\n network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_1')\n network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_2')\n network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')\n \"\"\" conv3 \"\"\"\n network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_1')\n network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_2')\n network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', 
name='conv3_3')\n network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_4')\n network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')\n \"\"\" conv4 \"\"\"\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_1')\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_2')\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_3')\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_4')\n network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4') # (batch_size, 14, 14, 512)\n conv = network\n \"\"\" conv5 \"\"\"\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_1')\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_2')\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_3')\n network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_4')\n network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool5') # (batch_size, 7, 7, 512)\n \"\"\" fc 6~8 \"\"\"\n network = FlattenLayer(network, name='flatten')\n network = DenseLayer(network, n_units=4096, act=tf.nn.relu, name='fc6')\n network = DenseLayer(network, n_units=4096, act=tf.nn.relu, name='fc7')\n network = DenseLayer(network, n_units=1000, act=tf.identity, name='fc8')\n print(\"build model finished: %fs\" % (time.time() - start_time))\n return network, conv", "def vgg11_bn(tensorized, **kwargs):\n return _vgg('vgg11_bn', 'A', True, tensorized, **kwargs)", "def __init__(self, embed_size):\n super(ImgEncoder, self).__init__()\n model = models.vgg19(pretrained=True)\n in_features = model.classifier[-1].in_features # input size of feature vector\n model.classifier = nn.Sequential(\n *list(model.classifier.children())[:-1]) # remove last fc layer\n\n self.model = model # loaded model without last fc layer\n self.fc = nn.Linear(in_features, embed_size) # feature vector of image", "def vgg13(tensorized, **kwargs):\n return _vgg('vgg13', 'B', False, tensorized, **kwargs)", "def create_model(input_tensor, mode, hyper_params):\n model = {}\n with tf.variable_scope('vgg16') as scope:\n net = tf.cast(input_tensor[\"image\"], dtype=tf.float32, name=\"input/cast\")\n model[\"image\"] = net\n mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\n net = net - mean\n model[\"image-normalized\"] = net\n\n net = _create_conv2_block(model, net, filters=64, layer_number=1)\n net = _create_conv2_block(model, net, filters=128, layer_number=2)\n net = _create_conv3_block(model, net, filters=256, layer_number=3)\n net = _create_conv3_block(model, net, filters=512, layer_number=4)\n net = _create_conv3_block(model, net, filters=512, layer_number=5)\n print(net.get_shape())\n\n if not hyper_params.vgg16.encoder_only:\n net = tf.layers.conv2d(inputs=net, filters=4096, kernel_size=(7, 7), strides=(1, 1), name=\"fc1\", activation=tf.nn.relu)\n model[\"vgg16/fc1\"] = net\n net = tf.layers.conv2d(inputs=net, 
filters=4096, kernel_size=(1, 1), strides=(1, 1), name=\"fc2\", activation=tf.nn.relu)\n model[\"vgg16/fc2\"] = net\n net = tf.layers.conv2d(inputs=net, filters=1000, kernel_size=(1, 1), strides=(1, 1), name=\"logits\", activation=None)\n model[\"logits\"] = net\n net = tf.nn.softmax(net)\n model[\"probs\"] = net\n return model", "def create_tcnn_bottom(vgg_weights_path, conv1_1_weigths):\r\n\t# Create inputs for the 5 frames\r\n\tinput_shape=(224, 224, 3)\r\n\tframe1 = Input(shape=input_shape)\r\n\tframe2 = Input(shape=input_shape)\r\n\tframe3 = Input(shape=input_shape)\r\n\tframe4 = Input(shape=input_shape)\r\n\tframe5 = Input(shape=input_shape)\r\n\t\r\n\t# Convolution for each frame\r\n\tframe1_conv = ZeroPadding2D((1,1))(frame1)\r\n\tframe1_conv = Convolution2D(64, (3, 3), activation='relu', name='conv1_1a')(frame1_conv)\r\n\r\n\tframe2_conv = ZeroPadding2D((1,1))(frame2)\r\n\tframe2_conv = Convolution2D(64, (3, 3), activation='relu', name='conv1_1b')(frame2_conv)\r\n\r\n\tframe3_conv = ZeroPadding2D((1,1))(frame3)\r\n\tframe3_conv = Convolution2D(64, (3, 3), activation='relu', name='conv1_1c')(frame3_conv)\r\n\r\n\tframe4_conv = ZeroPadding2D((1,1))(frame4)\r\n\tframe4_conv = Convolution2D(64, (3, 3), activation='relu', name='conv1_1d')(frame4_conv)\r\n\r\n\tframe5_conv = ZeroPadding2D((1,1))(frame5)\r\n\tframe5_conv = Convolution2D(64, (3, 3), activation='relu', name='conv1_1e')(frame5_conv)\r\n\t\r\n\t# Temporal aggregation by averaging\r\n\ttemp_aggr = average([frame1_conv, frame2_conv, frame3_conv, frame4_conv, frame5_conv])\r\n\r\n\t# Then standard VGG-16 architecture\r\n\toutput = ZeroPadding2D((1,1))(temp_aggr)\r\n\toutput = Convolution2D(64, (3, 3), activation='relu', name='conv1_2')(output)\r\n\toutput = MaxPooling2D((2,2), strides=(2,2))(output)\r\n\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(128, (3, 3), activation='relu', name='conv2_1')(output)\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(128, (3, 3), activation='relu', name='conv2_2')(output)\r\n\toutput = MaxPooling2D((2,2), strides=(2,2))(output)\r\n\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(256, (3, 3), activation='relu', name='conv3_1')(output)\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(256, (3, 3), activation='relu', name='conv3_2')(output)\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(256, (3, 3), activation='relu', name='conv3_3')(output)\r\n\toutput = MaxPooling2D((2,2), strides=(2,2))(output)\r\n\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(512, (3, 3), activation='relu', name='conv4_1')(output)\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(512, (3, 3), activation='relu', name='conv4_2')(output)\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(512, (3, 3), activation='relu', name='conv4_3')(output)\r\n\toutput = MaxPooling2D((2,2), strides=(2,2))(output)\r\n\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(512, (3, 3), activation='relu', name='conv5_1')(output)\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(512, (3, 3), activation='relu', name='conv5_2')(output)\r\n\toutput = ZeroPadding2D((1,1))(output)\r\n\toutput = Convolution2D(512, (3, 3), activation='relu', name='conv5_3')(output)\r\n\toutput = MaxPooling2D((2,2), strides=(2,2))(output)\r\n\r\n\tinputs = [frame1, frame2, frame3, frame4, frame5]\r\n\tmodel = Model(inputs=inputs, outputs=output)\r\n\t\r\n\t# 
load VGG-face weigths\r\n\tmodel.load_weights(vgg_weights_path, by_name=True)\r\n\tfor layer in ['conv1_1a', 'conv1_1b', 'conv1_1c', 'conv1_1d', 'conv1_1e']:\r\n\t\tmodel.get_layer(layer).set_weights(conv1_1_weigths)\r\n\r\n\treturn model", "def vgg13_bn(tensorized, **kwargs):\n return _vgg('vgg13_bn', 'B', True, tensorized, **kwargs)", "def vgg19_bn(pretrained='', progress=True, **kwargs):\n\n model = _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)\n if len(pretrained) > 0:\n state_dict = torch.load(pretrained)['state_dict']\n state_dict = {k.replace('module.',''):v for k, v in state_dict.items()}\n #print(state_dict.keys())\n #print(model.state_dict().keys())\n sofar = 0\n newstate ={}\n for k,v in state_dict.items():\n if 'classifier' in k:\n break\n parts = k.split('.')\n newname = '.'.join([parts[0], str(int(parts[1]) + sofar), parts[2]])\n newstate[newname] = v\n #print(k,' to ', newname, ' sofar ', sofar)\n if 'running_var' in k:\n sofar+=1\n\n newstate['classifier.0.weight'] = state_dict['classifier.weight']\n newstate['classifier.0.bias'] = state_dict['classifier.bias']\n #print(newstate.keys())\n model.load_state_dict(newstate, strict=False)\n\n return model", "def vgg11(tensorized, **kwargs):\n return _vgg('vgg11', 'A', False, tensorized, **kwargs)", "def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n num_frames_t=num_frames\n num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n feature_size = model_input.get_shape().as_list()[2]\n iterations=5#150\n if FLAGS.is_train: \n iterations=120\n model_input = utils.SampleRandomFrames(model_input[:,15:,:], num_frames-15-15,\n iterations)\n # iterations=50\n # model_input=model_input[:,20:-30:5,:]\n model_input=model_input+tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=1e-3, dtype=tf.float32)\n\n # print('model_input is', model_input)\n # print('vocab_size is',vocab_size)\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n\n video_attention = AttentionLayers(1024,iterations,256)#256\n audio_attention = AttentionLayers(128,iterations,256/4)#256/4\n\n model_input = slim.batch_norm(\n model_input,\n center=True,\n scale=True,\n is_training=True,\n scope=\"model_input_bn\")\n\n with tf.variable_scope(\"video_Attention\"):\n attention_video = video_attention.forward(model_input[:,:,0:1024]) \n # print('vlad_video is',vlad_video)\n with tf.variable_scope(\"audio_Attention\"):\n attention_audio = audio_attention.forward(model_input[:,:,1024:])\n\n pooled=tf.concat([attention_video,attention_audio],axis=1)\n #instance_att#tf.reduce_mean(pooledi,axis=1)\n\n print('pooled is',pooled)\n\n dr2 = tf.get_variable(\"dr2\",\n [feature_size,1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n pooled=tf.matmul(pooled,dr2)\n\n pooled = slim.batch_norm(\n pooled,\n center=True,\n scale=True,\n is_training=True,\n scope=\"pooled_bn\")\n\n gating_weights = tf.get_variable(\"gating_weights_2\",\n [1024, 1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(1024))) \n gates = tf.matmul(pooled, gating_weights) \n gates = slim.batch_norm(\n gates,\n center=True,\n scale=True,\n is_training=True,\n scope=\"gating_bn\")\n gates = tf.sigmoid(gates)\n pooled = tf.multiply(pooled,gates)\n\n return aggregated_model().create_model(\n model_input=pooled, vocab_size=vocab_size, **unused_params)", "def vgg19_bn(tensorized, **kwargs):\n return _vgg('vgg19_bn', 'E', True, tensorized, **kwargs)", "def 
vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)", "def vgg19(tensorized, **kwargs):\n return _vgg('vgg19', 'E', False, tensorized, **kwargs)", "def load_vgg_model(self):\n vgg = scipy.io.loadmat(self.model_path)\n self.vgg_layers = vgg['layers'][0]", "def create():\n with torch.set_grad_enabled(False):\n model = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"vgg11\", pretrained=True).eval()\n\n with_cuda = torch.cuda.is_available()\n if with_cuda:\n model.to(\"cuda\")\n else:\n logging.warn(\"Running on CPU, no CUDA detected.\")\n\n def call(features):\n images = features[\"image\"].numpy()\n # Normalize according to the documentation. Note that the pre-processing\n # will already have the range normalized to [0, 1].\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n images_normalized = (images - mean) / std\n # Reshape from [batch, h, w, c] -> [batch, c, h, w]\n images_normalized_bchw = np.transpose(\n images_normalized, [0, 3, 1, 2]).astype(np.float32).copy()\n with torch.no_grad():\n images_torch = torch.from_numpy(images_normalized_bchw)\n if with_cuda:\n images_torch = images_torch.to(\"cuda\")\n logits = model(images_torch)\n return torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()\n\n preprocess_config = \"resize_small(256)|central_crop(224)|value_range(0,1)\"\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n return call, preprocess_fn", "def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)", "def build_examples():\n build_models([\n \"VGG_16\",\n \"VGG_19\",\n \"RESNET_50\",\n \"MOBILENET\",\n #\"INCEPTION_V3\",\n #\"INCEPTION_RESNET\",\n #\"DENSENET_121\",\n #\"DENSENET_169\",\n #\"DENSENET_201\"])\n ])", "def VGG19(include_top=True, weights='imagenet',\n input_tensor=None, input_shape=None,\n pooling=None,\n classes=1000):\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n # Determine proper input shape\n\n\n img_input = input_tensor\n # Block 1\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', 
name='block4_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)\n\n model = Model(img_input, x, name='vgg19')\n\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n file_hash='cbe5617147190e668d6c5d5026f83318')\n else:\n weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n file_hash='253f8cb515780f3b799900260a226db6')\n model.load_weights(weights_path)\n\n return model", "def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)", "def __init__(self, vgg_net):\n super().__init__()\n # create a conv layer that corresponds to the first linear layer\n linear1 = vgg_net.classifier[0]\n conv = nn.Conv2d(512, 4096, 7, 7)\n\n # copy data into it\n conv.bias.data.copy_(linear1.bias.data)\n conv.weight.data.view(4096, -1).copy_(linear1.weight.data)\n\n # replace the layer in the sequential classifier part\n vgg_net.classifier = nn.Sequential(\n conv, nn.Flatten(1), *vgg_net.classifier[1:]\n )\n\n self.vgg_net = vgg_net", "def VGGFace(input_shape=(224, 224, 3), n_classes=10, include_top=True):\n # Create the Tensor\n input = Input(shape=input_shape)\n\n # Block 1\n # 1st Convolutional Layer\n x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='block1_conv1')(input)\n x = Activation('relu', name='block1_relu1')(x)\n\n # 2nd Convolutional Layer\n x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='block1_conv2')(x)\n x = Activation('relu', name='block1_relu2')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n # 3rd Convolutional Layer\n x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='block2_conv1')(x)\n x = Activation('relu', name='block2_relu1')(x)\n\n # 4th Convolutional Layer\n x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='block2_conv2')(x)\n x = Activation('relu', name='block2_relu2')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n # 5th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv1')(x)\n x = Activation('relu', name='block3_relu1')(x)\n\n # 6th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv2')(x)\n x = Activation('relu', name='block3_relu2')(x)\n\n # 7th Convolutional Layer\n x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='block3_conv3')(x)\n x = Activation('relu', name='block3_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n # 8th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv1')(x)\n x = Activation('relu', name='block4_relu1')(x)\n\n # 9th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', 
name='block4_conv2')(x)\n x = Activation('relu', name='block4_relu2')(x)\n\n # 10th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block4_conv3')(x)\n x = Activation('relu', name='block4_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n # 11th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv1')(x)\n x = Activation('relu', name='block5_relu1')(x)\n\n # 12th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv2')(x)\n x = Activation('relu', name='block5_relu2')(x)\n\n # 13th Convolutional Layer\n x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='block5_conv3')(x)\n x = Activation('relu', name='block5_relu3')(x)\n x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block5_pool')(x)\n\n # Block 6\n # 14th Convulation Layer\n x = Conv2D(4096, (7, 7), strides=(1, 1), name='fc1_conv1')(x)\n x = Activation('relu', name='fc1_relu1')(x)\n x = Dropout(0.5)(x)\n\n # 15th Convulation Layer\n x = Conv2D(4096, (1, 1), strides=(1, 1), name='fc2_conv1')(x)\n x = Activation('relu', name='fc2_relu2')(x)\n x = Dropout(0.5, name='fc2_dropout')(x)\n\n # 16th Convulation Layer\n x = Conv2D(2622, (1, 1), strides=(1, 1), name='fc3_conv1')(x)\n x = Flatten(name='fc3_flatten')(x)\n\n if include_top:\n # Output Layer\n x = Activation('softmax', name='predictions_softmax')(x)\n\n # Create model\n model = keras.models.Model(input, x, name='vggface')\n return model", "def VGG16(include_top=True, weights='imagenet',\n input_tensor=None, input_shape=None,\n pooling=None,\n classes=1000):\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n # Determine proper input shape\n input_shape = _obtain_input_shape(input_shape,\n default_size=224,\n min_size=48,\n data_format=K.image_data_format(),\n include_top=include_top)\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n # Block 1\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n x = 
MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n if include_top:\n # Classification block\n x = Flatten(name='flatten')(x)\n x = Dense(4096, activation='relu', name='fc1')(x)\n x = Dense(4096, activation='relu', name='fc2')(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='vgg16')\n\n # load weights\n if weights == 'imagenet':\n # if include_top:\n # weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',\n # WEIGHTS_PATH,\n # cache_subdir='models')\n # else:\n # weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',\n # WEIGHTS_PATH_NO_TOP,\n # cache_subdir='models')\n if include_top:\n weights_path = '../model/vgg16_weights_tf_dim_ordering_tf_kernels.h5'\n else:\n weights_path = '../model/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5' \n\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='block5_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). 
'\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def __init__(self, output_channels, config=None, verbose=False):\n super(VGGDecoder, self).__init__()\n # self.decoder = utils.VGG16utils.make_decoder_layers(batch_norm=batch_norm)\n self.decoder = nn.ModuleList()\n vgg16_decoder_blocks = (3, 3, 3, 2, 2)\n if config is None:\n config = VGGDecoder.vgg_16_config\n config = config + [output_channels] # config[::-1]\n\n for i, n_blocks in enumerate(vgg16_decoder_blocks):\n self.decoder.append(Decoder_block(n_blocks, config[i], config[i+1], i==(len(vgg16_decoder_blocks)-1)))\n\n\n self.verbose = verbose\n if verbose:\n print(self.decoder)", "def __init__(self, options):\r\n nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.basemodel = torchvision.models.resnet18(pretrained=True)\r\n self.options = options\r\n\r\n #label\r\n self.label_primary = nn.Linear(options['primary_dim'], options['proj_dim'])\r\n self.label_dual = nn.Linear(options['dual_dim'], options['proj_dim'])\r\n\r\n #classifer/regressor\r\n self.fc_primary = nn.Linear(512 + options['proj_dim'], options['primary_dim'])\r\n self.fc_dual = nn.Linear(512 + options['proj_dim'], options['dual_dim'])\r\n\r\n\r\n if self.options['fc'] == True:\r\n # Freeze all previous layers.\r\n for param in self.basemodel.parameters():\r\n param.requires_grad = False\r\n # Initialize the fc layers.\r\n nn.init.kaiming_normal_(self.fc_primary.weight.data)\r\n if self.fc_primary.bias is not None:\r\n nn.init.constant_(self.fc_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.fc_dual.weight.data)\r\n if self.fc_dual.bias is not None:\r\n nn.init.constant_(self.fc_dual.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_primary.weight.data)\r\n if self.label_primary.bias is not None:\r\n nn.init.constant_(self.label_primary.bias.data, val=0)\r\n\r\n nn.init.kaiming_normal_(self.label_dual.weight.data)\r\n if self.label_dual.bias is not None:\r\n nn.init.constant_(self.label_dual.bias.data, val=0)\r\n\r\n\r\n else:\r\n for param in self.basemodel.conv1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.bn1.parameters():\r\n param.requires_grad = False\r\n for param in self.basemodel.layer1.parameters():\r\n param.requires_grad = False\r\n #for param in self.basemodel.layer2.parameters():\r\n # param.requires_grad = False\r\n #for param in self.basemodel.layer3.parameters():\r\n # param.requires_grad = False\r", "def VGG(model_type='D', dropout=0.5, num_classes=1000, input_shape=(224, 224, 3)):\n \n model_type = model_type.upper()\n assert model_type in ['A', 'B', 'C', 'D', 'E'], \"Invalid value of 'model_type'\\n\" \\\n \"It should be one of {'A', 'B', 'C', 'D', 'E'}\"\n \n assert (type(input_shape) == tuple), \"Invalid value of 'input_shape'\\n\" \\\n \"It should be of form (<image_height>, <image_width>, <channels>)\"\n \n assert (len(input_shape) == 3), \"Invalid value of 'input_shape'\\n\" \\\n \"It should be of form (<image_height>, <image_width>, <channels>)\"\n \n if(dropout != None):\n assert ((type(dropout) == float) and (0 <= dropout <= 1)), \"Invalid value of 'dropout'.\\n\" \\\n \"It should be a real number between 0 and 1 (inclusive).\"\n\n # Implementing the model\n model = Sequential()\n\n model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu', input_shape=input_shape))\n if model_type in ['B', 'C', 'D', 'E']:\n 
model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))\n\n model.add(Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['B', 'C', 'D', 'E']:\n model.add(Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))\n\n model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['C']:\n model.add(Conv2D(filters=256, kernel_size=(1, 1), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['D', 'E']:\n model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['E']:\n model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))\n\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['C']:\n model.add(Conv2D(filters=512, kernel_size=(1, 1), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['D', 'E']:\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['E']:\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))\n\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['C']:\n model.add(Conv2D(filters=512, kernel_size=(1, 1), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['D', 'E']:\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n if model_type in ['E']:\n model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', \n activation='relu'))\n model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))\n\n model.add(Flatten())\n model.add(Dense(units=4096, activation='relu'))\n if(dropout != None):\n model.add(Dropout(dropout))\n model.add(Dense(units=4096, activation='relu'))\n if(dropout != None):\n model.add(Dropout(dropout))\n model.add(Dense(units=num_classes, activation='softmax'))\n\n return model", "def build_model(\n data_tensor,\n reuse,\n training,\n output_shape,\n data_format='NHWC'):\n if isinstance(output_shape, list):\n output_shape = output_shape[-1]\n elif isinstance(output_shape, dict):\n output_shape = output_shape['output']\n # norm_moments_training = training # Force instance norm\n # normalization_type = 'no_param_batch_norm_original'\n # output_normalization_type = 'batch_norm_original_renorm'\n output_normalization_type = 'instance_norm'\n data_tensor, long_data_format = tf_fun.interpret_data_format(\n data_tensor=data_tensor,\n data_format=data_format)\n\n # Build model\n with tf.variable_scope('vgg', reuse=reuse):\n vgg 
= vgg16.Vgg16(\n vgg16_npy_path='/media/data_cifs/clicktionary/pretrained_weights/vgg16.npy')\n vgg(rgb=data_tensor, train=training, ff_reuse=reuse)\n\n with tf.variable_scope('fgru', reuse=reuse):\n # Get side weights\n h2_rem = [\n vgg.conv1_2,\n vgg.conv2_2,\n vgg.conv3_3,\n vgg.conv4_3,\n vgg.conv5_3]\n res_act = []\n for idx, h in enumerate(h2_rem):\n res = normalization.apply_normalization(\n activity=h,\n name='output_norm1_%s' % idx,\n normalization_type=output_normalization_type,\n data_format=data_format,\n training=training,\n trainable=training,\n reuse=reuse)\n res_act += [tf.image.resize_bilinear(\n res,\n data_tensor.get_shape().as_list()[1:3],\n align_corners=True)]\n\n activity = tf.layers.conv2d(\n tf.concat(res_act, -1),\n filters=output_shape,\n kernel_size=(1, 1),\n padding='same',\n data_format=long_data_format,\n name='out',\n activation=None,\n trainable=training,\n use_bias=True,\n reuse=reuse)\n\n if long_data_format is 'channels_first':\n activity = tf.transpose(activity, (0, 2, 3, 1))\n extra_activities = {} # idx: v for idx, v in enumerate(hs_0)}\n if activity.dtype != tf.float32:\n activity = tf.cast(activity, tf.float32)\n return activity, extra_activities", "def build_model_mobilenet(num_classes):" ]
[ "0.7914276", "0.7741606", "0.7713254", "0.7710423", "0.766079", "0.7593987", "0.75367546", "0.7439226", "0.7426811", "0.7363272", "0.7344508", "0.73206055", "0.7303242", "0.7245254", "0.72279394", "0.7205576", "0.71928155", "0.7183653", "0.71666634", "0.7141517", "0.7109456", "0.70939076", "0.70614254", "0.7030613", "0.7020857", "0.7020053", "0.6972419", "0.6921092", "0.6911103", "0.6910915", "0.6888268", "0.68836284", "0.68780524", "0.68632066", "0.68371856", "0.683429", "0.6813092", "0.68126047", "0.68021137", "0.67562854", "0.67520344", "0.6748414", "0.674047", "0.67399764", "0.6730413", "0.672024", "0.67058736", "0.66766256", "0.6661883", "0.6640817", "0.6624495", "0.6613827", "0.6608172", "0.6607683", "0.6605486", "0.65873617", "0.6586372", "0.6582427", "0.6561019", "0.6544204", "0.6540217", "0.6509102", "0.649872", "0.6488114", "0.6487355", "0.6478758", "0.64771605", "0.6473767", "0.6468423", "0.6449423", "0.64407355", "0.64362806", "0.64112544", "0.63996106", "0.63871586", "0.6370049", "0.63588417", "0.63539946", "0.6336382", "0.6334495", "0.63289535", "0.6328472", "0.62839603", "0.62676114", "0.62608206", "0.62578636", "0.6233674", "0.6219503", "0.6214805", "0.620876", "0.6204233", "0.61974984", "0.6195124", "0.6177313", "0.6177186", "0.6173819", "0.6166148", "0.61614597", "0.6157163", "0.614645" ]
0.7155615
19
This function helps find the coordinates of parallel lines. It uses an orthogonal vector to work out the x and y offsets for lines parallel to the segment (x1,y1) -> (x2,y2), separated by a given magnitude.
def offset(x1,y1,x2,y2,magnitude):\r\n\tnorm = math.sqrt((y2-y1)**2 + (x1-x2)**2) / magnitude\r\n\toffset_x = (y2-y1)/norm\r\n\toffset_y = (x1-x2)/norm\r\n\treturn offset_x, offset_y
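A minimal usage sketch of the offset function above, assuming made-up endpoint coordinates and only the standard math module. The returned pair is an orthogonal vector of the requested length, so adding it to both endpoints yields one parallel segment and subtracting it yields the mirror segment on the other side:

import math

def offset(x1, y1, x2, y2, magnitude):
    # (y2-y1, x1-x2) is orthogonal to the segment direction (x2-x1, y2-y1);
    # dividing by norm rescales it to exactly `magnitude` units long.
    norm = math.sqrt((y2 - y1) ** 2 + (x1 - x2) ** 2) / magnitude
    return (y2 - y1) / norm, (x1 - x2) / norm

# Segment from (0, 0) to (4, 0); a parallel line 2 units away:
dx, dy = offset(0, 0, 4, 0, 2)                    # -> (0.0, -2.0)
parallel = [(0 + dx, 0 + dy), (4 + dx, 0 + dy)]   # [(0.0, -2.0), (4.0, -2.0)]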
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def direction_coordinates(self, gc_lines):\n lins = [(_line[0][mid], _line[0][mid + 1], _line[1][mid], _line[1][mid + 1])\n for _line, mid in zip(gc_lines, [len(_line[0]) // 2 for _line in gc_lines])\n if len(_line[0]) > 2]\n lens = [np.hypot(_line[0][0] - _line[0][-1], _line[0][0] - _line[0][-1]) * 110.\n for _line in gc_lines\n if len(_line[0]) > 2]\n lins = [(x0 * np.cos(np.deg2rad(np.mean([y0, y1]))), x1 * np.cos(np.deg2rad(np.mean([y0, y1]))), y0, y1)\n for x0, x1, y0, y1 in lins]\n lins = [_x for _x, _l in zip(lins, lens) if _l > 10]\n\n direction = [(0.5 * (x0 + x1), 0.5 * (y0 + y1), x1 - x0, y1 - y0) for x0, x1, y0, y1 in lins]\n direction = [(_u, _v, _x / np.hypot(_x, _y), _y / np.hypot(_x, _y))\n for _u, _v, _x, _y in direction]\n los = [rotate_point(point[2:], -self.dsbObsAngleAzimuth.value()) for point in direction]\n\n dist = 1.\n tp_dir = (np.array(los).T * dist).T\n\n tps = [(x0, y0, x0 + tp_x, y0 + tp_y) for\n ((x0, y0, _, _), (tp_x, tp_y)) in zip(direction, tp_dir)]\n tps = [[(x0 / np.cos(np.deg2rad(y0)), y0), (x1 / np.cos(np.deg2rad(y0)), y1)] for (x0, y0, x1, y1) in tps]\n return tps", "def connect_lines(horizontal_lines, vertical_lines):\n horizontal = []\n vertical = []\n\n for x1,y1,x2,y2 in horizontal_lines:\n closest_vertical_left = 20000\n closest_vertical_right = 20000\n for v_x1,v_y1,v_x2,v_y2 in vertical_lines:\n if abs(x1 - v_x1) < abs(closest_vertical_left):\n closest_vertical_left = x1 - v_x1\n if abs(x2 - v_x1) < abs(closest_vertical_right):\n closest_vertical_right = x2 - v_x1\n x1 = x1 - closest_vertical_left\n x2 = x2 - closest_vertical_right\n horizontal.append((x1,y1,x2,y2))\n\n for x1,y1,x2,y2 in vertical_lines:\n closest_horizontal_up = 20000\n closest_horizontal_down = 20000\n for h_x1,h_y1,h_x2,h_y2 in horizontal_lines:\n if abs(y1 - h_y1) < abs(closest_horizontal_up):\n closest_horizontal_up = y1 - h_y1\n if abs(y2 - h_y1) < abs(closest_horizontal_down):\n closest_horizontal_down = y2 - h_y1\n y1 = y1 - closest_horizontal_up\n y2 = y2 - closest_horizontal_down\n vertical.append((x1,y1,x2,y2))\n\n return (horizontal, vertical)", "def expand_line(x0,y0,x1,y1,nx,ny,edge=6):\n def d2(x0,y0,x1,y1):\n \"\"\"squared distance between two points\"\"\"\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)\n def inside(x,e,n):\n \"\"\"return if x is within e and n-e-1\n \"\"\"\n if x < e: return False\n if x > n-e-1: return False\n return True\n # bypass everything\n if False:\n return [x0,y0,x1,y1]\n # pathetic cases\n if x0==x1: return [x0, edge, x1, ny-1-edge]\n if y0==y1: return [edge, y0, nx-1-edge, y1]\n # slope and center point of line\n a = (y1-y0)/(x1-x0)\n xc = (x0+x1)/2.0\n yc = (y0+y1)/2.0\n # intersections with the box vertices\n x_e = xc + (edge-yc)/a\n y_e = yc + a*(edge-xc)\n x_n = xc + (ny-edge-1-yc)/a\n y_n = yc + a*(nx-edge-1-xc)\n print \"x,y(0) x,y(1):\",x0,y0,x1,y1\n print \"x,y(e) x,y(n):\",x_e,y_e,x_n,y_n\n e = []\n if inside(x_e,edge,nx): \n e.append(x_e)\n e.append(edge)\n if inside(y_e,edge,ny):\n e.append(edge)\n e.append(y_e)\n if inside(x_n,edge,nx):\n e.append(x_n)\n e.append(ny-edge-1)\n if inside(y_n,edge,ny):\n e.append(nx-edge-1)\n e.append(y_n)\n if len(e) != 4:\n # can happen for small maps?\n msg = \"Math Error in expand_line: \",e\n raise Exception,msg\n return e", "def lineLineIntersectXY(l1,l2,inside=True,params=False):\n\n x1=l1[0][0]\n y1=l1[0][1]\n z1=l1[0][2]\n \n x2=l1[1][0]\n y2=l1[1][1]\n z2=l1[1][2]\n\n x3=l2[0][0]\n y3=l2[0][1]\n z3=l2[0][2]\n \n x4=l2[1][0]\n y4=l2[1][1]\n z4=l2[1][2]\n\n ## check for x,y planar 
consistency\n if abs(z2-z1) > epsilon or abs(z3-z1) > epsilon or abs(z4-z1) > epsilon:\n raise ValueError('lines not in same x-y plane')\n\n ## do lines intersect anywhere?\n denom=(x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)\n if denom*denom < epsilon:\n return False\n\n ## the lines do intersect, so let's see if they intersect\n ## inside both line segments\n t = ((x1-x3)*(y3-y4) - (y1-y3)*(x3-x4))/denom\n u = -1 * ((x1-x2)*(y1-y3) - (y1-y2)*(x1-x3))/denom\n\n ## return the paramater space intersection\n if params:\n return [t,u]\n \n ## do we care about falling inside the line segments? if so,\n ## check that the intersection falls within\n if inside and ( t < 0.0 or t > 1.0 or u < 0.0 or u > 1.0):\n return False\n\n return [x1 + t*(x2-x1), y1+t*(y2-y1), z1, 1.0]", "def ortho_line_cut(self):\n x_mid_left, y_mid_left = self.midpoint(0,1) # Computes the mid point of the LHS face of the edm cut\n x_mid_right, y_mid_right = self.midpoint(2,3) # Computes the mid point of the RHS face of the edm cut\n\n ave_grad = self.average_grad()\n m_horizontal = -1/ave_grad #90 degrees rotation of the vertical line average gradient\n\n horizontal_eq_c = y_mid_right - m_horizontal*x_mid_right # y offset of horizontal line\n vertical_eq_left_c = y_mid_left - ave_grad * x_mid_left # y offset of vertical line on left side\n\n x_intersect, y_intersect = self.intersect_point(m_horizontal, horizontal_eq_c, ave_grad,vertical_eq_left_c)\n\n\n coordleft = [x_intersect, y_intersect]\n coordright =[x_mid_right, y_mid_right]\n\n dist = self.distance(coordleft, coordright)\n\n return coordleft, coordright, dist", "def line(x1, y1, x2, y2):\r\n\r\n x1 = normalize(x1)\r\n y1 = normalize(y1)\r\n x2 = normalize(x2)\r\n y2 = normalize(y2)\r\n\r\n xdiff = max(x1, x2) - min(x1, x2)\r\n ydiff = max(y1, y2) - min(y1, y2)\r\n xdir = 1 if x1 <= x2 else -1\r\n ydir = 1 if y1 <= y2 else -1\r\n\r\n r = max(xdiff, ydiff)\r\n\r\n for i in range(r+1):\r\n x = x1\r\n y = y1\r\n\r\n if ydiff:\r\n y += (float(i) * ydiff) / r * ydir\r\n if xdiff:\r\n x += (float(i) * xdiff) / r * xdir\r\n\r\n yield (x, y)", "def findExtremeLines(lines):\r\n\r\n leftV = [[1000, 1000]]\r\n rightV = [[-1000, -1000]]\r\n topH = [[1000, 1000]]\r\n bottomH = [[-1000, -1000]]\r\n leftX = 100000\r\n rightX = 0\r\n\r\n for line in lines:\r\n\r\n rho = line[0][0]\r\n theta = line[0][1]\r\n\r\n xIntercept = rho / np.cos(theta)\r\n\r\n # Line is horizontal\r\n if theta > np.pi * 45 / 180 and theta < np.pi * 135 / 180:\r\n if rho < topH[0][0]:\r\n topH = line\r\n if rho > bottomH[0][0]:\r\n bottomH = line\r\n\r\n # Line is vertical\r\n else:\r\n if xIntercept > rightX:\r\n rightV = line\r\n rightX = xIntercept\r\n elif xIntercept <= leftX:\r\n leftV = line\r\n leftX = xIntercept\r\n\r\n return [[leftV, rightV], [topH, bottomH]]", "def _line_from_two_points(pt1: np.array, pt2: np.array) -> np.array:\n numLine = pt1.shape[0]\n lines = np.zeros((numLine, 6))\n n = np.cross(pt1, pt2)\n n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)\n lines[:, 0:3] = n\n\n areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))\n areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))\n areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))\n planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1\n lines[:, 3] = planeIDs\n\n for i in range(numLine):\n uv = _xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])\n umax = uv[:, 0].max() + np.pi\n umin = uv[:, 0].min() + 
np.pi\n if umax - umin > np.pi:\n lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi\n else:\n lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi\n\n return lines", "def get_points_for_thick_line(start_x: float, start_y: float,\r\n end_x: float, end_y: float,\r\n line_width: float):\r\n vector_x = start_x - end_x\r\n vector_y = start_y - end_y\r\n perpendicular_x = vector_y\r\n perpendicular_y = -vector_x\r\n length = math.sqrt(vector_x * vector_x + vector_y * vector_y)\r\n if length == 0:\r\n normal_x = 1.0\r\n normal_y = 1.0\r\n else:\r\n normal_x = perpendicular_x / length\r\n normal_y = perpendicular_y / length\r\n r1_x = start_x + normal_x * line_width / 2\r\n r1_y = start_y + normal_y * line_width / 2\r\n r2_x = start_x - normal_x * line_width / 2\r\n r2_y = start_y - normal_y * line_width / 2\r\n r3_x = end_x + normal_x * line_width / 2\r\n r3_y = end_y + normal_y * line_width / 2\r\n r4_x = end_x - normal_x * line_width / 2\r\n r4_y = end_y - normal_y * line_width / 2\r\n points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)\r\n return points", "def vincenty(lat1, lon1, lat2, lon2,\n r_major=6378.1370, r_minor=6356.752314, r_sphere=None):\n lat1 = m.radians(lat1)\n lat2 = m.radians(lat2)\n lon1 = m.radians(lon1)\n lon2 = m.radians(lon2)\n \n if (r_sphere is not None):\n r_major = r_sphere\n r_minor = r_sphere\n f = 0.0\n else:\n f = (r_major-r_minor)/r_major\n \n U1 = m.atan((1.0-f) * m.tan(lat1))\n U2 = m.atan((1.0-f) * m.tan(lat2))\n L = lon2 - lon1\n \n epsilon = 1E-12 # Accuracy (10E-12 -> ~ 0.06mm)\n max_iter = 500\n lam = L\n \n cU1 = m.cos(U1)\n cU2 = m.cos(U2)\n sU1 = m.sin(U1)\n sU2 = m.sin(U2)\n \n for i in range(max_iter):\n lam_old = lam\n sLam = m.sin(lam)\n cLam = m.cos(lam)\n sin_sig = m.sqrt((cU2*sLam)**2 + (cU1*sU2 - sU1*cU2*cLam)**2)\n cos_sig = sU1*sU2 + cU1*cU2*cLam\n sig = m.atan2(sin_sig,cos_sig)\n sin_alp = (cU1*cU2*sLam) / sin_sig\n cos2_alp = 1.0 - sin_alp**2\n if (cos2_alp == 0.0):\n # equitorial line\n cos_2sigm = 100\n C = 0.0\n else:\n cos_2sigm = cos_sig - (2.0*sU1*sU2)/cos2_alp\n C = f/16.0 * cos2_alp * (4.0 + f*(4.0-3.0*cos2_alp))\n lam = L + (1.0 - C) * f * sin_alp * \\\n (sig + C * sin_sig * (cos_2sigm + C * cos_sig * \\\n (-1.0 + 2.0 * cos_2sigm**2)))\n if ((m.fabs(lam - lam_old)) <= epsilon):\n # Found a solution in i iters...\n break\n elif (i == max_iter):\n # Catch the out of iters case, never seen this.\n raise Exception(\"Failed to solve for distance\")\n \n usq = cos2_alp * ((r_major**2 - r_minor**2) / r_minor**2)\n A = 1 + usq/16384 * (4096 + usq*(-768 + usq*(320 - 175*usq)))\n B = usq/1024 * (256 + usq*(-128 + usq*(74 - 47*usq)))\n del_sig = B * sin_sig * (cos_2sigm + 0.25*B*(cos_sig*( \\\n -1 + 2*cos_2sigm**2) - (1.0/6.0)*B*cos_2sigm * ( \\\n -3 + 4*sin_sig**2) * (-3 + 4 * cos_2sigm**2)))\n s = r_minor * A * (sig - del_sig)\n alp1 = m.atan2(cU2*m.sin(lam),(cU1*sU2-sU1*cU2*m.cos(lam)))\n alp2 = m.atan2(cU1*m.sin(lam),(cU1*sU2*m.cos(lam)-sU1*cU2))\n\n return (s, m.degrees(alp1), m.degrees(alp2))", "def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three 
points lie in the same x,y plane\n    if not isXYPlanar([p,a,b]):\n        raise ValueError('non-XY points in linePointXY call')\n        return False\n    # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n    # return False\n\n    linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n    ## this is the fast case:\n    if not inside and distance:\n        return linedist\n    \n    ## find out where the intersection between the original line and a\n    ## line defined by the point and an orthogonal direction vector\n    ## is.  We do this by constructing two direction vectors\n    ## orthogonal to the original line scaled by the line distance,\n    ## and adding them to the point in question.  Assuming that the\n    ## line distance is not zero, only one of these constructed points\n    ## will fall on the line\n\n    ## compute unit direction vector for original line\n    dir = sub(b,a)\n    dir = scale3(dir,1.0/mag(dir))\n\n    ## compute two orthogonal direction vectors of length linedist\n    ordir1 = scale3(orthoXY(dir),linedist)\n    ordir2 = scale3(ordir1, -1.0)\n    \n    ## there are two possible intersection points\n    pi1 = add(p,ordir1)\n    pi2 = add(p,ordir2)\n\n    ## compute distances\n    d1pa = dist(a,pi1)\n    d1pb = dist(pi1,b)\n    d1 = d1pa+d1pb # \"triangle\" with pi1\n\n    d2pa = dist(a,pi2)\n    d2pb = dist(pi2,b)\n    d2 = d2pa+d2pb # \"triangle\" with pi2\n\n    ## the shortest \"triangle\" distance will signal the point that\n    ## is actually on the line, even if that point falls outside\n    ## the a,b line interval\n    \n    if params or not inside: # if we don't care about being inside the\n                             # line segment\n        if d1 <= d2:\n            if distance:\n                return d1\n            elif params:\n                return d1pb/abdist\n            else:\n                return pi1\n        else:\n            if distance:\n                return d2\n            elif params:\n                return d2pb/abdist\n            else:\n                return pi2\n        \n        \n    ## if the closest point on the line to point p lies between\n    ## the endpoints of the line, then either d1 or d2 will equal\n    ## abdist.  If neither does, then we know that the closest point lies\n    ## outside the endpoints\n\n    if abs(d1-abdist) < epsilon:\n        if distance:\n            return linedist\n        else:\n            return pi1\n\n    if abs(d2-abdist) < epsilon:\n        if distance:\n            return linedist\n        else:\n            return pi2\n\n    ## closest point is outside the interval. 
That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b", "def reduce_lines(input_horizontal, input_vertical, min_distance):\n\n seen_vertical = set()\n seen_horizontal = set()\n output_vertical = []\n output_horizontal = []\n\n # vertical\n for index, (x1,y1,x2,y2) in enumerate(input_vertical):\n if index in seen_vertical:\n continue\n x_values = [x1]\n for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_vertical):\n if other_index in seen_vertical:\n continue\n if (abs(x1 - x1_b) < min_distance):\n # if the end is further to the top, choose this end\n if (y2_b < y2):\n y2 = y2_b\n # if the start if further to the bottom, choose it\n if (y1_b > y1):\n y1 = y1_b\n\n x_values.append(x1_b)\n seen_vertical.add(other_index)\n\n # taking the average x value for all the lines to get the middle\n x = int(np.mean(x_values))\n output_vertical.append((x,y1,x,y2))\n\n #horizontal\n for index, (x1,y1,x2,y2) in enumerate(input_horizontal):\n if index in seen_horizontal:\n continue\n y_values = [y1, y2]\n for other_index, (x1_b,y1_b,x2_b,y2_b) in enumerate(input_horizontal):\n if other_index in seen_horizontal:\n continue\n if (abs(y1 - y1_b) < min_distance):\n # if the start if further to the left, choose this point\n if (x1_b < x1):\n x1 = x1_b\n # if the end is further to the right, choose it\n if (x2_b > x2):\n x2 = x2_b\n\n y_values += [y1_b, y2_b]\n seen_horizontal.add(other_index)\n\n # taking the average y value for all the lines to get the middle\n y = int(np.mean(y_values))\n output_horizontal.append((x1,y,x2,y))\n\n return (output_vertical, output_horizontal)", "def drawLine2P(x,y,xlims):\n \n xrange = np.arange(xlims[0],xlims[1],1)\n A = np.vstack([x, np.ones(len(x))]).T\n k, b = np.linalg.lstsq(A, y, rcond=None)[0]\n return [xrange, k*xrange + b]", "def vlinecomp(self):\n m_h, c_h = self.fitline(0,2) # Computes the equation for a line joining the points on the outside of the gear on opposites sides of the edm cut\n\n m_v_avg = self.average_grad() # Computes the average gradient of the constructed vertical line\n\n m_v_avg, c_v = self.line_through_point(m_v_avg,4) # Equation of line with average gradient though crack start point\n\n x_intersect,y_intersect = self.intersect_point(m_h, c_h, m_v_avg, c_v)\n\n coord_top = [x_intersect,y_intersect]\n coord_bot = [self.points[4, 0], self.points[4, 1]]\n\n distance = self.distance(coord_bot,coord_top)\n\n return coord_top, coord_bot, distance", "def offset(x, y, L):\n length = x.size\n offsetx = np.zeros((length, 2))\n offsety = np.zeros((length, 2))\n dx = np.zeros(length-1)\n dy = np.zeros(length-1)\n dxL = np.zeros(length-1)\n dyL = np.zeros(length-1)\n xl = np.zeros(length) # counterclockwise\n xr = np.zeros(length) # clockwise\n yl = np.zeros(length)\n yr = np.zeros(length)\n xl0 = np.zeros(length)\n xr0 = np.zeros(length)\n yl0 = np.zeros(length)\n yr0 = np.zeros(length) \n for i in range(0, length-1):\n dx[i] = x[i+1]-x[i]\n dy[i] = y[i+1]-y[i]\n for i in range(0, length-1):\n r = np.sqrt(dx[i]**2 + dy[i]**2)\n dxL[i] = dx[i]*L/r\n dyL[i] = dy[i]*L/r\n xl0[i] = -dyL[i] + x[i]\n yl0[i] = dxL[i] + y[i]\n xr0[i] = dyL[i] + x[i]\n yr0[i] = -dxL[i] + y[i]\n xl0[length-1] = xl0[length-2] + dx[length-2]\n yl0[length-1] = yl0[length-2] + dy[length-2]\n xr0[length-1] = xr0[length-2] + dx[length-2]\n yr0[length-1] = yr0[length-2] + 
dy[length-2]\n xl[0] = xl0[0]\n yl[0] = yl0[0]\n xl[length-1] = xl0[length-1]\n yl[length-1] = yl0[length-1]\n xr[0] = xr0[0]\n yr[0] = yr0[0]\n xr[length-1] = xr0[length-1]\n yr[length-1] = yr0[length-1]\n for i in range(1, length-1):\n a = np.array([[dy[i-1], -dx[i-1]], [dy[i], -dx[i]]])\n bl = np.array([dy[i-1]*xl0[i-1]-dx[i-1]*yl0[i-1], dy[i]*xl0[i]-dx[i]*yl0[i]])\n br = np.array([dy[i-1]*xr0[i-1]-dx[i-1]*yr0[i-1], dy[i]*xr0[i]-dx[i]*yr0[i]])\n theta = (dx[i-1]*dx[i]+dy[i-1]*dy[i])/(dx[i-1]**2+dy[i-1]**2)**0.5/(dx[i]**2+dy[i]**2)**0.5\n if theta > 1 - 1e-10:\n xl[i] = xl0[i]\n yl[i] = yl0[i]\n xr[i] = xr0[i]\n yr[i] = yr0[i]\n else:\n pl = np.linalg.solve(a, bl)\n xl[i] = pl[0]\n yl[i] = pl[1]\n pr = np.linalg.solve(a, br)\n xr[i] = pr[0]\n yr[i] = pr[1]\n offsetx[:, 0], offsetx[:, 1] = xl, xr\n offsety[:, 0], offsety[:, 1] = yl, yr\n return offsetx, offsety", "def points_on_lines(hyperplanes):\n intersections = []\n for row in hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. \n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n x = int(x[0])\r\n y = int(y[0])\r\n\r\n return [np.round(y), np.round(x)]", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def build_vertical_path_with_increments(coord1, coord2, horizontal_increment, vertical_increment, 
start_alt, end_alt,\n max_point):\n\n if not isinstance(coord1, GPSCoord):\n raise ValueError('Parameter coord1 have to be a GPSCoord')\n\n if not isinstance(coord2, GPSCoord):\n raise ValueError('Parameter coord2 have to be a GPSCoord')\n\n if coord1 == coord2:\n raise ValueError('The coordinates have the same position')\n\n horizontal_increment = float(horizontal_increment)\n vertical_increment = float(vertical_increment)\n start_alt = float(start_alt)\n end_alt = float(end_alt)\n max_point = int(max_point)\n\n base_line = build_line_with_increment(coord1, coord2, horizontal_increment, max_point, start_alt)\n\n result = []\n result.extend(base_line)\n\n current_alt = start_alt + vertical_increment\n do_reverse = True\n\n while len(result) < max_point and current_alt <= end_alt:\n if do_reverse:\n new_line = build_line_with_increment(coord2, coord1, horizontal_increment, max_point - len(result),\n current_alt)\n\n else:\n new_line = build_line_with_increment(coord1, coord2, horizontal_increment, max_point - len(result),\n current_alt)\n\n do_reverse = not do_reverse\n result.extend(new_line)\n current_alt += vertical_increment\n\n return result", "def _distance2_line_endpoints(line1, line2):\n (A,B),(C,D) = line1, line2\n R2=lambda u,v: (u[0]-v[0])**2+(u[1]-v[1])**2\n pairs = zip((A,A,B,B),(C,D,C,D))\n r2 = [R2(pair[0],pair[1]) for pair in pairs]\n mini=sorted(zip(r2,pairs),key=lambda a,b: a)[0]\n #R2_min = min((R2(A,C), R2(A,D), R2(B,C), R2(B,D)))\n return mini[0], mini[1][0], mini[1][1]", "def parallelogram_vertices_from_grouped_lines(lines):\n if len(lines) > 2:\n raise Exception(\"parallelogram finder \\\n called with too many lines\")\n c_1 = lines[0]\n c_2 = lines[1]\n intercepts = None\n for l1, l2 in list(zip(c_1, c_2)) + list(zip(c_1, c_2[::-1])):\n x = solve_for_intersection(np.array([l1, l2]))\n if intercepts is None:\n intercepts = np.array([x])\n else:\n intercepts = np.vstack((intercepts, x))\n return intercepts", "def compute_start_end_points(linestrings):\n starts = []\n stops = []\n for ls in linestrings:\n pt = Point(ls.coords[0])\n starts.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n pt = Point(ls.coords[-1])\n stops.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n return starts, stops", "def line(x1,y1,x2,y2,z_thickness,laser):\r\n\t#Global variables that are used by all algorithms\r\n\tlayers = int(z_thickness/laser[\"z_spacing\"])\r\n\r\n\t#Works out offset when beginning on a new layer\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\ttaper_x,taper_y = offset(x1,y1,x2,y2,taper)\r\n\r\n\t#Works out offset between each parallel scan on the same layer\r\n\tdelta_x,delta_y = offset(x1,y1,x2,y2,laser[\"xy_spacing\"])\r\n\r\n\t#Works out maximum offset from starting line, we don't want to exceed this at any point.\r\n\tmax_taper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (z_thickness) * 2\r\n\tmax_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)\r\n\t#max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y\r\n\r\n\t#Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows\r\n\tcutlist = []\r\n\tfor a in range(layers):\r\n\t\tnew_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y\r\n\t\ti = 0\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\twhile abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):\r\n\t\t\t#This use of i is to reduce the jump distance between individual 
scans\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y\r\n\t\t\ti = i + 1\r\n\t\t#Having completed one layer, the laser moves down to begin the next layer\r\n\t\tmax_delta_x = max_delta_x - taper_x\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def simplify_line_dp(pts, tolerance):\r\n anchor = 0\r\n floater = len(pts) - 1\r\n stack = []\r\n keep = set()\r\n\r\n stack.append((anchor, floater)) \r\n while stack:\r\n anchor, floater = stack.pop()\r\n \r\n # initialize line segment\r\n if pts[floater] != pts[anchor]:\r\n anchorX = float(pts[floater][0] - pts[anchor][0])\r\n anchorY = float(pts[floater][1] - pts[anchor][1])\r\n seg_len = sqrt(anchorX ** 2 + anchorY ** 2)\r\n # get the unit vector\r\n anchorX /= seg_len\r\n anchorY /= seg_len\r\n else:\r\n anchorX = anchorY = seg_len = 0.0\r\n \r\n # inner loop:\r\n max_dist = 0.0\r\n farthest = anchor + 1\r\n for i in range(anchor + 1, floater):\r\n dist_to_seg = 0.0\r\n # compare to anchor\r\n vecX = float(pts[i][0] - pts[anchor][0])\r\n vecY = float(pts[i][1] - pts[anchor][1])\r\n seg_len = sqrt( vecX ** 2 + vecY ** 2 )\r\n # dot product:\r\n proj = vecX * anchorX + vecY * anchorY\r\n if proj < 0.0:\r\n dist_to_seg = seg_len\r\n else: \r\n # compare to floater\r\n vecX = float(pts[i][0] - pts[floater][0])\r\n vecY = float(pts[i][1] - pts[floater][1])\r\n seg_len = sqrt( vecX ** 2 + vecY ** 2 )\r\n # dot product:\r\n proj = vecX * (-anchorX) + vecY * (-anchorY)\r\n if proj < 0.0:\r\n dist_to_seg = seg_len\r\n else: # calculate perpendicular distance to line (pythagorean theorem):\r\n dist_to_seg = sqrt(abs(seg_len ** 2 - proj ** 2))\r\n if max_dist < dist_to_seg:\r\n max_dist = dist_to_seg\r\n farthest = i\r\n\r\n if max_dist <= tolerance: # use line segment\r\n keep.add(anchor)\r\n keep.add(floater)\r\n else:\r\n stack.append((anchor, farthest))\r\n stack.append((farthest, floater))\r\n\r\n keep = list(keep)\r\n keep.sort()\r\n return [pts[i] for i in keep]", "def findPointOnLine(node1, node2, distance):\n m, b, _ = geometry.lineSpec(node1, node2)\n \n xy = []\n if m == True: # parallel to y axis\n xy.append(node1[0])\n if node1[1] <= node2[1]:\n xy.append(node1[1] + distance)\n else:\n xy.append(node1[1] - distance)\n \n elif m == False: # parallel to x axis\n if node1[0] <= node2[0]:\n xy.append(node1[0] + distance)\n else:\n xy.append(node1[0] - distance)\n xy.append(node1[1])\n \n else:\n x = sp.Symbol('x')\n z = (x-node1[0])**2 + (m*x+b-node1[1])**2 - distance**2\n xSolution = sp.solve(z, x)\n \n for xSol in xSolution:\n if (xSol >= node1[0] and xSol <= node2[0]) or (xSol <= node1[0] and xSol >= node2[0]):\n xy.append(xSol)\n xy.append(xSol*m + b)\n return xy", "def midpoint_line(a, b):\n return 
scale_vector(add_vectors(a, b), 0.5)", "def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]", "def _lineArcIntersectXY(l,c,inside=True,params=False):\n x=c[0]\n r=c[1][0]\n mpr=mpm.mpf(r)\n \n # is the arc a full circle?\n circle = False\n if c[1][1] == 0 and c[1][2] == 360:\n circle = True\n \n start=c[1][1] % 360.0\n end=c[1][2] %360.0\n\n ## what is the shortest distance between the line and the center\n ## of the arc? If that is greater than r, then there is no\n ## intersection\n dst = linePointXYDist(l,x,inside and not params)\n if dst > r+epsilon:\n return False\n\n ## start by treating the arc as a circle. At this point we know\n ## we have one or two intersections within the line segment,\n ## though perhaps none within the arc segment, which we will test\n ## for later\n \n ## transform points so arc is located at the origin\n p0=sub(l[0],x)\n p1=sub(l[1],x)\n \n ## solve for b in: | b*p0 + (1-b)*p1 | = r\n ## let V= p0-p1, P=p1\n ## | b*V + P |^2 = r^2\n ## b^2(Vx^2 + Vy^2) + 2b(VxPx+VyPy) + Px^2 + Py^2 - r^2 = 0\n ## let a = Vx^2 + Vy^2,\n ## b = 2*(VxPx + VyPy)\n ## cc = Px^2 + Py^2 - r^2\n ## b0 = ( -b + sqrt(b^2 - 4ac) )/ 2a\n ## b1 = ( -b - sqrt(b^2 - 4ac) )/ 2a\n \n V = sub(p0,p1)\n P = p1\n #a = V[0]*V[0]+V[1]*V[1]\n mpV0 = mpm.mpf(V[0])\n mpV1 = mpm.mpf(V[1])\n mpP0 = mpm.mpf(P[0])\n mpP1 = mpm.mpf(P[1])\n a = mpV0*mpV0+mpV1*mpV1\n mpepsilon = mpm.mpf(epsilon)\n if mpm.fabs(a) < mpepsilon*mpepsilon:\n print('degenerate line in lineArcIntersectXY')\n raise ValueError('bad!')\n return False\n # b = 2*(V[0]*P[0]+V[1]*P[1])\n b = 2*(mpV0*mpP0+mpV1*mpP1)\n #cc = P[0]*P[0]+P[1]*P[1]-r*r\n cc = mpP0*mpP0+mpP1*mpP1-mpr*mpr\n d = b*b-4*a*cc\n ## Check to see if we are within epsilon, scaled by the length of the line\n if mpm.fabs(d) < mpm.sqrt(a)*2*mpepsilon: # one point of intersection\n b0 = -b/(2*a)\n b1 = False\n elif d < 0:\n print(\"value of d: \",d,\" value of sqrt(a)*epsilon\",sqrt(a)*epsilon)\n raise ValueError(\"imaginary solution to circle line intersection -- shouldn't happen here\")\n else: # two points of intersection\n b0 = (-b + mpm.sqrt(d))/(2*a)\n b1 = (-b - mpm.sqrt(d))/(2*a)\n\n # use computed parameters to calculate solutions, still in\n # circle-at-origin coordinates\n s = [ add(scale3(V,float(b0)),p1) ]\n if b1:\n s = s + [ add(scale3(V,float(b1)),p1) ]\n\n if not inside or circle or params: # transform back into world\n # coordinates\n pp = list(map(lambda q: add(q,x),s))\n if params:\n uu1 = []\n uu2 = []\n for i in range(len(pp)):\n uu1 = uu1 + [ unsampleline(l,pp[i]) ]\n uu2 = uu2 + [ unsamplearc(c,pp[i]) ]\n return [uu1, uu2]\n else:\n return pp\n\n ## see if any of the intersections we've found lie between\n ## start and end of the arc\n \n ss = []\n for i in s:\n ang = (atan2(i[1],i[0]) % pi2)*360.0/pi2\n\n if end > start and ang >= start and ang <= end:\n ss = ss + [ add(x,i) ]\n elif end < start and (ang >= start or ang<= end):\n ss = ss + [ add(x,i) ]\n\n if len(ss) == 0:\n return False\n return ss", "def intersection(line1, line2):\n rho1, theta1 = line1\n rho2, theta2 = line2\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n return 
[x0, y0]", "def line_segment_intersection(line1,\n line2):\n a = float(line1[0][0]*line1[1][1] - line1[0][1]*line1[1][0])\n b = float(line1[0][1] - line1[1][1])\n c = float(line1[1][0] - line1[0][0])\n\n d = float(line2[0][0]*line2[1][1] - line2[0][1]*line2[1][0])\n e = float(line2[0][1] - line2[1][1])\n f = float(line2[1][0] - line2[0][0])\n\n prod = b*f - c*e\n if abs(prod) < 1e-10:\n return (np.inf, np.inf)\n\n xc = (d*c - a*f) / prod\n yc = (a*e - b*d) / prod\n\n sign_x1 = (xc - line1[0][0])*(xc - line1[1][0])\n sign_y1 = (yc - line1[0][1])*(yc - line1[1][1])\n\n if sign_x1 > 1e-10:\n return (np.inf, np.inf)\n if sign_x1 < 1e-10:\n if sign_y1 > 1e-10:\n return (np.inf, np.inf)\n\n sign_x2 = (xc - line2[0][0])*(xc - line2[1][0])\n sign_y2 = (yc - line2[0][1])*(yc - line2[1][1])\n\n if sign_x2 > 1e-10:\n return (np.inf, np.inf)\n if sign_x2 == 1e-10:\n if sign_y2 > 1e-10:\n return (np.inf, np.inf)\n return (int(xc), int(yc))", "def dist_vincenty(lat1, lon1, lat2, lon2, iterations=20):\r\n if lat1 < -90 or lat1 > 90 or lat2 < -90 or lat2 > 90 or lon1 < -180 or lon1 > 180 or lon2 < -180 or lon2 > 180:\r\n raise ValueError(\r\n \"Latitude values shoulds range from (-90,90) and longitude from (-180,180) but one of the input values is out of bounds. Latitude_1: %f, Logitude_1: %f, Latitude_2: %f, Logitude_2: %f\" %\r\n (lat1, lon1, lat2, lon2))\r\n\r\n major, minor, f = 6378137, 6356752.314245, 1 / 298.257223563\r\n\r\n lat1, lng1, lat2, lng2 = radians(\r\n lat1), radians(lon1), radians(lat2), radians(lon2)\r\n delta_lng = lng2 - lng1\r\n reduced_lat1, reduced_lat2 = atan(\r\n (1 - f) * tan(lat1)), atan((1 - f) * tan(lat2))\r\n\r\n sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)\r\n sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)\r\n\r\n lambda_lng = delta_lng\r\n lambda_prime = 2 * pi\r\n while abs(lambda_lng - lambda_prime) > 10e-12 and iterations > 0:\r\n sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)\r\n\r\n sin_sigma = sqrt(\r\n (cos_reduced2 * sin_lambda_lng) ** 2 +\r\n (cos_reduced1 * sin_reduced2 -\r\n sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2\r\n )\r\n if sin_sigma == 0:\r\n return 0 # Coincident points\r\n\r\n cos_sigma = (\r\n sin_reduced1 * sin_reduced2 +\r\n cos_reduced1 * cos_reduced2 * cos_lambda_lng\r\n )\r\n sigma = atan2(sin_sigma, cos_sigma)\r\n\r\n sin_alpha = (cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma)\r\n cos_sq_alpha = 1 - sin_alpha ** 2\r\n\r\n if cos_sq_alpha != 0:\r\n cos2_sigma_m = cos_sigma - 2 * \\\r\n (sin_reduced1 * sin_reduced2 / cos_sq_alpha)\r\n else:\r\n cos2_sigma_m = 0.0 # Equatorial line\r\n\r\n C = f / 16. * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))\r\n\r\n lambda_prime = lambda_lng\r\n lambda_lng = (\r\n delta_lng + (1 - C) * f * sin_alpha * (\r\n sigma + C * sin_sigma * (\r\n cos2_sigma_m + C * cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)\r\n )\r\n )\r\n )\r\n iterations -= 1\r\n\r\n if iterations == 0:\r\n raise ValueError(\"Vincenty formula failed to converge!\")\r\n\r\n u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2\r\n A = 1 + u_sq / 16384. * (4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq)))\r\n B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))\r\n delta_sigma = B * sin_sigma * (\r\n cos2_sigma_m + B / 4. * (cos_sigma * (-1 + 2 * cos2_sigma_m ** 2) -\r\n B / 6. 
* cos2_sigma_m * (-3 + 4 * sin_sigma ** 2) *\r\n (-3 + 4 * cos2_sigma_m ** 2))\r\n )\r\n s = minor * A * (sigma - delta_sigma)\r\n\r\n return round(s, 3) # round to 1mm precision\r", "def p2p_xyz(start_point, end_point, top_left_cor, cellsize, dem):\n start_cell = (int((start_point[0] - top_left_cor[0]) / cellsize[0]),\n int((start_point[1] - top_left_cor[1]) / cellsize[1]))\n end_cell = (int((end_point[0] - top_left_cor[0]) / cellsize[0]),\n int((end_point[1] - top_left_cor[1]) / cellsize[1]))\n cells = misc.get_line(start_cell, end_cell) \n pnts = []\n elev = []\n \n dem_elv = dem[:,1]\n dem_indx = dem[:,2:4]\n\n for cell in cells:\n x = top_left_cor[0] + cell[0] * cellsize[0] + cellsize[0] / 2\n y = top_left_cor[1] + cell[1] * cellsize[1] + cellsize[1] / 2\n #xy_indx=[str(cell[0]),str(cell[1])]\n z_indx=np.logical_and(np.equal(dem_indx[:,0],cell[0]),np.equal(dem_indx[:,1],cell[1]))\n try:\n z=dem_elv[z_indx][0]\n except (np.sum(z_indx)>1):\n print(\"Oops! That was more than one indices in dem matching the query index (in getCellValue)\")\n #z_indx = [i for i,j in enumerate(dem_indx) if j == xy_indx]\n z = float(dem_elv[z_indx])\n pnts.append((x, y))\n elev.append(z)\n return pnts, elev", "def get_line(start, end): \n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = np.array((y, x)) if is_steep else np.array((x, y))\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return np.array(points)", "def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def calculate_line_length(x1, y1, x2, y2):\n distance = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n return distance", "def line_intersect(line1, line2):\n b1 = (line1[1][1] - line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - 
(a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None", "def calculate_velocity_induced_by_line_vortices(\n points, origins, terminations, strengths, collapse=True\n):\n\n # Expand the dimensionality of the points input. It is now of shape (N x 1 x 3). This will allow numpy to\n # broadcast the upcoming subtractions.\n points = np.expand_dims(points, axis=1)\n\n # Define the vectors from the vortex to the points. r_1 and r_2 now both are of shape (N x M x 3). Each row/column\n # pair holds the vector associated with each point/vortex pair.\n r_1 = points - origins\n r_2 = points - terminations\n\n # Define the vector from the vortex origins to the vortex terminations. This is of shape (N x M x 3).\n r_0 = r_1 - r_2\n\n # Calculate the vector cross product. This is of shape (N x M x 3).\n r_1_cross_r_2 = np.cross(r_1, r_2)\n\n # Calculate the cross product's absolute magnitude. This is of shape (N x M).\n r_1_cross_r_2_absolute_magnitude = (\n r_1_cross_r_2[:, :, 0] ** 2\n + r_1_cross_r_2[:, :, 1] ** 2\n + r_1_cross_r_2[:, :, 2] ** 2\n )\n\n # Calculate the vector lengths. These are of shape (N x M).\n r_1_length = np.linalg.norm(r_1, axis=-1)\n r_2_length = np.linalg.norm(r_2, axis=-1)\n\n # Define the radius of the line vortices. This is used to get rid of any singularities.\n radius = 3.0e-16\n\n # Set the lengths and the absolute magnitudes to zero, at the places where the lengths and absolute magnitudes are\n # less than the vortex radius. This insures that the calculation for the constant k will produce np.inf or np.nan\n # values at the locations where there are singularities.\n r_1_length[r_1_length < radius] = 0\n r_2_length[r_2_length < radius] = 0\n r_1_cross_r_2_absolute_magnitude[r_1_cross_r_2_absolute_magnitude < radius] = 0\n\n # Calculate the vector dot products. This uses numpy's einsum function for speed.\n r_0_dot_r_1 = np.einsum(\"ijk,ijk->ij\", r_0, r_1)\n r_0_dot_r_2 = np.einsum(\"ijk,ijk->ij\", r_0, r_2)\n\n # Calculate k and then the induced velocity, ignoring any divide-by-zero or nan errors. k is of shape (N x M)\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n k = (\n strengths\n / (4 * np.pi * r_1_cross_r_2_absolute_magnitude)\n * (r_0_dot_r_1 / r_1_length - r_0_dot_r_2 / r_2_length)\n )\n\n # Set the shape of k to be (N x M x 1) to support numpy broadcasting in the subsequent multiplication.\n k = np.expand_dims(k, axis=2)\n\n # Multiple k by the cross products of r_1 and r_2 to get the non-collapsed matrix of induced velocities. 
This is\n # of shape (M x N x 3).\n induced_velocities = k * r_1_cross_r_2\n\n # Set the values of the induced velocity to zero where there are singularities.\n induced_velocities[np.isinf(induced_velocities)] = 0\n induced_velocities[np.isnan(induced_velocities)] = 0\n\n if collapse:\n induced_velocities = np.sum(induced_velocities, axis=1)\n\n return induced_velocities", "def distanceOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return 0\n res = res[None, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)[0]", "def lineseg_dists(p, a, b):\n # normalized tangent vectors\n d_ba = b - a\n d = np.divide(d_ba, (np.hypot(d_ba[:, 0], d_ba[:, 1])\n .reshape(-1, 1)))\n\n # signed parallel distance components\n # rowwise dot products of 2D vectors\n s = np.multiply(a - p, d).sum(axis=1)\n t = np.multiply(p - b, d).sum(axis=1)\n\n # clamped parallel distance\n h = np.maximum.reduce([s, t, np.zeros(len(s))])\n\n # perpendicular distance component\n # rowwise cross products of 2D vectors\n d_pa = p - a\n c = d_pa[:, 0] * d[:, 1] - d_pa[:, 1] * d[:, 0]\n\n return np.hypot(h, c)", "def calc_line(start, target, map):\n\t\"\"\" Returns the real world point at the farthest range \"\"\"\n\tdx = abs(target[0] - start[0])\n\tdy = abs(target[1] - start[1])\n\txi = start[0]\n\tyi = start[1]\n\tn = 1 + dx + dy\n\tx_dir = np.sign(target[0] - start[0])\n\ty_dir = np.sign(target[1] - start[1])\n\terror = dx - dy;\n\tdx *= 2\n\tdy *= 2\n\n\tfor i in xrange(n):\n\t\tif map.grid[xi,yi] is not map.empty and map.grid[xi,yi] > 0:\n\t\t\treturn xi, yi\n\n\t\tif error > 0:\n\t\t\txi += x_dir\n\t\t\terror -= dy\n\t\telse:\n\t\t\tyi += y_dir\n\t\t\terror += dx\n\treturn target", "def line_points(start, end):\n # Setup initial conditions\n x1, y1 = start.astuple()\n x2, y2 = end.astuple()\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = Int2(y, x) if is_steep else Int2(x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return points", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = 
(equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def _LineSearch(self, disp_vector):\n self.GetDispDeriv(self.disp_mag, disp_vector)\n disp_mag = self.disp_mag\n disp_sign = 1.0 if self.disp_deriv <= 0.0 else -1.0\n disp_mag *= disp_sign\n disp_sign_same = True\n ref_energy = self.mol.e_total\n\n # binary search to find upper bound on displacement magnitude\n self.n_subiter = 0\n while (disp_sign_same):\n self.n_subiter += 1\n self._DisplaceCoords(+1.0 * disp_mag, disp_vector)\n self.GetDispDeriv(disp_mag, disp_vector)\n self._DisplaceCoords(-1.0 * disp_mag, disp_vector)\n if self.mol.e_total > ref_energy:\n disp_mag *= 0.5\n break\n old_disp_sign = disp_sign\n disp_sign = 1.0 if self.disp_deriv <= 0.0 else -1.0\n disp_sign_same = bool(disp_sign == old_disp_sign)\n disp_mag *= 2.0\n self.GetDispDeriv(disp_mag, disp_vector)\n self.AdjustDispMag(self.n_subiter)\n\n # binary search to find value of displacement within bounds\n numer = 1.0\n denom = 2.0\n for i in range(const.NUMLINESEARCHSTEPS):\n self.n_subiter += 1\n test_disp = disp_mag * numer / denom\n self._DisplaceCoords(+1.0 * test_disp, disp_vector)\n self.GetDispDeriv(disp_mag / (2**(-i)), disp_vector)\n self._DisplaceCoords(-1.0 * test_disp, disp_vector)\n direc = 1.0 if self.disp_deriv < 0.0 else -1.0\n numer = 2*numer + direc\n denom = 2*denom\n disp_mag *= numer / denom\n\n # final line search energy minimized molecular coordinates\n self._DisplaceCoords(+1.0 * disp_mag, disp_vector)", "def find_circle_line_intersection(P0, r0, P1):\n\t\n\tx_offset, y_offset = P0\n\tx0, y0 = 0, 0\n\tx1, y1 = P1\n\n\tx1, y1 = x1 - x_offset, y1 - y_offset\n\n\tdx = x1 - x0\n\tdy = y1 - y0\n\tdr = math.sqrt(dx*dx + dy*dy)\n\n\tD = x0*y1 - x1*y0\n\n\tdelta0 = r0*r0*dr*dr - D*D\n\n\tx2 = (D*dy + sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty2 = (D*dx + math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx3 = (D*dy - sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty3 = (D*dx - math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx2 += x_offset\n\tx3 += x_offset\n\ty2 += y_offset\n\ty3 += y_offset\n\n\treturn np.array([[x2, y2], [x3, y3]])", "def distancetoline(p, l1, l2):\n vx = l1.x-p.x \n vy = l1.y-p.y\n ux = l2.x-l1.x\n uy = l2.y-l1.y\n\n length = ux*ux+uy*uy;\n\n det = (-vx*ux)+(-vy*uy); \n # if this is < 0 or > length then its outside the line segment\n if det<0 or det>length:\n ux=l2.x-p.x\n uy=l2.y-p.y\n return sqrt(min(vx*vx+vy*vy, ux*ux+uy*uy))\n\n det = ux*vy-uy*vx\n if length == 0.0:\n return 0.0\n else:\n return sqrt((det*det)/length)", "def line_line_shortest_dist_unbounded(r1: np.ndarray, v1: np.ndarray, r2: np.ndarray, v2: np.ndarray,\n eps: float = 1e-5) -> Tuple[float, Tuple[float, float]]:\n\n # check that lines are not parallel\n # normalised dot product must not be 1 or -1\n if np.abs(np.dot(v1, v2)) < np.linalg.norm(v1) * np.linalg.norm(v2) - eps:\n R = r2 - r1\n A = np.array([[np.dot(v1, v1), -np.dot(v1, v2)],\n [np.dot(v2, v1), -np.dot(v2, v2)]])\n b = np.array([np.dot(R, v1), np.dot(R, v2)])\n t1, t2 = np.matmul(np.linalg.inv(A), b)\n d = np.linalg.norm((r1 + v1 * t1) - (r2 + v2 * t2))\n else:\n # case where two lines are parallel\n # then fix one point and find shortest distance to that point\n t1 = 0\n d, t2 = line_point_shortest_dist(r2, v2, r1)\n\n return d, (t1, t2)", "def find_intersections_line_line(line1: Line, line2: Line) -> {Point}:\n if line1.slope != line2.slope:\n if line1.slope is Infinity:\n # Line 1 is vertical, use its x value as the x value to evaluate line2\n x = 
line1.point1.x\n y = line2(x)\n elif line2.slope is Infinity:\n # Line 2 is vertical, use its x value as the x value to evaluate line1\n x = line2.point1.x\n y = line1(x)\n else:\n x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)\n y = line1(x)\n return {Point(x, y)}\n else:\n return {}", "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def line_equation(x1, y1, x2, y2):\n \n a = y2 - y1\n b = x1 - x2\n c = x2*y1 - x1*y2\n return a, b, c", "def grab_40_vectors(x1,x2,y1,y2) -> list:\n\n if x1 > x2:\n x1,x2 = x2,x1\n if y1 > y2:\n y1,y2 = y2,y1\n\n height = abs(y2-y1)\n length = abs(x2-x1)\n\n height_delta = height/10\n length_delta = length/10\n\n left_vertical_edge = [(x1, y1+height_delta*i) for i in range(0,11)]\n # print(left_vertical_edge)\n right_vertical_edge = [(x2,y1+height_delta*i) for i in range(0,11)]\n # print(right_vertical_edge)\n bottom_horizontal_edge = [(x1+length_delta*i,y1) for i in range(0,11)]\n # print(bottom_horizontal_edge)\n top_horizontal_edge = [(x1+length_delta*i,y2) for i in range(0,11)]\n # print(top_horizontal_edge)\n\n all_vectors = left_vertical_edge+right_vertical_edge+bottom_horizontal_edge+top_horizontal_edge\n return(all_vectors)", "def get_line_end_pts(line_segment, y1, y2):\n if line_segment is None:\n return None\n\n slope, intercept = line_segment\n\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return x1, y1, x2, y2", "def make_coordinates(image, line_params):\n slope, intercept = line_params\n y1 = image.shape[0]\n y2 = int(y1 * (3/5))\n x1 = int((y1 - intercept)/ slope)\n x2 = int((y2 - intercept)/ slope)\n return np.array([x1, y1, x2, y2])", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def _distance2_line_segments(line1, line2, h_line1=None, h_line2=None):\n h_line1 = _homogenous_line(*line1) if h_line1 is None else h_line1\n h_line2 = _homogenous_line(*line2) if h_line2 is None else h_line2\n\n r_11 = _distance2_point_to_h_line(line1[0], h_line2), line2\n r_12 = _distance2_point_to_h_line(line1[1], h_line2), line2\n r_21 = _distance2_point_to_h_line(line2[0], h_line1), line1\n r_22 = _distance2_point_to_h_line(line2[1], h_line1), line1\n\n tests = sorted((r_11,r_12,r_21,r_22), key=lambda x: x[0][0])\n # check for validity starting with the closest point\n for (r2, ps), line in tests:\n if _point_within_bounds(line,ps):\n return r2, ps, line #0 if line==line1 else 1\n\n # none of the corner points is close to any of the line\n # --> line separation is simply the closest distance of\n # corner points\n\n r2, p1, p2 = _distance2_line_endpoints(line1, line2)\n\n return r2, p1, p2", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * 
l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def trapezoid_decomposition_linear(polygons):\n # Enumerate all the edges and iteratively build up the set of trapezoids\n # Add a vertical line for each point in the polygon\n all_polygons = np.concatenate(polygons, axis=0)\n vertical_lines = SortedDict({x[0]: [x[1], 1000000, 0] for x in all_polygons})\n\n # Loop over Polygons to determine end-points\n for polygon in polygons:\n start_vertex = polygon[0]\n for vertex in polygon[1:]:\n # find the lines in front of the smaller \n x_start = start_vertex[0]\n x_curr = vertex[0]\n start_idx = vertical_lines.bisect_right(min(x_start, x_curr))\n end_idx = vertical_lines.bisect_left(max(x_start, x_curr))\n x_vals = vertical_lines.keys()\n for i in range(start_idx, end_idx):\n x = x_vals[i]\n if x < min(x_start, x_curr) or x > max(x_start, x_curr):\n continue\n y, top, bottom = vertical_lines[x]\n y_val = linear_interpolation(start_vertex, vertex, x)\n if y_val > y and y_val < top:\n vertical_lines[x][1] = y_val\n elif y_val < y and y_val > bottom:\n vertical_lines[x][2] = y_val\n start_vertex = vertex\n return vertical_lines", "def calculate_pull_pts(point1, point2, bilayer_center=3.26):\n ns_vector = point2 - point1\n ns_vector /= np.linalg.norm(ns_vector)\n\n # Identify the anchor points that properly\n # straddle the bilayer center while \n # still being in line with the north-south vector of the graphene sheet\n z_gap = abs(point2[2] - point1[2])\n anchor1_z = bilayer_center + (z_gap/2)\n scale1_pull_vec = ( anchor1_z - point1[2] ) / ns_vector[2]\n anchor2_z = bilayer_center - (z_gap/2)\n scale2_pull_vec = ( anchor2_z - point2[2] ) / ns_vector[2]\n\n pull_vec1 = scale1_pull_vec * ns_vector\n pull_vec2 = scale2_pull_vec * ns_vector\n\n anchor1 = point1 + pull_vec1\n anchor2 = point2 + pull_vec2\n\n return ns_vector, anchor1, anchor2", "def make_line_points(y1, y2, line):\n if line is None:\n return None\n\n slope, intercept = line\n\n # make sure everything is integer as cv2.line requires it\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return ((x1, y1), (x2, y2))", "def lineSpec(node1, node2):\n \n if node1 == node2:\n m = 0\n b = 0\n d = -1\n return m, b, d\n \n elif node1[0] == node2[0]: # parallel to y axis\n m = True\n b = node1[0]\n d = abs(node2[1] - node1[1])\n return m, b, d\n \n elif node1[1] == node2[1]: # parallel to x axis\n m = False\n b = node1[1]\n d = abs(node2[0] - node1[0])\n return m, b, d\n \n else:\n m = (node2[1] - node1[1]) / (node2[0] - node1[0])\n b = (node1[1]) - m*node1[0]\n d = geometry.lineLength(node1, node2)\n return m, b, d", "def _get_lines_parallel_to_axis(\n self,\n axis_parallel_to: NumberLine,\n axis_perpendicular_to: NumberLine,\n freq: float,\n ratio_faded_lines: int,\n ) -> tuple[VGroup, VGroup]:\n\n line = Line(axis_parallel_to.get_start(), axis_parallel_to.get_end())\n if ratio_faded_lines == 0: # don't show faded lines\n ratio_faded_lines = 1 # i.e. 
set ratio to 1\n step = (1 / ratio_faded_lines) * freq\n lines1 = VGroup()\n lines2 = VGroup()\n unit_vector_axis_perp_to = axis_perpendicular_to.get_unit_vector()\n\n # need to unpack all three values\n x_min, x_max, _ = axis_perpendicular_to.x_range\n\n # account for different axis scalings (logarithmic), where\n # negative values do not exist and [-2 , 4] should output lines\n # similar to [0, 6]\n if axis_perpendicular_to.x_min > 0 and x_min < 0:\n x_min, x_max = (0, np.abs(x_min) + np.abs(x_max))\n\n # min/max used in case range does not include 0. i.e. if (2,6):\n # the range becomes (0,4), not (0,6).\n ranges = (\n [0],\n np.arange(step, min(x_max - x_min, x_max), step),\n np.arange(-step, max(x_min - x_max, x_min), -step),\n )\n\n for inputs in ranges:\n for k, x in enumerate(inputs):\n new_line = line.copy()\n new_line.shift(unit_vector_axis_perp_to * x)\n if (k + 1) % ratio_faded_lines == 0:\n lines1.add(new_line)\n else:\n lines2.add(new_line)\n return lines1, lines2", "def generate_line(point_1, point_2):\r\n A = point_1.y - point_2.y\r\n B = point_2.x - point_1.x\r\n C = point_1.y * B + point_1.x * A\r\n return np.matrix([[A],[B],[-C]])", "def find_line_through_point(center, theta, length):\n\n r = length\n cx, cy = center\n\n xo = int(r * math.sin(theta))\n yo = int(r * math.cos(theta))\n\n line_start = cx, cy\n line_end = cx + xo, cy + yo\n\n return line_start, line_end", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def _intersection_homogenous(homog_line_0, homog_line_1):\n # NB: renamed from '_intersection'\n eps = 1e-13\n a,b,c=homog_line_0\n u,v,w=homog_line_1\n D=float(b*u-v*a)\n if abs(D)<eps:\n # parallel lines\n return None, None\n xp=-(w*b-c*v)/D\n yp= (w*a-c*u)/D\n\n return xp, yp", "def lineseg_dists(p, a, b):\n\t\t# normalized tangent vectors\n\t\td_ba = b - a\n\t\td = np.divide(d_ba, (np.hypot(d_ba[:, 0], d_ba[:, 1])\n\t\t .reshape(-1, 1)))\n\n\t\t# signed parallel distance components\n\t\t# rowwise dot products of 2D vectors\n\t\ts = np.multiply(a - p, d).sum(axis=1)\n\t\tt = np.multiply(p - b, d).sum(axis=1)\n\n\t\t# clamped parallel distance\n\t\th = np.maximum.reduce([s, t, np.zeros(len(s))])\n\n\t\t# perpendicular distance component\n\t\t# rowwise cross products of 2D vectors\n\t\td_pa = p - a\n\t\tc = d_pa[:, 0] * d[:, 1] - d_pa[:, 1] * d[:, 0]\n\n\t\tprint(\"C value:\", c)\n\t\treturn np.hypot(h, c)", "def get_candidate_locations_ellipse(f1, f2, major_axis, row_num, col_num):\n f1_y, f1_x = f1\n f2_y, f2_x = f2\n a = major_axis / 2.0\n c = distance(f1_x, f1_y, f2_x, f2_y) / 2.0\n b = np.sqrt(a * a - c * c)\n major_bound_1 = ((f2_x - f1_x) * (a + c) / 
(2 * c) + f1_x, (f2_y - f1_y) * (a + c) / (2 * c) + f1_y)\n major_bound_2 = ((f1_x - f2_x) * (a + c) / (2 * c) + f2_x, (f1_y - f2_y) * (a + c) / (2 * c) + f2_y)\n delta_x = b * (abs(major_bound_1[1] - major_bound_2[1])) / (2 * a)\n if f1_x == f2_x:\n delta_y = 0\n else:\n delta_y = np.sqrt(b * b - delta_x * delta_x)\n p1 = (major_bound_1[0] + delta_x, major_bound_1[1] - delta_y)\n p2 = (major_bound_1[0] - delta_x, major_bound_1[1] + delta_y)\n p3 = (major_bound_2[0] - delta_x, major_bound_2[1] + delta_y)\n p4 = (major_bound_2[0] + delta_x, major_bound_2[1] - delta_y)\n min_x = int(max(min([p1[0], p2[0], p3[0], p4[0]]), 0))\n max_x = int(min(max([p1[0], p2[0], p3[0], p4[0]]), col_num - 1))\n min_y = int(max(min([p1[1], p2[1], p3[1], p4[1]]), 0))\n max_y = int(min(max([p1[1], p2[1], p3[1], p4[1]]), row_num - 1))\n candidates = []\n for x in range(min_x, max_x + 1):\n for y in range(min_y, max_y + 1):\n if distance(f1_x, f1_y, x, y) + distance(x, y, f2_x, f2_y) < major_axis:\n candidates.append((y, x))\n return candidates", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def distance(p1, p2):\n if isparallel(p1, p2):\n # lines are parallel\n l = np.cross(p1.w, p1.v - p2.v * np.dot(p1.w, p2.w) / dot(p2.w, p2.w)) / np.linalg.norm(p1.w)\n else:\n # lines are not parallel\n if abs(p1 * p2) < 10*_eps:\n # lines intersect at a point\n l = 0\n else:\n # lines don't intersect, find closest distance\n l = abs(p1 * p2) / np.linalg.norm(np.cross(p1.w, p2.w))**2\n return l", "def build_line_with_increment(coord1, coord2, increment, max_point, altitude=0):\n\n if not isinstance(coord1, GPSCoord):\n raise ValueError('Parameter coord1 have to be a GPSCoord')\n\n if not isinstance(coord2, GPSCoord):\n raise ValueError('Parameter coord2 have to be a GPSCoord')\n\n increment = float(increment)\n\n start_coord = coord1.clone()\n start_coord.alt = altitude\n result = [start_coord]\n\n distance = coord1.distance_to(coord2).meters\n\n nb_point = int(distance / increment)\n\n if nb_point > max_point:\n nb_point = max_point\n\n for i in range(1, nb_point):\n last_coord = result[len(result) - 1]\n distance = last_coord.distance_to(coord2).meters\n coef_direct = increment / distance\n\n dx = last_coord.lon + coef_direct * (coord2.lon - last_coord.lon)\n dy = last_coord.lat + coef_direct * (coord2.lat - last_coord.lat)\n\n new_coord = GPSCoord(lat=dy, lon=dx, alt=altitude)\n\n result.append(new_coord)\n\n end_coord = coord2.clone()\n end_coord.alt = altitude\n result.append(end_coord)\n\n return result", "def __two_nearest_line__(b1, b2):\n distances = []\n for p in b1:\n for q in b2:\n distances.append([__distance__(p, q), (p, q)])\n distances = sorted(distances, key=lambda d: d[0])\n a1, b1 = distances[0][1][0], distances[0][1][1]\n a2, b2 = distances[1][1][0], distances[1][1][1]\n a1 = (a1[0] + (a2[0] - 
a1[0]) * 1 / 14, a1[1] + (a2[1] - a1[1]) * 1 / 14)\n b1 = (b1[0] + (b2[0] - b1[0]) * 1 / 14, b1[1] + (b2[1] - b1[1]) * 1 / 14)\n a2 = (a2[0] + (a1[0] - a2[0]) * 1 / 14, a2[1] + (a1[1] - a2[1]) * 1 / 14)\n b2 = (b2[0] + (b1[0] - b2[0]) * 1 / 14, b2[1] + (b1[1] - b2[1]) * 1 / 14)\n return (a1, b1), (a2, b2)", "def closest_distance_between_lines(\n a0,\n a1,\n b0,\n b1,\n clamp_all=False,\n clamp_a0=False,\n clamp_a1=False,\n clamp_b0=False,\n clamp_b1=False,\n):\n\n # If clampAll=True, set all clamps to True\n if clamp_all:\n clamp_a0 = True\n clamp_a1 = True\n clamp_b0 = True\n clamp_b1 = True\n\n a0 = np.asarray(a0)\n a1 = np.asarray(a1)\n b0 = np.asarray(b0)\n b1 = np.asarray(b1)\n\n # Calculate denomitator\n A = a1 - a0\n B = b1 - b0\n magA = np.linalg.norm(A)\n magB = np.linalg.norm(B)\n\n _A = A / magA\n _B = B / magB\n\n # due to numerical instabilities there is a test for the case _A and _B are almost parallel\n if not ((np.allclose(_A, _B) or np.allclose(_A, -_B))):\n # non parallel\n # worsk also for strong parallel lines\n cross = np.cross(_A, _B)\n denom = np.linalg.norm(cross) ** 2\n else:\n # almost paralel vectors\n # this is due to numerical stability\n denom = 0\n\n # If lines are parallel (denom=0) test if lines overlap.\n # If they don't overlap then there is a closest point solution.\n # If they do overlap, there are infinite closest positions, but there is a closest distance\n if not denom:\n d0 = np.dot(_A, (b0 - a0))\n\n # Overlap only possible with clamping\n if clamp_a0 or clamp_a1 or clamp_b0 or clamp_b1:\n d1 = np.dot(_A, (b1 - a0))\n\n # Is segment B before A?\n if d0 <= 0 >= d1:\n if clamp_a0 and clamp_b1:\n if np.absolute(d0) < np.absolute(d1):\n return a0, b0, np.linalg.norm(a0 - b0)\n return a0, b1, np.linalg.norm(a0 - b1)\n\n # Is segment B after A?\n elif d0 >= magA <= d1:\n if clamp_a1 and clamp_b0:\n if np.absolute(d0) < np.absolute(d1):\n return a1, b0, np.linalg.norm(a1 - b0)\n return a1, b1, np.linalg.norm(a1 - b1)\n\n # Segments overlap, return distance between parallel segments\n return None, None, np.linalg.norm(((d0 * _A) + a0) - b0)\n\n # Lines criss-cross: Calculate the projected closest points\n t = b0 - a0\n detA = np.linalg.det([t, _B, cross])\n detB = np.linalg.det([t, _A, cross])\n\n t0 = detA / denom\n t1 = detB / denom\n\n pA = a0 + (_A * t0) # Projected closest point on segment A\n pB = b0 + (_B * t1) # Projected closest point on segment B\n\n # Clamp projections\n if clamp_a0 or clamp_a1 or clamp_b0 or clamp_b1:\n if clamp_a0 and t0 < 0:\n pA = a0\n elif clamp_a1 and t0 > magA:\n pA = a1\n\n if clamp_b0 and t1 < 0:\n pB = b0\n elif clamp_b1 and t1 > magB:\n pB = b1\n\n # Clamp projection A\n if (clamp_a0 and t0 < 0) or (clamp_a1 and t0 > magA):\n dot = np.dot(_B, (pA - b0))\n if clamp_b0 and dot < 0:\n dot = 0\n elif clamp_b1 and dot > magB:\n dot = magB\n pB = b0 + (_B * dot)\n\n # Clamp projection B\n if (clamp_b0 and t1 < 0) or (clamp_b1 and t1 > magB):\n dot = np.dot(_A, (pB - a0))\n if clamp_a0 and dot < 0:\n dot = 0\n elif clamp_a1 and dot > magA:\n dot = magA\n pA = a0 + (_A * dot)\n\n return pA, pB, np.linalg.norm(pA - pB)", "def project_point_to_line(points, reference_points_of_lines, vectors_along_lines):\n k = check_shape_any(points, (3,), (-1, 3), name=\"points\")\n check_shape_any(\n reference_points_of_lines,\n (3,),\n (-1 if k is None else k, 3),\n name=\"reference_points_of_lines\",\n )\n vg.shape.check(locals(), \"vectors_along_lines\", reference_points_of_lines.shape)\n\n return reference_points_of_lines + vg.project(\n 
points - reference_points_of_lines, onto=vectors_along_lines\n )", "def transform_matrix_from_line_segments(ls11,ls12,LS11,LS12):\n norm = lambda vec: (vec[0]**2 + vec[1]**2 + vec[2]**2)**0.5\n vec1 = ls12 - ls11\n vec2 = LS12 - LS11\n trans_to_origin = translateEuler(-ls11)\n temp = rotate_matrix_from_vectors(vec1, vec2)\n rot_matrix = np.zeros([4,4])\n rot_matrix[:-1, :-1] = temp\n rot_matrix[3,3] = 1\n scale_matrix = np.eye(4)*norm(vec2)/norm(vec1) \n scale_matrix[3,3] = 1\n trans_to_point = translateEuler(LS11)\n # print(trans_to_point)\n # print(rot_matrix)\n # print(trans_to_origin)\n \n return trans_to_point@scale_matrix@rot_matrix@trans_to_origin", "def point_line_dist2(p, l1, l2):\n p, l1, l2 = np.asarray(p), np.asarray(l1), np.asarray(l2)\n ap = l1 - p\n n = l2 - l1\n n /= np.sqrt(sum(n**2))\n dist = ap - np.outer(n, np.dot(ap, n)).T\n return np.sum(dist**2, 1)", "def _boundary_endpoints_image_space_from_line(line: Line, boundary_radius: int, resolution: int) -> (\n np.array, np.array):\n point1 = line.point1.numpy()\n point2 = line.point2.numpy()\n # Without loss of generality, assume point1 has the smaller y coordinate\n if point1[1] > point2[1]:\n point1, point2 = point2, point1\n # Get the direction vector between the two points\n direction_vector = point2 - point1\n # If the slope > 1 (i.e. the y grows faster than x), we check intersections on the tops. Otherwise the sides\n if abs(direction_vector[0]) < abs(direction_vector[1]):\n dist_to_top = boundary_radius - point1[1] # Difference from top boundary to the y of p1\n # Scale the direction vector by the distance to top / y of direction_vector\n top_point = point1 + direction_vector * dist_to_top / direction_vector[1]\n # top_point = point1+direction_vector*resolution/(2*boundary_radius)\n # Repeat for bottom\n dist_to_bottom = boundary_radius + point1[1]\n bottom_point = point1 - direction_vector * dist_to_bottom / direction_vector[1]\n\n # Return the tuple of points\n return (Construction._point_to_image_space(top_point, boundary_radius, resolution),\n Construction._point_to_image_space(bottom_point, boundary_radius, resolution))\n else: # Find where it intersects with the sides\n dist_to_right = boundary_radius - point1[0] # Difference from top boundary to the y of p1\n # Scale the direction vector by the distance to top / y of direction_vector\n right_point = point1 + direction_vector * dist_to_right / direction_vector[0]\n # Repeat for bottom\n dist_to_left = boundary_radius + point1[0]\n left_point = point1 - direction_vector * dist_to_left / direction_vector[0]\n\n # Return the tuple of points\n return (Construction._point_to_image_space(left_point, boundary_radius, resolution),\n Construction._point_to_image_space(right_point, boundary_radius, resolution))", "def find_out_difference_perpendiculars(lap, ref_lap):\n\n distances = []\n\n for i in lap.index:\n point = lap.loc[i]\n\n closest_index = find_closest_point(point, ref_lap)\n closest_point = ref_lap.loc[closest_index]\n\n neighbor_i = len(ref_lap) - 1 if closest_index == 0 else closest_index - 1\n neighbor1 = ref_lap.loc[neighbor_i]\n neighbor_i = 0 if len(ref_lap) == closest_index + 1 else closest_index + 1\n neighbor2 = ref_lap.loc[neighbor_i]\n\n v1 = create_vector(closest_point, point)\n v2 = create_vector(closest_point, neighbor1)\n v3 = create_vector(closest_point, neighbor2)\n\n angle1 = find_angle_between_vectors(v1, v2)\n angle2 = find_angle_between_vectors(v1, v3)\n\n degrees90 = math.pi / 2\n min_dist = -1\n if angle1 > degrees90 and angle2 > 
degrees90:\n min_dist = line_length(point.LAT, point.LON, closest_point.LAT, closest_point.LON)\n elif angle1 < degrees90 and angle2 < degrees90:\n dist1 = find_shortest_distance(point, closest_point, neighbor1)\n dist2 = find_shortest_distance(point, closest_point, neighbor2)\n min_dist = dist1 if dist1 <= dist2 else dist2\n elif angle1 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor1)\n elif angle2 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor2)\n\n if min_dist == -1:\n print('ERROR: Could not find distance')\n print(\"Indices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif math.isnan(min_dist):\n print(\"NAN value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif min_dist < 0:\n print(\"Negative value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n else:\n min_dist = degrees2kilometers(min_dist) * 100000 # in centimeters\n distances.append(min_dist)\n\n return distances", "def match_min2(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n dist_min=zeros(np1)*1.\n x2=zeros(np1)*1.\n y2=zeros(np1)*1.\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n x2[j],y2[j]=a2[0,i_min],a2[1,i_min]\n match[j]=i_min\n \n salida=list(a1)\n salida.append(x2)\n salida.append(y2)\n\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)", "def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y", "def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def line_line_intersection(a1: Vector3, a2: Vector3, b1: Vector3, b2: Vector3) -> Vector3:\n # From https://stackoverflow.com/a/20677983/7245441\n\n def det(a: Vector3, b: Vector3) -> float:\n return a.x * b.y - a.y * b.x\n\n y_diff = Vector3(a1.y - a2.y, b1.y - b2.y, 0)\n x_diff = Vector3(a1.x - a2.x, b1.x - b2.x, 0)\n\n div = det(x_diff, y_diff)\n if div == 0:\n raise Exception(\"Lines do not intersect\")\n\n d = Vector3(det(a1, a2), det(b1, b2), 0)\n x = det(d, x_diff) / div\n 
y = det(d, y_diff) / div\n\n return Vector3(x, y, 0)", "def section_coordinates():\n \n gh_width = 30.0 # in feet\n gh_width_west = gh_width/2.0\n N_x = 100\n dx = gh_width_west/100.0\n gh_length = 48 # in feet\n \n xvalues = np.linspace(0,(N_x)*dx,N_x+1) # array for width\n yvalues = np.linspace(0,gh_length,num=gh_length+1) # array for height\n zvalues_west = np.zeros(N_x+1) # array for height\n \n for i in range(0,len(xvalues)):\n zvalues_west[i] = 7.29944696 + (1.27415518*xvalues[i]) + (-0.0680139854*xvalues[i]**2) + (0.00152035861*xvalues[i]**3)\n i += 1\n \n roof_slopes_west = np.zeros(N_x+1)\n roof_lengths = np.zeros(N_x+1)\n\n total_length_west = 0\n\n for i in range(1,len(xvalues)):\n dz = zvalues_west[i] - zvalues_west[i-1]\n roof_slopes_west[i] = dz/dx\n roof_lengths[i] = (dz**2 + dx**2)**0.5\n total_length_west += roof_lengths[i]\n \n zvalues_east = np.flip(zvalues_west, axis=0)\n zvalues_west = zvalues_west[:-1]\n zvalues = np.concatenate((zvalues_west, zvalues_east), axis=0)\n \n xx, yy = np.meshgrid(xvalues, yvalues) \n \n plt.plot(xx, yy, marker='.', color='k', linestyle='none')\n plt.axis('equal')\n plt.show() \n\n return roof_slopes_west", "def line_plot_coord(list_start,list_finish):\n cv2.line(image, (int(list_start[0]), int(list_start[1])), (int(list_finish[0]), int(list_finish[1])),\n (255, 255, 255), 1)\n\n grad = np.array(list_finish) - np.array(list_start)\n grad = grad[1]/grad[0]\n # orthograd = -1/grad\n # r = 5\n # theta = np.abs(np.arctan(grad))\n # delta_y = np.sin(theta+np.pi/2)*r\n # delta_x = np.cos(theta+np.pi/2)*r\n\n #cv2.line(image, (int(list_start[0]- delta_x) , int(list_start[1]- delta_y)), (int(list_start[0] + delta_x), int(list_start[1] + delta_y)),\n # (255, 255, 255), 1)\n\n #cv2.line(image, (int(list_finish[0]- delta_x) , int(list_finish[1]- delta_y)), (int(list_finish[0] + delta_x), int(list_finish[1] + delta_y)),\n # (255, 255, 255), 1)", "def endpoints(line_points):\n neighbors = []\n for p in line_points:\n aux = 0\n for q in line_points:\n if np.linalg.norm(p-q) == 1:\n aux += 1\n neighbors.append(aux)\n e_points = np.where(np.array(neighbors)==1)\n return line_points[e_points]", "def crossLine(self, other):\n a, b = self.point\n c, d = other.point\n m, n = self.vector\n o, p = other.vector\n if n * o == m * p: # The lines are parallels\n return None\n elif self.angle == -math.pi / 2:\n return Point(a, d)\n elif other.angle == -math.pi / 2:\n return Point(b, c)\n else:\n x = (a * n * o - b * m * o - c * m * p + d * m * o) / (n * o - m * p)\n y = (x - a) * n / m + b\n return Point(x, y)", "def test_line_to_points(self):\n delta = 1\n # Create simple line\n L = numpy.array([[0, 0], [2, 0]])\n V = points_along_line(L, 1)\n\n expected_V = [[0, 0], [1, 0], [2, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V, expected_V))\n assert numpy.allclose(V, expected_V), msg\n\n # Not starting at zero\n # Create line\n L2 = numpy.array([[168, -2], [170, -2], [170, 0]])\n V2 = points_along_line(L2, delta)\n\n expected_V2 = [[168, -2], [169, -2], [170, -2],\n [170, -1], [170, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V2, expected_V2))\n assert numpy.allclose(V2, expected_V2), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'indonesia_highway_sample.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = points_along_line(P, delta)\n\n # Check against reference centroid\n expected_v = [[106.7168975, -6.15530081],\n [106.85224176, -6.15344678],\n [106.93660016, 
-6.21370279]]\n assert numpy.allclose(C, expected_v, rtol=1.0e-8)\n\n # Store points to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_points_along_line',\n suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test points_along_line')\n V.write_to_file(out_filename)", "def _distance2_outline_from_lines(outline, lines, h_outline=None, h_lines=None):\n h_outline = [_homogenous_line(*segment) for segment in outline] if h_outline is None else h_outline\n h_lines = [_homogenous_line(*segment) for segment in lines] if h_lines is None else h_lines\n\n approaches=[]\n for out_idx, (out, h_out) in enumerate(zip(outline, h_outline)):\n data = [(_distance2_line_segments(out, line, h_out, h_line),line_idx) for line_idx, (line, h_line) in enumerate(zip(lines, h_lines))]\n closest = sorted(data, key=lambda x, idx: x[0])[0]\n # distance2, line segement index involved, point involved, line/other point involved\n approaches.append((closest[0][0], closest[1], closest[0][1], closest[0][2]))\n\n return approaches", "def _circleCircleTangentsXY(c1,c2):\n\n a = c1[1][0]\n b = c2[1][0]\n if a>b:\n bigIsOne=True\n bigC = c1\n smallC = c2\n else:\n bigIsOne=False\n bigC = c2\n smallC = c1\n ## Consdier the triangle created by the center of the small\n ## circle, the center of the large circle, and the point at the 90\n ## degree intersection of the line from the center of the small\n ## circle to the radian of the tangent point on the large circle.\n ## This is a right triangle with one leg of length d (distance of\n ## centers), one leg of length bigR-smallR, and one leg of unknown\n ## length, beta. theta is the angle formed by d and beta, which is\n ## also the angle of one of the the tangent lines, the other being\n ## -theta.\n ## \n ## we will calulate theta as follows:\n ## beta^2 - (r2-r1)^2 = d^2\n ## beta = sqrt( d^2 - (r2-r1)^2 )\n ## theta = atan ((r2-r1)/beta)\n \n r1 = smallC[1][0]\n r2 = bigC[1][0]\n\n d = dist(c1[0],c2[0])\n mpd = mpm.mpf(d)\n dr = r2-r1\n mpdr = mpm.mpf(dr)\n\n if d <= dr: #centers too close\n raise ValueError('circleCircleTangentsXY: centers of circles too close')\n \n beta = mpm.sqrt( mpd*mpd - mpdr*mpdr)\n theta = float(mpm.atan2(dr,beta))\n\n ## now, figure out the angle created by the center of the large\n ## circle with respect to the small circle\n dd = sub(bigC[0],smallC[0])\n phi = atan2(dd[1],dd[0])\n\n ## the two lines have angle phi+theta, and phi-theta. 
The\n ## intersection point of these lines is at the point on the circle\n ## phi+theta+90', and phi-theta-90'\n gamma1 = phi+theta+pi/2\n gamma2 = phi-theta-pi/2\n n1 = point(cos(gamma1),sin(gamma1))\n n2 = point(cos(gamma2),sin(gamma2))\n p1 = add(scale3(n1,r1),smallC[0])\n p2 = add(scale3(n1,r2),bigC[0])\n p3 = add(scale3(n2,r1),smallC[0])\n p4 = add(scale3(n2,r2),bigC[0])\n\n l1 = l2 = []\n if bigIsOne:\n l1=line(p2,p1)\n l2=line(p4,p3)\n else:\n l1 = line(p1,p2)\n l2 = line(p3,p4)\n\n return [l1,l2]", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def vary_distance_lines(self, n_lines,\n start_theta=-180.0, end_theta=+180.0):\n delta_theta = end_theta - start_theta\n thetas = [start_theta + ((float(i) / (n_lines - 1)) * delta_theta)\n for i in range(n_lines)]\n thetas = list(map(np.radians, thetas))\n\n lines = []\n for theta in thetas:\n low = self.build_input(0.0, theta)\n high = self.build_input(self.intersection_distance(theta), theta)\n lines.append((low, high))\n return lines", "def get_lat_offsets(self):\n\n startlat = self.parameters['startlatitude']\n stoplat = self.parameters['stoplatitude']\n\n #Given the start and stops,\n startidx, startvalue = utils.getnearest(self.latitudes, startlat)\n stopidx, stopvalue = utils.getnearest(self.latitudes, stoplat)\n startidx -= 2\n stopidx += 2\n latslice = np.arange(startidx, stopidx + 1)\n if utils.checkmonotonic(latslice):\n latslice = latslice\n else:\n #TODO: Support pole crossing images\n logger.error('Image is pole crossing, not currently supported.')\n '''\n print \"NOT MONOTONIC\"\n #Handle wraps around the poles\n latslice = np.arange(start_idx, stop_idx + 1)\n nlats = self.startlookup.shape[1]\n greatermask = np.where(latslice >= nlats)\n latslice[greatermask] -= nlats\n lessmask = np.where(latslice < 0)\n latslice[lessmask] += self.startlookup.shape[1]\n\n self.latsort = np.argsort(latslice)\n self.latslice = latslice[self.latsort]\n self.latsort = np.argsort(self.latsort)\n '''\n latslice = None\n logger.debug('Start latitude node is {}. Nearest lookup node is {}.'.format(startlat, startidx))\n logger.debug('Stop latitude node is {}. 
Nearest lookup node is {}.'.format(stoplat, stopidx))\n return latslice", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def discretized_line(x_start, y_start, x_end, y_end, n_elements):\n n_pts = n_elements + 1\n x = np.linspace(x_start, x_end, n_pts)\n y = np.linspace(y_start, y_end, n_pts)\n x1 = x[:-1]\n y1 = y[:-1]\n x2 = x[1:]\n y2 = y[1:]\n return x1, y1, x2, y2", "def box_line_coords(self):\n not_visited = set()\n for shape in self.shapes:\n for r, c in shape:\n # get points next to the numbers in one shape\n # and check which of them are inside the same shape too\n neighbors = [(r-1, c), (r+1, c), (r, c-1), (r, c+1)]\n for next_r, next_c in neighbors:\n if (0 <= next_r < 9 and 0 <= next_c < 9 and\n (next_r, next_c) not in shape):\n \n # maximum of the point and its neighbor will be\n # the beginning of the line part that builds shape \n x0, y0 = max(next_c, c), max(next_r, r)\n x1 = (next_c == c) and c + 1 or x0\n y1 = (next_r == r) and r + 1 or y0\n not_visited.add((x0, y0, x1, y1))\n not_visited.add((x1, y1, x0, y0))\n return not_visited", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)" ]
[ "0.62555534", "0.6251809", "0.62259203", "0.619858", "0.6193819", "0.6192046", "0.61712044", "0.61675787", "0.6118712", "0.61126447", "0.60862005", "0.60712665", "0.60499895", "0.6047329", "0.60366255", "0.602664", "0.59670717", "0.5939942", "0.59200275", "0.5916448", "0.5909158", "0.5883835", "0.5858467", "0.5850972", "0.5833159", "0.58302677", "0.5820241", "0.58168304", "0.58136857", "0.5804104", "0.57831275", "0.5774858", "0.5761587", "0.57450604", "0.5729115", "0.5727741", "0.5727483", "0.5725855", "0.57215536", "0.57116026", "0.5711265", "0.57105744", "0.5707122", "0.56986815", "0.56879765", "0.56771857", "0.5675909", "0.5670832", "0.56644416", "0.566229", "0.56585246", "0.565655", "0.5654063", "0.5638251", "0.5630862", "0.5623985", "0.56231004", "0.561262", "0.5611283", "0.5605658", "0.56010354", "0.5599901", "0.55932885", "0.55756253", "0.5573646", "0.5567679", "0.55654204", "0.55569416", "0.55554056", "0.55502295", "0.5547982", "0.55479014", "0.5540174", "0.55268615", "0.5515004", "0.55126864", "0.5507983", "0.5507064", "0.55032486", "0.5491789", "0.54836774", "0.54835325", "0.54803157", "0.54795897", "0.54727453", "0.5465733", "0.54651743", "0.5464158", "0.5446208", "0.54442805", "0.5443832", "0.5442559", "0.54372835", "0.54360557", "0.5435372", "0.5428701", "0.5423063", "0.5420998", "0.54177254", "0.5414121" ]
0.6228341
2
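Read together, the fields above describe one retrieval record: negative_scores appears to line up index-for-index with the negatives list, and document_rank looks like the document's position among the negatives when everything is ordered by score. A minimal sketch of that reading, using the first few scores copied from the record (the interpretation is an inference from the numbers, not documented schema behavior):

# Hypothetical check: document_rank as the number of negatives that outscore the document.
negative_scores = [0.62555534, 0.6251809, 0.62259203]  # first 3 of the list above, which appears sorted descending
document_score = 0.6228341
rank = sum(s > document_score for s in negative_scores)
print(rank)  # 2, matching document_rank in the record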
This algorithm creates a cut list for a cut of depth z_thickness from (x1,y1) to (x2,y2).
def line(x1,y1,x2,y2,z_thickness,laser):
    # Requires math and json from the standard library, plus an offset() helper
    # defined elsewhere in the module.
    # Values that are used by all the cutting algorithms
    layers = int(z_thickness/laser["z_spacing"])

    # Works out the offset applied when beginning a new layer
    taper = math.tan(math.radians(laser["kerf_angle"]/2)) * laser["z_spacing"]
    taper_x,taper_y = offset(x1,y1,x2,y2,taper)

    # Works out the offset between each parallel scan on the same layer
    delta_x,delta_y = offset(x1,y1,x2,y2,laser["xy_spacing"])

    # Works out the maximum offset from the starting line; this must not be exceeded at any point
    max_taper = math.tan(math.radians(laser["kerf_angle"]/2)) * (z_thickness) * 2
    max_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)
    #max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y

    # Loops through each layer, fitting as many parallel raster scans as the maximum offset allows
    cutlist = []
    for a in range(layers):
        new_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y
        i = 0
        cutlist.append(["z_step", str(-laser["z_spacing"])])
        while abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):
            # Alternating the scan direction with i reduces the jump distance between individual scans
            if i % 2 == 0:
                cutlist.append(["jump", f"{new_x1:.6f}", f"{new_y1:.6f}"])
                cutlist.append(["mark", f"{new_x2:.6f}", f"{new_y2:.6f}"])
            else:
                cutlist.append(["jump", f"{new_x2:.6f}", f"{new_y2:.6f}"])
                cutlist.append(["mark", f"{new_x1:.6f}", f"{new_y1:.6f}"])
            new_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y
            i = i + 1
        # Having completed one layer, the laser moves down to begin the next one
        max_delta_x = max_delta_x - taper_x
    cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"])
    cutlist.append(["stop_trigger"])
    return json.dumps(cutlist)
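A minimal usage sketch for line() follows. The offset() helper is not part of the record above, so the version here is an assumption reconstructed from its call sites (the x/y components of a perpendicular offset of the given magnitude from the segment), and the laser parameter values are illustrative only, not taken from any real machine profile.

import json
import math

def offset(x1, y1, x2, y2, distance):
    # Assumed helper: components of a vector of length `distance`
    # perpendicular to the segment (x1,y1)->(x2,y2).
    length = math.hypot(x2 - x1, y2 - y1)
    return (-(y2 - y1) / length * distance, (x2 - x1) / length * distance)

laser = {
    "z_spacing": 0.05,   # illustrative: depth removed per layer
    "xy_spacing": 0.02,  # illustrative: spacing between parallel scans
    "kerf_angle": 10,    # illustrative: full kerf angle in degrees
}

commands = json.loads(line(0.0, 0.0, 5.0, 0.0, z_thickness=0.2, laser=laser))
print(commands[0])  # ['set_trigger4', '1', '0', '7', '8', '45']
print(commands[1])  # ['z_step', '-0.05']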
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers):\r\n\tcutlist = []\r\n\ty_max = abs(y1-y2)\r\n\tfor a in range(layers):\r\n\t\ti = 0\r\n\t\tnew_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y\r\n\t\twhile abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0:\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_y1 = new_y1-delta\r\n\t\t\ti = i + 1\r\n\t\tif a < layers - 1:\r\n\t\t\tcutlist.append([\"z_step\", str(-deltaz)])\r\n\t\ty_max = y_max - taper_straight - taper_y\r\n\r\n\treturn cutlist", "def generate_cuts(depths, side=SIDE_LENGTH):\n for num, den in depths:\n ad = num * side / den\n poly = Polygon([(0, 0), (side, 0), (side, ad), (0, ad)])\n yield poly", "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def define_neighbors(x: int, y: int, z: int) -> list:\n diffs = range(-1, 2)\n coords = []\n # might need to add some if guards (if x > 0) (if x < len(blah) etc)\n xdiffs = (x + diff for diff in diffs)\n ydiffs = (y + diff for diff in diffs)\n zdiffs = (z + diff for diff in diffs)\n neighbors = product(xdiffs, ydiffs, zdiffs)\n for index, neighbor in enumerate(neighbors):\n if neighbor != (x, y, z) and all(c >= 0 for c in neighbor):\n coords.append(neighbor)\n return coords", "def _quickhull(self, pt1, pt2, point_list):\n if 
not point_list:\n return []\n pt3 = max(point_list, key=lambda p: oriented_area(pt1, p, pt2))\n # Nie trzeba dzielic przez abs(pt2-pt1).\n list1 = self._points_on_the_right(pt1, pt3, point_list)\n list2 = self._points_on_the_right(pt3, pt2, point_list)\n return (self._quickhull(pt1, pt3, list1) + [pt3]\n + self._quickhull(pt3, pt2, list2))", "def visualise(cut_list): \r\n\tcutlist = json.load(cut_list)\r\n\tmodified_list =[]\r\n\tz_set = 0\r\n\tc_set = 0\r\n\ta_set = 0\r\n\tcut_num = 0\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\ta.pop(0)\r\n\t\t\ta = list(map(float,a)) + [z_set]\r\n\t\t\t\r\n\t\t\tif a_set != 0 or c_set != 0:\r\n\t\t\t\ta = rotate_a(a_set,a)\r\n\t\t\t\ta = rotate_c(c_set,a_set,a)\r\n\r\n\t\t\ta = a +[f\"a_set {a_set} c_set {c_set} z_set {z_set:.1f} cut_num {cut_num}\"]\r\n\t\t\tmodified_list.append(a)\r\n\r\n\t\telif a[0] == \"z_abs\":\r\n\t\t\tz_set = float(a[1])\r\n\t\t\tcut_num += 1\r\n\t\telif a[0] == \"c_abs\":\r\n\t\t\tc_set = float(a[1])\r\n\t\telif a[0] == \"a_abs\":\r\n\t\t\ta_set = float(a[1])\r\n\r\n\t\telif a[0] == \"z_rel\" or a[0] == \"z_step\":\r\n\t\t\tz_set = z_set + float(a[1])\r\n\t\telif a[0] == \"c_rel\" or a[0] == \"c_step\":\r\n\t\t\tc_set = c_set + float(a[1])\r\n\t\telif a[0] == \"a_rel\" or a[0] == \"a_step\":\r\n\t\t\ta_set = a_set + float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\tdf = pd.DataFrame(modified_list, columns = [\"x\",\"y\",\"z\",\"layer\"])\r\n\tfig = px.line_3d(df,\"x\",\"y\",\"z\",color=\"layer\")\r\n\t#fig.update_layout(scene_aspectmode = \"data\")\r\n\tfig.show()", "def cut_points(data_x, data_y, data_z, std_cut=3.0, debug=False):\n\n assert len(data_x) == len(data_y) == len(data_z)\n dat = numpy.zeros((len(data_x), 3))\n dat[:,0] = data_x\n dat[:,1] = data_y\n dat[:,2] = data_z\n\n mean = dat.mean(axis=0)\n diff_vec = dat - mean\n distances = map(numpy.linalg.norm, diff_vec)\n\n norm_dist = (distances - numpy.mean(distances)) / numpy.std(distances)\n\n # cut everything that is more than std_cut std deviations from center\n keeper_idx = numpy.where(norm_dist <= std_cut)[0]\n\n assert len(keeper_idx) <= len(data_x)\n print \"cut %f std deviations, keeping %i/%i points\" % (std_cut, len(keeper_idx), len(data_x))\n\n if debug:\n import pylab\n pylab.hist(norm_dist, bins=100)\n pylab.show()\n\n return keeper_idx", "def buildZGrid(self, plot=False):\r\n\r\n print(\"Constructing Z corners\")\r\n\r\n # self.zcorn = np.array(self.zcorn, dtype=float)\r\n # temp = np.zeros( ((self.ne+1)*(self.nn+1)*self.nz) )\r\n temp = []\r\n count = 0\r\n for item in self.zcorn:\r\n\r\n if \"*\" in item:\r\n ct = (int)(item.split(\"*\")[0])\r\n vl = (float)(item.split(\"*\")[1])\r\n temp += np.tile(vl, ct).tolist()\r\n count += ct\r\n else:\r\n temp += [(float)(item)]\r\n count += 1\r\n\r\n # layers = np.resize(temp, (8, self.ne*self.nn*self.nz ))\r\n layers = np.resize(temp, (self.nz * 2, self.ne * self.nn * 4))\r\n \"\"\"\r\n plt.plot(newtemp[0,:]) # TOP 0 0\r\n plt.plot(newtemp[1,:]) # SAME -- # BOTTOM 0 1\r\n #plt.plot(newtemp[2,:]) # SAME -- # TOP 1 2\r\n\r\n plt.plot(newtemp[3,:]) # SAME -- # BOTTOM 1 3\r\n #plt.plot(newtemp[4,:]) # SAME -- # TOP 2 4\r\n\r\n plt.plot(newtemp[5,:]) # SAME -- # BOTTOM 2 5\r\n #plt.plot(newtemp[6,:]) # SAME -- # TOP 3 6\r\n plt.plot(newtemp[7,:]) # BOTTOM 3 7\r\n \"\"\"\r\n self.ZZT = {} # zztop ha ha...two year's later this is still funny -TI\r\n self.ZZB = {}\r\n for ilay in range(self.nz):\r\n self.ZZT[ilay] = np.zeros((self.ndx, self.ndy))\r\n self.ZZB[ilay] = np.zeros((self.ndx, 
self.ndy))\r\n iis = 0\r\n # plt.plot(layers[ilay*2])\r\n for iin in range(self.nn):\r\n nears = {}\r\n fars = {}\r\n bnears = {}\r\n bfars = {}\r\n for iif in range(2):\r\n # top\r\n nears[iif] = layers[ilay * 2][iis:iis + 2 * self.ne][0::2].tolist()\r\n fars[iif] = layers[ilay * 2][iis:iis + 2 * self.ne][1::2].tolist()\r\n layers[ilay * 2][iis:iis + 2 * self.ne][0::2] *= 0. # check\r\n layers[ilay * 2][iis:iis + 2 * self.ne][1::2] *= 0.\r\n nears[iif].append(fars[iif][-1])\r\n fars[iif] = [nears[iif][0]] + fars[iif]\r\n # bottom\r\n bnears[iif] = layers[ilay * 2 + 1][iis:iis + 2 * self.ne][0::2].tolist()\r\n bfars[iif] = layers[ilay * 2 + 1][iis:iis + 2 * self.ne][1::2].tolist()\r\n layers[ilay * 2 + 1][iis:iis + 2 * self.ne][0::2] *= 0.\r\n layers[ilay * 2 + 1][iis:iis + 2 * self.ne][1::2] *= 0.\r\n bnears[iif].append(bfars[iif][-1])\r\n bfars[iif] = [bnears[iif][0]] + bfars[iif]\r\n #\r\n iis += 2 * self.ne\r\n\r\n self.ZZT[ilay][:, iin] = nears[0]\r\n self.ZZB[ilay][:, iin] = bnears[0]\r\n # NaN mask for visualizing, but can be sort of a pain to deal with\r\n # imask = np.nonzero( 1-self.ActiveCells[:,iin,ilay] )\r\n # self.ZZT[ilay][:,iin][1::][imask] = np.nan\r\n # self.ZZB[ilay][:,iin][1::][imask] = np.nan\r\n # if self.ActiveCells[0,iin,ilay] == 0:\r\n # self.ZZT[ilay][:,iin][0] = np.nan\r\n # self.ZZB[ilay][:,iin][0] = np.nan\r\n if iin == self.nn - 1:\r\n self.ZZT[ilay][:, iin + 1] = fars[1]\r\n self.ZZB[ilay][:, iin + 1] = bfars[1]\r\n # NaN mask\r\n # self.ZZT[ilay][:,iin+1][1::][imask] = np.nan\r\n # self.ZZB[ilay][:,iin+1][1::][imask] = np.nan\r\n # if self.ActiveCells[0,iin,ilay] == 0:\r\n # self.ZZT[ilay][:,iin+1][0] = np.nan\r\n # self.ZZB[ilay][:,iin+1][0] = np.nan\r\n\r\n print(\"Layers ||\", np.linalg.norm(layers), \"||\")\r\n # exit()\r\n\r\n # visualize\r\n if plot:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n # ax.plot_wireframe( self.X0, self.Y0, self.Z0, rstride=1, cstride=1)\r\n\r\n ax.plot_wireframe(self.X0, self.Y0, self.ZZT[0], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[1], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[2], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[3], rstride=1, cstride=1, color=\"blue\")\r\n\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZB[3], rstride=1, cstride=1, color=\"green\")\r\n\r\n plt.gca().set_xlim(np.min(self.X0), np.max(self.X0))\r\n plt.gca().set_ylim(np.max(self.Y0), np.min(self.Y0))\r\n # plt.gca().set_zlim( np.max(self.ZZB[3]), np.min(self.ZZT[0]) )\r\n plt.gca().set_zlim(5000, 4000)\r\n plt.savefig(\"mesh.png\")\r\n plt.show()", "def thickenXYList( list, tester, biSectionMax=6, interpolation=xDataEnumsModule.Interpolation.linlin):\n\n def thickenXYList2( interpolation, xl, yl, xu, yu, newList, tester, level ) :\n\n if( level == biSectionMax ) : return\n level += 1\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.loglin:\n xMid = 0.5 * ( xl + xu )\n else :\n xMid = math.sqrt( xl * xu );\n\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.linlog:\n yMid = 0.5 * ( yl + yu )\n else :\n yMid = math.sqrt( yl * yu )\n\n y = tester.evaluateAtX( xMid )\n\n dy = abs( y - yMid )\n if( ( dy > abs( y * tester.relativeTolerance ) ) and ( dy > tester.absoluteTolerance ) ) :\n newList.append( [ xMid, y ] )\n thickenXYList2( interpolation, xl, yl, xMid, 
y, newList, tester, level )\n thickenXYList2( interpolation, xMid, y, xu, yu, newList, tester, level )\n\n if( len( list ) < 2 ) : raise Exception( \"len( list ) = %d < 2\" % len( list ) )\n newList = []\n for i1, xy in enumerate( list ) :\n x2, y2 = xy\n if( i1 > 0 ) : thickenXYList2( interpolation, x1, y1, x2, y2, newList, tester, 0 )\n newList.append( [ x2, y2 ] )\n x1, y1 = x2, y2\n newList.sort( )\n return( newList )", "def corner_combinations(zdim: int):\n return combinations(range(zdim), 2)", "def cuts(self) -> list[list[int]]:\n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts", "def get_obstList(self,X,Y,Z):\n \n \t#Bed\n floor_part = np.array(np.where(Y < 2*self.cyl_rad)).flatten()\n\t\n\t#Piling\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n\n # then add the cylinder\n obst_list = np.union1d(floor_part[:],cyl_part[:])\n \n return list(obst_list[:])", "def z_focus(block,cut,laser):\r\n\tcutlist = []\r\n\titerations = int(cut[\"final_dimension_z\"]/laser[\"z_spacing\"])\r\n\t#Currently x,y is decided to take up a good amount of the block, rather than having set distances and sizes\r\n\ty = cut[\"final_dimension_y\"]/2\r\n\toffset = laser[\"xy_spacing\"]\r\n\tx = 0\r\n\r\n\tcutlist.append([\"z_abs\",\"0\"])\r\n\tfor a in range(iterations):\r\n\t\tcutlist.append([\"jump\", f\"{x:.6f}\", f\"{y:.6f}\"])\r\n\t\tcutlist.append([\"mark\", f\"{x:.6f}\", f\"{-y:.6f}\"])\r\n\t\tcutlist.append([\"z_rel\", str(-laser[\"z_spacing\"])])\r\n\t\tx = x + offset\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def find_cuts(width, height, isOdd):\n\n if isOdd:\n return width//2, height//2\n\n if width % 4 == 0:\n return width//2, height//2\n return (width-2)//2, (height-2)//2", "def calc_thickness(self):\n s = \"::: calculating z-varying thickness :::\"\n print_text(s, cls=self)\n #H = project(self.S - self.x[2], self.Q, annotate=False)\n H = self.vert_integrate(Constant(1.0), d='down')\n Hv = H.vector()\n Hv[Hv < 0] = 0.0\n print_min_max(H, 'H', cls=self)\n return H", "def graph_3d_grid(xyz, k=18):\n if np.size(xyz) == 0:\n return None\n lxyz = xyz - xyz.min(0)\n m = 3 * lxyz.max(0).sum() + 2\n\n # six neighbours\n n6 = [np.array([1, m, m ** 2]), np.array([m ** 2, 1, m]),\n np.array([m, m ** 2, 1])]\n\n # eighteen neighbours\n n18 = [np.array([1 + m, 1 - m, m ** 2]),\n np.array([1 + m, m - 1, m ** 2]),\n np.array([m ** 2, 1 + m, 1 - m]),\n np.array([m ** 2, 1 + m, m - 1]),\n np.array([1 - m, m ** 2, 1 + m]),\n np.array([m - 1, m ** 2, 1 + m])]\n\n # twenty-six neighbours\n n26 = [np.array([1 + m + m ** 2, 1 - m, 1 - m ** 2]),\n np.array([1 + m + m ** 2, m - 1, 1 - m ** 2]),\n np.array([1 + m + m ** 2, 1 - m, m ** 2 - 1]),\n np.array([1 + m + m ** 2, m - 1, m ** 2 - 1])]\n\n # compute the edges in each possible direction\n def create_edges(lxyz, nn, l1dist=1, left=np.array([]), right=np.array([]),\n weights=np.array([])):\n q 
= 0\n for nn_row in nn:\n v1 = np.dot(lxyz, nn_row)\n o1 = np.argsort(v1)\n sv1 = v1[o1]\n nz = np.squeeze(np.nonzero(sv1[: - 1] - sv1[1:] == - l1dist))\n o1z, o1z1 = o1[nz], o1[nz + 1]\n left = np.hstack((left, o1z, o1z1))\n right = np.hstack((right, o1z1, o1z))\n q += 2 * np.size(nz)\n weights = np.hstack((weights, np.sqrt(l1dist) * np.ones(q)))\n return left, right, weights\n\n i, j, d = create_edges(lxyz, n6, 1.)\n if k >= 18:\n i, j, d = create_edges(lxyz, n18, 2, i, j, d)\n if k == 26:\n i, j, d = create_edges(lxyz, n26, 3, i, j, d)\n i, j = i.astype(np.int_), j.astype(np.int_)\n\n # reorder the edges to have a more standard order\n order = np.argsort(i + j * (len(i) + 1))\n i, j, d = i[order], j[order], d[order]\n return i, j, d", "def cutting_characters(character, image_2cut):\n\n preparing = []\n m = len(character)\n image_2cut = image_2cut.copy()\n\n for n in character:\n\n # The information is extracted from the the tupla n in character list.\n # For more information about this coordinates check the Bounding Rectangle function resources\n ulc_X = n[0]\n ulc_Y = n[1]\n\n width = n[2]\n height = n[3]\n\n #There is asigned new name to the above information and is constructed the rectangle.\n start_x = int(ulc_X)\n start_y = int(ulc_Y)\n\n width_new = int(width)\n height_new = int(height)\n\n\n final_x = start_x + width_new\n final_y = start_y + height_new\n\n # A width and height outter value is placed that allow a prudential margin of the principal content.\n width_outer = 25\n height_outer = 45\n\n\n #Then the rectangle is constructed with these outter width and heigt and the x and y coordinate are displaced too.\n x_outer = int(ulc_X) - 4\n y_outer = int(ulc_Y) - 6\n\n outer_xf = x_outer + width_outer\n outer_yf = y_outer + height_outer\n\n # Both rectangles are cutted by image_2cut\n\n rec_char_outer = image_2cut[y_outer:outer_yf, x_outer:outer_xf]\n\n rec_char_inter = image_2cut[start_y:final_y, start_x: final_x]\n\n # Imperfections are corrected and filling with white color by filling_white\n\n prep = filling_white(rec_char_outer, rec_char_inter)\n\n prep, _= resizing(prep, prep, 15)\n\n preparing.append(prep)\n\n return preparing", "def cut_bonds_z_highest(xy, NL, KL, BL, target_z, check=False):\n print ' Cutting bonds z...'\n NP = len(xy)\n NN = np.shape(NL)[1]\n\n # Identify boundary pts, bulk pts\n print ' cut_bonds_z : extract boundary...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n bulk = np.setdiff1d(np.arange(NP), boundary)\n NP_bulk = len(bulk)\n NP_bound = len(np.unique(boundary))\n print 'NP_bound = ', NP_bound\n print 'NP_bulk = ', NP_bulk\n\n # Define bulk bonds as connecting at least one bulk particle\n is_a = np.in1d(BL[:, 0], bulk)\n is_b = np.in1d(BL[:, 1], bulk)\n binds = np.where(np.logical_or(is_a, is_b))[0]\n Binds = np.setdiff1d(np.arange(len(BL)), binds)\n BLbulk = BL[binds]\n BLboun = BL[Binds]\n\n # bBinds bonds connect bulk to boundary\n # Treat these as is connecting bulk(z) to bulk(z)\n bBinds = np.where(np.logical_xor(is_a, is_b))[0]\n BLbB = BL[bBinds]\n\n print 'len(binds) = ', len(binds)\n print 'len(Binds) = ', len(Binds)\n\n # Check\n if check:\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'bo-')\n for bii in binds:\n XX = xy[BL[bii], 0]\n YY = xy[BL[bii], 1]\n plt.plot(XX, YY, 'b-')\n\n for Bii in Binds:\n XX = xy[BL[Bii], 0]\n YY = xy[BL[Bii], 1]\n plt.plot(XX, YY, 'r-')\n\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n # number of bonds to 
cut in the bulk\n # Be sure to divide the number of bonds by 2, since each bond double counts\n # Can write in terms of bonds? 2have = zt\n # nbulk2cut = int(max([0,round((z_start - target_z)*0.5*float(NP_bulk))]))\n # nbulk2have = len(binds) - nbulk2cut\n # print 'nboun2have = ', nboun2have\n # print 'nbulk2have = ', nbulk2have\n\n # CUT BONDS FROM HIGHEST Z NODES (sum of endpts)\n # Unfortunately, this has to be done iteratively.\n # Algorithm: find zvals of all bonds. For all bonds with zval = max(zval),\n # cut all the bonds that don't share endpts with any of the other bonds.\n # Find these by going through in-place-randomized B2cut and cross off if later bonds share indices.\n # Let boundary bonds be cut, or not, and pay no attention to them, since lattice will be cropped.\n\n # First cut most coordinated, whether on bulk or boundary, but keep track of which.\n # Get bonds with highest z pairs of nodes\n NN = np.shape(KL)[1]\n zz = np.sum(KL, axis=1)\n # print 'zz = ', zz\n zbulk = float(np.sum(zz[bulk])) / float(len(bulk))\n print 'zbulk so far = ', zbulk\n\n # As long as we haven't cut enough bonds, cut some more\n while zbulk > target_z:\n print 'zbulk = ', zbulk\n zb = zz[BL[:, 0]] + zz[BL[:, 1]]\n zcut = np.where(zb == max(zb))[0]\n np.random.shuffle(zcut)\n B2cut = BL[zcut]\n # print 'B2cut = ', B2cut\n\n # Check --> show bond numbers and bond to cut\n if check:\n display_lattice_2D(xy, BL, close=False)\n # for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n plt.title('Initial counting marks these')\n plt.pause(0.01)\n plt.clf()\n\n # print 'B2cut = ', B2cut\n # Cross off if later bonds share indices\n keep = np.ones(len(B2cut), dtype=bool)\n for ii in range(len(B2cut)):\n row = B2cut[ii]\n if row[0] in B2cut[ii + 1:, :].ravel():\n # print 'found ', row[0], 'in rest of array '\n # print ' --> len BL[ii+1:,:] = ', len(B2cut[ii+1:,:] )\n keep[ii] = False\n elif row[1] in B2cut[ii + 1:, :].ravel():\n keep[ii] = False\n\n # print 'keep = ', keep\n # print 'keep.any() = ', keep.any()\n if keep.any():\n B2cut = B2cut[keep]\n else:\n print 'The highest nodes are all connected to at least one other. Killing one bond...'\n B2cut = B2cut[0:1]\n\n # Only interested in the bulk bonds for measurement, but cutting boundary\n # bonds will get us out of a situation where bulk is less coordinated than\n # boundary so don't do --> B2cut = intersect2d(B2cut,BLbulk)\n\n N2cut = len(B2cut)\n\n # See what would happen if we cut all of these\n BLt = dh.setdiff2d(BL, B2cut)\n NLt, KLt = BL2NLandKL(BLt, NP=NP, NN=NN)\n zzt = np.sum(KLt, axis=1)\n zbulk = np.float(np.sum(zzt[bulk])) / float(len(bulk))\n\n # If we can cut all of these, do that. Otherwise, cut only as many as needed after shuffling.\n if len(np.where(zzt == 0)[0]) > 0:\n print 'There are dangling points. 
Removing bonds2cut that would make these...'\n            # There are dangling points.\n            # Remove the bonds that make zzt elems zero from the bonds to cut list\n            # and recalculate.\n            dangle_pts = np.where(zzt == 0)[0]\n            # protect dangle points --> there is only one bond to find since we have run a \"keep\" search on B2cut\n            inb0 = np.where(np.in1d(B2cut[:, 0], dangle_pts))[0]\n            inb1 = np.where(np.in1d(B2cut[:, 1], dangle_pts))[0]\n            keep = np.setdiff1d(np.arange(len(B2cut)), inb0)\n            keep = np.setdiff1d(keep, inb1)\n            print 'Protecting dangling bond: keep for dangle =', keep\n\n            # Check --> show bond numbers and bond to cut and protect (dangles)\n            if check:\n                display_lattice_2D(xy, BL, close=False)\n                for ii in range(len(BL)):\n                    # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n                    plt.text((xy[BL[ii, 0], 0] + xy[BL[ii, 1], 0]) * 0.5, (xy[BL[ii, 0], 1] + xy[BL[ii, 1], 1]) * 0.5,\n                             str(zb[ii]))\n                for row in B2cut:\n                    plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n                plt.plot([xy[B2cut[keep, 0], 0], xy[B2cut[keep, 1], 0]], [xy[B2cut[keep, 0], 1], xy[B2cut[keep, 1], 1]],\n                         'b-', lw=5)\n                plt.show()\n                plt.clf()\n\n            B2cut = B2cut[keep]\n            N2cut = len(B2cut)\n\n            BLt = dh.setdiff2d(BL, B2cut)\n            NLt, KLt = BL2NLandKL(BLt, NP=NP, NN=NN)\n            zzt = np.sum(KLt, axis=1)\n            zbulk = np.float(np.sum(zzt[bulk])) / float(len(bulk))\n\n            # If we end up in a place where these are the only bonds to cut, raise exception\n            # --> means target_z is just too low for our given lattice.\n            if np.size(B2cut) == 0:\n                raise RuntimeError('target_z is too low for the given lattice! Cutting bonds led to dangling points.')\n\n        if zbulk > target_z:\n            print 'Still above: zbulk = ', zbulk\n\n            # Check --> show bond numbers and bond to cut\n            if check:\n                display_lattice_2D(xy, BL, close=False)\n                # for ii in range(len(BL)):\n                # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n                # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(zb[ii]))\n                for row in B2cut:\n                    plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n\n                plt.pause(0.01)\n                plt.clf()\n\n            # move pointers\n            BL, BLt = BLt, BL\n            NL, NLt = NLt, NL\n            KL, KLt = KLt, KL\n            zz, zzt = zzt, zz\n        else:\n            print 'Approaching z = ', target_z, ' tuning one bond at a time...'\n            # Cut a bond unless there is only one to cut\n            # (in which case we are within threshold)\n            if N2cut == 1:\n                zbulk = 0.\n                # move pointers\n                BL, BLt = BLt, BL\n                NL, NLt = NLt, NL\n                KL, KLt = KLt, KL\n                zz, zzt = zzt, zz\n            else:\n                # Check --> show bond numbers and bond to cut\n                if check:\n                    display_lattice_2D(xy, BL, close=False)\n                    for ii in range(len(BL)):\n                        # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n                        plt.text((xy[BL[ii, 0], 0] + xy[BL[ii, 1], 0]) * 0.5,\n                                 (xy[BL[ii, 0], 1] + xy[BL[ii, 1], 1]) * 0.5, str(zb[ii]))\n                    for row in B2cut:\n                        plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n                    plt.pause(0.01)\n                    plt.clf()\n\n                BL = dh.setdiff2d(BL, B2cut[0:1])\n                NL, KL = BL2NLandKL(BL, NP=NP, NN=NN)\n                zz = np.sum(KL, axis=1)\n                print 'zz = ', zz\n                zbulk = np.float(np.sum(zz[bulk])) / float(len(bulk))\n\n    # IGNORE BOUNDARY: MUST CUT OUT DESIRED REGION. 
OTHERWISE, IT'S JUST TOO HARD TO MAKE IT RIGHT.\n # Only interested in the boundary bonds now\n # number of bonds to cut in the boundary = nbulkcut * (# boundary bonds)/(#bulk bonds)\n # nB2cut = int(round(nbulk2cut * float(len(Binds))/float(len(binds))))\n # nboun2have = len(Binds) - nB2cut\n #\n # while nboun > nboun2have:\n # zz = np.sum(KL, axis=1)\n # zb = zz[BL[:,0]] + zz[BL[:,1]]\n # zcut = np.where(zb== max(zb))[0]\n # np.random.shuffle(zcut)\n # B2cut = BL[zcut]\n # # Only interested in the boundary bonds now\n # B2cut = intersect2d(B2cut,BLboun)\n # # Cross off if later bonds share indices\n # keep = np.ones(len(B2cut),dtype = bool)\n # for ii in range(len(B2cut)):\n # row = B2cut[ii]\n # if row[0] in BL[ii+1,:].ravel():\n # keep[ii] = False\n # B2cut = B2cut[keep]\n # # Cut only as many as needed\n # nboun2cut = min([nboun - nboun2have, len(B2cut)])\n # BL = dh.setdiff2d(BL,B2cut[0:nboun2cut])\n # nboun = len(intersect2d(BL,BLboun))\n # print 'nbound so far =', nboun\n # NL, KL = BL2NLandKL(BL,NP=NP,NN=NN)\n\n zz = np.sum(KL, axis=1)\n zbulk = np.float(np.sum(zz[bulk])) / float(len(bulk))\n print 'Tuned to zbulk = ', zbulk\n\n if check:\n display_lattice_2D(xy, BL, close=False)\n plt.show()\n\n print '\\nReturning lattice with ', len(BL), ' bonds for ', NP, ' particles...'\n\n return NL, KL, BL", "def get_boundary_corners_2D(points):\r\n\tpadding=0.05\r\n\tif points.shape[0] == 3:\r\n\t\tassert (len(points.shape)==2)\r\n\t\tminPt_3d_x = np.amin(points[0,:])\r\n\t\tmaxPt_3d_x = np.amax(points[0,:])\r\n\t\tminPt_3d_y = np.amin(points[1,:])\r\n\t\tmaxPt_3d_y = np.amax(points[1,:])\r\n\r\n\t\tboudary = [minPt_3d_x-padding, maxPt_3d_x+padding, minPt_3d_y-padding, maxPt_3d_y+padding]\r\n\r\n\telse:\r\n\t\traise Exception(\"wrong dimension of points!\")\r\n\r\n\treturn boudary", "def get_obstList(self,X,Y,Z):\n \n ellip_a = 2.*2.*self.cyl_rad\n ellip_b = 2.*self.cyl_rad\n ellip_c = 8.*self.cyl_rad\n ellip_x = self.x_c\n ellip_z = self.z_c + self.cyl_rad\n ellip_y = ellip_b \n\n floor_part = np.array(np.where(Y < ellip_b)).flatten()\n\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n scour_pit = np.array(np.where( (X - ellip_x)**2/(ellip_a**2) + \n (Y - ellip_y)**2/(ellip_b**2) +\n (Z - ellip_z)**2/(ellip_c**2) <= 1.)).flatten()\n\n # remove the scour pit from the floor\n obst_list = np.setxor1d(floor_part[:], \n np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n # then add the cylinder\n obst_list = np.union1d(obst_list[:],cyl_part[:])\n \n return list(obst_list[:])", "def get_obstList(self,X,Y,Z):\n \n ellip_a = 2.*2.*self.cyl_rad\n ellip_b = 2.*self.cyl_rad\n ellip_c = 8.*self.cyl_rad\n ellip_x = self.x_c\n ellip_z = self.z_c + self.cyl_rad\n ellip_y = ellip_b \n\n floor_part = np.array(np.where(Y < ellip_b)).flatten()\n\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n scour_pit = np.array(np.where( (X - ellip_x)**2/(ellip_a**2) + \n (Y - ellip_y)**2/(ellip_b**2) +\n (Z - ellip_z)**2/(ellip_c**2) <= 1.)).flatten()\n\n # remove the scour pit from the floor\n obst_list = np.setxor1d(floor_part[:], \n np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n # then add the cylinder\n obst_list = np.union1d(obst_list[:],cyl_part[:])\n \n return list(obst_list[:])", "def drawcutline(f,layernamelist,cutline_entities_count): \r\n \r\n #layernamelist=[layernamelist[0]] \r\n layercount=0\r\n 
ringlist=[[[-0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]]]\r\n flashlist=buildflashlist()\r\n cutlineset=buildcutlineset() \r\n \r\n f.write(\"0\\nSECTION\\n2\\nENTITIES\\n\")\r\n \r\n for layername in layernamelist:\r\n layercount=layercount+1\r\n for polyline in cutlineset:\r\n cutline_entities_count=cutline_entities_count+1\r\n f.write(\"0\\nPOLYLINE\\n8\\n\"+layername+\"\\n5\\n\"+hex(cutline_entities_count)[2:]) # begin writing a polyline\r\n f.write(\"\\n66\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n40\\n0.08\\n41\\n0.08\\n\")\r\n cutline_entities_count=drawwidthpolyline(polyline, cutline_entities_count, f,layername)\r\n cutline_entities_count=drawring(ringlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawflash(flashlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawtext(cutline_entities_count, f, layername,layercount)\r\n \r\n return cutline_entities_count",
 "def get_surround(xy, dim_x=10, dim_y=10, radius=1, exclude_self=True):\n laterals = []\n for dx in range(-int(radius), int(radius)+1, 1):\n for dy in range(-int(radius), int(radius)+1, 1):\n if dx**2 + dy**2 > radius**2:\n continue\n if (xy[0]+dx >= 0) and (xy[0]+dx < dim_x) and (xy[1]+dy >= 0) and (xy[1]+dy < dim_y):\n if not (exclude_self and dx == 0 and dy == 0):\n laterals.append((xy[0]+dx, xy[1]+dy))\n return laterals",
 "def get_obstList(self,X,Y,Z):\n \n x_c_cone = self.x_c\n\tz_c_cone = self.z_c\n y_c_cone = 0\n x_s = 2.25*2*self.cyl_rad\n rad_cone = x_s + self.cyl_rad\n\th_cone = rad_cone*0.57735\n\n floor_part = np.array(np.where(Y < h_cone)).flatten()\n\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n scour_pit = np.array(np.where( (X - x_c_cone)**2 + (Z - z_c_cone)**2 <= (rad_cone/h_cone)**2*(Y - y_c_cone)**2))\n\n # remove the scour pit from the floor\n obst_list = np.setxor1d(floor_part[:], \n np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n # then add the cylinder\n obst_list = np.union1d(obst_list[:],cyl_part[:])\n \n return list(obst_list[:])",
 "def get_obstList(self,X,Y,Z):\n \n \t#Bed\n\twaveh = 0.125\n\twavel = 10 \n\tfloor_part = np.array(np.where(Y < (waveh*np.sin(wavel*Z) + 2*self.cyl_rad))).flatten()\n\t\n\t#Piling\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n\n # then add the cylinder\n obst_list = np.union1d(floor_part[:],cyl_part[:])\n \n return list(obst_list[:])",
 "def get_obstList(self,X,Y,Z):\n #Pipe_1\n\tpipe_1 = np.array(np.where((X - 1)**2 + (Y - 4)**2 >= 0.5**2)).flatten()\n\tpipe_1_stop_z = np.array(np.where(Z <= 3.0)).flatten()\n\tpipe_1_stop_y = np.array(np.where(Y >= 3.25)).flatten()\n\tpipe_1_stop = np.intersect1d(pipe_1_stop_z[:],pipe_1_stop_y[:])\n\tpipe_1 = 
np.intersect1d(pipe_1[:],pipe_1_stop[:])\n\n\t#Turn_1\n\tturn_1 = np.array(np.where((0.75 - np.sqrt((Y - 3.25)**2 + (Z -3)**2))**2 + (X - 1)**2 >= 0.5**2)).flatten()\n\tturn_1_stop_z = np.array(np.where(Z >= 3.0)).flatten()\n\tturn_1_stop_y = np.array(np.where(Y>= 1.75)).flatten()\n\tturn_1_stop = np.intersect1d(turn_1_stop_z[:],turn_1_stop_y[:])\n\tturn_1 = np.intersect1d(turn_1[:],turn_1_stop[:])\n\n\t#Pipe_2\n\tpipe_2 = np.array(np.where((X - 1)**2 + (Y - 2.5)**2 >= 0.5**2)).flatten()\n\tpipe_2_start_z = np.array(np.where(Z >= 1.5)).flatten()\n\tpipe_2_start_y_up = np.array(np.where(Y <= 3.25)).flatten()\n\tpipe_2_start_y_down = np.array(np.where(Y >= 1.75)).flatten()\n\tpipe_2_start_y = np.intersect1d(pipe_2_start_y_up[:],pipe_2_start_y_down[:])\t\n\tpipe_2_start = np.intersect1d(pipe_2_start_z[:],pipe_2_start_y[:])\n\tpipe_2 = np.intersect1d(pipe_2[:],pipe_2_start[:])\n\tpipe_2_stop_z = np.array(np.where(Z <= 3.0)).flatten()\n\tpipe_2_stop_y = np.array(np.where(Y <= 3.25)).flatten()\n\tpipe_2_stop = np.intersect1d(pipe_2_stop_z[:],pipe_2_stop_y[:])\n\tpipe_2 = np.intersect1d(pipe_2[:],pipe_2_stop[:])\n\n\t#Turn_2\n\tturn_2 = np.array(np.where((0.75 - np.sqrt((Y - 1.75)**2 + (Z -1.5)**2))**2 + (X - 1)**2 >= 0.5**2)).flatten()\n\tturn_2_stop_z = np.array(np.where(Z <= 1.5)).flatten()\n\tturn_2_stop_y = np.array(np.where(Y <= 3.25)).flatten()\n\tturn_2_stop = np.intersect1d(turn_2_stop_z[:],turn_2_stop_y[:])\n\tturn_2 = np.intersect1d(turn_2[:],turn_2_stop[:])\n\t\n\t#Pipe_3\n\tpipe_3 = np.array(np.where((X - 1)**2 + (Y - 1.0)**2 >= 0.5**2)).flatten()\n\tpipe_3_start_z = np.array(np.where(Z >= 1.5)).flatten()\n\tpipe_3_start_y = np.array(np.where(Y <= 1.75)).flatten()\n\tpipe_3_start = np.intersect1d(pipe_3_start_z[:],pipe_3_start_y[:])\n\tpipe_3 = np.intersect1d(pipe_3[:],pipe_3_start[:])\t\n\n\t#Put the pieces together\n\n\tpipe = np.union1d(pipe_1[:],turn_1[:])\n\tpipe = np.union1d(pipe[:],pipe_2[:])\n\tpipe = np.union1d(pipe[:],turn_2[:])\t\n\tpipe = np.union1d(pipe[:],pipe_3[:])\n\n\tobst_list = pipe[:]\n\n \n return list(obst_list[:])", "def writeCutLines(self, fid, drawing_code, X1, Y1, X2, Y2):\n def notEdge(x, X):\n return round(abs(1000*(x-X)))\n\n assert self.x and self.y\n\n radius = config.GAT[drawing_code].dimx/2.0\n \n # Start at lower-left, proceed clockwise\n x = self.x - radius\n y = self.y - radius\n\n left = notEdge(self.x, X1)\n right = notEdge(self.x+self.width_in(), X2)\n bot = notEdge(self.y, Y1)\n top = notEdge(self.y+self.height_in(), Y2)\n\n BL = ((x), (y))\n TL = ((x), (y+self.height_in()+2*radius))\n TR = ((x+self.width_in()+2*radius), (y+self.height_in()+2*radius))\n BR = ((x+self.width_in()+2*radius), (y))\n\n if not left:\n BL = (BL[0]+2*radius, BL[1])\n TL = (TL[0]+2*radius, TL[1])\n\n if not top:\n TL = (TL[0], TL[1]-2*radius)\n TR = (TR[0], TR[1]-2*radius)\n\n if not right:\n TR = (TR[0]-2*radius, TR[1])\n BR = (BR[0]-2*radius, BR[1])\n\n if not bot:\n BL = (BL[0], BL[1]+2*radius)\n BR = (BR[0], BR[1]+2*radius)\n\n BL = (util.in2gerb(BL[0]), util.in2gerb(BL[1]))\n TL = (util.in2gerb(TL[0]), util.in2gerb(TL[1]))\n TR = (util.in2gerb(TR[0]), util.in2gerb(TR[1]))\n BR = (util.in2gerb(BR[0]), util.in2gerb(BR[1]))\n\n # The \"if 1 or ...\" construct draws all four sides of the job. By\n # removing the 1 from the expression, only the sides that do not\n # correspond to panel edges are drawn. 
The former is probably better\n # since panels tend to have a little slop from the cutting operation\n # and it's easier to just cut it smaller when there's a cut line.\n # The way it is now with \"if 1 or....\", much of this function is\n # unnecessary. Heck, we could even just use the boardoutline layer\n # directly.\n if 1 or left:\n fid.write('X%07dY%07dD02*\\n' % BL)\n fid.write('X%07dY%07dD01*\\n' % TL)\n\n if 1 or top:\n if not left: fid.write('X%07dY%07dD02*\\n' % TL)\n fid.write('X%07dY%07dD01*\\n' % TR)\n\n if 1 or right:\n if not top: fid.write('X%07dY%07dD02*\\n' % TR)\n fid.write('X%07dY%07dD01*\\n' % BR)\n\n if 1 or bot:\n if not right: fid.write('X%07dY%07dD02*\\n' % BR)\n fid.write('X%07dY%07dD01*\\n' % BL)", "def lattice_coords(cutoffs):\n Nx = cutoffs[0]\n Ny = cutoffs[1]\n Nz = cutoffs[2]\n \n return np.array([np.kron(np.arange(-Nx,Nx+1, dtype = int), np.kron(np.ones(2*Ny+1,dtype = int), np.ones(2*Nz+1, dtype = int)) ),\n np.kron(np.ones(2*Nx+1, dtype = int), np.kron(np.arange(-Ny,Ny+1, dtype = int), np.ones(2*Nz+1, dtype = int)) ),\n np.kron(np.ones(2*Nx+1, dtype = int), np.kron(np.ones(2*Ny+1, dtype = int), np.arange(-Nz, Nz+1, dtype = int)) )]).T", "def build_partition_tree(self):\n \n xmin = 0\n xmax = self.shape[0]\n ymin = 0\n ymax = self.shape[1]\n zmin = 0\n zmax = self.shape[2]\n total_xwidth = xmax - xmin\n total_ywidth = ymax - ymin\n total_zwidth = zmax - zmin\n q = queue.PriorityQueue()\n M = (xmax - xmin) * (ymax - ymin) * (zmax - zmin)\n self.partition_tree = np.zeros((M - 1, 2))\n q.put((0, xmin, xmax, ymin, ymax, zmin, zmax, -1, False))\n ind = len(self.partition_tree) - 1\n while not q.empty():\n _, xmin, xmax, ymin, ymax, zmin, zmax, parent_ind, is_left = q.get()\n \n if parent_ind >= 0:\n self.partition_tree[parent_ind, 0 if is_left else 1] = ind\n\n # make sure we line up with a flattened indexing scheme\n if ind < 0:\n assert -ind - 1 == xmin * total_ywidth * total_zwidth + ymin * total_zwidth + zmin\n\n xwidth = xmax - xmin\n ywidth = ymax - ymin\n zwidth = zmax - zmin\n if xwidth == 1 and ywidth == 1 and zwidth == 1:\n pass\n else:\n\n # by default our ranges remain unchanged\n lxmin = rxmin = xmin\n lxmax = rxmax = xmax\n lymin = rymin = ymin\n lymax = rymax = ymax\n lzmin = rzmin = zmin\n lzmax = rzmax = zmax\n\n # split the xaxis if it is the largest dimension\n if xwidth >= ywidth and xwidth > 1:\n xmid = xmin + xwidth // 2\n lxmax = xmid\n rxmin = xmid\n\n # split the yaxis\n elif ywidth > 1:\n ymid = ymin + ywidth // 2\n lymax = ymid\n rymin = ymid\n\n # split the zaxis only when the other ranges are already width 1\n else:\n zmid = zmin + zwidth // 2\n lzmax = zmid\n rzmin = zmid\n\n lsize = (lxmax - lxmin) * (lymax - lymin) * (lzmax - lzmin)\n rsize = (rxmax - rxmin) * (rymax - rymin) * (rzmax - rzmin)\n\n q.put((-lsize, lxmin, lxmax, lymin, lymax, lzmin, lzmax, ind, True))\n q.put((-rsize, rxmin, rxmax, rymin, rymax, rzmin, rzmax, ind, False))\n\n ind -= 1\n self.partition_tree += int(M)", "def get_loop_borders(pdb_hierarchy, center_resnum_list, ss_annot):\n f_start_res_num =-9999\n f_end_res_num = 9999999\n if ss_annot is not None:\n for elem in ss_annot.simple_elements():\n if f_start_res_num < elem.get_end_resseq_as_int() <= hy36decode(4, center_resnum_list[0]):\n # print \" cutting...\"\n f_start_res_num = elem.get_end_resseq_as_int()\n if hy36decode(4, center_resnum_list[-1]) <= elem.get_start_resseq_as_int() < f_end_res_num:\n # print \" cutting...\"\n f_end_res_num = elem.get_start_resseq_as_int()\n loop_length = f_end_res_num - 
f_start_res_num\n return f_start_res_num, f_end_res_num",
 "def __init__(self, selected_points, cut_depth, cut_breadth):\n\n\n self.cut_depth = cut_depth\n self.cut_breadth = cut_breadth\n\n self.points = selected_points\n\n self.vline = self.vlinecomp()\n self.hline = self.ortho_line_cut()\n\n self.mid_left = self.midpoint(0,1)\n self.mid_right = self.midpoint(2, 3)",
 "def get_obstList(self,X,Y,Z):\n #Pipe in - find all points exterior of small pipe\n\tpipe_in = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_in/2)**2)).flatten()\n\tpipe_in_stop = np.array(np.where(Z <= 3 + 0.5*(self.diam_out - self.diam_in))).flatten()\n\tpipe_in = np.intersect1d(pipe_in[:],pipe_in_stop[:])\n\n\t#Expansion - find all points exterior of expansion\n\tr_cone = self.diam_in\n\th_cone = self.diam_in\t\n\texpansion = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (r_cone/h_cone)**2*(Z - 3)**2)).flatten()\n\texpansion_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_out - self.diam_in)))\n\t#expansion_stop = np.array(np.where(Z <= 4)).flatten()\n\texpansion = np.intersect1d(expansion[:],expansion_start[:])\n\t#expansion = np.intersect1d(expansion[:],expansion_stop[:])\n\n\t#Pipe out - find all points exterior of smaller pipe\n\tpipe_out = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_out/2)**2)).flatten()\n\tpipe_out_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_in - self.diam_out))).flatten()\n\tpipe_out = np.intersect1d(pipe_out[:],pipe_out_start[:])\n\n\n\t#Put the pieces together\n\n\tpipe = expansion[:]\n\tpipe = np.union1d(expansion[:],pipe_in[:])\n\tpipe = np.union1d(pipe[:],pipe_out[:])\n\n\tobst_list = pipe[:]\n\n \n return list(obst_list[:])",
 "def drawShadedContours(xlist, ylist, zmatrix, levels):\n dislin.conshd(xlist, len(xlist), ylist, len(ylist), zmatrix, levels, len(levels))",
 "def extract_walls(img_array,x_scale,y_scale,wall_height):\n\n wall_th = 2\n length = 0\n wall_list = []\n\n #check for horizontal walls first\n for row in range(img_array.shape[0]):\n for col in range(img_array.shape[1]):\n \n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if left_edge(sec):\n #check two steps to the right\n next_sec = img_array.astype(int)[row:row+2, col+1:col+3]\n next_next_sec = img_array.astype(int)[row:row+2, col+2:col+4]\n\n #if horizontal wall, get coordinates and count length\n if is_wall(next_sec) and not right_edge(next_next_sec): \n #record corner coordinates\n x = col +1\n y = row\n while is_wall(next_sec):\n #start counting length across, until right edge found\n length +=1\n col +=1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list \n new_wall = Wall(x*x_scale,y*y_scale,length*x_scale,wall_th*y_scale,wall_height)\n wall_list.append(new_wall)\n length = 0\n\n #check for vertical walls\n for col in range(img_array.shape[1]):\n for row in range(img_array.shape[0]):\n\n sec = img_array.astype(int)[row:row+2,col:col+2]\n \n if top_edge(sec): \n #check two steps below\n next_sec = img_array.astype(int)[row+1:row+3, col:col+2]\n next_next_sec = img_array.astype(int)[row+2:row+4, col:col+2]\n\n #if vertical wall, get coordinates and count length\n if is_wall(next_sec) and is_wall(next_next_sec):\n x = col\n y = row\n while is_wall(next_sec):\n #start counting length downwards, until bottom edge found\n length += 1\n row += 1\n next_sec = img_array.astype(int)[row:row+2, col:col+2]\n #create wall object and store in list\n new_wall = Wall(x*x_scale,y*y_scale,wall_th*x_scale,length*y_scale, wall_height)\n wall_list.append(new_wall)\n 
length = 0\n\n return wall_list", "def create_line_list(self,depth_arr):\n\n '''\n depth_arr- depth image as numpy array\n '''\n\n try:\n body=[['shoulder_line',[self.rpts[11],self.rpts[12]]],['waist_line',[self.rpts[23],self.rpts[24]]],['left_shoulder_waist',[self.rpts[11],self.rpts[23]]],['right_shoulder_waist',[self.rpts[12],self.rpts[24]]],['right_thigh',[self.rpts[24],self.rpts[26]]],['left_thigh',[self.rpts[23],self.rpts[25]]],['right_leg',[self.rpts[26],self.rpts[28]]],['left_leg',[self.rpts[25],self.rpts[27]]],['right_forearm',[self.rpts[14],self.rpts[16]]],['left_forearm',[self.rpts[13],self.rpts[15]]],['right_bicep',[self.rpts[12],self.rpts[14]]],['left_bicep',[self.rpts[11],self.rpts[13]]]]\n self.linelist.points=[]\n self.linelist.header.frame_id = \"kinect_frame\"\n self.linelist.header.stamp = rospy.Time.now()\n self.linelist.type = Marker.LINE_LIST\n \n self.linelist.id=1\n self.linelist.action = Marker.ADD \n self. linelist.scale.x = 0.05\n\n self.linelist.color.g = 1.0\n self.linelist.color.a = 1.0\n\n \n\n for _,pointl in body:\n for pt in pointl:\n depth_val=float(depth_arr[pt[1], pt[0]])\n ptl_x,ptl_y,ptl_z=self.depth_to_xyz(pt[0],pt[1],depth_val)\n \n self.linelist_point=Point()\n self.linelist_point.x = ptl_x\n self.linelist_point.y = ptl_y\n self.linelist_point.z = ptl_z\n self.linelist.points.append(self.linelist_point)\n \n except:\n pass", "def vertical_cylinders(xy_size: int, z_depth: int, dtype=np.uint8):\n shape = (xy_size, xy_size, z_depth)\n image_size_px = shape[0] // 3\n z_depth = shape[2]\n half_atom = image_size_px // 2\n quarter_atom = image_size_px // 4\n cylinders = [\n # center cylinder, z-aligned, 64x64 radius = 16\n ((image_size_px + half_atom, image_size_px + half_atom, 0),\n (image_size_px + half_atom, image_size_px + half_atom, z_depth - 1),\n image_size_px // 4),\n # first tile overlapping to other tiles, z-aligned, 64x64 radius = 16\n ((image_size_px - quarter_atom, image_size_px - quarter_atom, 0),\n (image_size_px - quarter_atom, image_size_px - quarter_atom, z_depth - 1),\n image_size_px // 4),\n # lower middle tile overlapping to other tiles, z-aligned, 64x64 radius = 8\n ((image_size_px * 2 + quarter_atom, image_size_px + half_atom, 0),\n (image_size_px * 2 + quarter_atom, image_size_px + half_atom, z_depth - 1),\n image_size_px // 8),\n ]\n data_mask = create_cylinders_volume(shape, cylinders, foreground=1, dtype=dtype)\n return data_mask, cylinders", "def get_traversable_tiles(room, x, y, length, width):\n traversables = []\n # Checking that we are not going out of bounds\n if x > length - 1 or y > width - 1 or x < 0 or y < 0:\n return\n # Checking above\n if not (x - 1 < 0):\n if not room.tiles[x - 1][y].border:\n traversables.append([x - 1, y])\n # Checking left\n if not (y - 1 < 0):\n if not room.tiles[x][y - 1].border:\n traversables.append([x, y - 1])\n # Checking right\n if not (y + 1 > width - 1):\n if not room.tiles[x][y + 1].border:\n traversables.append([x, y + 1])\n # Checking below\n if not (x + 1 > length - 1):\n if not room.tiles[x + 1][y].border:\n traversables.append([x + 1, y])\n\n return traversables", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.0):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = pts[0][2]\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n 
x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def cut_path(cutpath, mesh):\n #cutpath = np.array([[x0,0],[x0,2]])\n cutedges = []\n xis, ps = [], []\n for eidx,edge in enumerate(mesh.edges):\n x0 = np.array([mesh.x[edge[0]], mesh.y[edge[0]]])\n x1 = np.array([mesh.x[edge[1]], mesh.y[edge[1]]])\n if intersect(cutpath[0],cutpath[1], x0, x1):\n p, xi = intersection(x0, x1, cutpath[0],cutpath[1])\n xis.append(xi); ps.append(p)\n cutedges.append(eidx)\n cutedges = np.array(cutedges, dtype='int'); ps = np.array(ps)\n # Sort according to y-coordinate\n isort = np.argsort(ps[:,1])\n return cutedges[isort], np.array(xis)[isort]", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.01):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = 0.0\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list 
according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def wall_2d_vertices(self, thickness=0.1):\n wall_offset = {\n Direction.North: np.array([thickness, 1.0 - thickness]),\n Direction.East: np.array([1.0 - thickness, 1.0 - thickness]),\n Direction.South: np.array([1.0 - thickness, thickness]),\n Direction.West: np.array([thickness, thickness]),\n }\n walker_position = self._start.copy()\n walker_direction = Direction.North\n positions = [walker_position + np.array([thickness, 0.0])]\n while walker_position in self:\n room = self[walker_position]\n left = walker_direction.turn_left()\n if room.can_move(left):\n positions.append(walker_position + wall_offset[left])\n walker_direction = left\n elif room.can_move(walker_direction):\n pass\n else:\n positions.append(walker_position + 
wall_offset[walker_direction])\n right = walker_direction.turn_right()\n if room.can_move(right):\n walker_direction = right\n else:\n positions.append(walker_position + wall_offset[right])\n walker_direction = walker_direction.reverse()\n walker_position += walker_direction.offset()\n positions.append(self._start + np.array([1.0 - thickness, 0.0]))\n return np.array(positions, dtype=np.float32)", "def vertical_core(block,cut,laser):\r\n\r\n\tlayers = int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\ttaper = math.tan(angle) * laser[\"z_spacing\"]\r\n\r\n\tu = math.tan(2 * angle) * (block[\"thickness\"] + laser[\"z_final_overshoot\"])\r\n\tz_0 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 - block[\"origin_y\"] + u)\r\n\tz_1 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 + block[\"origin_x\"] + u)\r\n\tz_2 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 + block[\"origin_y\"] + u)\r\n\tz_3 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 - block[\"origin_x\"] + u)\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", f\"{math.degrees(angle):.6f}\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", f\"{z_0:.6f}\"])\r\n\r\n\ty_start_wide = ((u + cut[\"final_dimension_x\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\ty_start_length = ((u + cut[\"final_dimension_y\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\r\n\tdepth_cut = (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * math.cos(angle)/math.cos(2*angle)\r\n\r\n\tcut1 = json.loads(line(block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],-block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut2 = json.loads(line(block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],-block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],depth_cut,laser))\r\n\r\n\tcut3 = json.loads(line(block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],-block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut4 = json.loads(line(block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],-block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],depth_cut,laser))\r\n\r\n\t#cut1 = json.loads(line(block[\"width\"]/2,y_start_length,-block[\"width\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut2 = json.loads(line(block[\"length\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\t#cut3 = json.loads(line(block[\"width\"]/2,y_start_length,-cut[\"final_dimension_x\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut4 = json.loads(line(cut[\"final_dimension_y\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\tcutlist = (cutlist + cut1\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_1:.6f}\"],] \r\n\t + cut2\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_2:.6f}\"]] \r\n\t\t\t\t\t + cut3 \r\n\t\t\t\t\t + [[\"z_abs\", f\"{z_3:.6f}\"],[\"c_rel\", \"90\"]] \r\n\t\t\t\t\t + cut4)\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", 
\"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\r\n\treturn json.dumps(cutlist)", "def pointListForCurve(x, y, type):\n\n\tif x < 10:\n\t\txString = \"0%d\" % x\n\telse:\n\t\txString = \"%d\" % x\n\n\tif x < 11:\n\t\txMString = \"0%d\" % (x - 1)\n\telse:\n\t\txMString = \"%d\" % (x - 1)\n\n\tif x < 9:\n\t\txPString = \"0%d\" % (x + 1)\n\telse:\n\t\txPString = \"%d\" % (x + 1)\n\n\tif x < 8:\n\t\txPPString = \"0%d\" % (x + 2)\n\telse:\n\t\txPPString = \"%d\" % (x + 2)\n\n\tif y < 11:\n\t\tyMString = \"0%d\" % (y - 1)\n\telse:\n\t\tyMString = \"%d\" % (y - 1)\n\n\tif y < 9:\n\t\tyPString = \"0%d\" % (y + 1)\n\telse:\n\t\tyPString = \"%d\" % (y + 1)\n\n\tif y < 8:\n\t\tyPPString = \"0%d\" % (y + 2)\n\telse:\n\t\tyPPString = \"%d\" % (y + 2)\n\n\tif y < 10:\n\t\tyString = \"0%d\" % y\n\telse:\n\t\tyString = \"%d\" % y\n\n\tinnerRadius = 54.0 / 64.0\n\touterRadius = 87.0 / 64.0\n\n\tslices = 10\n\n\t# Dots are numbered as xxyy[IO]z\n\t# The I means it is the inside trek, the O the outside\n\t# The z is which particular dot it is (0-9)\n\t# Note that all paths are marked as being inside the top-left square\n\t# Except for entrence and exit dots.\n\t# Curves are generated from star + 10 to end - 10\n\n\tif type == 8:\t\t\t# Bottom right\n\t\tcenterX = 25.0 / 64.0\n\t\tcenterY = 25.0 / 64.0\n\t\tstart = 0.0\n\t\tend = 90.0\n\n\t\tenterIn =\t[\"%s%sTL\" % (xPString, yString), 0.25, 0.25, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sBL\" % (xString, yPString), 0.25, 0.75, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sTL\" % (xString, yPString), 0.25, 0.25, [\"%s%sTR\" % (xMString, yPString)]]\n\t\texitOut =\t[\"%s%sTR\" % (xPString, yString), 0.75, 0.25, [\"%s%sBR\" % (xPString, yMString)]]\n\n\t\tendIn = \"%s%sTL\" % (xString, yPString)\n\t\tendOut = \"%s%sTR\" % (xPString, yString)\n\n\telif type == 9:\t\t\t# Bottom left\n\t\tcenterX = 103.0 / 64.0\n\t\tcenterY = 25.0 / 64.0\n\t\tstart = 90.0\n\t\tend = 180.0\n\n\t\tenterIn =\t[\"%s%sTR\" % (xPString, yPString), 0.75, 0.25, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\"%s%sBR\" % (xString, yMString)]]\n\t\texitOut =\t[\"%s%sBR\" % (xPString, yPString), 0.75, 0.75, [\"%s%sBL\" % (xPPString, yPString)]]\n\n\t\tendIn = \"%s%sTR\" % (xString, yString)\n\t\tendOut = \"%s%sBR\" % (xPString, yPString)\n\n\telif type == 10:\t\t# Top left\n\t\tcenterX = 103.0 / 64.0\n\t\tcenterY = 103.0 / 64.0\n\t\tstart = 180.0\n\t\tend = 270.0\n\n\t\tenterIn =\t[\"%s%sBR\" % (xString, yPString), 0.75, 0.75, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sTR\" % (xPString, yString), 0.75, 0.25, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sBR\" % (xPString, yString), 0.75, 0.75, [\"%s%sBL\" % (xPPString, yString)]]\n\t\texitOut =\t[\"%s%sBL\" % (xString, yPString), 0.25, 0.75, [\"%s%sTL\" % (xString, yPPString)]]\n\n\t\tendIn = \"%s%sBR\" % (xPString, yString)\n\t\tendOut = \"%s%sBL\" % (xString, yPString)\n\n\telse: # type == 11:\t\t# Top right\n\t\tcenterX = 25.0 / 64.0\n\t\tcenterY = 103.0 / 64.0\n\t\tstart = 270.0\n\t\tend = 360.0\n\n\t\tenterIn =\t[\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sBR\" % (xPString, yPString), 0.75, 0.75, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sBL\" % (xPString, yPString), 0.25, 0.75, [\"%s%sTL\" % (xPString, yPPString)]]\n\t\texitOut =\t[\"%s%sTL\" % (xString, 
yString), 0.25, 0.25, [\"%s%sTR\" % (xMString, yString)]]\n\n\t\tendIn = \"%s%sBL\" % (xPString, yPString)\n\t\tendOut = \"%s%sTL\" % (xString, yString)\n\n\tpointList = [enterIn, enterOut, exitIn, exitOut]\n\n\tstring = \"%s%s\" % (xString, yString)\n\tstep = ((end - 1) - (start + 1)) / float(slices)\n\n\tfor i in range(slices):\n\n\t\tangle = radians(start + step * i)\n\n\t\tif i < 9:\n\t\t\ttemp = [\"%sI%d\" % (string, i), centerX + cos(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\"%sI%d\" % (string, i + 1)]]\n\t\telse:\n\t\t\ttemp = [\"%sI%d\" % (string, i), centerX + cos(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[endIn]]\n\n\t\tpointList.append(temp)\n\n\t\tangle = radians(start + step * (10 - i))\n\n\t\tif i < 9:\n\t\t\ttemp = [\"%sO%d\" % (string, i), centerX + cos(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\"%sO%d\" % (string, i + 1)]]\n\t\telse:\n\t\t\ttemp = [\"%sO%d\" % (string, i), centerX + cos(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[endOut]]\n\n\t\tpointList.append(temp)\n\n\treturn pointList", "def get_obstList(self,X,Y,Z):\n #Pipe in - find all points exterior of large pipe\n\tpipe_in = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_in/2)**2)).flatten()\n\tpipe_in_stop = np.array(np.where(Z <= 4)).flatten()\n\tpipe_in = np.intersect1d(pipe_in[:],pipe_in_stop[:])\n\n\t#Contraction - find all points exterior of contraction\n\tr_cone = self.diam_out\n\th_cone = self.diam_out\t\n\tcontraction = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (r_cone/h_cone)**2*(Z - (4 + h_cone))**2)).flatten()\n\tcontraction_start = np.array(np.where(Z >= 4)).flatten()\n\tcontraction_stop = np.array(np.where(Z <= 4 + .5*self.diam_out)).flatten()\n\tcontraction = np.intersect1d(contraction[:],contraction_start[:])\n\tcontraction = np.intersect1d(contraction[:],contraction_stop[:])\n\n\t#Pipe out - final all points exterior of smaller pipe\n\tpipe_out = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_out/2)**2)).flatten()\n\tpipe_out_start = np.array(np.where(Z >= 4 + .5*self.diam_out)).flatten()\n\tpipe_out = np.intersect1d(pipe_out[:],pipe_out_start[:])\n\n\n\t#Put the pieces together\n\n\t#pipe = pipe_in[:]\n\tpipe = np.union1d(contraction[:],pipe_in[:])\n\tpipe = np.union1d(pipe[:],pipe_out[:])\n\n\tobst_list = pipe[:]\n\n \n return list(obst_list[:])", "def detector_outline( bottom_vec3d_list, top_vec3d_list ):\n # hardcoded angular offset for hexagon\n phi0 = -20.0 * I3Units.degree \n\n # hardcoded threshold for an edge\n cos_angle_threshold = math.cos( 7.0 * I3Units.degree ) \n\n bottom = Vec3dList()\n top = Vec3dList()\n\n string_coords = []\n for b, t in zip( bottom_vec3d_list, top_vec3d_list ):\n if t[2] < 450.0 * I3Units.meter: # ignore deep-core\n continue\n string_coords.append(( math.atan2(t[1], t[0]),\n t[0], t[1], b[2], t[2] ))\n\n # border detection:\n # check if there is a point in each angular segment of hexagon\n border = []\n for i, cur in enumerate( string_coords ):\n counts = [False, False, False, False, False , False]\n for j, other in enumerate( string_coords ):\n if i == j: continue\n dx = cur[1] - other[1]\n dy = cur[2] - other[2]\n phi = int((math.atan2( dy, dx ) - phi0) / I3Units.degree)\n if phi < 0:\n phi += 360\n counts[phi // 60] = True\n neighbor_count = sum( counts )\n # 
border points don't have a full hexagon of neighbors\n if neighbor_count < 6:\n border.append( cur )\n\n border.sort() # put in circular order\n\n # edge detection:\n # check if differential vectors of three consecutive points have an angle\n for i in xrange( len(border) ):\n ax = border[i - 1][1] - border[i - 2][1]\n ay = border[i - 1][2] - border[i - 2][2]\n bx = border[i][1] - border[i - 1][1]\n by = border[i][2] - border[i - 1][2]\n anorm = (ax ** 2 + ay ** 2) ** 0.5\n bnorm = (bx ** 2 + by ** 2) ** 0.5\n cos_angle = (bx * ax + by * ay) / (anorm * bnorm)\n if cos_angle < cos_angle_threshold:\n cur = border[i - 1]\n bottom.append( vec3d(cur[1], cur[2], cur[3]) )\n top.append( vec3d(cur[1], cur[2], cur[4]) )\n\n return bottom, top", "def nine_to_3x3(listy):\n new_side = []\n k = int(len(listy) / 3)\n \n for i in range(k):\n intermediate = []\n for j in range(3):\n intermediate.append(listy.pop(0))\n \n new_side.append(intermediate)\n return new_side", "def get_segmented_point_clouds(seg_masks, depth): \n obj_labels = np.unique(seg_masks)\n num_objs = obj_labels.shape[0]+1\n rows, cols = seg_masks.shape\n cm = plt.get_cmap('gist_rainbow')\n colors = [cm(1. * i/num_objs) for i in range(num_objs)]\n \n object_dict = {}\n # key - object label; val - depth array of that object\n for i in obj_labels:\n object_dict[i] = np.zeros((rows,cols), dtype = np.float32)\n\n for i in range(rows):\n for j in range(cols):\n if seg_masks[i][j] != 0 and seg_masks[i][j] != -1:\n object_dict[seg_masks[i][j]][i][j] = depth[i][j]\n \n segmented_pcds = []\n for key, val in object_dict.items():\n if key == -1 or key == 0:\n continue\n img = o3d.geometry.Image(val)\n pcd_from_depth = o3d.geometry.PointCloud.create_from_depth_image(\n img,\n o3d.camera.PinholeCameraIntrinsic(\n o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n\n # Multiply with Transformation matrix to get correct view of the PCD\n pcd_from_depth.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n pcd_from_depth.paint_uniform_color(np.array(colors[key][:3], dtype = np.uint8) * 255)\n segmented_pcds.append(pcd_from_depth)\n return segmented_pcds", "def numtocoor(lis,walllength,wallheight,x=10):\n coorlist = []\n for tup in lis:\n tuplist = list(tup)\n tuplist.sort()\n \n big = max(tup)\n small = min(tup)\n coormax = ((big%walllength+1-walllength/2)*x,-(big//walllength+1+wallheight/2)*x)\n coormin = ((small%walllength-walllength/2)*x,-(small//walllength+wallheight/2)*x)\n coor = [coormax,coormin]\n coorlist.append(coor)\n return coorlist", "def get_obstList(self,X,Y,Z):\n\n x = np.array(X); y = np.array(Y); z = np.array(Z);\n dist = (x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2\n \n return list(np.where(dist < self.r**2))", "def get_obstList(self,X,Y,Z):\n\n x = np.array(X); y = np.array(Y); z = np.array(Z);\n dist = (x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2\n \n return list(np.where(dist < self.r**2))", "def rect_splitter(split_depth, rect, start_dimension='vertical'):\n\n splitters = (rect_vertical_split, rect_horizontal_split)\n if start_dimension == 'vertical':\n pass\n elif start_dimension == 'horizontal':\n splitters = splitters[1], splitters[0]\n else:\n raise ValueError(\"Invalid `start_dimension`\")\n\n split_results = [rect.copy()]\n\n for i in range(split_depth):\n new_split_results = []\n for x in split_results:\n new_split_results.extend(splitters[i % 2](x))\n split_results = new_split_results\n\n return split_results", "def get_one_depth_per_pixel(self, depth_min, 
depth_max):\n y_px_size = self.GetSize()[1] \n depth_list = []\n for depth_px in range(y_px_size):\n depth = self._get_depth_from_ypixel(depth_px) \n if depth > depth_min and depth < depth_max: \n depth_list.append(depth)\n return depth_list", "def create_subgrid(self)->list:\n return [subgrid.Subgrid(i) for i in range(0, 9)]", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles", "def get_objects(color, depth, threshold1, threshold2):\n\n gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n surf = cv2.xfeatures2d.SURF_create(500)\n\n # find and draw the keypoints\n kp = surf.detect(blur,None)\n\n pts = [p.pt for p in kp]\n xpts = []\n ypts = []\n\n # evaluate the keypoints and only save the keypoints who are between the given threshold\n depth_values = []\n for i in range(0,len(pts)):\n xco = int(pts[i][0])\n yco = int(pts[i][1])\n depth_value = depth[yco][xco]\n if depth_value >= float(threshold1) and depth_value <= float(threshold2):\n xpts.append(xco)\n ypts.append(yco)\n depth_values.append(depth_value)\n\n # make histogram of x coordinates of the saved keypoints\n n, distr, _ = plt.hist(xpts)\n plt.savefig('hist.png')\n\n # evaluate the histogram and make seperate arrays for the different objects\n objectarray = []\n temp = []\n for i in range(len(n)):\n if n[i] > 0:\n temp.append(distr[i])\n temp.append(distr[i+1])\n else:\n if len(temp)!=0:\n objectarray.append(temp)\n temp = []\n objectarray.append(temp)\n\n objects = []\n\n # determine the objects with the previous calculated arrays\n for i in range(len(objectarray)):\n y_values = []\n min_x = int(np.amin(objectarray[i]))\n max_x = int(np.amax(objectarray[i]))\n\n for j in range(len(xpts)):\n if xpts[j] > min_x and xpts[j] < max_x:\n y_values.append(ypts[j])\n\n min_y = int(np.amin(y_values))\n max_y = int(np.amax(y_values))\n x = min_x\n y = min_y\n w = max_x - min_x\n h = max_y - min_y\n\n depth_mean = round(get_depth_mean(depth, x, y, w, h), 3)\n\n object = DetectedObject(x, y, w, h, depth_mean)\n objects.append(object)\n\n return objects", "def get_dancefloor_area(self):\n cords = []\n\n x1 = self.coordinates[0]\n y1 = self.coordinates[1]\n x2 = self.coordinates[0] + self.width\n y2 = self.coordinates[1] + self.height\n if self.width <= 0:\n x1, x2 = x2, x1\n if self.height <= 0:\n y1, y2 = y2, y1\n\n for x in range(x1, x2):\n for y in range(y1, y2):\n if x % 20 == 0 and y % 20 == 0:\n cords.append([x, y])\n return cords", "def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + 
cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + [[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def cut_image(im):\n width, height = im.size\n # Three pictures in a row\n item_width = int(width / 3)\n box_list = []\n for i in range(0, 3):\n for j in range(0, 3):\n box = (j * item_width, i * item_width, (j + 1) * item_width, (i + 1) * item_width)\n box_list.append(box)\n image_list = [im.crop(box) for box in box_list]\n return image_list", "def find_within_range(self, center, size, shape):\n\n if shape == \"cube\":\n \n payloads = []\n templist = 
[self.root]\n list_list = []\n list_list.append([self.root])\n for level in range(self.maxiter):\n list_list.append([])\n\n #print list_list\n for level in range(self.maxiter):\n for node in list_list[level]:\n Xedge_max = center[0] + size\n Xedge_min = center[0] - size\n Yedge_max = center[1] + size\n Yedge_min = center[1] - size\n Zedge_max = center[2] + size\n Zedge_min = center[2] - size\n\n corner0 = (Xedge_max, Yedge_max, Zedge_max)\n corner1 = (Xedge_max, Yedge_max, Zedge_min)\n corner2 = (Xedge_max, Yedge_min, Zedge_max)\n corner3 = (Xedge_max, Yedge_min, Zedge_min)\n corner4 = (Xedge_min, Yedge_max, Zedge_max)\n corner5 = (Xedge_min, Yedge_max, Zedge_min)\n corner6 = (Xedge_min, Yedge_min, Zedge_max)\n corner7 = (Xedge_min, Yedge_min, Zedge_min)\n corners = [corner0, corner1, corner2, corner3, corner4, corner5, corner6, corner7]\n table = ((corner0[0] > node.Xcenter),(corner0[1] > node.Ycenter) ,(corner0[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXposYposZ)\n table = ((corner1[0] > node.Xcenter),(corner1[1] > node.Ycenter) ,(corner1[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXposYnegZ)\n table = ((corner2[0] > node.Xcenter),(corner2[1] < node.Ycenter) ,(corner2[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXnegYposZ)\n table = ((corner3[0] > node.Xcenter),(corner3[1] < node.Ycenter) ,(corner3[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXnegYnegZ)\n table = ((corner4[0] < node.Xcenter),(corner4[1] > node.Ycenter) ,(corner4[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXposYposZ)\n table = ((corner5[0] < node.Xcenter),(corner5[1] > node.Ycenter) ,(corner5[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXposYnegZ)\n table = ((corner6[0] < node.Xcenter),(corner6[1] < node.Ycenter) ,(corner6[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXnegYposZ)\n table = ((corner7[0] < node.Xcenter),(corner7[1] < node.Ycenter) ,(corner7[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXnegYnegZ)\n\n\n #must remove children that aren't real yet\n temp_templist = []\n for node in list_list[level+1]:\n try:\n node.Xcenter \n temp_templist.append(node)\n except AttributeError:\n pass\n list_list[level+1] = temp_templist\n \n\n payloads = [i.value for i in list_list[-1]]\n return payloads", "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d", "def triple_cut(deck_of_cards):\n new_deck =[]\n big_joker_value = get_big_joker_value(deck_of_cards)\n small_joker_value = get_small_joker_value(deck_of_cards)\n\t \n index1 = deck_of_cards.index(small_joker_value)\n index2 = deck_of_cards.index(big_joker_value)\n index_top_card = min(index1, index2)\n index_bottom_card = max(index1, index2)\n # This function will give us the joker that is on the top and the joker \n # that is in the bottom of the deck 
regardless of their value\n \n new_top = deck_of_cards[(index_bottom_card + 1):]\n # Creates a deck that is to be moved the top, from the lower joker and\n # below \n middle = deck_of_cards[index_top_card : index_bottom_card + 1]\n # Middle portion of the deck that is not moved that is in between the jokers\n new_bottom = deck_of_cards[:index_top_card]\n # The deck portion that is to be moved to the bottom, from higher joker and\n # above.\n deck = new_top + middle + new_bottom\n deck_of_cards[:] = deck\n # This will then give a new deck that shifts the cards above the higher \n # joker to the end and the cards below the lower joker to the top.", "def sub_division(width: float, minimum_division: float, stretch_factor: float) -> list:\n\n sum_x = 0\n next_ = minimum_division\n new_grid = []\n max_dx = 20/100\n x = width/2\n\n while sum_x < x:\n remaining = x - sum_x\n\n if next_ > max_dx:\n n = np.ceil(remaining/max_dx)\n\n if n == 0:\n new_grid.append(remaining)\n\n next_ = remaining/n\n\n for _ in range(0, int(n)):\n new_grid.append(next_)\n sum_x += next_\n\n remaining = x - sum_x\n\n if next_ < remaining:\n new_grid.append(next_)\n sum_x += next_\n else:\n remaining += new_grid[-1]\n new_grid[-1] = remaining/2\n new_grid.append(remaining/2)\n sum_x = x\n\n next_ = next_ * stretch_factor\n\n x1 = new_grid[::-1]\n x2 = new_grid+x1\n\n return x2", "def extract_diag_blocks(x: np.ndarray, y: List) -> List:\n\n def func(cum, this):\n x_crop, res = cum\n return [\n x_crop[len(this):, len(this):],\n res + [x_crop[:len(this), :len(this)]]\n ]\n\n return functools.reduce(func, list(y), [x, []])[-1]", "def triple_cut(deck: List[int]) -> None:\n\n small_joker_index = deck.index(get_small_joker_value(deck))\n big_joker_index = deck.index(max(deck))\n\n if big_joker_index > small_joker_index:\n left_joker = small_joker_index\n right_joker = big_joker_index\n\n else:\n right_joker = small_joker_index\n left_joker = big_joker_index\n\n left_list = deck[:left_joker]\n right_list = deck[right_joker + 1:]\n middle_list = deck[left_joker:right_joker + 1]\n del deck[:]\n deck.extend(right_list)\n deck.extend(middle_list)\n deck.extend(left_list)", "def neighborhood((y, x), (height, width)):\n return [(yt, xt) for xt in [x + 1, x, x - 1]\n for yt in [y + 1, y, y - 1]\n if 0 <= xt < width and 0 <= yt < height\n and (xt, yt) != (x, y)]", "def grab_40_vectors(x1,x2,y1,y2) -> list:\n\n if x1 > x2:\n x1,x2 = x2,x1\n if y1 > y2:\n y1,y2 = y2,y1\n\n height = abs(y2-y1)\n length = abs(x2-x1)\n\n height_delta = height/10\n length_delta = length/10\n\n left_vertical_edge = [(x1, y1+height_delta*i) for i in range(0,11)]\n # print(left_vertical_edge)\n right_vertical_edge = [(x2,y1+height_delta*i) for i in range(0,11)]\n # print(right_vertical_edge)\n bottom_horizontal_edge = [(x1+length_delta*i,y1) for i in range(0,11)]\n # print(bottom_horizontal_edge)\n top_horizontal_edge = [(x1+length_delta*i,y2) for i in range(0,11)]\n # print(top_horizontal_edge)\n\n all_vectors = left_vertical_edge+right_vertical_edge+bottom_horizontal_edge+top_horizontal_edge\n return(all_vectors)", "def make_tiles_limits(im, n_splits, margin=0):\n \n if n_splits == 1:\n return [0, im.shape[1], 0, im.shape[0]]\n # number of splits per axis\n ax_splits = int(np.log2(n_splits))\n x_segments = split_range(im.shape[1], ax_splits)\n y_segments = split_range(im.shape[0], ax_splits)\n \n if margin > 0:\n x_segments = extend_indices(x_segments, margin=margin)\n y_segments = extend_indices(y_segments, margin=margin)\n \n # make combinations of [xmin, 
xmax, ymin, ymax] indices of tiles\n tiles_indices = []\n for xlim in x_segments:\n for ylim in y_segments:\n tiles_indices.append(xlim + ylim)\n return tiles_indices", "def generate_points(self):\n for x in range(self.num_sides):\n for y in range(self.num_sides):\n for z in range(self.num_sides):\n col_name = y + 4\n top_num = 0\n if 1 < z < 4:\n top_name = 'b'\n else:\n top_name = 'd'\n if z == 3 or z == 1:\n top_num += 4\n top_num += x\n\n top_name += str(top_num)\n\n k = Node(x*self.length-self.center, y*self.length -\n self.center, z*self.length-self.center, top_name, col_name)\n self.c_layers[y].append(k)\n self.points.append(k)", "def get_clipped_pointcloud(pointcloud, boundary):\r\n\tassert (pointcloud.shape[0]>=2)\r\n\tpointcloud = pointcloud[:,np.logical_and(pointcloud[0,:]<boundary[1], pointcloud[0,:]>boundary[0])]\r\n\tpointcloud = pointcloud[:,np.logical_and(pointcloud[1,:]<boundary[3], pointcloud[1,:]>boundary[2])]\r\n\treturn pointcloud", "def find_points(z_k, r, offset_x1=0, offset_x2=0):\n # Assume nu_0 < 0, nu_1 > 0\n t_min = z_k.t_min\n t_max = z_k.t_max\n mu_0 = z_k.mu_0\n mu_1 = z_k.mu_1\n x_1_min = z_k.x_1_min\n x_1_max = z_k.x_1_max\n\n h = t_max - t_min # height of set in t direction\n n_t = int(h/(2*r)) # number of cylinders in t direction\n\n # Note that the number of cylinders in x_1 direction\n # is constant in t, while the number of cylinders\n # in x_2 direction varies in t\n\n l_x_1 = x_1_max - x_1_min - offset_x1 # length of set in x_1 direction\n n_x_1 = int(l_x_1/(2*r)) # number of cylinders in x_1 direction\n\n # Start at t = t_min and compute the points for the first row\n # then compute the points for each row\n\n points = []\n for i in xrange(n_t):\n t_i = t_min + i*2*r\n x_2_min = mu_0*t_i\n x_2_max = mu_1*t_i\n l_x_2 = x_2_max - x_2_min - offset_x1 if abs(x_2_max) > abs(x_2_min + offset_x1) else 0\n n_x_2 = int(l_x_2/(2*r))\n # x_1 direction\n for j in range(n_x_1):\n row_points = x_2_min + offset_x2 + r + np.arange(n_x_2)*2*r\n for point in row_points:\n # (t, x1, x2)\n points.append((t_i + r, x_1_min + offset_x1 + (2*j+1)*r, point))\n return np.array(points)", "def generate_pre_heights(self):\n\n config = self.config\n\n def get_lands_oceans():\n oceans, lands = [], []\n for x in xrange(self.size):\n for y in xrange(self.size):\n coord = x, y\n if self[coord] <= 0:\n oceans.append(coord)\n else:\n lands.append(coord)\n return lands, oceans\n\n def add_heights():\n \"\"\"Add pre heights for diamond-square\n \"\"\"\n fac_min = 50\n fac_max = 40\n\n print 'Get lands and oceans'\n t = time.time()\n lands, oceans = get_lands_oceans()\n print 'lands and oceans getted: ', time.time() - t\n\n # TODO: create one def with params: mount_level and other for create heights\n # add default heights\n for coord in lands:\n self[coord] = self.config.land_mount_level[1]\n\n for coord in oceans:\n self[coord] = -self.config.mid_mount_level[1]\n\n # add low heights for lands\n count_land = int(round(len(lands) * config.factor_low_mount / 100.))\n land_coords = []\n\n starts = random.randint(count_land / fac_min, count_land / fac_max)\n for start in xrange(starts):\n start_coord = lands[random.randint(0, len(lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n 
if coord not in land_coords:\n self[coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n # -------------------------------------------------------------------------------\n # add mid heights for lands\n count_land = int(round(len(target_lands) * (config.factor_mid_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 3), count_land / (fac_max*3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n\n if land_coords == []:\n return\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n\n # -------------------------------------------------------------------------------\n # add high heights for lands\n count_land = int(round(len(target_lands) * (config.factor_high_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 4), count_land / (fac_max * 3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n try:\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n except ValueError:\n coord = lands[random.randint(0, len(lands) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n\n\n def square_diamond(sx, sy, size, strong):\n \"\"\"Algorithm Square-diamond generate terrain heights\n\n -> http://www.lighthouse3d.com/opengl/terrain/index.php?mpd2\n \"\"\"\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx, ey\n D = ex, ey\n E = sx+dsize, sy+dsize\n F = sx, sy + dsize\n G = sx + dsize, sy\n H = ex, sy + dsize\n I = sx + dsize, ey\n\n def RAND(X):\n return random.randint(-strong, strong)\n\n ### for coasts dont disappear\n\n def normalize(add_z, X):\n if self[X] <= 0:\n if add_z > 0:\n add_z = -5\n else:\n if add_z <= 0:\n add_z = 5\n return add_z\n\n # Generate heights\n # E = (A+B+C+D) / 4 + RAND(d)\n # F = (A + C + E + E) / 4 + RAND(d)\n # G = (A + B + E + E) / 4 + RAND(d)\n # H = (B + D + E + E) / 4 + RAND(d)\n # I = (C + D + E + E) / 4 + RANS(d)\n\n ### E\n\n try:\n\n add_z = ((self[A] + self[B] + self[C] + self[D]) / 4) + RAND(E)\n\n except KeyError, e:\n print A, B, C, D, size, dsize, len(self)\n raise e\n\n\n self[E] = normalize(add_z, E)\n\n ### F\n\n add_z = (self[A] + self[C] + self[E] + self[E]) / 4 + RAND(F)\n\n self[F] = normalize(add_z, F)\n\n ### G\n\n add_z = (self[A] + self[B] + self[E] + self[E]) / 4 + RAND(G)\n\n self[G] = normalize(add_z, G)\n\n ### H\n\n add_z = 
(self[B] + self[D] + self[E] + self[E]) / 4 + RAND(H)\n\n self[H] = normalize(add_z, H)\n\n ### I\n add_z = (self[C] + self[D] + self[E] + self[E]) / 4 + RAND(I)\n\n self[I] = normalize(add_z, I)\n\n\n # DIAMOND STEP\n\n # get coordinates\n # 0 - x, 1 - y\n\n x, y = 0, 1\n\n dx = (G[x] - A[x]) / 2\n dy = (F[y] - A[y]) / 2\n\n J = A[x] + dx, A[y] + dy\n K = G[x] + dx, G[y] + dy\n L = F[x] + dx, F[y] + dy\n M = E[x] + dx, E[y] + dy\n\n N = A[x], A[y] + dy\n O = A[x] + dx, A[y]\n P = G[x], G[y] + dy\n Q = A[x] + dx, F[y]\n\n # Generate Heights\n # J = (A + G + F + E)/4 + RAND(d)\n # K = (G + B + E + H)/4 + RAND(d)\n # L = (F + E + C + I)/4 + RAND(d)\n # M = (E + H + I + D)/4 + RAND(d)\n\n # J\n add_z = ((self[A] + self[G] + self[F] + self[E]) / 4) + RAND(J)\n self[J] = normalize(add_z, J)\n\n # K\n add_z = ((self[G] + self[B] + self[E] + self[H]) / 4) + RAND(K)\n self[K] = normalize(add_z, K)\n\n # L\n add_z = ((self[F] + self[E] + self[C] + self[I]) / 4) + RAND(L)\n self[L] = normalize(add_z, L)\n\n # M\n add_z = ((self[E] + self[H] + self[I] + self[D]) / 4) + RAND(M)\n self[M] = normalize(add_z, M)\n\n # N = (K + A + J + F)/4 + RAND(d)\n # O = (L + A + G + J)/4 + RAND(d)\n # P = (J + G + K + E)/4 + RAND(d)\n # Q = (F + J + E + L)/4 + RAND(d)\n\n # N\n add_z = ((self[K] + self[A] + self[J] + self[F]) / 4) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[L] + self[A] + self[G] + self[J]) / 4) + RAND(O)\n self[O] = normalize(add_z, O)\n\n # P\n add_z = ((self[J] + self[G] + self[K] + self[E]) / 4) + RAND(P)\n self[P] = normalize(add_z, P)\n\n # Q\n add_z = ((self[F] + self[J] + self[E] + self[L]) / 4) + RAND(Q)\n self[Q] = normalize(add_z, Q)\n\n # N = (A + J + F)/3 + RAND(d)\n # O = (A + G + J)/3 + RAND(d)\n\n # N\n add_z = ((self[A] + self[J] + self[F]) / 3) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[A] + self[G] + self[J]) / 3) + RAND(N)\n self[O] = normalize(add_z, O)\n\n\n ### Start recurse for diamond alg\n square_diamond(A[0], A[1], dsize, strong)\n square_diamond(G[0], G[1], dsize, strong)\n square_diamond(F[0], F[1], dsize, strong)\n square_diamond(E[0], E[1], dsize, strong)\n\n # align\n def align_it(start, strong):\n \"\"\"Deprecated\n \"\"\"\n water = 0\n #map3d = self.copy()\n size = (abs(start)*2) + self.size - strong\n start = start + strong\n coords_map = []\n for x in xrange(start,size):\n for y in xrange(start,size):\n coords_map.append( (x, y) )\n\n random.shuffle(coords_map)\n\n lens = strong * (3.0 ** 2)\n for coord in coords_map:\n average = 0.0\n x, y = coord\n #rounds = self.get_round_xy_land(coord, -strong, False)\n #for r_coord in rounds:\n #average += self[r_coord]\n for x in xrange(-strong, strong+1):\n for y in xrange(-strong, strong+1):\n average += self[x, y]\n\n height = int(round(average / lens))\n #height = int(round(average / float(len(rounds))))\n if self[coord] <= water and height > water:\n height = water\n elif self[coord] > water and height <= water:\n height = water + 1\n\n #print self[coord], '->', height\n\n self[coord] = height\n\n if self.config.add_pre_heights:\n print 'Add heights start'\n add_heights()\n print 'Diamond-Square start'\n for x in xrange(1):\n square_diamond(\n sx = 0,\n sy = 0,\n size = self.size, strong=100)", "def cut_trees(self, )\n\n\n\n def random_spot(x_low, y_low, x_range, y_range):\n x = randint(x_low, x_low + x_range)\n y = randint(y_low, y_low + y_range)\n dur = random.uniform(0.5, 3.0)\n\n return pyautogui.moveTo(x, y, dur)", "def find_zbins(z, zstart=2.1, deltaz=0.2):\n 
curr_zbins = [zstart]\n curr_z = zstart\n\n while True:\n pos = np.where((z > curr_z) & (z <= curr_z + 0.2))[0]\n\n if len(pos) > 50:\n curr_z += 0.2\n curr_zbins.append(curr_z)\n else:\n pos = np.where((z > curr_z) & (z <= curr_z + 0.18))[0]\n if len(pos) > 50:\n curr_z += 0.18\n curr_zbins.append(curr_z)\n else:\n pos = np.where((z > curr_z) & (z <= curr_z + 0.24))[0]\n if len(pos) > 50:\n curr_z += 0.24\n curr_zbins.append(curr_z)\n else:\n break\n return np.array(curr_zbins)", "def cull(self):", "def _build_list_of_excluded_pixels(self, exclude_zones):\n \n pixels = []\n for x, y, width, height in exclude_zones:\n for row in range(height):\n for col in range(width):\n pixels.append(Pixel(col + x, row + y))\n \n return pixels", "def create_wild_lists(amount,length):\r\n box = []\r\n\r\n k = 0\r\n while k < amount:\r\n sublist = []\r\n j = 0\r\n while j < length:\r\n num = random.randint(1, 100)\r\n sublist.append(num)\r\n j += 1\r\n box.append(sublist)\r\n k += 1\r\n\r\n if amount == 1:\r\n return sublist\r\n\r\n return box", "def _find_largest_Rectangles_in_cross_hatch(x, y):\n if x < y: # Swap to iterate over the longest side.\n x, y = y, x\n\n rectangles = []\n for i in range(1, x): # Iterate over lower-edge vertices, ignoring corners\n a0, a1 = i, -i # Slope-intercepts for cross-hatch lines running through point (0, i)\n for j in range(1, x): # Iterate over upper-edge vertices, still ignoring corners\n b0, b1 = y - j, y + j # Slope-intercepts for cross-hatch lines running through point (y, j)\n x0, x1 = (a0 - b0) / 2, (b1 - a1) / 2\n if x >= x0 >= 0 and x >= x1 >= 0 and y > -x0 + a0 > 0 and y > x1 + a1 > 0: # All four corners are w/i grid\n rectangles.append(Rectangle((i, 0), (j, y), aligned_with_grid=False))\n # assert len(rectangles) == (2*y - 1) * (x - y) + (y - 1)\n return rectangles", "def slice(objects, position, axis, precision=1e-3, layer=0, datatype=0):\n if not isinstance(layer, list):\n layer = [layer]\n if not isinstance(objects, list):\n objects = [objects]\n if not isinstance(position, list):\n pos = [position]\n else:\n pos = sorted(position)\n result = [[] for _ in range(len(pos) + 1)]\n polygons = []\n for obj in objects:\n if isinstance(obj, PolygonSet):\n polygons.extend(obj.polygons)\n elif isinstance(obj, CellReference) or isinstance(obj, CellArray):\n polygons.extend(obj.get_polygons())\n else:\n polygons.append(obj)\n scaling = 1 / precision\n for pol in polygons:\n for r, p in zip(result, clipper._chop(pol, pos, axis, scaling)):\n r.extend(p)\n for i in range(len(result)):\n result[i] = PolygonSet(result[i], layer[i % len(layer)], datatype)\n return result", "def slice_depth(self, top=None, base=None):\n top = top or self.top\n base = base or self.base\n assert base > top, \"Slice boundaries must maintain depth order.\"\n\n assert top < self.base, f\"Cannot slice to top {top} with base {self.base}\"\n assert base > self.top, f\"Cannot slice to base {base} with top {self.top}\"\n\n # check that there's a difference, and that it isn't a superset of current range\n if [top, base] != [self.top, self.base] and (\n top > self.top or base < self.base\n ):\n idxs = np.logical_and(self.depths >= top, self.depths <= base)\n # self.img = self.img[idxs]\n # self.depths = self.depths[idxs]\n # self.top, self.base = top, base\n return CoreColumn(\n self.img[idxs, ...],\n depths=self.depths[idxs],\n top=top,\n base=base,\n add_mode=self.add_mode,\n add_tol=self.add_tol,\n )\n else:\n return self", "def _build_list_of_excluded_pixels2(self, exclude_zones, img_width, 
img_height):\n \n full_image = numpy.ones((img_height, img_width), dtype=uint8)\n for x, y, width, height in exclude_zones:\n \n # creates a matrix where 0 is placed on pixels to exclude, and 1 on pixel to keep\n exclusion = numpy.zeros((height, width), dtype=uint8)\n exclusion = numpy.pad(exclusion, ((min(y, img_height) , max(0, img_height - (y + height))), (min(x, img_width), max(0, img_width - (x + width)))), constant_values=1)\n \n full_image *= exclusion[0:img_height, 0:img_width] # crop exclusion array if it's size is higher than image (exclusion zone outside of image dimensions)\n \n return full_image", "def sub(z: list[int], x: int, y: int) -> list[int]:\n i: int = 0\n lists = list()\n end: int = y\n start: int = x\n if len(z) == 0 or end <= 0 or len(z) < start:\n return lists\n elif len(z) < end: \n while len(z) > start:\n lists.append(z[start])\n start += 1 \n return lists\n elif start >= 0: \n while end > start: \n lists.append(z[start])\n start += 1 \n return lists\n elif start < 0: \n while end > i:\n lists.append(z[i])\n i += 1\n return lists\n return z", "def grid_th_list(k, l, x_th_b, y_th_b, x_ticks, y_ticks):\n x_th_l,x_th_u = x_th_b\n y_th_l,y_th_u = y_th_b\n\n return [(x_th_l + (x_th_u-x_th_l)*i/max(1,x_ticks-1),\n y_th_l + (y_th_u-y_th_l)*j/(max(1,y_ticks-1)))\n for i in range(x_ticks) for j in range(y_ticks)]", "def make_groups(length_of_stays, borders):\n length_of_stays = np.array(length_of_stays)\n groups = [np.where(length_of_stays <= borders[0])[0]] # first group\n for od, do in zip(borders, borders[1:]):\n groups.append(np.where(np.logical_and(od < length_of_stays, length_of_stays <= do))[0])\n groups.append(np.where(borders[len(borders) - 1] < length_of_stays)[0]) # last group\n return groups", "def __get_adjacents_from_id(self, position):\n if position == 1: #Upper-left corner.\n return [position + 5, position + 1]\n elif position == 5: #Upper-right corner.\n return [position + 5, position - 1]\n elif position == 21: #Lower-left corner.\n return [position - 5, position + 1]\n elif position == 25: #Lower-right corner.\n return [position - 5, position - 1]\n elif position == 2 or position == 3 or position == 4: #Upper wall.\n return [position + 5, position - 1, position + 1]\n elif position == 10 or position == 15 or position == 20: #Right wall.\n return [position + 5, position - 5, position - 1]\n elif position == 6 or position == 11 or position == 16: #Left wall.\n return [position + 5, position - 5, position + 1]\n elif position == 22 or position == 23 or position == 24: #Bottom wall.\n return [position - 5, position - 1, position + 1]\n else: #All other positions.\n return [position - 5, position + 5, position - 1, position + 1]", "def ind_thick(layer_coords, roi_vec_coords, tissue_image):\n #find thickness for each ind layer\n \n vecall_lall_lst = []\n for vec in roi_vec_coords:\n #coordinates for intersection of vector and layer\n x_in_pts = list(map(math.floor, np.linspace(vec[0][0], vec[1][0], 70)))[1:-1] #or70\n # print(x_in_pts)\n l1_vec = vec[0], list(np.squeeze([i for i in layer_coords[1] if i[0] == x_in_pts[0]]))\n l2_vec = l1_vec[1], list(np.squeeze([i for i in layer_coords[2] if i[0] == x_in_pts[1]]))\n l3_vec = l2_vec[1], list(np.squeeze([i for i in layer_coords[3] if i[0] == x_in_pts[2]]))\n l4_vec = l3_vec[1], list(np.squeeze([i for i in layer_coords[4] if i[0] == x_in_pts[3]]))\n l5_vec = l4_vec[1], list(np.squeeze([i for i in layer_coords[5] if i[0] == x_in_pts[4]]))\n l6_vec = l5_vec[1], vec[1]\n \n\n #thickness values for each layer\n 
lall_vec = l1_vec, l2_vec, l3_vec, l4_vec, l5_vec, l6_vec\n lall_thick = vec_thick(layer_coords, lall_vec, tissue_image)\n \n # print(l5_vec, \"\\n\", l6_vec)\n print(lall_thick) \n\n vecall_lall_lst.append(lall_thick)\n \n return vecall_lall_lst", "def get_shapes_for_low_coverage_cutoff(\n per_base_coverage_by_sample,\n region_position_list,\n low_coverage_cutoff,\n max_coverage_value,\n max_z_score,\n min_z_score,\n has_low_coverage,\n):\n\n ## Identify low coverage areas\n shapes = []\n if has_low_coverage:\n for cov_list in per_base_coverage_by_sample:\n prev_pos = -1\n low_cov_start = -1\n for cov, pos in zip(cov_list, region_position_list):\n\n ## If low coverage\n if cov <= low_coverage_cutoff:\n ## If consecutive positions\n if prev_pos + 1 == pos:\n prev_pos = pos\n ## If new region\n else:\n if prev_pos != -1:\n ## Add shapes\n shapes.append(\n {\n \"type\": \"rect\",\n \"x0\": low_cov_start - 0.1,\n \"x1\": prev_pos + 0.1\n if prev_pos != low_cov_start\n else low_cov_start + 0.1,\n \"y0\": 0,\n \"y1\": max_coverage_value,\n \"line_color\": \"grey\",\n \"fillcolor\": \"red\",\n \"opacity\": 0.5,\n \"layer\": \"below\",\n \"xref\": \"x1\",\n \"yref\": \"y1\",\n \"visible\": True,\n }\n )\n\n shapes.append(\n {\n \"type\": \"rect\",\n \"x0\": low_cov_start - 0.1,\n \"x1\": prev_pos + 0.1\n if prev_pos != low_cov_start\n else low_cov_start + 0.1,\n \"y0\": min_z_score,\n \"y1\": max_z_score,\n \"line_color\": \"grey\",\n \"fillcolor\": \"red\",\n \"opacity\": 0.5,\n \"layer\": \"below\",\n \"xref\": \"x2\",\n \"yref\": \"y2\",\n \"visible\": True,\n }\n )\n\n ## set position trackers to current pos\n low_cov_start = pos\n prev_pos = pos\n\n ## Add shapes\n if prev_pos != -1:\n shapes.append(\n {\n \"type\": \"rect\",\n \"x0\": low_cov_start - 0.1,\n \"x1\": prev_pos + 0.1\n if prev_pos != low_cov_start\n else low_cov_start + 0.1,\n \"y0\": 0,\n \"y1\": max_coverage_value,\n \"line_color\": \"grey\",\n \"fillcolor\": \"red\",\n \"opacity\": 0.5,\n \"layer\": \"below\",\n \"xref\": \"x1\",\n \"yref\": \"y1\",\n \"visible\": True,\n }\n )\n\n shapes.append(\n {\n \"type\": \"rect\",\n \"x0\": low_cov_start - 0.1,\n \"x1\": prev_pos + 0.1\n if prev_pos != low_cov_start\n else low_cov_start + 0.1,\n \"y0\": min_z_score,\n \"y1\": max_z_score,\n \"line_color\": \"grey\",\n \"fillcolor\": \"red\",\n \"opacity\": 0.5,\n \"layer\": \"below\",\n \"xref\": \"x2\",\n \"yref\": \"y2\",\n \"visible\": True,\n }\n )\n\n return shapes", "def removeBounded(self, bounds):\n if bounds==None or len(bounds)!=4:\n return\n x1,y1,x2,y2 = bounds\n if x1>x2 :\n temp=x1;x1=x2;x2=temp\n if y1>y2:\n temp=y1;y1=y2;y2=temp\n lst=[]\n for i in range(0,self.length()):\n x=self.x[i]; y=self.y[i]\n if (x>x1 and x<x2) and (y>y1 and y<y2): \n lst.append(i)\n self.removeMultiple(lst)\n return", "def neighbor(pt,cpt):\n nbrs = []\n dirs = []\n if pt[0] == cpt[0]:\n if pt[1] == cpt[1]+1:\n nbrs = [[pt[0]+1,pt[1]],[pt[0]-1,pt[1]],[pt[0],pt[1]+1]]\n dirs = [right,left,ahead]\n elif pt[1] == cpt[1]-1:\n nbrs = [[pt[0]+1,pt[1]],[pt[0]-1,pt[1]],[pt[0],pt[1]-1]]\n dirs = [left,right,ahead]\n if pt[1] == cpt[1]:\n if pt[0] == cpt[0]+1:\n nbrs = [[pt[0]+1,pt[1]],[pt[0],pt[1]+1],[pt[0],pt[1]-1]]\n dirs = [ahead,left,right]\n elif pt[0] == cpt[0]-1:\n nbrs = [[pt[0]-1,pt[1]],[pt[0],pt[1]+1],[pt[0],pt[1]-1]]\n dirs = [ahead,right,left]\n assert (len(nbrs) == 3 and len(dirs) == 3), 'error! number of neighbors is invalid!' 
# sanity check\n return [nbrs,dirs]", "def pointListForT(x, y, type):\n\n\tpointList = []\n\n\tif x < 10:\n\t\txString = \"0%d\" % x\n\telse:\n\t\txString = \"%d\" % x\n\n\tif x < 11:\n\t\txMString = \"0%d\" % (x - 1)\n\telse:\n\t\txMString = \"%d\" % (x - 1)\n\n\tif x < 9:\n\t\txPString = \"0%d\" % (x + 1)\n\telse:\n\t\txPString = \"%d\" % (x + 1)\n\n\tif y < 11:\n\t\tyMString = \"0%d\" % (y - 1)\n\telse:\n\t\tyMString = \"%d\" % (y - 1)\n\n\tif y < 9:\n\t\tyPString = \"0%d\" % (y + 1)\n\telse:\n\t\tyPString = \"%d\" % (y + 1)\n\n\tif y < 10:\n\t\tyString = \"0%d\" % y\n\telse:\n\t\tyString = \"%d\" % y\n\n\tif type == 3:\t\t# Down\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sTR\" % (xMString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sBR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTL\" % (xString, yPString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sBL\" % (xPString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yString)]]\n\telif type == 4:\t\t# Left\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sTR\" % (xMString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sBR\" % (xString, yMString)]]\n\t\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sTL\" % (xString, yPString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString)]]\t\n\telif type == 5:\t\t# Up\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sTR\" % (xMString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yMString)]]\n\t\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sBR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sBL\" % (xPString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yString)]]\n\telse: # Type == 6\t# Right\n\t\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sBR\" % (xString, yMString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\t\tbl = 
[\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sBR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTL\" % (xString, yPString)]]\n\t\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sBL\" % (xPString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString)]]\n\t\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yString)]]\n\n\tpointList = [tl, tr, bl, br, cc]\n\n\treturn pointList", "def borders((u,v)):\r\n return ((u,v+1,S), (u+1,v,W), (u,v,S), (u,v,W))", "def discrete_layer(width: float, steps: int) -> list:\n\n min_x = 0.001\n steps = steps/2\n\n def sum_function(stretch_factor):\n return width - min_x * ((1 - stretch_factor**steps)/(1 - stretch_factor))\n\n stretch = float(fsolve(sum_function, 1.3)[0])\n\n return sub_division(width, min_x, stretch)", "def get_fan_in(xy=(0, 0), dim_x_l=10, dim_y_l=10, dim_x_u=9, dim_y_u=9, block_x=2, block_y=2, radius=2):\n x = xy[0]\n y = xy[1]\n if dim_x_u > 1:\n factor_x = ((dim_x_l-1)-(block_x-1))/(1.0*(dim_x_u-1))\n else:\n factor_x = ((dim_x_l-1)-(block_x))/2.0\n if dim_y_u > 1:\n factor_y = ((dim_y_l-1)-(block_y-1))/(1.0*(dim_y_u-1))\n else:\n factor_y = ((dim_y_l-1)-(block_y))/2.0\n results = []\n if dim_x_u > 1 and dim_y_u > 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((factor_x*(x))+xx), int((factor_y*(y))+yy)))\n return results\n elif dim_x_u == 1 and dim_y_u > 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((dim_x_l-block_x)/2.0+xx), int((factor_y*(y)+yy))))\n return results\n elif dim_x_u > 1 and dim_y_u == 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((factor_x*(x)+xx)), int((dim_y_l-block_y)/2.0+yy)))\n return results\n elif dim_x_u == 1 and dim_y_u == 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((dim_x_l-block_x)/2.0+xx), int((dim_y_l-block_y)/2.0+yy)))\n return results", "def planeSliceGnoKDI(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 5000, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n\n cdist = uxmax/(np.abs(50*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = 
np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # Plots\n fig = plt.figure(figsize = (6, 10))\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n # ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n # rx = np.linspace(-uxmax, uxmax, gsizex)\n # ry = np.linspace(-uymax, uymax, gsizey)\n # ux, uy = np.meshgrid(rx, ry)\n\n # rx2 = np.linspace(xmin, xmax, gsizex)\n # im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n # cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n # cbar.set_label('G', fontsize=16)\n # ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n # cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n # paths = cs.collections[0].get_paths()\n # uppaths = []\n # for p in paths:\n # cuvert = np.array(p.vertices).T\n # upx, upy = mapToUp(cuvert, alp, ax, ay)\n # ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n # ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n # ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n # ax0.set_xlabel(r\"$u'_x$\", fontsize = 16)\n # ax0.set_ylim([-uymax, uymax])\n # ax0.set_xlim([-uxmax, uxmax])\n # ax0.set_ylabel(r\"$u'_y$\", fontsize = 16)\n # ax0.set_title(\"Gain in the u' plane\")\n\n # G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n # G = G - G[-1] + 1\n fig = plt.figure(figsize = (7, 3), dpi = 100)\n ax1 = plt.subplot()\n # ax1.plot(rx2, G, color = 'blue', label = \"Gain from FFT\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'blue')\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(xmin, xmax)\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 16)\n ax1.set_ylabel('G', fontsize = 16)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n # ax1.legend(loc = 1)\n\n\n # col_labels = ['Parameter', 'Value'] # 
Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n # grid.tight_layout(fig, pad = 1.5)\n plt.tight_layout()\n plt.show()\n return", "def detectBlocksInDepthImage(self):\n depth_range_dict = {'1':[173,178],'2':[169,172],'3':[165,169],'4':[159,163],'5':[156,158],'6':[147,155],'7':[139,146],'8':[132,138]}\n depth_frame = self.DepthFrameRaw\n rgb_frame = self.VideoFrame\n rgb_frame = cv2.resize(rgb_frame, (640,480))\n depth_frame = cv2.resize(depth_frame, (640, 480))\n np.clip(depth_frame,0,2**10 - 1,depth_frame)\n depth_frame >>= 2\n depth_frame = depth_frame.astype(np.uint8)\n filt_block = []\n for k,v in depth_range_dict.items():\n thresh = cv2.inRange(depth_frame,v[0],v[1])\n cv2.imwrite(\"/home/student/armlab-w20/log/img.jpeg\", thresh)\n _ , contours, _ = cv2.findContours(thresh, 1, 2)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 400 and area < 700:\n block = []\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n color = self.determine_color(rgb_frame, box)\n org = (box[0][0], box[0][1])\n rgb_frame = cv2.putText(rgb_frame, color, org,cv2.FONT_HERSHEY_SIMPLEX , 0.5 ,(0,0,0),2, cv2.LINE_AA)\n rgb_frame = cv2.drawContours(rgb_frame,[box],0,(0,0,0),0)\n self.VideoFrame = rgb_frame\n block.append(box)\n block.append(int(k))\n block.append(color)\n filt_block.append(block)\n return filt_block", "def get_filtration(self, x):\n n = x.shape[0]\n s = sorted([(i, x[i]) for i in range(n)], key=lambda x: x[1])\n selected = [False for i in range(n)]\n \n sets = {}\n ancestor = {i: i for i in range(n)}\n i = 0\n while False in selected:\n newpoint = s[i]\n j = s[i][0]\n val = s[i][1]\n\n selected[j] = True\n\n if j == 0 and selected[1]:\n ancestor[0] = ancestor[1]\n sets[ancestor[1]].appendPoint(0)\n elif j == 0:\n sets[0] = PersistenceInterval(0, val)\n elif j == n - 1 and selected[n - 2]:\n ancestor[n - 1] = ancestor[n - 2]\n sets[ancestor[n - 2]].appendPoint(n -1)\n elif j == n - 1:\n sets[n - 1] = PersistenceInterval(n - 1, val)\n elif selected[j - 1] and selected[j + 1]:\n i_a = ancestor[j - 1]\n i_b = ancestor[j + 1]\n a = x[i_a]\n b = x[i_b]\n if a < b:\n ancestor[j] = i_a\n for key in range(n):\n if ancestor[key] == i_b:\n ancestor[key] = i_a\n sets[i_b].death = val\n sets[i_b].appendPoint(j)\n sets[i_a].appendChild(sets[i_b])\n sets[i_a].appendPoint(j)\n else:\n ancestor[j] = i_b\n for key in range(n):\n if ancestor[key] == i_a:\n ancestor[key] = i_b\n sets[i_a].death = val\n sets[i_a].appendPoint(j)\n 
sets[i_b].appendChild(sets[i_a])\n sets[i_b].appendPoint(j)\n elif selected[j - 1]:\n ancestor[j] = ancestor[j - 1]\n sets[ancestor[j - 1]].appendPoint(j)\n elif selected[j + 1]:\n ancestor[j] = ancestor[j + 1]\n sets[ancestor[j + 1]].appendPoint(j)\n else:\n sets[j] = PersistenceInterval(j, val)\n\n i += 1\n\n sets[s[0][0]].death = self.infty\n\n setList = sorted([sets[i] for i in sets.keys()], key=lambda x:x.getRelevance(), reverse=True)\n\n self.sets = setList\n return setList", "def depth_conversion(point_depth, w, h, f):\n i_c = np.float(h) / 2 - 1\n j_c = np.float(w) / 2 - 1\n columns, rows = np.meshgrid(np.linspace(0, w - 1, num=w), np.linspace(0, h - 1, num=h))\n distance_from_center = ((rows - i_c) ** 2 + (columns - j_c) ** 2) ** 0.5\n return point_depth / (1 + (distance_from_center / f) ** 2) ** 0.5", "def asteroidCreator(numCorner,win):\n\n xCoor = []\n yCoor = []\n\n # Creating coordinates of the points\n coorRange = [i for i in range(-10,10) if i not in [0]] # to avoid 0\n\n for i in range(numCorner):\n xCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n yCoor.append(round(random.choice(coorRange)*random.uniform(0.01,1),2))\n\n # Sorting the coordinates\n bubbleSort(xCoor,len(xCoor))\n bubbleSort(yCoor,len(yCoor))\n\n\n # Isolating the extreme points\n xSmallest = xCoor.pop(0)\n xLargest = xCoor.pop()\n\n ySmallest = yCoor.pop(0)\n yLargest = yCoor.pop()\n\n # Shuffle the coordinates\n random.shuffle(xCoor)\n random.shuffle(yCoor)\n\n # Divide them into two sets\n xCoorLower = xCoor[:len(xCoor)//2]\n xCoorUpper = xCoor[len(xCoor)//2:]\n\n yCoorLower = yCoor[:len(yCoor)//2]\n yCoorUpper = yCoor[len(yCoor)//2:]\n\n # Append back the extreme points, and sort them again\n xCoorLower.append(xSmallest)\n xCoorLower.append(xLargest)\n xCoorUpper.append(xSmallest)\n xCoorUpper.append(xLargest)\n\n yCoorLower.append(ySmallest)\n yCoorLower.append(yLargest)\n yCoorUpper.append(ySmallest)\n yCoorUpper.append(yLargest)\n\n bubbleSort(xCoorLower,len(xCoorLower))\n bubbleSort(xCoorUpper,len(xCoorUpper))\n bubbleSort(yCoorLower,len(yCoorLower))\n bubbleSort(yCoorUpper,len(yCoorUpper))\n\n # Getting the vector lengths out of the points\n # We will get vectors in 4 directions from 4 lists\n xVectorLengths = []\n yVectorLengths = []\n\n for i in range(len(xCoorLower)-1):\n xVectorLengths.append(xCoorLower[i]-xCoorLower[i+1])\n for i in range(len(xCoorUpper)-1):\n xVectorLengths.append(xCoorUpper[i+1]-xCoorUpper[i])\n for i in range(len(yCoorLower)-1):\n yVectorLengths.append(yCoorLower[i]-yCoorLower[i+1])\n for i in range(len(yCoorUpper)-1):\n yVectorLengths.append(yCoorUpper[i+1]-yCoorUpper[i])\n\n random.shuffle(xVectorLengths)\n random.shuffle(yVectorLengths)\n\n # Creating the vectors\n vectors = []\n defaultVector = [0,0]\n\n for i in range(len(xVectorLengths)):\n defaultVector[0] = round(xVectorLengths[i],2)\n defaultVector[1] = round(yVectorLengths[i],2)\n vectors.append(defaultVector.copy())\n\n # Sorting vectors by their angle\n sortedVectors = []\n quadrant1 = []\n quadrant2 = []\n quadrant3 = []\n quadrant4 = []\n\n ### Dividing them by quadrants\n for vector in vectors:\n if vector[0] >= 0 and vector[1] >= 0:\n quadrant1.append(vector)\n elif vector[0] <= 0 and vector[1] >= 0:\n quadrant2.append(vector)\n elif vector[0] <= 0 and vector[1] <= 0:\n quadrant3.append(vector)\n elif vector[0] >= 0 and vector[1] <= 0:\n quadrant4.append(vector)\n\n ### Sorting them inside the quadrants\n quadrant1 = angleSort(quadrant1,1,len(quadrant1))\n quadrant2 = 
angleSort(quadrant2,2,len(quadrant2))\n quadrant3 = angleSort(quadrant3,3,len(quadrant3))\n quadrant4 = angleSort(quadrant4,4,len(quadrant4))\n\n ### Adding them up in order\n for vector in quadrant1:\n sortedVectors.append(vector)\n for vector in quadrant2:\n sortedVectors.append(vector)\n for vector in quadrant3:\n sortedVectors.append(vector)\n for vector in quadrant4:\n sortedVectors.append(vector)\n\n # Creating the points for the polygon\n points = []\n points = vectorsToPoints(sortedVectors,points)\n\n rightEdge = 0\n leftEdge = 0\n upperEdge = 0\n lowerEdge = 0\n\n # getting the boundaries for the asteroid\n for point in points:\n if point[0] > rightEdge:\n rightEdge = point[0]\n elif point[0] < leftEdge:\n leftEdge = point[0]\n if point[1] > upperEdge:\n upperEdge = point[1]\n elif point[1] < lowerEdge:\n lowerEdge = point[1]\n\n # Width and height are only required since it is a child of rotating_block class\n width = rightEdge - leftEdge\n height = upperEdge - lowerEdge\n\n centerPoint = [(rightEdge + leftEdge) / 2 , (upperEdge + lowerEdge) / 2]\n\n asteroid = pho.Asteroid(win,width,height,points,centerPoint[0],centerPoint[1])\n\n return asteroid", "def convert_to_xywh(pts):\n boxes = []\n for pt1, pt2 in pts:\n x = pt1[0]\n y = pt1[1]\n w = pt2[0] - pt1[0]\n h = pt2[1] - pt1[1]\n boxes.append([x, y, w, h])\n return boxes" ]
[ "0.6802484", "0.639531", "0.570502", "0.55743366", "0.5564015", "0.54822946", "0.54609275", "0.54286385", "0.541968", "0.54144114", "0.5410887", "0.5384912", "0.5378323", "0.53586614", "0.53429186", "0.53311557", "0.53209656", "0.52847123", "0.52787846", "0.5276502", "0.5276502", "0.52530223", "0.52474594", "0.52359664", "0.5218197", "0.52103525", "0.5175183", "0.51683044", "0.5166727", "0.51525813", "0.51480776", "0.51413727", "0.5110471", "0.5092367", "0.5081779", "0.5080602", "0.50768155", "0.5041659", "0.5024591", "0.50218457", "0.50083", "0.49989763", "0.4998347", "0.4996021", "0.49939027", "0.49932912", "0.49862465", "0.49769035", "0.49714622", "0.49714622", "0.4970931", "0.49693155", "0.49662325", "0.4965717", "0.49618793", "0.49582544", "0.49402696", "0.4940073", "0.49387184", "0.49367258", "0.49305168", "0.49293151", "0.49135068", "0.49127406", "0.49120864", "0.49075317", "0.49040052", "0.4903124", "0.4890913", "0.4886706", "0.4883222", "0.4880586", "0.48732945", "0.4865525", "0.48637605", "0.48628616", "0.48622486", "0.48616046", "0.48584044", "0.48546714", "0.48544115", "0.48541918", "0.4849831", "0.48473316", "0.48395467", "0.48384953", "0.48303616", "0.48297006", "0.48225915", "0.48219955", "0.48176172", "0.48167822", "0.48142728", "0.48134953", "0.48127273", "0.48095548", "0.4807404", "0.4805337", "0.48052734", "0.48046565" ]
0.555994
5
This algorithm returns a cutlist describing a series of parallel lines, each marked at a different z value, used to calibrate the laser's z focus.
def z_focus(block,cut,laser):
	# Marks one straight line per z step, lowering z between lines, so the
	# best-focused mark reveals the correct focal z value for the laser.
	# Assumes json is imported at module level (used by json.dumps below).
	cutlist = []
	iterations = int(cut["final_dimension_z"]/laser["z_spacing"])
	#Currently x,y are chosen to span a good amount of the block, rather than using set distances and sizes
	y = cut["final_dimension_y"]/2
	offset = laser["xy_spacing"]
	x = 0
	cutlist.append(["z_abs","0"])
	for a in range(iterations):
		# One vertical line at the current x, then step down in z and across in x
		cutlist.append(["jump", f"{x:.6f}", f"{y:.6f}"])
		cutlist.append(["mark", f"{x:.6f}", f"{-y:.6f}"])
		cutlist.append(["z_rel", str(-laser["z_spacing"])])
		x = x + offset
	cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"])
	cutlist.append(["stop_trigger"])
	return json.dumps(cutlist)
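A minimal usage sketch, assuming z_focus is in scope; the cut and laser dictionaries are hypothetical stand-ins holding only the keys the function reads:

import json

block = {}  # z_focus reads no keys from block
cut = {"final_dimension_z": 1.0, "final_dimension_y": 10.0}   # hypothetical values
laser = {"z_spacing": 0.25, "xy_spacing": 0.5}                # hypothetical values

commands = json.loads(z_focus(block, cut, laser))
# 4 z steps -> set_trigger4, z_abs, then 4 x (jump, mark, z_rel), stop_trigger
print(len(commands))   # 15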
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def line(x1,y1,x2,y2,z_thickness,laser):\r\n\t#Global variables that are used by all algorithms\r\n\tlayers = int(z_thickness/laser[\"z_spacing\"])\r\n\r\n\t#Works out offset when beginning on a new layer\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\ttaper_x,taper_y = offset(x1,y1,x2,y2,taper)\r\n\r\n\t#Works out offset between each parallel scan on the same layer\r\n\tdelta_x,delta_y = offset(x1,y1,x2,y2,laser[\"xy_spacing\"])\r\n\r\n\t#Works out maximum offset from starting line, we don't want to exceed this at any point.\r\n\tmax_taper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (z_thickness) * 2\r\n\tmax_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)\r\n\t#max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y\r\n\r\n\t#Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows\r\n\tcutlist = []\r\n\tfor a in range(layers):\r\n\t\tnew_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y\r\n\t\ti = 0\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\twhile abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):\r\n\t\t\t#This use of i is to reduce the jump distance between individual scans\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", 
f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y\r\n\t\t\ti = i + 1\r\n\t\t#Having completed one layer, the laser moves down to begin the next layer\r\n\t\tmax_delta_x = max_delta_x - taper_x\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def collect_lines(xy, BL, bs, climv):\n lines = [zip(xy[BL[i, :], 0], xy[BL[i, :], 1]) for i in range(len(BL))]\n line_segments = LineCollection(lines, # Make a sequence of x,y pairs\n linewidths=1., # could iterate over list\n linestyles='solid',\n cmap='coolwarm',\n norm=plt.Normalize(vmin=-climv, vmax=climv))\n line_segments.set_array(bs)\n print(lines)\n return line_segments", "def visualise(cut_list): \r\n\tcutlist = json.load(cut_list)\r\n\tmodified_list =[]\r\n\tz_set = 0\r\n\tc_set = 0\r\n\ta_set = 0\r\n\tcut_num = 0\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\ta.pop(0)\r\n\t\t\ta = list(map(float,a)) + [z_set]\r\n\t\t\t\r\n\t\t\tif a_set != 0 or c_set != 0:\r\n\t\t\t\ta = rotate_a(a_set,a)\r\n\t\t\t\ta = rotate_c(c_set,a_set,a)\r\n\r\n\t\t\ta = a +[f\"a_set {a_set} c_set {c_set} z_set {z_set:.1f} cut_num {cut_num}\"]\r\n\t\t\tmodified_list.append(a)\r\n\r\n\t\telif a[0] == \"z_abs\":\r\n\t\t\tz_set = float(a[1])\r\n\t\t\tcut_num += 1\r\n\t\telif a[0] == \"c_abs\":\r\n\t\t\tc_set = float(a[1])\r\n\t\telif a[0] == \"a_abs\":\r\n\t\t\ta_set = float(a[1])\r\n\r\n\t\telif a[0] == \"z_rel\" or a[0] == \"z_step\":\r\n\t\t\tz_set = z_set + float(a[1])\r\n\t\telif a[0] == \"c_rel\" or a[0] == \"c_step\":\r\n\t\t\tc_set = c_set + float(a[1])\r\n\t\telif a[0] == \"a_rel\" or a[0] == \"a_step\":\r\n\t\t\ta_set = a_set + float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\tdf = pd.DataFrame(modified_list, columns = [\"x\",\"y\",\"z\",\"layer\"])\r\n\tfig = px.line_3d(df,\"x\",\"y\",\"z\",color=\"layer\")\r\n\t#fig.update_layout(scene_aspectmode = \"data\")\r\n\tfig.show()", "def drawcutline(f,layernamelist,cutline_entities_count): \r\n \r\n #layernamelist=[layernamelist[0]] \r\n layercount=0\r\n ringlist=[[[-0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]]]\r\n flashlist=buildflashlist()\r\n cutlineset=buildcutlineset() \r\n \r\n f.write(\"0\\nSECTION\\n2\\nENTITIES\\n\")\r\n \r\n for layername in layernamelist:\r\n layercount=layercount+1\r\n for polyline in cutlineset:\r\n cutline_entities_count=cutline_entities_count+1\r\n f.write(\"0\\nPOLYLINE\\n8\\n\"+layername+\"\\n5\\n\"+hex(cutline_entities_count)[2:]) # begin writing a polyline\r\n 
f.write(\"\\n66\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n40\\n0.08\\n41\\n0.08\\n\")\r\n cutline_entities_count=drawwidthpolyline(polyline, cutline_entities_count, f,layername)\r\n cutline_entities_count=drawring(ringlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawflash(flashlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawtext(cutline_entities_count, f, layername,layercount)\r\n \r\n return cutline_entities_count", "def toArray(parsedList):\n interpretCommand = {\n 'C' : lambda x, prevL : x[-2:], # bezier curve. Ignore the curve.\n 'L' : lambda x, prevL : x[0:2],\n 'M' : lambda x, prevL : x[0:2],\n 'Z' : lambda x, prevL : prevL[0],\n }\n\n points =[]\n for i,(c, arg) in enumerate(parsedList):\n #debug('toArray ', i, c , arg)\n newp = interpretCommand[c](arg, points)\n points.append( newp)\n a=numpy.array( points )\n\n # Some times we have points *very* close to each other\n # these do not bring any meaning full info, so we remove them\n #\n x,y, w,h = computeBox(a)\n sizeC = 0.5*(w+h)\n #deltas = numpy.zeros((len(a),2) )\n deltas = a[1:] - a[:-1] \n #deltas[-1] = a[0] - a[-1]\n deltaD = numpy.sqrt(numpy.sum( deltas**2, 1 ))\n sortedDind = numpy.argsort(deltaD)\n # expand longuest segments\n nexp = int(len(deltaD)*0.9)\n newpoints=[ None ]*len(a)\n medDelta = deltaD[sortedDind[len(deltaD)/2] ]\n for i,ind in enumerate(sortedDind):\n if deltaD[ind]/sizeC<0.005: continue\n if i>nexp:\n np = int(deltaD[ind]/medDelta)\n pL = [a[ind]]\n #print i,'=',ind,'adding ', np,' _ ', deltaD[ind], a[ind], a[ind+1]\n for j in range(np-1):\n f = float(j+1)/np\n #print '------> ', (1-f)*a[ind]+f*a[ind+1]\n pL.append( (1-f)*a[ind]+f*a[ind+1] )\n newpoints[ind] = pL\n else:\n newpoints[ind]=[a[ind]]\n if(D(a[0],a[-1])/sizeC > 0.005 ) :\n newpoints[-1]=[a[-1]]\n\n points = numpy.concatenate([p for p in newpoints if p!=None] )\n ## print ' medDelta ', medDelta, deltaD[sortedDind[-1]]\n ## print len(a) ,' ------> ', len(points)\n\n rel_norms = numpy.sqrt(numpy.sum( deltas**2, 1 )) / sizeC\n keep = numpy.concatenate([numpy.where( rel_norms >0.005 )[0],numpy.array([len(a)-1])])\n\n #return a[keep] , [ parsedList[i] for i in keep]\n #print len(a),' ',len(points)\n return points , []", "def generateCutList(cut_configuration):\r\n\t#Check that this line reads json.loads(cut_configuration)\r\n\tinput_json = json.load(cut_configuration)\r\n\r\n\t#Currently only desired_cut and laser_cut_config are required\r\n\ttry:\r\n\t\tblock = input_json[\"block\"]\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\tcut = input_json[\"desired_cut\"]\r\n\t\tlaser = input_json[\"laser_cut_config\"]\r\n\texcept:\r\n\t\traise Exception(\"Either desired_cut or laser_cut_config not provided\")\r\n\r\n\tif cut[\"cut_process\"] == \"line\":\r\n\t\tfinal_list = line(cut[\"x1\"],cut[\"y1\"],cut[\"x2\"],cut[\"y2\"],cut[\"final_dimension_z\"]+laser[\"z_final_overshoot\"],laser)\r\n\telif cut[\"cut_process\"] == \"simple_core\":\r\n\t\tfinal_list = simple_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"vertical_core\":\r\n\t\tfinal_list = vertical_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"oss_stacked\":\r\n\t\tfinal_list = oss_stacked(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"z_focus\":\r\n\t\tfinal_list = z_focus(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"cross\":\r\n\t\tfinal_list = cross(block,cut,laser)\r\n\telse:\r\n\t\traise Exception(\"No such cut exists: Check cut_process\")\r\n\t#print(time_taken(final_list, laser))\r\n\tnow = 
datetime.now()\r\n\ttimestamp = str(now.strftime(\"%m-%d_%H_%M\"))\r\n\tcomplete_name = os.path.join(save_path, timestamp+\".csv\")\r\n\twith open(complete_name, mode='w',newline ='') as test_data:\r\n\t data_writer = csv.writer(test_data, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\t list_data = json.loads(final_list)\r\n\t for line1 in list_data:\r\n\t \tdata_writer.writerow(line1)\r\n\treturn final_list", "def linelist(self):\n line_list = Marker()\n line_list.header = self._header\n line_list.type = Marker.LINE_LIST\n line_list.action = Marker.ADD\n line_list.scale.x = 0.005\n line_list.color = self.YELLOW\n line_list.pose = deepcopy(self.POSE)\n\n line_list.points.extend((self._p1, self._p2))\n line_list.points.extend((self._p2, self._p3))\n line_list.points.extend((self._p3, self._p4))\n line_list.points.extend((self._p4, self._p1))\n line_list.points.extend((self._p5, self._p6))\n line_list.points.extend((self._p6, self._p7))\n line_list.points.extend((self._p7, self._p8))\n line_list.points.extend((self._p8, self._p5))\n line_list.points.extend((self._p1, self._p5))\n line_list.points.extend((self._p2, self._p6))\n line_list.points.extend((self._p3, self._p7))\n line_list.points.extend((self._p4, self._p8))\n\n return line_list", "def get_landmarks(self, sorted_cut_endo_pts, lowest_pt_idx, display_opt):\n\n # make polydata out of sorted endo pts\n numPoints = sorted_cut_endo_pts.shape[0]\n vtk_float_arr = numpy_support.numpy_to_vtk(num_array=np.asarray(sorted_cut_endo_pts), deep=True, array_type=vtk.VTK_FLOAT)\n vtkpts = vtk.vtkPoints()\n vtkpts.SetData(vtk_float_arr)\n cut_endo_poly = vtk.vtkPolyData()\n cut_endo_poly.SetPoints(vtkpts)\n\n # now make lines\n polyLine = vtk.vtkPolyLine()\n polyLine.GetPointIds().SetNumberOfIds(numPoints)\n\n for i in range(numPoints):\n polyLine.GetPointIds().SetId(i, i) # from 0,1 then 2,3 then 4,5 ...\n\n cells = vtk.vtkCellArray()\n cells.InsertNextCell(polyLine)\n\n # add points and lines to polydata container\n cut_endo_poly.SetLines(cells)\n\n # create tree for intersection process\n bspTree = vtk.vtkModifiedBSPTree() # bsp tree is much faster than obbtree due to rejection test\n bspTree.SetDataSet(cut_endo_poly)\n bspTree.BuildLocator()\n\n top_left = np.asarray(sorted_cut_endo_pts[0])\n top_right = np.asarray(sorted_cut_endo_pts[-1])\n low_pt = np.asarray(sorted_cut_endo_pts[lowest_pt_idx])\n\n # get direction of lines\n line_dir = normalize(top_right - top_left) # top_pt[0] to top_pt[1]\n\n # add distance on both sides to make sure the line can pass through the entire LV horizontally\n dist = np.linalg.norm(top_right - top_left)\n pSource_0 = top_right + dist*line_dir\n pTarget_0 = top_left - dist*line_dir\n\n # determine the length to travel from top to bottom\n top_center = (top_right + top_left)/2.0\n midline = normalize(low_pt - top_center)\n max_dist = np.linalg.norm(low_pt - top_center)\n\n left_pts = []\n right_pts = []\n\n weights = np.linspace(0.00, 0.98, self.numSamples)\n\n for i in range(self.numSamples):\n # determine source and target points\n pSource = pSource_0 + weights[i]*max_dist*midline\n pTarget = pTarget_0 + weights[i]*max_dist*midline\n center = (pSource + pTarget) / 2.0\n\n # set empty variables\n subId = vtk.mutable(0)\n pcoords = [0, 0, 0]\n t = vtk.mutable(0)\n left = [0, 0, 0]\n right = [0, 0, 0]\n\n # # run interesect command\n # pointid1 = bspTree.IntersectWithLine(pSource, pTarget, 0.001, t, left, pcoords, subId)\n # pointid2 = bspTree.IntersectWithLine(pTarget, pSource, 0.001, t, right, 
pcoords, subId)\n\n # intersect with line that goes from source to center or target to center\n pointid1 = bspTree.IntersectWithLine(pSource, center, 0.001, t, left, pcoords, subId)\n pointid2 = bspTree.IntersectWithLine(pTarget, center, 0.001, t, right, pcoords, subId)\n\n left_pts.append(list(left))\n right_pts.append(list(right))\n\n if display_opt:\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cut_endo_poly)\n\n all_act = vtk.vtkActor()\n all_act.SetMapper(mapper)\n\n right_act = include_points(left_pts, len(left_pts), 4, (1,0,0))\n left_act = include_points(right_pts, len(right_pts), 4, (1,0,0))\n low_pt_act = include_points(list(low_pt), 1, 10, (1,0,1))\n\n top_right_act = include_points(list(top_right), 1, 10, (0,0,1))\n top_left_act = include_points(list(top_left), 1, 10, (0,0,1))\n\n ren = vtk.vtkRenderer()\n ren.AddActor(all_act)\n ren.AddActor(right_act)\n ren.AddActor(left_act)\n ren.AddActor(top_right_act)\n ren.AddActor(top_left_act)\n ren.AddActor(low_pt_act)\n\n vtk_show(ren)\n\n # ensure that left and right points have the same number of points as numSamples\n if len(left_pts) != self.numSamples or len(right_pts) != self.numSamples:\n print('Either left or right points do not have the same number of points as numSamples!')\n\n return left_pts, right_pts", "def thickenXYList( list, tester, biSectionMax=6, interpolation=xDataEnumsModule.Interpolation.linlin):\n\n def thickenXYList2( interpolation, xl, yl, xu, yu, newList, tester, level ) :\n\n if( level == biSectionMax ) : return\n level += 1\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.loglin:\n xMid = 0.5 * ( xl + xu )\n else :\n xMid = math.sqrt( xl * xu );\n\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.linlog:\n yMid = 0.5 * ( yl + yu )\n else :\n yMid = math.sqrt( yl * yu )\n\n y = tester.evaluateAtX( xMid )\n\n dy = abs( y - yMid )\n if( ( dy > abs( y * tester.relativeTolerance ) ) and ( dy > tester.absoluteTolerance ) ) :\n newList.append( [ xMid, y ] )\n thickenXYList2( interpolation, xl, yl, xMid, y, newList, tester, level )\n thickenXYList2( interpolation, xMid, y, xu, yu, newList, tester, level )\n\n if( len( list ) < 2 ) : raise Exception( \"len( list ) = %d < 2\" % len( list ) )\n newList = []\n for i1, xy in enumerate( list ) :\n x2, y2 = xy\n if( i1 > 0 ) : thickenXYList2( interpolation, x1, y1, x2, y2, newList, tester, 0 )\n newList.append( [ x2, y2 ] )\n x1, y1 = x2, y2\n newList.sort( )\n return( newList )", "def grid_lineify(f, x_lim=(0.,256) ,y_lim=(0.,256), ntraj = 600,\n max_step = 3000, gamma = 0.02, dt = 9., e0 = 0.1,\n T = 0.1,\n e_thresh = 0.001, h = 2e-1, m = 3, bounce = False\n ):\n lines = []\n nx = int(np.sqrt(ntraj))\n x_starts, y_starts = np.meshgrid(np.linspace(x_lim[0],x_lim[1],nx),\n np.linspace(y_lim[0],y_lim[1],nx))\n x_starts = x_starts.flatten()\n y_starts = y_starts.flatten()\n for traj in range(len(x_starts)):\n x,y = x_starts[traj].item(), y_starts[traj].item()\n PE = f(x, y)\n v0 = np.sqrt(e0/m)\n vx,vy = np.random.normal(0,v0), np.random.normal(0,v0)\n line = []\n step = 0\n while step < max_step and np.sqrt(vx*vx+vy*vy) > e_thresh:\n PE = f(x, y)\n if (np.exp(-PE/.01) > np.random.random()):\n break\n # cdiff grad\n gx = ((f(x+h,y)-f(x-h,y))/(2*h)).item()\n gy = ((f(x,y+h)-f(x,y-h))/(2*h)).item()\n vx += 0.5*dt*(gx - gamma*vx + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n vy += 0.5*dt*(gy - gamma*vy + np.random.normal(0,np.sqrt(gamma*e0)) 
)/m\n x += vx*dt\n y += vy*dt\n # Bounce off edges.\n if (bounce):\n if (x > x_lim[1]):\n x -= 2.0*np.abs(x-x_lim[1])\n vx *= -1\n if (x < x_lim[0]):\n x += 2.0*np.abs(x-x_lim[0])\n vx *= -1\n if (y > y_lim[1]):\n y -= 2.0*np.abs(y-y_lim[1])\n vy *= -1\n if (y < y_lim[0]):\n y += 2.0*np.abs(y-y_lim[0])\n vy *= -1\n else: # absorb\n if (x > x_lim[1]):\n break\n elif (x < x_lim[0]):\n break\n elif (y > y_lim[1]):\n break\n elif (y < y_lim[0]):\n break\n line.append([x,y])\n gx = ((f(x+h,y)-f(x-h,y))/(2*h)).item()\n gy = ((f(x,y+h)-f(x,y-h))/(2*h)).item()\n vx += 0.5*dt*(gx - gamma*vx + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n vy += 0.5*dt*(gy - gamma*vy + np.random.normal(0,np.sqrt(gamma*e0)) )/m\n step += 1\n lines.append(line)\n return lines", "def __init__(self, controlPoints=None):\n super(CatmullRomSpline, self).__init__(controlPoints)", "def getShelves(detections, lines):\r\n\r\n for idx,det in enumerate(detections):\r\n dist_to_shelf = np.zeros(len(lines))\r\n b_points = det['box_points']\r\n b_height = b_points[3] - b_points[1] \r\n for l in range(len(lines)): \r\n dist_to_shelf[l] = lines[l] - b_points[1] ### distance of upper-left corner from lines\r\n if dist_to_shelf[l] < 0:\r\n dist_to_shelf[l] = 100000 ## sth huge \r\n #print(dist_to_shelf) \r\n det['shelf'] = np.argmin(dist_to_shelf)\r\n return detections", "def __init__(self, roi_warped_points):\n\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_xfitted = []\n #average x values of the fitted line over the last n iterations\n self.bestx = None\n #polynomial coefficients averaged over the last n iterations\n self.best_fit = [np.array([False])]\n #polinomial coefficients for the last n fits of the lane\n self.recent_fit = []\n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n #radius of curvature of the line in some units\n self.radius_of_curvature = 0\n #distance in meters of vehicle center from the line\n self.line_base_pos = 0\n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float')\n #x values for detected line pixels\n self.allx = None\n #maximum number of iterations to average\n self.max_n = 10 #25\n\n # roi image points in bird's view space\n self.roi_warped_points = roi_warped_points\n\n #y values for detected line pixels\n self.ally = np.linspace(0, self.roi_warped_points[2][1] - 1, self.roi_warped_points[2][1])\n\n # line base pos is calculated through the roi information\n # the used four point ROI has two points at the bottom that are straight\n # with respect to the bottom - as this points are right next to the lines,\n # they can be translated from pixels into meters with the knowledge of\n # a U.S. highway standard lane - this is an apprximation, but should be\n # good enough for this project\n # U.S. 
regulations minimum lane width: 3.7m\n self.xm_per_pix = 3.7 / (self.roi_warped_points[1][0] - self.roi_warped_points[0][0])\n\n # each dashed line is 3m long --> about 33m for warped image\n self.ym_per_pix = 33 / (self.roi_warped_points[2][1] - self.roi_warped_points[0][1])", "def create_lines(self) -> None:\n res = []\n for connection in self.connections:\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location + end_component.pin_locations[connection.end_pin]\n )\n\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n res.append(Line(connection, start_pin_location, *bends, end_pin_location))\n\n self.lines = res", "def cuts(self) -> list[list[int]]:\n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts", "def marker_lines(self) -> list[Line]:\n upper_point = (\n self.leaf_center_px - self.leaf_width_px / 2 * self._analysis_ratio\n )\n lower_point = (\n self.leaf_center_px + self.leaf_width_px / 2 * self._analysis_ratio\n )\n\n lines = []\n for mlc_position in self.position:\n if self._orientation == Orientation.UP_DOWN:\n line = Line((mlc_position, upper_point), (mlc_position, lower_point))\n else:\n line = Line((upper_point, mlc_position), (lower_point, mlc_position))\n lines.append(line)\n return lines", "def get_lightcurves(\n self,\n pipeline=\"pdcsap\",\n cadence=\"short\",\n sectors=None,\n remove_outliers=False,\n quality_bitmask=None,\n ):\n if sectors is None:\n all_sectors = self.all_sectors\n else:\n all_sectors = sectors\n\n for n, sector in enumerate(all_sectors):\n if pipeline == \"pdcsap\":\n l = ShortCadence(\n ticid=self.ticid, sector=sector, verbose=False\n )\n lc = l.get_lc()\n else:\n errmsg = \"pdcsap is only currently available\"\n raise NotImplementedError(errmsg)\n\n if quality_bitmask == \"hard\":\n lc = lc[(lc.quality == 0) | np.isnan(lc.quality)]\n\n if remove_outliers:\n lc, mask = lc.remove_outliers(\n sigma_upper=3, sigma_lower=10, return_mask=True\n )\n\n if n == 0:\n lcs = lc.copy()\n else:\n lcs = lcs.append(lc)\n print(\n f\"{sector}: cdpp={lc.estimate_cdpp():.2f}, std={lc.flux.std():.2f}\"\n )\n\n lcs.sector = all_sectors\n return lcs", "def create_line_list(self,depth_arr):\n\n '''\n depth_arr- depth image as numpy array\n '''\n\n try:\n 
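            # Each entry pairs a segment name with the two landmark points that
            # bound it; the 11-28 indices match MediaPipe Pose numbering
            # (shoulders 11/12, hips 23/24, ...), though that mapping is
            # inferred here rather than stated by the code.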
            body=[['shoulder_line',[self.rpts[11],self.rpts[12]]],['waist_line',[self.rpts[23],self.rpts[24]]],['left_shoulder_waist',[self.rpts[11],self.rpts[23]]],['right_shoulder_waist',[self.rpts[12],self.rpts[24]]],['right_thigh',[self.rpts[24],self.rpts[26]]],['left_thigh',[self.rpts[23],self.rpts[25]]],['right_leg',[self.rpts[26],self.rpts[28]]],['left_leg',[self.rpts[25],self.rpts[27]]],['right_forearm',[self.rpts[14],self.rpts[16]]],['left_forearm',[self.rpts[13],self.rpts[15]]],['right_bicep',[self.rpts[12],self.rpts[14]]],['left_bicep',[self.rpts[11],self.rpts[13]]]]
            self.linelist.points=[]
            self.linelist.header.frame_id = "kinect_frame"
            self.linelist.header.stamp = rospy.Time.now()
            self.linelist.type = Marker.LINE_LIST

            self.linelist.id=1
            self.linelist.action = Marker.ADD
            self.linelist.scale.x = 0.05

            self.linelist.color.g = 1.0
            self.linelist.color.a = 1.0

            for _,pointl in body:
                for pt in pointl:
                    depth_val=float(depth_arr[pt[1], pt[0]])
                    ptl_x,ptl_y,ptl_z=self.depth_to_xyz(pt[0],pt[1],depth_val)

                    self.linelist_point=Point()
                    self.linelist_point.x = ptl_x
                    self.linelist_point.y = ptl_y
                    self.linelist_point.z = ptl_z
                    self.linelist.points.append(self.linelist_point)

        except:
            pass

def datasetratiocopy_extend(l,ratio,x_offset,y_offset): # extend the points lying on any of the four outline edges
    dataset=[]
    for polyline in l:
        newpolyline=[]
        for pos in polyline:
            pos_x=pos[0]
            pos_y=pos[1]
            if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: # if the point lies on the original outline, move it to the new enlarged outline plus an extended length
                pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset
            else:
                pos_x=pos[0]/ratio+x_offset
            if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:
                pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset
            else:
                pos_y=pos[1]/ratio+y_offset
            newpolyline.append([pos_x,pos_y])
        dataset.append(newpolyline)
    return dataset

def parks(self):
    point_array = [0, 2, 8, 12, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14]
    park_coords = []
    parks_sorted = []
    for i in range(4):
        for j in range(4):
            if self.as_list[i][j] == 'p':
                park_coords.append(tuple([i, j]))
    while len(park_coords) > 0:
        x, y = park_coords.pop(0)
        if len(parks_sorted) == 0:
            parks_sorted.append([(x, y)])
        else:
            borders_bool = []
            for block_no, park_block in enumerate(parks_sorted):
                borders_bool.append(False)
                for i, j in park_block:
                    if abs(x - i) + abs(y - j) == 1:
                        borders_bool[block_no] = True
            if (num_true := borders_bool.count(True)) == 1:
                parks_sorted[borders_bool.index(True)].append((x, y))
            elif num_true > 1:
                new_parks_sorted = []
                i_mega_park = None
                for block_no, park_block in enumerate(parks_sorted):
                    if borders_bool[block_no]:  # this block borders (x, y)
                        if i_mega_park is None:
                            i_mega_park = block_no
                            new_parks_sorted.append(park_block)
                        else:
                            new_parks_sorted[i_mega_park] += park_block
                    else:
                        new_parks_sorted.append(park_block)
                new_parks_sorted[i_mega_park] += [(x, y)]
                parks_sorted = new_parks_sorted
            else:
                parks_sorted.append([(x, y)])

    return sum([point_array[len(block)] for block in parks_sorted])

def SLTrace(self,NSL=100,Pts=[]):
    TOF_end=[]
    SL_end=[]

    for i in range(4): # 4 subgrids

        if(len(Pts)==0):
            nsl=int(NSL*self.theta[i]/2/np.pi)
            Pts_init=PointOnUnitEdge(nsl) # generate the start points along the well edge (alpha=0)
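        # With caller-supplied points, every subgrid traces from the same
        # start points instead of the theta-proportional share computed above.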
else:\n nsl=len(Pts)\n Pts_init=Pts\n \n for j in range(nsl): #nsl streamlines\n GridID=i\n temp_trace=self.SubGrids[GridID].Trace1SL(Pts=Pts_init[j])\n \n SLtemp=RotateSL(temp_trace[3],Single=1,angle=self.SubGrids[GridID].RotateAngle)\n SLtemp=TranslateSL(SLtemp,Single=1,new_origin=self.SubGrids[GridID].NewOrigin)\n TOFtemp=temp_trace[5]\n \n flag=True\n while (flag==True): #the streamline will continue travel in another subgrid\n Pts_end=temp_trace[2][-1]\n temp_neighbor=self.NeighborTest(GridID,Pts_end) #test of crossing trace of a streamline\n flag=temp_neighbor[0]\n if(flag==True):\n temp_trace=[]\n SLtemp2=[]\n TOFtemp2=[]\n \n GridID_next=temp_neighbor[1]\n Pts_init_next=temp_neighbor[2]\n #Pts and TOF base starts from previous node\n temp_trace=self.SubGrids[GridID_next].Trace1SL(Pts=Pts_init_next,TOF_base=TOFtemp[-1])\n\n SLtemp2=RotateSL(temp_trace[3],Single=1,angle=self.SubGrids[GridID_next].RotateAngle)\n SLtemp2=TranslateSL(SLtemp2,Single=1,new_origin=self.SubGrids[GridID_next].NewOrigin)\n TOFtemp2=temp_trace[5]\n \n #SLtemp=np.append(SLtemp,SLtemp2,axis=0)\n #TOFtemp=np.append(TOFtemp,TOFtemp2,axis=0)\n SLtemp=np.append(SLtemp,SLtemp2[1:],axis=0)\n TOFtemp=np.append(TOFtemp,TOFtemp2[1:],axis=0)\n \n SL_end.append(SLtemp[-1])\n TOF_end.append(TOFtemp[-1])\n #Add all nodes and TOF into SL list\n self.SL.append(SLtemp)\n self.TOF.append(TOFtemp)\n \n \n #Plot the stremline\n plt.figure(figsize=(3, 3))\n plt.ylim(bottom=0,top=50)\n plt.xlim(left=0,right=50)\n plt.axes().set_aspect('equal')\n plt.title(r'Streamline in Physical Space ($x,y$)')\n \n #Grid edge\n Bound_vert=[self.Pts[0],self.Pts[1],self.Pts[2],self.Pts[3],self.Pts[0]]\n Internal_edge=[self.Pts[0],self.Pts[2],self.Pts[3],self.Pts[1]]\n \n plt.plot(*np.asarray(Bound_vert).T,lw=3,color='red')\n plt.plot(*np.asarray(Internal_edge).T,lw=2,ls='--',color='red')\n \n #Streamline\n for i in range(len(self.SL)):\n plt.plot(*np.asarray(self.SL[i]).T,lw=1,marker='o',markersize=0,color='blue')\n \n\n\n plt.show()\n return self.SL,self.TOF,SL_end,TOF_end", "def __init__(self, model, line, segments = None,head_target = 0,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n\r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n np.real(line)[:,np.newaxis],\r\n np.imag(line)[:,np.newaxis]))\r\n \r\n self.line_raw = copy.copy(line)\r\n if segments is None:\r\n self.segments = line.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Prescribed number of line segments '+str(self.segments)+\" mustn't be smaller than base number of segments \"+str(line.shape[0]-1)+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]-1:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = 
self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # ---------------------------------------------------------------------\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = np.ones(self.segments)\r\n \r\n \r\n self.zc = []\r\n self.segment_nvec = []\r\n self.L = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n self.zc = np.asarray(self.zc)\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n self.L = np.asarray(self.L)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def trajectories(self):\n # OPTIMIZE: take too much time due to too much solver call\n alpha_min = self.search_alpha_min()\n alpha_finder = self.FOV_img/2\n\n if self.display_trajectories is True:\n plt.figure('Trajectories plan')\n plt.clf() #clear the graph to avoir superposing data from the same set (can be deactivated if need to superpose)\n ax = plt.subplot(111, projection='polar') #warning if use python in ligne (!= graphical) graphs got superposed\n ax.set_title(\"light trajectories close to a black hole\\n\", va='bottom')\n ax.set_xlabel('R(UA)')\n plt.ylabel('phi(°)\\n\\n\\n\\n', rotation=0)\n ax.set_rlim((0, 4*self.D))\n ax.set_rlabel_position(-90)\n\n seen_angle = np.array([])\n deviated_angle = np.array([])\n\n booli = False # avoid points from the first loop to exceed points from the second loop\n points = 40 # careful with this if using kind=linear\n\n for i in range(6):\n # print(alpha_finder)\n\n for alpha in np.linspace(alpha_finder, alpha_min,\n num=points, endpoint=booli):\n r, phi = self.solver(alpha)\n\n if r[-1] > 1.1*self.Rs: # if not capture by black hole\n seen_angle = np.append(seen_angle, 180-alpha)\n dev_angle = phi[-1] + math.asin(self.D/r[-1]*math.sin(phi[-1]))\n dev_angle = math.degrees(dev_angle)\n deviated_angle = np.append(deviated_angle, dev_angle)\n Ci = 'C'+str(i)\n\n if self.display_trajectories is True:\n ax.plot(phi, r, Ci) # plot one trajectory\n\n if self.kind == 'linear':\n alpha_finder = alpha_min + (alpha_finder - alpha_min)/(points/3 + 1) # start a more precise cycle from last point\n\n else:\n alpha_finder = alpha_min + (alpha_finder - alpha_min)/(points + 1) # start a more precise cycle from last point\n\n points = 10 # careful with this if using kind=linear\n\n if i == 4:\n booli = True # allow to display the last point\n\n if self.display_trajectories is True:\n # plt.savefig('trajectories.png', 
format='png', dpi=1000, bbox_inches='tight')\n plt.draw()\n\n return seen_angle, deviated_angle", "def generate_random_linelist (teff,wv_bounds=(4500,5500),species_params=None,filepath=None):\n abund_offset_range = (-1,1)\n species_offset_range = (-1,1)\n ew_dist_width = 30\n ep_range = (0,12)\n loggf_range = (-6.0,0.5) \n \n theta = 5040.0/teff\n \n # # TODO: remove this calculation???\n # # # fix to a particular line which should be by the turnoff\n # # # Fe I 88.2 2.22 EP -4.2 loggf\n # loggf = -4.2\n # ep = 2.22\n # x_turnoff = abund_standard['Fe']['abundance']+loggf-theta*ep\n # x-x_turnoff = -5\n # \n # based on the model abundance used in the cog file\n xnorm = -6.5\n ynorm = -2.0\n \n # read in the parameters \n if species_params is None:\n species_params = _elements_params\n el_params = species_params.copy()\n for el,pars in _elements_params.items():\n el_params.setdefault(el,pars)\n \n\n coeffs, knots, centers, scales = np.array(cog_ppol_hf[\"coefficients\"]), np.array(cog_ppol_hf[\"knots\"]), np.array(cog_ppol_hf[\"centers\"]), np.array(cog_ppol_hf[\"scales\"])\n iqp = piecewise_polynomial.InvertiblePiecewiseQuadratic(coeffs, knots, centers=centers, scales=scales)\n iqp_deriv = iqp.deriv()\n \n # calc the linelist\n linelist = {}\n element_abund = {}\n for species,pars in list(species_params.items()):\n wvs = np.random.uniform(wv_bounds[0],wv_bounds[1],pars['n'])\n solar_abund_offset = np.random.uniform(*abund_offset_range)\n \n # get the abundance for this element, ignore species\n abund = abund_standard[species]['abundance']+solar_abund_offset\n element_abund.setdefault(abund_standard[species]['element'],abund) \n \n species_offset = np.random.uniform(*species_offset_range) \n species_abund = element_abund[abund_standard[species]['element']]+species_offset\n species_abund = np.repeat(species_abund,pars['n'])\n \n # generate the parameters for the lines\n spe_col = np.repeat(abund_standard.species_id(species),pars['n'])\n ew = np.random.exponential(ew_dist_width,pars['n'])\n ep = np.random.uniform(ep_range[0],ep_range[1],pars['n'])\n loggf = np.random.uniform(loggf_range[0],loggf_range[1],pars['n'])\n \n # calculate the line strengths from the COG\n #x = species_abund + loggf - theta*ep + xnorm\n logrw = np.log10(ew/wvs)\n x = iqp.inverse(logrw-ynorm)\n loggf = species_abund - x - theta*ep + xnorm\n\n # estimate the lorzentian and gaussian widths for this line\n lorz_width = estimate_lorentz_width(x, iqp_deriv)\n gauss_width = np.repeat(99.9,pars['n'])\n \n # add to the linelist\n linelist[species] = np.dstack((wvs,spe_col,ep,loggf,ew,gauss_width,lorz_width))[0]\n \n if filepath is not None:\n # save moog file\n f = open(filepath,'w')\n header = \"# Fake linelist created THIMBLES with teff {} # \"\n header += \"wvs species ep loggf ew gauss_width lorz_width # \"\n header += \"guassian and lorentzian widths are estimate\\n\"\n f.write(header.format(teff))\n \n fmt = \"{0:>9.5f} {1:>9.1f} {2:>9.2f} {3:>9.2f}\"+20*\" \"+\" {4:>9.2f}\"+10*\" \"\n fmt += \" {5:>9.2f} {6:>9.2f} FAKE_LINE\\n\"\n for species,ll in linelist.items():\n for row in ll:\n f.write(fmt.format(*row)) \n return linelist", "def get_centerlines_most_aligned_with_trajectory(xy: np.ndarray, candidate_cl: List[np.ndarray]) -> List[np.ndarray]:\n\n max_dist_along_cl = -float(\"inf\")\n\n for centerline in candidate_cl:\n centerline_linestring = LineString(centerline)\n start_dist = centerline_linestring.project(Point(xy[0, 0], xy[0, 1]))\n end_dist = centerline_linestring.project(Point(xy[-1, 0], xy[-1, 1]))\n 
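        # project() returns arc-length along the centerline, so the difference
        # below measures how far the trajectory advances along this candidate;
        # the +/-1 slack keeps near-tied centerlines in the result.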
dist_along_cl = end_dist - start_dist\n if max_dist_along_cl < -100 or dist_along_cl > max_dist_along_cl + 1:\n max_dist_along_cl = dist_along_cl\n candidate_centerlines = [centerline]\n elif dist_along_cl > max_dist_along_cl - 1:\n candidate_centerlines.append(centerline)\n max_dist_along_cl = max(max_dist_along_cl, dist_along_cl)\n\n return candidate_centerlines", "def GetXYListAndPolyListWithLimitedPointsBetweenNodes_CVLS(\n cVLS,\n allValsByFrame,\n orderOfSCsByValueByFrame,\n splitLength=1,\n fixedNumInteriorPoints=None,\n interpolate=True,\n):\n cVLS2 = GetCVLSWithLimitedPointsBetweenNodes(\n cVLS,\n allValsByFrame,\n splitLength,\n fixedNumInteriorPoints,\n interpolate=interpolate,\n )\n return GetXYListAndPolyListFromCVLS(cVLS2, allValsByFrame, orderOfSCsByValueByFrame)", "def generate_lines(self):\n x = self.square[0]\n y = self.square[1]\n lines = [[]]\n \n lines.append( ( (x, y+a) for a in range(1,8) ) )\n lines.append( ( (x+a, y) for a in range(1,8) ) )\n lines.append( ( (x, y-a) for a in range(1,8) ) )\n lines.append( ( (x-a, y) for a in range(1,8) ) )\n \n return lines", "def simple_core(block,cut,laser):\r\n\r\n\tlayers = int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\r\n\t#Since all cuts are square, the offsets are more obvious than in the general linear case.\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\tmax_delta = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * 2\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", \"0\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", str(block[\"thickness\"])])\r\n\r\n\tfor a in range(layers):\r\n\t\tx1, y1 = cut[\"final_dimension_x\"]/2 + a*taper, cut[\"final_dimension_y\"]/2 + a*taper\r\n\t\twhile abs(x1-cut[\"final_dimension_x\"]/2) < abs(max_delta):\r\n\t\t\tcutlist.append([\"jump\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tx1, y1 = x1 + laser[\"xy_spacing\"], y1 + laser[\"xy_spacing\"]\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\tmax_delta = max_delta - taper \r\n\treturn json.dumps(cutlist)", "def build_steps(self) -> List[Coordinate]:\n if self.orientation == Orientation.HORIZONTAL:\n fill = self.start.y\n else:\n fill = self.start.x\n\n if self.end.x < self.start.x:\n x_range = range(self.start.x, self.end.x - 1, -1)\n else:\n x_range = range(self.start.x, self.end.x + 1)\n\n if self.end.y < self.start.y:\n y_range = range(self.start.y, self.end.y - 1, -1)\n else:\n y_range = range(self.start.y, self.end.y + 1)\n\n return list(zip_longest(x_range, y_range, fillvalue=fill))", "def getNewIntersections(self):\n sections = []\n sections2D = []\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n xis = xs / L\n R = self.XYZCoordinate.T\n if len(xis) != len(self.SkeletonPoints):\n raise Exception(\"Conflit between xis and SkeletonPoints.\", self.SKeletonPoints)\n for i in range(len(xis)):\n xi = xis[i]\n normalVec = self.returnTangentVectorAtXi(xi)\n originPoint = self.SkeletonPoints[i]\n \n # define transform manually\n 
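            # Assemble a homogeneous 4x4 section-to-world transform: the rotation
            # block reorders R's columns so the section normal becomes the dropped
            # third axis, the translation is the current skeleton point, and
            # to_2D (its inverse) maps world coordinates onto the cutting plane.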
            T = np.zeros((4,4))
            T[:3,:3] = R[:,np.array([1,2,0])]  # the 3rd axis is reduced by the projection
            T[:3,3] = originPoint
            T[3,3] = 1.0
            to_2D = (np.linalg.inv(T)).astype(float)

            try:
                slice = self.mesh.section(plane_origin=originPoint, plane_normal=normalVec)
                # keep the sub-section with the largest area in each
                # cross-section as a first denoising pass
                if slice is not None:
                    slice_2D, to_3D = slice.to_planar(to_2D=to_2D,check=False)
                    slices_splited = slice_2D.split()
                    sliceIndex = np.argmax([s.area for s in slices_splited])
                    slice_2D = slices_splited[sliceIndex]
                    if True: #slice_2D.area > 1e-1:
                        sections2D.append(slice_2D)
                        sections.append(slice_2D.to_3D(to_3D))
            except:
                pass

        self.Intersections2D = sections2D
        self.Intersections = sections

def __init__(self, selected_points, cut_depth, cut_breadth):

    self.cut_depth = cut_depth
    self.cut_breadth = cut_breadth

    self.points = selected_points

    self.vline = self.vlinecomp()
    self.hline = self.ortho_line_cut()

    self.mid_left = self.midpoint(0,1)
    self.mid_right = self.midpoint(2, 3)

def get_obstList(self,X,Y,Z):

    ellip_a = 2.*2.*self.cyl_rad
    ellip_b = 2.*self.cyl_rad
    ellip_c = 8.*self.cyl_rad
    ellip_x = self.x_c
    ellip_z = self.z_c + self.cyl_rad
    ellip_y = ellip_b

    floor_part = np.array(np.where(Y < ellip_b)).flatten()

    dist = (X - self.x_c)**2 + (Z - self.z_c)**2
    cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())

    scour_pit = np.array(np.where( (X - ellip_x)**2/(ellip_a**2) +
                                   (Y - ellip_y)**2/(ellip_b**2) +
                                   (Z - ellip_z)**2/(ellip_c**2) <= 1.)).flatten()

    # remove the scour pit from the floor
    obst_list = np.setxor1d(floor_part[:],
                            np.intersect1d(floor_part[:],scour_pit[:]))

    # then add the cylinder
    obst_list = np.union1d(obst_list[:],cyl_part[:])

    return list(obst_list[:])

def __get_pts_lines(self, lines, arcs):
    # store unique points
    points_dict = {}
    all_lines = []
    for ind, line in enumerate(lines):
        tup = self.__fix_tuple((line.start[0], line.start[1]))
        start = self.__find_make_pt(tup, points_dict)
        tup = self.__fix_tuple((line.end[0], line.end[1]))
        end = self.__find_make_pt(tup, points_dict)
        line = geometry.Line(start, end)
        all_lines.append(line)
    for ind, arc in enumerate(arcs):
        # dxfgrabber arcs are stored ccw when looking at the xy plane
        # x horizontal, y vertical
        tup = self.__fix_tuple((arc.center[0], arc.center[1]))
        center = self.__find_make_pt(tup, points_dict)
        sign = -1
        if self.__swapxy:
            sign = 1
        startangle = arc.start_angle*sign
        endangle = arc.end_angle*sign
        angle = endangle - startangle
        if arc.end_angle < arc.start_angle:
            angle = angle + 360*sign
        """
        print('---------------------------------------')
        print('| ARC')
        print('center: %s' % center)
        print('startangle: %f' %
startangle)\n print('endangle: %f' % endangle)\n print('traversed_angle: %f' % angle)\n \"\"\"\n start_vect = geometry.Point(0, arc.radius)\n if self.__swapxy == False:\n start_vect = geometry.Point(arc.radius, 0)\n start_vect.rot_ccw_deg(arc.start_angle*sign)\n end_vect = geometry.Point(0, arc.radius)\n if self.__swapxy == False:\n end_vect = geometry.Point(arc.radius, 0)\n end_vect.rot_ccw_deg(arc.end_angle*sign)\n start = center + start_vect\n start_tup = (start.x, start.y)\n end = center + end_vect\n end_tup = (end.x, end.y)\n start = self.__find_make_pt(start_tup, points_dict)\n end = self.__find_make_pt(end_tup, points_dict)\n rvect = start - center\n if abs(angle) <= 90:\n arc = geometry.Arc(start, end, center)\n all_lines.append(arc)\n print('1 arc made')\n continue\n #print(' %s' % arc)\n pieces = math.ceil(abs(angle)/90)\n print('%i arcs being made' % pieces)\n points = [start, end]\n # 2 pieces need 3 points, we have start + end already --> 1 pt\n inserts = pieces + 1 - 2\n piece_ang = angle/pieces\n #print('piece_ang = %f' % piece_ang)\n while inserts > 0:\n rvect.rot_ccw_deg(piece_ang)\n point = center + rvect\n tup = (point.x, point.y)\n point = self.__find_make_pt(tup, points_dict)\n points.insert(-1, point)\n inserts = inserts - 1\n for ind in range(len(points)-1):\n #print(' %s' % arc)\n arc = geometry.Arc(points[ind], points[ind+1], center)\n all_lines.append(arc)\n for line in all_lines:\n line.save_to_points()\n return [list(points_dict.values()), all_lines]", "def get_obstList(self,X,Y,Z):\n #Pipe_1\n\tpipe_1 = np.array(np.where((X - 1)**2 + (Y - 4)**2 >= 0.5**2)).flatten()\n\tpipe_1_stop_z = np.array(np.where(Z <= 3.0)).flatten()\n\tpipe_1_stop_y = np.array(np.where(Y >= 3.25)).flatten()\n\tpipe_1_stop = np.intersect1d(pipe_1_stop_z[:],pipe_1_stop_y[:])\n\tpipe_1 = np.intersect1d(pipe_1[:],pipe_1_stop[:])\n\n\t#Turn_1\n\tturn_1 = np.array(np.where((0.75 - np.sqrt((Y - 3.25)**2 + (Z -3)**2))**2 + (X - 1)**2 >= 0.5**2)).flatten()\n\tturn_1_stop_z = np.array(np.where(Z >= 3.0)).flatten()\n\tturn_1_stop_y = np.array(np.where(Y>= 1.75)).flatten()\n\tturn_1_stop = np.intersect1d(turn_1_stop_z[:],turn_1_stop_y[:])\n\tturn_1 = np.intersect1d(turn_1[:],turn_1_stop[:])\n\n\t#Pipe_2\n\tpipe_2 = np.array(np.where((X - 1)**2 + (Y - 2.5)**2 >= 0.5**2)).flatten()\n\tpipe_2_start_z = np.array(np.where(Z >= 1.5)).flatten()\n\tpipe_2_start_y_up = np.array(np.where(Y <= 3.25)).flatten()\n\tpipe_2_start_y_down = np.array(np.where(Y >= 1.75)).flatten()\n\tpipe_2_start_y = np.intersect1d(pipe_2_start_y_up[:],pipe_2_start_y_down[:])\t\n\tpipe_2_start = np.intersect1d(pipe_2_start_z[:],pipe_2_start_y[:])\n\tpipe_2 = np.intersect1d(pipe_2[:],pipe_2_start[:])\n\tpipe_2_stop_z = np.array(np.where(Z <= 3.0)).flatten()\n\tpipe_2_stop_y = np.array(np.where(Y <= 3.25)).flatten()\n\tpipe_2_stop = np.intersect1d(pipe_2_stop_z[:],pipe_2_stop_y[:])\n\tpipe_2 = np.intersect1d(pipe_2[:],pipe_2_stop[:])\n\n\t#Turn_2\n\tturn_2 = np.array(np.where((0.75 - np.sqrt((Y - 1.75)**2 + (Z -1.5)**2))**2 + (X - 1)**2 >= 0.5**2)).flatten()\n\tturn_2_stop_z = np.array(np.where(Z <= 1.5)).flatten()\n\tturn_2_stop_y = np.array(np.where(Y <= 3.25)).flatten()\n\tturn_2_stop = np.intersect1d(turn_2_stop_z[:],turn_2_stop_y[:])\n\tturn_2 = np.intersect1d(turn_2[:],turn_2_stop[:])\n\t\n\t#Pipe_3\n\tpipe_3 = np.array(np.where((X - 1)**2 + (Y - 1.0)**2 >= 0.5**2)).flatten()\n\tpipe_3_start_z = np.array(np.where(Z >= 1.5)).flatten()\n\tpipe_3_start_y = np.array(np.where(Y <= 1.75)).flatten()\n\tpipe_3_start = 
np.intersect1d(pipe_3_start_z[:],pipe_3_start_y[:])\n\tpipe_3 = np.intersect1d(pipe_3[:],pipe_3_start[:])\t\n\n\t#Put the pieces together\n\n\tpipe = np.union1d(pipe_1[:],turn_1[:])\n\tpipe = np.union1d(pipe[:],pipe_2[:])\n\tpipe = np.union1d(pipe[:],turn_2[:])\t\n\tpipe = np.union1d(pipe[:],pipe_3[:])\n\n\tobst_list = pipe[:]\n\n \n return list(obst_list[:])", "def filter_candidate_centerlines(\n xy: np.ndarray,\n candidate_cl: List[np.ndarray],\n stationary_threshold: float = 2.0,\n max_dist_margin: float = 2.0,\n) -> List[np.ndarray]:\n\n # Check if stationary\n if math.sqrt((xy[0, 0] - xy[-1, 0]) ** 2 + (xy[0, 1] - xy[-1, 1]) ** 2) < stationary_threshold:\n stationary = True\n else:\n stationary = False\n\n # Filtering candidates to retain only those with distance along centerline close to traj length\n # Fit a second order polynomial and find trajectory length\n POLY_ORDER = 2\n poly = np.poly1d(np.polyfit(xy[:, 0], xy[:, 1], POLY_ORDER))\n obs_y_smooth = [poly(x) for x in xy[:, 0]]\n xy_smooth = [(xy[i, 0], obs_y_smooth[i]) for i in range(xy.shape[0])]\n traj_len = LineString(xy_smooth).length\n\n filtered_candidate_centerlines = []\n for centerline in candidate_cl:\n\n if stationary:\n filtered_candidate_centerlines.append(centerline)\n else:\n centerLine = LineString(centerline)\n start_dist = centerLine.project(Point(xy[0, 0], xy[0, 1]))\n end_dist = centerLine.project(Point(xy[-1, 0], xy[-1, 1]))\n\n dist_along_cl = end_dist - start_dist\n if dist_along_cl > traj_len - max_dist_margin and dist_along_cl < traj_len + max_dist_margin:\n filtered_candidate_centerlines.append(centerline)\n return filtered_candidate_centerlines", "def generate_lcs(x, y, id, N, nspot_min=50, nspot_max=500, incl_min=0,\n incl_max=np.pi/4., amp_min=1, amp_max=100, pmin=.5,\n pmax=90, tau_min=5, tau_max=20):\n\n params = [nspot_min, nspot_max, incl_min, incl_max, amp_min*rvar,\n amp_max*rvar, pmin, pmax, tau_min, tau_max]\n nspots, incl, periods, amps, tau = injection_params(N, params)\n true_params = {'nspots': nspots, 'incl': incl, 'periods': periods,\n 'amps': amps, 'tau': tau}\n\n xarr = np.zeros((len(x), N))\n yarr = np.zeros((len(x), N))\n for i in range(N):\n print(i, \"of\", N)\n res0, res1 = mklc(x, nspot=nspots[i], incl=incl[i], tau=tau[i],\n p=periods[i])\n med = np.median(res1[2, :])\n ys = (res1[2, :] / med - 1) * amps[i] # median normalise and scale\n yarr[:, i] = ys + y\n xarr[:, i] = x\n if N == 1:\n return xarr.T[0], yarr.T[0], x, y, true_params\n\n # save the results\n np.savetxt(\"lcs.txt\", yarr.T)\n np.savetxt(\"xs.txt\", xarr.T)\n np.savetxt(\"truth.txt\", np.vstack((nspots, incl, periods, amps, tau)).T)\n\n return xarr, yarr, true_params", "def getLatticePoints():\n latticePoints = []\n\n for y in arange(yMin, yMax + yStep, yStep):\n for x in arange(xMin, xMax + xStep, xStep):\n latticePoints.append(LatticePoint(x, y))\n\n \n return latticePoints", "def set_display_from_lines(self):\n y = 1\n maxlin = CA_World.ca_display_size - 1\n limy = len(self.ca_lines) + maxlin\n for i in self.ca_lines:\n x = 1\n if limy >= maxlin:\n if SimEngine.gui_get('init') == \"Right\": # Right\n limx = len(i) + maxlin + 2\n for j in range(len(i) - 2):\n if limx >= maxlin:\n b = bool(i[j])\n self.pixel_tuple_to_patch(\n ((maxlin - len(i) + 2 + x) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n else:\n limx -= 1\n elif SimEngine.gui_get('init') == \"Left\": # Left\n limx = 0\n for j in range(len(i) - 2):\n if limx <= maxlin + 2:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((x - 3) * 
4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(\n b)\n x += 1\n limx += 1\n else: # Center and Random\n limx = int((len(i) - maxlin) / 2)\n k = 0\n for j in range(len(i)):\n if limx < 0:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((maxlin - len(i) + x - 1 + limx) * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n else:\n if k < maxlin + 1:\n b = bool(i[j + limx])\n self.pixel_tuple_to_patch((k * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n k += 1\n y += 1\n else:\n limy -= 1", "def raster_linify(channel):\n k_lines = []\n right = True\n for X in range(channel.shape[0]):\n linestart = None\n inline = False\n if right == True:\n Y=0\n while (Y < channel.shape[1]):\n if (inline):\n if (channel[X,Y]>0.5):\n pass\n else:\n k_lines.append(copy.copy([linestart,[X,Y]]))\n inline = False\n else:\n if (channel[X,Y]>0.5):\n linestart = copy.copy([X,Y])\n inline=True\n else:\n pass\n Y=Y+1\n right = False\n else:\n Y=channel.shape[1]-1\n while (Y > 0):\n if (inline):\n if (channel[X,Y]>0.5):\n pass\n else:\n k_lines.append(copy.copy([linestart,[X,Y]]))\n inline = False\n else:\n if (channel[X,Y]>0.5):\n linestart = copy.copy([X,Y])\n inline=True\n else:\n pass\n Y=Y-1\n right = True\n return k_lines", "def get_obstList(self,X,Y,Z):\n #Pipe in - find all points exterior of small\n\tpipe_in = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_in/2)**2)).flatten()\n\tpipe_in_stop = np.array(np.where(Z <= 3 + 0.5*(self.diam_out - self.diam_in))).flatten()\n\tpipe_in = np.intersect1d(pipe_in[:],pipe_in_stop[:])\n\n\t#Expansion - find all points exterior of expansion\n\tr_cone = self.diam_in\n\th_cone = self.diam_in\t\n\texpansion = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (r_cone/h_cone)**2*(Z - 3)**2)).flatten()\n\texpansion_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_out - self.diam_in)))\n\t#expansion_stop = np.array(np.where(Z <= 4)).flatten()\n\texpansion = np.intersect1d(expansion[:],expansion_start[:])\n\t#expansion = np.intersect1d(expansion[:],expansion_stop[:])\n\n\t#Pipe out - final all points exterior of smaller pipe\n\tpipe_out = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_out/2)**2)).flatten()\n\tpipe_out_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_in - self.diam_out))).flatten()\n\tpipe_out = np.intersect1d(pipe_out[:],pipe_out_start[:])\n\n\n\t#Put the pieces together\n\n\tpipe = expansion[:]\n\tpipe = np.union1d(expansion[:],pipe_in[:])\n\tpipe = np.union1d(pipe[:],pipe_out[:])\n\n\tobst_list = pipe[:]\n\n \n return list(obst_list[:])", "def get_obstList(self,X,Y,Z):\n \n \t#Bed\n floor_part = np.array(np.where(Y < 2*self.cyl_rad)).flatten()\n\t\n\t#Piling\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n\n # then add the cylinder\n obst_list = np.union1d(floor_part[:],cyl_part[:])\n \n return list(obst_list[:])", "def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag 
due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl", "def calculate_ctrl_pts(self) -> list[tuple]:\n ctrl_pts = []\n # Skip last point if path is non-cyclic\n point_inds = range(self.num_points) if self.is_cyclic else range(self.num_points - 1)\n for i in point_inds:\n z_i = self.points[i]\n z_j = self.points[(i + 1) % self.num_points]\n rho_coefficient = z_i.alpha * velocity(z_i.theta, z_j.phi)\n sigma_coefficient = z_j.beta * velocity(z_j.phi, z_i.theta)\n ctrl_pt_a = z_i + (1 / 3) * rho_coefficient * cmath.exp(complex(0, z_i.theta)) * (z_j - z_i)\n ctrl_pt_b = z_j - (1 / 3) * sigma_coefficient * cmath.exp(complex(0, -z_j.phi)) * (z_j - z_i)\n ctrl_pts.append((ctrl_pt_a.real, ctrl_pt_a.imag))\n ctrl_pts.append((ctrl_pt_b.real, ctrl_pt_b.imag))\n return ctrl_pts", "def create_pf_coils(self) -> list:\n\n # from diagram\n lower_lefts = np.array(\n [\n (345.5429497568881, -788.0537547271744),\n (800.6482982171799, -703.3664235548353),\n (1160.9400324149108, -272.69381415451096),\n (1158.833063209076, 268.97622906537015),\n (798.5413290113453, 621.6369529983793),\n (345.5429497568881, 707.7795786061589),\n ]\n )\n\n top_rights = np.array(\n [\n (511.99351701782825, -690.4038357644516),\n (880.7131280388979, -611.8280659103187),\n (1222.0421393841166, -162.37506753106413),\n (1230.4700162074555, 379.2780929227446),\n (861.7504051863858, 681.9523230686117),\n (446.677471636953, 803.4508373851974),\n ]\n )\n\n outboard_pf_coils = []\n for counter, (top_right, lower_left) in enumerate(zip(top_rights, lower_lefts), 1):\n pf_coil = paramak.PoloidalFieldCoilFP(\n corner_points=(top_right, lower_left),\n rotation_angle=self.rotation_angle,\n name=f\"outboard_pf_coils_{counter}\",\n )\n outboard_pf_coils.append(pf_coil)\n\n x_inner, x_outer = 132.73905996758506, 202.26904376012968\n\n pf_coils_1 = paramak.RotateStraightShape(\n points=[\n (x_inner, -600),\n (x_outer, -600),\n (x_outer, -400),\n (x_inner, -400),\n ],\n rotation_angle=self.rotation_angle,\n name=\"pf_coil_1\",\n )\n\n pf_coils_2 = paramak.RotateStraightShape(\n points=[\n (x_inner, -400),\n (x_outer, -400),\n (x_outer, -200),\n (x_inner, -200),\n ],\n rotation_angle=self.rotation_angle,\n name=\"pf_coil_2\",\n )\n\n pf_coils_3 = paramak.RotateStraightShape(\n points=[\n (x_inner, -200),\n (x_outer, -200),\n (x_outer, 0),\n (x_inner, 0),\n ],\n rotation_angle=self.rotation_angle,\n name=\"pf_coil_3\",\n )\n\n pf_coils_4 = paramak.RotateStraightShape(\n points=[\n (x_inner, 0),\n (x_outer, 0),\n (x_outer, 200),\n (x_inner, 200),\n ],\n rotation_angle=self.rotation_angle,\n name=\"pf_coil_4\",\n )\n\n pf_coils_5 = paramak.RotateStraightShape(\n points=[\n (x_inner, 200),\n (x_outer, 200),\n (x_outer, 400),\n (x_inner, 400),\n ],\n rotation_angle=self.rotation_angle,\n name=\"pf_coil_5\",\n )\n\n pf_coils_6 = paramak.RotateStraightShape(\n points=[\n (x_inner, 400),\n (x_outer, 400),\n (x_outer, 600),\n (x_inner, 600),\n ],\n rotation_angle=self.rotation_angle,\n name=\"pf_coil_6\",\n )\n\n return outboard_pf_coils + [\n pf_coils_1,\n pf_coils_2,\n pf_coils_3,\n pf_coils_4,\n pf_coils_5,\n pf_coils_6,\n ]", "def build_splines(self, nsc, alpha=0.05, initial_beta=1.5):\n pass", "def interpolateCubicPeriodic() 
:\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th parameter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolatePeriodicSpline(keytime, y)\n S.append(interpolants)\n return S", "def vertical_core(block,cut,laser):\r\n\r\n\tlayers = int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\ttaper = math.tan(angle) * laser[\"z_spacing\"]\r\n\r\n\tu = math.tan(2 * angle) * (block[\"thickness\"] + laser[\"z_final_overshoot\"])\r\n\tz_0 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 - block[\"origin_y\"] + u)\r\n\tz_1 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 + block[\"origin_x\"] + u)\r\n\tz_2 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 + block[\"origin_y\"] + u)\r\n\tz_3 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 - block[\"origin_x\"] + u)\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", f\"{math.degrees(angle):.6f}\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", f\"{z_0:.6f}\"])\r\n\r\n\ty_start_wide = ((u + cut[\"final_dimension_x\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\ty_start_length = ((u + cut[\"final_dimension_y\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\r\n\tdepth_cut = (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * math.cos(angle)/math.cos(2*angle)\r\n\r\n\tcut1 = json.loads(line(block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],-block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut2 = json.loads(line(block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],-block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],depth_cut,laser))\r\n\r\n\tcut3 = json.loads(line(block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],-block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut4 = json.loads(line(block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],-block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],depth_cut,laser))\r\n\r\n\t#cut1 = json.loads(line(block[\"width\"]/2,y_start_length,-block[\"width\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut2 = json.loads(line(block[\"length\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\t#cut3 = json.loads(line(block[\"width\"]/2,y_start_length,-cut[\"final_dimension_x\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut4 = json.loads(line(cut[\"final_dimension_y\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\tcutlist = (cutlist + cut1\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_1:.6f}\"],] \r\n\t + cut2\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_2:.6f}\"]] \r\n\t\t\t\t\t + cut3 \r\n\t\t\t\t\t + [[\"z_abs\", f\"{z_3:.6f}\"],[\"c_rel\", \"90\"]] \r\n\t\t\t\t\t + cut4)\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\r\n\treturn json.dumps(cutlist)", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.01):\n\n # Select the x and y of 
the points\n n = len(pts)\n \n z = 0.0\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers):\r\n\tcutlist = []\r\n\ty_max = abs(y1-y2)\r\n\tfor a in range(layers):\r\n\t\ti = 0\r\n\t\tnew_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y\r\n\t\twhile abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0:\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", 
f\"{new_y1:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_y1 = new_y1-delta\r\n\t\t\ti = i + 1\r\n\t\tif a < layers - 1:\r\n\t\t\tcutlist.append([\"z_step\", str(-deltaz)])\r\n\t\ty_max = y_max - taper_straight - taper_y\r\n\r\n\treturn cutlist", "def get_obstList(self,X,Y,Z):\n #Pipe in - find all points exterior of large pipe\n\tpipe_in = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_in/2)**2)).flatten()\n\tpipe_in_stop = np.array(np.where(Z <= 4)).flatten()\n\tpipe_in = np.intersect1d(pipe_in[:],pipe_in_stop[:])\n\n\t#Contraction - find all points exterior of contraction\n\tr_cone = self.diam_out\n\th_cone = self.diam_out\t\n\tcontraction = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (r_cone/h_cone)**2*(Z - (4 + h_cone))**2)).flatten()\n\tcontraction_start = np.array(np.where(Z >= 4)).flatten()\n\tcontraction_stop = np.array(np.where(Z <= 4 + .5*self.diam_out)).flatten()\n\tcontraction = np.intersect1d(contraction[:],contraction_start[:])\n\tcontraction = np.intersect1d(contraction[:],contraction_stop[:])\n\n\t#Pipe out - final all points exterior of smaller pipe\n\tpipe_out = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_out/2)**2)).flatten()\n\tpipe_out_start = np.array(np.where(Z >= 4 + .5*self.diam_out)).flatten()\n\tpipe_out = np.intersect1d(pipe_out[:],pipe_out_start[:])\n\n\n\t#Put the pieces together\n\n\t#pipe = pipe_in[:]\n\tpipe = np.union1d(contraction[:],pipe_in[:])\n\tpipe = np.union1d(pipe[:],pipe_out[:])\n\n\tobst_list = pipe[:]\n\n \n return list(obst_list[:])", "def cspline_params(self):\n b = np.zeros(self.n)\n c = np.zeros(self.n-1)\n d = np.zeros(self.n-1)\n B = np.zeros(self.n)\n Q = np.ones(self.n-1)\n D = 2 * np.ones(self.n)\n dx = np.zeros(self.n-1)\n p = np.zeros(self.n-1)\n\n # Calculate x-interval and slope\n for j in range(self.n-1):\n dx[j] = self.x[j+1] - self.x[j]\n p[j] = (self.y[j+1] - self.y[j]) / dx[j]\n\n # Fill B\n B[0] = 3 * p[0]\n for i in range(self.n-2):\n B[i+1] = 3 * (p[i] + p[i+1] * dx[i] / dx[i+1])\n B[-1] = 3 * p[-2]\n \n # Fill D\n for i in range(self.n-2):\n D[i+1] = 2 * dx[i] / dx[i+1] + 2\n\n # Fill Q\n for i in range(self.n-2):\n Q[i+1] = dx[i] / dx[i+1]\n\n # Gauss elimination\n for i in range(1, self.n):\n D[i] = D[i] - Q[i-1] / D[i-1]\n B[i] = B[i] - B[i-1] / D[i-1]\n\n # Back-substitution\n b[-1] = B[-1] / D[-1]\n list = range(self.n-1)\n for i in list[::-1]:\n b[i] = (B[i] - Q[i] * b[i+1]) / D[i]\n\n # Calculate c and d\n for i in range(self.n-1):\n c[i] = (3 * p[i] - 2 * b[i] - b[i+1]) / dx[i]\n d[i] = (b[i] + b[i+1] - 2 * p[i]) / dx[i]\n c[-1] = -3 * d[-1] * dx[-1]\n\n return b, c, d", "def grid_line(object):\n def __init__(self, casenum):\n self.casenum = casenum\n \n def getList(self):\n \"\"\"\n to get the whole list of grid line data\n \"\"\"\n lineList = get_array(self.casenum, 'branch')\n self.lineList = lineList\n return lineList\n \n def getLineNum(self):\n \"\"\"\n :return: number of lines\n \"\"\"\n self.lineNum = len(self.getList())\n return self.lineNum\n \n def getFBus(self):\n \"\"\"\n :return:list of \"from\" bus numbers\n \"\"\"\n FBus = []\n for e in self.getList():\n FBus.append(e[0])\n self.FBus = FBus\n return self.Bus\n \n def getTBus(self):\n \"\"\"\n :return: list of \"to\" bus numbers\n \"\"\"\n TBus = []\n for e in self.getList():\n TBus.append(e[1])\n self.TBus = TBus\n return self.TBus", "def get_obstList(self,X,Y,Z):\n \n \t#Bed\n\twaveh = 
0.125\n\twavel = 10 \n\tfloor_part = np.array(np.where(Y < (waveh*np.sin(wavel*Z) + 2*self.cyl_rad))).flatten()\n\t\n\t#Piling\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n\n # then add the cylinder\n obst_list = np.union1d(floor_part[:],cyl_part[:])\n \n return list(obst_list[:])", "def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)", "def get_verts(v_l, v_r):\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points", "def power_points():\n next_reading = power_readings()\n stretch = []\n\n def next():\n nonlocal stretch, next_reading\n stretch.append(next_reading())\n if len(stretch) > XMAX + 1:\n stretch.pop(0)\n x = XMAX + 1 - len(stretch)\n points = []\n for y in stretch:\n points.append((x, y))\n points.append((x, 0))\n x += 1\n return points\n\n return next", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.0):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = pts[0][2]\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + 
step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def _get_lines(fname):\n @_adapt_rgb(_skimage.color.adapt_rgb.hsv_value)\n def sobel_hsv(image):\n return _filters.sobel(image)\n\n # read in image file\n data = _skimageio.imread(fname)\n\n # select default ranges for the first 4 lines (this may vary from site to site)\n # possibly make these input parameters\n l1_range = data[0:27, 0:850, :]\n l2_range = data[28:55, 0:500, :]\n l3_range = data[56:83, 0:350, :]\n l4_range = data[84:111, 0:350, :]\n\n # Look for a right edge in the image in the default ranges which\n # would indicate an end of the overlay in the x direction\n intensity_limit = 2\n l1_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l1_range)),\n axis=2) < intensity_limit\n l2_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l2_range)),\n axis=2) < intensity_limit\n l3_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l3_range)),\n axis=2) < intensity_limit\n l4_edges = _np.sum(\n _skimage.exposure.rescale_intensity(1 - sobel_hsv(l4_range)),\n axis=2) < intensity_limit\n\n # try to adjust the default ranges if an edge was found\n sumlim = 25\n try:\n l1_right_edge = 0 + \\\n _np.where(_np.sum(l1_edges, axis=0) >= sumlim)[0].max()\n except 
ValueError:\n l1_right_edge = 850\n if l1_right_edge < 2:\n l1_right_edge = 850\n\n try:\n l2_right_edge = 0 + \\\n _np.where(_np.sum(l2_edges, axis=0) >= sumlim)[0].max()\n except ValueError:\n l2_right_edge = 500\n if l2_right_edge < 2:\n l2_right_edge = 500\n\n try:\n l3_right_edge = 0 + \\\n _np.where(_np.sum(l3_edges, axis=0) >= sumlim)[0].max()\n except ValueError:\n l3_right_edge = 350\n if l3_right_edge < 2:\n l3_right_edge = 350\n\n try:\n l4_right_edge = 0 + \\\n _np.where(_np.sum(l4_edges, axis=0) >= sumlim)[0].max()\n except ValueError:\n l4_right_edge = 350\n if l4_right_edge < 2:\n l4_right_edge = 350\n\n # extract an array for each of the first four lines\n line1 = data[0:27, :l1_right_edge, :]\n line2 = data[28:55, :l2_right_edge, :]\n line3 = data[56:83, :l3_right_edge, :]\n line4 = data[84:111, :l4_right_edge, :]\n \n return line1, line2, line3, line4", "def make_m3_crv(TSUGITE_list, SHIGUCHI_list):\n \"\"\"\n 1 Get information from TSUGITE_list and SHIGUCHI_list.\n \"\"\"\n # TSUGITE\n # Left----------------------------------------------------------------------\n # material2\n m3_left_list = TSUGITE_list[2]\n m3_left_upper = m3_left_list[0]\n m3_left_middle = m3_left_list[1]\n m3_left_lower = m3_left_list[2]\n\n # SHIGUCHI\n m3_KUMIKI_points1 = SHIGUCHI_list[6]\n m3_KUMIKI_points2 = SHIGUCHI_list[7]\n\n # m3_KUMIKI_points1.reverse()\n\n m3_left_upper.extend(m3_KUMIKI_points1)\n m3_left_upper.append(m3_left_upper[0])\n m3_left_upper_crv = rs.AddPolyline(m3_left_upper)\n\n m3_left_middle.extend(m3_KUMIKI_points1)\n m3_left_middle.append(m3_left_middle[0])\n m3_left_middle_crv = rs.AddPolyline(m3_left_middle)\n\n m3_left_lower.extend(m3_KUMIKI_points1)\n m3_left_lower.append(m3_left_lower[0])\n m3_left_lower_crv = rs.AddPolyline(m3_left_lower)\n\n m3_left_crvs = [m3_left_upper_crv, m3_left_middle_crv, m3_left_lower_crv]\n\n # Right---------------------------------------------------------------------\n # material3\n m3_right_list = TSUGITE_list[3]\n m3_right_upper = m3_right_list[0]\n m3_right_middle = m3_right_list[1]\n m3_right_lower = m3_right_list[2]\n\n # SHIGUCHI\n m3_KUMIKI_points1 = SHIGUCHI_list[2]\n m3_KUMIKI_points2 = SHIGUCHI_list[3]\n\n # Extend\n # material3\n m3_right_upper.extend(m3_KUMIKI_points1)\n m3_right_upper.append(m3_right_upper[0])\n m3_right_upper_crv = rs.AddPolyline(m3_right_upper)\n\n m3_right_middle.extend(m3_KUMIKI_points1)\n m3_right_middle.append(m3_right_middle[0])\n m3_right_middle_crv = rs.AddPolyline(m3_right_middle)\n\n m3_right_lower.extend(m3_KUMIKI_points1)\n m3_right_lower.append(m3_right_lower[0])\n m3_right_lower_crv = rs.AddPolyline(m3_right_lower)\n\n m3_right_crvs = [m3_right_upper_crv, m3_right_middle_crv, m3_right_lower_crv]\n\n return m3_left_crvs, m3_right_crvs", "def make_m2_crv(TSUGITE_list, SHIGUCHI_list):\n \"\"\"\n 1 Get information from TSUGITE_list and SHIGUCHI_list.\n \"\"\"\n # TSUGITE\n # Left----------------------------------------------------------------------\n # material2\n m2_left_list = TSUGITE_list[0]\n m2_left_upper = m2_left_list[0]\n m2_left_middle = m2_left_list[1]\n m2_left_lower = m2_left_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[4]\n m2_KUMIKI_points2 = SHIGUCHI_list[5]\n\n m2_KUMIKI_points1.reverse()\n\n m2_left_upper.extend(m2_KUMIKI_points1)\n m2_left_upper.append(m2_left_upper[0])\n m2_left_upper_crv = rs.AddPolyline(m2_left_upper)\n\n m2_left_middle.extend(m2_KUMIKI_points1)\n m2_left_middle.append(m2_left_middle[0])\n m2_left_middle_crv = rs.AddPolyline(m2_left_middle)\n\n 
m2_left_lower.extend(m2_KUMIKI_points1)\n m2_left_lower.append(m2_left_lower[0])\n m2_left_lower_crv = rs.AddPolyline(m2_left_lower)\n\n m2_left_crvs = [m2_left_upper_crv, m2_left_middle_crv, m2_left_lower_crv]\n\n # Right---------------------------------------------------------------------\n m2_right_list = TSUGITE_list[1]\n m2_right_upper = m2_right_list[0]\n m2_right_middle = m2_right_list[1]\n m2_right_lower = m2_right_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[0]\n m2_KUMIKI_points2 = SHIGUCHI_list[1]\n\n # Extend\n # material2\n m2_right_upper.reverse()\n m2_right_middle.reverse()\n m2_right_lower.reverse()\n\n # m2_KUMIKI_points1.reverse()\n\n m2_right_upper.extend(m2_KUMIKI_points1)\n m2_right_upper.append(m2_right_upper[0])\n m2_right_upper_crv = rs.AddPolyline(m2_right_upper)\n\n m2_right_middle.extend(m2_KUMIKI_points1)\n m2_right_middle.append(m2_right_middle[0])\n m2_right_middle_crv = rs.AddPolyline(m2_right_middle)\n\n m2_right_lower.extend(m2_KUMIKI_points1)\n m2_right_lower.append(m2_right_lower[0])\n m2_right_lower_crv = rs.AddPolyline(m2_right_lower)\n\n m2_right_crvs = [m2_right_upper_crv, m2_right_middle_crv, m2_right_lower_crv]\n\n return m2_left_crvs, m2_right_crvs", "def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + 
[[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def point_list(self,res,llc,urc,direction):\n\t\tif direction == 2:\n\t\t\tZdist=urc[2]-llc[2]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,0,deltaZ*i]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 1:\n\t\t\tZdist=urc[1]-llc[1]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([0,deltaZ*i,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]\n\t\tif direction == 0:\n\t\t\tZdist=urc[0]-llc[0]\n\t\t\tnumPoints=int(numpy.ceil(Zdist/res))\n\t\t\tdeltaZ=Zdist/numPoints\n\t\t\tpoints=[llc+numpy.array([deltaZ*i,0,0]) for i in range(numPoints)]\n\t\t\treturn points, points[0], points[-1]", "def build_geometry(self):\n\n [Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, rot_sign] = self._comp_point_coordinate()\n\n # Creation of curve\n curve_list = list()\n curve_list.append(Segment(Z1, Z2))\n curve_list.append(Arc1(Z2, Z3, rot_sign * self.R1, self.is_outwards()))\n curve_list.append(Segment(Z3, Z4))\n curve_list.append(Arc3(Z4, Z5, self.is_outwards()))\n curve_list.append(Segment(Z5, Z6))\n curve_list.append(Arc1(Z6, Z7, rot_sign * self.R1, self.is_outwards()))\n curve_list.append(Segment(Z7, Z8))\n\n return curve_list", "def plot_lines(line_list, line_width=1.0):\n \n for line in line_list: \n start_lat, end_lat, start_lon, end_lon, color, style, input_projection, resolution = line\n \n assert style in list(line_style_dict.keys())\n assert resolution in ['high', 'low']\n\n start_lat = float(start_lat)\n start_lon = float(start_lon)\n end_lat = float(end_lat)\n end_lon = float(end_lon)\n\n lons = iris.analysis.cartography.wrap_lons(numpy.array([start_lon, end_lon]), 0, 360)\n # FIXME: start=0 might not work for all input/output projection combos\n\n if resolution == 'low':\n lats = numpy.array([start_lat, end_lat]) \n elif resolution == 'high':\n assert start_lat == end_lat or start_lon == end_lon, \\\n \"High res lines need constant lat or lon in reference coordinate system\"\n\n if start_lat == end_lat:\n lons = numpy.arange(lons[0], lons[-1] + 0.5, 0.5)\n lats = numpy.repeat(start_lat, len(lons))\n else:\n lats = numpy.arange(start_lat, end_lat + 0.5, 0.5)\n lons = numpy.repeat(lons[0], len(lats))\n\n plt.plot(lons, lats, \n linestyle=line_style_dict[style], 
\n color=color, linewidth=line_width,\n transform=input_projections[input_projection])", "def __init__(self, points):\n self.points = points\n self.lines = []\n\n orientation = 1\n for i, point in enumerate(self.points):\n try:\n if points[i+1].x > point.x:\n orientation = orientation\n else:\n orientation = - 1\n point.orientation = orientation\n self.points[i+1].orientation = orientation\n self.lines.append(Line(point, self.points[i+1]))\n except IndexError:\n point.orientation = orientation\n self.lines.append(Line(point, self.points[0]))", "def pointListForCurve(x, y, type):\n\n\tif x < 10:\n\t\txString = \"0%d\" % x\n\telse:\n\t\txString = \"%d\" % x\n\n\tif x < 11:\n\t\txMString = \"0%d\" % (x - 1)\n\telse:\n\t\txMString = \"%d\" % (x - 1)\n\n\tif x < 9:\n\t\txPString = \"0%d\" % (x + 1)\n\telse:\n\t\txPString = \"%d\" % (x + 1)\n\n\tif x < 8:\n\t\txPPString = \"0%d\" % (x + 2)\n\telse:\n\t\txPPString = \"%d\" % (x + 2)\n\n\tif y < 11:\n\t\tyMString = \"0%d\" % (y - 1)\n\telse:\n\t\tyMString = \"%d\" % (y - 1)\n\n\tif y < 9:\n\t\tyPString = \"0%d\" % (y + 1)\n\telse:\n\t\tyPString = \"%d\" % (y + 1)\n\n\tif y < 8:\n\t\tyPPString = \"0%d\" % (y + 2)\n\telse:\n\t\tyPPString = \"%d\" % (y + 2)\n\n\tif y < 10:\n\t\tyString = \"0%d\" % y\n\telse:\n\t\tyString = \"%d\" % y\n\n\tinnerRadius = 54.0 / 64.0\n\touterRadius = 87.0 / 64.0\n\n\tslices = 10\n\n\t# Dots are numbered as xxyy[IO]z\n\t# The I means it is the inside trek, the O the outside\n\t# The z is which particular dot it is (0-9)\n\t# Note that all paths are marked as being inside the top-left square\n\t# Except for entrence and exit dots.\n\t# Curves are generated from star + 10 to end - 10\n\n\tif type == 8:\t\t\t# Bottom right\n\t\tcenterX = 25.0 / 64.0\n\t\tcenterY = 25.0 / 64.0\n\t\tstart = 0.0\n\t\tend = 90.0\n\n\t\tenterIn =\t[\"%s%sTL\" % (xPString, yString), 0.25, 0.25, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sBL\" % (xString, yPString), 0.25, 0.75, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sTL\" % (xString, yPString), 0.25, 0.25, [\"%s%sTR\" % (xMString, yPString)]]\n\t\texitOut =\t[\"%s%sTR\" % (xPString, yString), 0.75, 0.25, [\"%s%sBR\" % (xPString, yMString)]]\n\n\t\tendIn = \"%s%sTL\" % (xString, yPString)\n\t\tendOut = \"%s%sTR\" % (xPString, yString)\n\n\telif type == 9:\t\t\t# Bottom left\n\t\tcenterX = 103.0 / 64.0\n\t\tcenterY = 25.0 / 64.0\n\t\tstart = 90.0\n\t\tend = 180.0\n\n\t\tenterIn =\t[\"%s%sTR\" % (xPString, yPString), 0.75, 0.25, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\"%s%sBR\" % (xString, yMString)]]\n\t\texitOut =\t[\"%s%sBR\" % (xPString, yPString), 0.75, 0.75, [\"%s%sBL\" % (xPPString, yPString)]]\n\n\t\tendIn = \"%s%sTR\" % (xString, yString)\n\t\tendOut = \"%s%sBR\" % (xPString, yPString)\n\n\telif type == 10:\t\t# Top left\n\t\tcenterX = 103.0 / 64.0\n\t\tcenterY = 103.0 / 64.0\n\t\tstart = 180.0\n\t\tend = 270.0\n\n\t\tenterIn =\t[\"%s%sBR\" % (xString, yPString), 0.75, 0.75, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sTR\" % (xPString, yString), 0.75, 0.25, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sBR\" % (xPString, yString), 0.75, 0.75, [\"%s%sBL\" % (xPPString, yString)]]\n\t\texitOut =\t[\"%s%sBL\" % (xString, yPString), 0.25, 0.75, [\"%s%sTL\" % (xString, yPPString)]]\n\n\t\tendIn = \"%s%sBR\" % (xPString, yString)\n\t\tendOut = \"%s%sBL\" % (xString, yPString)\n\n\telse: # 
type == 11:\t\t# Top right\n\t\tcenterX = 25.0 / 64.0\n\t\tcenterY = 103.0 / 64.0\n\t\tstart = 270.0\n\t\tend = 360.0\n\n\t\tenterIn =\t[\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\"%s%sI0\" % (xString, yString)]]\n\t\tenterOut =\t[\"%s%sBR\" % (xPString, yPString), 0.75, 0.75, [\"%s%sO0\" % (xString, yString)]]\n\t\texitIn =\t[\"%s%sBL\" % (xPString, yPString), 0.25, 0.75, [\"%s%sTL\" % (xPString, yPPString)]]\n\t\texitOut =\t[\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\"%s%sTR\" % (xMString, yString)]]\n\n\t\tendIn = \"%s%sBL\" % (xPString, yPString)\n\t\tendOut = \"%s%sTL\" % (xString, yString)\n\n\tpointList = [enterIn, enterOut, exitIn, exitOut]\n\n\tstring = \"%s%s\" % (xString, yString)\n\tstep = ((end - 1) - (start + 1)) / float(slices)\n\n\tfor i in range(slices):\n\n\t\tangle = radians(start + step * i)\n\n\t\tif i < 9:\n\t\t\ttemp = [\"%sI%d\" % (string, i), centerX + cos(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\"%sI%d\" % (string, i + 1)]]\n\t\telse:\n\t\t\ttemp = [\"%sI%d\" % (string, i), centerX + cos(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * innerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[endIn]]\n\n\t\tpointList.append(temp)\n\n\t\tangle = radians(start + step * (10 - i))\n\n\t\tif i < 9:\n\t\t\ttemp = [\"%sO%d\" % (string, i), centerX + cos(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\"%sO%d\" % (string, i + 1)]]\n\t\telse:\n\t\t\ttemp = [\"%sO%d\" % (string, i), centerX + cos(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\tcenterY + sin(angle) * outerRadius,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[endOut]]\n\n\t\tpointList.append(temp)\n\n\treturn pointList", "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def get_all_lines(self, game_state, current_color):\n\n singles = []\n doubles = []\n triples = []\n\n #Collect single \n for coord in all_coords():\n if game_state[coord] == current_color:\n singles.append([coord])\n\n #Add all positive-direction movements to form the doubles\n for single in singles:\n for vector in [(1, 0), (0, 1), (1, 1)]:\n\n coord = sum_tuples(single[0], vector)\n if coord_in_board(coord) and game_state[ coord ] == current_color:\n doubles.append(single + [coord])\n\n #Add the the third marble in the row after the two unless it doesn't exist\n for double in doubles:\n vector = sub_tuples(double[1], double[0])\n coord = sum_tuples(double[1], vector)\n\n if coord_in_board(coord) and game_state[ coord ] == current_color:\n triples.append(double + [coord])\n\n\n return singles + doubles + triples", "def polylinedictarraycopy(d):#d——原始图层多段线字典 \r\n dictlist=[]\r\n ratiolist=[] #放缩率列表\r\n rationumaccumulationlist=[] #放缩率数量累加列表\r\n \r\n eachrationum=globalconfig.X_ARRAY_NUM//globalconfig.RATIO_NUM\r\n leftrationum=globalconfig.X_ARRAY_NUM%globalconfig.RATIO_NUM\r\n \r\n eachrationumlist=[eachrationum]*globalconfig.RATIO_NUM #各个放缩率对应数量的列表\r\n \r\n for i in range((globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2,(globalconfig.RATIO_NUM-1)//2-(leftrationum-1)//2+leftrationum):\r\n eachrationumlist[i]=eachrationumlist[i]+1 #将整除后的余值加入到靠中间放缩率的方案中。\r\n \r\n rationumaccumulationlist.append(0) \r\n \r\n for i in range(1,globalconfig.RATIO_NUM): #计算放缩率数量累加列表\r\n 
rationumaccumulationlist.append(rationumaccumulationlist[i-1]+eachrationumlist[i-1])\r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #计算放缩率列表\r\n ratiolist.append((globalconfig.CENTER_RATIO-((globalconfig.RATIO_NUM+1)//2-1)*globalconfig.RATIO_DIFF)+i*globalconfig.RATIO_DIFF) \r\n \r\n for i in range(0,globalconfig.RATIO_NUM): #每种放缩率\r\n for j in range(0,eachrationumlist[i]): #每种放缩率对应数量\r\n newdict={}\r\n for e in d: #将字典中值即每一图层对应的多段线列表进行复制并移动到指定位置\r\n newdict[e]=polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist)) \r\n #newdict.append([e,polylinedatasetarraycopy(d[e],ratiolist[i],globalconfig.CUTLINE_X_OFFSET+globalconfig.X_BLANK+(rationumaccumulationlist[i]+j+0.5)*globalconfig.X_LENGTH/globalconfig.CENTER_RATIO,globalconfig.CUTLINE_Y_OFFSET+globalconfig.Y_BLANK+0.5*globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO,e,len(dictlist))])\r\n dictlist.append(newdict) \r\n return (dictlist,ratiolist,eachrationumlist)", "def interpolateCubicNatural() :\n\n S = []\n\n # for all parameters\n for i in range(11):\n y = []\n # get i-th paramter\n for k in range(len(keyframe)):\n y.append(keyframe[k][i])\n\n interpolants = interpolateSpline(keytime, y)\n S.append(interpolants)\n return S", "def get_obstList(self,X,Y,Z):\n \n x_c_cone = self.x_c\n\tz_c_cone = self.z_c\n y_c_cone = 0\n x_s = 2.25*2*self.cyl_rad\n rad_cone = x_s + self.cyl_rad\n\th_cone = rad_cone*0.57735\n\n floor_part = np.array(np.where(Y < h_cone)).flatten()\n\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n scour_pit = np.array(np.where( (X - x_c_cone)**2 + (Z - z_c_cone)**2 <= ((self.cyl_rad/cone)/(h_cone))**2*(Y - y_c_cone)**2))\n\n # remove the scour pit from the floor\n obst_list = np.setxor1d(floor_part[:], \n np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n # then add the cylinder\n obst_list = np.union1d(obst_list[:],cyl_part[:])\n \n return list(obst_list[:])", "def as_lines(self, steps):\n res = []\n p0 = self.lerp(0)\n for step in range(steps):\n p1 = self.lerp((step + 1) / steps)\n s = Line(p0=p0, p1=p1)\n res.append(s)\n p0 = p1\n\n if self.line is not None:\n p0 = self.line.lerp(0)\n for step in range(steps):\n p1 = self.line.lerp((step + 1) / steps)\n res[step].line = Line(p0=p0, p1=p1)\n p0 = p1\n return res", "def cut_lines(lines, pseudoread_length):\n step = int(pseudoread_length / 2)\n\n line_iterate = [x for x in range(0, len(lines), 2)]\n\n result = []\n\n for index in line_iterate:\n\n if (index % 100000) == 0:\n print(index)\n\n id = lines[index].strip()\n\n sequence = lines[index + 1].strip()\n\n # if sequence is shorter than single window, we return just window\n end_of_range = len(sequence) - step if (len(sequence) - step > 0) else len(sequence)\n range_iterate = [x for x in\n range(0, end_of_range, step)]\n\n for i in range_iterate:\n new_id = id + '|{}'.format(i)\n kmer = sequence[i:i + pseudoread_length]\n result.append(new_id)\n result.append(kmer)\n\n return result", "def addMarkersSpline(self):\r\n\r\n self.markersSpline = QtGui.QGraphicsItemGroup(\r\n parent=self.contour_item,\r\n scene=self.scene)\r\n\r\n for x, y in zip(*self.spline_data[0]):\r\n\r\n # put airfoil contour points as graphicsitem\r\n points = gc.GraphicsCollection()\r\n points.pen.setColor(QtGui.QColor(60, 60, 80, 
255))\r\n points.brush.setColor(QtGui.QColor(180, 180, 50, 230))\r\n points.pen.setCosmetic(True) # no pen thickness change when zoomed\r\n\r\n points.Circle(x, y, 0.003, marker=True)\r\n\r\n marker = PGraphicsItem.GraphicsItem(points, self.scene)\r\n self.markersSpline.addToGroup(marker)\r\n\r\n self.contour_group.addToGroup(self.markersSpline)", "def _computeSpline(self, p0, p1, p2, p3):\n t = 0.0\n while t <= 1:\n point = CatmullRomSpline.computePoint(p0, p1, p2, p3, t)\n self.points.append(point)\n t += 0.1", "def get_transit_light_curves(self, params, plots=False):\n time_diffs = np.diff(sorted(self.times.jd))\n diff_between_transits = params.per/2.\n split_inds = np.argwhere(time_diffs > diff_between_transits) + 1\n\n if len(split_inds) > 1:\n\n split_ind_pairs = [[0, split_inds[0][0]]]\n split_ind_pairs.extend([[split_inds[i][0], split_inds[i+1][0]]\n for i in range(len(split_inds)-1)])\n split_ind_pairs.extend([[split_inds[-1], len(self.times)]])\n\n transit_light_curves = []\n counter = -1\n for start_ind, end_ind in split_ind_pairs:\n counter += 1\n if plots:\n plt.plot(self.times.jd[start_ind:end_ind],\n self.fluxes[start_ind:end_ind], '.-')\n\n parameters = dict(times=self.times[start_ind:end_ind],\n fluxes=self.fluxes[start_ind:end_ind],\n errors=self.errors[start_ind:end_ind],\n quarters=self.quarters[start_ind:end_ind],\n name=counter)\n transit_light_curves.append(TransitLightCurve(**parameters))\n if plots:\n plt.show()\n else:\n transit_light_curves = []\n\n return transit_light_curves", "def datasetratiocopy_xl_extend(l,ratio,x_offset,y_offset):#只延伸上下两边以及左边的点\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01:\r\n if pos_x<0: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def group_parallel_lines(lines):\n parallel_bins = {}\n for line in lines:\n if len(parallel_bins) == 0:\n parallel_bins[line[1]] = [line]\n else:\n keys = list(parallel_bins.keys())\n closeness = np.isclose(line[1], np.array(\n keys), atol=np.radians(10))\n if np.any(closeness):\n k = keys[np.where(closeness)[0][0]]\n parallel_bins[k].append(line)\n else:\n parallel_bins[line[1]] = [line]\n return list(parallel_bins.values())", "def discretize_polyline(pts, segments, keep_all_pts=True):\n total_length = polyline_length(pts)\n lines = []\n p0 = Point(pts[0])\n inc_length = 0\n for p in pts[1:]:\n p1 = Point(p)\n line_length = p0.distance_to(p1)\n lines.append((p0, p1, line_length, inc_length))\n inc_length += line_length\n p0 = p1\n if isinstance(segments, (int, float)):\n segments = [(x + 1) / segments for x in list(range(int(segments)))]\n vtx = [Point(pts[0]).as_tuple()]\n num_segments = len(segments)\n for i, segment in enumerate(segments):\n ds = segment * total_length\n i_1 = i + 1\n for line in lines:\n if ds <= (line[2] + line[3]):\n dl = (ds - line[3]) / line[2]\n v = discretize_line(line[0], line[1], [dl])\n 
vtx.append(v[1])\n if i_1 < num_segments and keep_all_pts:\n dn = (segments[i_1]) * total_length\n if (dn - line[3]) / line[2] > 1.0:\n vtx.append(line[1])\n break\n return vtx", "def build_splines(self, nsc, alpha=0.05, initial_beta=1.5):\n raise NotImplementedError", "def bearline(source,la,lo,dist,wdir):\r\n \r\n \r\n bearlines =[]\r\n lon = source[lo]\r\n lat = source[la]\r\n wdir = source[wdir] \r\n for coor in zip(lon,lat,wdir): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n in_dir = coor[2]\r\n # start point of the line \r\n origin = geopy.Point(in_lat,in_lon)\r\n # find the end point of the line \r\n end_p = distance(kilometers=dist).destination(origin, in_dir)\r\n e_lat, e_lon = end_p.latitude, end_p.longitude\r\n # create points object for start point and end point \r\n pt1 = Point(in_lon,in_lat)\r\n pt2 = Point(e_lon,e_lat)\r\n # create downwind trajectory \r\n line = LineString([pt1,pt2])\r\n bearlines.append(line) \r\n \r\n return bearlines", "def pwlFly(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n Bvec_complete = []\n Sol_complete = []\n meas_complete = []\n model_complete = []\n postchis = []\n prechis = []\n aics = []\n bics = []\n #w = 1;\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n #print(\"NUMD:\",numd)\n if numd < 2:\n continue\n #\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. 
a large value -> 1 will force the model to be closer to 0\n # This gets too large for lots of observations, s best to doit on the fly..\n #\n Neq = np.eye(numZD,dtype=float)# * 0.001\n Apart = np.zeros((numd,numZD))\n\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = (1.-(azData[i,2]-iz*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-iz*zenSpacing)/zenSpacing\n w = np.sin(data[i,2]/180.*np.pi)\n for k in range(iz,iz+2):\n for l in range(iz,iz+2):\n Neq[k,l] = Neq[k,l] + (Apart[i,l]*Apart[i,k]) * 1./w**2\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n #Qxx = np.dot(Apart.T,Apart)\n #Qvv = np.subtract( np.eye(numd) , np.dot(np.dot(Apart,Qxx),Apart.T))\n #sd = np.squeeze(np.diag(Qvv))\n #dx = np.dot(np.linalg.pinv(Qxx),Bvec)\n #dl = np.dot(Apart,dx)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n postchis.append(np.sqrt(postchi/numd))\n prechis.append(np.sqrt(prechi/numd))\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n # calculate the model values for each obs\n model = np.dot(Apart,Sol) #np.zeros(numd)\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n # zen = azData[d,2]\n # iz = int(np.floor(azData[d,2]/zenSpacing))\n # #model[d] = Sol[iz]\n\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),gls_results.rsquared,gls_results.aic,gls_results.bic)\n \n # loglikelihood(meas,model,sd)\n #sd = np.squeeze(np.diag(Qvv))\n #print(\"meas, model, sd:\",np.shape(azData),np.shape(model),np.shape(sd))\n f = loglikelihood(azData[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n aics.append(aic) \n bics.append(bic) \n #print(\"=========================\")\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #A_complete = np.squeeze(np.asarray(A_complete.todense()))\n #print(\"A shape\",np.shape(A_complete))\n\n print(\"Doing a fit to the data\")\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n\n return pwl_All, pwlSig_All", "def getValuesLorentz(x1, x2, x3, x4, iters=1000, h=0.01):\n xs, ys, zs = [], [], []\n for i in range(1, iters + 1):\n xs.append(x1)\n ys.append(x2)\n zs.append(x3)\n a1, a2, a3, a4 = lorentz(x1, x2, x3, x4, h)\n b1, b2, b3, b4 = lorentz(x1 + a1 / 2, x2 + a2 / 2, x3 + a3 / 2, x4 + a4 / 2, h)\n c1, c2, c3, c4 = lorentz(x1 + b1 / 2, x2 + b2 / 2, x3 + b3 / 2, x4 + b4 / 2, h)\n d1, d2, d3, d4 = lorentz(x1 + c1, x2 + c2, x3 + c3, x4 + c4, h)\n x1 = x1 + (a1 + 2 * b1 + 2 * c1 + d1) / 6\n x2 = x2 + (a2 + 2 * b2 + 2 * c2 + d2) / 6\n x3 = x3 + (a3 + 2 * b3 + 2 * c3 + d3) / 6\n x4 = x4 + (a4 + 2 * b4 + 2 * c4 + d4) / 6\n return xs, ys, zs", "def linecut_points( **kwargs ):\n npoints = kwargs.get('npoints', 320)\n extents = kwargs.get('extents',None)\n lims = kwargs.get('lims', (-80.,80.))\n 
direc = kwargs.get('direc', (np.pi/2, 0.))\n origin = kwargs.get('origin', vec3(0.,0.,0.))\n\n if extents is not None:\n lims = (-extents, extents)\n\n # Prepare set of points for plot \n t = np.linspace( lims[0], lims[1], npoints )\n unit = vec3()\n th = direc[0]\n ph = direc[1] \n unit.set_spherical(1, th, ph) \n # Convert vec3s to ndarray\n unit = np.array(unit)\n origin = np.array(origin) \n #\n XYZ = origin + np.outer(t, unit)\n X = XYZ[:,0]\n Y = XYZ[:,1]\n Z = XYZ[:,2]\n \n return t, X, Y, Z, lims", "def datasetratiocopy_notextend(l,ratio,x_offset,y_offset):#虽然说是不延伸,但是上下两边上的点Y方向还是会延伸的。\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+y_offset+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH) #虽然说是不延伸,但是上下两边上的点Y方向还是会延伸的。\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n \r\n \r\n return dataset", "def make_start_moves(self):\n self.geos = Geos([])\n\n if g.config.machine_type == 'drag_knife':\n self.make_swivelknife_move()\n return\n\n # Get the start rad. and the length of the line segment at begin.\n start_rad = self.shape.parentLayer.start_radius\n\n # Get tool radius based on tool diameter.\n tool_rad = self.shape.parentLayer.getToolRadius()\n\n # Calculate the starting point with and without compensation.\n start = self.start\n angle = self.angle\n\n if self.shape.cut_cor == 40:\n self.append(RapidPos(start))\n \n elif self.shape.cut_cor != 40 and not g.config.vars.Cutter_Compensation[\"done_by_machine\"]:\n\n toolwidth = self.shape.parentLayer.getToolRadius()\n offtype = \"in\" if self.shape.cut_cor == 42 else \"out\"\n offshape = offShapeClass(parent = self.shape, offset = toolwidth, offtype = offtype)\n\n if len(offshape.rawoff) > 0:\n start, angle = offshape.rawoff[0].get_start_end_points(True, True)\n\n self.append(RapidPos(start))\n self.geos += offshape.rawoff\n\n # Cutting Compensation Left\n elif self.shape.cut_cor == 41:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle + pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle + pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. 
and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=1)\n self.append(start_rad)\n\n # Cutting Compensation Right\n elif self.shape.cut_cor == 42:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle - pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle - pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=0)\n self.append(start_rad)", "def __init__(self, is_p1_turn: bool, side_length: int) -> None:\n super().__init__(is_p1_turn)\n self.side_length = side_length\n # ISSUE: what if node is more than 26 --> no need to handle side more than 5\n # construct a list of uppercase and lower case letters\n alph_lst_upper = list(string.ascii_uppercase)\n alph_lst_lower = list(string.ascii_lowercase)\n # alph_lst has a length of 52\n alph_lst = alph_lst_upper + alph_lst_lower\n\n # assign original value for each ley-line\n hori_result = []\n for i in range(side_length + 1):\n hori_result.append(\"@\")\n left_result = []\n for i in range(side_length + 1):\n left_result.append(\"@\")\n right_result = []\n for i in range(side_length + 1):\n right_result.append(\"@\")\n self.hori_result = hori_result\n self.left_result = left_result\n self.right_result = right_result\n\n self.hori_lst = []\n self.left_lst = []\n self.right_lst = []\n\n # construct horizontal ley-lines\n n = 2\n start_index = 0\n end_index = 0\n while n <= side_length + 1:\n end_index = start_index + n\n self.hori_lst.append(alph_lst[start_index:end_index])\n start_index = end_index\n n += 1\n end_index = start_index + side_length\n self.hori_lst.append(alph_lst[start_index:end_index])\n\n # copy hori_lst\n hori_copy = []\n for item in self.hori_lst:\n hori_copy.append(item)\n\n # construct left ley-lines\n for i in range(side_length + 1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) > i:\n temp.append(lst[i])\n self.left_lst.append(temp)\n for i in range(1, side_length + 1):\n self.left_lst[i].append(hori_copy[-1][i - 1])\n\n # construct right ley-lines\n for i in range(-1, side_length * (-1) - 2, -1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) >= i * (-1):\n temp.append(lst[i])\n self.right_lst.append(temp)\n self.right_lst = self.right_lst[::-1]\n for i in range(side_length):\n self.right_lst[i].append(hori_copy[-1][i])", "def CatmullRomLoop(loop, pointsPerUnitDist=1.):\n if len(loop) < 4:\n raise ValueError(\"Loop must have at least 4 points in it\")\n ret = []\n # Add extra control points to ends\n loop = [loop[-2],] + loop + [loop[1],]\n # Produce coords for loop\n for i in xrange(len(loop)-3):\n numPoints = int(distance(loop[i+1], loop[i+2]) * pointsPerUnitDist)\n ret.append(CatmullRomSpline(loop[i], loop[i+1], loop[i+2], loop[i+3], nPoints=numPoints))\n ret = [tuple(coords) for seg in ret for coords in seg]\n return ret", "def create_vessel_components(self) -> list:\n\n # Blanket computed from plasma\n blanket = paramak.BlanketFP(\n plasma=self.plasma,\n thickness=4.06e2 - 
3.52e2,\n start_angle=-70,\n stop_angle=230,\n rotation_angle=self.rotation_angle,\n vertical_displacement=self.plasma.vertical_displacement,\n offset_from_plasma=[[-70, 0, 90, 180, 230], [50, 20, 59, 16, 50]],\n name=\"blanket\",\n )\n\n # SN Divertor\n divertor = paramak.ITERtypeDivertor(\n anchors=((4.34e2, -3.3e2), (5.56e2, -3.74e2)),\n coverages=(105, 125),\n lengths=(45, 75),\n radii=(68, 65),\n tilts=(-30, 2),\n dome_height=45,\n dome_pos=0.45,\n rotation_angle=self.rotation_angle,\n name=\"divertor\",\n )\n\n # Vacuum vessel\n divertor.points # trigger the building of the points for divertor\n # the inner part of the vacuum vessel is computed from the outer\n # points of the blanket and the divertor\n vac_vessel_inner = paramak.RotateMixedShape(\n points=blanket.outer_points + divertor.casing_points,\n rotation_angle=self.rotation_angle,\n name=\"vessel\",\n )\n\n vac_vessel = paramak.RotateSplineShape(\n points=[\n (327.77, 36.5026668124882),\n (327.77, 73.37741270075162),\n (327.77, 108.31180820215741),\n (327.77, 143.2462037035632),\n (327.77, 178.18059920496898),\n (327.77, 213.11499470637477),\n (327.77, 248.04939020778068),\n (327.77, 282.98378570918646),\n (327.77, 317.9181812105922),\n (328.6121587814181, 368.23899806938385),\n (336.18303032328333, 422.4306297110355),\n (350.4835654579176, 457.5437492206628),\n (371.95910957013655, 492.47041663587777),\n (404.3208742000702, 522.0151685493631),\n (439.6516080621078, 544.4559826211985),\n (474.98234192414554, 556.3610266211815),\n (510.2245275810152, 564.0927634387052),\n (545.6438096482208, 565.1200145185009),\n (565.832800426528, 563.1864687746993),\n (580.9745435102584, 559.4390362932862),\n (616.3052773722961, 548.4109567158157),\n (651.6360112343338, 533.224020531035),\n (686.9667450963714, 515.3041214328789),\n (722.297478958409, 492.23516177329117),\n (757.6282128204466, 466.8689289401416),\n (792.9589466824843, 437.10619055069265),\n (825.7660566972336, 403.7167485984509),\n (853.525919017406, 369.42176700251196),\n (877.9209495411939, 333.90960594986575),\n (898.9511482685972, 300.5186330502012),\n (916.616515199616, 265.2383422522439),\n (932.5994662324425, 230.72194441870647),\n (946.0587934179808, 193.1122328856627),\n (956.1532888071343, 156.87835598377137),\n (962.8829523999035, 118.10702768634405),\n (967.9302000944803, 80.39197257542594),\n (968.7714080435763, 38.24754419835381),\n (968.7714080435763, 25.77097437642317),\n (964.5653682980957, -1.670738783514139),\n (956.9944967562304, -29.93883090626548),\n (956.1532888071343, -34.59540221679083),\n (946.0587934179808, -71.15339839027786),\n (931.7582582833464, -104.25874435511184),\n (914.9340993014238, -139.91477225259314),\n (898.9511482685972, -174.48160361826422),\n (883.8094051848669, -213.64300914878197),\n (867.8264541520404, -248.21908241802464),\n (851.0022951701176, -284.2078188440911),\n (834.1781361881949, -319.9470238737184),\n (818.1951851553683, -359.0978394110024),\n (800.5298182243495, -391.2313539579658),\n (776.1347877005617, -427.87174371008393),\n (744.1688856349085, -460.45530873911446),\n (708.8381517728709, -490.0255912806248),\n (673.5074179108332, -512.7040543014494),\n (638.1766840487956, -528.371873327094),\n (602.8459501867579, -539.0490644239661),\n (567.5152163247203, -546.1219131278361),\n (532.1844824626827, -548.9566889080664),\n (496.85374860064496, -547.7514325554811),\n (461.52301473860734, -541.3971156414638),\n (426.1922808765697, -527.596464992453),\n (390.8615470145321, -501.2796363633471),\n (360.57806084707124, 
-468.0473902249954),\n (340.389070068764, -431.4355817359209),\n (329.87397070506233, -399.072068113844),\n (327.770950832322, -357.4796824533661),\n (327.770950832322, -311.73270913617455),\n (327.770950832322, -276.79831363476876),\n (327.770950832322, -241.86391813336297),\n (327.770950832322, -206.92952263195718),\n (327.770950832322, -171.99512713055117),\n (327.770950832322, -137.06073162914538),\n (327.770950832322, -102.12633612773948),\n (327.770950832322, -67.19194062633369),\n ],\n cut=[vac_vessel_inner], # to make a hollow shape\n rotation_angle=self.rotation_angle,\n name=\"vessel_inner\",\n )\n\n return [divertor, blanket, vac_vessel, vac_vessel_inner]", "def GetXYListAndPolyListFromCVLS(cVLS, allValsByFrame, orderOfSCsByValueByFrame):\n numFrames = len(cVLS)\n xyList = [\n sorted(list(set(tuple(pt) for c in cvlsByVal for pt in c[2])))\n for cvlsByVal in cVLS\n ]\n polyList = []\n\n for t in range(numFrames):\n polyList.append({})\n for v in allValsByFrame[t]:\n subContours = [\n (cVLS[t][-index][2][::-1] if index < 0 else cVLS[t][index][2])\n for index in orderOfSCsByValueByFrame[t][v]\n ] # reconstruct the sc's, flipping if index is negative\n polyList[-1][v] = [\n xyList[t].index(totuple(pt)) for sc in subContours for pt in sc[:-1]\n ]\n polyList[-1][v] = polyList[-1][v] + [\n polyList[-1][v][0]\n ] # Tack on the first point at the end to close the loop\n # VFMin doesn't like this format; make sure to remove this last point before saving to a file or passing to VFM...\n # polyList[-1][v] = removeDuplicates(polyList[-1][v])+[polyList[-1][v][0]] # Remove interior duplication...\n return xyList, polyList", "def _get_lines(self) -> tuple[VGroup, VGroup]:\n center = self.get_origin()\n ratio_faded_lines = self.faded_line_ratio\n offset = self.azimuth_offset\n\n if ratio_faded_lines == 0: # don't show faded lines\n ratio_faded_lines = 1 # i.e. 
set ratio to 1\n rstep = (1 / ratio_faded_lines) * self.x_axis.x_range[2]\n astep = (1 / ratio_faded_lines) * (TAU * (1 / self.azimuth_step))\n rlines1 = VGroup()\n rlines2 = VGroup()\n alines1 = VGroup()\n alines2 = VGroup()\n\n rinput = np.arange(0, self.x_axis.x_range[1] + rstep, rstep)\n ainput = np.arange(0, TAU, astep)\n\n unit_vector = self.x_axis.get_unit_vector()[0]\n\n for k, x in enumerate(rinput):\n new_line = Circle(radius=x * unit_vector)\n if k % ratio_faded_lines == 0:\n alines1.add(new_line)\n else:\n alines2.add(new_line)\n\n line = Line(center, self.get_x_axis().get_end())\n\n for k, x in enumerate(ainput):\n new_line = line.copy()\n new_line.rotate(x + offset, about_point=center)\n if k % ratio_faded_lines == 0:\n rlines1.add(new_line)\n else:\n rlines2.add(new_line)\n\n lines1 = VGroup(*rlines1, *alines1)\n lines2 = VGroup(*rlines2, *alines2)\n return lines1, lines2", "def discretize_polyline(self, polyline):\n discret = []\n # ~~> Calculate the minimum mesh resolution\n dxy = math.sqrt(min(np.square(np.sum(np.fabs(\\\n self.meshx[self.ikle2]-\\\n self.meshx[np.roll(self.ikle2, 1)]),\n axis=1)/3.0) + \\\n np.square(np.sum(np.fabs(\\\n self.meshy[self.ikle2]-\\\n self.meshy[np.roll(self.ikle2, 1)]),\n axis=1)/3.0)))\n for i in range(len(polyline)-1):\n dio = math.sqrt(sum(np.square(np.array(polyline[i])\\\n -np.array(polyline[i+1]))))\n discret.append(int(dio/dxy))\n\n return discret", "def Flowline_CSV(filename, nlines=None, has_width=False, flip_order=True):\n \n f = open(filename,'r')\n \n header = f.readline() #header line\n hdr = header.strip('\\r\\n')\n keys = hdr.split(',') #get names of variables\n #keys[-1] = keys[-1].strip('\\r\\n')\n \n data = {k : [] for k in keys} #end of line has hidden characters, so 'point_m' does not get read\n #data['Line number'] = []\n data['Length_ID'] = collections.OrderedDict() #new dictionary that counts how many points (i.e. lines of file) are in each flowline. 
Must be ordered for later iteration!\n #if nlines is not None:\n # data['Lineslist'] = [[] for k in range(nlines)] \n data['Lineslist'] = [] #initialize as empty list\n \n lines = f.readlines()\n f.close()\n \n temp = []\n j = 0\n for i,l in enumerate(lines):\n linstrip = l.strip('\\r\\n')\n parts = linstrip.split(',')\n \n #data['Line-number'].append(parts[0])\n #data['x-coord'].append(parts[1])\n #data['y-coord'].append(parts[2])\n \n x_coord = float(parts[1])\n y_coord = float(parts[2])\n \n if parts[0] not in data['Length_ID'].keys(): #finding out where lines separate \n temp = []\n data['Lineslist'].append(temp) #initialize new empty array that can be modified in-place later\n data['Length_ID'][parts[0]] = 1\n j+=1 \n else:\n data['Length_ID'][parts[0]] += 1\n #if xbounds[0]<x_coord<xbounds[1]: #taking out values outside of map area\n # if ybounds[0]<y_coord<ybounds[1]: \n \n if has_width:\n width = float(parts[3])\n temp.append((x_coord, y_coord, width))\n else:\n temp.append((x_coord, y_coord))\n \n data['Lineslist'][j-1] = np.array(temp) #need to modify an existing array rather than append to keep correct indexing\n\n #data['Lineslist'][j] = np.array(temp) \n \n if nlines is None:\n nlines = len(data['Length_ID'].keys())\n \n if flip_order: \n centrelines_list = [np.array(data['Lineslist'][j])[::-1] for j in range(nlines)] #making arrays, reversed to start at terminus rather than peak\n else:\n centrelines_list = [np.array(data['Lineslist'][j]) for j in range(nlines)] # arrays already start at terminus\n\n \n return centrelines_list", "def lanczos(dx, width, cutoff, /):\n # Coefficients and initial stuff\n # n = (width/dx)//1 # convert window width from 'time units' to 'steps'\n # n = width//2\n # Convert alpha to wavenumber (new units are 'inverse timesteps')\n alpha = 1.0 / (cutoff / dx)\n n = width\n n = (n - 1) // 2 + 1\n tau = np.arange(1, n + 1) # lag time\n C0 = 2 * alpha # integral of cutoff-response function is alpha*pi/pi\n Ck = np.sin(2 * np.pi * alpha * tau) / (np.pi * tau)\n Cktilde = Ck * np.sin(np.pi * tau / n) / (np.pi * tau / n)\n\n # Return filter\n # Example: n = 9 returns 4 + 4 + 1 points\n order = n * 2 - 1\n print(f'Order-{order} Lanczos window')\n window = np.concatenate((np.flipud(Cktilde), np.array([C0]), Cktilde))\n return window[1:-1], 1", "def harvest_coupled_cluster(lines, psivar):\n \"\"\"Sample (canonical) CCSD results block\"\"\"\n #----------------------\n #COUPLED CLUSTER ENERGY\n #----------------------\n #\n #E(0) ... -76.063720080\n #E(CORR) ... -0.288938791\n #E(TOT) ... -76.352658871\n #Singles Norm <S|S>**1/2 ... 0.021106262\n #T1 diagnostic ... 0.007462191\n #\n\n \"\"\"Sample DLPNO coupled cluster block (CCSD)\"\"\"\n #----------------------\n #COUPLED CLUSTER ENERGY\n #----------------------\n #\n #E(0) ... -76.026019996\n #E(CORR)(strong-pairs) ... -0.211953159\n #E(CORR)(weak-pairs) ... -0.000007244\n #E(CORR)(corrected) ... -0.211960403\n #E(TOT) ... -76.237980399\n #Singles Norm <S|S>**1/2 ... 0.014443573\n #T1 diagnostic ... 0.005106574\n #\n\n \"\"\"Sample CCSD(T) block (same for DLPNO and canonical)\"\"\"\n #\n #Triples Correction (T) ... -0.001544381\n #Final correlation energy ... -0.134770265\n #E(CCSD) ... -75.709548429\n #E(CCSD(T)) ... 
-75.711092810\n #\n\n cc_start = find_start(lines, 'COUPLED CLUSTER ENERGY')\n if cc_start == -1:\n return\n\n #psivar[\"CC REFERENCE\"] = float(lines[cc_start + 3].split()[-1])\n\n # CCSD energy block is less than 20 lines\n for i, line in enumerate(lines[cc_start:cc_start + 20], start=cc_start):\n if line[:6] == \"E(TOT)\":\n psivar[\"CCSD TOTAL ENERGY\"] = line.split()[-1]\n psivar[\"CCSD CORRELATION ENERGY\"] = lines[i-1].split()[-1]\n #psivar[\"SINGLES NORM\"] = lines[i+1].split()[-1]\n #psivar[\"T1 DIAGNOSTIC\"] = lines[i+2].split()[-1]\n break\n\n # CCSD(T) energy block\n for i, line in enumerate(lines[cc_start:], start=cc_start):\n if line[:22] == \"Triples Correction (T)\":\n #psivar[\"TRIPLES CORRELATION ENERGY\"] = line.split()[-1]\n psivar[\"CCSD(T) CORRELATION ENERGY\"] = lines[i+1].split()[-1]\n psivar[\"CCSD TOTAL ENERGY\"] = lines[i+2].split()[-1]\n psivar[\"CCSD(T) TOTAL ENERGY\"] = lines[i+3].split()[-1]\n break", "def simulation_to_lines(data: List(Float))->List(Tuple(Int, Float)):\n result = []\n counter = 0\n for payoff in data:\n result = result + [(counter, payoff)]\n counter+=1\n return result\n\n #print(str(result))", "def fixed_points(self, epsilon = 0.000001):\n # (a b) (z) = (Lz)\n # (c d) (1) ( L) \n\n # az + b = c z^2 + d z\n # 0 = c z^2 + (d-a)z - b\n # z = ((a-d) \\pm sqrt( (a-d)^2 + 4 bc )) / 2c\n\n a, b, c, d = self\n\n assert abs(c) > epsilon\n zp = ((a - d) + sqrt( (a - d)**2 + 4*b*c)) / 2*c\n zm = ((a - d) - sqrt( (a - d)**2 + 4*b*c)) / 2*c\n Lp = c*zp + d\n Lm = c*zm + d\n if abs(Lp) < abs(Lm):\n Ls = [Lp, Lm]\n out = [zp, zm]\n else:\n Ls = [Lm, Lp]\n out = [zm, zp] \n assert abs(Ls[0]) < 1 - epsilon and 1 + epsilon < abs(Ls[1])\n return [CP1((z,1)) for z in out]" ]
[ "0.6449699", "0.6432063", "0.59427017", "0.5941269", "0.5936059", "0.5897232", "0.5840504", "0.5797004", "0.57667243", "0.57253015", "0.5718147", "0.5635782", "0.56259125", "0.5621187", "0.55818194", "0.5545392", "0.5544098", "0.5543384", "0.55397063", "0.553851", "0.55319935", "0.5523383", "0.5512527", "0.54779583", "0.54613113", "0.5457675", "0.5445482", "0.54241127", "0.5410201", "0.5403958", "0.5383393", "0.53820735", "0.53670627", "0.5364326", "0.5364326", "0.5350846", "0.534261", "0.53397775", "0.5338971", "0.53353846", "0.53289664", "0.53275424", "0.53251433", "0.5323634", "0.53232825", "0.53203005", "0.5316526", "0.53163457", "0.5307334", "0.5300596", "0.5294851", "0.5288405", "0.52869755", "0.52854615", "0.5282707", "0.52821064", "0.52820724", "0.52781224", "0.5276395", "0.52708787", "0.52706665", "0.52702415", "0.5266431", "0.52631056", "0.52592105", "0.5247029", "0.52463686", "0.52453744", "0.52443576", "0.5243663", "0.5242613", "0.524079", "0.5238812", "0.5238485", "0.52267754", "0.52251184", "0.52249366", "0.5220765", "0.5219479", "0.52171123", "0.5216164", "0.5215081", "0.5212984", "0.5212036", "0.5190554", "0.51903254", "0.5185733", "0.5185388", "0.518204", "0.5178518", "0.51556057", "0.51531744", "0.5148947", "0.5146856", "0.51456374", "0.51444316", "0.5137707", "0.51363", "0.51361966", "0.5131589" ]
0.5417235
28
This algorithm returns a cutlist which performs a simple core operation. The laser runs racetrack-style around the specified core, going around all four sides before moving down to the next layer. The surrounding poly is expected to fall off the core at the end of the entire cutting operation.
import json
import math

def simple_core(block, cut, laser):
	# Number of z-layers needed to cut through the full block thickness.
	layers = int(block["thickness"]/laser["z_spacing"])

	#Since all cuts are square, the offsets are more obvious than in the general linear case.
	# Lateral wall drift per layer caused by the kerf half-angle, and the total
	# width that must be cleared at the top of the block.
	taper = math.tan(math.radians(laser["kerf_angle"]/2)) * laser["z_spacing"]
	max_delta = math.tan(math.radians(laser["kerf_angle"]/2)) * (block["thickness"] + laser["z_final_overshoot"]) * 2

	cutlist = []
	cutlist.append(["a_abs", "0"])
	cutlist.append(["c_abs", str(block["physical_rotation"])])
	cutlist.append(["z_abs", str(block["thickness"])])

	for a in range(layers):
		# Start each layer at the core boundary, pushed outwards by the accumulated taper.
		x1, y1 = cut["final_dimension_x"]/2 + a*taper, cut["final_dimension_y"]/2 + a*taper
		while abs(x1 - cut["final_dimension_x"]/2) < abs(max_delta):
			# One racetrack pass: jump to the corner, then mark all four sides.
			cutlist.append(["jump", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])])
			cutlist.append(["mark", str(x1 + block["origin_x"]), str(-y1 + block["origin_y"])])
			cutlist.append(["mark", str(-x1 + block["origin_x"]), str(-y1 + block["origin_y"])])
			cutlist.append(["mark", str(-x1 + block["origin_x"]), str(y1 + block["origin_y"])])
			cutlist.append(["mark", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])])
			x1, y1 = x1 + laser["xy_spacing"], y1 + laser["xy_spacing"]
		# Step the focus down one layer; less width needs clearing as the cut narrows.
		cutlist.append(["z_step", str(-laser["z_spacing"])])
		max_delta = max_delta - taper
	return json.dumps(cutlist)
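A minimal sketch of exercising the function above, assuming simple_core (and its json/math imports) is defined in the same module. All numeric values here are made-up placeholders for illustration; only the dictionary keys are taken from the code itself.

import json

block = {"thickness": 1.0, "origin_x": 0.0, "origin_y": 0.0, "physical_rotation": 0}
cut = {"final_dimension_x": 5.0, "final_dimension_y": 5.0}
laser = {"z_spacing": 0.5, "kerf_angle": 8.0, "z_final_overshoot": 0.1, "xy_spacing": 0.1}

# simple_core returns the cutlist serialized as JSON; decode it to inspect the moves.
cutlist = json.loads(simple_core(block, cut, laser))

# Each racetrack pass is one "jump" plus four "mark" moves; "z_step" entries
# separate the layers.
passes = sum(1 for instr in cutlist if instr[0] == "jump")
steps = sum(1 for instr in cutlist if instr[0] == "z_step")
print(f"{len(cutlist)} instructions: {passes} racetrack passes over {steps} layers")

With these placeholder values the decoded cutlist is short: a few concentric rectangle passes per layer, separated by z_step moves, with the per-layer pass count shrinking as max_delta is reduced by the taper.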
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_traces_for_core(traces, traces_per_core, core_num):\n start = traces_per_core * core_num\n end = min(len(traces), traces_per_core * (core_num + 1))\n return traces[start:end]", "def minimize_core(s, core):\n for i in range(len(core)):\n new_core = core[:i] + core[i+1:]\n print \"trying \", new_core\n is_sat = s.check(new_core)\n print is_sat\n if is_sat == unsat:\n return minimize_core(s, list(s.unsat_core()))\n return core", "def drawcutline(f,layernamelist,cutline_entities_count): \r\n \r\n #layernamelist=[layernamelist[0]] \r\n layercount=0\r\n ringlist=[[[-0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]]]\r\n flashlist=buildflashlist()\r\n cutlineset=buildcutlineset() \r\n \r\n f.write(\"0\\nSECTION\\n2\\nENTITIES\\n\")\r\n \r\n for layername in layernamelist:\r\n layercount=layercount+1\r\n for polyline in cutlineset:\r\n cutline_entities_count=cutline_entities_count+1\r\n f.write(\"0\\nPOLYLINE\\n8\\n\"+layername+\"\\n5\\n\"+hex(cutline_entities_count)[2:]) # begin writing a polyline\r\n f.write(\"\\n66\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n40\\n0.08\\n41\\n0.08\\n\")\r\n cutline_entities_count=drawwidthpolyline(polyline, cutline_entities_count, f,layername)\r\n cutline_entities_count=drawring(ringlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawflash(flashlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawtext(cutline_entities_count, f, layername,layercount)\r\n \r\n return cutline_entities_count", "def get_lightcurves(\n self,\n pipeline=\"pdcsap\",\n cadence=\"short\",\n sectors=None,\n remove_outliers=False,\n quality_bitmask=None,\n ):\n if sectors is None:\n all_sectors = self.all_sectors\n else:\n all_sectors = sectors\n\n for n, sector in enumerate(all_sectors):\n if pipeline == \"pdcsap\":\n l = ShortCadence(\n ticid=self.ticid, sector=sector, verbose=False\n )\n lc = l.get_lc()\n else:\n errmsg = \"pdcsap is only currently available\"\n raise NotImplementedError(errmsg)\n\n if quality_bitmask == \"hard\":\n lc = lc[(lc.quality == 0) | np.isnan(lc.quality)]\n\n if remove_outliers:\n lc, mask = lc.remove_outliers(\n sigma_upper=3, sigma_lower=10, return_mask=True\n )\n\n if n == 0:\n lcs = lc.copy()\n else:\n lcs = lcs.append(lc)\n print(\n f\"{sector}: cdpp={lc.estimate_cdpp():.2f}, std={lc.flux.std():.2f}\"\n )\n\n lcs.sector = all_sectors\n return lcs", "def _makeAssemPatches(core):\n patches = []\n\n if isinstance(core.spatialGrid, grids.HexGrid):\n nSides = 6\n elif isinstance(core.spatialGrid, grids.ThetaRZGrid):\n raise TypeError(\n \"This plot function is not currently supported for ThetaRZGrid grids.\"\n )\n else:\n nSides = 4\n\n pitch = core.getAssemblyPitch()\n for a in core:\n x, y, _ = a.spatialLocator.getLocalCoordinates()\n if nSides == 6:\n assemPatch = 
matplotlib.patches.RegularPolygon(\n (x, y), nSides, pitch / math.sqrt(3), orientation=math.pi / 2.0\n )\n elif nSides == 4:\n # for rectangle x, y is defined as sides instead of center\n assemPatch = matplotlib.patches.Rectangle(\n (x - pitch[0] / 2, y - pitch[1] / 2), *pitch\n )\n else:\n raise ValueError(f\"Unexpected number of sides: {nSides}.\")\n patches.append(assemPatch)\n return patches", "def convert_string_into_chip_and_core_subset(cores):\n ignored_cores = CoreSubsets()\n if cores is not None and cores != \"None\":\n for downed_core in cores.split(\":\"):\n x, y, processor_id = downed_core.split(\",\")\n ignored_cores.add_processor(int(x), int(y), int(processor_id))\n return ignored_cores", "def get_core_bonds(core_xyz, inp):\n core_bonds = []\n\n if inp.core_en:\n dists = cdist(core_xyz, core_xyz)\n if inp.core_shape != \"shell\":\n logger.info(\"\\tBuilding elastic network based on first neighbors...\")\n close_dists = dists <= (2*inp.bead_radius+0.01)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = np.where(close_dists[i])[0]\n if len(close_ndxs) == 1:\n dists_sorted = np.argsort(dists[i])\n close_ndxs = dists_sorted[[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n core_bonds.append([ndx1, ndx2])\n\n else:\n logger.info(\"\\tBuilding elastic network based on six nearest neighbours and one farthest neighbour...\")\n neighboring_bonds = []\n antipodal_bonds = []\n dists_sorted = np.argsort(dists, axis=1)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = dists_sorted[i,[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n neighboring_bonds.append([ndx1, ndx2])\n antipodal_ndx = dists_sorted[i,-1]\n if antipodal_ndx != i and [ndx1, antipodal_ndx] not in core_bonds and [antipodal_ndx, ndx1] not in core_bonds:\n antipodal_bonds.append([ndx1, antipodal_ndx, \"antipodal\"])\n core_bonds = neighboring_bonds + antipodal_bonds\n\n return core_bonds", "def setup(self, core) :\n # We need direct access to the core\n self.core = core\n # Validate a square quarter core. (Not applicable to 1/2 or 1/8)\n assert(len(self.core.stencil[0,:])==len(self.core.stencil[:,0]))\n # Core size per dimension.\n self.dimension = len(self.core.stencil[0,:])\n # Assembly boundaries\n self.widths = np.zeros(self.dimension+1)\n self.widths[:] = self.core.width\n self.widths[0] = 0.5 * self.core.width\n # Subdivisions. 
Not really used.\n self.subdivisions = np.ones(self.dimension,dtype='i')\n # Peaking factor map\n self.peaking_map = np.zeros((self.dimension, self.dimension))\n self.peaking = np.zeros(len(self.core.assemblies))\n # Create the static top part of the LABAN-PEL input\n self.make_input_top()", "def test_find_core():\n\n # Working examples from the original paper by Lynch and Willett.\n smiles = '>>'.join([\n 'CC(=O)CC(C)C(CC#N)C(=O)N',\n 'CC(=O)CC(C)C(CC#N)C#N'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == 'NC=O>>C#N'\n\n smiles = '>>'.join([\n 'NC(=O)C(c1ccccc1)(c1ccccc1)NS(=O)(=O)c1ccccc1',\n 'O=C(O)C(NS(=O)(=O)c1ccccc1)(c1ccccc1)c1ccccc1'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == 'NC=O>>O=CO'\n\n smiles = '>>'.join([\n 'CCC(c1cc(OC)c(OC(C)=O)cc1Cc1ccc(OC(C)=O)c(OC)c1)C(C)OC(C)=O',\n 'CCC(c1cc(OC)c(OC(C)=O)cc1C(=O)c1ccc(OC(C)=O)c(OC)c1)C(C)OC(C)=O'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == 'cCc>>cC(c)=O'\n\n smiles = '>>'.join([\n 'ON(O)c1ccccc1S(=O)(=O)N(C)c1ccccc1',\n 'Nc1ccccc1S(=O)(=O)N(C)c1ccccc1'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == 'cN(O)O>>cN'\n\n smiles = '>>'.join([\n 'COc1ccc2c(c1)sc1c2CCC2C1=CCC2O',\n 'COc1ccc2c(c1)sc1c2CCC2C1CCC2O'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == 'C=CC>>CCC'\n\n # Problematic reactions, violating algorithm assumption---no atom should\n # be deleted; also from the paper by Lynch and Willett.\n #\n #\n # Ambiguous mapping.\n #\n # RDKit (2013_09_01) is having problem with this SMILES so we are skipping\n # them for now.\n #smiles = '>>'.join([\n # 'c1ccc2c(c1)C=Cc1ccccc1NN2',\n # 'CN1Nc2ccccc2C=Cc2c1cccc2'\n #])\n #rxn = Reaction(smiles)\n #assert rxn.find_core == smiles\n\n smiles = '>>'.join([\n 'N=C1ON=C2CCCCC12',\n 'Nc1onc2c1CCCC2'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == smiles\n\n # Match radius is to small.\n smiles = '>>'.join([\n 'CC(C)(O)C1CCCO1',\n 'CC1=COCCC1C'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == smiles\n\n smiles = '>>'.join([\n 'O=CC1C=CCC=C1',\n 'O=CC1=CCCC=C1'\n ])\n rxn = Reaction(smiles)\n assert rxn.find_core() == smiles", "def polyCut(*args, caching: bool=True, constructionHistory: bool=True, cutPlaneCenter:\n Union[List[float, float, float], bool]=None, cutPlaneCenterX: Union[float,\n bool]=0.0, cutPlaneCenterY: Union[float, bool]=0.0, cutPlaneCenterZ: Union[float,\n bool]=0.0, cutPlaneHeight: Union[float, bool]=0.0, cutPlaneRotate: Union[List[float,\n float, float], bool]=None, cutPlaneRotateX: Union[float, bool]=0.0,\n cutPlaneRotateY: Union[float, bool]=0.0, cutPlaneRotateZ: Union[float, bool]=0.0,\n cutPlaneSize: Union[List[float, float], bool]=None, cutPlaneWidth: Union[float,\n bool]=0.0, cuttingDirection: AnyStr=\"\", deleteFaces: bool=False, extractFaces:\n bool=False, extractOffset: Union[List[float, float, float], bool]=None,\n extractOffsetX: Union[float, bool]=0.0, extractOffsetY: Union[float, bool]=0.0,\n extractOffsetZ: Union[float, bool]=0.0, name: AnyStr=\"\", nodeState: Union[int,\n bool]=0, onObject: bool=True, worldSpace: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def GetSubContoursByFrame(watershed, allValsByFrame):\n scListByFrame = []\n for frame in range(len(watershed)):\n scList = []\n for v in allValsByFrame[frame]:\n boundingRect = ImageContour.GetBoundingRect(watershed[frame], v)\n # No longer needed: #contour,turns,vals = ImageContour.GetContour(watershed[0],v,boundingRect=boundingRect,byNeighbor=True)\n (\n perimeterVals,\n 
perimeterList,\n scPoints,\n ) = ImageContour.GetPerimeterByNeighborVal(\n watershed[frame], v, boundingRect=boundingRect, getSubContours=True\n )\n scPointsAdj = [\n (np.array(scp) + [boundingRect[0][0], boundingRect[1][0]]).tolist()\n for scp in scPoints\n ] # Will need to - 0.5 to line up on an overlay\n if len(perimeterList) > 0:\n scList += [\n SubContour(\n points=scPointsAdj[i],\n numPoints=len(scPointsAdj[i]),\n adjusted_length=perimeterList[i],\n values=tuple(sorted([v, perimeterVals[i]])),\n startPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][0]\n ),\n endPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][-1]\n ),\n )\n for i in range(len(perimeterVals))\n ]\n scList.sort(key=lambda x: x.values)\n for i in range(len(scList) - 1, 0, -1):\n # if 2 subcoutours are the same, keep only the one with the minimum length computation\n if scList[i - 1].values == scList[i].values:\n scList[i - 1].adjusted_length = min(\n scList[i - 1].adjusted_length, scList[i].adjusted_length\n )\n del scList[i]\n scListByFrame.append(scList)\n return scListByFrame", "def make_start_moves(self):\n self.geos = Geos([])\n\n if g.config.machine_type == 'drag_knife':\n self.make_swivelknife_move()\n return\n\n # Get the start rad. and the length of the line segment at begin.\n start_rad = self.shape.parentLayer.start_radius\n\n # Get tool radius based on tool diameter.\n tool_rad = self.shape.parentLayer.getToolRadius()\n\n # Calculate the starting point with and without compensation.\n start = self.start\n angle = self.angle\n\n if self.shape.cut_cor == 40:\n self.append(RapidPos(start))\n \n elif self.shape.cut_cor != 40 and not g.config.vars.Cutter_Compensation[\"done_by_machine\"]:\n\n toolwidth = self.shape.parentLayer.getToolRadius()\n offtype = \"in\" if self.shape.cut_cor == 42 else \"out\"\n offshape = offShapeClass(parent = self.shape, offset = toolwidth, offtype = offtype)\n\n if len(offshape.rawoff) > 0:\n start, angle = offshape.rawoff[0].get_start_end_points(True, True)\n\n self.append(RapidPos(start))\n self.geos += offshape.rawoff\n\n # Cutting Compensation Left\n elif self.shape.cut_cor == 41:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle + pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle + pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. 
and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=1)\n self.append(start_rad)\n\n # Cutting Compensation Right\n elif self.shape.cut_cor == 42:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle - pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle - pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=0)\n self.append(start_rad)", "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def set_core(self, core):\n self.core = core\n mask = 1 << core\n self.cmd += ' --cores {}'.format(str(hex(mask)))", "def cutPoly(self,geom,startPt,endPt,debug=False):\r\n #if we have disjoint Multi geometry as geom to split we need to iterate over its parts\r\n splittedGeoms=[]\r\n leftFragments=[]\r\n rightFragments=[]\r\n #if self.debug: print \"Number of geoms when slicing: \",str(len(geom.asGeometryCollection()))\r\n for geomPart in geom.asGeometryCollection():\r\n #split the actual part by cut line defined by startPt,endPt\r\n (res,splittedGeomsPart,topo)=geomPart.splitGeometry([startPt,endPt],False)\r\n 
splittedGeoms+=splittedGeomsPart\r\n #Add the remaining geomPart to the rightFragments or letfFragments\r\n #depending on distance\r\n d=self.signedDistCentroidFromLine(geomPart,startPt,endPt)\r\n if d>0:\r\n rightFragments.append(geomPart)\r\n else:\r\n leftFragments.append(geomPart)\r\n #if self.debug: print j,splittedGeoms\r\n\r\n for fragment in splittedGeoms:\r\n \"\"\"\r\n calculate signed distance of centroid of fragment and the splitline\r\n if signed distance is below zero, the point is to the left of the line\r\n if above zero the point is to the right of the line\r\n \"\"\"\r\n d=self.signedDistCentroidFromLine(fragment,startPt,endPt)\r\n #if debug==True:\r\n #if self.debug: print d\r\n\r\n if d>0:\r\n rightFragments.append(fragment)\r\n else:\r\n leftFragments.append(fragment)\r\n\r\n #if self.debug: print \"Left frags:\",len(leftFragments),\"Right frags:\",len(rightFragments)\r\n leftGeom=self.buildMultiPolygon(leftFragments)\r\n rightGeom=self.buildMultiPolygon(rightFragments)\r\n return leftGeom,rightGeom", "def split2(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):\r\n minMarker = localMin(eccMap, cutStep)\r\n\r\n connectivity = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\r\n\r\n newLabel = sm.watershed(eccMap, minMarker, connectivity=connectivity, mask=self.array)\r\n\r\n border = ni.binary_dilation(self.array).astype(np.int8) - self.array\r\n\r\n for i in range(1, np.amax(newLabel) + 1):\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[newLabel == i] = 1\r\n currBorder = ni.binary_dilation(currArray).astype(np.int8) - currArray\r\n border = border + currBorder\r\n\r\n border[border > 1] = 1\r\n border = sm.skeletonize(border)\r\n\r\n if borderWidth > 1:\r\n border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)\r\n\r\n newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))\r\n\r\n labeledNewPatchMap, patchNum = ni.label(newPatchMap)\r\n\r\n # if patchNum != np.amax(newLabel):\r\n # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)\r\n # raise ValueError, \"Number of patches after splitting does not equal to number of local minimum!\"\r\n\r\n newPatchDict = {}\r\n\r\n for j in range(1, patchNum + 1):\r\n\r\n currPatchName = patchName + '.' 
+ str(j)\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[labeledNewPatchMap == j] = 1\r\n currArray = currArray * self.array\r\n\r\n if np.sum(currArray[:]) > 0:\r\n newPatchDict.update({currPatchName: Patch(currArray, self.sign)})\r\n\r\n if isplot:\r\n plt.figure()\r\n plt.subplot(121)\r\n plt.imshow(self.array, interpolation='nearest')\r\n plt.title(patchName + ': before split')\r\n plt.subplot(122)\r\n plt.imshow(labeledNewPatchMap, interpolation='nearest')\r\n plt.title(patchName + ': after split')\r\n\r\n return newPatchDict", "def _make_core_universe(self):\n # Goes through and actually autofills the positions\n\n lattice = TemplatedLattice(lattice_id=c.ROOT_LATTICE, name='Core Lattice')\n lattice.setTemplate(self.core_lattice_template)\n lattice.pitch = [c.latticePitch, c.latticePitch]\n lattice.lower_left = [-3.0 * c.latticePitch / 2.0, -3.0 * c.latticePitch / 2.0]\n for pos in self.core_positions:\n wgt = self.fuel_wgt_positions[pos]\n boron = self.fuel_ppm_positions[pos]\n # Things are simple when you are just putting in 9 identical fuel elements\n lattice.setPosition(pos, self.elements.u_fuel_p[wgt, boron])\n lattice.finalize()\n\n self.u_coreLattice = lattice", "def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet", "def GetContourValuesLengthsAndSubContoursAndOrderOfSubContoursByFrame(\n watershed, allValsByFrame\n):\n scListByFrame, orderOfSCsByValueByFrame = GetSubContoursAndOrderingByFrame(\n watershed, allValsByFrame\n )\n cVLSByFrame = [[sc.cVLS() for sc in scList] for scList in scListByFrame]\n return cVLSByFrame, 
orderOfSCsByValueByFrame\n\n ## NOT NEEDED! KEEPING FOR REFERENCE!\n # for i in range(len(cVLS)-1,0,-1):\n # for j in range(i-1,-1,-1): # Loop backwards through the sorted list of cvls's... if the value pair matches, check the endpoints (they will always be reversed for adjacent regions (always go ccw...))\n # if cVLS[i][0]!=cVLS[j][0]: # once we no longer match the value pair, we know there are no more matches in the list...\n # break\n # ######## VERIFY THIS ACTUALLY WORKS THE SAME WAY!!!\n # elif (cVLS[i][2][-1],cVLS[i][2][0]]) == (cVLS[j][2][0],cVLS[j][2][-1]): # if 2 subcoutours are the same,\n # if cVLS[j][1]>cVLS[i][1]:\n # cVLS[j],cVLS[i] = cVLS[i],cVLS[j] #swap!\n # shortest = min(cVLS[j][1],cVLS[i][1]) # keep only the one with the minimum length computation\n #\n # cVLS[j][1] = shortest\n # del(cVLS[i])\n # break", "def get_core(evth, ncore=0):\n x, y = (evth[98 + ncore], evth[118 + ncore])\n return (x, y, sqrt((x * x) + (y * y)))", "def routine(core):\n return Routine(core)", "def chercherChemin(self):\n\n \n liste=self._circuit.vue(self.x,self.y,self.rayonVision)\n \n listeSuppr=[]\n couche_vehicule= self._circuit.Couche_vehicules\n \n for case in liste :\n #on élimine les cases infranchissbles les cases qui ne sont pas sur le chemin à suivre \n\n if self._circuit.numeroWayPoint(case[0],case[1])==0 or ( self._circuit.numeroWayPoint(self.x,self.y)!=self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])<= self._circuit.numeroWayPoint(self.x,self.y)) or( self._circuit.numeroWayPoint(case[0],case[1])>= 5*self._circuit.numeroWayPoint(self.x,self.y) and self._circuit.numeroWayPoint(self.x,self.y)!=0) or ( self._circuit.numeroWayPoint(self.x,self.y)==self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])== self._circuit.numeroWayPoint(self.x,self.y)) or self._circuit.plateau[case[1],case[0],couche_vehicule]!=None:#on élimine les points derrière\n \n listeSuppr.append(case)\n\n \n for case in listeSuppr:\n \n liste.remove(case)\n \n if len(liste)>=1:\n l=liste[0]\n\n for nour in liste :\n \n if distance((self.x,self.y),(l[0],l[1])) > distance((self.x,self.y),(nour[0],nour[1])):\n l=nour\n pasx=0\n pasy=0\n if self.x<l[0] : \n pasx=1\n elif self.x>l[0] :\n pasx=-1\n if self.y<l[1] : \n pasy=1\n elif self.y>l[1] :\n pasy=-1\n debug.dprint(\" id {} {}:({},{}) Waypoint {} Point:({},{}) WayPoint {} vitesse :{} reservoir:{}\".format(self.id,self.typeV,self.x,self.y,self._circuit.numeroWayPoint(self.x,self.y),l[0],l[1],self._circuit.numeroWayPoint(l[0],l[1]),self.vitesse,self.reservoir))\n self.orientation=atan2(pasy,pasx)\n\n self.vitesse=1\n\n debug.dprint(self) \n \n super().deplacer()\n \n\n self.rayonVision=4\n else :# on augemente le rayon de vision au cas ou toutes les cases sont occupées ou non franchissables\n self.rayonVision*=3", "def generateCutList(cut_configuration):\r\n\t#Check that this line reads json.loads(cut_configuration)\r\n\tinput_json = json.load(cut_configuration)\r\n\r\n\t#Currently only desired_cut and laser_cut_config are required\r\n\ttry:\r\n\t\tblock = input_json[\"block\"]\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\tcut = input_json[\"desired_cut\"]\r\n\t\tlaser = input_json[\"laser_cut_config\"]\r\n\texcept:\r\n\t\traise Exception(\"Either desired_cut or laser_cut_config not provided\")\r\n\r\n\tif cut[\"cut_process\"] == \"line\":\r\n\t\tfinal_list = line(cut[\"x1\"],cut[\"y1\"],cut[\"x2\"],cut[\"y2\"],cut[\"final_dimension_z\"]+laser[\"z_final_overshoot\"],laser)\r\n\telif cut[\"cut_process\"] == 
\"simple_core\":\r\n\t\tfinal_list = simple_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"vertical_core\":\r\n\t\tfinal_list = vertical_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"oss_stacked\":\r\n\t\tfinal_list = oss_stacked(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"z_focus\":\r\n\t\tfinal_list = z_focus(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"cross\":\r\n\t\tfinal_list = cross(block,cut,laser)\r\n\telse:\r\n\t\traise Exception(\"No such cut exists: Check cut_process\")\r\n\t#print(time_taken(final_list, laser))\r\n\tnow = datetime.now()\r\n\ttimestamp = str(now.strftime(\"%m-%d_%H_%M\"))\r\n\tcomplete_name = os.path.join(save_path, timestamp+\".csv\")\r\n\twith open(complete_name, mode='w',newline ='') as test_data:\r\n\t data_writer = csv.writer(test_data, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\t list_data = json.loads(final_list)\r\n\t for line1 in list_data:\r\n\t \tdata_writer.writerow(line1)\r\n\treturn final_list", "def vertical_core(block,cut,laser):\r\n\r\n\tlayers = int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\ttaper = math.tan(angle) * laser[\"z_spacing\"]\r\n\r\n\tu = math.tan(2 * angle) * (block[\"thickness\"] + laser[\"z_final_overshoot\"])\r\n\tz_0 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 - block[\"origin_y\"] + u)\r\n\tz_1 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 + block[\"origin_x\"] + u)\r\n\tz_2 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 + block[\"origin_y\"] + u)\r\n\tz_3 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 - block[\"origin_x\"] + u)\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", f\"{math.degrees(angle):.6f}\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", f\"{z_0:.6f}\"])\r\n\r\n\ty_start_wide = ((u + cut[\"final_dimension_x\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\ty_start_length = ((u + cut[\"final_dimension_y\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\r\n\tdepth_cut = (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * math.cos(angle)/math.cos(2*angle)\r\n\r\n\tcut1 = json.loads(line(block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],-block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut2 = json.loads(line(block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],-block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],depth_cut,laser))\r\n\r\n\tcut3 = json.loads(line(block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],-block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut4 = json.loads(line(block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],-block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],depth_cut,laser))\r\n\r\n\t#cut1 = json.loads(line(block[\"width\"]/2,y_start_length,-block[\"width\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut2 = json.loads(line(block[\"length\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\t#cut3 = 
json.loads(line(block[\"width\"]/2,y_start_length,-cut[\"final_dimension_x\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut4 = json.loads(line(cut[\"final_dimension_y\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\tcutlist = (cutlist + cut1\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_1:.6f}\"],] \r\n\t + cut2\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_2:.6f}\"]] \r\n\t\t\t\t\t + cut3 \r\n\t\t\t\t\t + [[\"z_abs\", f\"{z_3:.6f}\"],[\"c_rel\", \"90\"]] \r\n\t\t\t\t\t + cut4)\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\r\n\treturn json.dumps(cutlist)", "def include_cut_poly_array(self, planes, fix_pts):\n planeActors = []\n\n for i in range(3):\n # get plane coefficients\n a = planes[i][0]\n b = planes[i][1]\n c = planes[i][2]\n\n # create vtk plane object\n VTKplane = vtk.vtkPlane()\n VTKplane.SetNormal(a, b, c)\n if fix_pts[0] == 'var': # for variability test\n VTKplane.SetOrigin(self.epi_apex_node)\n else: # for foreshortening test\n VTKplane.SetOrigin(fix_pts[1+i])\n\n # create cutter\n cutEdges = vtk.vtkCutter()\n cutEdges.SetInputData(self.endo_poly) # always cut through endo\n cutEdges.SetCutFunction(VTKplane)\n cutEdges.GenerateCutScalarsOn()\n cutEdges.GenerateTrianglesOn()\n cutEdges.SetValue(0, 0.5)\n\n # create strips # just for output purposes\n cutStrips = vtk.vtkStripper()\n cutStrips.SetInputConnection(cutEdges.GetOutputPort())\n cutStrips.Update()\n\n # get polydata from strips (just for output purposes)\n cutPoly = vtk.vtkPolyData()\n cutPts = cutStrips.GetOutput().GetPoints()\n cutPoly.SetPoints(cutPts)\n cutPoly.SetPolys(cutStrips.GetOutput().GetLines())\n\n cutterMapper = vtk.vtkPolyDataMapper()\n cutterMapper.SetInputConnection(cutEdges.GetOutputPort())\n cutterMapper.ScalarVisibilityOff()\n\n # create plane actor\n planeActor = vtk.vtkActor()\n planeActor.SetMapper(cutterMapper)\n planeActor.GetProperty().SetColor(self.plane_colors[i])\n planeActor.GetProperty().SetLineWidth(6)\n\n # store the actors of the specific planes to add later into 1 renderer\n planeActors.append(planeActor)\n\n return planeActors", "def profile(n_shells, sld_solvent, sld, thickness,\n interface, shape, nu, n_steps):\n\n n_shells = int(n_shells + 0.5)\n n_steps = int(n_steps + 0.5)\n z = []\n rho = []\n z_next = 0\n # two sld points for core\n z.append(z_next)\n rho.append(sld[0])\n\n for i in range(0, n_shells):\n z_next += thickness[i]\n z.append(z_next)\n rho.append(sld[i])\n dz = interface[i]/n_steps\n sld_l = sld[i]\n sld_r = sld[i+1] if i < n_shells-1 else sld_solvent\n fun = SHAPE_FUNCTIONS[int(np.clip(shape[i], 0, len(SHAPE_FUNCTIONS)-1))]\n for step in range(1, n_steps+1):\n portion = fun(float(step)/n_steps, max(abs(nu[i]), 1e-14))\n z_next += dz\n z.append(z_next)\n rho.append((sld_r - sld_l)*portion + sld_l)\n z.append(z_next*1.2)\n rho.append(sld_solvent)\n # return sld profile (r, beta)\n return np.asarray(z), np.asarray(rho)", "def fullcore_detectors():\n\n cwd = os.getcwd()\n fname = get_sample_data('%s/oecd-fullcore_geom1.png' % (cwd))\n im = plt.imread(fname)\n\n # crop the image\n height, width, color = np.shape(im)\n y1 = int(height*0.15)\n y2 = int(height*0.6)\n x1 = int(width*0.45)\n x2 = int(width)\n plt.imshow(im[y1:y2,x1:x2,:])\n plt.axis('off')\n\n # Axial 1\n x = 158\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='1- Axial1')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n 
plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='1', fontsize=20, color='w')\n\n # Axial 2\n x = 210\n y = 321\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='2- Axial2')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.text(x=x+37, y=y+40, s='2', fontsize=20, color='w')\n\n # Axial 3\n x = 262\n y = 291\n P = 55\n s = P/2/np.cos(np.pi/6)\n plt.plot([s+x, 2*s+x], [0+y, 0+y], 'r-', lw=1.5, label='3- Axial3')\n plt.plot([s+x, 2*s+x], [P+y, P+y], 'r-', lw=1.5)\n plt.plot([s+x, s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([s/2+x, s+x], [P/2+y, P+y], 'r-', lw=1.5)\n plt.plot([2*s+x, 2*s+s/2+x], [0+y, P/2+y], 'r-', lw=1.5)\n plt.plot([2*s+s/2+x, 2*s+x], [P/2+y, P+y], 'r-', lw=1.5)\n\n plt.text(x=x+37, y=y+40, s='3', fontsize=20, color='w')\n\n # Radial 1\n x = 52\n y = 349\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='4- Radial1')\n plt.plot([x, 495+x], [y, y], 'r-', lw=1.5, label='5- Radial2')\n plt.text(x=x+380, y=y-10, s='4, 5', fontsize=20, color='black')\n\n # Radial 2\n x = 52\n y = 349\n L = 495\n plt.plot([x, L*np.cos(np.pi/6)+x], [y, -L/2+y], 'r-', lw=1.5, label='6- Radial3')\n plt.text(x=350, y=y-200, s='6', rotation=30, fontsize=20, color='black')\n plt.legend(loc='best')\n\n plt.savefig(\"oecd-fullcore-detectors\", dpi=300, bbox_inches=\"tight\")", "def tracers(traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2,\n interpolation = 'weighted', trace_sub = 1, intQ = [''], varfile = 'VAR0',\n ti = -1, tf = -1,\n integration = 'simple', datadir = 'data/', destination = 'tracers.dat', nproc = 1):\n\n # returns the tracers for the specified starting locations\n def subTracers(q, vv, p, tracers0, iproc, hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, \n interpolation = 'weighted', integration = 'simple', intQ = ['']):\n \n tracers = tracers0\n mapping = np.zeros((tracers.shape[0], tracers.shape[1], 3))\n \n for ix in range(tracers.shape[0]):\n for iy in range(tracers.shape[1]):\n xx = tracers[ix, iy, 2:5].copy()\n s = pc.stream(vv, p, interpolation = interpolation, integration = integration, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, xx = xx)\n tracers[ix, iy, 2:5] = s.tracers[s.sl-1]\n tracers[ix, iy, 5] = s.l\n if (any(intQ == 'curlyA')):\n for l in range(s.sl-1):\n aaInt = pc.vecInt((s.tracers[l+1] + s.tracers[l])/2, aa, p, interpolation)\n tracers[ix, iy, 6] += np.dot(aaInt, (s.tracers[l+1] - s.tracers[l]))\n \n # create the color mapping\n if (tracers[ix, iy, 4] > grid.z[-2]):\n if (tracers[ix, iy, 0] - tracers[ix, iy, 2]) > 0:\n if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0:\n mapping[ix, iy, :] = [0,1,0]\n else:\n mapping[ix, iy, :] = [1,1,0]\n else:\n if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0:\n mapping[ix, iy, :] = [0,0,1]\n else:\n mapping[ix, iy, :] = [1,0,0]\n else:\n mapping[ix, iy, :] = [1,1,1]\n \n q.put((tracers, mapping, iproc))\n \n \n # multi core setup\n if (np.isscalar(nproc) == False) or (nproc%1 != 0):\n print(\"error: invalid processor number\")\n return -1\n queue = mp.Queue()\n \n # read the data\n # make sure to read the var files with the correct 
magic\n if (traceField == 'bb'):\n magic = 'bb'\n if (traceField == 'jj'):\n magic = 'jj'\n if (traceField == 'vort'):\n magic = 'vort'\n \n # convert intQ string into list\n if (isinstance(intQ, list) == False):\n intQ = [intQ]\n intQ = np.array(intQ)\n \n grid = pc.read_grid(datadir = datadir, trim = True, quiet = True) \n dim = pc.read_dim(datadir = datadir) \n tol2 = tol**2\n \n # check if user wants a tracer time series\n if ((ti%1 == 0) and (tf%1 == 0) and (ti >= 0) and (tf >= ti)):\n series = True\n n_times = tf-ti+1\n else:\n series = False\n n_times = 1\n \n tracers = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), n_times, 6+len(intQ)])\n mapping = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), n_times, 3])\n t = np.zeros(n_times)\n \n for tIdx in range(n_times):\n if series:\n varfile = 'VAR' + str(tIdx)\n \n # read the data\n var = pc.read_var(varfile = varfile, datadir = datadir, magic = magic, quiet = True, trimall = True) \n grid = pc.read_grid(datadir = datadir, quiet = True, trim = True)\n t[tIdx] = var.t\n \n # extract the requested vector traceField\n vv = getattr(var, traceField)\n if (any(intQ == 'curlyA')):\n aa = var.aa\n \n # initialize the parameters\n p = pc.pClass()\n p.dx = var.dx; p.dy = var.dy; p.dz = var.dz\n p.Ox = var.x[0]; p.Oy = var.y[0]; p.Oz = var.z[0]\n p.Lx = grid.Lx; p.Ly = grid.Ly; p.Lz = grid.Lz\n p.nx = dim.nx; p.ny = dim.ny; p.nz = dim.nz\n \n # initialize the tracers\n for ix in range(int(trace_sub*dim.nx)):\n for iy in range(int(trace_sub*dim.ny)):\n tracers[ix, iy, tIdx, 0] = grid.x[0] + int(grid.dx/trace_sub)*ix\n tracers[ix, iy, tIdx, 2] = tracers[ix, iy, tIdx, 0]\n tracers[ix, iy, tIdx, 1] = grid.y[0] + int(grid.dy/trace_sub)*iy\n tracers[ix, iy, tIdx, 3] = tracers[ix, iy, tIdx, 1]\n tracers[ix, iy, tIdx, 4] = grid.z[0]\n \n # declare vectors\n xMid = np.zeros(3)\n xSingle = np.zeros(3)\n xHalf = np.zeros(3)\n xDouble = np.zeros(3)\n \n tmp = []\n subTracersLambda = lambda queue, vv, p, tracers, iproc: \\\n subTracers(queue, vv, p, tracers, iproc, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol,\n interpolation = interpolation, integration = integration, intQ = intQ)\n proc = []\n for iproc in range(nproc):\n proc.append(mp.Process(target = subTracersLambda, args = (queue, vv, p, tracers[iproc::nproc,:,tIdx,:], iproc)))\n for iproc in range(nproc):\n proc[iproc].start()\n for iproc in range(nproc):\n tmp.append(queue.get())\n for iproc in range(nproc):\n proc[iproc].join()\n for iproc in range(nproc):\n tracers[tmp[iproc][2]::nproc,:,tIdx,:], mapping[tmp[iproc][2]::nproc,:,tIdx,:] = (tmp[iproc][0], tmp[iproc][1])\n for iproc in range(nproc):\n proc[iproc].terminate()\n \n tracers = np.copy(tracers.swapaxes(0, 3), order = 'C')\n if (destination != ''):\n f = open(datadir + destination, 'wb')\n f.write(np.array(trace_sub, dtype = 'float32'))\n # write tracers into file\n for tIdx in range(n_times):\n f.write(t[tIdx].astype('float32'))\n f.write(tracers[:,:,tIdx,:].astype('float32'))\n f.close()\n \n tracers = tracers.swapaxes(0, 3)\n tracers = tracers.swapaxes(0, 1)\n mapping = mapping.swapaxes(0, 1)\n\n return tracers, mapping, t", "def main():\n # Handle CLI.\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--molecule\", type=str, default=\"H2\", help=\"the \"\n \"UCCSD molecule to perform HPO on\")\n parser.add_argument(\"--slice-index\", type=int, default=-1, help=\"the \"\n \"slice to perform HPO on, do not specify to run HPO \"\n \"on the full circuit\")\n parser.add_argument(\"--core-count\", type=int, 
default=1, help=\"the \"\n \"number of cpu cores this run may use\")\n args = vars(parser.parse_args())\n molecule = args[\"molecule\"]\n slice_index = args[\"slice_index\"]\n core_count = args[\"core_count\"]\n\n # Generate the state object that encapsulates the optimization for the circuit.\n state = ProcessState(molecule, slice_index)\n\n # Redirect everything the central process puts out to a log file.\n # By default, ray redirects the stdout of each worker process\n # to the central process.\n log_file = state.file_name + \".log\"\n log_file_path = os.path.join(state.data_path, log_file)\n with open(log_file_path, \"a+\") as log:\n sys.stdout = sys.stderr = log\n\n # Display run characteristics.\n print(\"PID={}\\nWALL_TIME={}\\nSLICE_INDEX={}\\nPULSE_TIME={}\\n\"\n \"(LR_LB, LR_UB)=({}, {})\\n(DECAY_LB, DECAY_UB)=({}, {})\\n\"\n \"CORE_COUNT={}\\n{}\"\n \"\".format(os.getpid(), time.time(), state.slice_index,\n state.pulse_time, LR_LB, LR_UB, DECAY_LB, DECAY_UB, \n core_count, state.circuit))\n\n # Define the search space on the parameters: learning rate and\n # learning rate decay.\n space = {\n \"lr\": hp.loguniform(\"lr\", np.log(LR_LB), np.log(LR_UB)),\n \"decay\": hp.uniform(\"decay\", DECAY_LB, DECAY_UB),\n }\n \n # We want to minimize QOC error/loss, i.e. we want to maximize\n # negative loss.\n algo = ray.tune.suggest.HyperOptSearch(space, max_concurrent=core_count,\n reward_attr=\"neg_loss\")\n run_config = {\n \"num_samples\": HPO_MAX_ITERATIONS,\n \"name\": state.file_name,\n \"loggers\": [ray.tune.logger.NoopLogger],\n \"search_alg\": algo,\n \"verbose\": 1,\n \"local_dir\": state.data_path,\n \"resume\": True,\n }\n \n # Ray cannot serialize python objects in its object store,\n # so we have to pass the state in a lambda wrapper.\n objective_wrapper = lambda config, reporter: objective(state, config,\n reporter)\n \n # Start ray and run HPO.\n ray.init(num_cpus=core_count, object_store_memory=OBJECT_STORE_MEMORY,\n redis_max_memory=REDIS_MAX_MEMORY)\n ray.tune.register_trainable(\"lambda_id\", objective_wrapper)\n ray.tune.run(\"lambda_id\", **run_config)", "def identify_lipid_leaflets_legacy(pts,vec,monolayer_cutoff,\n\tmonolayer_cutoff_retry=True,max_count_asymmetry=0.05,pbc_rewrap=True,\n\ttopologize_tolerance=None,topologize_time_limit=30):\n\t#---previous default was somewhat high, but typically came in from specs, and we reduced it incrementally\n\tif monolayer_cutoff==None: monolayer_cutoff = 2.0\n\t#---time limit on the tolerance checker\n\ttry:\n\t\twith time_limit(topologize_time_limit): \n\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t**({'tol':topologize_tolerance} if topologize_tolerance else {}))\n\texcept TimeoutException: \n\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. '\n\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\twrapper = np.zeros((len(pts),3))\n\tfindframe = pts + wrapper*np.array(vec)\n\tstatus('this step is somewhat slow. 
it uses scipy.spatial.pdist.',tag='warning')\n\tpd = [scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(findframe[:,d:d+1])) \n\t\tfor d in range(3)]\n\tif pbc_rewrap:\n\t\tpd3pbc = np.sqrt(np.sum(np.array([pd[d]-(pd[d]>vec[d]/2.)*vec[d]+(pd[d]<-1*vec[d]/2.)*vec[d] \n\t\t\tfor d in range(3)])**2,axis=0))\n\telse: pd3pbc = pd\n\tnbors = np.transpose(np.where(pd3pbc<monolayer_cutoff))\n\tnlipids = len(pts)\n\timono = np.zeros(nlipids)\n\tnlist = []\n\tfor i in range(nlipids):\n\t\tstatus('cataloging lipids',i=i,looplen=nlipids,tag='compute')\n\t\tnlist.append(nbors[np.where(nbors[:,0]==i)[0],1])\n\tiref = 0\n\tmono = np.zeros(nlipids)\n\tsearched = np.zeros(nlipids)\n\timono[iref],searched[iref] = 1,1\n\timono[nlist[iref]] = 1\n\twhile np.any(np.all((imono==1,searched==0),axis=0)):\n\t\tfor iref in np.where(np.all((imono==1,searched==0),axis=0))[0]: \n\t\t\timono[nlist[iref]] = 1\n\t\t\tsearched[iref] = 1\n\t#---check that the leaflets were properly distinguished by looking at the number in each monolayer\n\tif np.mean(imono)==0.5: \n\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\treturn imono\n\telif (monolayer_cutoff_retry and (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry)):\n\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\tstatus('[COMPUTE] leaflets = '+str(np.sum(imono))+'/'+str(len(imono)))\n\t\tstatus('[WARNING] previous monolayer_cutoff = '+str(monolayer_cutoff))\n\t\traise Exception(\n\t\t\t'[ERROR] failed to identify leaflets so we are returning an exception to the LeafletFinder')\n\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\treturn imono", "def test_rocket():\n ring = [(0,0), (10, 0), (15,5), (10,9), (1,7), (6,4), (0,0)]\n conv = ToPointsAndSegments()\n conv.add_polygon([ring])\n skel = calc_skel(conv, output=True, pause=True)\n print \"DONE\"", "def __init__(self, roi_warped_points):\n\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_xfitted = []\n #average x values of the fitted line over the last n iterations\n self.bestx = None\n #polynomial coefficients averaged over the last n iterations\n self.best_fit = [np.array([False])]\n #polinomial coefficients for the last n fits of the lane\n self.recent_fit = []\n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n #radius of curvature of the line in some units\n self.radius_of_curvature = 0\n #distance in meters of vehicle center from the line\n self.line_base_pos = 0\n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float')\n #x values for detected line pixels\n self.allx = None\n #maximum number of iterations to average\n self.max_n = 10 #25\n\n # roi image points in bird's view space\n self.roi_warped_points = roi_warped_points\n\n #y values for detected line pixels\n self.ally = np.linspace(0, self.roi_warped_points[2][1] - 1, self.roi_warped_points[2][1])\n\n # line base pos is calculated through the roi information\n # the used four point ROI has two points at the bottom that are straight\n # with respect to the bottom - as this points are right next to the lines,\n # they can be translated from pixels into meters with the knowledge of\n # a U.S. 
highway standard lane - this is an apprximation, but should be\n # good enough for this project\n # U.S. regulations minimum lane width: 3.7m\n self.xm_per_pix = 3.7 / (self.roi_warped_points[1][0] - self.roi_warped_points[0][0])\n\n # each dashed line is 3m long --> about 33m for warped image\n self.ym_per_pix = 33 / (self.roi_warped_points[2][1] - self.roi_warped_points[0][1])", "def _core_subgraph(G, k_filter, k=None, core=None):\n if core is None:\n core = core_number(G)\n if k is None:\n k = max(core.values())\n nodes = (v for v in core if k_filter(v, k, core))\n return G.subgraph(nodes).copy()", "def split(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):\r\n minMarker = localMin(eccMap, cutStep)\r\n\r\n plt.figure()\r\n plt.imshow(minMarker, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 1')\r\n plt.show()\r\n\r\n minMarker = minMarker.astype(np.int32)\r\n selfArray = self.array.astype(np.int32)\r\n minMarker = minMarker + 1\r\n minMarker[minMarker == 1] = 0\r\n minMarker = minMarker + (-1 * (selfArray - 1))\r\n # minMarker: marker type for opencv watershed,\r\n # sure background = 1\r\n # unknow = 0\r\n # sure forgrand = 2,3,4... etc\r\n\r\n plt.figure()\r\n plt.imshow(minMarker, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 2')\r\n plt.show()\r\n\r\n eccMapNor = (np.round(ia.array_nor(eccMap) * 255)).astype(np.uint8)\r\n eccMapRGB = cv2.cvtColor(eccMapNor, cv2.COLOR_GRAY2RGB)\r\n # eccMapRGB: image type for opencv watershed, RGB, [uint8, uint8, uint8]\r\n\r\n newLabel = cv2.watershed(eccMapRGB, minMarker)\r\n\r\n plt.figure()\r\n plt.imshow(newLabel, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 3')\r\n plt.show()\r\n\r\n newBorder = np.zeros(newLabel.shape).astype(np.int)\r\n\r\n newBorder[newLabel == -1] = 1\r\n\r\n border = ni.binary_dilation(self.array).astype(np.int) - self.array\r\n\r\n border = newBorder + border\r\n\r\n border[border > 1] = 1\r\n\r\n border = sm.skeletonize(border)\r\n\r\n if borderWidth > 1:\r\n border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)\r\n\r\n newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))\r\n\r\n labeledNewPatchMap, patchNum = ni.label(newPatchMap)\r\n\r\n # if patchNum != np.amax(newLabel):\r\n # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)\r\n # raise ValueError, \"Number of patches after splitting does not equal to number of local minimum!\"\r\n\r\n newPatchDict = {}\r\n\r\n for j in range(1, patchNum + 1):\r\n\r\n currPatchName = patchName + '.' 
+ str(j)\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[labeledNewPatchMap == j] = 1\r\n currArray = currArray * self.array\r\n\r\n if np.sum(currArray[:]) > 0:\r\n newPatchDict.update({currPatchName: Patch(currArray, self.sign)})\r\n\r\n if isplot:\r\n plt.figure()\r\n plt.subplot(121)\r\n plt.imshow(self.array, interpolation='nearest')\r\n plt.title(patchName + ': before split')\r\n plt.subplot(122)\r\n plt.imshow(labeledNewPatchMap, interpolation='nearest')\r\n plt.title(patchName + ': after split')\r\n\r\n return newPatchDict", "def generate_cuts(depths, side=SIDE_LENGTH):\n for num, den in depths:\n ad = num * side / den\n poly = Polygon([(0, 0), (side, 0), (side, ad), (0, ad)])\n yield poly", "def _cal_core(tik_instance, total_core_loop_num, num_core, core_number):\n core_loop = tik_instance.Scalar(\"uint64\")\n sum_core = tik_instance.Scalar(\"uint64\")\n\n with tik_instance.if_scope(num_core < total_core_loop_num % MAX_CORE_NUM):\n core_loop.set_as((total_core_loop_num + core_number - 1) //\n core_number)\n sum_core.set_as(core_loop * num_core)\n\n with tik_instance.else_scope():\n core_loop.set_as(total_core_loop_num // core_number)\n sum_core.set_as((core_loop + 1) *\n (total_core_loop_num % MAX_CORE_NUM) +\n core_loop *\n (num_core - total_core_loop_num % MAX_CORE_NUM))\n\n return core_loop, sum_core", "def bricks_per_square_roof(base_width):\n corner_bricks = 0; four_bricks = 0; two_bricks = 0; one_bricks = 0\n for level_length in peg_width_per_levels(base_width):\n four_sides = [level_length for x in range(ROOFING_SIDES)]\n corner_bricks, four_bricks, two_bricks, one_bricks = tuple(map(sum, zip(slope_bricks_per_level(four_sides),\n (corner_bricks, four_bricks, two_bricks,\n one_bricks))))\n return corner_bricks, four_bricks, two_bricks, one_bricks", "def remove_external_core(lab_main, lab_ext):\n \n # for each component of lab_ext, compute the overlap with lab_main\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(lab_ext,s) # labeling\n sizes = ndimage.sum(lab_ext,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n new_lab_ext = np.zeros_like(lab_ext)\n for i in range(len(sizes)):\n sizei = sizes_list[i]\n labeli = np.where(sizes == sizei)[0] + 1\n componenti = labeled_array == labeli\n overlap = componenti * lab_main\n if((overlap.sum()+ 0.0)/sizei >= 0.5):\n new_lab_ext = np.maximum(new_lab_ext, componenti)\n return new_lab_ext", "def remove_external_core(lab_main, lab_ext):\n \n # for each component of lab_ext, compute the overlap with lab_main\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(lab_ext,s) # labeling\n sizes = ndimage.sum(lab_ext,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n new_lab_ext = np.zeros_like(lab_ext)\n for i in range(len(sizes)):\n sizei = sizes_list[i]\n labeli = np.where(sizes == sizei)[0] + 1\n componenti = labeled_array == labeli\n overlap = componenti * lab_main\n if((overlap.sum()+ 0.0)/sizei >= 0.5):\n new_lab_ext = np.maximum(new_lab_ext, componenti)\n return new_lab_ext", "def comb_levy_flight(self, pop):\n children = []\n used = []\n step = tlf(len(pop), len(pop[0]))\n for i in range(0, int(self.population * self.fracLevy)):\n k = int(rand() * self.population)\n while k in used:\n k = int(rand() * self.population)\n\n used.append(k)\n children.append(cp.deepcopy(pop[k]))\n tmp = [ children[(-1)][x] for x in 
range(0, len(self.xID)) if self.xID[x] == 1\n ]\n for j in range(0, len(tmp) - 1):\n flight = (tmp[j] + int(step[i][j] * len(tmp))) % (len(tmp) - 1)\n if tmp[(j + 1)] != flight:\n ind = np.where(tmp == flight)[0][0]\n if ind > j:\n tmp[(j + 1):(ind + 1)] = reversed(tmp[j + 1:ind + 1])\n if j > ind:\n tmp[ind:(j + 1)] = reversed(tmp[ind:j + 1])\n\n for x in range(0, len(self.xID)):\n if self.xID[x] == 1:\n children[(-1)][x] = tmp[0]\n del tmp[0]\n\n return (\n children, used)", "def cross_section(R, L, F_C, show_every = 20, nr = 10, lagre = \"N\", fs = 10):\n\n R_sun = 6.96E8 # [m]\n L_sun = 3.846E26 # [W]\n\n plt.figure(figsize = (10.5, 10))\n fig = plt.gcf()\n ax = plt.gca()\n\n r_range = 1.2 * R[0] / R_sun\n rmax = np.max(R)\n\n ax.set_xlim(-r_range, r_range)\n ax.set_ylim(-r_range, r_range)\n ax.set_aspect('equal')\n\n core_limit = 0.995 * L_sun\n\n j = 0\n for k in range(0, len(R) - 1):\n j += 1\n # plot every <show_every> steps\n if j%show_every == 0:\n if L[k] >= core_limit: # outside core\n if F_C[k] > 0.0: # plot convection outside core\n circle_red = plt.Circle((0, 0), R[k] / rmax, color = 'red', fill = False)\n ax.add_artist(circle_red)\n else: # plot radiation outside core\n circle_yellow = plt.Circle((0, 0), R[k] / rmax, color = 'yellow', fill = False)\n ax.add_artist(circle_yellow)\n else: # inside core\n if F_C[k] > 0.0: # plot convection inside core\n circle_blue = plt.Circle((0, 0), R[k] / rmax, color = 'blue', fill = False)\n ax.add_artist(circle_blue)\n else: # plot radiation inside core\n circle_cyan = plt.Circle((0, 0), R[k] / rmax, color = 'cyan', fill = False)\n ax.add_artist(circle_cyan)\n\n # create legends\n circle_red = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'red', fill = True)\n circle_yellow = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'yellow', fill = True)\n circle_blue = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'blue', fill = True)\n circle_cyan = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'cyan', fill = True)\n\n ax.legend([circle_red, circle_yellow, circle_cyan, circle_blue], \\\n ['Convection outside core', 'Radiation outside core', 'Radiation inside core', 'Convection inside core'], \\\n fontsize = fs)\n plt.xlabel(r'$R/R_{\\odot}$', fontsize = fs)\n plt.ylabel(r'$R/R_{\\odot}$', fontsize = fs)\n plt.title('Cross section of star', fontsize = fs + 2)\n if lagre == \"J\":\n plt.savefig(\"Figur%02i.png\"%nr)\n\n plt.show()", "def ParallelCsys(movableCsys: str, fixedCsys: str) -> \"Feature\":\n return Feature()", "def main(argv):\n scanSpeed = 1.\n iList = -1\n opt1 = \"\"\n intMode = False\n if (len(argv) >= 1): opt1 = argv[0]\n if \"-i\" in (opt1): intMode = True\n\n # Set input data and cuts\n inputFile = TFile(\"~/project/match-skim/waveletSkimDS5_run23920.root\") # calibration\n # inputFile = TFile(\"~/project/v2-processwfs/waveletSkimDS5_90.root\") # noisy BG\n waveTree = inputFile.Get(\"skimTree\")\n theCut = inputFile.Get(\"cutUsedHere\").GetTitle()\n theCut += \" && waveS5/trapENFCal < 1200 && trapENFCal > 1.5 && trapENFCal < 3\"\n\n # Print cut and events passing cut\n waveTree.Draw(\">>elist\", theCut, \"entrylist\")\n elist = gDirectory.Get(\"elist\")\n waveTree.SetEntryList(elist)\n nList = elist.GetN()\n print \"Using cut:\\n\",theCut,\"\\n\"\n print \"Found\",waveTree.GetEntries(),\"input entries.\"\n print \"Found\",nList,\"entries passing cuts.\"\n\n # Make a figure\n fig = plt.figure(figsize=(15,10), facecolor='w')\n p1 = plt.subplot2grid((6,7), (0,0), 
colspan=4, rowspan=2) # original\n p2 = plt.subplot2grid((6,7), (2,0), colspan=4, rowspan=3) # rising edge\n p3 = plt.subplot2grid((6,7), (5,0), colspan=4, rowspan=1) # residual\n p4 = plt.subplot2grid((6,7), (0,4), colspan=3, rowspan=2) # trace 1\n p5 = plt.subplot2grid((6,7), (2,4), colspan=3, rowspan=2) # trace 2\n p6 = plt.subplot2grid((6,7), (4,4), colspan=3, rowspan=2) # trace 3\n plt.show(block=False)\n\n # Make template(s)\n # npzfile = np.load(\"./data/genTemplateWF.npz\") # gen-template.py\n # temp, tempTS, tempE, tempST = npzfile['arr_0']+1, npzfile['arr_1'], npzfile['arr_2'], npzfile['arr_3']*10\n\n samp, r, z, tempE, tempST, smooth = 2016, 0, 15, 3938, 1000, 100 # gen-template.py\n # samp, r, z, tempE, tempST, smooth = 5000, 0, 15, 10, 2500, 100 # huge template\n # samp, r, z, tempE, tempST, smooth = 2016, 0, 15, 10, 1000, 100 # regular size template\n # samp, r, z, tempE, tempST, smooth = 2016, 30, 30, 10, 1000, 100 # regular size temp, slower rise\n # samp, r, z, tempE, tempST, smooth = 500, 0, 15, 10, 100, 100 # small template\n temp, tempTS = wl.MakeSiggenWaveform(samp,r,z,tempE,tempST,smooth)\n tempST = tempST * 10 # convert to ns\n\n\n # Loop over events\n while True:\n saveMe = False\n iList += 1\n if intMode==True and iList != 0:\n value = raw_input()\n if value=='q': break\n if value=='s': saveMe=True\n if value=='p': iList -= 2 # previous\n if (value.isdigit()): iList = int(value) # go to entry\n if iList >= elist.GetN(): break\n\n entry = waveTree.GetEntryNumber(iList);\n waveTree.LoadTree(entry)\n waveTree.GetEntry(entry)\n nChans = waveTree.channel.size()\n numPass = waveTree.Draw(\"channel\",theCut,\"GOFF\",1,iList)\n chans = waveTree.GetV1()\n chanList = list(set(int(chans[n]) for n in xrange(numPass)))\n\n # Loop over hits passing cuts\n hitList = (iH for iH in xrange(nChans) if waveTree.channel.at(iH) in chanList) # a 'generator expression'\n for iH in hitList:\n\n # ------------------------------------------------------------------------\n\n # Load waveform for this hit\n run = waveTree.run\n chan = waveTree.channel.at(iH)\n dataE = waveTree.trapENFCal.at(iH)\n dataST = waveTree.butterTime.at(iH) # replace with blrwfFMR50?\n toe = waveTree.kvorrT.at(iH)/dataE\n print \"%d / %d Run %d nCh %d chan %d trapENF %.1f t/e %.1f\" % (iList,nList,run,nChans,chan,dataE,toe)\n signal = wl.processWaveform(waveTree.MGTWaveforms.at(iH),opt='full')\n waveBLSub = signal.GetWaveBLSub()\n waveFilt = signal.GetWaveFilt()\n waveTS = signal.GetTS()\n baseAvg, dataNoise = signal.GetBaseNoise()\n\n # Denoise the data waveform (take only lowest-frequency components)\n wp = pywt.WaveletPacket(data=waveBLSub, wavelet='haar', mode='symmetric',maxlevel=3)\n new_wp = pywt.WaveletPacket(data=None, wavelet='haar', mode='symmetric')\n new_wp['aaa'] = wp['aaa'].data\n waveDenoised = new_wp.reconstruct(update=False)\n\n # Window the fit around rising edge - start time calculator method\n loWin, hiWin = dataST - 1000, dataST + 4000 # ns\n if loWin < waveTS[0] or hiWin > waveTS[-1]:\n print \"Window out of range! 
dataST: %.1f loWin %.1f hiWin %.1f\" % (dataST,loWin,hiWin)\n idx = np.where((waveTS >= loWin) & (waveTS <= hiWin))\n data = waveBLSub[idx]\n # data = waveDenoised[idx]\n dataTS = waveTS[idx]\n\n # Pack into lists\n rawList = [waveBLSub, waveTS, dataE, dataST]\n dataList = [data, dataTS, dataE, dataST, loWin, hiWin, dataNoise]\n tempList = [temp, tempTS, tempE, tempST]\n\n # Optionally save something to a file\n if saveMe: np.savez(\"./data/tailSlopeInputs.npz\",rawList,tempList)\n\n # Recreate the guess and the guess's rising edge\n guessFull, guessFullTS = wm.MakeModel(dataList, tempList, [dataST,dataE,1.], opt=\"full\")\n guess, guessTS = wm.MakeModel(dataList, tempList, [dataST,dataE,1.], opt=\"!fancy\")\n\n # Make an \"almost complete\" guess - no fitting\n # st, en, slo = dataST-100, dataE, 5\n InterpFn = interpolate.interp1d(tempTS, temp, kind=\"linear\", copy=\"False\", assume_sorted=\"True\")\n # model, modelTS = wm.MakeModel(dataList, tempList, [st,en,slo], fn=InterpFn)\n\n # Fit with MCMC and get best-fit parameters\n # numSteps, burnIn = 10000, 5000 # default - 10000, 5000. try 3000, 2000. long test: 20000,10000\n # myModel = TemplateModel( dataList, dataNoise, tempList )\n # waveModel = pymc.Model( myModel )\n # M = pymc.MCMC( waveModel )\n # M.use_step_method(pymc.Metropolis, M.startTime, proposal_sd=100., proposal_distribution='Normal')\n # M.use_step_method(pymc.Metropolis, M.energy, proposal_sd=1., proposal_distribution='Normal')\n # M.use_step_method(pymc.Metropolis, M.slowness, proposal_sd=100., proposal_distribution='Normal')\n # M.sample(iter=numSteps, verbose=0) # do the fit\n # st = np.median(M.trace('startTime')[burnIn:])\n # en = np.median(M.trace('energy')[burnIn:])\n # slo = np.median(M.trace('slowness')[burnIn:])\n # InterpFn = interpolate.interp1d(tempTS, temp, kind=\"linear\", copy=\"False\", assume_sorted=\"True\")\n # model, modelTS = wm.MakeModel(dataList, tempList, [st,en,slo], fn=InterpFn)\n\n\n # Fit with SciPy minimizer(s) and get best-fit parameters\n\n # how long does it take?\n start = time.time()\n\n MakeTracesGlobal() # creates 3 global arrays: startTrace, enTrace, sloTrace\n datas = [dataList, tempList, InterpFn]\n\n # floats = [1.]\n # result = op.basinhopping(findLnLike, floats, T=1000, stepsize=15, niter_success=2, minimizer_kwargs={\"args\":datas})\n # print \"slowness:\",result[\"x\"],\"dataE\",dataE,\"dataST\",dataST\n # floats = [dataST, dataE, result[\"x\"]]\n # MakeTracesGlobal() # resets traces\n\n floats = [dataST, dataE, 20.]\n result = op.minimize(findLnLike, floats, args=datas, method=\"Nelder-Mead\")\n if not result[\"success\"]: print result[\"message\"]\n st, en, slo = result[\"x\"]\n\n model, modelTS = wm.MakeModel(dataList, tempList, [st,en,slo], fn=InterpFn)\n\n stop = time.time()\n print \"fitting took\",stop-start\n\n\n\n # Calculate residual, Chi2/NDF, likelihood, etc.\n residual = model - data\n frac = (np.power(data - model, 2)) / np.abs(model)\n chi2NDF = np.sum(frac) / len(model)\n inv_sigma2 = 1.0/(dataNoise**2)\n lnLike = -0.5*(np.sum((data - model)**2*inv_sigma2 - np.log(inv_sigma2)))\n\n # Fill the figure\n p1.cla()\n p1.set_ylabel(\"ADC\")\n p1.set_title(\"Run %d Channel %d Entry %d\\ntrapENFCal %.1f T/E %.1f ST %.1f\" % (run,chan,iList,dataE,toe,dataST))\n p1.plot(waveTS,waveBLSub,color='blue')\n p1.plot(waveTS,waveDenoised,color='red',alpha=0.8)\n p1.plot(guessFullTS,guessFull,color='orange',linewidth=2)\n p1.axvline(x = dataST, color='green',linewidth=2)\n p1.axvline(x = loWin, color='black')\n p1.axvline(x = 
hiWin, color='black')\n\n p2.cla()\n p2.plot(dataTS,data,color='blue',label='Data')\n p2.plot(guessTS,guess,color='orange',label='Guess')\n p2.plot(modelTS,model,color='red',linewidth=3,label='Best Fit')\n p2.legend(loc=4)\n\n p3.cla()\n p3.set_xlabel(\"Time [ns]\", x=0.95, ha='right')\n p3.set_ylabel(\"Residual [ADC]\")\n p3.plot(guessTS,residual, color='red')\n p3.axhline(y = 0, color='blue', alpha=0.3)\n p3.axhline(y = dataNoise, color='blue', alpha=0.3)\n p3.axhline(y = -1.0*dataNoise, color='blue', alpha=0.3)\n\n p4.cla()\n p4.set_title(\"startTime %.1f Energy %.2f Slow %.1f\" % (st,en,slo))\n\n p4.plot(stTrace[1:])\n p4.set_ylabel('startTime')\n\n p5.cla()\n p5.plot(enTrace[1:])\n p5.set_ylabel('energy')\n\n p6.cla()\n p6.plot(sloTrace[1:])\n p6.set_ylabel('slowness')\n\n plt.tight_layout()\n plt.subplots_adjust(hspace=0.35)\n plt.pause(scanSpeed)\n\n # ------------------------------------------------------------------------", "def line(x1,y1,x2,y2,z_thickness,laser):\r\n\t#Global variables that are used by all algorithms\r\n\tlayers = int(z_thickness/laser[\"z_spacing\"])\r\n\r\n\t#Works out offset when beginning on a new layer\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\ttaper_x,taper_y = offset(x1,y1,x2,y2,taper)\r\n\r\n\t#Works out offset between each parallel scan on the same layer\r\n\tdelta_x,delta_y = offset(x1,y1,x2,y2,laser[\"xy_spacing\"])\r\n\r\n\t#Works out maximum offset from starting line, we don't want to exceed this at any point.\r\n\tmax_taper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (z_thickness) * 2\r\n\tmax_delta_x, max_delta_y = offset(x1,y1,x2,y2,max_taper)\r\n\t#max_delta_x, max_delta_y = 2*max_delta_x, 2*max_delta_y\r\n\r\n\t#Loops through each layer, in which we fit as many parallel raster scans as the maximum offset allows\r\n\tcutlist = []\r\n\tfor a in range(layers):\r\n\t\tnew_x1,new_x2,new_y1,new_y2 = x1 + a*taper_x, x2 + a*taper_x, y1 + a*taper_y, y2 + a*taper_y\r\n\t\ti = 0\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\twhile abs(new_x1-x1) < abs(max_delta_x) or abs(new_y1-y1) < abs(max_delta_y):\r\n\t\t\t#This use of i is to reduce the jump distance between individual scans\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y2:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_x1,new_x2,new_y1,new_y2 = new_x1 + delta_x, new_x2 + delta_x, new_y1 + delta_y, new_y2 + delta_y\r\n\t\t\ti = i + 1\r\n\t\t#Having completed one layer, the laser moves down to begin the next layer\r\n\t\tmax_delta_x = max_delta_x - taper_x\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def prep_optics(SetofObjects, epsilon):\n\n for j in SetofObjects._index:\n # Find smallest nonzero distance\n SetofObjects._core_dist[j] = np.sort(SetofObjects.data[j,:])[1]\n print(\n 'Core distances and neighborhoods prepped for ' + str(\n SetofObjects._n) + ' points.')", "def __init__(self, flagTrackShape = 0):\n\n \"\"\" We want the planner to use a track somewhat narrower than the real one,\n to keep some robustness and avoid leaving the track in the first second. 
\"\"\"\n \n ### is HW is the half width of vehicle dimension + some saftey factor?\n ### what is slack??\n \n # HW = rospy.get_param(\"halfWidth\")+0.1\n HW = 0.4\n # print (\"HW\",HW)\n # if flagTrackShape == 0:\n # selectedTrack = rospy.get_param(\"trackShape\") # comentado para el testeo del planner\n # # selectedTrack = \"L_shape\"\n # else:\n # selectedTrack = \"oval\"\n\n selectedTrack = \"L_shape\"\n print (\"track selected\",selectedTrack)\n if selectedTrack == \"3110\":\n self.halfWidth = 0.6\n self.slack = 0.15\n spec = np.array([[60 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [20 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [40 * 0.03, -40 * 0.03 * 10 / np.pi],\n [60 * 0.03, +60 * 0.03 * 5 / np.pi],\n [40 * 0.03, -40 * 0.03 * 10 / np.pi],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [20 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [80 * 0.03, 0]])\n\n elif selectedTrack == \"oval\":\n self.halfWidth = HW\n self.slack = 0.15\n spec = np.array([[1.0, 0],\n [4.5, 4.5 / np.pi],\n [2.0, 0],\n [4.5, 4.5 / np.pi],\n [1.0, 0]])\n\n # elif selectedTrack == \"L_shape\":\n # self.halfWidth = HW\n # self.slack = 0.01\n # lengthCurve = 4.5\n # spec = np.array([[1.0, 0],\n # [lengthCurve, lengthCurve / np.pi],\n # # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2\n # [lengthCurve/2,-lengthCurve / np.pi ],\n # [lengthCurve, lengthCurve / np.pi],\n # [lengthCurve / np.pi *2, 0],\n # [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape_n\":\n self.halfWidth = HW\n self.slack = 0.01\n lengthCurve = 4.5\n spec = np.array([[1.0, 0],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve/2,-lengthCurve / np.pi ],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve / np.pi *2, 0],\n [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape_IDIADA\":\n self.halfWidth = HW\n self.slack = 6*0.45\n lengthCurve = 10*4.5\n spec = np.array([[1.0, 0],\n [lengthCurve, lengthCurve / np.pi],\n # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2\n [lengthCurve/2,-lengthCurve / np.pi ],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve / np.pi *2, 0],\n [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape\":\n # elif selectedTrack == \"SLAM_shape1\":\n self.halfWidth = 0.4\n self.slack = 0.01\n lengthCurve = 1.5*(np.pi/2)\n spec = np.array([[2.5,0],\n [2*lengthCurve,(lengthCurve*2)/np.pi],\n [lengthCurve,-(lengthCurve*2) / np.pi],\n [1.0,0],\n [lengthCurve,lengthCurve*2/np.pi],\n [2.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [4.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [2.6,0]])\n\n\n elif selectedTrack == \"8_track\":\n self.halfWidth = 0.4\n self.slack = 0.15\n lengthCurve = 1.5*(np.pi/2)\n spec = np.array([[0.5,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [1.0,0],\n [lengthCurve,-(lengthCurve*2) / np.pi],\n [lengthCurve,lengthCurve*2/np.pi],\n [lengthCurve,lengthCurve*2/np.pi],\n [1.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [lengthCurve,-(lengthCurve*2)/np.pi],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [1.0,0],\n [lengthCurve,lengthCurve*2/np.pi]])\n\n\n\n # Now given the above segments we compute the (x, y) points of the track and the angle of the tangent vector (psi) at\n # these points. For each segment we compute the (x, y, psi) coordinate at the last point of the segment. 
Furthermore,\n # we compute also the cumulative s at the starting point of the segment at signed curvature\n # PointAndTangent = [x, y, psi, cumulative s, segment length, signed curvature]\n\n ### what is cumulative s and signed curvature.?\n\n PointAndTangent = np.zeros((spec.shape[0] + 1, 6))\n for i in range(0, spec.shape[0]):\n if spec[i, 1] == 0.0: # If the current segment is a straight line\n l = spec[i, 0] # Length of the segments\n if i == 0:\n ang = 0 # Angle of the tangent vector at the starting point of the segment\n x = 0 + l * np.cos(ang) # x coordinate of the last point of the segment\n y = 0 + l * np.sin(ang) # y coordinate of the last point of the segment\n else:\n ang = PointAndTangent[i - 1, 2] # Angle of the tangent vector at the starting point of the segment\n x = PointAndTangent[i-1, 0] + l * np.cos(ang) # x coordinate of the last point of the segment\n y = PointAndTangent[i-1, 1] + l * np.sin(ang) # y coordinate of the last point of the segment\n psi = ang # Angle of the tangent vector at the last point of the segment\n\n # # With the above information create the new line\n # if i == 0:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 0])\n # else:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3] + PointAndTangent[i, 4], l, 0])\n #\n # PointAndTangent[i + 1, :] = NewLine # Write the new info\n\n if i == 0:\n NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 0])\n else:\n NewLine = np.array([x, y, psi, PointAndTangent[i-1, 3] + PointAndTangent[i-1, 4], l, 0])\n\n PointAndTangent[i, :] = NewLine # Write the new info\n else:\n l = spec[i, 0] # Length of the segment\n r = spec[i, 1] # Radius of curvature\n\n\n if r >= 0:\n direction = 1\n else:\n direction = -1\n\n if i == 0:\n ang = 0 # Angle of the tangent vector at the\n # starting point of the segment\n CenterX = 0 \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = 0 \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n else:\n ang = PointAndTangent[i - 1, 2] # Angle of the tangent vector at the\n # starting point of the segment\n CenterX = PointAndTangent[i-1, 0] \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = PointAndTangent[i-1, 1] \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n\n spanAng = l / np.abs(r) # Angle spanned by the circle\n psi = wrap(ang + spanAng * np.sign(r)) # Angle of the tangent vector at the last point of the segment\n\n angleNormal = wrap((direction * np.pi / 2 + ang))\n angle = -(np.pi - np.abs(angleNormal)) * (sign(angleNormal))\n x = CenterX + np.abs(r) * np.cos(\n angle + direction * spanAng) # x coordinate of the last point of the segment\n y = CenterY + np.abs(r) * np.sin(\n angle + direction * spanAng) # y coordinate of the last point of the segment\n\n # With the above information create the new line\n # plt.plot(CenterX, CenterY, 'bo')\n # plt.plot(x, y, 'ro')\n\n # if i == 0:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 1 / r])\n # else:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3] + PointAndTangent[i, 4], l, 1 / r])\n #\n # PointAndTangent[i + 1, :] = NewLine # Write the new info\n\n if i == 0:\n NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 1 / r])\n else:\n NewLine = np.array([x, y, psi, PointAndTangent[i-1, 3] + PointAndTangent[i-1, 4], l, 1 / r])\n\n PointAndTangent[i, :] = NewLine # Write the new info\n # plt.plot(x, y, 'or')\n\n # Now update 
info on last point\n # xs = PointAndTangent[PointAndTangent.shape[0] - 2, 0]\n # ys = PointAndTangent[PointAndTangent.shape[0] - 2, 1]\n # xf = PointAndTangent[0, 0]\n # yf = PointAndTangent[0, 1]\n # psif = PointAndTangent[PointAndTangent.shape[0] - 2, 2]\n #\n # # plt.plot(xf, yf, 'or')\n # # plt.show()\n # l = np.sqrt((xf - xs) ** 2 + (yf - ys) ** 2)\n #\n # NewLine = np.array([xf, yf, psif, PointAndTangent[PointAndTangent.shape[0] - 2, 3] + PointAndTangent[\n # PointAndTangent.shape[0] - 2, 4], l, 0])\n # PointAndTangent[-1, :] = NewLine\n\n\n xs = PointAndTangent[-2, 0]\n ys = PointAndTangent[-2, 1]\n xf = 0\n yf = 0\n psif = 0\n\n # plt.plot(xf, yf, 'or')\n # plt.show()\n l = np.sqrt((xf - xs) ** 2 + (yf - ys) ** 2)\n\n NewLine = np.array([xf, yf, psif, PointAndTangent[-2, 3] + PointAndTangent[-2, 4], l, 0])\n PointAndTangent[-1, :] = NewLine\n\n self.PointAndTangent = PointAndTangent\n self.TrackLength = PointAndTangent[-1, 3] + PointAndTangent[-1, 4]", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim = False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print (\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = 
seg.ID()\n PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df", "def main():\n utl.calibrate(False)\n undistort(False)\n edge_detect(False)\n transform(False)\n identify_line(False)\n lane_line(True)", "def test_construct_subcircuit_layers(self):\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def circuit(params):\r\n # section 1\r\n qml.RX(params[0], wires=0)\r\n # section 2\r\n qml.RY(params[1], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 3\r\n qml.RX(params[2], wires=0)\r\n qml.RY(params[3], wires=1)\r\n qml.RZ(params[4], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n # section 4\r\n qml.RX(params[5], wires=0)\r\n qml.RY(params[6], wires=1)\r\n qml.RZ(params[7], wires=2)\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n\r\n params = np.ones([8])\r\n tapes = circuit.metric_tensor(params, only_construct=True)\r\n\r\n # this circuit should split into 4 independent\r\n # sections or layers when constructing subcircuits\r\n assert len(tapes) == 4\r\n\r\n # first layer subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second layer subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # # third layer subcircuit\r\n assert len(tapes[2].operations) == 8\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n assert isinstance(tapes[2].operations[3], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[2].operations[4], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[2].operations[5], qml.PauliZ)\r\n assert isinstance(tapes[2].operations[6], qml.S)\r\n assert isinstance(tapes[2].operations[7], 
qml.Hadamard)\r\n\r\n # # fourth layer subcircuit\r\n assert len(tapes[3].operations) == 13\r\n assert isinstance(tapes[3].operations[0], qml.RX)\r\n assert isinstance(tapes[3].operations[1], qml.RY)\r\n assert isinstance(tapes[3].operations[2], qml.CNOT)\r\n assert isinstance(tapes[3].operations[3], qml.CNOT)\r\n assert isinstance(tapes[3].operations[4], qml.RX)\r\n assert isinstance(tapes[3].operations[5], qml.RY)\r\n assert isinstance(tapes[3].operations[6], qml.RZ)\r\n assert isinstance(tapes[3].operations[7], qml.CNOT)\r\n assert isinstance(tapes[3].operations[8], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[3].operations[9], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[3].operations[10], qml.PauliZ)\r\n assert isinstance(tapes[3].operations[11], qml.S)\r\n assert isinstance(tapes[3].operations[12], qml.Hadamard)", "def core_set(self, core):\n if core in self.core_groups:\n return self.data[self.data[\"Core\"] == core]\n return None", "def calculate_contours_fit(L_x, L_y, e, leaflet, ts, Plots, side):\n \n n = np.load(input_dir + 'directors_'+leaflet+'_tail_'+ str(ts) + '.npy') \n\n pos = np.load(input_dir + 'coordinates_'+leaflet+'_tail_' + str(ts) + '.npy') \n\n resid = np.load(input_dir + 'residues_'+leaflet+'_tail_' + str(ts) + '.npy')\n box = np.load(input_dir + 'box' + str(ts) + '.npy')\n\n \n chl = np.load(input_dir + 'cholesterol_'+leaflet+'_tail_' + str(ts) + '.npy')\n dlipc = np.load(input_dir + 'dlipc_'+leaflet+'_tail_' + str(ts) + '.npy') \n dspc = np.load(input_dir + 'dspc_'+leaflet+'_tail_' + str(ts) + '.npy')\n ssm = np.load(input_dir + 'ssm_'+leaflet+'_tail_' + str(ts) + '.npy')\n \n #n= np.ones(len(pos))\n \"\"\" END: read the input data \"\"\"\n\n\n field = order_vector_field(L_x, L_y, pos, n, e, box)\n\n c = pd.DataFrame(data=field).mean(axis=0).rolling(50, center=True, min_periods=1).mean() #50\n c.dropna(inplace=True)\n middle = 0.5*(np.max(c) + np.min(c)) \n #middle = 0.025\n contours = measure.find_contours(field, middle) # Marching Cubes algorith\n #save contours\n fac_x = box[0] / L_x #to get the right dimensions (range_x)\n fac_y = box[1] / L_y # (range_y)\n \n contours_x = []\n contours_y = []\n contours_x_y = []\n \n contours_all = []\n for m, contour in enumerate(contours):\n contours_x.append((contour[:, 1] * fac_x))\n contours_y.append((contour[:, 0] * fac_y))\n \n \n contours_x_y = np.column_stack((contours_x[m], contours_y[m]))\n contours_all.append(contours_x_y)\n np.save(output_contours + 'contours_'+leaflet+'.' 
+ str(ts) + '.npy', contours_all)\n \n\n#===================================================\n#To assign resids to the different phases\n phase_belonging = np.zeros((len(pos)))\n ordered =[]\n disordered = []\n for i in np.arange(len(pos)):\n \n def apply_pbc(pos, box):\n if pos >= box:\n pos -= box\n if pos < 0:\n pos += box\n return pos\n \n idx_x = int(apply_pbc(pos[i,0], box[0]) / fac_x - 1.e-5) #the - 1.e-5 is because accuracy issue in the /\n idx_y = int(apply_pbc(pos[i,1], box[1]) / fac_y - 1.e-5) #this - 1.e-5 is because accuracy issue in the /\n #print(idx_x, idx_y)\n order= field[idx_y, idx_x]\n if (order > middle):\n ordered.append(order)\n order = 1 #ordered lipids\n \n else :\n disordered.append(order)\n order =0 #disordered lipids\n phase_belonging[i] = order\n \n\n resid_phases = np.column_stack((resid[:,0], phase_belonging))\n np.save(output_dir + 'resid_phases'+leaflet+'.'+ str(j) + '.npy', resid_phases)\n\n if Plots == True:\n plt.figure(figsize=(15,10)) \n \n contours_sorted = sorted(contours, key=len, reverse=True)\n \n for i in range(2):\n plt.plot(contours_sorted[i][:,1]* fac_x+0.5*fac_x, contours_sorted[i][:,0]* fac_y+0.5*fac_y, linewidth=3, color='#0000FF' ) ##00CC00\n \n #for m, contour in enumerate(contours_sorted):\n # print(contour[:,0])\n # for contour in contours: \n \n # plt.plot((contour[:, 1] * fac_x+0.5*fac_x),\n # (contour[:, 0] * fac_y+0.5*fac_y),\n # linewidth=4, color='#00CC00')\n \n plt.imshow(field, interpolation='nearest', \n cmap=plt.cm.gray_r,\n extent=[0, box[0], 0, box[1]], origin='lower', alpha=0.7) \n \n plt.axis('off')\n plot_scatter_order_field(pos, resid, dlipc, dspc, chl,ssm, n , box, ts, side) #phase_belonging.reshape(-1,1)\n plt.savefig(output_dir + 'contours-'+ leaflet + str(ts) + '.png', dpi=300) \n plt.close() \n \n return resid_phases #, ordered, disordered ", "def core_slices(self, borders=None):\n if borders is None:\n borders = self.all_borders\n\n core_slices = list(self.slices)\n for border, direction in borders:\n core_slice = core_slices[border]\n if direction < 0:\n core_slice = slice(core_slice.start + self.overlap[border], core_slice.stop)\n else:\n core_slice = slice(core_slice.start, core_slice.stop - self.overlap[border])\n core_slices[border] = core_slice\n\n return tuple(core_slices)", "def assign_core_periphery(graph):\n # borgatti-everett\n be = bct.core_periphery_dir(nx.convert_matrix.to_numpy_array(graph))\n for i, node in enumerate(graph.nodes):\n graph.nodes[node]['core_be'] = be[0][i]\n graph.graph['coreness_be'] = be[1]\n # rombach\n rb = cpa.Rombach()\n rb.detect(graph)\n if rb.get_coreness() != 0:\n for node, coreness in rb.get_coreness().items():\n graph.nodes[node]['core_rb'] = coreness\n graph.graph['coreness_rb'] = rb.score()[0]\n else:\n for node in graph.nodes:\n graph.nodes[node]['core_rb'] = 0\n graph.graph['coreness_rb'] = 0", "def _get_traces(\n run, runs, running_runs, all_cses, trace_type='deconvolved', length_fr=15,\n pad_fr=31, offset_fr=1, running_threshold_cms=4., correct_trials=False,\n lick_cutoff=-1, lick_window=(-1, 0), running_fraction=0.3,\n max_n_onsets=-1, remove_stim=True, activity_scale=None):\n if run not in running_runs:\n # Prepare running baseline data.\n # NOTE: running thresholding is done differently here than later during\n # stimulus runs.\n out = {'other-running': _get_run_onsets(\n runs=running_runs,\n length_fr=length_fr,\n pad_fr=pad_fr,\n offset_fr=offset_fr,\n running_threshold_cms=running_threshold_cms)}\n\n for training_run in runs:\n t2p = training_run.trace2p()\n\n 
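# trace() below returns a [cell, frame] array, so each onset window appended later is an (n_cells, length_fr) slice along the frame axis.\n 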
# Get the trace from which to extract time points\n trs = t2p.trace(trace_type)\n\n if activity_scale is not None:\n trs = (trs.T*activity_scale).T\n\n # If the target run is also a training run, make sure that we aren't\n # training on the same data that will later be used for comparison\n if remove_stim or training_run != run:\n # Search through all stimulus onsets, correctly coding them\n for ncs in t2p.cses(): # t.cses(self._pars['add-ensure-quinine']):\n if ncs in all_cses:\n # Remap cs name if needed\n # NOTE: blank trials are just labeled 'other' and not\n # checked for running.\n cs = all_cses[ncs]\n # Initialize output\n if cs not in out:\n out[cs] = []\n\n ons = t2p.csonsets(\n ncs, 0 if correct_trials else -1, lick_cutoff,\n lick_window)\n\n for on in ons:\n start = on + offset_fr\n toappend = trs[:, start:start + length_fr]\n # Make sure interval didn't run off the end.\n if toappend.shape[1] == length_fr:\n out[cs].append(toappend)\n\n # If the target run is in the training runs, don't use the times\n # that will later be used for comparison.\n if training_run != run:\n # Add all onsets of \"other\" frames\n others = t2p.nocs(length_fr, pad_fr, -1)\n\n if len(t2p.speed()) > 0:\n running = t2p.speed() > running_threshold_cms\n for ot in others:\n start = ot + offset_fr\n if nanmean(running[start:start + length_fr]) > \\\n running_fraction:\n out['other-running'].append(\n trs[:, start:start + length_fr])\n else:\n if 'other' not in out:\n out['other'] = []\n out['other'].append(\n trs[:, start:start + length_fr])\n\n # Selectively remove onsets if necessary\n if max_n_onsets > 0:\n for cs in out:\n if 'other' not in cs:\n print('WARNING: Have not yet checked new timing version')\n\n # Account for shape of array\n if len(out[cs]) > max_n_onsets:\n out[cs] = np.random.choice(\n out[cs], max_n_onsets, replace=False)\n\n for cs in out:\n out[cs] = np.array(out[cs])\n\n return out", "def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road", "def shorten_cores(cores):\n cores = sorted(list(cores))\n if len(cores) == 0:\n return ''\n core_buffer = ''\n start = 0\n while 
start < len(cores):\n cont_seq = find_max_continous_sequence(cores, start)\n start += len(cont_seq)\n if len(cont_seq) > 1:\n core_buffer += ',%d-%d' % (cont_seq[0], cont_seq[-1])\n else:\n core_buffer += ',%d' % cont_seq[0]\n return core_buffer[1:]", "def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + [[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + 
z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def __init__(self, selected_points, cut_depth, cut_breadth):\n\n\n self.cut_depth = cut_depth\n self.cut_breadth = cut_breadth\n\n self.points = selected_points\n\n self.vline = self.vlinecomp()\n self.hline = self.ortho_line_cut()\n\n self.mid_left = self.midpoint(0,1)\n self.mid_right = self.midpoint(2, 3)", "def start_core(c):\n with c.cd('images'):\n print('Starting Core database')\n c.run('sudo docker-compose up -d stellar-core-db', hide='stderr')\n sleep(2)\n\n # setup core database\n # https://www.stellar.org/developers/stellar-core/software/commands.html\n print('Initializing Core database')\n c.run('sudo docker-compose run stellar-core --newdb --forcescp', hide='both')\n\n # setup cache history archive\n print('Initializing Core history archive')\n c.run('sudo docker-compose run stellar-core --newhist cache', hide='both')\n\n # start a local private testnet core\n # https://www.stellar.org/developers/stellar-core/software/testnet.html\n print('Starting Core')\n c.run('sudo docker-compose up -d stellar-core', hide='stderr')", "def SCEC_LOH_1():\n\n #Initialize CrustModel\n model = CrustModel(2)\n\n #Slow layer\n vp=4.000\n vs=2.000\n rho=2.600\n Qa=10000.\n Qb=10000.\n thickness = 1.0\n\n model.add_layer(thickness, vp, vs, rho, Qa, Qb)\n\n #Halfspace\n vp=6.000\n vs=3.464\n rho=2.700\n Qa=10000.\n Qb=10000.\n thickness = 0 #Infinite thickness!\n model.add_layer(thickness, vp, vs, rho, Qa, Qb)\n\n return model", "def RemovePolygonHoles_management(in_fc, threshold=0.0):\n desc = arcpy.Describe(in_fc)\n if desc.dataType != \"FeatureClass\" and desc.dataType != \"ShapeFile\":\n print(\"Invalid data type. 
The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n else:\n if desc.shapeType != \"Polygon\":\n print(\"The input is supposed to be a Polygon FeatureClass or Shapefile.\")\n return\n if threshold < 0.0:\n threshold = 0.0\n with arcpy.da.UpdateCursor(in_fc, [\"SHAPE@\"]) as updateCursor:\n for updateRow in updateCursor:\n shape = updateRow[0]\n new_shape = arcpy.Array()\n for part in shape:\n new_part = arcpy.Array()\n if threshold > 0:\n # find None point in shape part\n # in arcpy module, a None point is used to seperate exterior and interior vertices\n null_point_index = []\n for i in range(len(part)):\n if part[i] is None:\n null_point_index.append(i)\n # if interior vertices exist, create polygons and compare polygon shape area to given threshold\n # if larger, keep vertices, else, dismiss them\n if len(null_point_index) > 0:\n for k in range(0, null_point_index[0]):\n new_part.add(part[k])\n for i in range(len(null_point_index)):\n pointArray = arcpy.Array()\n # determine if the None point is the last one\n if i+1 < len(null_point_index):\n for j in range(null_point_index[i] + 1, null_point_index[i+1]):\n pointArray.add(part[j])\n else:\n for j in range(null_point_index[i] + 1, len(part)):\n pointArray.add(part[j])\n # create a polygon to check shape area against the given threshold\n inner_poly = arcpy.Polygon(pointArray)\n # if larger than threshold, then add to the new part Array\n if inner_poly.area > threshold:\n if i+1 < len(null_point_index):\n for k in range(null_point_index[i], null_point_index[i+1]):\n new_part.add(part[k])\n else:\n for k in range(null_point_index[i], len(part)):\n new_part.add(part[k])\n new_shape.add(new_part)\n # if interior does not exist, add the whole part\n else:\n new_shape.add(part)\n else:\n # get the first None point index\n first_null_point_index = 0\n for i in range(len(part)):\n if part[i] is None:\n first_null_point_index = i\n break\n if first_null_point_index == 0:\n new_shape.add(part)\n else:\n for j in range(first_null_point_index):\n new_part.add(part[j])\n new_shape.add(new_part)\n if len(new_shape) > 0:\n new_poly = arcpy.Polygon(new_shape)\n updateRow[0] = new_poly\n updateCursor.updateRow(updateRow)", "def run():\n\n # Build list of stations\n stations = build_station_list()\n \n # Update latest level data for all stations\n update_water_levels(stations)\n \n # Stations at which the current relative level is over 0.8\n z= stations_level_over_threshold(stations, 0.8)\n for a in z:\n print(a[0],a[1])\n print(\".\") \n print(\".\")", "def corescan(filename, core):\n \n pssm = np.loadtxt(filename, skiprows=1)\n pssmf = pssm[:,1:].transpose()\n\n # iterpssm = np.concatenate((matlog, pssmf, matlog), axis=1) #iterable PSSM , flanked by buffer arrays\n\n lenpssm = len(pssmf.transpose())\n\n score = -1000\n pos = 0\n for j in regenerateseq(core, \"numpy\"):\n beta = pssmwalk(pssmf,j, 0, \"numpy\")\n \n\n betascore = beta[0]\n\n betapos = beta[1]\n \n if betascore > score :\n score = betascore\n pos = betapos\n else:\n pass\n\n return [score,pos,pssmf]", "def _get_lt_problem(self,x,n_seg=[10,10], high_fidelity=True):\n\tfrom PyKEP import epoch, lambert_problem, DAY2SEC, fb_prop, propagate_lagrangian\n\tfrom PyGMO import population\n\tfrom math import pi, acos,cos,sin,sqrt, exp\n\tfrom scipy.linalg import norm\n\t\n\tretval = []\n\t#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience\n\tT = x[3::4]\n\tn_legs = len(x)/4\n\tseq = self.get_sequence()\n\tcommon_mu = 
seq[0].mu_central_body\n\t#2 - We compute the epochs and ephemerides of the planetary encounters\n\tt_P = list([None] * (n_legs))\n\tr_P = list([None] * (n_legs))\n\tv_P = list([None] * (n_legs))\n\tDV = list([None] * (n_legs))\n\t\n\tfor i,planet in enumerate(seq):\n\t\tt_P[i] = epoch(x[0]+sum(T[:i+1]))\n\t\tr_P[i],v_P[i] = seq[i].eph(t_P[i])\n\n\t#3 - We start with the first leg: a lambert arc\n\ttheta = 2*pi*x[1]\n\tphi = acos(2*x[2]-1)-pi/2\n\tr = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection\n\tr = [JR*1000*d for d in r]\n\t\n\tl = lambert_problem(r,r_P[0],T[0]*DAY2SEC,common_mu, False, False)\n\n\t#Lambert arc to reach seq[1]\n\tv_end_l = l.get_v2()[0]\n\tv_beg_l = l.get_v1()[0]\n\t\n\t#We start appending in the lt chromosome (see mga_incipit_lt)\n\tretval.append(theta)\n\tretval.append(phi)\n\t\n\t#First DSM occuring at the very beginning (will be cancelled by the optimizer)\n\tDV[0] = abs(norm(v_beg_l) - 3400)\n\t\n\t#Start of the first lt leg encoding \n\tretval.append(T[0])\n\tretval.append(exp(-DV[0]/9.80665/2000)*2000) #Tsiolkowsky\n\tretval.extend(v_beg_l)\n\tretval.extend([a-b for a,b in zip(v_end_l,v_P[0])])\n\n\t#4 - And we proceed with each successive leg\n\tfor i in xrange(1,n_legs):\n\t\t#Fly-by \n\t\tv_out = fb_prop(v_end_l,v_P[i-1],x[1+4*i]*seq[i-1].radius,x[4*i],seq[i-1].mu_self)\n\t\t#s/c propagation before the DSM\n\t\tr,v = propagate_lagrangian(r_P[i-1],v_out,x[4*i+2]*T[i]*DAY2SEC,common_mu)\n\t\t#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)\n\t\tdt = (1-x[4*i+2])*T[i]*DAY2SEC\n\t\tl = lambert_problem(r,r_P[i],dt,common_mu, False, False)\n\t\tv_end_l = l.get_v2()[0]\n\t\tv_beg_l = l.get_v1()[0]\n\t\t#DSM occuring at time nu2*T2\n\t\tDV[i] = norm([a-b for a,b in zip(v_beg_l,v)])\n\t\t\n\t\t#lt encoding of all legs\n\t\tretval.append(T[i])\n\t\tretval.append(exp(-sum(DV[:i+1])/9.80665/2000)*2000) #Tsiolkowsky\n\t\tretval.extend([a-b for a,b in zip(v_out,v_P[i-1])])\n\t\tif i != n_legs-1:\n\t\t\tretval.extend([a-b for a,b in zip(v_end_l,v_P[i])])\n\t\n\tretval = retval + [0]*sum(n_seg)*3\n\tprob = mga_incipit_lt(high_fidelity=high_fidelity,seq=seq, n_seg = n_seg,tf = epoch(x[0]+sum(T)), vf = [a-b for a,b in zip(v_end_l,v_P[i])])\n\t# solves the problem of chemical trajectories wanting higher launch dv\n\tub = list(prob.ub)\n\tlb = list(prob.lb)\n\tub[4:7] = [5000,5000,5000]\n\tlb[4:7] = [-5000,-5000,-5000]\n\tprob.set_bounds(lb, ub)\n\tpop = population(prob)\n\tpop.push_back(retval)\n\treturn (prob,pop)", "def GetSubContoursAndOrderingByFrame(watershed, allValsByFrame):\n cellNetworkList = GetCellNetworksByFrame(watershed, allValsByFrame)\n scListByFrame = [cellNetworkList[i].subContours for i in range(len(watershed))]\n orderOfSCsByValueByFrame = [\n cellNetworkList[i].orderOfSubContoursDict for i in range(len(watershed))\n ]\n return scListByFrame, orderOfSCsByValueByFrame", "def parse_cores(core_str):\n num_cores = os.cpu_count()\n cores = []\n\n # remove spaces\n core_str.replace(\" \", \"\")\n\n # check if not a range\n if '-' not in core_str:\n cores = list(map(int, core_str.strip().split(',')))\n else:\n # parse range e.g. 
2-8\n core_str = core_str.strip().split('-')\n for i in range(int(core_str[0]), int(core_str[1]) + 1):\n cores.append(i)\n\n # ensure valid cores specified\n for core in cores:\n if core < 0 or core >= num_cores:\n print(\"Core {} out of range!\".format(core), file=sys.stderr)\n raise Exception()\n\n return cores", "def extract_core_ids(self):\n path2folder = 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[self.radii[0]] + '/'\n analysis_files = [dir for dir in os.listdir(path2folder) if dir.startswith('Matrix-analysis-IP_')]\n analysis_file = path2folder + analysis_files[0] #work for 1 component system\n with open(analysis_file, 'r') as fid:\n my_file = yaml.load(fid, Loader=yaml.FullLoader)\n self.core_ids = list(my_file.keys())\n self.mol_name = analysis_files[0].split('_')[1].split('.')[0]\n\n\n print('coreids', self.core_ids)", "def connected((e,r)):\n \n # Deal with the middle case so we don't divide by zero\n if r==0: return [(1,1),(2,1),(3,1),(4,1),(5,1),(0,1)]\n # If the input is impossible, return nothing to prune the branch (shouldn't\n # happen)\n if e>=6*r: return []\n connected=[]\n mult=e//r\n rem=e % r\n #Going sideways\n toAdd=((6*r-1,r) if e==0 else (e-1,r))\n connected.append(toAdd)\n toAdd=((0,r) if e==6*r-1 else (e+1,r))\n connected.append(toAdd)\n #Going inward\n toAdd=( (0,r-1)if mult==5 and rem==r-1 else (mult*(r-1)+rem,r-1) )\n connected.append(toAdd)\n if rem!=0:\n connected.append((mult*(r-1)+rem-1,r-1))\n\n #Going outward\n if r<nLayers-1:\n connected.append((mult*(r+1)+rem,r+1))\n connected.append((mult*(r+1)+rem+1,r+1))\n if rem==0: # only case where negatives could result\n if mult>0: connected.append( (mult*(r+1)-1,r+1))\n else: connected.append( (6*(r+1)-1,r+1))\n \n return connected", "def getChipCoreAndCxId(layer):\n core_ids = []\n cx_ids = []\n chip_ids = []\n for id in layer.nodeIds:\n _, chip_id, core_id, cx_id, _, _ = layer.net.resourceMap.compartment(id)\n chip_ids.append(chip_id)\n core_ids.append(core_id)\n cx_ids.append(cx_id)\n return np.array(chip_ids), np.array(core_ids), np.array(cx_ids)", "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: 
\", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)", "def process_tree(tree):\n c = circuit()\n l = line()\n names = {}\n procedures = []\n for lst in tree.children:\n print(lst)\n if type(lst[0]) is str:\n names[lst[0]] = lst[1]\n else:\n procedures.append(lst)\n print(names)\n #print(procedures)\n\n for proc in procedures:\n\n proc_elements_names = proc[0]\n proc_name = proc[1]\n\n #print(proc_elements_names)\n #print(proc_name)\n\n if proc_name == \"set_mode\":\n mode_name = proc_elements_names[0]\n if mode_name != \"draw-mode\": \n c.set_mode(mode_name)\n elif mode_name == \"draw-mode\":\n l1 = line()\n # draw mode is different from other modes\n for element in names:\n e = CompleteElement(element)\n e.set_other_attrs(names[element])\n e.process_other_attrs()\n l1.addElement(e)\n c.connectInSeries(l1)\n c.set_mode(\"draw-mode\")\n \n \n if proc_name == \"series\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n l = l1\n c.connectInSeries(l)\n #raise SyntaxError(\"Alias {0} referrenced before assignment\".format(item[0]))\n\n elif proc_name == \"parallel\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n c.connectInParallel(l1)\n l1 = line()\n\n\n elif proc_name == \"add_parallel\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n l1 = line()\n l1.addElement(names[new_element])\n c.connection.append(l1)\n\n\n elif proc_name == \"add_series\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n for ln in c.connection:\n for e in ln.elements:\n if names[old_element] == e:\n ln.addElement(names[new_element])\n\n\n c.evaluate(\"output.png\")\n #print(c)", "def main_CL():\r\n version=1.0\r\n st = time.time()\r\n parser = OptionParser(usage=usage(), version='%s'%version)\r\n parser.add_option(\"-n\", \"--days\", dest=\"days\", default=\"30\", help=\"Days ago, defaults to 30 days\")\r\n parser.add_option(\"-s\", \"--stream\", dest=\"stream\", default=\"all\", help=\"Code Stream, defaults to all\")\r\n parser.add_option(\"-u\", \"--usage\", dest=\"usage\", default=\"\", help=\"Show usage information\")\r\n parser.add_option(\"-d\", \"--debug\", dest='debug', action=\"count\", help=\"The debug level, use multiple to get more.\")\r\n (options, args) = parser.parse_args()\r\n\r\n if 
options.debug > 1:\r\n print ' days %s' %(options.days)\r\n print ' args: %s' %args\r\n else:\r\n options.debug = 0\r\n \r\n if options.usage:\r\n print usage()\r\n else:\r\n obj=ListCRs()\r\n obj.setUp()\r\n since = options.days \r\n \r\n #stream = str(stream).strip() \r\n obj.listCRsCL(since, options, st) \r\n \r\n print '\\nTook a total of %3.2f secs -^' %(time.time()-st)", "def main():\n\n ocp = prepare_ocp(\n biorbd_model_path=\"models/cube_and_line.bioMod\",\n n_shooting=30,\n final_time=2,\n initialize_near_solution=True,\n )\n\n # --- Solve the program --- #\n sol = ocp.solve(Solver.IPOPT(show_online_optim=platform.system() == \"Linux\"))\n\n # --- Show results --- #\n sol.animate()", "def roof_cc(length, width, overhang=1, wall_height=3, roof_height=4):\n a = min(0.1 * length, 0.1 * width, 0.4 * (wall_height + 0.5 * roof_height))\n\n area = (length + overhang)*(width + overhang)\n area_3 = 8*a**2\n area_1 = (length - 2)*(width - 4*a)\n area_2 = area - area_3 - area_1\n return area, area_1, area_2, area_3", "def main():\n lake_drivers = Dynamic_Lake_Drivers()\n #lake_drivers.prepare_orography_ICE5G_0k_uncorrected()\n #lake_drivers.prepare_orography_ICE5G_0k_corrected()\n #lake_drivers.prepare_orography_ICE6G_21k_corrected()\n #lake_drivers.prepare_river_directions_with_depressions_from_glac1D()\n #lake_drivers.evaluate_glac1D_ts1900_basins()\n #import time\n # start = time.time()\n #lake_drivers.evaluate_ICE6G_lgm_basins()\n # end = time.time()\n # print(end - start)\n #lake_drivers.prepare_basins_from_glac1D()\n #lake_drivers.extract_lake_volumes_from_glac1D_basins()\n #lake_drivers.connect_catchments_for_glac1D()\n lake_drivers.connect_catchments_for_transient_run()\n #lake_drivers.extract_volumes_for_transient_run()\n #lake_drivers.add_10min_rmouth_to_transient_data()\n #lake_drivers.expand_transient_data_catchments_to_include_rmouth()\n #lake_drivers.remove_no_data_values_from_upscaled_MERIT_correction_set()\n #lake_drivers.remove_disconnected_points_from_slm()", "def cutNow(self,leftMonomers,definitive=False):\n # A1 ~ Unif[0,N-1-(Nc-1)(g-1)[\n for A1 in leftMonomers:\n A2 = A1 + 1\n # Update the Laplacian matrix\n self.LaplacianMatrix[A1,A2] = 0\n self.LaplacianMatrix[A2,A1] = 0\n self.LaplacianMatrix[A1,A1] -= 1 \n self.LaplacianMatrix[A2,A2] -= 1 \n # Update the adjacency list\n self.cutEdge(A1,A2)\n # Add new free ends to freeMonomers list\n self.freeMonomers.extend([A1,A2])\n \n if definitive:\n self.generatePossibleEncounters()\n# \n for i in range(len(self.freeMonomers)):\n self.freeMonomersNames[self.freeMonomers[i]] = chr(97 + i//2) + str(1 + i%2)", "def get_landmarks(self, sorted_cut_endo_pts, lowest_pt_idx, display_opt):\n\n # make polydata out of sorted endo pts\n numPoints = sorted_cut_endo_pts.shape[0]\n vtk_float_arr = numpy_support.numpy_to_vtk(num_array=np.asarray(sorted_cut_endo_pts), deep=True, array_type=vtk.VTK_FLOAT)\n vtkpts = vtk.vtkPoints()\n vtkpts.SetData(vtk_float_arr)\n cut_endo_poly = vtk.vtkPolyData()\n cut_endo_poly.SetPoints(vtkpts)\n\n # now make lines\n polyLine = vtk.vtkPolyLine()\n polyLine.GetPointIds().SetNumberOfIds(numPoints)\n\n for i in range(numPoints):\n polyLine.GetPointIds().SetId(i, i) # from 0,1 then 2,3 then 4,5 ...\n\n cells = vtk.vtkCellArray()\n cells.InsertNextCell(polyLine)\n\n # add points and lines to polydata container\n cut_endo_poly.SetLines(cells)\n\n # create tree for intersection process\n bspTree = vtk.vtkModifiedBSPTree() # bsp tree is much faster than obbtree due to rejection test\n 
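# the locator is built once up front; every left/right probe line below reuses it for fast intersection tests\n 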
bspTree.SetDataSet(cut_endo_poly)\n bspTree.BuildLocator()\n\n top_left = np.asarray(sorted_cut_endo_pts[0])\n top_right = np.asarray(sorted_cut_endo_pts[-1])\n low_pt = np.asarray(sorted_cut_endo_pts[lowest_pt_idx])\n\n # get direction of lines\n line_dir = normalize(top_right - top_left) # top_pt[0] to top_pt[1]\n\n # add distance on both sides to make sure the line can pass through the entire LV horizontally\n dist = np.linalg.norm(top_right - top_left)\n pSource_0 = top_right + dist*line_dir\n pTarget_0 = top_left - dist*line_dir\n\n # determine the length to travel from top to bottom\n top_center = (top_right + top_left)/2.0\n midline = normalize(low_pt - top_center)\n max_dist = np.linalg.norm(low_pt - top_center)\n\n left_pts = []\n right_pts = []\n\n weights = np.linspace(0.00, 0.98, self.numSamples)\n\n for i in range(self.numSamples):\n # determine source and target points\n pSource = pSource_0 + weights[i]*max_dist*midline\n pTarget = pTarget_0 + weights[i]*max_dist*midline\n center = (pSource + pTarget) / 2.0\n\n # set empty variables\n subId = vtk.mutable(0)\n pcoords = [0, 0, 0]\n t = vtk.mutable(0)\n left = [0, 0, 0]\n right = [0, 0, 0]\n\n # # run interesect command\n # pointid1 = bspTree.IntersectWithLine(pSource, pTarget, 0.001, t, left, pcoords, subId)\n # pointid2 = bspTree.IntersectWithLine(pTarget, pSource, 0.001, t, right, pcoords, subId)\n\n # intersect with line that goes from source to center or target to center\n pointid1 = bspTree.IntersectWithLine(pSource, center, 0.001, t, left, pcoords, subId)\n pointid2 = bspTree.IntersectWithLine(pTarget, center, 0.001, t, right, pcoords, subId)\n\n left_pts.append(list(left))\n right_pts.append(list(right))\n\n if display_opt:\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cut_endo_poly)\n\n all_act = vtk.vtkActor()\n all_act.SetMapper(mapper)\n\n right_act = include_points(left_pts, len(left_pts), 4, (1,0,0))\n left_act = include_points(right_pts, len(right_pts), 4, (1,0,0))\n low_pt_act = include_points(list(low_pt), 1, 10, (1,0,1))\n\n top_right_act = include_points(list(top_right), 1, 10, (0,0,1))\n top_left_act = include_points(list(top_left), 1, 10, (0,0,1))\n\n ren = vtk.vtkRenderer()\n ren.AddActor(all_act)\n ren.AddActor(right_act)\n ren.AddActor(left_act)\n ren.AddActor(top_right_act)\n ren.AddActor(top_left_act)\n ren.AddActor(low_pt_act)\n\n vtk_show(ren)\n\n # ensure that left and right points have the same number of points as numSamples\n if len(left_pts) != self.numSamples or len(right_pts) != self.numSamples:\n print('Either left or right points do not have the same number of points as numSamples!')\n\n return left_pts, right_pts", "def main():\n # Verbosity: 1=Selection Results, >1 is various debugging information\n verbose = 0\n print \"build_all.py running with verbose=%s\"%(str(verbose))\n if verbose:\n print \"Fiducial Cut: \",fid_cut_hex,\"(apethum, z_min, z_max)\"\n print \"Max Drift Distance = %.4f us\"%(max_drift_time)\n\n tree = get_data_tree(list='All') # Golden All\n \n # We use the EXOFitting processed tree to get high-level physical quantities\n # like the anticorrelated energy, etc. 
\n #ptree_file = ROOT.TFile(preprocessed_tree)\n #ptree = ROOT.Get(\"dataTree\")\n #if verbose: print \"Indexing EXOFitting PreProcessed tree\"\n #ptree.BuildIndex(\"runNum\", \"eventNum\")\n #if verbose: print \" ...done\"\n\n cuts = \"\"\n\n #There must be at least 1 scintillation cluster:\n #cuts = \"@fScintClusters.size()>=1\"\n #cuts = \"(fScintClusters.GetCountsOnAPDPlane(0)+fScintClusters.GetCountsOnAPDPlane(1))>20000\"\n\n # The minimum scintinlation counts must be > 20000 and <70000\n # I observe that three peaks presumable alphas are at 38500, 42200, and 55000\n # So Rn222=5.4MeV, Po218=6MeV, Po214=7.7MeV\n # calibrate:: y=mx+b, m=6167, b=5198\n #cuts = \"fScintClusters.fRawEnergy>20000 && fScintClusters.fRawEnergy<70000\"\n #cuts += \"&& fScintClusters.fRawEnergy>22000 && fScintClusters.fRawEnergy<80000\"\n #cuts += \" && Sum$(fAPDs.fRawCounts) > 8000\"\n\n # Ignore Noise and Muon tagged events\n cuts +=\"fEventHeader.fTaggedAsNoise==0 && fEventHeader.fTaggedAsMuon==0\" \n\n # That's the last of the cuts, lets show the user what the cut looks like\n print \"Applying Cuts to data: \\n%s\"%cuts\n\n #Draw is the fastest method to apply cuts, in the end what we want is a reduced data list\n # to perform a more targeted analysis...\n tree.Draw(\">>+elist_alpha_canidates\",cuts,\"goff\")\n elist_alpha_canidates = ROOT.gDirectory.Get(\"elist_alpha_canidates\")\n print \"There are %d events passing the initial cuts\"%elist_alpha_canidates.GetN()\n\n #Now we have to look at events passing the cuts individually\n tf = ROOT.TFile(\"all.root\",\"RECREATE\")\n Rntree = tree.CloneTree(0)\n \n for i in range(elist_alpha_canidates.GetN()):\n # Print Progress\n if i%int(elist_alpha_canidates.GetN()/20) == 0:\n print \"%d of %d\"%(i,elist_alpha_canidates.GetN())\n\n #Grab the event data\n tree.GetEntry(elist_alpha_canidates.GetEntry(i))\n #ed = tree.EventBranch\n #if verbose>1: print_event_data(ed,verbose)\n\n #is_alphaish = check_alpha_like(ed,verbose)\n \n #is the event a fully reconstructed BiPo?\n #is_bipo = check_full_BiPo(ed,verbose)\n\n # Case1 (position matched Bi-Po)\n #is_case1 = check_case1(ed,verbose)\n #print \"BiPo=%s, Case1=%s\"%(is_bipo, is_case1) \n #raw_input('<hit any key to continue>')\n #if is_bipo or is_alphaish:\n # Write the EventData of events which pass any of our selection criteria\n # to ROOT file\n Rntree.Fill()\n\n Rntree.AutoSave()", "def set_grids(self, core_size, patch_shape, psf_model_shape):\n # core foo\n ravel_size = patch_shape[0] * patch_shape[1]\n self.core_shape = (core_size, core_size)\n xcenter = (patch_shape[0] - 1) / 2\n ycenter = (patch_shape[1] - 1) / 2\n buff = (core_size - 1) / 2\n xcore = xcenter - buff, xcenter + buff + 1\n ycore = ycenter - buff, ycenter + buff + 1\n core_ind = np.arange(ravel_size, dtype=np.int).reshape(patch_shape)\n self.core_ind = core_ind[xcore[0]:xcore[1], ycore[0]:ycore[1]].ravel()\n\n # grid defs\n self.psf_grid, self.patch_grid = get_grids(patch_shape, psf_model_shape)", "def run():\n step = 0\n start_change = 0\n duration = 0\n # we start with phase 0 where NS has left green\n traci.trafficlight.setPhase(\"Origin\", 0)\n controlled_lanes = traci.trafficlight.getControlledLanes(\"Origin\")\n # print(rg.get_wait_time_for_light(\"Origin\"))\n while traci.simulation.getMinExpectedNumber() > 0:\n traci.simulationStep()\n wait_times = rg.get_network_waiting_time(summed=True)[0]\n NS = wait_times[0]+wait_times[2]\n EW = wait_times[1]+wait_times[3]\n if(EW>(20+NS)):\n if(traci.trafficlight.getPhase(\"Origin\") < 2):\n 
traci.trafficlight.setPhase(\"Origin\", 2)\n start_change = step\n duration=0\n else:\n if(step-start_change>6):\n if(duration<8):\n #Left turns for EW\n traci.trafficlight.setPhase(\"Origin\", 3)\n duration+=1\n else:\n #Straight for EW\n traci.trafficlight.setPhase(\"Origin\", 4)\n elif(NS>(20+EW)):\n if(traci.trafficlight.getPhase(\"Origin\") == 3 or traci.trafficlight.getPhase(\"Origin\") == 4):\n traci.trafficlight.setPhase(\"Origin\", 5)\n start_change = step\n duration=0\n else:\n if(step-start_change>6):\n if(duration<8):\n #Left turns for EW\n traci.trafficlight.setPhase(\"Origin\", 0)\n duration+=1\n else:\n #Straight for EW\n traci.trafficlight.setPhase(\"Origin\", 1)\n step += 1\n\n traci.close()\n sys.stdout.flush()", "def find_cosmics_in_cut(x, cut_wave, cut_brightest_line, line_wavelength = 0.,\n kernel_median_cosmics = 5, cosmic_higher_than = 100, extra_factor = 1., plot=False, verbose=False):\n \n gc_bl=signal.medfilt(cut_brightest_line,kernel_size=kernel_median_cosmics)\n max_val = np.abs(cut_brightest_line-gc_bl)\n\n gc=signal.medfilt(cut_wave,kernel_size=kernel_median_cosmics)\n verde=np.abs(cut_wave-gc)-extra_factor*max_val\n \n cosmics_list = [i for i, x in enumerate(verde) if x > cosmic_higher_than]\n \n if plot:\n ptitle=\"Cosmic identification in cut\"\n if line_wavelength != 0 : ptitle=\"Cosmic identification in cut at \"+np.str(line_wavelength)+\" $\\mathrm{\\AA}$\" \n plot_plot(x,verde, ymin=0,ymax=200, hlines=[cosmic_higher_than], ptitle=ptitle, ylabel=\"abs (cut - medfilt(cut)) - extra_factor * max_val\")\n \n if verbose:\n if line_wavelength == 0:\n print(\"\\n> Identified\", len(cosmics_list),\"cosmics in fibres\",cosmics_list)\n else:\n print(\"\\n> Identified\", len(cosmics_list),\"cosmics at\",np.str(line_wavelength),\"A in fibres\",cosmics_list)\n return cosmics_list", "def run():\n\n data = parse_data()\n\n wide = 25\n tall = 6\n\n layers = []\n for index in range(0, len(data), wide * tall):\n item = data[index : index + wide * tall]\n item = [item[x : x + wide] for x in range(0, wide * tall, wide)]\n layers.append(item)\n\n lowest, layer = get_layer_containing_fewest_zeroes(layers)\n\n ones = sum([Counter(l).get(\"1\", 0) for l in layer])\n twos = sum([Counter(l).get(\"2\", 0) for l in layer])\n assert (ones * twos) == 1820\n\n display_layers(layers, wide, tall) # ckuj", "def polyMapCut(*args, caching: bool=True, constructionHistory: bool=True, moveratio:\n Union[float, bool]=0.0, name: AnyStr=\"\", nodeState: Union[int, bool]=0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def get_coreids(self):\n return range(0, self.get_ncores()) # default behaviour for x86", "def __init__(self, core):\n\n self.core = core", "def label_modes(trip_list, silent=True):\n\n\n if silent == False:\n print('Preparing to label modes of travel for ' \\\n + str(len(trip_list)) + ' trips.')\n\n loop_counter = 0\n loop_size = len(trip_list)\n for doc in trip_list:\n\n if silent == False:\n loop_counter = loop_counter + 1\n if loop_counter % 10000 == 0:\n print('Labeling modes. 
Finished ' + str(loop_counter) \\\n + ' trips.')\n\n time_spent_driving = 0\n time_spent_walking = 0\n time_spent_chilling = 0\n time_spent_bogus = 0\n for i in range(1,len(doc['reduction'])):\n if (float(doc['reduction'][i]['velocity']) >= 2.3):\n doc['reduction'][i]['mode'] = 'driving'\n\n elif (float(doc['reduction'][i]['velocity']) < 2.3 and float(doc['reduction'][i]['velocity']) > 0):\n doc['reduction'][i]['mode'] = 'walking'\n\n elif (float(doc['reduction'][i]['velocity']) == 0.0):\n doc['reduction'][i]['mode'] = 'chilling'\n\n if (float(doc['reduction'][i]['velocity']) > 22.22):\n doc['reduction'][i]['mode'] = 'bogus'\n\n\n for i in range(1,len(doc['reduction']) - 1):\n path_length = 0\n\n if (doc['reduction'][i]['mode'] == 'driving'):\n for j in range(i+1,len(doc['reduction'])):\n last_intersection_id = doc['reduction'][j]['IntersectionID']\n if (doc['reduction'][j]['mode'] == 'walking'): path_length = path_length + 1\n elif (doc['reduction'][j]['mode'] == 'driving' or doc['reduction'][j]['mode'] == 'bogus'): break\n\n if (path_length > 5 or last_intersection_id == doc['reduction'][i]['IntersectionID']):\n for k in range(i+1,j):\n if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'walking'\n else :\n for k in range(i+1,j):\n if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'driving'\n\n if (doc['reduction'][i]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n\n if (doc['reduction'][-1]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n\n\n duration_of_trip = float(doc['duration_of_trip'])\n doc['time_percentage_driving'] = str(time_spent_driving/duration_of_trip*100)\n doc['time_percentage_walking'] = str(time_spent_walking/duration_of_trip*100)\n doc['time_percentage_chilling'] = str(time_spent_chilling/duration_of_trip*100)\n doc['time_percentage_bogus'] = str(time_spent_bogus/duration_of_trip*100)\n\n if silent == False:\n print('Done labeling mode of travel. 
Returning list of length ' \\\n + str(len(trip_list)) + '.')\n\n return trip_list", "def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 
'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))", "def lcs(hh, vv):\n B=LCS.getB(hh,vv)\n trac=LCS.backtrack(B);\n cs=[ hh[h-1] for v,h,k in trac if k=='1' ]\n return cs", "def clusterparts(parts, block_len):\n parts = sorted(parts, key=op.itemgetter(-1))\n global opt\n clusters = [[parts[0][-1]]]\n \n # assign all parts to clusters\n for i in range(1,len(parts)):\n x, y = parts[i][-1]\n \n # detect box already in cluster\n fc = []\n for k,cl in enumerate(clusters):\n for xc,yc in cl:\n ar = intersectarea((xc,yc),(x,y),block_len)\n intrat = float(ar)/(block_len*block_len)\n if intrat > float(opt.blint):\n if not fc: clusters[k].append((x,y))\n fc.append(k)\n break\n \n # if this is new cluster\n if not fc:\n clusters.append([(x,y)])\n else:\n # re-clustering boxes if in several clusters at once\n while len(fc) > 1:\n clusters[fc[0]] += clusters[fc[-1]]\n del clusters[fc[-1]]\n del fc[-1]\n \n item = op.itemgetter\n # filter out small clusters\n clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]\n \n # filter out clusters, which doesn`t have identical twin cluster\n clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]\n \n return clusters", "def get_GEOSChem4flightnum(flight_ID='C225', res='0.5x0.625', sdate=None,\n RunSet='MERRA2-0.5-initial', resample_data=True,\n CoreRunsOnly=False,\n debug=False):\n # Where is the extract GEOS-CF data?\n RunDict = get_dict_of_GEOSChem_model_output(res=res, RunSet=RunSet,\n CoreRunsOnly=CoreRunsOnly)\n # Asume just one run for now...\n# folder = RunDict[ list(RunDict.keys())[0] ]\n dfs = {}\n for Run in list(RunDict.keys()):\n # Extract the data for a specific flight\n folder = RunDict[Run]\n files2use = glob.glob(os.path.join(folder, '*plane.log*'))\n files2use = list(sorted(files2use))\n # Get start date of flight\n # (use the plane-flight from same day as sdate)\n if isinstance(sdate, type(None)):\n dfS = get_summary4flight(flight_ID=flight_ID)\n sdate = dfS.index.values.min()\n edate = dfS.index.values.max()\n sdate, edate = AC.dt64_2_dt([sdate, edate])\n sdate_str = sdate.strftime('%Y%m%d')\n file2use = [i for i in files2use if sdate_str in i]\n AssStr = 'WARNING: More than one ({}) planeflight file found! 
- {}'\n assert len(file2use) <= 1, AssStr.format(len(file2use), file2use)\n AssStr = 'WARNING: No planeflight files found in folder: {}'\n assert len(file2use) != 0, AssStr.format(folder)\n file2use = file2use[0]\n # - Instead do this manually for now\n # as cannot as output issue in v12.9 (fixed in runs > initial 4x5)\n try:\n # - Use standard AC_tool extraction\n # Get Header information from first file\n vars, sites = AC.get_pf_headers(file2use, debug=debug)\n # Extract all points from file\n df, vars = AC.pf_csv2pandas(file=file2use, vars=vars, epoch=True,\n r_vars=True)\n except ValueError:\n # Open file and save the data into\n with open(file2use, 'rb') as file:\n lines = [i for i in file]\n # Extract as raw data in chunks\n lines_1 = lines[0::2]\n header_1 = lines_1[0].decode('utf-8').split()\n data_1 = [i.decode('utf-8').split() for i in lines_1[1:]]\n df = pd.DataFrame(data_1, columns=header_1)\n lines_2 = lines[1::2]\n header_2 = lines_2[0].decode('utf-8').split()\n data_2 = [i.decode('utf-8').split() for i in lines_2[1:]]\n df2 = pd.DataFrame(data_2, columns=header_2)\n # Now combine\n df = pd.concat([df, df2], axis=1)\n # Now process the meta data/update type formats\n # TODO: below could be faster...\n # Use infer obects? - df.infer_objects\n dtypes = {'POINT': object, 'TYPE': str,\n 'YYYYMMDD': str, 'HHMM': str}\n cols2use = [i for i in df.columns if i not in dtypes.keys()]\n df[cols2use] = df[cols2use].apply(pd.to_numeric)\n # Add a datetime index\n df = AC.DF_YYYYMMDD_HHMM_2_dt(df, rmvars=None, epoch=False)\n df.index.name = None\n # Update the variable names\n d = PF_TRAXXX_2TracerName(None, folder=folder, RTN_dict=True)\n d = dict([('TRA_{:0>3}'.format(i), d[i]) for i in d.keys()])\n df = df.rename(columns=d)\n # Add derived (GEOSchem) variables to df\n df = add_derived_GEOSChem_specs2df(df)\n # Resample the data?\n if resample_data:\n df = df.resample('1T').mean()\n # save df\n dfs[Run] = df.copy()\n del df\n return dfs", "def get_feature_masks(feature, mask_dimension, road_width_px, include_bezier=True, driving_line_road_px=5, bezier_offset=(0,0)):\n np_mask_dim = (mask_dimension[1], mask_dimension[0])\n feature_masks = []\n to_feature = np.zeros(np_mask_dim)\n col = (255,255,255)\n feature_point = feature[0]\n approach_point = feature[1]\n exit_point = feature[2]\n cv2.line(to_feature, approach_point, feature_point, col, thickness=road_width_px)\n feature_masks.append(to_feature.astype(np.uint8))\n print(\"road_width_px=\",road_width_px)\n \n n = len(feature)\n print(n)\n if len(feature) > 2:\n for i in range(2, n):\n mask = np.zeros(np_mask_dim)\n cv2.line(mask, feature_point, feature[i], col, thickness=road_width_px)\n feature_masks.append(mask.astype(np.uint8))\n \n \n p1 = np.add(feature_point, bezier_offset)\n p2 = np.add(approach_point, bezier_offset)\n p3 = np.add(exit_point, bezier_offset)\n print(\"driving_line_road_px=\",driving_line_road_px)\n curve_mask=bezier.get_curve_mask(p1, p2, p3, width=driving_line_road_px, img_dimensions=mask_dimension)[:,:,0]\n \n print(\"TEST\")\n print(mask_dimension)\n print(curve_mask.shape)\n\n if include_bezier:\n feature_masks.append(curve_mask)\n\n combined_mask = np.sum(feature_masks, axis=0).astype(np.uint8)\n\n cv2.imshow(\"curve_mask\",curve_mask)\n cv2.imshow(\"combined_mask\",combined_mask)\n cv2.waitKey(0)\n\n return feature_masks, combined_mask, curve_mask", "def identify_interface_residues_4(reference_pdb, reference_withH_pdb, trajectory, cutoffs, frames, is_5udc=False):\n\n # Load reference PDB\n forcefield = 
ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')\n pdb = PDBFile(reference_pdb)\n modeller = Modeller(pdb.topology, pdb.positions)\n modeller.addHydrogens(forcefield, pH=7.4)\n PDBFile.writeFile(modeller.topology, modeller.positions, open(reference_withH_pdb, 'w'), keepIds=True)\n solute_reference_mdtraj = md.load_pdb(reference_withH_pdb)\n solute_reference_openmm = PDBFile(reference_withH_pdb)\n\n # Load trajectory\n trajectory = md.load_dcd(trajectory, top=solute_reference_mdtraj)\n\n # Get interface residues\n rows = []\n for cutoff in cutoffs: \n for frame in frames:\n if not is_5udc: # Here, H represents antibody heavy chain and L reprsents antibody light chain (not necessarily chain IDs)\n contacts_H_F = identify_contacts(0, 2, solute_reference_openmm, trajectory, frame, cutoff)\n contacts_H_X = identify_contacts(0, 3, solute_reference_openmm, trajectory, frame, cutoff)\n contacts_L_F = identify_contacts(1, 2, solute_reference_openmm, trajectory, frame, cutoff)\n contacts_L_X = identify_contacts(1, 3, solute_reference_openmm, trajectory, frame, cutoff)\n else:\n contacts_H_F = identify_contacts(4, 6, solute_reference_openmm, trajectory, frame, cutoff)\n contacts_H_X = identify_contacts(4, 7, solute_reference_openmm, trajectory, frame, cutoff)\n contacts_L_F = identify_contacts(5, 6, solute_reference_openmm, trajectory, frame, cutoff)\n contacts_L_X = identify_contacts(5, 7, solute_reference_openmm, trajectory, frame, cutoff)\n\n chain_H = []\n chain_L = []\n chain_F = []\n chain_X = []\n \n # Get key residues in each chain\n for pair in contacts_H_F:\n chain_H.append(pair[0])\n chain_F.append(pair[1])\n \n for pair in contacts_H_X:\n chain_H.append(pair[0])\n chain_X.append(pair[1])\n\n for pair in contacts_L_F:\n chain_L.append(pair[0])\n chain_F.append(pair[1])\n \n for pair in contacts_L_X:\n chain_L.append(pair[0])\n chain_X.append(pair[1])\n \n row = [os.path.basename(reference_pdb)[:-4], cutoff, frame, sorted(set(chain_H)), sorted(set(chain_L)), sorted(set(chain_F)), sorted(set(chain_X))]\n rows.append(row)\n return rows", "def pareto_frontier(cmrf,featlist) :\n\tQ = []\n\ttaboodict = {}\n\tnStates = len(featlist)\n\tfeat1,feat2 = featlist\n\tEaxa,Xa = cmrf.decode(feat1)\n\tEbxb,Xb = cmrf.decode(feat2)\n\tif Xa == Xb : \n\t\treturn [Xa],[(Eaxa,Ebxb)]\n\tEaxb = cmrf.score(Xb,feat1)\n\tEbxa = cmrf.score(Xa,feat2)\n\tQ.append((Xa,Xb))\n\tfrontier,frontier_energy = [],[]\n\tfrontier.extend([Xa,Xb])\n\tfrontier_energy.extend([(Eaxa,Ebxa),(Eaxb,Ebxb)])\n\ttaboodict[(Eaxa,Ebxa)] = 1;\n\ttaboodict[(Eaxb,Ebxb)] = 1;\n\twhile len(Q) > 0 :\n\t\t### Optimize \n\t\tXa,Xb = Q[0]\n\t\tQ = Q[1:] # Dequeue\n\t\tEaxb = cmrf.score(Xb,feat1)\n\t\tEbxa = cmrf.score(Xa,feat2)\t\n\t\tEaxa = cmrf.score(Xa,feat1)\n\t\tEbxb = cmrf.score(Xb,feat2)\t\n\t\tm = (Ebxa - Ebxb)/(Eaxa-Eaxb)\n\t\tif m > 0 : \n\t\t\t#stop()\n\t\t\tsys.stderr.write(\"### WARNING : Slope > 0. 
Cvxhull failed\")\n\t\t\treturn frontier,frontier_energy\n\t\tthetaa = -m/(1-m)\n\t\tthetab = 1/(1-m)\n\t\ttmrf = TMRF(cmrf,[thetaa,thetab],[feat1,feat2])\n\t\tXab = tmrf.decode()[1]\n\t\tEaxab = cmrf.score(Xab,feat1)\n\t\tEbxab = cmrf.score(Xab,feat2)\n\t\tif Xab != Xa and Xab != Xb and \\\n\t\t\tnot taboodict.has_key((Eaxab,Ebxab)) :\n\t\t\t# Check almost equal condition\n\t\t\tif any(map(lambda(x):almost_eq(Eaxab,x[0] or \\\n\t\t\t\talmost_eq(Ebxab,x[1])),taboodict.keys())) : \n\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\tfrontier.append(Xab)\n\t\t\tfrontier_energy.append((Eaxab,Ebxab))\n\t\t\ttaboodict[(Eaxab,Ebxab)]=1\n\t\t\tQ.extend([(Xa,Xab),(Xab,Xb)])\n\t# Calculate energy of frontier elements\t\n\treturn frontier,frontier_energy", "def __init__(self, workplane, measures):\n\n cq.Workplane.bracket = utilities.bracket\n cq.Workplane.transformedWorkplane = utilities.transformedWorkplane\n cq.Workplane.bolt = utilities.bolt\n cq.Workplane.cutEachAdaptive = utilities.cutEachAdaptive\n\n self.model = workplane\n self.debug = False\n self.measures = measures\n m = self.measures\n\n # The bracket lengths are measured at the outside, but the construction actually uses a \n # central cuboid block with two attached brackets. Adapting the measures accordingly.\n m.center_block = Measures(\n # Naming is as seen from the horizontal leg.\n width = max(m.horizontal_leg.width, m.vertical_leg.width),\n depth = m.vertical_leg.height,\n height = m.horizontal_leg.height\n )\n m.horizontal_leg.depth -= m.center_block.depth\n m.vertical_leg.depth -= m.center_block.height\n\n # Create hole specs which combine the other hole measures in the format expected by bolthole().\n m.horizontal_leg.hole_specs = [\n {\n \"diameter\": m.horizontal_leg.hole_diameters[i] if isinstance(m.horizontal_leg.hole_diameters, list) else m.horizontal_leg.hole_diameters,\n \"clamp_length\": m.horizontal_leg.clamp_lengths[i] if isinstance(m.horizontal_leg.clamp_lengths, list) else m.horizontal_leg.clamp_lengths, \n \"nuthole_size\": m.horizontal_leg.nuthole_sizes[i] if isinstance(m.horizontal_leg.nuthole_sizes, list) else m.horizontal_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.vertical_leg.depth # Just choose something large enough for cutting. \n }\n for i in range(m.horizontal_leg.hole_count)\n ]\n m.vertical_leg.hole_specs = [\n {\n \"diameter\": m.vertical_leg.hole_diameters[i] if isinstance(m.vertical_leg.hole_diameters, list) else m.vertical_leg.hole_diameters,\n \"clamp_length\": m.vertical_leg.clamp_lengths[i] if isinstance(m.vertical_leg.clamp_lengths, list) else m.vertical_leg.clamp_lengths, \n \"nuthole_size\": m.vertical_leg.nuthole_sizes[i] if isinstance(m.vertical_leg.nuthole_sizes, list) else m.vertical_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.horizontal_leg.depth # Just choose something large enough for cutting. 
\n }\n for i in range(m.vertical_leg.hole_count)\n ]\n\n # TODO: Initialize missing measures with defaults.\n\n self.build()", "def test_climb(self):\n fcs = flight_control.FlightControl(**self.start)\n\n altitude = 120.0\n pitch_angle = 10.0\n alt_cmd = cmds.SetAltitudeCmd(120.0, 10.0)\n fcs.set_altitude_cmd(alt_cmd)\n\n #heading = 150.0\n #heading_cmd = cmds.SetHeadingCmd(heading)\n #fcs.set_direction_cmd(heading_cmd)\n\n t = self.tmin\n while t < self.tmax:\n fcs.tick(t, self.dt)\n t += self.dt\n\n self.assertAlmostEqual(fcs.platform.z, altitude, delta=1.0)\n self.assertAlmostEqual(fcs.platform.theta_c, 0.0, delta=1.0)", "def core_number(G):\n if nx.number_of_selfloops(G) > 0:\n msg = (\n \"Input graph has self loops which is not permitted; \"\n \"Consider using G.remove_edges_from(nx.selfloop_edges(G)).\"\n )\n raise NetworkXError(msg)\n degrees = dict(G.degree())\n # Sort nodes by degree.\n nodes = sorted(degrees, key=degrees.get)\n bin_boundaries = [0]\n curr_degree = 0\n for i, v in enumerate(nodes):\n if degrees[v] > curr_degree:\n bin_boundaries.extend([i] * (degrees[v] - curr_degree))\n curr_degree = degrees[v]\n node_pos = {v: pos for pos, v in enumerate(nodes)}\n # The initial guess for the core number of a node is its degree.\n core = degrees\n nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}\n for v in nodes:\n for u in nbrs[v]:\n if core[u] > core[v]:\n nbrs[u].remove(v)\n pos = node_pos[u]\n bin_start = bin_boundaries[core[u]]\n node_pos[u] = bin_start\n node_pos[nodes[bin_start]] = pos\n nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]\n bin_boundaries[core[u]] += 1\n core[u] -= 1\n return core", "def clip_scaffold_loops(self):\r\n start = 0\r\n index = 0\r\n ie = len(self.walk)\r\n while index < ie:\r\n segment = None\r\n try:\r\n segment = self.walk[index+1]\r\n except IndexError:\r\n self.remove_biggest_loop_in_range(start, index)\r\n return\r\n if segment is None or segment.value == 'RUNG':\r\n # Segment is essential.\r\n if start != index:\r\n ie -= self.remove_biggest_loop_in_range(start, index)\r\n start = index + 2\r\n index += 2", "def GetMatchedSubContourLists(\n scListRef,\n scList,\n allValsByFrame,\n orderOfSCsByValue,\n splitLength=1,\n fixedNumInteriorPoints=None,\n):\n ## NOT DONE! 
MERGE LATER??\n return simplifiedSCListRef, simplifiedSCList", "def main():\n try:\n stdscr = init()\n\n lanes = [\n (0.0, \"wwhhwwwhhwwwhhwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww\"),\n (-3.0,\",,,llll,,llllll,,,,,,,llll,,,,,ll,,,lllll,,,,llllll,,,,lllll,,,,\"),\n (3.0, \",,,,llll,,,,,llll,,,,llll,,,,,,,,,llll,,,,,ll,,,,,,llllll,,,,,,,\"),\n (2.0, \",,lll,,,,,lll,,,,,ll,,,,,lll,,,lll,,,,ll,,,,llll,,,,ll,,,,,,ll,,\"),\n (0.0, \"pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp\"),\n (-3.0,\"....bbbb.......bbbb....bbbb..........bbbb........bbbb....bbbb...\"),\n (3.0, \".....yy..yy....yy....yy.....yy........yy..yy.yy......yy.......yy\"),\n (-4.0,\"..xx.....xx.........xx..xx........xx...xx...xx....xx...xx...xx..\"),\n (2.0, \"..yy.....yy.......yy.....yy......yy..yy.yy.......yy....yy.......\"),\n (0.0, \"pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp\")\n ]\n\n objs = {\n # Bus\n \"b\": (64, 16, 0),\n # Log\n \"l\": (64, 32, 8),\n # Car 1\n \"x\": (64, 64, 0),\n # Car 2\n \"y\": (0, 0, 64),\n # Wall\n \"w\": (64, 0, 0),\n # Home\n \"h\": (0, 32, 0),\n # Water\n \",\": (16, 32, 64),\n # Pavement\n \"p\": (16, 16, 16),\n # Road\n \".\": (0, 0, 0)\n }\n\n frog_x, frog_y = 8.0, 9.0\n\n timer = 0.0\n key = ''\n while key != ord('q'):\n # Is frog in danger?\n if get_danger(int(frog_x), int(frog_y)):\n frog_x, frog_y = 8.0, 9.0\n\n key = stdscr.getch()\n if key == curses.KEY_UP and frog_y > 0.0:\n frog_y -= 1.0\n elif key == curses.KEY_DOWN and frog_y < 9.0:\n frog_y += 1.0\n elif key == curses.KEY_LEFT:\n frog_x -= 1.0\n elif key == curses.KEY_RIGHT:\n frog_x += 1.0\n\n if frog_x < 0.0:\n frog_x = 0.0\n if frog_x > 15:\n frog_x = 15.0\n\n # Deal with movement of logs\n if frog_y <= 3.0:\n # The game is using fractions to control the speed of moving objects.\n # This can cause problems for the sideways movement of the frog\n # as it is not quite in sync (fractionally speaking). The worst\n # case is that the frog may die after seemingly landing on the end of\n # the log (because the log moves slightly ahead of the frog).\n # The solution here is to give the frog the same fractional value as\n # the log row it has landed on:\n log_pos = get_log_pos(timer, lanes[int(frog_y)][0])\n log_denom, _ = math.modf(log_pos)\n _, frog_numer = math.modf(frog_x)\n frog_x = frog_numer + log_denom\n # Now factor in log movement - as we're about to move these\n frog_x -= 0.01 * lanes[int(frog_y)][0]\n\n for y, lane in enumerate(lanes):\n start_pos = int(get_log_pos(timer, lane[0]))\n for i in range(16):\n obj = lane[1][(start_pos + i) % 64]\n draw_pixel(i, y, objs[obj])\n set_danger(i, y, obj)\n\n # Draw frog\n draw_pixel(int(frog_x), int(frog_y), (64, 128, 64))\n\n unicornhathd.show()\n time.sleep(0.01)\n timer += 0.01\n finally:\n term()" ]
[ "0.5738572", "0.5688451", "0.547744", "0.52396184", "0.52205116", "0.51830083", "0.5107131", "0.50293916", "0.50252926", "0.5000583", "0.49980915", "0.49932697", "0.49482554", "0.49387234", "0.49366552", "0.49193186", "0.49054828", "0.48359147", "0.48309058", "0.48211256", "0.48037487", "0.4801138", "0.47836584", "0.47835281", "0.47169587", "0.4714232", "0.46997422", "0.46991014", "0.46929362", "0.4673516", "0.46342587", "0.4606426", "0.4600185", "0.45922017", "0.45914832", "0.45797688", "0.45726654", "0.45685312", "0.45685312", "0.45680475", "0.45604458", "0.45598024", "0.4554503", "0.4534566", "0.4525717", "0.45233646", "0.45058602", "0.4500386", "0.4493856", "0.4493819", "0.44887623", "0.4469182", "0.44634864", "0.44625643", "0.4448707", "0.44354606", "0.44321707", "0.44286734", "0.44206074", "0.4418011", "0.44144925", "0.44052452", "0.44048327", "0.44025555", "0.4391039", "0.43838954", "0.43818134", "0.43817407", "0.43793082", "0.43779066", "0.43688202", "0.4364023", "0.43629143", "0.4362679", "0.43622035", "0.43602747", "0.4357113", "0.43556035", "0.43545875", "0.43543765", "0.43543562", "0.43533692", "0.43509507", "0.43490875", "0.43436867", "0.4343317", "0.43412519", "0.43298134", "0.43295434", "0.4329136", "0.43279448", "0.43239164", "0.43221277", "0.4315841", "0.43038657", "0.42985934", "0.42985672", "0.4297979", "0.42953035", "0.42950547" ]
0.5529602
2
This algorithm returns a cutlist which performs a vertical core operation. The laser cuts off one side of the poly at a time, rotating the block so that the edge of the laser "cone" stays parallel to the SCD core. After one side of the block has been removed, the block is rotated 90 degrees and the algorithm repeats until all four sides have been removed.
def vertical_core(block, cut, laser):
    layers = int(block["thickness"]/laser["z_spacing"])
    angle = math.radians(laser["kerf_angle"]/2)
    taper = math.tan(angle) * laser["z_spacing"]
    u = math.tan(2 * angle) * (block["thickness"] + laser["z_final_overshoot"])
    # Stage height for each of the four sides (0/90/180/270 degrees of c rotation)
    z_0 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_y"])/2 - block["origin_y"] + u)
    z_1 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_x"])/2 + block["origin_x"] + u)
    z_2 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_y"])/2 + block["origin_y"] + u)
    z_3 = block["thickness"]*math.cos(angle) + math.sin(angle)*((cut["final_dimension_x"])/2 - block["origin_x"] + u)
    cutlist = []
    cutlist.append(["a_abs", f"{math.degrees(angle):.6f}"])
    cutlist.append(["c_abs", str(block["physical_rotation"])])
    cutlist.append(["z_abs", f"{z_0:.6f}"])
    y_start_wide = ((u + cut["final_dimension_x"]/2) * math.cos(angle) - block["thickness"]*math.sin(angle) - u/math.cos(angle))
    y_start_length = ((u + cut["final_dimension_y"]/2) * math.cos(angle) - block["thickness"]*math.sin(angle) - u/math.cos(angle))
    depth_cut = (block["thickness"] + laser["z_final_overshoot"]) * math.cos(angle)/math.cos(2*angle)
    # One line pass per side, offset by the block origin
    cut1 = json.loads(line(block["width"]/2 - block["origin_x"], y_start_length - block["origin_y"], -block["width"]/2 - block["origin_x"], y_start_length - block["origin_y"], depth_cut, laser))
    cut2 = json.loads(line(block["length"]/2 + block["origin_y"], y_start_wide - block["origin_x"], -block["length"]/2 + block["origin_y"], y_start_wide - block["origin_x"], depth_cut, laser))
    cut3 = json.loads(line(block["width"]/2 + block["origin_x"], y_start_length + block["origin_y"], -block["width"]/2 + block["origin_x"], y_start_length + block["origin_y"], depth_cut, laser))
    cut4 = json.loads(line(block["length"]/2 - block["origin_y"], y_start_wide + block["origin_x"], -block["length"]/2 - block["origin_y"], y_start_wide + block["origin_x"], depth_cut, laser))
    #cut1 = json.loads(line(block["width"]/2,y_start_length,-block["width"]/2,y_start_length,depth_cut,laser))
    #cut2 = json.loads(line(block["length"]/2,y_start_wide,-cut["final_dimension_y"]/2,y_start_wide,depth_cut,laser))
    #cut3 = json.loads(line(block["width"]/2,y_start_length,-cut["final_dimension_x"]/2,y_start_length,depth_cut,laser))
    #cut4 = json.loads(line(cut["final_dimension_y"]/2,y_start_wide,-cut["final_dimension_y"]/2,y_start_wide,depth_cut,laser))
    # Chain the four sides together, rotating c by 90 degrees between passes
    cutlist = (cutlist + cut1 + [["c_rel", "90"], ["z_abs", f"{z_1:.6f}"]]
               + cut2 + [["c_rel", "90"], ["z_abs", f"{z_2:.6f}"]]
               + cut3 + [["z_abs", f"{z_3:.6f}"], ["c_rel", "90"]]
               + cut4)
    cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"])
    cutlist.append(["stop_trigger"])
    return json.dumps(cutlist)
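A minimal usage sketch for vertical_core, assuming it is pasted into the same module as above. Every value in the block, cut, and laser dictionaries below is hypothetical, chosen only to exercise the keys the function reads, and line is stubbed out here (the real helper in this module emits the JSON move list for one tilted cut pass) purely so the sketch runs standalone.

import json
import math

def line(x0, y0, x1, y1, depth, laser):
    # Hypothetical stand-in for the module's real line() helper, which
    # returns a JSON-encoded list of [command, ...] moves for one cut pass.
    return json.dumps([["jump", f"{x0:.6f}", f"{y0:.6f}"],
                       ["mark", f"{x1:.6f}", f"{y1:.6f}"]])

# Hypothetical block/cut/laser parameters (units as used by the module)
block = {"thickness": 2.0, "width": 10.0, "length": 10.0,
         "origin_x": 0.0, "origin_y": 0.0, "physical_rotation": 0}
cut = {"final_dimension_x": 4.0, "final_dimension_y": 4.0}
laser = {"z_spacing": 0.05, "kerf_angle": 8.0, "z_final_overshoot": 0.1}

moves = json.loads(vertical_core(block, cut, laser))
print(moves[0])    # ["set_trigger4", "1", "0", "7", "8", "45"]
print(moves[-1])   # ["stop_trigger"]

The decoded list starts with the trigger setup, then one tilted pass per side separated by c_rel 90 rotations and a z_abs height change, matching the four-sided sequence described in the query.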
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetContourValuesLengthsAndSubContoursAndOrderOfSubContoursByFrame(\n watershed, allValsByFrame\n):\n scListByFrame, orderOfSCsByValueByFrame = GetSubContoursAndOrderingByFrame(\n watershed, allValsByFrame\n )\n cVLSByFrame = [[sc.cVLS() for sc in scList] for scList in scListByFrame]\n return cVLSByFrame, orderOfSCsByValueByFrame\n\n ## NOT NEEDED! KEEPING FOR REFERENCE!\n # for i in range(len(cVLS)-1,0,-1):\n # for j in range(i-1,-1,-1): # Loop backwards through the sorted list of cvls's... if the value pair matches, check the endpoints (they will always be reversed for adjacent regions (always go ccw...))\n # if cVLS[i][0]!=cVLS[j][0]: # once we no longer match the value pair, we know there are no more matches in the list...\n # break\n # ######## VERIFY THIS ACTUALLY WORKS THE SAME WAY!!!\n # elif (cVLS[i][2][-1],cVLS[i][2][0]]) == (cVLS[j][2][0],cVLS[j][2][-1]): # if 2 subcoutours are the same,\n # if cVLS[j][1]>cVLS[i][1]:\n # cVLS[j],cVLS[i] = cVLS[i],cVLS[j] #swap!\n # shortest = min(cVLS[j][1],cVLS[i][1]) # keep only the one with the minimum length computation\n #\n # cVLS[j][1] = shortest\n # del(cVLS[i])\n # break", "def remove_subdivison(self):\n temp_sub_vertices = []\n for index in range(0, len(self.subdivision_list) - 1, 4):\n v0 = Vec3d(0, 0, 0, 0)\n v1 = Vec3d(0, 0, 0, 0)\n v2 = Vec3d(0, 0, 0, 0)\n\n v0.x = self.subdivision_list[index + 1][0].x\n v0.y = self.subdivision_list[index + 1][0].y\n v0.z = self.subdivision_list[index + 1][0].z\n v0.w = self.subdivision_list[index + 1][0].w\n\n v1.x = self.subdivision_list[index + 2][0].x\n v1.y = self.subdivision_list[index + 2][0].y\n v1.z = self.subdivision_list[index + 2][0].z\n v1.w = self.subdivision_list[index + 2][0].w\n\n v2.x = self.subdivision_list[index + 3][0].x\n v2.y = self.subdivision_list[index + 3][0].y\n v2.z = self.subdivision_list[index + 3][0].z\n v2.w = self.subdivision_list[index + 3][0].w\n\n temp_sub_vertices.append([v0, v1, v2])\n\n self.subdivision_list = temp_sub_vertices", "def __bcc_top_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def GetSubContoursByFrame(watershed, allValsByFrame):\n scListByFrame = []\n for frame in range(len(watershed)):\n scList = []\n for v in allValsByFrame[frame]:\n boundingRect = ImageContour.GetBoundingRect(watershed[frame], v)\n # No longer needed: #contour,turns,vals = ImageContour.GetContour(watershed[0],v,boundingRect=boundingRect,byNeighbor=True)\n (\n 
perimeterVals,\n perimeterList,\n scPoints,\n ) = ImageContour.GetPerimeterByNeighborVal(\n watershed[frame], v, boundingRect=boundingRect, getSubContours=True\n )\n scPointsAdj = [\n (np.array(scp) + [boundingRect[0][0], boundingRect[1][0]]).tolist()\n for scp in scPoints\n ] # Will need to - 0.5 to line up on an overlay\n if len(perimeterList) > 0:\n scList += [\n SubContour(\n points=scPointsAdj[i],\n numPoints=len(scPointsAdj[i]),\n adjusted_length=perimeterList[i],\n values=tuple(sorted([v, perimeterVals[i]])),\n startPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][0]\n ),\n endPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][-1]\n ),\n )\n for i in range(len(perimeterVals))\n ]\n scList.sort(key=lambda x: x.values)\n for i in range(len(scList) - 1, 0, -1):\n # if 2 subcoutours are the same, keep only the one with the minimum length computation\n if scList[i - 1].values == scList[i].values:\n scList[i - 1].adjusted_length = min(\n scList[i - 1].adjusted_length, scList[i].adjusted_length\n )\n del scList[i]\n scListByFrame.append(scList)\n return scListByFrame", "def decomposing_line_cut_by_splicing(P, v, w):\n\n\n\tv_Point = Point(v)\n\tw_Point = Point(w)\n\n\tchain = LineString(P[0]+[P[0][0]])\n\n\tdistance_to_v = chain.project(v_Point)\n\tdistance_to_w = chain.project(w_Point)\n\n\tif not chain.intersects(v_Point):\n\t\tprint(\"decomposing_cut_as_line: V not on chain\")\n\tif not chain.intersects(w_Point):\n\t\tprint(\"decomposing_cut_as_line: W not on chain\")\n\tif distance_to_w == distance_to_v:\n\t\tprint(\"decomposing_cut_as_line: W and V are the same\")\n\n\n\tif distance_to_w >= chain.length or distance_to_w == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_v)\n\n\t\tp_l = left_chain.coords[:]\n\t\tp_r = right_chain.coords[:]\t\t\n\n\t\treturn p_l, p_r\n\n\tif distance_to_v >= chain.length or distance_to_v == 0:\n\n\t\tleft_chain, right_chain = cut_linestring(chain, distance_to_w)\n\n\t\tp_l = right_chain.coords[:]\n\t\tp_r = left_chain.coords[:]\t\t\n\n\t\treturn p_l, p_r\n\n\n\tif distance_to_w > distance_to_v:\n\n\t\tleft_v_cut, right_v_cut = cut_linestring(chain, distance_to_v)\n\n\t\tdistance_to_w = right_v_cut.project(w_Point)\n\t\tleft_w_chain, right_w_chain = cut_linestring(right_v_cut, distance_to_w)\n\n\t\tp_l = left_v_cut.coords[:]+right_w_chain.coords[:-1]\n\t\tp_r = left_w_chain.coords[:]\n\n\t\treturn p_l, p_r\n\n\telse:\n\n\t\tleft_w_cut, right_w_cut = cut_linestring(chain, distance_to_w)\n\n\t\tdistance_to_v = right_w_cut.project(v_Point)\n\t\tleft_v_chain, right_v_chain = cut_linestring(right_w_cut, distance_to_v)\n\n\t\tp_l = left_w_cut.coords[:]+right_v_chain.coords[:-1]\n\t\tp_r = left_v_chain.coords[:]\n\n\t\treturn p_l, p_r", "def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = 
json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + [[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": 
angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def chercherChemin(self):\n\n \n liste=self._circuit.vue(self.x,self.y,self.rayonVision)\n \n listeSuppr=[]\n couche_vehicule= self._circuit.Couche_vehicules\n \n for case in liste :\n #on élimine les cases infranchissbles les cases qui ne sont pas sur le chemin à suivre \n\n if self._circuit.numeroWayPoint(case[0],case[1])==0 or ( self._circuit.numeroWayPoint(self.x,self.y)!=self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])<= self._circuit.numeroWayPoint(self.x,self.y)) or( self._circuit.numeroWayPoint(case[0],case[1])>= 5*self._circuit.numeroWayPoint(self.x,self.y) and self._circuit.numeroWayPoint(self.x,self.y)!=0) or ( self._circuit.numeroWayPoint(self.x,self.y)==self._circuit.lastWayPoint and self._circuit.numeroWayPoint(case[0],case[1])== self._circuit.numeroWayPoint(self.x,self.y)) or self._circuit.plateau[case[1],case[0],couche_vehicule]!=None:#on élimine les points derrière\n \n listeSuppr.append(case)\n\n \n for case in listeSuppr:\n \n liste.remove(case)\n \n if len(liste)>=1:\n l=liste[0]\n\n for nour in liste :\n \n if distance((self.x,self.y),(l[0],l[1])) > distance((self.x,self.y),(nour[0],nour[1])):\n l=nour\n pasx=0\n pasy=0\n if self.x<l[0] : \n pasx=1\n elif self.x>l[0] :\n pasx=-1\n if self.y<l[1] : \n pasy=1\n elif self.y>l[1] :\n pasy=-1\n debug.dprint(\" id {} {}:({},{}) Waypoint {} Point:({},{}) WayPoint {} vitesse :{} reservoir:{}\".format(self.id,self.typeV,self.x,self.y,self._circuit.numeroWayPoint(self.x,self.y),l[0],l[1],self._circuit.numeroWayPoint(l[0],l[1]),self.vitesse,self.reservoir))\n self.orientation=atan2(pasy,pasx)\n\n self.vitesse=1\n\n debug.dprint(self) \n \n super().deplacer()\n \n\n self.rayonVision=4\n else :# on augemente le rayon de vision au cas ou toutes les cases sont occupées ou non franchissables\n self.rayonVision*=3", "def planeSliceGnoKDI(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 5000, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n\n cdist = uxmax/(np.abs(50*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # Plots\n fig = plt.figure(figsize = (6, 10))\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n # ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n # rx = np.linspace(-uxmax, uxmax, gsizex)\n # ry = np.linspace(-uymax, uymax, gsizey)\n # ux, uy = np.meshgrid(rx, ry)\n\n # rx2 = np.linspace(xmin, xmax, gsizex)\n # im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n # cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n # cbar.set_label('G', fontsize=16)\n # ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n # cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n # paths = cs.collections[0].get_paths()\n # uppaths = []\n # for p in paths:\n # cuvert = np.array(p.vertices).T\n # upx, upy = mapToUp(cuvert, alp, ax, ay)\n # ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n # ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n # ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n # ax0.set_xlabel(r\"$u'_x$\", fontsize = 16)\n # ax0.set_ylim([-uymax, uymax])\n # ax0.set_xlim([-uxmax, uxmax])\n # ax0.set_ylabel(r\"$u'_y$\", fontsize = 16)\n # ax0.set_title(\"Gain in the u' plane\")\n\n # G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n # G = G - G[-1] + 1\n fig = plt.figure(figsize = (7, 3), dpi = 100)\n ax1 = plt.subplot()\n # ax1.plot(rx2, G, color = 'blue', label = \"Gain from FFT\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'blue')\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(xmin, xmax)\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 16)\n ax1.set_ylabel('G', fontsize = 16)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n # ax1.legend(loc = 1)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = 
\"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n # grid.tight_layout(fig, pad = 1.5)\n plt.tight_layout()\n plt.show()\n return", "def planeSliceGFig2(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = int(len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n if comp == True:\n diff = difference(nreal) # determine number of complex solutions\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 1, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n \n fogain = np.zeros([nzones, npoints])\n zogain = np.zeros([nzones, npoints])\n for i in range(nzones):\n nroots = nreal[i]\n if nroots == 1:\n fogain[i] = np.abs(allfields[i])**2\n zogain[i] = np.abs(allfields[i])**2\n else:\n fogain[i] = np.abs(np.sum(allfields[i], axis = 0))**2\n zog = 0\n for j in range(nroots):\n zog = zog + np.abs(allfields[i][j])**2\n zogain[i] = zog\n \n fogain = fogain.flatten()\n zogain = zogain.flatten()\n\n # Construct uniform asymptotics\n # asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n # interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n # finx = np.linspace(xmin, xmax, 4*npoints)\n # asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(2, 2)\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[:, 0]), plt.subplot(grid[0, 1])\n # ax0, ax2 = plt.subplot(grid[0]), plt.subplot(grid[1])\n ax2 = plt.subplot(grid[1, 1], sharex=ax1)\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n cbar.set_label('G', fontsize = 18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot 
caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\", linewidth = 1.)\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n xaxis = upxvecs.flatten()\n ax1.plot(xaxis, zogain, color = 'red', label = r'$0^{th}$ order GO gain')\n ax1.set_ylim(-cdist, np.max(G) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n # ax1.set_xlabel(r\"$u'_x$\")\n ax1.set_ylabel('G', fontsize = 18)\n ax1.legend(loc = 1, fontsize = 12)\n ax1.tick_params(labelsize = 14)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n \n # Plot gain along observer motion\n ax2.plot(rx2, G, color='blue', label=\"FFT gain\", linewidth=1.)\n for caus in upcross.T[0]:\n ax2.plot([caus, caus], [-10, 1000], ls='dashed', color='black')\n ax2.plot(xaxis, fogain, color='orange', label=r'$1^{st}$ order GO gain')\n ax2.set_ylim(-cdist, np.max(G) + 1.)\n ax2.set_xlim(np.min(rx2), np.max(rx2))\n ax2.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax2.set_ylabel('G', fontsize = 18)\n ax2.legend(loc = 1, fontsize = 12)\n # ax1.set_title(\"Slice Gain\")\n ax2.tick_params(labelsize = 14)\n ax2.grid()\n grid.tight_layout(fig)\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n plt.show()\n return", "def decomposing_poly_cut_by_set_op(P, v, w, epsilon=10e-2):\n\n\n\tgetcontext().prec = 28\n\n\tv_Point = Point(v)\n\tw_Point = Point(w)\n\n\tchain = LineString(P[0]+[P[0][0]])\n\n\tif not chain.intersects(v_Point):\n\t\tprint(\"decomposing_poly_cut_as_line: V not on chain\")\n\tif not chain.intersects(w_Point):\n\t\tprint(\"decomposing_poly_cut_as_line: W not on chain\")\n\n\n\tdistance_to_v = chain.project(v_Point)\n\tdistance_to_w = chain.project(w_Point)\n\n\tif distance_to_w == distance_to_v:\n\t\tprint(\"decomposing_cut_as_line: W and V are the same\")\n\n\n\t# Generate pairs of v and w modified by some epsilon amount \n\tv_l_displacements = [distance_to_v+(i*epsilon) for i in [-1, -2, 0]]\n\tv_r_displacements = [(distance_to_v+(i*epsilon))%chain.length for i in [1, 0, 
2]]\n\tw_l_displacements = [distance_to_w+(i*epsilon) for i in [-1, -2, 0]]\n\tw_r_displacements = [(distance_to_w+(i*epsilon))%chain.length for i in [1, 0, 2]]\n\n\tdef splice_polygon(dist_v, dist_w):\n\t\t\"\"\"Portion of decomposing_line_cut_by_splicing without points\n\n\t\tFunction for evaluating validity of candidates\n\t\t\"\"\"\n\n\t\tif dist_w >= chain.length or dist_w == 0:\n\n\t\t\tleft_chain, right_chain = cut_linestring(chain, dist_v)\n\n\t\t\tp_l = left_chain.coords[:]\n\t\t\tp_r = right_chain.coords[:]\t\t\n\n\t\t\treturn p_l, p_r\n\n\t\tif dist_v >= chain.length or dist_v == 0:\n\n\t\t\tleft_chain, right_chain = cut_linestring(chain, dist_w)\n\n\t\t\tp_l = right_chain.coords[:]\n\t\t\tp_r = left_chain.coords[:]\t\t\n\n\t\t\treturn p_l, p_r\n\n\n\t\tif dist_w%chain.length > dist_v%chain.length:\n\n\t\t\tleft_v_chain, right_v_chain = cut_linestring(chain, dist_v)\n\t\t\tleft_w_chain, right_w_chain = cut_linestring(chain, dist_w)\n\n\t\t\tcommon = LineString(left_w_chain).difference(LineString(left_v_chain))\n\n\t\t\tp_l = left_v_chain.coords[:]+right_w_chain.coords[:-1]\n\t\t\tp_r = common.coords[:]\n\n\t\t\treturn p_l, p_r\n\n\t\telse:\n\n\t\t\tleft_v_chain, right_v_chain = cut_linestring(chain, dist_v)\n\t\t\tleft_w_chain, right_w_chain = cut_linestring(chain, dist_w)\n\n\t\t\tcommon = LineString(left_v_chain).difference(LineString(left_w_chain))\n\n\t\t\tp_l = common.coords[:]\n\t\t\tp_r = left_w_chain.coords[:]+right_v_chain.coords[:-1]\n\n\t\t\treturn p_l, p_r\n\n\t# Check every ring for self-intersection; if a cut is invalid => self-intersection\n\tfound = False\n\tfor i in range(len(v_l_displacements)):\n\t\tfor j in range(len(w_l_displacements)):\n\n\t\t\t# Check if resultant polygons are valid\n\t\t\tp_l, p_r = splice_polygon(v_l_displacements[i], w_r_displacements[j])\n\t\t\tp_l_lr = LinearRing(p_l+[p_l[0]])\n\t\t\tp_r_lr = LinearRing(p_r+[p_r[0]])\n\n\t\t\tif not p_l_lr.is_valid or not p_r_lr.is_valid:\n\t\t\t\tcontinue\n\n\t\t\tp_l, p_r = splice_polygon(v_r_displacements[i], w_l_displacements[j])\n\t\t\tp_l_lr = LinearRing(p_l+[p_l[0]])\n\t\t\tp_r_lr = LinearRing(p_r+[p_r[0]])\n\n\t\t\tif not p_l_lr.is_valid or not p_r_lr.is_valid:\n\t\t\t\tcontinue\n\n\t\t\t# Else, we have a valid candidate cut\n\t\t\tfound = True\n\t\t\tbreak\n\n\t\tif found:\n\t\t\tbreak\n\n\tif not found:\n\t\tprint(\"splice_polygon: No correct cut combination found!\")\n\t\treturn\n\n\tv_l = chain.interpolate(v_l_displacements[i]).coords[:]\n\tv_r = chain.interpolate(v_r_displacements[i]).coords[:]\n\tw_l = chain.interpolate(w_l_displacements[j]).coords[:]\n\tw_r = chain.interpolate(w_r_displacements[j]).coords[:]\n\n\tdef get_verts(v_l, v_r):\n\t\t\"\"\"Function for extracting the vertices between two points\n\t\t\"\"\"\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points\n\n\t# Find all vertices of the chain between v_l and v_r\n\tv_pts = get_verts(v_l_displacements[i], v_r_displacements[i])\n\tw_pts = 
get_verts(w_l_displacements[j], w_r_displacements[j])\n\n\tpoly = Polygon(*P)\n\n\tcut_poly = Polygon(v_l+v_pts+v_r+w_l+w_pts+w_r)\n\tcut_poly = poly.intersection(cut_poly)\n\n\tprint cut_poly\n\n\tp_l, p_r = poly.difference(cut_poly)\n\n\tprint p_l\n\tprint p_r", "def removeCL(self):\n removedNum = 0\n for m in self.freeMonomers: #[::2]:\n # Remove all CL from and to the concerned monomers\n self.LaplacianMatrix[m][:m-1] = 0 \n self.LaplacianMatrix[m][m+2:] = 0\n self.LaplacianMatrix[:,m][:m-1] = 0 \n self.LaplacianMatrix[:,m][m+2:] = 0\n self.LaplacianMatrix[m,m] = 1 \n removedNum += self.cutAllEdgesWith(m)\n # Update the diagonal of the Laplacian\n np.fill_diagonal(self.LaplacianMatrix, 0) # dummy, not sure if it is worth to think a better way\n np.fill_diagonal(self.LaplacianMatrix, -1*self.LaplacianMatrix.sum(axis = 1))\n \n return removedNum", "def __init__(self, is_p1_turn: bool, side_length: int) -> None:\n super().__init__(is_p1_turn)\n self.side_length = side_length\n # ISSUE: what if node is more than 26 --> no need to handle side more than 5\n # construct a list of uppercase and lower case letters\n alph_lst_upper = list(string.ascii_uppercase)\n alph_lst_lower = list(string.ascii_lowercase)\n # alph_lst has a length of 52\n alph_lst = alph_lst_upper + alph_lst_lower\n\n # assign original value for each ley-line\n hori_result = []\n for i in range(side_length + 1):\n hori_result.append(\"@\")\n left_result = []\n for i in range(side_length + 1):\n left_result.append(\"@\")\n right_result = []\n for i in range(side_length + 1):\n right_result.append(\"@\")\n self.hori_result = hori_result\n self.left_result = left_result\n self.right_result = right_result\n\n self.hori_lst = []\n self.left_lst = []\n self.right_lst = []\n\n # construct horizontal ley-lines\n n = 2\n start_index = 0\n end_index = 0\n while n <= side_length + 1:\n end_index = start_index + n\n self.hori_lst.append(alph_lst[start_index:end_index])\n start_index = end_index\n n += 1\n end_index = start_index + side_length\n self.hori_lst.append(alph_lst[start_index:end_index])\n\n # copy hori_lst\n hori_copy = []\n for item in self.hori_lst:\n hori_copy.append(item)\n\n # construct left ley-lines\n for i in range(side_length + 1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) > i:\n temp.append(lst[i])\n self.left_lst.append(temp)\n for i in range(1, side_length + 1):\n self.left_lst[i].append(hori_copy[-1][i - 1])\n\n # construct right ley-lines\n for i in range(-1, side_length * (-1) - 2, -1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) >= i * (-1):\n temp.append(lst[i])\n self.right_lst.append(temp)\n self.right_lst = self.right_lst[::-1]\n for i in range(side_length):\n self.right_lst[i].append(hori_copy[-1][i])", "def clusterparts(parts, block_len):\n parts = sorted(parts, key=op.itemgetter(-1))\n global opt\n clusters = [[parts[0][-1]]]\n \n # assign all parts to clusters\n for i in range(1,len(parts)):\n x, y = parts[i][-1]\n \n # detect box already in cluster\n fc = []\n for k,cl in enumerate(clusters):\n for xc,yc in cl:\n ar = intersectarea((xc,yc),(x,y),block_len)\n intrat = float(ar)/(block_len*block_len)\n if intrat > float(opt.blint):\n if not fc: clusters[k].append((x,y))\n fc.append(k)\n break\n \n # if this is new cluster\n if not fc:\n clusters.append([(x,y)])\n else:\n # re-clustering boxes if in several clusters at once\n while len(fc) > 1:\n clusters[fc[0]] += clusters[fc[-1]]\n del clusters[fc[-1]]\n del fc[-1]\n \n item = op.itemgetter\n # 
filter out small clusters\n clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]\n \n # filter out clusters, which doesn`t have identical twin cluster\n clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]\n \n return clusters", "def cluster_vertical(P): # Used in form_segment_().\n if len(P['down_fork_']) == 1 and len(P['down_fork_'][0]['up_fork_']) == 1:\n down_fork = P.pop('down_fork_')[0] # Only 1 down_fork.\n down_fork.pop('up_fork_') # Only 1 up_fork.\n down_fork.pop('y')\n down_fork.pop('sign')\n return [P] + cluster_vertical(down_fork) # Plus next P in segment\n\n return [P] # End of segment", "def hatch(S, dist, angle=0., flip_horizontal=False, get_hole_count=False, max_count=1000000, eps=1e-10): \n if not is_compound(S):\n S = [S]\n\n hole_count = [0 for i in range(len(S))]\n solid_count = [0 for i in range(len(S))]\n\n if not S:\n return []\n \n # Rotate shape for oriented hatches \n theta = radians(angle)\n mat = rot_2d(-theta, affine=True)\n S = [affine_transform(mat, P) for P in S]\n\n box = bounding_box(S)\n\n # build edge table\n ET = []\n for i, P in enumerate(S):\n P = np.array(P)\n n = P.shape[0]\n if n <= 2:\n continue\n for j in range(n):\n a, b = P[j], P[(j+1)%n]\n # reorder increasing y\n if a[1] > b[1]:\n a, b = b, a\n # slope\n dx = (b[0] - a[0]) \n dy = (b[1] - a[1])\n if abs(dx) > eps:\n m = dy/dx \n else:\n m = 1e15\n if abs(m) < eps:\n m = None\n ET.append(Edge(a=a, b=b, m=m, i=i))\n\n # sort by increasing y of first point\n ET = sorted(ET, key=lambda e: e.a[1])\n\n # intersection x\n def ex(e, y):\n if e.m is None:\n return None\n return e.a[0] + (y - e.a[1])/e.m\n\n y = box[0][1]\n scanlines = []\n\n AET = [] # active edge table\n\n flip = 0\n c = 0\n while ET or AET:\n if y > box[1][1]:\n break\n if c >= max_count:\n print(\"scanlines: reached max number of iterations\")\n break\n c += 1\n\n # move from ET to AET\n i = 0\n for e in ET:\n if e.a[1] <= y:\n AET.append(e)\n i += 1\n else:\n break\n if i < len(ET):\n ET = ET[i:]\n else:\n ET = []\n \n # remove passed edges\n AET = sorted(AET, key=lambda e: e.b[1])\n AET = [e for e in AET if e.b[1] > y] \n \n xs = [(ex(e, y), e.i) for e in AET]\n #brk()\n xs = [xi for xi in xs if xi[0] is not None]\n # sort Xs (flipped each scanline for more efficent plotting )\n if flip:\n xs = sorted(xs, key=lambda v: -v[0])\n else:\n xs = sorted(xs, key=lambda v: v[0])\n \n if flip_horizontal:\n flip = not flip\n \n even_odd = [0 for i in range(len(S))]\n\n if len(xs) > 1:\n #brk()\n parity = 1\n for (x1,i1), (x2,i2) in zip(xs, xs[1:]): \n a, b = (np.array([x1, y]),\n np.array([x2, y]))\n if parity:\n scanlines += [a, b]\n even_odd[i2] += 1\n else:\n # If se are outside of a shape and we enounter \n # an unvisited contour, it means that this is a separate \n # outer contour, so don't count. 
Otherwise...\n                    if even_odd[i2]:\n                        even_odd[i2] += 1\n                        pass\n                    parity = not parity\n\n        # increment\n        y = y + dist\n\n    # unrotate\n    if scanlines:\n        scanlines = affine_transform(mat.T, scanlines) #np.array(scanlines))\n        # make list of hatch segments\n        scanlines = [[a, b] for a, b in zip(scanlines[0::2], scanlines[1::2])]\n    return scanlines
def simple_core(block,cut,laser):\r\n\r\n\tlayers = int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\r\n\t#Since all cuts are square, the offsets are more obvious than in the general linear case.\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\tmax_delta = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * 2\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", \"0\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", str(block[\"thickness\"])])\r\n\r\n\tfor a in range(layers):\r\n\t\tx1, y1 = cut[\"final_dimension_x\"]/2 + a*taper, cut[\"final_dimension_y\"]/2 + a*taper\r\n\t\twhile abs(x1-cut[\"final_dimension_x\"]/2) < abs(max_delta):\r\n\t\t\tcutlist.append([\"jump\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tx1, y1 = x1 + laser[\"xy_spacing\"], y1 + laser[\"xy_spacing\"]\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\tmax_delta = max_delta - taper \r\n\treturn json.dumps(cutlist)
def cutNow(self,leftMonomers,definitive=False):\n        # A1 ~ Unif[0,N-1-(Nc-1)(g-1)[\n        for A1 in leftMonomers:\n            A2 = A1 + 1\n            # Update the Laplacian matrix\n            self.LaplacianMatrix[A1,A2] = 0\n            self.LaplacianMatrix[A2,A1] = 0\n            self.LaplacianMatrix[A1,A1] -= 1 \n            self.LaplacianMatrix[A2,A2] -= 1 \n            # Update the adjacency list\n            self.cutEdge(A1,A2)\n            # Add new free ends to freeMonomers list\n            self.freeMonomers.extend([A1,A2])\n        \n        if definitive:\n            self.generatePossibleEncounters()\n#            \n        for i in range(len(self.freeMonomers)):\n            self.freeMonomersNames[self.freeMonomers[i]] = chr(97 + i//2) + str(1 + i%2)
def clip_scaffold_loops(self):\r\n        start = 0\r\n        index = 0\r\n        ie = len(self.walk)\r\n        while index < ie:\r\n            segment = None\r\n            try:\r\n                segment = self.walk[index+1]\r\n            except IndexError:\r\n                self.remove_biggest_loop_in_range(start, index)\r\n                return\r\n            if segment is None or segment.value == 'RUNG':\r\n                # Segment is essential.\r\n                if start != index:\r\n                    ie -= self.remove_biggest_loop_in_range(start, index)\r\n                start = index + 2\r\n            index += 2
def cutPoly(self,geom,startPt,endPt,debug=False):\r\n        #if we have disjoint Multi geometry as geom to split we need to iterate over its parts\r\n        splittedGeoms=[]\r\n        leftFragments=[]\r\n        rightFragments=[]\r\n        #if self.debug: print \"Number of geoms when slicing: \",str(len(geom.asGeometryCollection()))\r\n        for geomPart in geom.asGeometryCollection():\r\n            #split the actual part by cut line defined by startPt,endPt\r\n            (res,splittedGeomsPart,topo)=geomPart.splitGeometry([startPt,endPt],False)\r\n            splittedGeoms+=splittedGeomsPart\r\n            #Add the remaining geomPart to the rightFragments or leftFragments\r\n            #depending on distance\r\n            
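            # A sketch of the side test used just below (an assumption about the
            # author's helper: signedDistCentroidFromLine typically evaluates the
            # 2D cross product (x2-x1)*(cy-y1) - (y2-y1)*(cx-x1) for the cut line
            # (x1,y1)->(x2,y2) and the part's centroid (cx,cy)). Only the sign is
            # used here: d > 0 sends the part to rightFragments, d <= 0 to
            # leftFragments, exactly as in the branches that follow.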
d=self.signedDistCentroidFromLine(geomPart,startPt,endPt)\r\n if d>0:\r\n rightFragments.append(geomPart)\r\n else:\r\n leftFragments.append(geomPart)\r\n #if self.debug: print j,splittedGeoms\r\n\r\n for fragment in splittedGeoms:\r\n \"\"\"\r\n calculate signed distance of centroid of fragment and the splitline\r\n if signed distance is below zero, the point is to the left of the line\r\n if above zero the point is to the right of the line\r\n \"\"\"\r\n d=self.signedDistCentroidFromLine(fragment,startPt,endPt)\r\n #if debug==True:\r\n #if self.debug: print d\r\n\r\n if d>0:\r\n rightFragments.append(fragment)\r\n else:\r\n leftFragments.append(fragment)\r\n\r\n #if self.debug: print \"Left frags:\",len(leftFragments),\"Right frags:\",len(rightFragments)\r\n leftGeom=self.buildMultiPolygon(leftFragments)\r\n rightGeom=self.buildMultiPolygon(rightFragments)\r\n return leftGeom,rightGeom", "def lcs(hh, vv):\n B=LCS.getB(hh,vv)\n trac=LCS.backtrack(B);\n cs=[ hh[h-1] for v,h,k in trac if k=='1' ]\n return cs", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()", "def extract_diag_blocks(x: np.ndarray, y: List) -> List:\n\n def func(cum, this):\n x_crop, res = cum\n return [\n x_crop[len(this):, len(this):],\n res + [x_crop[:len(this), :len(this)]]\n ]\n\n return functools.reduce(func, list(y), [x, []])[-1]", "def removeplane(img, slce=0.4):\n img[img == 0] = np.nan\n\n xr, yr = np.arange(slce*img.shape[0],(1-slce)*img.shape[0],dtype=int),\\\n np.arange(slce*img.shape[1],(1-slce)*img.shape[1],dtype=int)\n x, y = np.meshgrid(xr,yr)\n\n \n subimg = img[xr[0]:xr[-1]+1,yr[0]:yr[-1]+1]\n imgf = subimg[np.isfinite(subimg)].flatten()\n\n vecs = np.ones((5,imgf.size))\n vecs[0,:] = x[np.isfinite(subimg)].flatten()\n vecs[1,:] = y[np.isfinite(subimg)].flatten()\n vecs[2,:] = x[np.isfinite(subimg)].flatten()**2\n vecs[3,:] = y[np.isfinite(subimg)].flatten()**2\n\n C = vecs.dot(vecs.T)\n xv = la.inv(C).dot(vecs.dot(imgf[:,np.newaxis]))\n x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]))\n\n img -= (xv[0]*x + xv[1]*y + \\\n xv[2]*x**2 + xv[3]*y**2 + \\\n xv[4])\n return img", "def get_cut_poly_array(self, planes, angles, disp, fix_pts):\n noPlanes = len(planes)\n plane_storer = [] #4, 2, 3ch in this order\n cut_poly_array = [] #4, 2, 3ch in this order\n\n view_type = ['4ch', '2ch', '3ch']\n\n for i in range(noPlanes):\n if fix_pts[0] == 'var': # for variability test\n origin = self.epi_apex_node\n else: # for foreshortening test\n origin = fix_pts[1+i]\n\n cutPoly_endo_epi, planeActor_endo_epi = self.get_edges_strips(planes[i], origin,\n view_type[i], self.plane_colors[i])\n cut_poly_array.append(cutPoly_endo_epi) # 4, 2, 3\n plane_storer.append(planeActor_endo_epi)\n\n\n # DISPLAY PURPOSES #\n\n # include apex_node\n apexA = include_points(list(self.epi_apex_node), 1, 15, (0, 0, 0))\n\n ## create legend box ##\n legend = vtk.vtkLegendBoxActor()\n legend.SetNumberOfEntries(3)\n\n legendBox = vtk.vtkCubeSource()\n legendBox.SetXLength(2)\n legendBox.SetYLength(2)\n legend.SetEntry(0, legendBox.GetOutput(), \"4 ch\", (0, 1, 0)) #green\n legend.SetEntry(1, legendBox.GetOutput(), \"2 ch\", (0, 0, 1)) #blue\n\n legend.UseBackgroundOn()\n legend.LockBorderOn()\n legend.SetBackgroundColor(0.5, 0.5, 0.5)\n\n # create text box to display the angles ..\n textActor = vtk.vtkTextActor()\n textActor.SetInput(\"4ch = \" + str(angles[0])\n + \"\\n\" + \"2ch = \" + str(angles[1]))\n textActor.SetPosition2(10, 40)\n 
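        # (Descriptive note: this vtkTextActor annotates the render window with
        # the 4ch/2ch slicing angles; its font size and color are configured on
        # the text property object in the next two calls.)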
textActor.GetTextProperty().SetFontSize(24)\n textActor.GetTextProperty().SetColor(1.0, 0.0, 0.0)\n\n # display x-y-z actor\n axes = get_axes_actor([80,80,80], [0,0,0])\n\n # lets display the rv_dir\n rv_dir_act = include_points(list(60*self.rv_dir), 1, 15, (1, 0 ,1))\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(1.0, 1.0, 1.0)\n ren.AddActor(self.meshActor)\n\n # for plAct in [item for sublist in plane_storer for item in sublist]: # flatten list\n # ren.AddActor(plAct)\n\n ren.AddActor(plane_storer[0][0]) # 4ch endo\n ren.AddActor(plane_storer[0][1]) # 4ch epi\n ren.AddActor(plane_storer[1][0]) # 2ch endo\n ren.AddActor(plane_storer[1][1]) # 2ch epi\n # ren.AddActor(plane_storer[2][0]) # 3ch endo\n # ren.AddActor(plane_storer[2][1]) # 3ch epi\n\n self.meshActor.GetProperty().SetOpacity(1.0)\n ren.AddActor(legend)\n ren.AddActor2D(textActor)\n ren.AddActor(axes)\n ren.AddActor(apexA)\n ren.AddActor(rv_dir_act)\n\n if disp:\n vtk_show(ren)\n\n return cut_poly_array, plane_storer, ren", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def channel_array_blocks(self, opening_width, block_len, block_from_bottom):\n \n params = self.params\n count = 0\n for i in range(len(params['widths'])):\n if params['subsampling']>0:\n back_square = self.coord[i*params['num']]\n else:\n back_square = self.coord[i*params['num']+1].copy()\n back_square = back_square-np.repeat([[params['space'],0]],[back_square.shape[0]],axis = 0)\n \n center_x = 0.5*(np.min(back_square[:,0])+np.max(back_square[:,0]))\n center_y = np.min(back_square[:,1]) \n block = Feature.define_polygon([[center_x-params['widths'][i]/2+opening_width,center_y+block_from_bottom],[center_x+params['widths'][i]/2-opening_width,center_y+block_from_bottom],\n [center_x+params['widths'][i]/2-opening_width,center_y+block_from_bottom+block_len],[center_x-params['widths'][i]/2+opening_width, center_y+block_from_bottom+block_len]])\n \n temp = Feature.reverse_feature(block, back_square)\n for j in range(params['num']):\n if ((params['subsampling']>0) and (np.mod(j,params['subsampling']) ==0)) or ((params['subsampling']<0) and (np.mod(j,-params['subsampling']) != 0)):\n new_coord = temp.coord\n new_coord = [x+np.repeat([[j*params['space'],0]],[x.shape[0]],axis = 0) for x in new_coord]\n self.coord[count] = new_coord\n count+=1\n #self.coord = [item for sublist in self.coord for item in sublist]\n temp = []\n for x in self.coord:\n if(isinstance(x, list)):\n for y in x: \n temp.append(y)\n else:\n temp.append(x)\n self.coord = temp\n\n \n \n '''params = self.params\n myarray2 = 
Feature.channel_array(length=block_len,num=params['num'],space = params['space'],space_series = params['space_series'],widths = [x-2*opening_width for x in params['widths']],origin=np.array(params['origin'])+np.array([0,-params['length']+block_len+block_from_bottom]), subsampling=params['subsampling'])\n new_feature = Feature()\n for i in range(len(self.coord)):\n back_square = self.coord[i]\n curr_feature = Feature()\n curr_feature.coord = [myarray2.coord[i]]\n\n temp = Feature.reverse_feature(curr_feature, back_square)\n if new_feature.coord:\n new_feature = Feature.combine_features(new_feature,temp)\n else:\n new_feature = temp\n self.coord = new_feature.coord'''\n return self", "def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet", "def charpoly_factor_blocks(self):\n M = self\n\n if not M.is_square:\n raise DMNonSquareMatrixError(\"not square\")\n\n # scc returns indices that permute the matrix into block triangular\n # form and can extract the diagonal blocks. 
M.charpoly() is equal to\n # the product of the diagonal block charpolys.\n components = M.scc()\n\n block_factors = []\n\n for indices in components:\n block = M.extract(indices, indices)\n block_factors.append((block.charpoly_base(), 1))\n\n return _collect_factors(block_factors)", "def planeSliceGFig3(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n # print(bound)\n midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n # print(upxvecs)\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, 
uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n cbar.set_label('G', fontsize=18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'red', label = r\"$2^{nd}$ order GO gain\")\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax1.set_ylabel('G', fontsize = 18)\n # ax1.set_title(\"Slice Gain\")\n ax1.tick_params(labelsize = 14)\n ax1.grid()\n ax1.legend(loc = 1, fontsize = 14)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n grid.tight_layout(fig, pad = 1.5)\n plt.show()\n return", "def _concentric_tubes(s_tube, s_shell, Re_i, Re_o, inside_heating):\n raise NotImplementedError()\n # Get the h value 
for calculating U \n # Need: fluid properties, which are calculated at mean temperature between inlet and outlet both tube and shell side\n \n # Use continuity equation to get Tid\n mass_i = s_tube.massnet/3600 # kg/s\n rho_i = s_tube.rho\n mu_i = s_tube.mu\n Tid = (4/pi * mass_i*mu_i/Re_i)**(0.5)/rho_i\n \n # Get Tube Outer diameter\n tx = 0.036576 # Assumption\n Tod = Tid + 2*tx \n \n # TODO: Use this for the case with multiple tubes\n #NPS, Tid_new, Tod, tx = nearest_pipe(Di=Tid) \n \n # # Calculate velocity according to nominal pipe size (NPS)\n # A_in = pi/4*Tid**2\n # v_i = mass_i/(A_in*Tid)\n \n # # Recalculate Re and Pr for tube\n # Re_i = (rho_i * v_i * Tid) / mu_i\n Pr_i = s_tube.Pr\n \n # # For the outer tube (shell)\n # mass_o = s_shell.massnet/3600 # kg/s\n # rho_o = s_shell.rho\n # mu_o = s_shell.mu\n # Sid = (4/pi * mass_o*mu_o/Re_i)**(0.5)/rho_o\n # v_o = Re_o*rho_o/(mu_o*Sid)\n Pr_o = s_shell.Pr\n \n # Hydraulic diameter for shell side \n D_eq = Tod-Tid \n \n # Get nusselt number based on correlation\n \n if Re_i <= 2300:\n Nu_i = ht.conv_internal.laminar_T_const()\n elif Re_i > 2300: \n # For turbulent flow, the Nusselt correlation change if the fluid is heated or cooled. When using this formula check if the fluid inside the inner tube is heated or cooled\n Nu_i = ht.conv_internal.turbulent_Dittus_Boelter(Re=Re_i, Pr=Pr_i, heating=inside_heating, revised=True)\n elif 10000 < Re_i < 100000 and 0.5 < Pr_i < 3:\n Nu_i = ht.conv_internal.turbulent_Colburn(Re=Re_i, Pr=Pr_i)\n \n # Nussel coefficient shell side\n Nu_o = ht.conv_external.Nu_cylinder_Zukauskas(Re=Re_o, Pr=Pr_o, Prw=None)\n \n # Conductivity\n k_in = s_tube.k\n k_out = s_shell.k\n \n # Calculate h for internal, out and in/out\n hi = Nu_i*k_in/Tid # Tube-side coefficient\n ho = Nu_o*k_out/D_eq # Shell-side coefficient\n hio = hi * (Tid/Tod)\n \n # Fouling resitance \n # Available excel file with fouling factor for different fluids taken from Perry\n # TODO: Link to excel \"FoulingFactor\"\n Rif = 0.00009\n Rof = 0.000175\n \n #Calculate U \n U_clean = (1/hio + 1/ho)**(-1)\n return (1/U_clean + Rif + Rof)**(-1) /1000", "def make_m2_crv(TSUGITE_list, SHIGUCHI_list):\n \"\"\"\n 1 Get information from TSUGITE_list and SHIGUCHI_list.\n \"\"\"\n # TSUGITE\n # Left----------------------------------------------------------------------\n # material2\n m2_left_list = TSUGITE_list[0]\n m2_left_upper = m2_left_list[0]\n m2_left_middle = m2_left_list[1]\n m2_left_lower = m2_left_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[4]\n m2_KUMIKI_points2 = SHIGUCHI_list[5]\n\n m2_KUMIKI_points1.reverse()\n\n m2_left_upper.extend(m2_KUMIKI_points1)\n m2_left_upper.append(m2_left_upper[0])\n m2_left_upper_crv = rs.AddPolyline(m2_left_upper)\n\n m2_left_middle.extend(m2_KUMIKI_points1)\n m2_left_middle.append(m2_left_middle[0])\n m2_left_middle_crv = rs.AddPolyline(m2_left_middle)\n\n m2_left_lower.extend(m2_KUMIKI_points1)\n m2_left_lower.append(m2_left_lower[0])\n m2_left_lower_crv = rs.AddPolyline(m2_left_lower)\n\n m2_left_crvs = [m2_left_upper_crv, m2_left_middle_crv, m2_left_lower_crv]\n\n # Right---------------------------------------------------------------------\n m2_right_list = TSUGITE_list[1]\n m2_right_upper = m2_right_list[0]\n m2_right_middle = m2_right_list[1]\n m2_right_lower = m2_right_list[2]\n\n # SHIGUCHI\n m2_KUMIKI_points1 = SHIGUCHI_list[0]\n m2_KUMIKI_points2 = SHIGUCHI_list[1]\n\n # Extend\n # material2\n m2_right_upper.reverse()\n m2_right_middle.reverse()\n m2_right_lower.reverse()\n\n # 
m2_KUMIKI_points1.reverse()\n\n m2_right_upper.extend(m2_KUMIKI_points1)\n m2_right_upper.append(m2_right_upper[0])\n m2_right_upper_crv = rs.AddPolyline(m2_right_upper)\n\n m2_right_middle.extend(m2_KUMIKI_points1)\n m2_right_middle.append(m2_right_middle[0])\n m2_right_middle_crv = rs.AddPolyline(m2_right_middle)\n\n m2_right_lower.extend(m2_KUMIKI_points1)\n m2_right_lower.append(m2_right_lower[0])\n m2_right_lower_crv = rs.AddPolyline(m2_right_lower)\n\n m2_right_crvs = [m2_right_upper_crv, m2_right_middle_crv, m2_right_lower_crv]\n\n return m2_left_crvs, m2_right_crvs", "def __bcc_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e", "def vlinecomp(self):\n m_h, c_h = self.fitline(0,2) # Computes the equation for a line joining the points on the outside of the gear on opposites sides of the edm cut\n\n m_v_avg = self.average_grad() # Computes the average gradient of the constructed vertical line\n\n m_v_avg, c_v = self.line_through_point(m_v_avg,4) # Equation of line with average gradient though crack start point\n\n x_intersect,y_intersect = self.intersect_point(m_h, c_h, m_v_avg, c_v)\n\n coord_top = [x_intersect,y_intersect]\n coord_bot = [self.points[4, 0], self.points[4, 1]]\n\n distance = self.distance(coord_bot,coord_top)\n\n return coord_top, 
coord_bot, distance", "def crop_data(vol):\n\n thres = 250\n\n num_x = vol.shape[0]\n num_y = vol.shape[1]\n num_z = vol.shape[2]\n\n \n # set up starting positions\n starts = []\n\n # front and back\n for i in range(num_x):\n for j in range(num_z):\n starts.append( (i, 0, j) )\n starts.append( (i, num_y-1, j) )\n\n # left and right\n for i in range(num_y):\n for j in range(num_z):\n starts.append( (0, i, j) )\n starts.append( (num_x-1, i, j) )\n\n # DFS\n seenpositions = set()\n currentpositions = set(starts)\n\n while currentpositions:\n nextpositions = set()\n for p in currentpositions:\n seenpositions.add(p)\n succ = possiblesuccessors(vol, p, thres)\n for np in succ:\n if np in seenpositions: continue\n nextpositions.add(np)\n\n currentpositions = nextpositions\n\n print \"cropping %i (%i addional) voxels\" % (len(seenpositions), len(seenpositions) - len(starts))\n\n # crop visited voxels\n for pos in seenpositions:\n vol[pos[0], pos[1], pos[2]] = 0.0\n\n return vol", "def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = set(nbr for key in courses[-1] for nbr in wall.neighbors(key))\n course = list(nbrs - seen)\n courses.append(course)\n seen.update(nbrs)\n vertices -= nbrs\n\n return courses", "def cleanOpenBranches(skeleton, skelton_copy, points, radii, length, clean = True, verbose = False):\n \n assert np.isfortran(skeleton);\n assert np.isfortran(skelton_copy);\n \n timer = tmr.Timer();\n timer_all = tmr.Timer();\n \n # find branch and end points\n deg = cpl.convolve_3d_indices(skeleton, t3d.n26, points, out_dtype = 'uint8');\n branchpoints = points[deg >= 3];\n e_pts = points[deg == 1];\n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n #prepare temps\n #skel = skeleton.copy();\n skel_flat = np.reshape(skelton_copy, -1, order = 'A');\n strides = np.array(skelton_copy.strides);\n \n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n label = np.arange(27);\n label = label.reshape([3,3,3]);\n label[1,1,1] = 0;\n \n critical_points = [e_pts];\n delete_points = [];\n \n for l in range(1, length + 1):\n #neighbours of end points\n e_pts_label = cpl.convolve_3d_indices(skelton_copy, label, e_pts);\n \n if verbose:\n timer.printElapsedTime('Done labeling %d / %d' % (l, length));\n timer.reset();\n \n #label zero points are non-critical short isolated branches\n e_pts_zero = e_pts_label == 0;\n #print 'zero length:', np.unravel_index(e_pts[e_pts_zero], skel.shape)\n if e_pts_zero.sum() > 0:\n keep = np.logical_not(e_pts_zero);\n for m in range(l):\n critical_points[m] = critical_points[m][keep];\n e_pts_label = e_pts_label[keep];\n e_pts = e_pts[keep];\n \n if verbose:\n timer.printElapsedTime('Ignored %d small branches' % (keep.sum()));\n timer.reset();\n \n e_pts_new = e_pts + np.sum((np.vstack(np.unravel_index(e_pts_label, label.shape)) - 1).T * strides, axis = 1)\n \n # did we hit a branch point\n delete = np.in1d(e_pts_new, branchpoints); #, assume_unique = True);\n keep = np.logical_not(delete);\n #print delete.shape, keep.shape, e_pts_new.shape\n \n #delete all path that hit a branch point\n if delete.sum() > 0:\n for m in range(l):\n delete_points.append(critical_points[m][delete]);\n #print 
'deleting:', np.unravel_index(critical_points[m][delete], skel.shape)\n critical_points[m] = critical_points[m][keep];\n e_pts_new = e_pts_new[keep];\n \n if verbose:\n timer.printElapsedTime('Deleted %d points' % (delete.sum()));\n timer.reset();\n \n if l < length:\n skel_flat[e_pts] = False; # remove endpoints for new neighbour detection\n critical_points.append(e_pts_new);\n e_pts = e_pts_new;\n \n if verbose:\n timer.printElapsedTime('Cleanup iteration %d / %d done.' % (l, length));\n \n #gather all points\n if len(delete_points) > 0:\n delete_points = np.hstack(delete_points);\n delete_points = np.unique(delete_points);\n else:\n delete_points = np.zeros(0);\n \n if verbose:\n timer_all.printElapsedTime('Cleanup');\n \n if clean:\n skel_flat = np.reshape(skeleton, -1, order = 'F');\n skel_flat[delete_points] = False;\n keep_ids = np.logical_not(np.in1d(points, delete_points, assume_unique = True))\n points = points[keep_ids];\n radii = radii[keep_ids];\n return skeleton, points, radii\n \n return delete_points;", "def solve_polyphase_instance(\n allele_matrix, genotype_list, param, timers, partial_phasing=None, quiet=False\n):\n num_vars = len(allele_matrix.getPositions())\n\n # Precompute block borders based on read coverage and linkage between variants\n if not quiet:\n logger.info(\"Detecting connected components with weak interconnect ..\")\n timers.start(\"detecting_blocks\")\n\n ploidy = param.ploidy\n sl = param.block_cut_sensitivity <= 1\n block_starts = compute_block_starts(allele_matrix, ploidy, single_linkage=sl)\n\n # Set block borders and split readset\n block_starts.append(num_vars)\n num_blocks = sum(1 for i, j in zip(block_starts[:-1], block_starts[1:]) if j > i + 1)\n if not quiet:\n logger.info(\n f\"Split heterozygous variants into {num_blocks} blocks (and {len(block_starts) - num_blocks - 1} singleton blocks).\"\n )\n\n # Process blocks independently\n results = []\n processed_blocks = 0\n timers.stop(\"detecting_blocks\")\n\n \"\"\"\n Python's multiprocessing makes hard copies of the passed arguments, which is not trivial for\n cython objects, especially when they contain pointers to other cython objects. Any passed\n object must be (de)serializable (in Python: pickle). All other objects created in the main\n thread are also accessible by the workers, but they are handled via the copy-on-write policy.\n This means, that e.g. the large main matrix is not hardcopied for every thread, as long as it\n is not modified there. 
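    (A hedged sketch of that pattern, restating the code below rather than adding
    new API: each worker receives only a block id, start/end indices and small
    picklable arguments, and reads the shared allele matrix through the inherited
    reference without ever writing to it.)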
This must be ensured to prevent a massive waste of memory consumption.\n    \"\"\"\n    if param.threads == 1:\n        # for single-threading, process everything individually to minimize memory footprint\n        for block_id, (start, end) in enumerate(zip(block_starts[:-1], block_starts[1:])):\n            submatrix = allele_matrix.extractInterval(start, end)\n            subphasing = partial_phasing.extractInterval(start, end) if partial_phasing else None\n            if end - start > 1:\n                processed_blocks += 1\n                if not quiet:\n                    logger.info(\n                        f\"Processing block {processed_blocks} of {num_blocks} with {len(submatrix)} reads and {end - start} variants.\"\n                    )\n            results.append(\n                phase_single_block(\n                    block_id, submatrix, genotype_list[start:end], subphasing, param, timers, quiet\n                )\n            )\n            del submatrix\n\n    else:\n        # sort block by descending size (4/3-approximation for scheduling problem)\n        timers.start(\"phase_blocks\")\n        joblist = list(zip(range(len(block_starts)), block_starts[:-1], block_starts[1:]))\n        joblist.sort(key=lambda x: x[1] - x[2])\n\n        with Pool(processes=param.threads) as pool:\n            process_results = [\n                pool.apply_async(\n                    phase_single_block_mt,\n                    (\n                        allele_matrix,\n                        partial_phasing,\n                        block_id,\n                        start,\n                        end,\n                        genotype_list[start:end],\n                        param,\n                        timers,\n                        job_id,\n                        num_blocks,\n                        quiet,\n                    ),\n                )\n                for job_id, (block_id, start, end) in enumerate(joblist)\n            ]\n            # collect all blockwise results\n            blockwise_results = [res.get() for res in process_results]\n            results = sorted(blockwise_results, key=lambda x: x.block_id)\n\n        timers.stop(\"phase_blocks\")\n\n    # Aggregate blockwise results\n    if partial_phasing and param.block_cut_sensitivity == 0:\n        # For lowest sensitivity, do not add block starts to global breakpoint list\n        # (unless the partial phasing is also interrupted there)\n        borders = {partial_phasing.getFirstPos(i) for i in range(len(partial_phasing))}\n    else:\n        borders = []\n    return aggregate_results(results, ploidy, borders)
def get_obstList(self,X,Y,Z):\n        \n        ellip_a = 2.*2.*self.cyl_rad\n        ellip_b = 2.*self.cyl_rad\n        ellip_c = 8.*self.cyl_rad\n        ellip_x = self.x_c\n        ellip_z = self.z_c + self.cyl_rad\n        ellip_y = ellip_b \n\n        floor_part = np.array(np.where(Y < ellip_b)).flatten()\n\n        dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n        cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n        scour_pit = np.array(np.where( (X - ellip_x)**2/(ellip_a**2) + \n            (Y - ellip_y)**2/(ellip_b**2) +\n            (Z - ellip_z)**2/(ellip_c**2) <= 1.)).flatten()\n\n        # remove the scour pit from the floor\n        obst_list = np.setxor1d(floor_part[:], \n            np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n        # then add the cylinder\n        obst_list = np.union1d(obst_list[:],cyl_part[:])\n        \n        return list(obst_list[:])
def show_crisscross(self, mole_object):\r\n        if mole_object.plugin_type 
== \"PyMOL\":\r\n obj = [\r\n LINEWIDTH, 3,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(float(self.point_x.get()) - 0.5), float(self.point_y.get()), float(self.point_z.get()),\r\n VERTEX, float(float(self.point_x.get()) + 0.5), float(self.point_y.get()), float(self.point_z.get()),\r\n END,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(self.point_x.get()), float(float(self.point_y.get()) - 0.5), float(self.point_z.get()),\r\n VERTEX, float(self.point_x.get()), float(float(self.point_y.get()) + 0.5), float(self.point_z.get()),\r\n END,\r\n\r\n BEGIN, LINE_STRIP,\r\n VERTEX, float(self.point_x.get()), float(self.point_y.get()), float(float(self.point_z.get()) - 0.5),\r\n VERTEX, float(self.point_x.get()), float(self.point_y.get()), float(float(self.point_z.get()) + 0.5),\r\n END\r\n\r\n ]\r\n\r\n PymolPlugin.PymolPlugin().delete(self.point_name)\r\n view = PymolPlugin.PymolPlugin().get_view()\r\n PymolPlugin.PymolPlugin().load_CGO(obj, self.point_name)\r\n PymolPlugin.PymolPlugin().set_view(view)\r\n\r\n else:\r\n chimera_model_number = int(mole_object.input_structure_box.index('active')) - 1\r\n ChimeraPlugin.ChimeraPlugin().make_icosahedron(str(chimera_model_number), float(self.point_x.get()),\r\n float(self.point_y.get()), float(self.point_z.get()))", "def __fcc_transition_diagonals(self) -> cq.cq.Workplane:\n corner_points = self.unit_cell_size * np.array(\n [(0, 0),\n (1, 0),\n (1, 0),\n (1, 1),\n (1, 1),\n (0, 1)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_fcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": 45},\n {\"unit_cell_size\": self.unit_cell_size * 0.5,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": - 45},\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": -45,\n \"angle_y\": 0},\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": 45,\n \"angle_y\": 0},\n {\"unit_cell_size\": self.unit_cell_size * 0.5,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": - 45},\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": 45}\n ],\n useLocalCoords = True\n )\n )\n return result", "def rotate_cube_right_list(liste):\n\n fronttemp = liste[1]\n \n righttemp = liste[2]\n \n backtemp = liste[3]\n \n lefttemp = liste[4]\n \n uptemp0 = liste[0][0]\n uptemp1 = liste[0][1]\n uptemp2 = liste[0][2]\n uptemp3 = liste[0][3]\n uptemp4 = liste[0][4]\n uptemp5 = liste[0][5]\n uptemp6 = liste[0][6]\n uptemp7 = liste[0][7]\n uptemp8 = liste[0][8]\n \n downtemp0 = liste[5][0]\n downtemp1 = liste[5][1]\n downtemp2 = liste[5][2]\n downtemp3 = liste[5][3]\n downtemp4 = liste[5][4]\n downtemp5 = liste[5][5]\n downtemp6 = liste[5][6]\n downtemp7 = liste[5][7]\n downtemp8 = liste[5][8]\n \n liste[2] = fronttemp\n \n liste[3] = righttemp\n \n liste[4] = backtemp\n \n liste[1] = lefttemp\n \n liste[0][0] = uptemp2\n liste[0][1] = uptemp5\n liste[0][2] = uptemp8\n liste[0][3] = uptemp1\n liste[0][4] = uptemp4\n liste[0][5] = uptemp7\n liste[0][6] = uptemp0\n liste[0][7] = uptemp3\n liste[0][8] = uptemp6\n \n liste[5][0] = downtemp6\n liste[5][1] = downtemp3\n liste[5][2] = downtemp0\n liste[5][3] = downtemp7\n liste[5][4] = downtemp4\n liste[5][5] = downtemp1\n liste[5][6] = downtemp8\n liste[5][7] = downtemp5\n liste[5][8] = downtemp2\n \n return liste", "def subdivide(R, C, max_size = 
36):\n \n def size(y1, x1, y2, x2):\n return (y2 - y1 + 1) * (x2 - x1 + 1)\n \n def helper(y1, x1, y2, x2):\n nonlocal max_size\n \n if size(y1, x1, y2, x2) <= max_size:\n return [(y1, x1, y2, x2)]\n \n # divide along horizontal\n if y2 - y1 > x2 - x1:\n y = (y1 + y2) // 2\n if (y - y1) & 1:\n return helper(y1, x1, y, x2) + helper(min(y+1, y2), x1, y2, x2)\n return helper(y1, x1, max(y-1, y1), x2) + helper(y, x1, y2, x2)\n \n #divide along vertical\n x = (x1 + x2) // 2\n return helper(y1, x1, y2, x) + helper(y1, min(x+1, x2), y2, x2)\n \n return helper(0, 0, R, C)", "def connected((e,r)):\n \n # Deal with the middle case so we don't divide by zero\n if r==0: return [(1,1),(2,1),(3,1),(4,1),(5,1),(0,1)]\n # If the input is impossible, return nothing to prune the branch (shouldn't\n # happen)\n if e>=6*r: return []\n connected=[]\n mult=e//r\n rem=e % r\n #Going sideways\n toAdd=((6*r-1,r) if e==0 else (e-1,r))\n connected.append(toAdd)\n toAdd=((0,r) if e==6*r-1 else (e+1,r))\n connected.append(toAdd)\n #Going inward\n toAdd=( (0,r-1)if mult==5 and rem==r-1 else (mult*(r-1)+rem,r-1) )\n connected.append(toAdd)\n if rem!=0:\n connected.append((mult*(r-1)+rem-1,r-1))\n\n #Going outward\n if r<nLayers-1:\n connected.append((mult*(r+1)+rem,r+1))\n connected.append((mult*(r+1)+rem+1,r+1))\n if rem==0: # only case where negatives could result\n if mult>0: connected.append( (mult*(r+1)-1,r+1))\n else: connected.append( (6*(r+1)-1,r+1))\n \n return connected", "def create_vessel_components(self) -> list:\n\n # Blanket computed from plasma\n blanket = paramak.BlanketFP(\n plasma=self.plasma,\n thickness=4.06e2 - 3.52e2,\n start_angle=-70,\n stop_angle=230,\n rotation_angle=self.rotation_angle,\n vertical_displacement=self.plasma.vertical_displacement,\n offset_from_plasma=[[-70, 0, 90, 180, 230], [50, 20, 59, 16, 50]],\n name=\"blanket\",\n )\n\n # SN Divertor\n divertor = paramak.ITERtypeDivertor(\n anchors=((4.34e2, -3.3e2), (5.56e2, -3.74e2)),\n coverages=(105, 125),\n lengths=(45, 75),\n radii=(68, 65),\n tilts=(-30, 2),\n dome_height=45,\n dome_pos=0.45,\n rotation_angle=self.rotation_angle,\n name=\"divertor\",\n )\n\n # Vacuum vessel\n divertor.points # trigger the building of the points for divertor\n # the inner part of the vacuum vessel is computed from the outer\n # points of the blanket and the divertor\n vac_vessel_inner = paramak.RotateMixedShape(\n points=blanket.outer_points + divertor.casing_points,\n rotation_angle=self.rotation_angle,\n name=\"vessel\",\n )\n\n vac_vessel = paramak.RotateSplineShape(\n points=[\n (327.77, 36.5026668124882),\n (327.77, 73.37741270075162),\n (327.77, 108.31180820215741),\n (327.77, 143.2462037035632),\n (327.77, 178.18059920496898),\n (327.77, 213.11499470637477),\n (327.77, 248.04939020778068),\n (327.77, 282.98378570918646),\n (327.77, 317.9181812105922),\n (328.6121587814181, 368.23899806938385),\n (336.18303032328333, 422.4306297110355),\n (350.4835654579176, 457.5437492206628),\n (371.95910957013655, 492.47041663587777),\n (404.3208742000702, 522.0151685493631),\n (439.6516080621078, 544.4559826211985),\n (474.98234192414554, 556.3610266211815),\n (510.2245275810152, 564.0927634387052),\n (545.6438096482208, 565.1200145185009),\n (565.832800426528, 563.1864687746993),\n (580.9745435102584, 559.4390362932862),\n (616.3052773722961, 548.4109567158157),\n (651.6360112343338, 533.224020531035),\n (686.9667450963714, 515.3041214328789),\n (722.297478958409, 492.23516177329117),\n (757.6282128204466, 466.8689289401416),\n (792.9589466824843, 
437.10619055069265),\n (825.7660566972336, 403.7167485984509),\n (853.525919017406, 369.42176700251196),\n (877.9209495411939, 333.90960594986575),\n (898.9511482685972, 300.5186330502012),\n (916.616515199616, 265.2383422522439),\n (932.5994662324425, 230.72194441870647),\n (946.0587934179808, 193.1122328856627),\n (956.1532888071343, 156.87835598377137),\n (962.8829523999035, 118.10702768634405),\n (967.9302000944803, 80.39197257542594),\n (968.7714080435763, 38.24754419835381),\n (968.7714080435763, 25.77097437642317),\n (964.5653682980957, -1.670738783514139),\n (956.9944967562304, -29.93883090626548),\n (956.1532888071343, -34.59540221679083),\n (946.0587934179808, -71.15339839027786),\n (931.7582582833464, -104.25874435511184),\n (914.9340993014238, -139.91477225259314),\n (898.9511482685972, -174.48160361826422),\n (883.8094051848669, -213.64300914878197),\n (867.8264541520404, -248.21908241802464),\n (851.0022951701176, -284.2078188440911),\n (834.1781361881949, -319.9470238737184),\n (818.1951851553683, -359.0978394110024),\n (800.5298182243495, -391.2313539579658),\n (776.1347877005617, -427.87174371008393),\n (744.1688856349085, -460.45530873911446),\n (708.8381517728709, -490.0255912806248),\n (673.5074179108332, -512.7040543014494),\n (638.1766840487956, -528.371873327094),\n (602.8459501867579, -539.0490644239661),\n (567.5152163247203, -546.1219131278361),\n (532.1844824626827, -548.9566889080664),\n (496.85374860064496, -547.7514325554811),\n (461.52301473860734, -541.3971156414638),\n (426.1922808765697, -527.596464992453),\n (390.8615470145321, -501.2796363633471),\n (360.57806084707124, -468.0473902249954),\n (340.389070068764, -431.4355817359209),\n (329.87397070506233, -399.072068113844),\n (327.770950832322, -357.4796824533661),\n (327.770950832322, -311.73270913617455),\n (327.770950832322, -276.79831363476876),\n (327.770950832322, -241.86391813336297),\n (327.770950832322, -206.92952263195718),\n (327.770950832322, -171.99512713055117),\n (327.770950832322, -137.06073162914538),\n (327.770950832322, -102.12633612773948),\n (327.770950832322, -67.19194062633369),\n ],\n cut=[vac_vessel_inner], # to make a hollow shape\n rotation_angle=self.rotation_angle,\n name=\"vessel_inner\",\n )\n\n return [divertor, blanket, vac_vessel, vac_vessel_inner]", "def test():\n\n file = 'crosssection.dat'\n f = open(file,'r')\n lines = f.readlines()\n nline = len(lines)\n points = np.zeros(shape=(nline,4))\n sigtable = np.zeros(nline)\n for i in range(nline):\n points[i,0] = float(lines[i].split()[0])\n points[i,1] = float(lines[i].split()[1])\n points[i,2] = float(lines[i].split()[2])\n points[i,3] = float(lines[i].split()[3])\n sigtable[i] = float(lines[i].split()[4])\n\n nbin = 60\n npts = nline/nbin\n\n # checking lensing cross section against magnitude\n '''\n for i in range(npts):\n plt.plot(points[i*nbin:(i+1)*nbin,3],sigtable[i*nbin:(i+1)*nbin])\n plt.show()\n '''\n npts = npts/nbin\n\n # checking lensing cross section against velocity dispersion\n '''\n for i in range(nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,0]==points[i,0])\\\n &(points[:,3]==points[i,3]))\n vel = points[mask,2]\n sigma = sigtable[mask]\n plt.plot(vel,sigma)\n plt.show()\n '''\n\n # checking lensing cross section against lens redshift\n #'''\n for i in range(3000,nline):\n mask, = np.where((points[:,1]==points[i,1])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zl = points[mask,0]\n sigma = sigtable[mask]\n plt.plot(zl,sigma)\n plt.show()\n #'''\n\n # 
checking lensing cross section against source redshift\n for i in reversed(range(nline)):\n mask, = np.where((points[:,0]==points[i,0])&(points[:,2]==points[i,2])\\\n &(points[:,3]==points[i,3]))\n print mask\n zs = points[mask,1]\n sigma = sigtable[mask]\n plt.plot(zs,sigma)\n plt.show()", "def auto_rivet():\n sel_list = pm.ls(sl=1)\n\n # the last selection is the mesh\n objects = sel_list[:-1]\n geo = sel_list[-1]\n\n # get the closest point to the surface\n geo_shape = geo.getShape()\n\n follicles = []\n\n for obj in objects:\n # pivot point of the obj\n pivot = obj.getRotatePivot(space='world')\n uv = geo_shape.getUVAtPoint(pivot, space='world')\n\n # create a hair follicle\n follicle = pm.nt.Follicle()\n follicles.append(follicle)\n follicle.simulationMethod.set(0)\n geo_shape.worldMatrix >> follicle.inputWorldMatrix\n geo_shape.outMesh >> follicle.inputMesh\n follicle.parameterU.set(uv[0])\n follicle.parameterV.set(uv[1])\n\n # parent the object to the follicles transform node\n follicle_transform = follicle.getParent()\n\n follicle.outTranslate >> follicle_transform.translate\n follicle.outRotate >> follicle_transform.rotate\n\n pm.parent(obj, follicle_transform)\n\n return follicles", "def polyCutUVCtx(*args, loopSpeed: Union[int, bool]=0, mapBordersColor: Union[List[float, float,\n float], bool]=None, showCheckerMap: bool=True, showTextureBorders: bool=True,\n showUVShellColoring: bool=True, steadyStroke: bool=True, steadyStrokeDistance:\n Union[float, bool]=0.0, symmetry: Union[int, bool]=0, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[bool, Any]:\n pass", "def alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: \", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, 
Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)", "def contextualcull(cnts):\n temp = []\n actualout = []\n indicesused = []\n for cnt in cnts:\n if cnt.area > CULL_MINIMUMS[cnt.spatialindex]:\n temp.append(cnt)\n if cnt.spatialindex not in indicesused:\n indicesused.append(cnt.spatialindex)\n if PRIORITIZE_FASTEST:\n for sindex in indicesused: #filter down to the fastest contour.\n _allatthisindex = [x for x in temp if x == sindex]\n highest = _allatthisindex.sort(key=lambda y: y.spd, reverse=False)[0]\n actualout.append(highest)\n else:\n actualout = temp\n print 'Contours after cull: ', len(actualout)\n return actualout", "def unfold_wire(pl):\n\tpl = phone_pl\n\tshape_points = [np.array(p) for p in pl.points]\n\tpointIter = takeNGenerator(shape_points, 4)\n\td0 = getDistance(*shape_points[0:2])\n\tpoints = [np.array([0, 0]), np.array([0, d0])]\n\tfor i in range(len(shape_points)-3):\n\t\t(p1,p2,p3,p4) = pointIter.next()\n\t\tv1 =p1-p2\n\t\tv2 = p3-p2\n\t\tv3 = p2-p3\n\t\tv4 = p4-p3\n\t\told_normal = np.cross(v1,v2)\n\t\tnew_normal = np.cross(v3,v4)\n\t\tnorm_old = old_normal/la.norm(old_normal)\n\t\tnorm_new = old_normal/la.norm(new_normal)\n\n\n\t\t#check if we need to transform:\n\t\tif any(norm_old != norm_new):\n\t\t\tprint norm_old, norm_new\n\t\t\t#create a transform that will rotate the next points to the old orientation\n\t\t\ttransform = create_transform(norm_new, norm_old)\n\t\t\trot_pot = p2\n\t\t\tpose = (rot_pot, transform)\n\t\t\tpoly = PolyLine(shape_points[i:])\n\t\t\ttranslated = poly.transformed(pose)\n\t\t\tnew_pts = [np.array(p) for p in translated.points]\n\n\t\t\tif len(shape_points[:i]) is 0:\n\t\t\t\tshape_points = new_pts\n\t\t\telse:\n\t\t\t\tshape_points = np.vstack((shape_points[:i], new_pts))\n\t\t\tpointIter = takeNGenerator(shape_points, 4)\n\t\t\tfast_forward(pointIter, i)\n\treturn PolyLine(shape_points)", "def possibleMove(self, dist, blockList):\r\n \r\n if self.orientation == \"v\":\r\n for block in blockList:\r\n if dist >= 0:\r\n for n in range(dist):\r\n for coords in self.getCoords():\r\n if ((coords[0], coords[1] + n) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n else:\r\n for n in range(0, dist, -1):\r\n for coords in self.getCoords():\r\n if ((coords[0], coords[1] +n) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n \r\n self.y += dist\r\n self.setCoords()\r\n \r\n elif self.orientation == \"h\":\r\n for block in blockList:\r\n if dist >= 0:\r\n for n in range(dist):\r\n for coords in self.getCoords():\r\n if ((coords[0] + n, coords[1]) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n else:\r\n for n in range(0, dist, -1):\r\n for coords in self.getCoords():\r\n if ((coords[0] + n, coords[1]) in\r\n block.getCoords()) and(block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n \r\n self.x += dist\r\n self.setCoords()", "def buildcutlineset():\r\n 
cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def get_obstList(self,X,Y,Z):\n \n x_c_cone = self.x_c\n\tz_c_cone = self.z_c\n y_c_cone = 0\n x_s = 2.25*2*self.cyl_rad\n rad_cone = x_s + self.cyl_rad\n\th_cone = rad_cone*0.57735\n\n floor_part = np.array(np.where(Y < h_cone)).flatten()\n\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n scour_pit = np.array(np.where( (X - x_c_cone)**2 + (Z - z_c_cone)**2 <= ((self.cyl_rad/cone)/(h_cone))**2*(Y - y_c_cone)**2))\n\n # remove the scour pit from the floor\n obst_list = np.setxor1d(floor_part[:], \n np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n # then add the cylinder\n obst_list = np.union1d(obst_list[:],cyl_part[:])\n \n return list(obst_list[:])", "def loft(*sections):\n result = Mesh()\n current = sections[0]\n cap = closePoly(current)\n result.addPoly(cap)\n for next in sections:\n for ((p0,p1),(q0,q1)) in zip(edges(current),edges(next)):\n # HANDEDNESS\n result.addTri([p0,q0,p1]).addTri([p1,q0,q1])\n current = next\n # back cap is reversed (to face backward)\n result.addPoly(closePoly(current))\n return result", "def convex_pieces(self, config):\n # get volume\n orig_volume = self.mesh_.get_total_volume()\n \n # convert to off\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(self.obj_filename, self.off_filename) \n os.system(meshlabserver_cmd)\n logging.info('MeshlabServer OFF Conversion Command: %s' %(meshlabserver_cmd))\n\n if not os.path.exists(off_filename):\n logging.warning('Meshlab conversion failed for %s' %(off_filename))\n return\n \n # create convex pieces\n cvx_decomp_command = 
config['hacd_cmd_template'] %(self.off_filename,\n config['min_num_clusters'],\n config['max_concavity'],\n config['invert_input_faces'],\n config['extra_dist_points'],\n config['add_faces_points'],\n config['connected_components_dist'],\n config['target_num_triangles'])\n logging.info('CV Decomp Command: %s' %(cvx_decomp_command))\n os.system(cvx_decomp_command) \n\n # convert each wrl to an obj and an stl\n convex_piece_files = glob.glob('%s_dec_hacd_*.wrl' %(os.path.join(self.file_path_, self.file_root_)))\n convex_piece_meshes = []\n total_volume = 0.0\n\n for convex_piece_file in convex_piece_files:\n file_root, file_ext = os.path.splitext(convex_piece_file)\n obj_filename = file_root + '.obj'\n stl_filename = file_root + '.stl'\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(convex_piece_file, obj_filename) \n os.system(meshlabserver_cmd)\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(convex_piece_file, stl_filename) \n os.system(meshlabserver_cmd)\n\n of = obj_file.ObjFile(obj_filename)\n convex_piece = of.read()\n total_volume += convex_piece.get_total_volume()\n convex_piece_meshes.append(of.read())\n\n root = et.Element('robot', name=\"test\")\n\n # get the masses and moments of inertia\n effective_density = orig_volume / total_volume\n prev_piece_name = None\n for convex_piece, filename in zip(convex_piece_meshes, convex_piece_files):\n convex_piece.set_center_of_mass(np.zeros(3))\n convex_piece.set_density(self.mesh_.density * effective_density)\n \n # write to xml\n piece_name = 'link_%s'%(file_root)\n file_path_wo_ext, file_ext = os.path.splitext(filename)\n file_path, file_root = os.path.split(file_path_wo_ext)\n I = convex_piece.inertia\n link = et.SubElement(root, 'link', name=piece_name)\n\n inertial = et.SubElement(link, 'inertial')\n origin = et.SubElement(inertial, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n mass = et.SubElement(inertial, 'mass', value='%f'%convex_piece.mass)\n inertia = et.SubElement(inertial, 'inertia', ixx='%f'%I[0,0], ixy='%f'%I[0,1], ixz='%f'%I[0,2],\n iyy='%f'%I[1,1], iyz='%f'%I[1,2], izz='%f'%I[2,2])\n \n visual = et.SubElement(link, 'visual')\n origin = et.SubElement(visual, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n geometry = et.SubElement(visual, 'geometry')\n mesh = et.SubElement(geometry, 'mesh', filename=file_path_wo_ext+'.stl')\n material = et.SubElement(visual, 'material', name='')\n color = et.SubElement(material, 'color', rgba=\"0.75 0.75 0.75 1\")\n\n collision = et.SubElement(link, 'collision')\n origin = et.SubElement(collision, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\") \n geometry = et.SubElement(collision, 'geometry')\n mesh = et.SubElement(geometry, 'mesh', filename=file_path_wo_ext+'.stl')\n\n if prev_piece_name is not None:\n joint = et.SubElement(root, 'joint', name='%s_joint'%(piece_name), type='fixed')\n origin = et.SubElement(joint, 'origin', xyz=\"0 0 0\", rpy=\"0 0 0\")\n parent = et.SubElement(joint, 'parent', link=prev_piece_name)\n child = et.SubElement(joint, 'child', link=piece_name)\n\n prev_piece_name = piece_name\n\n \"\"\"\n txt_filename = file_root + '.txt'\n f = open(txt_filename, 'w')\n f.write('mass: %f\\n' %(convex_piece.mass))\n f.write('inertia: ' + str(convex_piece.inertia) + '\\n')\n f.close()\n \"\"\"\n\n tree = et.ElementTree(root)\n tree.write('test.URDF')\n exit(0)\n\n return convex_piece_meshes", "def test_linear_buckling_iso_CCSS(plot_static=False, plot_lb=False):\n # number of nodes\n nx = 5 # along x\n ny = 5 # along y\n\n # getting integration points\n nint = 
4\n points, weights = get_points_weights(nint=nint)\n\n # geometry\n a = 3 # along x\n b = 3 # along y\n\n # material properties\n E = 200e9\n nu = 0.3\n laminaprop = (E, E, nu)\n stack = [0]\n h = 0.001\n lam = read_stack(stack=stack, plyt=h, laminaprop=laminaprop)\n\n # creating mesh\n x = np.linspace(0, a, nx)\n y = np.linspace(0, b, ny)\n xmesh, ymesh = np.meshgrid(x, y)\n\n # node coordinates and position in the global matrix\n ncoords = np.vstack((xmesh.T.flatten(), ymesh.T.flatten())).T\n nids = 1 + np.arange(ncoords.shape[0])\n nid_pos = dict(zip(nids, np.arange(len(nids))))\n\n # identifying nodal connectivity for plate elements\n # similar than Nastran's CQUAD4\n #\n # ^ y\n # |\n #\n # 4 ________ 3\n # | |\n # | | --> x\n # | |\n # |_______|\n # 1 2\n\n\n nids_mesh = nids.reshape(nx, ny)\n n1s = nids_mesh[:-1, :-1].flatten()\n n2s = nids_mesh[1:, :-1].flatten()\n n3s = nids_mesh[1:, 1:].flatten()\n n4s = nids_mesh[:-1, 1:].flatten()\n\n num_elements = len(n1s)\n print('num_elements', num_elements)\n\n N = DOF*nx*ny\n Kr = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kc = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kv = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n KGr = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGc = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGv = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n init_k_KC0 = 0\n init_k_KG = 0\n\n plates = []\n for n1, n2, n3, n4 in zip(n1s, n2s, n3s, n4s):\n plate = BFSPlate2D()\n plate.n1 = n1\n plate.n2 = n2\n plate.n3 = n3\n plate.n4 = n4\n plate.c1 = DOF*nid_pos[n1]\n plate.c2 = DOF*nid_pos[n2]\n plate.c3 = DOF*nid_pos[n3]\n plate.c4 = DOF*nid_pos[n4]\n plate.ABD = lam.ABD\n plate.lex = a/(nx - 1)\n plate.ley = b/(ny - 1)\n plate.init_k_KC0 = init_k_KC0\n plate.init_k_KG = init_k_KG\n update_KC0(plate, points, weights, Kr, Kc, Kv)\n init_k_KC0 += KC0_SPARSE_SIZE\n init_k_KG += KG_SPARSE_SIZE\n plates.append(plate)\n\n KC0 = coo_matrix((Kv, (Kr, Kc)), shape=(N, N)).tocsc()\n\n # applying boundary conditions\n\n # locating nodes\n bk = np.zeros(KC0.shape[0], dtype=bool) # constrained DOFs, can be used to prescribe displacements\n\n x = ncoords[:, 0]\n y = ncoords[:, 1]\n\n # applying boundary conditions\n # simply supported\n check = isclose(x, 0) | isclose(x, a) | isclose(y, 0) | isclose(y, b)\n bk[2::DOF] = check\n check = isclose(x, 0) | isclose(x, a)\n bk[3::DOF] = check\n # point supports\n check = isclose(x, a/2) & (isclose(y, 0) | isclose(y, b))\n bk[0::DOF] = check\n check = isclose(y, b/2) & (isclose(x, 0) | isclose(x, a))\n bk[1::DOF] = check\n\n # unconstrained nodes\n bu = ~bk # logical_not\n\n # defining external force vector\n fext = np.zeros(KC0.shape[0], dtype=float)\n\n # applying unitary load along u at x=a\n # nodes at vertices get 1/2 the force\n for plate in plates:\n pos1 = nid_pos[plate.n1]\n pos2 = nid_pos[plate.n2]\n pos3 = nid_pos[plate.n3]\n pos4 = nid_pos[plate.n4]\n if isclose(x[pos3], a):\n Nxx = -1\n xi = +1\n elif isclose(x[pos1], 0):\n Nxx = +1\n xi = -1\n else:\n continue\n lex = plate.lex\n ley = plate.ley\n indices = []\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = DOF*pos3\n c4 = DOF*pos4\n cs = [c1, c2, c3, c4]\n for ci in cs:\n for i in range(DOF):\n indices.append(ci + i)\n fe = np.zeros(4*DOF, dtype=float)\n for j in range(nint):\n eta = points[j]\n plate.update_Nu(xi, eta)\n Nu = np.asarray(plate.Nu)\n fe += ley/2*weights[j]*Nu*Nxx\n fext[indices] += fe\n\n Kuu = KC0[bu, :][:, bu]\n fextu = fext[bu]\n\n # static solver\n uu = spsolve(Kuu, 
fextu)\n u = np.zeros(KC0.shape[0], dtype=float)\n u[bu] = uu\n\n if plot_static:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n uplot = u[0::DOF].reshape(nx, ny).T\n vplot = u[1::DOF].reshape(nx, ny).T\n print('u extremes', uplot.min(), uplot.max())\n print('v extremes', vplot.min(), vplot.max())\n levels = np.linspace(uplot.min(), uplot.max(), 300)\n plt.contourf(xmesh, ymesh, uplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n # eigenvalue solver\n\n # getting integration points\n for plate in plates:\n update_KG(u, plate, points, weights, KGr, KGc, KGv)\n KG = coo_matrix((KGv, (KGr, KGc)), shape=(N, N)).tocsc()\n KGuu = KG[bu, :][:, bu]\n\n # solving modified generalized eigenvalue problem\n # Original: (KC0 + lambda*KG)*v = 0\n # Modified: (-1/lambda)*KC0*v = KG*v #NOTE here we find (-1/lambda)\n num_eigenvalues = 5\n eigvals, eigvecsu = eigsh(A=KGuu, k=num_eigenvalues, which='SM', M=Kuu,\n tol=1e-6, sigma=1., mode='cayley')\n eigvals = -1./eigvals\n eigvecs = np.zeros((KC0.shape[0], num_eigenvalues), dtype=float)\n eigvecs[bu, :] = eigvecsu\n\n if plot_lb:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n mode = 0\n wplot = eigvecs[2::DOF, mode].reshape(nx, ny).T\n levels = np.linspace(wplot.min(), wplot.max(), 300)\n plt.contourf(xmesh, ymesh, wplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n kc = eigvals[0]/(E*np.pi**2*(h/b)**2/(12*(1 - nu**2))*h)\n assert isclose(kc, 6.6, rtol=0.05)", "def prog(args):\r\n i_fname, o_fname, pedestal_params, split_list, Num_W = args\r\n mesh = stl.mesh.Mesh.from_file(i_fname)\r\n #rotate mesh since by default the rotation axis is along X\r\n mesh.rotate([0,1,0],np.pi/2)\r\n\r\n v_arr = np.round(np.vstack(mesh.vectors).astype(float), decimals=1)\r\n\r\n splt0_arr = np.array(split_list)\r\n splt1_arr = np.roll(splt0_arr,-1)\r\n\r\n pos = cf.cartesian2cylyndrical(v_arr, Num_W)\r\n\r\n #make splits\r\n pos_list=[]\r\n for splt0, splt1 in zip(splt0_arr[:-1], splt1_arr[:-1]):\r\n pos_idx = np.where((splt0<=pos[:,:,2]) & (splt1>pos[:,:,2]))[0]\r\n print(splt0, splt1)\r\n #pos = [r, th, z] sectionwise\r\n pos_list.append(pos[pos_idx])\r\n #add pedestal mesh\r\n\r\n for sect_num, pos in enumerate(pos_list):\r\n pos = cf.add_pedestal(pos, pedestal_params)\r\n profiles=np.zeros_like(pos)\r\n\r\n for i in np.arange(np.shape(pos)[0]):\r\n profiles[i] = cf.cylyndrical2cartesian(pos[i])\r\n\r\n strokes = np.flipud(np.rot90(profiles))\r\n #transform data from longeron nodes [xyz] to:\r\n #a_arr - rotation angle around the rotation axis\r\n #r_arr - length of a segment perpenticular to the rotation axis and corresponding lateral mesh edge\r\n #z_arr - corresponding z coordiantes\r\n #v_arr - direction vector of the coresponding lateral mesh edge\r\n a_arr, r_arr, z_arr, v_arr = cf.transform(strokes, add_pedestal_bottom=True,add_pedestal_top=True)\r\n\r\n #make a summary plots\r\n cf.plot_loft_paths(profiles)\r\n cf.plot_loft_paths(pos)\r\n cf.plot_surf(a_arr,z_arr,r_arr)\r\n\r\n #collect data to the dictionary longeron wise\r\n res_dict = {'a_arr':np.rot90(a_arr, k=-1),\r\n 'r_arr':np.rot90(r_arr, k=-1),\r\n 'z_arr':np.rot90(z_arr, k=-1),\r\n 'v_arr':np.rot90(v_arr, k=-1)}\r\n\r\n #save result dictionary\r\n if not o_fname:\r\n o_fname = i_fname\r\n\r\n fname='{}_{}.pickle'.format(o_fname, sect_num)\r\n with open(fname, 'wb') as f:\r\n # Pickle the 'data' dictionary using the highest protocol available.\r\n 
pickle.dump(res_dict, f, pickle.HIGHEST_PROTOCOL)\r\n\r\n print(fname, ' saved')", "def decompose(var_list, primary=False):\n subsectors = []\n for var in var_list:\n _vars = copy.copy(var_list)\n _vars.remove(var)\n subsectors.append(SubSector(var, _vars, primary=primary))\n return subsectors", "def get_lightcurves(\n self,\n pipeline=\"pdcsap\",\n cadence=\"short\",\n sectors=None,\n remove_outliers=False,\n quality_bitmask=None,\n ):\n if sectors is None:\n all_sectors = self.all_sectors\n else:\n all_sectors = sectors\n\n for n, sector in enumerate(all_sectors):\n if pipeline == \"pdcsap\":\n l = ShortCadence(\n ticid=self.ticid, sector=sector, verbose=False\n )\n lc = l.get_lc()\n else:\n errmsg = \"pdcsap is only currently available\"\n raise NotImplementedError(errmsg)\n\n if quality_bitmask == \"hard\":\n lc = lc[(lc.quality == 0) | np.isnan(lc.quality)]\n\n if remove_outliers:\n lc, mask = lc.remove_outliers(\n sigma_upper=3, sigma_lower=10, return_mask=True\n )\n\n if n == 0:\n lcs = lc.copy()\n else:\n lcs = lcs.append(lc)\n print(\n f\"{sector}: cdpp={lc.estimate_cdpp():.2f}, std={lc.flux.std():.2f}\"\n )\n\n lcs.sector = all_sectors\n return lcs", "def test_flip_loop():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (4,0), (6,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (6,5), (4,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def subpartition_bsp(self, min_width, min_height):\n \n def split_horizontal(p):\n ul_x, ul_y = p.ul_pos\n \n split_pos = (random.choice(\n list(range(ul_x + min_width, ul_x + p.width - min_width + 1))), ul_y)\n \n split_x, split_y = split_pos\n \n return([Partition(p.ul_pos, split_x-ul_x, p.height), \n Partition(split_pos, ul_x + p.width - split_x, p.height)])\n \n def split_vertical(p):\n ul_x, ul_y = p.ul_pos\n \n split_pos = (ul_x, random.choice(\n list(range(ul_y + min_height, ul_y + p.height - min_height + 1))))\n \n split_x, split_y = split_pos\n \n return([Partition(p.ul_pos, p.width, split_y-ul_y), \n Partition(split_pos, p.width, ul_y + p.height - split_y)])\n \n\n \n if self.width < min_width or self.height < min_height:\n raise PartitionException(\"Partition too small!\")\n \n splith = (self.width > 2*min_width)\n splitv = (self.height > 2*min_height)\n \n new_partitions = None\n \n if splith and splitv:\n new_partitions = random.choice([\n split_horizontal, split_vertical])(self)\n \n elif splith:\n new_partitions = split_horizontal(self)\n \n elif splitv:\n new_partitions = split_vertical(self)\n \n else:\n return [self]\n \n return list(flatten([p.subpartition_bsp(min_width, min_height) \n for p in new_partitions]))", "def getShelves(detections, lines):\r\n\r\n for idx,det in enumerate(detections):\r\n dist_to_shelf = np.zeros(len(lines))\r\n b_points = det['box_points']\r\n b_height = b_points[3] - b_points[1] \r\n for l in range(len(lines)): \r\n dist_to_shelf[l] = lines[l] - b_points[1] ### distance of upper-left corner from lines\r\n if dist_to_shelf[l] < 0:\r\n dist_to_shelf[l] = 100000 ## sth huge \r\n #print(dist_to_shelf) \r\n det['shelf'] = np.argmin(dist_to_shelf)\r\n return detections", "def cull(self):", "def GetSubContoursAndOrderingByFrame(watershed, allValsByFrame):\n cellNetworkList = GetCellNetworksByFrame(watershed, allValsByFrame)\n scListByFrame = [cellNetworkList[i].subContours for i in range(len(watershed))]\n orderOfSCsByValueByFrame = [\n cellNetworkList[i].orderOfSubContoursDict for i in 
range(len(watershed))\n ]\n return scListByFrame, orderOfSCsByValueByFrame", "def unitQuad_Edge(lens, N=3):\n template = [ np.array([0,0]), np.array([lens[0], 0]), None, None ] #Template from which to generate other Quad Vertex Lists\n leftDegenerate = template.copy() #Left Limit of quad if you were to rotate edge 3 CCW about the origin until you no longer can\n rightDegenerate = template.copy() #Right Limit of quad if you were to rotate edge 2 CW about point 1 until you no longer can,\n # or alternatively, how far edge 3 can rotate CW until the quad is degenerate\n try:\n leftDegenerate[3] = np.array( circleIntersection(leftDegenerate[0], lens[3], leftDegenerate[1], lens[1]+lens[2]) )\n leftDegenerate[2] = ( lens[1] / (lens[2]+lens[1]) ) * (leftDegenerate[3]-leftDegenerate[1]) + leftDegenerate[1]\n except: \n leftDegenerate[3] = np.array([-lens[3],0])\n leftDegenerate[2] = np.array( circleIntersection(leftDegenerate[3], lens[2], leftDegenerate[1], lens[1]) )\n\n try:\n rightDegenerate[2] = np.array( circleIntersection(rightDegenerate[0], lens[2]+lens[3], rightDegenerate[1], lens[1]) )\n rightDegenerate[3] = ( lens[3] / (lens[3]+lens[2]) ) * rightDegenerate[2]\n except:\n rightDegenerate[2] = np.array([lens[0]+lens[1], 0])\n rightDegenerate[3] = np.array( circleIntersection(rightDegenerate[0], lens[3], rightDegenerate[2], lens[2]))\n \n rightOfOrigin = np.array([1,0]) #Theta = 0 on the Unit Circle\n thetaMin = angle_between(leftDegenerate[3], rightOfOrigin) #Angle of \n thetaMax = angle_between(rightDegenerate[3], rightOfOrigin)\n pitch = (thetaMax - thetaMin) / (N-1)\n\n result = []\n result.append(leftDegenerate) \n for i in range(1, N-1):\n result.append(template.copy())\n result[i][3] = lens[3]*unitCircPt(i*pitch+thetaMin)\n result[i][2] = np.array(circleIntersection( result[i][3], lens[2], result[i][1], lens[1]))\n result.append(rightDegenerate) \n\n return listify(result)", "def cylinder(downCirc = -120, upCirc = -70,radius = 15, resolution = 20):\n t = np.linspace(0, 2*m.pi, resolution)\n cylinderPos = []\n for num in t:\n x = -m.cos(num)*radius\n y = m.sin(num)*radius\n\n cylinderPos.append([x, y, downCirc, 0, 0, 0, 'mov'])\n\n for num in t:\n x = -m.cos(num)*radius\n y = m.sin(num)*radius\n\n cylinderPos.append([x, y, upCirc, 0, 0, 0, 'mov'])\n\n cylinderPos.append([0,0,-127,0,0,0,'mov'])\n return cylinderPos", "def visualHull(sils, length):\n result = sils.pop(0).cone(length)\n assert result.pnFacesInPoly()\n i = 0\n for s in sils:\n # print(i)\n assert result.pnFacesInPoly()\n result = result.intersection(s.cone(length), True)\n # result.plot()\n i += 1\n return result", "def test_flip_loop2():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (5,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (5,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)", "def include_cut_poly_array(self, planes, fix_pts):\n planeActors = []\n\n for i in range(3):\n # get plane coefficients\n a = planes[i][0]\n b = planes[i][1]\n c = planes[i][2]\n\n # create vtk plane object\n VTKplane = vtk.vtkPlane()\n VTKplane.SetNormal(a, b, c)\n if fix_pts[0] == 'var': # for variability test\n VTKplane.SetOrigin(self.epi_apex_node)\n else: # for foreshortening test\n VTKplane.SetOrigin(fix_pts[1+i])\n\n # create cutter\n cutEdges = vtk.vtkCutter()\n cutEdges.SetInputData(self.endo_poly) # always cut through endo\n cutEdges.SetCutFunction(VTKplane)\n cutEdges.GenerateCutScalarsOn()\n cutEdges.GenerateTrianglesOn()\n 
cutEdges.SetValue(0, 0.5)\n\n # create strips # just for output purposes\n cutStrips = vtk.vtkStripper()\n cutStrips.SetInputConnection(cutEdges.GetOutputPort())\n cutStrips.Update()\n\n # get polydata from strips (just for output purposes)\n cutPoly = vtk.vtkPolyData()\n cutPts = cutStrips.GetOutput().GetPoints()\n cutPoly.SetPoints(cutPts)\n cutPoly.SetPolys(cutStrips.GetOutput().GetLines())\n\n cutterMapper = vtk.vtkPolyDataMapper()\n cutterMapper.SetInputConnection(cutEdges.GetOutputPort())\n cutterMapper.ScalarVisibilityOff()\n\n # create plane actor\n planeActor = vtk.vtkActor()\n planeActor.SetMapper(cutterMapper)\n planeActor.GetProperty().SetColor(self.plane_colors[i])\n planeActor.GetProperty().SetLineWidth(6)\n\n # store the actors of the specific planes to add later into 1 renderer\n planeActors.append(planeActor)\n\n return planeActors", "def planeSliceTOAFig(uxmax, uymax, dso, dsl, f, dm, m, n, ax, ay, npoints, xax = True, yax = True):\n \n # Calculate coefficients\n rF2 = rFsqr(dso, dsl, f)\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n lc = lensc(dm, f)\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n tg0 = tg0coeff(dso, dsl)\n tdm0 = tdm0coeff(dm, f)\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args=(alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n # print(upcross)\n \n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n \n cdist = uxmax/(np.abs(50*lc))\n \n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n ncomplex = np.zeros(nzones) # don't care about complex solutions in this case\n print(nreal)\n \n # Find roots\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate TOAs\n alltoas = []\n for i in range(nzones):\n toas = obsCalc(deltat, allroots[i], int(nreal[i]), npoints, 1, args = (tg0, tdm0, alp, ax, ay)).real\n alltoas.append(toas)\n \n # Plots\n fig, ax1 = plt.subplots(figsize=(10, 8), dpi = 100)\n # grid = gs.GridSpec(2, 2, width_ratios=[4, 1])\n # ax0 = plt.subplot(grid[1:, 1])\n # ax1 = plt.subplot(grid[0, 1])\n \n \n # ax2 = plt.subplot(grid[:, 0]) # Plot results\n colors = assignColor(allroots, nreal)\n l = []\n for i in range(len(upxvecs)):\n zone = alltoas[i]\n for j in range(len(zone)):\n line = ax1.plot(upxvecs[i], zone[j], color = colors[i][j], lw = 3.)\n l.append(line)\n for i in range(ncross):\n ax1.plot([upcross[i][0], upcross[i][0]], [-100, 100], color = 'black', ls = 'dashed', scaley = False, scalex = False, lw = 2.5)\n label = r'$\\nu = $' + str(f/GHz) + ' GHz'\n ax1.text(0.05, 0.9, label, transform=ax1.transAxes, fontsize = 28, bbox=dict(facecolor = 'white', alpha=1.))\n # ax1.set_ylim(min(alltoas.flatten() - 1), max(alltoas.flatten() + 1))\n if not xax:\n ax1.xaxis.set_ticklabels([])\n else:\n ax1.set_xlabel(r\"$u'_x$\", fontsize=28)\n if not yax:\n ax1.yaxis.set_ticklabels([])\n else:\n ax1.set_ylabel(r'$\\Delta t \\: (\\mu s)$', fontsize=28)\n if dm > 0:\n ax1.set_ylim(-0.5, 15.)\n else:\n ax1.set_ylim(-2.5, 10.)\n ax1.tick_params(labelsize = 22)\n ax1.grid()\n \n ax2 = inset_axes(ax1, width='18%', height='23%', loc=1)\n rx = np.linspace(-uxmax, uxmax, 1000) # Plot caustic surfaces\n ry = np.linspace(-uxmax, uxmax, 1000)\n uvec = np.meshgrid(rx, ry)\n ucaus = causCurve(uvec, coeff)\n cs = ax2.contour(rx, ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax2.plot(upx, upy, color = 'blue')\n ax2.plot(np.linspace(xmin, xmax, 10), np.linspace(ymin, ymax, 10), color = 'green')\n ax2.scatter(upcross.T[0], upcross.T[1], color = 'green')\n # ax2.set_xlabel(r\"$u'_x$\")\n # ax2.set_ylabel(r\"$u'_y$\")\n ax2.set_xlim(-uxmax, uxmax)\n ax2.tick_params(labelsize = 16)\n # ax1.set_title(\"Caustic curves\")\n # ax1.set_aspect('equal', anchor = 'N')\n ax2.grid()\n # ax2.tight_layout()\n \n plt.tight_layout()\n plt.show()\n return", "def make_m3_crv(TSUGITE_list, SHIGUCHI_list):\n \"\"\"\n 1 Get information from TSUGITE_list and SHIGUCHI_list.\n \"\"\"\n # TSUGITE\n # Left----------------------------------------------------------------------\n # material2\n m3_left_list = TSUGITE_list[2]\n m3_left_upper = m3_left_list[0]\n m3_left_middle = m3_left_list[1]\n m3_left_lower = m3_left_list[2]\n\n # SHIGUCHI\n m3_KUMIKI_points1 = SHIGUCHI_list[6]\n m3_KUMIKI_points2 = SHIGUCHI_list[7]\n\n # m3_KUMIKI_points1.reverse()\n\n m3_left_upper.extend(m3_KUMIKI_points1)\n 
m3_left_upper.append(m3_left_upper[0])\n m3_left_upper_crv = rs.AddPolyline(m3_left_upper)\n\n m3_left_middle.extend(m3_KUMIKI_points1)\n m3_left_middle.append(m3_left_middle[0])\n m3_left_middle_crv = rs.AddPolyline(m3_left_middle)\n\n m3_left_lower.extend(m3_KUMIKI_points1)\n m3_left_lower.append(m3_left_lower[0])\n m3_left_lower_crv = rs.AddPolyline(m3_left_lower)\n\n m3_left_crvs = [m3_left_upper_crv, m3_left_middle_crv, m3_left_lower_crv]\n\n # Right---------------------------------------------------------------------\n # material3\n m3_right_list = TSUGITE_list[3]\n m3_right_upper = m3_right_list[0]\n m3_right_middle = m3_right_list[1]\n m3_right_lower = m3_right_list[2]\n\n # SHIGUCHI\n m3_KUMIKI_points1 = SHIGUCHI_list[2]\n m3_KUMIKI_points2 = SHIGUCHI_list[3]\n\n # Extend\n # material3\n m3_right_upper.extend(m3_KUMIKI_points1)\n m3_right_upper.append(m3_right_upper[0])\n m3_right_upper_crv = rs.AddPolyline(m3_right_upper)\n\n m3_right_middle.extend(m3_KUMIKI_points1)\n m3_right_middle.append(m3_right_middle[0])\n m3_right_middle_crv = rs.AddPolyline(m3_right_middle)\n\n m3_right_lower.extend(m3_KUMIKI_points1)\n m3_right_lower.append(m3_right_lower[0])\n m3_right_lower_crv = rs.AddPolyline(m3_right_lower)\n\n m3_right_crvs = [m3_right_upper_crv, m3_right_middle_crv, m3_right_lower_crv]\n\n return m3_left_crvs, m3_right_crvs", "def graphCut(img, center, radius, temp, edge, count, editPoints, padList, theta_width, phi_width):\r\n\r\n\r\n \"\"\"Important note. The labeled image is referred to as temp, or self.temp in the interface.\r\n This stands for template. The previously labled image is fed back into the graphcut\"\"\"\r\n \r\n \"\"\"create polar images and cost arrays\"\"\"\r\n \r\n print \"RUNNING GRAPHCUT!\"\r\n img= padImage(img, padList)\r\n temp= padImage(temp, padList)\r\n edge= padImage(edge, padList)\r\n center= padCenter(center, padList)\r\n \r\n polar_img= img2polar(img, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n\r\n \r\n \r\n polar_grad, y, x = np.gradient(np.array(polar_img, dtype='float'))\r\n \"\"\"Lockett 100416 replacement line below to not use gradient when the image has a surface label\"\"\"\r\n \"\"\"polar_grad = -1 * np.array(polar_img, dtype='float')\"\"\"\r\n \r\n \r\n polar_cost = -1 * np.ones(polar_img.shape)\r\n for r in range(1,radius):\r\n polar_cost[r]= polar_grad[r]-polar_grad[r-1]\r\n\r\n \r\n \r\n \"\"\"\r\n flip the cost image upside down. This is so that the base set is at the bottom of the array\r\n since the graphcut cuts from top to bottom, this inversion is necessary.\r\n \"\"\"\r\n polar_cost_inv=polar_cost[::-1,:,:]\r\n\r\n print \"CONSTRUCTING GRAPH EDGES... \"\r\n \r\n \"\"\"construct the graph using PyMaxFlow\"\"\"\r\n g=maxflow.GraphFloat()\r\n nodeids=g.add_grid_nodes(polar_img.shape)\r\n structure=np.zeros((3,3,3))\r\n structure[2]= np.array([[0,10000,0],[10000, 10000, 10000],[0, 10000, 0]])\r\n g.add_grid_edges(nodeids, structure=structure, symmetric=False)\r\n\r\n \r\n \"\"\"convert the previously labeled image (temp) into a polar transform image. 
Take the labels and\r\n give them high cost edge weights so the segmentation avoids previously labeled objects\"\"\"\r\n polar_lbl_img= img2polar(temp, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n polar_lbl_img_inv= polar_lbl_img[::-1,:]\r\n \r\n lbl_caps= polar_lbl_img_inv>0\r\n self_caps= (polar_lbl_img_inv==count)\r\n lbl_caps-=self_caps\r\n lbl_source_caps= np.zeros(lbl_caps.shape)\r\n lbl_sink_caps= lbl_caps*10000\r\n g.add_grid_tedges(nodeids, lbl_source_caps, lbl_sink_caps)\r\n \r\n structure2= 10000*np.array([[0,0,0],[0,0,1],[0,1,0]])\r\n g.add_grid_edges(nodeids[radius-1], structure=structure2, symmetric=True)\r\n\r\n \"\"\"add terminal edges using two arrays whose elemnts are the costs of the edges from the source and to the\r\n sink\"\"\"\r\n print \"CONSTRUCTING GRAPH TEDGES...\"\r\n sinkcaps= polar_cost_inv * (polar_cost_inv>=0)\r\n sourcecaps = -1 * polar_cost_inv * (polar_cost_inv<0)\r\n g.add_grid_tedges(nodeids, sourcecaps, sinkcaps)\r\n\r\n \r\n\r\n \r\n \"\"\"accounts for edit points. Takes every point in the edit point list, converts it to its spherical coordinate, and adds high cost\r\n edges in the column of that edit point inverts the x and y coordinates of the center\"\"\"\r\n center= np.array((center[0], center[2], center[1]))\r\n if len(editPoints)!=0:\r\n for coords in editPoints:\r\n\r\n \r\n rad= math.sqrt((center[0]-coords[0])**2+ (center[1]-coords[2])**2 + (center[2]-coords[1])**2) \r\n theta= math.atan2(center[2]-coords[1], coords[2]-center[1])\r\n print str((coords[0]-center[0])/(rad+1))\r\n phi=math.acos(float(coords[0]-center[0])/(rad+1))\r\n if theta<0:\r\n theta=2*math.pi+ theta\r\n theta= theta_width- theta_width*theta/(2*math.pi)-1\r\n phi= phi_width*phi/(math.pi)-1\r\n rad= radius- rad\r\n print \"POLAR COORDS: \" + str((rad, theta, phi))\r\n\r\n for r in range(0, radius):\r\n if r<=rad:\r\n g.add_tedge(nodeids[r, theta, phi], 0, 10000)\r\n \r\n else:\r\n g.add_tedge(nodeids[r, theta, phi], 10000, 0) \r\n\r\n\r\n\r\n\r\n print \"CUTTING GRAPH...\"\r\n g.maxflow()\r\n\r\n \"\"\"s-t mincut of graph. This is converted to cartesian coordinates with the function img2cart. 
The\r\n images are also closed to eliminate spotty areas\"\"\"\r\n \r\n print \"STARTING CARTESIAN TRANSFORM...\"\r\n polar_img_seg= np.invert(g.get_grid_segments(nodeids)[::-1,:,:])\r\n\r\n \r\n edge_img= np.zeros(img.shape)\r\n seg_img= ndimage.binary_closing(img2cart(img, polar_img_seg, center, radius, theta_width, phi_width))\r\n \r\n \r\n \"\"\"create an edge image of the segmented object\"\"\"\r\n strel=np.ones((3,3,3))\r\n erode_img=ndimage.binary_erosion(seg_img, strel)\r\n edge_img=np.logical_xor(seg_img, erode_img)\r\n \r\n\r\n \"\"\"shears the segmentation image and edge if padding was applied\"\"\"\r\n \r\n\r\n \"\"\"add the object back on to the template image (and the edge image back on the template edge)\r\n If there was an editpoint involved, remove the previous segmentation of that object and add back\r\n on the edited object\"\"\"\r\n if len(editPoints)!=0:\r\n del_img= (temp==count)*count\r\n temp-=del_img\r\n\r\n del_edge_img= (edge==count)*count\r\n edge-= del_edge_img\r\n\r\n\r\n temp+=seg_img*count\r\n edge+=edge_img*count\r\n\r\n temp= shearImage(temp, padList)\r\n edge= shearImage(edge, padList)\r\n \r\n \r\n\r\n print \"FINISHED!\"\r\n \r\n return temp, edge", "def calculate_contours_fit(L_x, L_y, e, leaflet, ts, Plots, side):\n \n n = np.load(input_dir + 'directors_'+leaflet+'_tail_'+ str(ts) + '.npy') \n\n pos = np.load(input_dir + 'coordinates_'+leaflet+'_tail_' + str(ts) + '.npy') \n\n resid = np.load(input_dir + 'residues_'+leaflet+'_tail_' + str(ts) + '.npy')\n box = np.load(input_dir + 'box' + str(ts) + '.npy')\n\n \n chl = np.load(input_dir + 'cholesterol_'+leaflet+'_tail_' + str(ts) + '.npy')\n dlipc = np.load(input_dir + 'dlipc_'+leaflet+'_tail_' + str(ts) + '.npy') \n dspc = np.load(input_dir + 'dspc_'+leaflet+'_tail_' + str(ts) + '.npy')\n ssm = np.load(input_dir + 'ssm_'+leaflet+'_tail_' + str(ts) + '.npy')\n \n #n= np.ones(len(pos))\n \"\"\" END: read the input data \"\"\"\n\n\n field = order_vector_field(L_x, L_y, pos, n, e, box)\n\n c = pd.DataFrame(data=field).mean(axis=0).rolling(50, center=True, min_periods=1).mean() #50\n c.dropna(inplace=True)\n middle = 0.5*(np.max(c) + np.min(c)) \n #middle = 0.025\n contours = measure.find_contours(field, middle) # Marching Cubes algorith\n #save contours\n fac_x = box[0] / L_x #to get the right dimensions (range_x)\n fac_y = box[1] / L_y # (range_y)\n \n contours_x = []\n contours_y = []\n contours_x_y = []\n \n contours_all = []\n for m, contour in enumerate(contours):\n contours_x.append((contour[:, 1] * fac_x))\n contours_y.append((contour[:, 0] * fac_y))\n \n \n contours_x_y = np.column_stack((contours_x[m], contours_y[m]))\n contours_all.append(contours_x_y)\n np.save(output_contours + 'contours_'+leaflet+'.' 
+ str(ts) + '.npy', contours_all)\n \n\n#===================================================\n#To assign resids to the different phases\n phase_belonging = np.zeros((len(pos)))\n ordered =[]\n disordered = []\n for i in np.arange(len(pos)):\n \n def apply_pbc(pos, box):\n if pos >= box:\n pos -= box\n if pos < 0:\n pos += box\n return pos\n \n idx_x = int(apply_pbc(pos[i,0], box[0]) / fac_x - 1.e-5) #the - 1.e-5 is because accuracy issue in the /\n idx_y = int(apply_pbc(pos[i,1], box[1]) / fac_y - 1.e-5) #this - 1.e-5 is because accuracy issue in the /\n #print(idx_x, idx_y)\n order= field[idx_y, idx_x]\n if (order > middle):\n ordered.append(order)\n order = 1 #ordered lipids\n \n else :\n disordered.append(order)\n order =0 #disordered lipids\n phase_belonging[i] = order\n \n\n resid_phases = np.column_stack((resid[:,0], phase_belonging))\n np.save(output_dir + 'resid_phases'+leaflet+'.'+ str(j) + '.npy', resid_phases)\n\n if Plots == True:\n plt.figure(figsize=(15,10)) \n \n contours_sorted = sorted(contours, key=len, reverse=True)\n \n for i in range(2):\n plt.plot(contours_sorted[i][:,1]* fac_x+0.5*fac_x, contours_sorted[i][:,0]* fac_y+0.5*fac_y, linewidth=3, color='#0000FF' ) ##00CC00\n \n #for m, contour in enumerate(contours_sorted):\n # print(contour[:,0])\n # for contour in contours: \n \n # plt.plot((contour[:, 1] * fac_x+0.5*fac_x),\n # (contour[:, 0] * fac_y+0.5*fac_y),\n # linewidth=4, color='#00CC00')\n \n plt.imshow(field, interpolation='nearest', \n cmap=plt.cm.gray_r,\n extent=[0, box[0], 0, box[1]], origin='lower', alpha=0.7) \n \n plt.axis('off')\n plot_scatter_order_field(pos, resid, dlipc, dspc, chl,ssm, n , box, ts, side) #phase_belonging.reshape(-1,1)\n plt.savefig(output_dir + 'contours-'+ leaflet + str(ts) + '.png', dpi=300) \n plt.close() \n \n return resid_phases #, ordered, disordered ", "def build_block_cross(self):\n from ambry.geo.util import find_geo_containment, find_containment\n from geoid import civick \n\n lr = self.init_log_rate(3000)\n\n def gen_bound():\n \n boundaries = self.library.dep('blockgroups').partition\n\n # Note, ogc_fid is the primary key. The id column is created by the shapefile. 
\n for i,boundary in enumerate(boundaries.query(\n \"SELECT AsText(geometry) AS wkt, gvid FROM blockgroups\")):\n lr('Load rtree')\n \n yield i, boundary['wkt'] , boundary['gvid'] \n \n def gen_points():\n\n for row in self.partitions.find(table = 'facilities_addresses').rows:\n if row['longitude'] and row['latitude']:\n yield (row['longitude'], row['latitude']), row['facilities_id']\n\n\n p = self.partitions.find_or_new(table='facilities_geoids')\n p.clean()\n\n with p.inserter() as ins:\n for point, point_o, cntr_geo, cntr_o in find_containment(gen_bound(),gen_points()):\n\n blockgroup_gvid = civick.Blockgroup.parse(cntr_o)\n tract_gvid = blockgroup_gvid.convert(civick.Tract)\n county_gvid = blockgroup_gvid.convert(civick.County)\n \n ins.insert(dict(facilities_id = point_o, \n blockgroup_gvid = str(blockgroup_gvid),\n tract_gvid = str(tract_gvid),\n county_gvid = str(county_gvid)\n ))\n \n lr('Marking point containment')", "def runlength(block, color_component, prev_dc_0, prev_dc_1, prev_dc_2):\n output = []\n accumulator = []\n flag = 0\n zero_count = 0\n\n for i in range(64):\n if i == 0:\n if (color_component == 1) or (color_component == 0):\n accumulator.append(block[i] - prev_dc_0)\n output.append(0)\n prev_dc_0 = block[i]\n\n elif color_component == 2:\n accumulator.append(block[i] - prev_dc_1)\n output.append(0)\n prev_dc_1 = block[i]\n\n elif color_component == 3:\n accumulator.append(block[i] - prev_dc_2)\n output.append(0)\n prev_dc_2 = block[i]\n else:\n pass\n else:\n if block[i] == 0:\n zero_count = zero_count + 1\n else:\n if zero_count <= 15:\n output.append(zero_count)\n accumulator.append(block[i])\n zero_count = 0\n else:\n accumulator.append(0)\n output.append(15)\n data = block[i]\n zero_count = zero_count - 15\n flag = 1\n\n while flag == 1:\n if zero_count <= 15:\n accumulator.append(data)\n output.append(zero_count)\n zero_count = 0\n flag = 0\n else:\n accumulator.append(0)\n output.append(15)\n zero_count = zero_count - 15\n\n if i == 63:\n if zero_count != 0:\n accumulator.append(0)\n output.append(0)\n\n return (output, accumulator, prev_dc_0, prev_dc_1, prev_dc_2)", "def find_cruise_sections(altitudes):\n indicies = find_level_sections(altitudes)\n\n if indicies:\n prev_altitude = altitudes[indicies[0]]\n cruise_altitude = closest_cruising_altitude(prev_altitude)\n was_cruising = is_cruising(prev_altitude, cruise_altitude)\n prev_altitude = cruise_altitude if was_cruising else prev_altitude\n\n merge_indicies = []\n for index in range(2, len(indicies), 2):\n altitude = altitudes[indicies[index]]\n cruise_altitude = closest_cruising_altitude(altitude)\n if is_cruising(altitude, cruise_altitude):\n # If the aircraft cruised between the level ranges,\n if was_cruising and (prev_altitude == cruise_altitude) and \\\n in_cruise_level_range(altitudes[indicies[index - 1] + 1: indicies[index]],\n cruise_altitude):\n # merge the level indicies\n merge_indicies.append(index - 1)\n merge_indicies.append(index)\n\n prev_altitude = cruise_altitude\n was_cruising = True\n else: # level but not at a cruising level\n merge_indicies.append(index)\n merge_indicies.append(index + 1)\n prev_altitude = altitude\n was_cruising = False\n\n # merge the level indicies\n while merge_indicies:\n index = merge_indicies.pop()\n del indicies[index]\n\n return indicies", "def __init__(self, selected_points, cut_depth, cut_breadth):\n\n\n self.cut_depth = cut_depth\n self.cut_breadth = cut_breadth\n\n self.points = selected_points\n\n self.vline = self.vlinecomp()\n self.hline = 
self.ortho_line_cut()\n\n self.mid_left = self.midpoint(0,1)\n self.mid_right = self.midpoint(2, 3)", "def GetCVLSWithLimitedPointsBetweenNodes(\n cVLS, allValsByFrame, splitLength=1, fixedNumInteriorPoints=None, interpolate=True\n):\n allValues = list(set(tuple(flatten(allValsByFrame))))\n allPairs = sorted(\n list(set([tuple(c[0]) for cVLSByFrame in cVLS for c in cVLSByFrame]))\n ) # Value pairs...\n\n if fixedNumInteriorPoints:\n numInteriorPoints = {p: fixedNumInteriorPoints for p in allPairs}\n else:\n minLength = {}\n for p in allPairs:\n # minLength is the number of points of the shortest subcountour between cells p[0] and p[1] from all frames\n minLength[p] = min(\n [\n len(c[2])\n for cVLSByFrame in cVLS\n for c in cVLSByFrame\n if tuple(c[0]) == p\n ]\n )\n # length of subcontour\n numInteriorPoints = {}\n for p in allPairs:\n numInteriorPoints[p] = minLength[p] // splitLength\n\n cVLS2 = deepcopy(\n cVLS\n ) # otherwise, we'd also change the input argument cVLS in the outside world!\n limIntPtsFunc = (\n limitInteriorPointsInterpolating if interpolate else limitInteriorPoints\n )\n\n for cvlsByFrame in cVLS2:\n for c in cvlsByFrame:\n c[2] = limIntPtsFunc(c[2], numInteriorPoints[tuple(c[0])])\n\n return cVLS2", "def get_blocks(index):\r\n #call with -1 to get full blocklist\r\n #the reason this is a function instead of just a list is that originally\r\n #i had plans to support dynamic tilesets, for example if only a certain\r\n #number of each tile were available. in the end this didnt happen though\r\n all_blocks = [\r\n [[0,0,0],[1,1,1],[0,0,0]], #0 - (horizontal passage)\r\n [[0,1,0],[0,1,0],[0,1,0]], #1 | (vertical passage)\r\n \r\n [[0,0,0],[1,1,0],[0,1,0]], #2 >v various L-junctions\r\n [[0,1,0],[1,1,0],[0,0,0]], #3 >^\r\n [[0,0,0],[0,1,1],[0,1,0]], #4 ^>\r\n [[0,1,0],[0,1,1],[0,0,0]], #5 v>\r\n \r\n [[0,0,0],[0,0,0],[0,0,0]], #6 0 empty\r\n [[0,1,0],[1,1,1],[0,1,0]], #7 + cross\r\n \r\n [[0,1,0],[1,1,1],[0,0,0]], #8 _|_ various T-junctions\r\n [[0,0,0],[1,1,1],[0,1,0]], #9 T\r\n [[0,1,0],[1,1,0],[0,1,0]], #10 -|\r\n [[0,0,0],[1,1,1],[0,0,0]]] #11 |-\r\n \r\n# [[0,1,0],[0,1,0],[0,0,0]], #12 #unsued \"dead end\" pieces\r\n# [[0,0,0],[0,1,0],[0,1,0]], #13\r\n# [[0,0,0],[0,1,1],[0,0,0]], #14\r\n# [[0,0,0],[1,1,0],[0,0,0]] ]#15\r\n if index == -1:\r\n return all_blocks\r\n else:\r\n return all_blocks[index]", "def slice_polygons_vertically(polygons,\n polygons_strengths,\n resolution,\n tolerance = 1E-4):\n\n # the intervals of polygons projected into X axis\n x_intervals = measure_polygons_width_along_yaxis(polygons)\n\n # the x value of intervals (ascend)\n x_endpoints = [0]\n for intv in x_intervals:\n x_endpoints.append(intv[0])\n x_endpoints.append(intv[1])\n\n x_endpoints = list(set(x_endpoints))\n x_endpoints.sort()\n\n # compute all possible candidate intervals\n candidate_intervals = []\n for id in range(0, len(x_endpoints) - 1):\n\n interval_left_x = x_endpoints[id]\n interval_right_x = x_endpoints[id + 1]\n\n # in some intervals, the polygons may have zero projection area\n # we ignore these intervals to accelerate our program\n is_interval_valid = False\n\n for intv in x_intervals:\n if interval_left_x > intv[1] - tolerance or interval_right_x < intv[0] + tolerance:\n is_interval_valid = False\n else:\n is_interval_valid = True\n break\n\n if is_interval_valid == False:\n continue\n\n interval_width = interval_right_x - interval_left_x\n # if the interval width is smaller than the fabrication tolerance, we ignore this interval\n if interval_width < tolerance:\n 
continue\n\n num_segments = math.ceil(interval_width / resolution)\n\n for kd in range(0, num_segments):\n segment_left_x = interval_left_x + interval_width / num_segments * kd\n segment_right_x = interval_left_x + interval_width / num_segments * (kd + 1)\n candidate_intervals.append([segment_left_x, segment_right_x])\n\n [polygons_intersec_shapes, polygons_intersec_strengths] = slice_polygons_vertically_with_intervals(polygons, polygons_strengths, candidate_intervals)\n\n return [polygons_intersec_shapes, polygons_intersec_strengths]", "def Back_to_center (Chest_img,wich_side='Left'):\n Filter_length = 130\n iteration = 0\n while True:\n if len(action_list) == 0:\n print('Filter_length',Filter_length)\n Chest = np.rot90(undistort_chest(Chest_img.imgs)).copy()\n cv2.imshow(\"undistort_chest\", Chest)\n cv2.waitKey(1)\n # continue\n if wich_side == 'Right':\n ROI_image = Chest[250:550,240:450]#右侧边缘,胸部\n elif wich_side == 'Left':\n ROI_image = Chest[250:550,30:239]#左侧边缘,胸部\n\n # 机器人脚的位置\n # ROI_image[340,:] = 255 \n\n cv2.imshow(\"Chest_img\",ROI_image)\n cv2.waitKey(1)\n\n ROI_image = cv2.pyrMeanShiftFiltering(ROI_image, 9, 25)\n cv2.imshow(\"pyrMeanShiftFiltering\",ROI_image)\n cv2.waitKey(1)\n Canny_img = cv2.Canny(ROI_image,15,150)\n # cv2.imshow(\"Canny_img\",Canny_img)\n # cv2.waitKey(1)\n\n #膨胀加粗边缘 \n dilate = cv2.dilate(Canny_img, np.ones((2, 2), np.uint8), iterations=1)\n cv2.imshow(\"dilate\",dilate)\n cv2.waitKey(1)\n\n\n Lines = cv2.HoughLinesP(dilate,1.0,np.pi / 180, 100,minLineLength=Filter_length,maxLineGap=15)\n\n # final_image = draw_lines(ROI_image,Lines,color=[0,255,0],thickness=2) #for test\n # cv2.imshow(\"origine line\",final_image)\n # cv2.waitKey(1)\n final_image, Final_line, good = group_lines_and_draw(ROI_image, Lines, wich_side)\n if Final_line is None:\n if Filter_length > 80:\n Filter_length -= 10\n else:\n iteration += 1\n continue\n \n if iteration == 3:\n print('No lines for long, just go')\n break\n\n cv2.imshow(\"image line\",final_image)\n cv2.waitKey(1)\n # print('test')\n if good:\n if wich_side == 'Right':\n Final_line[0] = Final_line[0] + 240\n Final_line[1] = Final_line[1] + 240\n if wich_side == 'Left':\n Final_line[0] = Final_line[0] + 30\n Final_line[1] = Final_line[1] + 30\n dX, deg = Calculate_position(Final_line)\n # print('line info',dX,deg)\n Step, Trun, Move_action, Turn_action = Move_dicision(dX, deg, wich_side)\n if Step == 0 and Trun == 0:\n print('In the center')\n break \n else:\n Step,Trun,Move_action,Turn_action = 0,0,True,True\n print('啥也没看见朋友!')\n \n\n for i in range(int(Trun)):\n action_append(Turn_action)\n time.sleep(0.5)\n\n for i in range(int(Step)):\n action_append(Move_action)\n time.sleep(0.5)", "def build_leftpart():\n # build kelly.\n build_kelly()\n # copy kelly to 3.\n copy(0, 3)\n\n # build june in slots 0,1,2\n build_june()\n # copy kelly to slot 1\n copy(3, 1)\n\n # smash together to get (june kelly) in 0\n smash()\n # copy (june kelly) to 1\n copy(0, 1)\n # build horace in 0\n build_horace(0)\n # smash together to get (horace (june kelly)) in 0\n smash()\n # wrap with an S for the whole left part.\n apply_card(\"S\", 0)", "def make_clips(self):\n\n average_messege_count, streamer_messeges_data = self.__do_analysis()\n\n clipworthy_clips = []\n\n #add clipworthy clips\n for entry in streamer_messeges_data:\n if((entry['messeges_count']*entry['messeges_count']) > (average_messege_count*1.8)):\n clipworthy_clips.append(entry)\n\n #combine clips that are next to one another in time\n clip_number = 0\n while(True):\n 
#print('clip_number = ' + str(clip_number) +' , length of cliparr = ' + str(len(clipworthy_clips)))\n if(clip_number >= (len(clipworthy_clips))-1):\n #at end of clips\n break\n\n if (clipworthy_clips[clip_number]['end_time']==clipworthy_clips[clip_number+1]['start_time']):\n #duplicate clip detected\n #print('dublicate clip detected for clip ' + str(clip_number))\n clipworthy_clips[clip_number]['end_time']=clipworthy_clips[clip_number+1]['end_time']\n #print('cliparr length before ridding: ' + str(len(clipworthy_clips)))\n clipworthy_clips.remove(clipworthy_clips[clip_number+1])\n #print('cliparr length after ridding: ' + str(len(clipworthy_clips)))\n #print('')\n else:\n clip_number = clip_number + 1\n\n\n print('clipworthy clips will now be made')\n clipSlicer = ClipSlicer(clipworthy_clips)\n clipSlicer.make_clips()\n\n print(\"clipworthy clips for streamer \"+ self.streamer + \" have been made\")", "def substratesFrom(polygons):\n containmentGraph = buildContainmentGraph(polygons)\n polygonLevels = graphLevels(containmentGraph) # Even polygons are outlines, odd are holes\n substrates = []\n for idx, polygon in enumerate(polygons):\n level = polygonLevels[idx]\n if not even(level):\n continue\n holes = [polygons[x].exterior for x in containmentGraph[idx] if polygonLevels[x] == level + 1]\n substrates.append(Polygon(polygon.exterior, holes))\n return substrates", "def GetXYListAndPolyListWithLimitedPointsBetweenNodes_CVLS(\n cVLS,\n allValsByFrame,\n orderOfSCsByValueByFrame,\n splitLength=1,\n fixedNumInteriorPoints=None,\n interpolate=True,\n):\n cVLS2 = GetCVLSWithLimitedPointsBetweenNodes(\n cVLS,\n allValsByFrame,\n splitLength,\n fixedNumInteriorPoints,\n interpolate=interpolate,\n )\n return GetXYListAndPolyListFromCVLS(cVLS2, allValsByFrame, orderOfSCsByValueByFrame)", "def clifford_set(u):\n i, x, y, z = u.v\n result = []\n result.append(u.clone()) # I\n result.append(Uop(-x, i, -z, y, u.hierarchy, u.construction + [\"X\"], gateset=u.gateset)) # iX, but treat it as X due to only phase difference\n result.append(Uop((i-x)/SQRT2, (x+i)/SQRT2, (y-z)/SQRT2, (z+y)/SQRT2, u.hierarchy, u.construction + [\"(I+iX)\"], gateset=u.gateset))\n result.append(Uop((i+x)/SQRT2, (x-i)/SQRT2, (y+z)/SQRT2, (z-y)/SQRT2, u.hierarchy, u.construction + [\"(I-iX)\"], gateset=u.gateset))\n result.append(Uop((i-y)/SQRT2, (x+z)/SQRT2, (y+i)/SQRT2, (z-x)/SQRT2, u.hierarchy, u.construction + [\"(I+iY)\"], gateset=u.gateset))\n result.append(Uop((i+y)/SQRT2, (x-z)/SQRT2, (y-i)/SQRT2, (z+x)/SQRT2, u.hierarchy, u.construction + [\"(I-iY)\"], gateset=u.gateset))\n for idx in range(6):\n i, x, y, z = result[idx].v\n c = result[idx].construction[-1:] if idx != 0 else []\n result.append(Uop(-z, -y, x, i, u.hierarchy, u.construction + c + [\"Z\"], gateset=u.gateset)) # iZ\n result.append(Uop((i-z)/SQRT2, (x-y)/SQRT2, (y+x)/SQRT2, (z+i)/SQRT2, u.hierarchy, u.construction + c + [\"(I+iZ)\"], gateset=u.gateset))\n result.append(Uop((i+z)/SQRT2, (x+y)/SQRT2, (y-x)/SQRT2, (z-i)/SQRT2, u.hierarchy, u.construction + c + [\"(I-iZ)\"], gateset=u.gateset))\n\n return result", "def tube(outside_diam, inside_diam, height):\n global _cmds, fragments\n r1 = outside_diam / 2\n r2 = inside_diam / 2\n _cmds = (\n \"difference(){\\n\"\n f\"cylinder(h={height},r1={r1},r2={r1},\"\n f\"center=false,$fn={fragments});\\n\"\n f\"cylinder(h={height*3},r1={r2},r2={r2},\"\n f\"center=true,$fn={fragments});\\n\"\n \"}\\n\") + _cmds", "def __around_short_row(self):\n list_of_cord = []\n for i in range(-self.rad, self.rad + 1):\n for j in 
range(-self.rad, self.rad + 1):\n if (i == 0 and j == 0) or (i != 0 and j == -1):\n continue\n list_of_cord.append((i, j))\n return list_of_cord", "def get_obstList(self,X,Y,Z):\n \n \t#Bed\n floor_part = np.array(np.where(Y < 2*self.cyl_rad)).flatten()\n\t\n\t#Piling\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n\n # then add the cylinder\n obst_list = np.union1d(floor_part[:],cyl_part[:])\n \n return list(obst_list[:])", "def make_start_moves(self):\n self.geos = Geos([])\n\n if g.config.machine_type == 'drag_knife':\n self.make_swivelknife_move()\n return\n\n # Get the start rad. and the length of the line segment at begin.\n start_rad = self.shape.parentLayer.start_radius\n\n # Get tool radius based on tool diameter.\n tool_rad = self.shape.parentLayer.getToolRadius()\n\n # Calculate the starting point with and without compensation.\n start = self.start\n angle = self.angle\n\n if self.shape.cut_cor == 40:\n self.append(RapidPos(start))\n \n elif self.shape.cut_cor != 40 and not g.config.vars.Cutter_Compensation[\"done_by_machine\"]:\n\n toolwidth = self.shape.parentLayer.getToolRadius()\n offtype = \"in\" if self.shape.cut_cor == 42 else \"out\"\n offshape = offShapeClass(parent = self.shape, offset = toolwidth, offtype = offtype)\n\n if len(offshape.rawoff) > 0:\n start, angle = offshape.rawoff[0].get_start_end_points(True, True)\n\n self.append(RapidPos(start))\n self.geos += offshape.rawoff\n\n # Cutting Compensation Left\n elif self.shape.cut_cor == 41:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle + pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle + pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=1)\n self.append(start_rad)\n\n # Cutting Compensation Right\n elif self.shape.cut_cor == 42:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle - pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle - pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. 
and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=0)\n self.append(start_rad)", "def coord_chop(full_length, size, mode):\n if size > full_length:\n mode = None\n pair_list = []\n if mode is 'exact_size':\n counter = 0\n while counter < full_length-size:\n new_upper = counter+size\n new_pair = (counter, new_upper)\n pair_list.append(new_pair)\n counter = new_upper\n last_pair = (counter, full_length)\n pair_list.append(last_pair)\n elif mode is 'maxsize_bisect':\n pair_list = [(0, full_length)]\n pair_list = recursive_bisector(pair_list, size)\n elif mode is 'maxsize_divisor':\n divisor = full_length/size +1\n approx_size = full_length/divisor\n counter = 0\n origin = 0\n while counter <= divisor:\n new_upper = origin+approx_size\n new_pair = (origin, new_upper)\n pair_list.append(new_pair)\n origin = new_upper\n counter +=1\n last_pair = (origin, full_length)\n pair_list.append(last_pair)\n elif mode is 'count_divisor':\n seg_count = size # using the size arg to pass the desired seg count\n target_size = full_length/seg_count+1\n pair_list = coord_chop(full_length, target_size, 'exact_size')\n else:\n pair_list = [(0, full_length)]\n return pair_list", "def _split_block(block: PruningBlock, list_output_channels: List[int]) -> List[PruningBlock]:\n if len(list_output_channels) == 1:\n raise RuntimeError\n\n dot_product = reduce((lambda x, y: x * y), list_output_channels)\n\n current_size = dot_product\n new_blocks = []\n divided_shapes = filter(lambda x: x != 1, list_output_channels)\n for divided_shape in divided_shapes:\n offset = int(current_size % dot_product)\n current_size /= divided_shape\n new_block = copy.copy(block)\n new_block.size = int(current_size)\n new_block.offset = offset\n new_blocks.append(new_block)\n return new_blocks", "def yield_right_scalene_triangles(cls):\n for n in range(1, 10):\n m = n + 1\n while True:\n a, b, c = 2 * m * n, m * m - n * n, m * m + n * n\n if a <= 200 and b <= 200 and c <= 200:\n yield a, b, c\n m += 1\n else:\n break", "def polyCut(*args, caching: bool=True, constructionHistory: bool=True, cutPlaneCenter:\n Union[List[float, float, float], bool]=None, cutPlaneCenterX: Union[float,\n bool]=0.0, cutPlaneCenterY: Union[float, bool]=0.0, cutPlaneCenterZ: Union[float,\n bool]=0.0, cutPlaneHeight: Union[float, bool]=0.0, cutPlaneRotate: Union[List[float,\n float, float], bool]=None, cutPlaneRotateX: Union[float, bool]=0.0,\n cutPlaneRotateY: Union[float, bool]=0.0, cutPlaneRotateZ: Union[float, bool]=0.0,\n cutPlaneSize: Union[List[float, float], bool]=None, cutPlaneWidth: Union[float,\n bool]=0.0, cuttingDirection: AnyStr=\"\", deleteFaces: bool=False, extractFaces:\n bool=False, extractOffset: Union[List[float, float, float], bool]=None,\n extractOffsetX: Union[float, bool]=0.0, extractOffsetY: Union[float, bool]=0.0,\n extractOffsetZ: Union[float, bool]=0.0, name: AnyStr=\"\", nodeState: Union[int,\n bool]=0, onObject: bool=True, worldSpace: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def LSSTPointing(xc, yc, angle_rot=0., area=None, maxbound=None):\n\n \"\"\"\n arr = [[3, 0], [12, 0], [12, 1], [13, 1], [13, 2], [14, 2], [14, 3], [15, 3],\n [15, 12], [14, 12], [14, 13], [13, 13], [\n 13, 14], [12, 14], [12, 15],\n [3, 15], [3, 14], [2, 14], [2, 13], [1, 13], [1, 12], [0, 12],\n [0, 3], [1, 3], [1, 2], [2, 2], [2, 1], [3, 1]]\n \"\"\"\n # this is a quarter of LSST FP (with corner rafts)\n arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 6.5], [5.5, 6.5], [\n 
5.5, 5.5], [6.5, 5.5], [6.5, 4.5], [7.5, 4.5], [7.5, 0.0]]\n\n # this is a quarter of LSST FP (without corner rafts)\n arr = [[0.0, 7.5], [4.5, 7.5], [4.5, 4.5], [7.5, 4.5], [7.5, 0.0]]\n if maxbound is not None:\n arr = [[0.0, maxbound], [maxbound*4.5/7.5, maxbound], [maxbound*4.5 /\n 7.5, maxbound*4.5/7.5], [maxbound, maxbound*4.5/7.5], [maxbound, 0.0]]\n # symmetry I: y -> -y\n arrcp = list(arr)\n for val in arr[::-1]:\n if val[1] > 0.:\n arrcp.append([val[0], -val[1]])\n\n # symmetry II: x -> -x\n arr = list(arrcp)\n for val in arrcp[::-1]:\n if val[0] > 0.:\n arr.append([-val[0], val[1]])\n\n # build polygon\n poly_orig = geometry.Polygon(arr)\n\n # set area\n if area is not None:\n poly_orig = affinity.scale(poly_orig, xfact=np.sqrt(\n area/poly_orig.area), yfact=np.sqrt(area/poly_orig.area))\n\n # set rotation angle\n rotated_poly = affinity.rotate(poly_orig, angle_rot)\n\n return affinity.translate(rotated_poly,\n xoff=xc-rotated_poly.centroid.x,\n yoff=yc-rotated_poly.centroid.y)", "def _contract_by_area(slabs, dA=0.5):\n\n # In refl1d the first slab is the substrate, the order is reversed here.\n # In the following code the slabs are traversed from the backing towards\n # the fronting.\n newslabs = np.copy(slabs)[::-1]\n d = newslabs[:, 0]\n rho = newslabs[:, 1]\n irho = newslabs[:, 2]\n sigma = newslabs[:, 3]\n vfsolv = newslabs[:, 4]\n\n n = np.size(d, 0)\n i = newi = 1 # Skip the substrate\n\n while i < n:\n # Get ready for the next layer\n # Accumulation of the first row happens in the inner loop\n dz = rhoarea = irhoarea = vfsolvarea = 0.0\n rholo = rhohi = rho[i]\n irholo = irhohi = irho[i]\n\n # Accumulate slices into layer\n while True:\n # Accumulate next slice\n dz += d[i]\n rhoarea += d[i] * rho[i]\n irhoarea += d[i] * irho[i]\n vfsolvarea += d[i] * vfsolv[i]\n\n i += 1\n # If no more slices or sigma != 0, break immediately\n if i == n or sigma[i - 1] != 0.0:\n break\n\n # If next slice won't fit, break\n if rho[i] < rholo:\n rholo = rho[i]\n if rho[i] > rhohi:\n rhohi = rho[i]\n if (rhohi - rholo) * (dz + d[i]) > dA:\n break\n\n if irho[i] < irholo:\n irholo = irho[i]\n if irho[i] > irhohi:\n irhohi = irho[i]\n if (irhohi - irholo) * (dz + d[i]) > dA:\n break\n\n # Save the layer\n d[newi] = dz\n if i == n:\n # printf(\"contract: adding final sld at %d\\n\",newi)\n # Last layer uses surface values\n rho[newi] = rho[n - 1]\n irho[newi] = irho[n - 1]\n vfsolv[newi] = vfsolv[n - 1]\n else:\n # Middle layers uses average values\n rho[newi] = rhoarea / dz\n irho[newi] = irhoarea / dz\n sigma[newi] = sigma[i - 1]\n vfsolv[newi] = vfsolvarea / dz\n # First layer uses substrate values\n newi += 1\n\n return newslabs[:newi][::-1]", "def cyclic_sort_vertices_2d(Vlist):\n if len(Vlist)==0: return Vlist\n\n adjacency_matrix = Vlist[0].polyhedron().vertex_adjacency_matrix()\n result = [ Vlist.pop() ]\n while len(Vlist)>0:\n for i in range(len(Vlist)):\n if adjacency_matrix[Vlist[i].index(), result[-1].index()] == 1:\n result.append( Vlist.pop(i) )\n break;\n else:\n raise ValueError\n return result" ]
[ "0.5736482", "0.54933506", "0.5457592", "0.5425763", "0.5424649", "0.54244435", "0.5406228", "0.5358565", "0.53265375", "0.5317147", "0.5295013", "0.5287533", "0.5257076", "0.524541", "0.51966155", "0.51694214", "0.5141546", "0.51079696", "0.51050085", "0.5099054", "0.50850296", "0.507336", "0.5059004", "0.5033178", "0.5025372", "0.50115955", "0.5010461", "0.49971756", "0.49949536", "0.49887016", "0.4987", "0.49851546", "0.4981639", "0.49735895", "0.49723774", "0.49634868", "0.495334", "0.49489036", "0.49483097", "0.49345294", "0.49345294", "0.49345168", "0.49330088", "0.4926696", "0.49242353", "0.48984295", "0.4884625", "0.487796", "0.48778358", "0.4872829", "0.48657185", "0.48625663", "0.48615733", "0.48570856", "0.4856373", "0.48558742", "0.48515764", "0.48493353", "0.48465195", "0.4842356", "0.48133877", "0.48080686", "0.4803611", "0.48022258", "0.4800667", "0.47867283", "0.47854492", "0.47650608", "0.4756668", "0.47474223", "0.47426414", "0.47414345", "0.47321004", "0.47314724", "0.4721763", "0.4716969", "0.47150517", "0.4714597", "0.47031218", "0.46998435", "0.4693015", "0.4690837", "0.46902978", "0.46791306", "0.46648854", "0.4664656", "0.46619213", "0.46610087", "0.4653423", "0.46519855", "0.46504295", "0.46492514", "0.4644772", "0.46396193", "0.46303675", "0.46264002", "0.46221802", "0.46200907", "0.4619353", "0.46186975" ]
0.6428014
0
This algorithm returns a cutlist that performs one quarter of the total slicing required to create a pyramid top while ensuring a flat bottom above it, both of which are required for an OG seed.
def pyramid_slice(x1, y1, x2, y2, z, delta, deltaz, taper_x, taper_y, taper_straight, layers):
    cutlist = []
    y_max = abs(y1 - y2)
    for a in range(layers):
        i = 0
        # Taper the slice outline for this layer.
        new_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1 - a*taper_straight, x2 + a*taper_x, y2 + a*taper_y
        while abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0:
            # Serpentine raster: alternate the jump/mark direction on each pass.
            if i % 2 == 0:
                cutlist.append(["jump", f"{new_x1:.6f}", f"{new_y1:.6f}"])
                cutlist.append(["mark", f"{new_x2:.6f}", f"{new_y1:.6f}"])
            else:
                cutlist.append(["jump", f"{new_x2:.6f}", f"{new_y1:.6f}"])
                cutlist.append(["mark", f"{new_x1:.6f}", f"{new_y1:.6f}"])
            new_y1 = new_y1 - delta
            i = i + 1
        if a < layers - 1:
            # Step the focus down before rastering the next layer.
            cutlist.append(["z_step", str(-deltaz)])
        y_max = y_max - taper_straight - taper_y
    return cutlist
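For context, a minimal sketch of how this generator might be driven; every numeric value below is hypothetical and chosen only to mirror the argument order used by the oss_stacked caller that appears among the negatives further down:

# Hypothetical invocation of pyramid_slice (all values invented for illustration).
# Rasters a 3-layer quarter-slice with a 0.05 line pitch and a 0.1 z-step per layer.
cuts = pyramid_slice(
    x1=2.5, y1=2.5, x2=-2.5, y2=-2.5, z=0.0,
    delta=0.05, deltaz=0.1,
    taper_x=0.01, taper_y=0.01, taper_straight=0.005,
    layers=3,
)
print(cuts[0])   # ['jump', '2.500000', '2.500000']
print(cuts[1])   # ['mark', '-2.500000', '2.500000']
print(cuts[-1])  # last move of the final layer (no trailing z_step)

Note that z is accepted but never read in the body; the oss_stacked caller still passes a z value positionally, so the parameter is kept for call compatibility.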
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def cuts(self) -> list[list[int]]:\n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts", "def triple_cut(deck: List[int]) -> None:\n\n small_joker_index = deck.index(get_small_joker_value(deck))\n big_joker_index = deck.index(max(deck))\n\n if big_joker_index > small_joker_index:\n left_joker = small_joker_index\n right_joker = big_joker_index\n\n else:\n right_joker = small_joker_index\n left_joker = big_joker_index\n\n left_list = deck[:left_joker]\n right_list = deck[right_joker + 1:]\n middle_list = deck[left_joker:right_joker + 1]\n del deck[:]\n deck.extend(right_list)\n deck.extend(middle_list)\n deck.extend(left_list)", 
"def _recursive_cutting(g, p, res=[]):\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res", "def triple_cut(deck_of_cards):\n new_deck =[]\n big_joker_value = get_big_joker_value(deck_of_cards)\n small_joker_value = get_small_joker_value(deck_of_cards)\n\t \n index1 = deck_of_cards.index(small_joker_value)\n index2 = deck_of_cards.index(big_joker_value)\n index_top_card = min(index1, index2)\n index_bottom_card = max(index1, index2)\n # This function will give us the joker that is on the top and the joker \n # that is in the bottom of the deck regardless of their value\n \n new_top = deck_of_cards[(index_bottom_card + 1):]\n # Creates a deck that is to be moved the top, from the lower joker and\n # below \n middle = deck_of_cards[index_top_card : index_bottom_card + 1]\n # Middle portion of the deck that is not moved that is in between the jokers\n new_bottom = deck_of_cards[:index_top_card]\n # The deck portion that is to be moved to the bottom, from higher joker and\n # above.\n deck = new_top + middle + new_bottom\n deck_of_cards[:] = deck\n # This will then give a new deck that shifts the cards above the higher \n # joker to the end and the cards below the lower joker to the top.", "def alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: \", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if 
geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)", "def sub_division(width: float, minimum_division: float, stretch_factor: float) -> list:\n\n sum_x = 0\n next_ = minimum_division\n new_grid = []\n max_dx = 20/100\n x = width/2\n\n while sum_x < x:\n remaining = x - sum_x\n\n if next_ > max_dx:\n n = np.ceil(remaining/max_dx)\n\n if n == 0:\n new_grid.append(remaining)\n\n next_ = remaining/n\n\n for _ in range(0, int(n)):\n new_grid.append(next_)\n sum_x += next_\n\n remaining = x - sum_x\n\n if next_ < remaining:\n new_grid.append(next_)\n sum_x += next_\n else:\n remaining += new_grid[-1]\n new_grid[-1] = remaining/2\n new_grid.append(remaining/2)\n sum_x = x\n\n next_ = next_ * stretch_factor\n\n x1 = new_grid[::-1]\n x2 = new_grid+x1\n\n return x2", "def get_partitions(cliques,cut=1):\n cliques.sort(key=len)\n k, m = divmod(len(cliques), cut)\n return list(cliques[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(cut))", "def rod_cutting_top_down_DP(rod_length, length_list, price_list):\n memo_profit_list = (rod_length + 1) * [-1]\n memo_cut_list = [[] for i in range(rod_length + 1)]\n return _rod_cutting_top_down_DP_aux(rod_length, length_list, price_list, memo_profit_list, memo_cut_list)", "def inregionCut(self,l,r,g,Nb):\n A1 = np.random.randint(l+1-self.keepCL, r-1-(Nb-1)*(g+1)-(1-self.keepCL))\n return A1 + np.arange(Nb)*(1+g)", "def createDownPyramidSets(blocksize,operating):\n bsx = int(blocksize[0]/2)\n bsy = int(blocksize[1]/2)\n dl = int((bsy)-operating); #lower y\n ul = int((bsy)+operating); #upper y\n sets = tuple()\n while dl > 0:\n r = numpy.arange(dl,ul,1)\n sets+=(tuple(product(r,r)),)\n dl-=operating\n ul+=operating\n return sets", "def find_cuts(width, height, isOdd):\n\n if isOdd:\n return width//2, height//2\n\n if width % 4 == 0:\n return width//2, height//2\n return (width-2)//2, (height-2)//2", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def triple_cut(deck):\n \n \n big = get_big_joker_value(deck)\n small = get_small_joker_value(deck)\n if deck.index(big) < deck.index(small):\n first_j = deck.index(big) \n second_j = deck.index(small)\n elif deck.index(small) < deck.index(big):\n first_j = deck.index(small)\n second_j = deck.index(big)\n above_first_j = deck[:first_j]\n under_second_j = deck[second_j+1:]\n middle = deck[first_j:second_j + 1]\n deck[:] = under_second_j + middle + above_first_j", "def Split(self, k):\n n = len(self)\n start = range(0, n, ceil(n / k))\n end = list(start[1:]) + [n]\n return [range(first, last) for first, last in zip(start, end)]", "def chop_up_to_4s(list, n):\n sublists = []\n num_sublists = 4**(n-1)\n for i in range(num_sublists):\n sublists.append(list[4*i: 4*i + 4])\n return sublists", "def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def get_splits(ntot, nper):\n beglist = numpy.arange(0,ntot,nper)\n 
endlist = numpy.arange(0,ntot,nper) + nper - 1\n\n if (ntot % nper) != 0:\n endlist[-1] = ntot-1\n return beglist, endlist", "def slice_list(list_to_slice, *upper_bounds):\n list_to_return=[]\n for upper_bound in upper_bounds:\n if (len(list_to_slice)>upper_bound):\n print('Slicing...')\n list_to_return.append(list_to_slice[:upper_bound])\n else:\n list_to_return.append(None)\n \n return list_to_return", "def createUpPyramidSets(blocksize,operating):\n sets = tuple()\n ul = blocksize[0]-operating\n dl = operating\n while ul > dl:\n r = numpy.arange(dl,ul,1)\n sets+=(tuple(product(r,r)),)\n dl+=operating\n ul-=operating\n return sets", "def create_wild_lists(amount,length):\r\n box = []\r\n\r\n k = 0\r\n while k < amount:\r\n sublist = []\r\n j = 0\r\n while j < length:\r\n num = random.randint(1, 100)\r\n sublist.append(num)\r\n j += 1\r\n box.append(sublist)\r\n k += 1\r\n\r\n if amount == 1:\r\n return sublist\r\n\r\n return box", "def slice_sample_bounded_max(N, burn, logdist, xx, widths, step_out, max_attempts, bounds):\n xx = copy.deepcopy(xx)\n D = len(xx)\n samples = []\n if (not isinstance(widths, list)) or len(widths) == 1:\n widths = np.ones(D) * widths\n\n log_Px = logdist(xx)\n\n for ii in range(N + burn):\n log_uprime = np.log(random.random()) + log_Px\n for dd in random.sample(range(D), D):\n x_l = copy.deepcopy(xx)\n x_r = copy.deepcopy(xx)\n xprime = copy.deepcopy(xx)\n\n # Create a horizontal interval (x_l, x_r) enclosing xx\n rr = random.random()\n x_l[dd] = max(xx[dd] - rr*widths[dd], bounds[dd][0])\n x_r[dd] = min(xx[dd] + (1-rr)*widths[dd], bounds[dd][1])\n\n if step_out:\n while logdist(x_l) > log_uprime and x_l[dd] > bounds[dd][0]:\n\n x_l[dd] = max(x_l[dd] - widths[dd], bounds[dd][0])\n while logdist(x_r) > log_uprime and x_r[dd] < bounds[dd][1]:\n x_r[dd] = min(x_r[dd] + widths[dd], bounds[dd][1])\n\n # Propose xprimes and shrink interval until good one found\n zz = 0\n num_attempts = 0\n while True:\n zz += 1\n # print(x_l)\n xprime[dd] = random.random()*(x_r[dd] - x_l[dd]) + x_l[dd]\n \n log_Px = logdist(xx)\n if log_Px > log_uprime:\n xx[dd] = xprime[dd]\n break\n else:\n # Shrink in\n num_attempts += 1\n if num_attempts >= max_attempts:\n # print('Failed to find something')\n break\n elif xprime[dd] > xx[dd]:\n x_r[dd] = xprime[dd]\n elif xprime[dd] < xx[dd]:\n x_l[dd] = xprime[dd]\n else:\n raise Exception('Slice sampling failed to find an acceptable point')\n # Record samples\n if ii >= burn:\n samples.append(copy.deepcopy(xx))\n return samples", "def compute_skiprows(start, end) -> List[int]:\n return list(range(start - 1)) + list(range(end, end + 20))", "def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n yield sequence[i:i+kmer_size]", "def coord_chop(full_length, size, mode):\n if size > full_length:\n mode = None\n pair_list = []\n if mode is 'exact_size':\n counter = 0\n while counter < full_length-size:\n new_upper = counter+size\n new_pair = (counter, new_upper)\n pair_list.append(new_pair)\n counter = new_upper\n last_pair = (counter, full_length)\n pair_list.append(last_pair)\n elif mode is 'maxsize_bisect':\n pair_list = [(0, full_length)]\n pair_list = recursive_bisector(pair_list, size)\n elif mode is 'maxsize_divisor':\n divisor = full_length/size +1\n approx_size = full_length/divisor\n counter = 0\n origin = 0\n while counter <= divisor:\n new_upper = origin+approx_size\n new_pair = (origin, new_upper)\n pair_list.append(new_pair)\n origin = new_upper\n counter +=1\n last_pair = (origin, full_length)\n 
pair_list.append(last_pair)\n elif mode is 'count_divisor':\n seg_count = size # using the size arg to pass the desired seg count\n target_size = full_length/seg_count+1\n pair_list = coord_chop(full_length, target_size, 'exact_size')\n else:\n pair_list = [(0, full_length)]\n return pair_list", "def make_chopper(\n frequency: sc.Variable,\n position: sc.Variable,\n phase: sc.Variable = None,\n cutout_angles_center: sc.Variable = None,\n cutout_angles_width: sc.Variable = None,\n cutout_angles_begin: sc.Variable = None,\n cutout_angles_end: sc.Variable = None,\n kind: str = None,\n) -> sc.Dataset:\n data = {\"frequency\": frequency, \"position\": position}\n if phase is not None:\n data[\"phase\"] = phase\n if cutout_angles_center is not None:\n data[\"cutout_angles_center\"] = cutout_angles_center\n if cutout_angles_width is not None:\n data[\"cutout_angles_width\"] = cutout_angles_width\n if cutout_angles_begin is not None:\n data[\"cutout_angles_begin\"] = cutout_angles_begin\n if cutout_angles_end is not None:\n data[\"cutout_angles_end\"] = cutout_angles_end\n if kind is not None:\n data[\"kind\"] = kind\n chopper = sc.Dataset(data=data)\n\n # Sanitize input parameters\n if (None not in [cutout_angles_begin, cutout_angles_end]) or (\n None not in [cutout_angles_center, cutout_angles_width]\n ):\n widths = utils.cutout_angles_width(chopper)\n if (sc.min(widths) < sc.scalar(0.0, unit=widths.unit)).value:\n raise ValueError(\"Negative window width found in chopper cutout angles.\")\n if not sc.allsorted(utils.cutout_angles_begin(chopper), dim=widths.dim):\n raise ValueError(\"Chopper begin cutout angles are not monotonic.\")\n if not sc.allsorted(utils.cutout_angles_end(chopper), dim=widths.dim):\n raise ValueError(\"Chopper end cutout angles are not monotonic.\")\n\n return chopper", "def cut_trees(self, )\n\n\n\n def random_spot(x_low, y_low, x_range, y_range):\n x = randint(x_low, x_low + x_range)\n y = randint(y_low, y_low + y_range)\n dur = random.uniform(0.5, 3.0)\n\n return pyautogui.moveTo(x, y, dur)", "def getSplitFoodList(self, gameState, margin=1):\n foodList = self.getFood(gameState).asList()\n foodListY = [p[1] for p in foodList]\n med = median(foodListY)\n splitList = [[], []]\n for i, p in enumerate(foodList):\n if p[1] <= med - margin:\n splitList[0].append(p)\n elif p[1] > med + margin:\n splitList[1].append(p)\n else:\n splitList[0].append(p)\n splitList[1].append(p)\n return splitList", "def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res", "def split_range(r, n):\n \n step = int(r / n)\n segments = []\n for i in range(n):\n new_segment = [step * i, step * (i + 1)]\n segments.append(new_segment)\n # correct the gap in the missing index due to the truncated step\n segments[-1][-1] = r\n return segments", "def part_recur(ckt, initial, w):\n partition_set = []\n# partition_mech = KLPart.KLPartition()\n# convert_Gate(ckt, partition_mech)\n print \"Diving into C++\"\n# (a, b) = partition_mech.partition_once(KLPart.StringVector(list(set(initial))))\n (a, b) = partition(ckt, list(set(initial)))\n print \"Coming back up\"\n if len(get_inputs(ckt, a)) > w and len(a) > 3:\n partition_set = partition_set + part_recur(ckt, a, w)\n else:\n partition_set.append(a)\n 
if len(get_inputs(ckt, b)) > w and len(b) > 3:\n partition_set = partition_set + part_recur(ckt, b, w)\n else:\n partition_set.append(b)\n return partition_set", "def generate_category_choice(possible):\n n = len(possible)\n splits = []\n for i in range(1, pow(2, n-1)):\n split = Split(is_numerical=False)\n for j in xrange(n):\n if (i >> j) % 2 == 1:\n split.add_category_range( possible[j] )\n splits.append( split )\n return splits", "def get_spacing(filename='POSCAR', cut=0.9):\n\n structure = Structure.from_file('POSCAR')\n\n lines = open(filename).readlines()\n c_axis = lines[4].split()\n lattice_parameter = lines[1].split()\n split_coords = [line.split() for line in lines[8:8+structure.num_sites]]\n z_coords = list()\n for coord in split_coords:\n z_coord = float(coord[2])\n if z_coord > cut:\n z_coord -= 1\n z_coords.append(z_coord)\n max_height = max([z_height for z_height in z_coords])\n min_height = min([z_height for z_height in z_coords])\n spacing = ((1.0 + min_height) - max_height) * \\\n abs(float(c_axis[2])) * float(lattice_parameter[0])\n\n return spacing", "def make_slices(data, win_size):\n rows = data.shape[0] - win_size[0] + 1\n cols = data.shape[1] - win_size[1] + 1\n slices = []\n for i in range(win_size[0]):\n for j in range(win_size[1]):\n slices.append(data[i:rows+i, j:cols+j])\n return slices", "def generate_pre_heights(self):\n\n config = self.config\n\n def get_lands_oceans():\n oceans, lands = [], []\n for x in xrange(self.size):\n for y in xrange(self.size):\n coord = x, y\n if self[coord] <= 0:\n oceans.append(coord)\n else:\n lands.append(coord)\n return lands, oceans\n\n def add_heights():\n \"\"\"Add pre heights for diamond-square\n \"\"\"\n fac_min = 50\n fac_max = 40\n\n print 'Get lands and oceans'\n t = time.time()\n lands, oceans = get_lands_oceans()\n print 'lands and oceans getted: ', time.time() - t\n\n # TODO: create one def with params: mount_level and other for create heights\n # add default heights\n for coord in lands:\n self[coord] = self.config.land_mount_level[1]\n\n for coord in oceans:\n self[coord] = -self.config.mid_mount_level[1]\n\n # add low heights for lands\n count_land = int(round(len(lands) * config.factor_low_mount / 100.))\n land_coords = []\n\n starts = random.randint(count_land / fac_min, count_land / fac_max)\n for start in xrange(starts):\n start_coord = lands[random.randint(0, len(lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n if coord not in land_coords:\n self[coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n # -------------------------------------------------------------------------------\n # add mid heights for lands\n count_land = int(round(len(target_lands) * (config.factor_mid_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 3), count_land / (fac_max*3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n\n if land_coords == []:\n return\n\n while count_land > 0:\n 
# for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n\n # -------------------------------------------------------------------------------\n # add high heights for lands\n count_land = int(round(len(target_lands) * (config.factor_high_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 4), count_land / (fac_max * 3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n try:\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n except ValueError:\n coord = lands[random.randint(0, len(lands) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n\n\n def square_diamond(sx, sy, size, strong):\n \"\"\"Algorithm Square-diamond generate terrain heights\n\n -> http://www.lighthouse3d.com/opengl/terrain/index.php?mpd2\n \"\"\"\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx, ey\n D = ex, ey\n E = sx+dsize, sy+dsize\n F = sx, sy + dsize\n G = sx + dsize, sy\n H = ex, sy + dsize\n I = sx + dsize, ey\n\n def RAND(X):\n return random.randint(-strong, strong)\n\n ### for coasts dont disappear\n\n def normalize(add_z, X):\n if self[X] <= 0:\n if add_z > 0:\n add_z = -5\n else:\n if add_z <= 0:\n add_z = 5\n return add_z\n\n # Generate heights\n # E = (A+B+C+D) / 4 + RAND(d)\n # F = (A + C + E + E) / 4 + RAND(d)\n # G = (A + B + E + E) / 4 + RAND(d)\n # H = (B + D + E + E) / 4 + RAND(d)\n # I = (C + D + E + E) / 4 + RANS(d)\n\n ### E\n\n try:\n\n add_z = ((self[A] + self[B] + self[C] + self[D]) / 4) + RAND(E)\n\n except KeyError, e:\n print A, B, C, D, size, dsize, len(self)\n raise e\n\n\n self[E] = normalize(add_z, E)\n\n ### F\n\n add_z = (self[A] + self[C] + self[E] + self[E]) / 4 + RAND(F)\n\n self[F] = normalize(add_z, F)\n\n ### G\n\n add_z = (self[A] + self[B] + self[E] + self[E]) / 4 + RAND(G)\n\n self[G] = normalize(add_z, G)\n\n ### H\n\n add_z = (self[B] + self[D] + self[E] + self[E]) / 4 + RAND(H)\n\n self[H] = normalize(add_z, H)\n\n ### I\n add_z = (self[C] + self[D] + self[E] + self[E]) / 4 + RAND(I)\n\n self[I] = normalize(add_z, I)\n\n\n # DIAMOND STEP\n\n # get coordinates\n # 0 - x, 1 - y\n\n x, y = 0, 1\n\n dx = (G[x] - A[x]) / 2\n dy = (F[y] - A[y]) / 2\n\n J = A[x] + dx, A[y] + dy\n K = G[x] + dx, G[y] + dy\n L = F[x] + dx, F[y] + dy\n M = E[x] + dx, E[y] + dy\n\n N = A[x], A[y] + dy\n O = A[x] + dx, A[y]\n P = G[x], G[y] + dy\n Q = A[x] + dx, F[y]\n\n # Generate Heights\n # J = (A + G + F + E)/4 + RAND(d)\n # K = (G + B + E + H)/4 + RAND(d)\n # L = (F + E + C + I)/4 + RAND(d)\n # M = (E + H + I + D)/4 + RAND(d)\n\n # J\n add_z = ((self[A] + self[G] + self[F] + self[E]) / 4) + RAND(J)\n self[J] = normalize(add_z, 
J)\n\n # K\n add_z = ((self[G] + self[B] + self[E] + self[H]) / 4) + RAND(K)\n self[K] = normalize(add_z, K)\n\n # L\n add_z = ((self[F] + self[E] + self[C] + self[I]) / 4) + RAND(L)\n self[L] = normalize(add_z, L)\n\n # M\n add_z = ((self[E] + self[H] + self[I] + self[D]) / 4) + RAND(M)\n self[M] = normalize(add_z, M)\n\n # N = (K + A + J + F)/4 + RAND(d)\n # O = (L + A + G + J)/4 + RAND(d)\n # P = (J + G + K + E)/4 + RAND(d)\n # Q = (F + J + E + L)/4 + RAND(d)\n\n # N\n add_z = ((self[K] + self[A] + self[J] + self[F]) / 4) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[L] + self[A] + self[G] + self[J]) / 4) + RAND(O)\n self[O] = normalize(add_z, O)\n\n # P\n add_z = ((self[J] + self[G] + self[K] + self[E]) / 4) + RAND(P)\n self[P] = normalize(add_z, P)\n\n # Q\n add_z = ((self[F] + self[J] + self[E] + self[L]) / 4) + RAND(Q)\n self[Q] = normalize(add_z, Q)\n\n # N = (A + J + F)/3 + RAND(d)\n # O = (A + G + J)/3 + RAND(d)\n\n # N\n add_z = ((self[A] + self[J] + self[F]) / 3) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[A] + self[G] + self[J]) / 3) + RAND(N)\n self[O] = normalize(add_z, O)\n\n\n ### Start recurse for diamond alg\n square_diamond(A[0], A[1], dsize, strong)\n square_diamond(G[0], G[1], dsize, strong)\n square_diamond(F[0], F[1], dsize, strong)\n square_diamond(E[0], E[1], dsize, strong)\n\n # align\n def align_it(start, strong):\n \"\"\"Deprecated\n \"\"\"\n water = 0\n #map3d = self.copy()\n size = (abs(start)*2) + self.size - strong\n start = start + strong\n coords_map = []\n for x in xrange(start,size):\n for y in xrange(start,size):\n coords_map.append( (x, y) )\n\n random.shuffle(coords_map)\n\n lens = strong * (3.0 ** 2)\n for coord in coords_map:\n average = 0.0\n x, y = coord\n #rounds = self.get_round_xy_land(coord, -strong, False)\n #for r_coord in rounds:\n #average += self[r_coord]\n for x in xrange(-strong, strong+1):\n for y in xrange(-strong, strong+1):\n average += self[x, y]\n\n height = int(round(average / lens))\n #height = int(round(average / float(len(rounds))))\n if self[coord] <= water and height > water:\n height = water\n elif self[coord] > water and height <= water:\n height = water + 1\n\n #print self[coord], '->', height\n\n self[coord] = height\n\n if self.config.add_pre_heights:\n print 'Add heights start'\n add_heights()\n print 'Diamond-Square start'\n for x in xrange(1):\n square_diamond(\n sx = 0,\n sy = 0,\n size = self.size, strong=100)", "def _get_sharded_ranges(\n begin,\n end,\n max_length,\n):\n if max_length <= 0:\n raise ValueError(\"max_length <= 0.\")\n length = end - begin\n if length <= max_length:\n return [(begin, end)]\n pivot = begin + length // 2\n return (_get_sharded_ranges(begin, pivot, max_length) +\n _get_sharded_ranges(pivot, end, max_length))", "def top_down_rod_cutting_helper(prices: List[int], length: int, dp: List[int]):\n if length == 0:\n return 0\n if dp[length] == 0:\n max_value = 0\n for i in range(1, length + 1):\n max_value = max(max_value, prices[i-1] + top_down_rod_cutting_helper(prices, length - i, dp))\n dp[length] = max_value\n return dp[length]", "def cut_dyadic(x):\n n = x.shape[0]\n j = math.floor(math.log2(n))\n m = 2**j\n return lax.dynamic_slice(x, (0,), (m,))", "def cutout(yx, half_size, shape=None):\n\n if shape is None:\n shape = (inf, inf)\n if not np.iterable(half_size):\n half_size = (half_size, half_size)\n\n s = np.s_[max(yx[0] - half_size[0], 0):\n min(yx[0] + half_size[0] + 1, shape[0]),\n max(yx[1] - half_size[1], 0):\n min(yx[1] + half_size[1] 
+ 1, shape[1])]\n return s", "def generate_cuts(depths, side=SIDE_LENGTH):\n for num, den in depths:\n ad = num * side / den\n poly = Polygon([(0, 0), (side, 0), (side, ad), (0, ad)])\n yield poly", "def cut_image(im):\n width, height = im.size\n # Three pictures in a row\n item_width = int(width / 3)\n box_list = []\n for i in range(0, 3):\n for j in range(0, 3):\n box = (j * item_width, i * item_width, (j + 1) * item_width, (i + 1) * item_width)\n box_list.append(box)\n image_list = [im.crop(box) for box in box_list]\n return image_list", "def ltrim1 (l,proportiontocut,tail='right'):\r\n if tail == 'right':\r\n lowercut = 0\r\n uppercut = len(l) - int(proportiontocut*len(l))\r\n elif tail == 'left':\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l)\r\n return l[lowercut:uppercut]", "def gap_split(chain: [int], gap: int):\n chain_blocks = []\n chain_block = [0] # aircraft's charging outlet joltage\n item = 0\n for i in range(len(chain)):\n if not chain_block or chain[i] < item + gap:\n item = chain[i]\n chain_block.append(item)\n continue\n item = chain[i]\n chain_block.append(item)\n if len(chain_block) > 2: # blocks with 1 or 2 items can only have 1 distinct arrangement\n chain_blocks.append(chain_block)\n chain_block = [item]\n if len(chain_block) > 2: # blocks with 1 or 2 items can only have 1 distinct arrangement\n chain_blocks.append(chain_block)\n return chain_blocks", "def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n try:\n yield sequence[i:kmer_size+i]\n except StopIteration:\n return", "def plotGlassbrainSlices(niftipath, mnipath, ortho='z', nRows=2, nCuts=6,\n threshpos=0, threshneg=0, figLayout='Both',\n showLRannot=True, findOptimalCut=True,\n imageType='svg'):\n\n # Initiation of relevant parameters\n img = nb.load(niftipath)\n lineW = 2. 
/ (nRows + int((figLayout == 'Brain' or figLayout == 'Both')))\n\n # Reduce 4D volume to 3D\n if len(img.shape) == 4:\n data4D = img.get_data()\n data4D = data4D.reshape(data4D.shape[:-1])\n img = Nifti1Image(data4D, img.get_affine())\n\n # Get voxel extend in all directions\n dirMin = np.dot(img.get_affine(), [0, 0, 0, 1])[:3]\n dirMax = np.dot(img.get_affine(),\n np.array(img.shape).tolist() + [1])[:3]\n\n if findOptimalCut:\n # Find cuts automatically\n cut_coords = find_cut_slices(img, direction=ortho, n_cuts=nCuts)\n else:\n # Split orientation in x-equal parts\n cut_coords = getEqualSpacing(dirMin, dirMax, ortho, nCuts)\n\n # Split cuts according nRows\n cut_coords = [cut_coords[int(i * len(cut_coords) / np.float(nRows)):\n int((i + 1) * len(cut_coords) / np.float(nRows))]\n for i in range(nRows)]\n\n # Create Slices\n for i in range(nRows):\n\n # Create axes for plotting\n ax = plt.subplot(nRows + int((figLayout == 'Brain' or\n figLayout == 'Both')),\n 1, i + 1)\n\n # Plot the white background for all slices as a zeros value brain\n # (without it, the view focuses around the first area plotted)\n zerobrain = Nifti1Image(img.get_data() * 0, img.get_affine())\n brain = plot_roi(\n zerobrain, zerobrain, colorbar=False, cut_coords=cut_coords[i],\n display_mode=ortho, alpha=1, draw_cross=False, cmap=plt.cm.gray,\n black_bg=False, axes=ax, annotate=False)\n\n # Plot positive values\n posdata = np.copy(img.get_data())\n posdata[posdata <= threshpos] = 0.001 # = 0 crashes contour function\n posbrain = Nifti1Image(posdata, img.get_affine())\n brain.add_contours(\n posbrain, filled=False, cmap=plt.cm.hot, alpha=1, linewidths=lineW)\n\n # Plot negative values\n negdata = np.copy(img.get_data())\n negdata[negdata >= -threshneg] = 0.001 # = 0 crashes contour function\n negbrain = Nifti1Image(negdata, img.get_affine())\n brain.add_contours(\n negbrain, filled=False, cmap=plt.cm.winter, alpha=1,\n linewidths=lineW)\n\n # Plot outer MNI contours\n brain.add_contours(\n smooth_img(mnipath, 4), alpha=1, filled=False,\n levels=[100], linewidths=lineW, cmap=plt.cm.gray)\n\n # Plot inner MNI contours\n brain.add_contours(\n nb.load(mnipath), alpha=0.8, levels=[5000], linewidths=lineW,\n cmap=plt.cm.gray)\n\n # Add annotation if requested\n if figLayout == 'Both' or figLayout == 'Number':\n brain.annotate(left_right=showLRannot, size=int(12 * lineW))\n\n # Plot overview Brain at the bottom\n if figLayout == 'Brain' or figLayout == 'Both':\n\n # Create axes for overview brain\n ax = plt.subplot(nRows + 1, 1, nRows + 1)\n\n # Find overview view direction\n if ortho == 'z':\n direction = 'x'\n elif ortho == 'x':\n direction = 'z'\n elif ortho == 'y':\n direction = 'z'\n\n # Plot the white backgroundas a zeros value brain\n brain = plot_roi(\n zerobrain, zerobrain, colorbar=False, cut_coords=[0],\n display_mode=direction, alpha=1, draw_cross=False,\n cmap=plt.cm.gray, black_bg=False, axes=ax, annotate=False)\n\n # Plot positive values\n brain.add_contours(\n posbrain, filled=False, cmap=plt.cm.hot, alpha=1, linewidths=lineW)\n\n # Plot negative values\n brain.add_contours(\n negbrain, filled=False, cmap=plt.cm.winter, alpha=1,\n linewidths=lineW)\n\n # Plot outer MNI contours\n brain.add_contours(\n smooth_img(mnipath, 4), alpha=1, filled=False,\n levels=[100], linewidths=lineW, cmap=plt.cm.gray)\n\n # Plot inner MNI contours\n brain.add_contours(\n nb.load(mnipath), alpha=0.8, levels=[5000], linewidths=lineW,\n cmap=plt.cm.gray)\n\n # Plot the line indicating the cut\n for i in 
np.array(cut_coords).flatten():\n if ortho == 'z' or ortho == 'y':\n ax.plot([-100, 100], [i, i], 'k-', lw=lineW)\n elif ortho == 'x':\n ax.plot([i, i], [-100, 100], 'k-', lw=lineW)\n\n if ortho == 'z':\n ax.axis((-300.0, 300.0, dirMin[2], dirMax[2]))\n elif ortho == 'y':\n ax.axis((-300.0, 300.0, dirMin[1], dirMax[1]))\n elif ortho == 'x':\n stretcher = (nRows + 1) / 2.\n ax.axis((-300.0 * stretcher, 300.0 * stretcher, -100.0, 100.0))\n\n # Add annotation if requested\n if figLayout == 'Both' or figLayout == 'Number':\n brain.annotate(left_right=showLRannot, size=int(12 * lineW))\n\n # Get file prefix\n if niftipath.endswith('.nii'):\n filename = opb(niftipath)[:-4]\n elif niftipath.endswith('.nii.gz'):\n filename = opb(niftipath)[:-7]\n\n # Create output folder\n path2Figure = opj(os.path.split(os.path.realpath(niftipath))[0], 'figures')\n if not os.path.exists(opj(path2Figure)):\n os.makedirs(opj(path2Figure))\n\n # Save figure\n figname = '_'.join([filename, '%s-cut' % ortho])\n plt.savefig(opj(path2Figure, '%s.%s' % (figname, imageType)))\n plt.clf()", "def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + [[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + 
block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def cutout(self, centre, radius):", "def gen_triangle_numbers(limit):\n n = 1\n tris = []\n while 0.5*n*(n + 1) < limit:\n tris.append(int(0.5*n*(n + 1)))\n n += 1\n return tris", "def cut_lines(lines, pseudoread_length):\n step = int(pseudoread_length / 2)\n\n line_iterate = [x for x in range(0, len(lines), 2)]\n\n result = []\n\n for index in line_iterate:\n\n if (index % 100000) == 0:\n print(index)\n\n id = lines[index].strip()\n\n sequence = lines[index + 1].strip()\n\n # if sequence is shorter than single window, we return just window\n end_of_range = len(sequence) - step if (len(sequence) - step > 0) else len(sequence)\n range_iterate = [x for x in\n range(0, end_of_range, step)]\n\n for i in range_iterate:\n new_id = id + '|{}'.format(i)\n kmer = sequence[i:i + pseudoread_length]\n result.append(new_id)\n result.append(kmer)\n\n return result", "def big_selections(lst: List[int], n: int) -> List[List[int]]:\n if not lst:\n return [[]]\n else:\n holder = [lst.copy()]\n for i in range(len(lst)):\n l2 = lst.copy()\n l2.pop(i)\n for item in selections(l2):\n if item not in holder and sum(item) >= n:\n holder.append(item)\n return holder", "def generateCutList(cut_configuration):\r\n\t#Check that this line reads json.loads(cut_configuration)\r\n\tinput_json = json.load(cut_configuration)\r\n\r\n\t#Currently only desired_cut and laser_cut_config are required\r\n\ttry:\r\n\t\tblock = input_json[\"block\"]\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\tcut = input_json[\"desired_cut\"]\r\n\t\tlaser = input_json[\"laser_cut_config\"]\r\n\texcept:\r\n\t\traise Exception(\"Either desired_cut or laser_cut_config not provided\")\r\n\r\n\tif cut[\"cut_process\"] == \"line\":\r\n\t\tfinal_list = line(cut[\"x1\"],cut[\"y1\"],cut[\"x2\"],cut[\"y2\"],cut[\"final_dimension_z\"]+laser[\"z_final_overshoot\"],laser)\r\n\telif cut[\"cut_process\"] == \"simple_core\":\r\n\t\tfinal_list = simple_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"vertical_core\":\r\n\t\tfinal_list = vertical_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"oss_stacked\":\r\n\t\tfinal_list = oss_stacked(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"z_focus\":\r\n\t\tfinal_list = z_focus(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"cross\":\r\n\t\tfinal_list = cross(block,cut,laser)\r\n\telse:\r\n\t\traise Exception(\"No such cut exists: Check cut_process\")\r\n\t#print(time_taken(final_list, laser))\r\n\tnow = datetime.now()\r\n\ttimestamp = 
str(now.strftime(\"%m-%d_%H_%M\"))\r\n\tcomplete_name = os.path.join(save_path, timestamp+\".csv\")\r\n\twith open(complete_name, mode='w',newline ='') as test_data:\r\n\t data_writer = csv.writer(test_data, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\t list_data = json.loads(final_list)\r\n\t for line1 in list_data:\r\n\t \tdata_writer.writerow(line1)\r\n\treturn final_list", "def corners(N = 100,scale = 0,gapwidth = 2,cornerwidth = 2):\n \n N = int(N/8)*8\n per_corner = N//4\n \n xplusmin = np.concatenate((np.ones((per_corner,1)),\\\n -np.ones((per_corner,1)),\\\n np.ones((per_corner,1)),\\\n -np.ones((per_corner,1))),\\\n axis = 0)\n \n yplusmin = np.concatenate((np.ones((per_corner,1)),\\\n -np.ones((2*per_corner,1)),\\\n np.ones((per_corner,1))),\\\n axis = 0)\n \n #Horizontal edge\n x = xplusmin[::2]*gapwidth+xplusmin[::2]*scale*(np.random.rand(2*per_corner).reshape(-1,1))\n \n y = yplusmin[::2]*gapwidth + cornerwidth*yplusmin[::2]*(np.random.rand(2*per_corner).reshape(-1,1)) \n \n z = np.floor(np.arange(2*per_corner,dtype = int)/(per_corner*0.5))\n \n horizontal = np.concatenate((x,y,z[:,np.newaxis]),axis = 1)\n \n # Vertical edge\n x2 = xplusmin[1::2]*gapwidth + cornerwidth*xplusmin[1::2]*\\\n (np.random.rand(2*per_corner).reshape(-1,1))\n \n y2 = yplusmin[1::2]*gapwidth+yplusmin[1::2]*scale*\\\n (np.random.rand(2*per_corner).reshape(-1,1))\n \n z2 = np.floor(np.arange(2*per_corner,dtype = int)/(per_corner*0.5))\n vertical = np.concatenate((x2,y2,z2[:,np.newaxis]),axis = 1)\n \n #Concatenating the final data\n data = np.concatenate((vertical,horizontal),axis = 0)\n return data", "def slice_spacing(self):\n return np.median(np.diff(self.slice_zvals))", "def brute_force_rod_cutting(rod_length, length_list, price_list):\n max_profit = 0\n max_profit_cut_list = []\n\n if rod_length == 0:\n return 0, []\n\n for i in range(len(length_list)):\n remaining_rod_len = rod_length - length_list[i]\n if remaining_rod_len >= 0:\n current_profit, current_cut_list = brute_force_rod_cutting(\n remaining_rod_len, length_list, price_list)\n current_profit += price_list[i]\n current_cut_list.append(i)\n\n if (current_profit > max_profit):\n max_profit = current_profit\n max_profit_cut_list = current_cut_list[:]\n return max_profit, max_profit_cut_list", "def cubes(amount, start, stop, truncated, sequence):\n for x in range(start, amount):\n y = x ** 3\n if truncated and y >= stop:\n sequence.append(stop)\n else:\n sequence.append(y)\n return sequence", "def cut_384(img):\n if len(img.shape) > 2:\n ret = img[:, 50:434, 60:444]\n else:\n ret = img[50:434, 60:444]\n return ret", "def cut(bits):\n\n if len(bits) <= 8:\n return [bits]\n else:\n list_ = [bits[:8]]\n list_.extend(cut(bits[8:]))\n return list_", "def get_slices(n, n_buckets):\n bucket = n // n_buckets\n slices = []\n for i in range(n_buckets):\n if i < n_buckets - 1:\n slices.append(slice(i*bucket, (i+1)*bucket))\n else:\n slices.append(slice(i*bucket, None))\n return slices", "def partition(data: list, parts: list, *args: float) -> list:\n random.seed(42)\n partition_names = parts\n random.shuffle(data)\n n = len(data)\n rem, a, b = n, 0, 0\n parts = []\n\n for p in args:\n b = a + int(n*p)\n parts.append(data[a:b])\n rem -= (b - a)\n a = b\n # end\n\n parts.append(data[-rem:])\n return parts", "def max_cut(g):\n # Write your code here.\n return []", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in 
range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def cutRod_BU_WithPieces(p, n):\n memo = defaultdict(int)\n pieces = [0 for _ in range(n + 1)]\n for i in range(1, n + 1): #n+1 as 0 is ignored\n curProfit = 0\n for j in range(1, i + 1): #i+1 as 0 is ignored\n if curProfit < p[j] + memo[i-j]:\n curProfit = p[j] + memo[i-j]\n pieces[i] = j\n memo[i] = curProfit\n\n # print indexes\n temp = n\n op = []\n while temp > 0:\n # print(pieces[temp])\n op.append((pieces[temp]))\n temp = temp - pieces[temp]\n # print(memo[n])\n return memo[n], op", "def part_2():\n return itertools.permutations(range(5, 10))", "def test_greedy_partition(self):\r\n\r\n #(non) partition into one bucket\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 1)\r\n self.assertEquals(obs_levels, [6])\r\n self.assertEquals(obs_part, [['3', '1', '2']])\r\n\r\n # two buckets\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 2)\r\n\r\n self.assertEquals(obs_levels, [3, 3])\r\n self.assertEquals(obs_part, [['3'], ['1', '2']])\r\n\r\n # larger input\r\n obs_part, obs_levels = greedy_partition({'1': 1, '2': 2, '3': 3,\r\n '4': 4, '5': 5, '6': 6}, 2)\r\n self.assertEquals(obs_levels, [11, 10])\r\n self.assertEquals(obs_part, [['6', '3', '2'], ['5', '4', '1']])", "def __construct_current_cut(induced_ordering):\n duplicate = list(induced_ordering) # create a copy of the list containing the induced ordering\n left_partition = {duplicate.pop()} # create the left partition\n right_partition = set(duplicate) # create the right partition\n return \\\n tuple(\n (\n StoerWagner.__unpack_node_names(left_partition),\n StoerWagner.__unpack_node_names(right_partition)\n )\n ) # return the pair of partitions", "def makeFolds(data, k):\r\n # randomize columns\r\n order = data.columns.tolist()\r\n random.shuffle(order)\r\n # split into folds (specified by k)\r\n folds = []\r\n fold = 0\r\n dist = len(order) / k\r\n while fold < k:\r\n start = int(round(fold * dist))\r\n end = int(round(start + dist))\r\n folds.append(order[start:end])\r\n fold = fold + 1\r\n return folds", "def cut_kmer(sequence, k_mer):\n for i in range(0, len(sequence)-k_mer + 1):\n yield sequence[i:i+k_mer]", "def get_bursts(len_seq):\n directions = len_seq / abs(len_seq)\n index_dir_change = np.where(directions[1:] - directions[:-1] != 0)[0] + 1\n bursts = np.split(len_seq, index_dir_change)\n return bursts", "def genSubset(L):\n if len(L) == 0:\n return [[]] # list of empty list\n smaller = genSubset(L[:-1]) # the list without last element\n extra = L[-1:] # a list of just the last element\n new = []\n for small in smaller:\n new.append(small + extra)\n return smaller + new", "def randomCuts(self,g,Nb):\n # A1 ~ Unif[0,N-1-(Nc-1)(g-1)[\n A1 = np.random.randint(1, self.numMonomers-1-(Nb-1)*(g+1)-1)\n return A1 + np.arange(Nb)*(1+g)", "def make_groups(length_of_stays, borders):\n length_of_stays = np.array(length_of_stays)\n groups = [np.where(length_of_stays <= borders[0])[0]] # first group\n for od, do in zip(borders, borders[1:]):\n groups.append(np.where(np.logical_and(od < length_of_stays, length_of_stays <= do))[0])\n groups.append(np.where(borders[len(borders) - 1] < length_of_stays)[0]) # last group\n return groups", "def edge_truncation(self, cut_frac = Integer(1)/3):\n new_vertices = []\n for e in self.bounded_edges():\n 
new_vertices.append((1-cut_frac)*e[0]() + cut_frac *e[1]())\n new_vertices.append(cut_frac *e[0]() + (1-cut_frac)*e[1]())\n\n new_vertices = [list(v) for v in new_vertices]\n new_rays = self.rays()\n new_lines = self.lines()\n\n return Polyhedron(vertices=new_vertices, rays=new_rays, \n lines=new_lines, \n field=self.coerce_field(cut_frac))", "def _crop_region(polygons, left, bottom, right, top, precision):\n cropped_polygons = []\n for p in polygons:\n clipped_polys = clipper._chop(p, [top, bottom], 1, 1 / precision)\n # polygon, [cuts], axis, scale\n for cp in clipped_polys[1]:\n result = clipper._chop(cp, [left, right], 0, 1 / precision)\n cropped_polygons += list(result[1])\n return cropped_polygons", "def CreateList(self, bottom_range, top_range):\n print(f\"Creating a list from {bottom_range} to {top_range}\")\n cluster_list = [item for item in range(bottom_range, top_range+1)]\n print(f\"cluster_list: {cluster_list}\")\n return cluster_list", "def sequenceToSlices(intList, sort=True):\n slices = []\n\n if intList:\n if sort:\n intList = sorted(intList)\n start = intList[0]\n stop = None\n step = None\n lastStep = None\n lastVal = start\n for curr in intList[1:]:\n curr = int(curr)\n thisStep = curr - lastVal\n #assert thisStep > 0, \"cannot have duplicate values. pass a set to be safe\"\n\n# print\n# print \"%s -> %s\" % (lastVal, curr)\n# print \"thisStep\", thisStep\n# print \"lastStep\", lastStep\n# print \"step\", step\n# print \"lastVal\", lastVal\n# print (start, stop, step)\n# print slices\n\n if lastStep is None:\n # we're here bc the last iteration was the beginning of a new slice\n pass\n elif thisStep > 0 and thisStep == lastStep:\n # we found 2 in a row, they are the beginning of a new slice\n # setting step indicates we've found a pattern\n # print \"found a pattern on\", thisStep\n step = thisStep\n else:\n if step is not None:\n # since step is set we know a pattern has been found (at least two in a row with same step)\n # we also know that the current value is not part of this pattern, so end the old slice at the last value\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n thisStep = None\n start = curr\n else:\n if lastStep == 1:\n newslice = slice(start, lastVal + 1, lastStep)\n thisStep = None\n start = curr\n else:\n newslice = slice(start, stop + 1)\n start = lastVal\n\n# print \"adding\", newslice\n slices.append(newslice)\n # start the new\n\n stop = None\n step = None\n\n lastStep = thisStep\n\n stop = lastVal\n lastVal = curr\n\n if step is not None:\n # end the old slice\n if step == 1:\n newslice = slice(start, lastVal + 1, None)\n else:\n newslice = slice(start, lastVal + 1, step)\n\n # print \"adding\", newslice\n slices.append(newslice)\n else:\n\n if lastStep == 1:\n slices.append(slice(start, lastVal + 1, lastStep))\n\n else:\n slices.append(slice(start, start + 1))\n if lastStep is not None:\n slices.append(slice(lastVal, lastVal + 1))\n\n return slices", "def ghetto_split(list_, chunk_size=100):\n logging.debug(f\"Splitting list of {len(list_)} length, chunk size = {chunk_size}\")\n split_lists = []\n for i in range(0,len(list_),chunk_size):\n split_lists.append(list_[i:i+chunk_size])\n logging.debug(f\"List has been split into {len(split_lists)} lists. 
Total num of elements in split lists is {sum([len(i) for i in split_lists])}\")\n return split_lists", "def _generate_qubits(self):\n return cq.LineQubit.range(4)", "def generate_scaffolds(self, dataset, cutoff=0.18):\n mols = []\n for ind, smiles in enumerate(dataset.ids):\n mols.append(Chem.MolFromSmiles(smiles))\n n_mols = len(mols)\n fps = [AllChem.GetMorganFingerprintAsBitVect(x, 2, 1024) for x in mols]\n\n scaffold_sets = ClusterFps(fps, cutoff=cutoff)\n scaffold_sets = sorted(scaffold_sets, key=lambda x: -len(x))\n scaffold_sets = [list(sfd) for sfd in scaffold_sets]\n return scaffold_sets", "def thickenXYList( list, tester, biSectionMax=6, interpolation=xDataEnumsModule.Interpolation.linlin):\n\n def thickenXYList2( interpolation, xl, yl, xu, yu, newList, tester, level ) :\n\n if( level == biSectionMax ) : return\n level += 1\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.loglin:\n xMid = 0.5 * ( xl + xu )\n else :\n xMid = math.sqrt( xl * xu );\n\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.linlog:\n yMid = 0.5 * ( yl + yu )\n else :\n yMid = math.sqrt( yl * yu )\n\n y = tester.evaluateAtX( xMid )\n\n dy = abs( y - yMid )\n if( ( dy > abs( y * tester.relativeTolerance ) ) and ( dy > tester.absoluteTolerance ) ) :\n newList.append( [ xMid, y ] )\n thickenXYList2( interpolation, xl, yl, xMid, y, newList, tester, level )\n thickenXYList2( interpolation, xMid, y, xu, yu, newList, tester, level )\n\n if( len( list ) < 2 ) : raise Exception( \"len( list ) = %d < 2\" % len( list ) )\n newList = []\n for i1, xy in enumerate( list ) :\n x2, y2 = xy\n if( i1 > 0 ) : thickenXYList2( interpolation, x1, y1, x2, y2, newList, tester, 0 )\n newList.append( [ x2, y2 ] )\n x1, y1 = x2, y2\n newList.sort( )\n return( newList )", "def core_slices(self, borders=None):\n if borders is None:\n borders = self.all_borders\n\n core_slices = list(self.slices)\n for border, direction in borders:\n core_slice = core_slices[border]\n if direction < 0:\n core_slice = slice(core_slice.start + self.overlap[border], core_slice.stop)\n else:\n core_slice = slice(core_slice.start, core_slice.stop - self.overlap[border])\n core_slices[border] = core_slice\n\n return tuple(core_slices)", "def buildGuassianPyramid(img, size):\n G = img\n pyramid = [G]\n for i in range(size):\n G = convolveAndDownsample(G)\n pyramid.append(G)\n\n return pyramid", "def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = 
np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e", "def create_subset_list(self):\n\n row = 0\n for time_step in self.min_increments:\n subset = SubsetClass(time_step=time_step, query_df=self.query_df, model_df=self.model_df, row=row)\n self.subset_list.append(subset)\n row += 1", "def makeSpots(truth, binsize, label):\n #prevent weirdness\n truth[-1] = False\n shift = numpy.roll(truth, 1)\n \n starts = truth & ~shift\n ends = ~truth & shift\n \n points = zip(numpy.nonzero(starts)[0], numpy.nonzero(ends)[0])\n npoints = []\n if len(points) == 0:\n return npoints\n curStart, curEnd = points[0]\n \n #compress: <-- Don't need anymore...?\n for start, end in points[1:]:\n #if start - curEnd <= binsize:\n #curEnd = end\n #else:\n npoints.append((curStart, curEnd, label))\n curStart = start\n curEnd = end\n \n npoints.append((curStart, curEnd, label))\n \n return npoints", "def cut_bkg(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_bkg'))\n return c", "def ltrimboth (l,proportiontocut):\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l) - lowercut\r\n return l[lowercut:uppercut]", "def get_cut_limbs(life):\n\t_cut = []\n\t\n\tfor limb in life['body']:\n\t\tif life['body'][limb]['cut']:\n\t\t\t_cut.append(limb)\n\t\n\treturn _cut", "def id_slits(arc,find_stars=True,chilimit=2.5,SATURATED=57000.,useLines=True):\n\n arc = arc.copy()\n \"\"\" Attempt to avoid saturated lines \"\"\"\n w = arc.shape[1]\n tmp = arc.copy()\n tmp[tmp>SATURATED] = 0.\n tmpSorted = scipy.sort(tmp,axis=1)\n flux = tmpSorted[:,w*0.97:w*0.98].mean(axis=1)\n minflux = scipy.median(flux)/4.\n del tmp\n\n if find_stars:\n starbox = []\n slit = []\n\n if useLines==False:\n flux = scipy.sort(arc,1)[:,w*4/5]\n minflux = scipy.median(flux[flux.size/3:flux.size*2/3])/2.\n mask = scipy.where(flux>minflux,1.,0.)\n inSlit = False\n tmp = []\n meds = []\n for i in range(mask.size):\n if inSlit:\n if mask[i]==0:\n inSlit = False\n end = i-1\n if end-start>8:\n tmp.append([start+1,end-1])\n slit = arc[start+3:end-3,100:-100].mean(0)\n meds.append(slit.max())\n elif mask[i]==1:\n start = i\n inSlit = True\n if inSlit:\n end = i\n if end-start>8:\n tmp.append([start+1,end-1])\n slit = arc[start+3:end-3,100:-100].mean(0)\n meds.append(slit.max())\n meds = numpy.array(meds)\n if find_stars:\n slit = []\n starbox = []\n m,s = Clip(meds,nsig=3.,locut=0.,hicut=0.75)\n for i in range(len(tmp)):\n if meds[i]<m+s*5:\n slit.append(tmp[i])\n else:\n starbox.append(tmp[i])\n return slit,starbox\n return tmp\n\n m,s = clip(tmpSorted[arc.shape[0]/2,:w*0.05],2.)\n\n inSlit = False\n i = 0\n while i<arc.shape[0]:\n# lines = findlines(arc[i])\n if useLines:\n lines = findlines(arc[i])\n else:\n med = scipy.median(arc[i])\n if med>m+5*s:\n lines = [0]*10\n else:\n lines = [0]\n if len(lines)<9 and inSlit==False:\n i += 1\n continue\n elif len(lines)>9 and inSlit==False:\n inSlit = True\n start = i\n i += 1\n continue\n\n bestchi = 1e29\n if len(lines)>9:\n #bestchi = 1e29\n x = scipy.arange(arc[i].size)\n smooth = ndimage.gaussian_filter(arc[i],1.)\n model = interpolate.splrep(x,smooth,k=3,s=0)\n comp = ndimage.gaussian_filter(arc[i-1],1.)\n usedpix = arc[i-1][10:-10]>scipy.median(arc[i-1])\n for o in range(30):\n offset = float(o-15.)/5.\n row = interpolate.splev(x[10:-10]+offset,model)\n chi = (comp[10:-10]-row)**2/(abs(comp[10:-10]))\n chi = chi[usedpix]\n chi.sort()\n chi = chi[:-chi.size/100] # Reject the five highest points\n if chilimit>6. 
and i>600 and o>6 and 1==2:\n import pylab\n pylab.plot(row)\n pylab.plot(comp[10:-10])\n pylab.figure()\n pylab.plot((row-comp[10:-10])**2/(abs(comp[10:-10])+16.))\n pylab.show()\n if chi.sum()/chi.size<bestchi:\n bestchi = chi.sum()/chi.size\n\n if inSlit is True and (bestchi>chilimit or len(lines)<9):\n \"\"\" The row is at the top edge of the slit. \"\"\"\n inSlit = False\n end = i\n\n i += 1\n if end-start<3:\n continue\n \"\"\"\n Conservatively shrink the edges. A better approach\n might be to use the flatfield data and set the edge\n to where the flux is, say, 1 sigma below the nominal\n level for the slit.\n \"\"\"\n# if start!=0:\n# start += 2\n# end -= 2\n\n \"\"\" Check if the slit is a starbox (if requested) \"\"\"\n if find_stars:\n mid = (start+end)/2\n peaks = findlines(arc[mid],False)\n is_star = check_star(peaks,arc[mid])\n else: is_star = False\n\n \"\"\" Skip small slits \"\"\"\n if not is_star and end-start<11:\n continue\n elif is_star and end-start<9:\n continue\n\n \"\"\"\n Conservatively shrink the edges. A better approach\n might be to use the flatfield data and set the edge\n to where the flux is, say, 1 sigma below the nominal\n level for the slit.\n \"\"\"\n if is_star:\n starbox.append([start,end])\n else:\n while flux[start+1]-flux[start]>3.*flux[start]**0.5:\n start += 1\n while flux[end-1]-flux[end]>3.*flux[end]**0.5:\n end -= 1\n if flux[start:end].mean()<minflux:\n continue\n slit.append([start,end])\n\n elif i+1==arc.shape[0] and end<start:\n \"\"\" The top of the mask is also the top of a slit. \"\"\"\n end = i+1\n if find_stars:\n mid = (start+end)/2\n peaks = findlines(arc[mid],False)\n is_star = check_star(peaks,arc[mid])\n else: is_star = False\n\n if not is_star and end-start<11:\n continue\n elif is_star and end-start<9:\n continue\n\n if is_star:\n starbox.append([start+2,end])\n else:\n while flux[start+1]-flux[start]>3.*flux[start]**0.5:\n start += 1\n if flux[start:end].mean()<minflux:\n continue\n slit.append([start,end])\n break\n else:\n \"\"\" In the middle of the slit, nothing to do.... 
\"\"\"\n i += 1\n\n if find_stars:\n return slit,starbox\n return slit", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def rslices(n, allow_empty=False):\n for _ in range(5):\n yield rslice(n, allow_empty)", "def rslices(n, allow_empty=False):\n for _ in range(5):\n yield rslice(n, allow_empty)", "def getEqualSpacing(dirMin, dirMax, ortho, nCuts):\n\n if ortho == 'x':\n idx = 0\n elif ortho == 'y':\n idx = 1\n elif ortho == 'z':\n idx = 2\n\n sign = -1.0 * np.sign(dirMin[idx])\n stepsize = sign * int(np.abs(dirMin[idx] - dirMax[idx]) / (nCuts + 1))\n cut_order = np.arange(dirMin[idx], dirMax[idx], stepsize)\n if len(cut_order) == nCuts + 2:\n cut_coords = cut_order[1:-1]\n else:\n cut_coords = np.delete(cut_order, np.argmax(np.abs(cut_order)))\n\n return cut_coords", "def make_combinations(items):\n\n def inner(items, r):\n \"\"\"\n recursively yields partitioned remainders of original partition lists\n \"\"\"\n items = set(items)\n if not len(items):\n yield ()\n return\n first = next(iter(items))\n remainder = items.difference((first, ))\n for combination in combinations(remainder, r-1):\n first_subset = (first, ) + combination\n for partition in inner(remainder.difference(combination), r):\n yield (first_subset, ) + partition\n\n def outter(items, r):\n \"\"\"\n combines partition lists\n \"\"\"\n items = set(items)\n for i in range(len(items), -1, -r):\n if i == 0:\n for partition in inner(items, r):\n yield partition\n elif i != r:\n for combination in combinations(items, i):\n for partition in inner(items.difference(combination), r):\n yield partition + (combination, )\n\n # step through length of origin combination partitions to ensure full list\n for i in range(1, len(items)):\n gen = outter(items, i)\n for row in gen:\n yield row", "def all_rolls(sides):\r\n result = []\r\n temp_list = list(range(2, 2*sides+1))\r\n\r\n while temp_list:\r\n result.extend(temp_list)\r\n temp_list = temp_list[1:-1]\r\n\r\n return sorted(result)", "def long_slice(image_path, out_name, outdir, slice_size, net):\n img = Image.open(image_path)\n imgout = Image.open(image_path)\n orw, orh = img.size\n width, height = img.size\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n orw, orh = imgout.size\n width, height = img.size\n print(img.size)\n r = 1\n draw = ImageDraw.Draw(imgout)\n\n flag_continue = True\n while flag_continue:\n if os.path.exists(\"./testsliceimage/list.txt\"):\n os.remove(\"./testsliceimage/list.txt\")\n file = open(\"./testsliceimage/list.txt\", \"w+\")\n for sliceh in range(slicesh*step):\n for slicew in range(slicesw*step):\n #set the bounding box! 
The important bit\n bbox = (int(slicew*slice_size/step), int(sliceh*slice_size/step), int(slicew*slice_size/step)+slice_size, int(sliceh*slice_size/step)+slice_size)\n working_slice = img.crop(bbox)\n\n working_slice.save(os.path.join(outdir, \"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\"))\n file.write(\"slice_\" + str(height) + \"_\" + str(width) + \"_\" + out_name + \"_\" + str(sliceh) + \"_\" + str(slicew) +\".png\\n\")\n\n if sliceh == 16 and slicew == 27 and width == 450 :\n print (int(slicew*slice_size/step), int(sliceh*slice_size/step),int(slicew*slice_size/step)+slice_size,int(sliceh*slice_size/step)+slice_size)\n\n file.close()\n transform_test = tf.Compose([tf.Grayscale(), tf.ToTensor(), tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n testset = UnknownDataset(\"./testsliceimage/\", \"./testsliceimage/list.txt\", transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=False, num_workers=WORKERS)\n\n with torch.no_grad():\n N = 0\n for data in testloader:\n images, img_names = data['image'], data['image_name']\n outputs = net(images.float())\n _, predicted = torch.max(outputs.data, 1)\n # print(predicted)\n if max(predicted) == 1 :\n ite = -1\n for predic in predicted :\n ite += 1\n if predic == 1 and outputs[ite][1]-outputs[ite][0] > CONFIDENCE:\n print(img_names[ite])\n # print(outputs)\n N += 1\n #dessiner carre sur image\n slh = int(img_names[ite].split('_')[4])\n slw = int(img_names[ite].split('_')[5][:-4])\n x1 = int(slh * slice_size / step)\n x2 = x1 + slice_size\n y1 = int(slw * slice_size / step)\n y2 = y1 + slice_size\n\n if slh == 16 and slw == 27 and width ==450 :\n print (x1, y1, x2, y2)\n\n print(r)\n rh = orh / height\n rw = orw / width\n x1 = x1 * rh\n x2 = x2 * rh\n y1 = y1 * rw\n y2 = y2 * rw\n\n draw.rectangle(((y1, x1), (y2, x2)), outline=\"red\")\n # draw.text((y2,x2), img_names[0])\n copyfile(\"./testsliceimage/\"+img_names[ite], \"./goodimage/\"+ img_names[ite])\n\n if width <= 200 or height <= 200:\n flag_continue = False\n else:\n r = r * scale\n width, height = int(width/scale), int(height/scale)\n slicesh = int(math.ceil(height/slice_size))\n slicesw = int(math.ceil(width/slice_size))\n img = img.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n width, height = img.size\n\n # imgout = imgout.resize((slicesw*slice_size, slicesh*slice_size), PIL.Image.ANTIALIAS)\n imgout.save(\"./rectangle/out\", \"PNG\")", "def _calc_block_ends(line_number, list_length, radius):\n\t\tstart = line_number - radius\n\t\tend = line_number + radius + 1\n\t\n\t\t# Realign if over/undershot\n\t\tif start < 0:\n\t\t\tend -= start\n\t\t\tstart = 0\n\t\tif end >= list_length:\n\t\t\tstart -= end - list_length\n\t\t\tend = list_length\n\t\t\t\n\t\t# Clamp to limits\n\t\tif start < 0:\n\t\t\tstart = 0\n\t\tif end >= list_length:\n\t\t\tend = list_length\n\t\t\n\t\treturn slice(start, end)", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def splitList(itms, numGr):\n\ttcount = len(itms)\n\tcItems = list(itms)\n\tsz = int(len(cItems) / numGr)\n\tgroups = list()\n\tcount = 0\n\tfor i in range(numGr):\n\t\tif (i == numGr - 1):\n\t\t\tcsz = tcount - count\n\t\telse:\n\t\t\tcsz = sz + randint(-2, 2)\n\t\t\tcount += csz\n\t\tgr = list()\n\t\tfor j in range(csz):\n\t\t\tit = 
selectRandomFromList(cItems)\n\t\t\tgr.append(it)\t\n\t\t\tcItems.remove(it)\t\n\t\tgroups.append(gr)\n\treturn groups" ]
[ "0.6510405", "0.6228084", "0.60608464", "0.60395974", "0.5939082", "0.59373015", "0.5816045", "0.5764991", "0.5731781", "0.5705467", "0.57034826", "0.57019746", "0.5698488", "0.56911135", "0.56729025", "0.5668522", "0.56589216", "0.5610545", "0.56084186", "0.5563644", "0.55584705", "0.5507292", "0.54872245", "0.5421182", "0.5401605", "0.53999895", "0.5354079", "0.5343062", "0.5327405", "0.52783895", "0.52470857", "0.5240807", "0.5236492", "0.5234829", "0.52343535", "0.5215339", "0.5207072", "0.5191714", "0.51858026", "0.5184371", "0.51588136", "0.5138781", "0.5138575", "0.5138374", "0.51383364", "0.5136296", "0.5135001", "0.5130157", "0.51208067", "0.5116217", "0.5113867", "0.51066667", "0.5106512", "0.5099907", "0.5096055", "0.50774646", "0.5068777", "0.50671786", "0.506614", "0.50583476", "0.50540733", "0.5041153", "0.5030437", "0.5029639", "0.5019379", "0.50033826", "0.49990204", "0.4998392", "0.49855095", "0.4983641", "0.49781108", "0.4976593", "0.4973956", "0.4973099", "0.4971015", "0.49668667", "0.49664888", "0.49662668", "0.49646086", "0.49628717", "0.49619496", "0.49617958", "0.49514416", "0.49512056", "0.49505424", "0.49504438", "0.4947001", "0.49414068", "0.4939861", "0.49357653", "0.49292263", "0.49278513", "0.49278513", "0.49277702", "0.49228072", "0.4918838", "0.49158588", "0.49122065", "0.4911567", "0.49061283" ]
0.67580295
0
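Taken together, negatives, negative_scores, document_score, and document_rank close out one retrieval record under the triplet objective declared in each record's metadata. A minimal sketch of how such a record might be unpacked for contrastive training — the field names follow this dump's schema, but the loader itself and the index alignment of the score list are assumptions:

# Hypothetical consumer of one record from this dump; field names match
# the schema (query, document, negatives, negative_scores); everything
# else here is an assumption, not part of the dataset.
def to_triplets(record):
    query = record["query"]
    positive = record["document"]
    # negative_scores is assumed to be index-aligned with negatives.
    for neg, score in zip(record["negatives"], record["negative_scores"]):
        yield query, positive, neg, float(score)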
This algorithm returns a cutlist which performs OG slicing. It begins with an optional core, then cuts out slices until as many OG seeds as specified are removed from the block.
def oss_stacked(block, cut, laser):
    # Slice geometry for the two block axes (x-facing and y-facing passes).
    x0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut["final_dimension_x"]/2)
    x0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut["final_dimension_y"]/2)
    angle = math.radians(laser["kerf_angle"]/2)
    gap = math.tan(pyramid_angle_1) * (cut["final_dimension_x"]/2) + cut["gap_size"]
    unit_length = gap + cut["base_height"]
    max_slices = math.floor(block["thickness"]/unit_length)
    taper_straight = math.tan(angle) * (laser["z_spacing"])

    # Optional core cut first; drop its first and last entries so the
    # slice commands can be appended onto the same cutlist.
    if cut["core"] == "yes":
        cutlist = json.loads(vertical_core(block, cut, laser))
        cutlist.pop()
        cutlist.pop(0)
    else:
        cutlist = []

    a0 = -(90 + math.degrees(angle))
    z_shift = (cut["base_height"] + gap) * math.sin(angle)
    x_shift = (cut["base_height"] + gap) * math.cos(angle)
    x_delta = math.sin(angle) * block["origin_x"]
    y_delta = math.sin(angle) * block["origin_y"]
    z1_delta = math.cos(angle) * block["origin_x"]
    z2_delta = math.cos(angle) * block["origin_y"]

    cutlist.append(["a_abs", f"{a0:.6f}"])
    cutlist.append(["c_abs", str(block["physical_rotation"])])
    cutlist.append(["z_abs", str(z0_1 + z2_delta)])

    if pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:
        if cut["num_of_seeds"] == "max":
            num_slices = max_slices
        else:
            num_slices = cut["num_of_seeds"] + 1

        # Each pass cuts four pyramid slices, issuing c_abs rotations of
        # 90/180/270 between faces, then shifts the slice plane for the
        # next seed.
        for i in range(num_slices):
            cutlist = (cutlist
                       + pyramid_slice(cut["final_dimension_y"]/2 - block["origin_x"], x0_1 + y_delta, -cut["final_dimension_y"]/2 - block["origin_x"], x1_1 + y_delta, z0_1 + block["origin_y"], laser["xy_spacing"], laser["z_spacing"], taper_x_1, taper_y_1, taper_straight, layers_1)
                       + [["z_abs", str(z0_2 + z1_delta)]] + [["c_abs", "90"]]
                       + pyramid_slice(cut["final_dimension_x"]/2 + block["origin_y"], x0_2 + x_delta, -cut["final_dimension_x"]/2 + block["origin_y"], x1_2 + x_delta, z0_2 + block["origin_x"], laser["xy_spacing"], laser["z_spacing"], taper_x_2, taper_y_2, taper_straight, layers_2)
                       + [["z_abs", str(z0_1 - z2_delta)]] + [["c_abs", "180"]]
                       + pyramid_slice(cut["final_dimension_y"]/2 + block["origin_x"], x0_1 - y_delta, -cut["final_dimension_y"]/2 + block["origin_x"], x1_1 - y_delta, z0_1 - block["origin_y"], laser["xy_spacing"], laser["z_spacing"], taper_x_1, taper_y_1, taper_straight, layers_1)
                       + [["z_abs", str(z0_2 - z1_delta)]] + [["c_abs", "270"]]
                       + pyramid_slice(cut["final_dimension_x"]/2 - block["origin_y"], x0_2 - x_delta, -cut["final_dimension_x"]/2 - block["origin_y"], x1_2 - x_delta, z0_2 - block["origin_x"], laser["xy_spacing"], laser["z_spacing"], taper_x_2, taper_y_2, taper_straight, layers_2)
                       )
            z0_1 = z0_1 + z_shift
            z0_2 = z0_2 + z_shift
            x0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift
            cutlist.append(["c_abs", str(block["physical_rotation"])])
            cutlist.append(["z_abs", str(z0_1 + z2_delta)])
    else:
        raise Exception("Pyramid angle too small")

    cutlist.insert(0, ["set_trigger4", "1", "0", "7", "8", "45"])
    cutlist.append(["stop_trigger"])
    return json.dumps(cutlist)
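For orientation, a hedged usage sketch of the document's function: the dictionary keys below mirror the lookups in the body of oss_stacked, but the numeric values are illustrative placeholders, and oss_helper, vertical_core, and pyramid_slice are assumed to be defined alongside it (they may read further keys not shown here).

import json

# Illustrative inputs only: keys match what oss_stacked reads; values are
# made-up placeholders and the helper functions may require more keys.
block = {"thickness": 10.0, "physical_rotation": 0.0,
         "origin_x": 0.0, "origin_y": 0.0}
cut = {"final_dimension_x": 2.0, "final_dimension_y": 2.0,
       "gap_size": 0.5, "base_height": 1.0,
       "core": "no",        # skip the optional vertical core
       "num_of_seeds": 3}   # or "max" to fill the block's thickness
laser = {"kerf_angle": 8.0, "xy_spacing": 0.01, "z_spacing": 0.05}

cutlist = json.loads(oss_stacked(block, cut, laser))  # JSON string -> list
print(len(cutlist), "commands; first:", cutlist[0])   # ["set_trigger4", ...]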
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res", "def _recursive_cutting(g, p, res=[]):\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res", "def _core_subgraph(G, k_filter, k=None, core=None):\n if core is None:\n core = core_number(G)\n if k is None:\n k = max(core.values())\n nodes = (v for v in core if k_filter(v, k, core))\n return G.subgraph(nodes).copy()", "def alternatingSlice(self,geom,polyLayer,targetArea,granularity,direction,method):\r\n global recurs\r\n recurs+=1\r\n if self.debug: print \"******************************\"\r\n if self.debug: print \"Slicing, No of part: \",str(recurs)\r\n if self.debug: print \"Slicing, Granularity remaining: \", str(granularity)\r\n bbox=[geom.boundingBox().xMinimum(),geom.boundingBox().yMinimum(),geom.boundingBox().xMaximum(),geom.boundingBox().yMaximum()]\r\n if direction==\"h\":\r\n step=(bbox[2]-bbox[0])/granularity\r\n pointer=bbox[0]\r\n else:\r\n step=(bbox[3]-bbox[1])/granularity\r\n pointer=bbox[1]\r\n totalArea=0\r\n slices=0\r\n #save the original geom\r\n tempGeom=QgsGeometry(geom)\r\n #start slicing until targetArea is reached\r\n while totalArea<targetArea*0.999:\r\n pointer+=step\r\n if direction==\"h\":\r\n startPt=QgsPoint(pointer,bbox[1])\r\n endPt=QgsPoint(pointer,bbox[3])\r\n (multiGeom,tempGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n else:\r\n startPt=QgsPoint(bbox[0],pointer)\r\n endPt=QgsPoint(bbox[2],pointer)\r\n (tempGeom,multiGeom)=self.cutPoly(tempGeom,startPt,endPt)\r\n if multiGeom!=None:\r\n totalArea+=multiGeom.area();\r\n slices+=1\r\n if self.debug: print \"Slicing, Slices: \", str(slices)\r\n #do the real cutting when reached targetArea and add \"left\" feature to layer\r\n if self.debug: print \"Cutting with line, Cutline:\", startPt,\",\",endPt\r\n if direction==\"h\":\r\n (multiGeom,geom)=self.cutPoly(geom,startPt,endPt,True)\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts to the left:\",str(len(multiGeom.asGeometryCollection()))\r\n if geom:\r\n if self.debug: print \"After split, Parts to the right:\",str(len(geom.asGeometryCollection()))\r\n else:\r\n (geom,multiGeom)=self.cutPoly(geom,startPt,endPt,True)\r\n if geom:\r\n if self.debug: print \"After split, Parts above:\",str(len(geom.asGeometryCollection()))\r\n if multiGeom:\r\n if self.debug: print \"After split, Parts under:\",str(len(multiGeom.asGeometryCollection()))\r\n self.addGeomToLayer(multiGeom,polyLayer)\r\n #self.addGeomToLayer(QgsGeometry.fromPolyline([startPt,endPt]),lineLayer)\r\n if geom:\r\n if geom.area()>targetArea:\r\n if (method==\"v\") or ((method==\"a\") and (direction==\"h\")):\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"v\",method)\r\n else:\r\n self.alternatingSlice(geom,polyLayer,targetArea,granularity-slices,\"h\",method)\r\n else:\r\n self.addGeomToLayer(geom,polyLayer)", "def clusterparts(parts, block_len):\n parts = sorted(parts, key=op.itemgetter(-1))\n global opt\n clusters = [[parts[0][-1]]]\n \n # assign all parts to clusters\n for i in 
range(1,len(parts)):\n x, y = parts[i][-1]\n \n # detect box already in cluster\n fc = []\n for k,cl in enumerate(clusters):\n for xc,yc in cl:\n ar = intersectarea((xc,yc),(x,y),block_len)\n intrat = float(ar)/(block_len*block_len)\n if intrat > float(opt.blint):\n if not fc: clusters[k].append((x,y))\n fc.append(k)\n break\n \n # if this is new cluster\n if not fc:\n clusters.append([(x,y)])\n else:\n # re-clustering boxes if in several clusters at once\n while len(fc) > 1:\n clusters[fc[0]] += clusters[fc[-1]]\n del clusters[fc[-1]]\n del fc[-1]\n \n item = op.itemgetter\n # filter out small clusters\n clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]\n \n # filter out clusters, which doesn`t have identical twin cluster\n clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]\n \n return clusters", "def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n yield sequence[i:i+kmer_size]", "def delete_shell(core_mol, del_mol, cut_off, in_out='in'):\n\n if in_out not in ['in', 'out']:\n raise ValueError(\"The passed in_out parameter is not recognized: {}\".format(in_out))\n\n # Copy the passed molecule to delete in\n to_del = oechem.OEMol(del_mol)\n\n # Create a OE bit vector mask for each atoms of the\n # molecule to delete\n bv = oechem.OEBitVector(to_del.GetMaxAtomIdx())\n bv.NegateBits()\n\n # Create the Nearest neighbours\n nn = oechem.OENearestNbrs(to_del, cut_off)\n for nbrs in nn.GetNbrs(core_mol):\n # bv.SetBitOff(nbrs.GetBgn().GetIdx())\n for atom in oechem.OEGetResidueAtoms(nbrs.GetBgn()):\n bv.SetBitOff(atom.GetIdx())\n\n # Invert selection mask\n if in_out == 'in':\n bv.NegateBits()\n\n pred = oechem.OEAtomIdxSelected(bv)\n for atom in to_del.GetAtoms(pred):\n to_del.DeleteAtom(atom)\n\n # It is necessary to reset the atom indexes of the molecule with\n # delete atoms to avoid possible mismatching\n reset_del = oechem.OEMol(to_del)\n\n return reset_del", "def disconnect_lowest_ecc(G, num_remove):\n num_removed = []\n spectral_gap = []\n\n g = G.copy()\n vs = np.random.choice(list(g.nodes()), num_remove, replace=False)\n for i, v in enumerate(vs):\n neighbors = list(g.neighbors(v))\n if len(neighbors) == 0:\n continue\n ecc = np.array([nx.eccentricity(G, n) for n in neighbors])\n remove = np.argmin(ecc)\n g.remove_edge(v, neighbors[remove])\n\n num_removed.append(i)\n spectral_gap.append(get_spectral_gap(g))\n\n return num_removed, spectral_gap", "def max_cut(g):\n # Write your code here.\n return []", "def slice_graph_bwd( endea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\t\r\n\tstartnode = slice_node( 0, endea, reg )\t\t# start at the end of the slice node\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\t[tgt_reg, split] = currslice.get_target_reg_bwd()\r\n\t\tprint tgt_reg\r\n\t\tprint split\r\n\t\tif tgt_reg == \"END\":\r\n\t\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and 
\\\r\n\t\t\tcurrslice.startea != currslice.get_lines()[0][0]):\r\n\t\t\t# Do process this node further, nothing really going on \r\n\t\t\tprint \"ZEZ\"\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name() )\r\n\t\telse:\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tif split:\r\n\t\t\t\tfor ref in xrefs:\r\n\t\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\treturn [ graph, data_bib ]", "def k_crust(G, k=None, core_number=None):\n # Default for k is one less than in _core_subgraph, so just inline.\n # Filter is c[v] <= k\n if core_number is None:\n core_number = find_cores(G)\n if k is None:\n k = max(core_number.values()) - 1\n nodes = (v for v in core_number if core_number[v] <= k)\n return G.subgraph(nodes).copy()", "def cliques_on_ring(cliques,labels,G, cut=0):\n print(\"cliques_on_ring CUT:\", cut)\n #if partitions\n if cut > 1:\n partitions = get_partitions(cliques,cut)\n for partition in partitions:\n if len(partition) > 1:\n n1 = random.choice(partition[0])\n for idx,clique in enumerate(partition):\n if idx > 0:# and idx not in cuts:\n candidates = [n for n in clique if labels[n.id] != labels[n1.id]]\n candidates = candidates if len(candidates) > 0 else clique\n n2 = random.choice(candidates)\n G.add_edge(n1,n2)\n n1 = random.choice(clique)\n\n else:\n n1 = random.choice(cliques[0]) \n for idx,clique in enumerate(cliques):\n if idx > 0:# and idx not in cuts:\n candidates = [n for n in clique if labels[n.id] != labels[n1.id]]\n candidates = candidates if len(candidates) > 0 else clique\n n2 = random.choice(candidates)\n G.add_edge(n1,n2)\n n1 = random.choice(clique)\n \n #Attach head and tail cliques\n candidates = [n for n in cliques[0] if labels[n.id] != labels[n1.id]]\n candidates = candidates if len(candidates) > 0 else cliques[0]\n n2 = random.choice(candidates)\n G.add_edge(n1,n2)", "def __call__(self, g, n_partitions):\n\n def _iterative_cutting(g, p):\n \"\"\"helper function (iterative version)\"\"\"\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res\n\n def _recursive_cutting(g, p, res=[]):\n \"\"\"helper function (recursive version)\"\"\"\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, 
weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res\n\n # when computing a partitioning for the graph nodes,\n # if result is known for a smaller value of n_partitions\n # don't restart from scratch but use it as an initial value\n if g not in self._cache or len(self._cache[g]) < n_partitions:\n self._cache.clear()\n partitions = _recursive_cutting(g, p=n_partitions)\n self._cache[g] = partitions[:]\n else:\n partitions = self._cache[g][:]\n\n # merge small partitions to return the required number of partitions\n while len(partitions) > n_partitions:\n partitions.sort(key=len, reverse=True)\n e1 = partitions.pop()\n e2 = partitions.pop()\n partitions.append(e1.union(e2))\n return partitions", "def core_slices(self, chunk):\n intersect_slices = []\n for s, b, olap, idx in zip(chunk.slices, self.bounds, self.overlap, range(0, len(chunk.slices))):\n if s.start == b.start:\n intersect_slices.append(slice(s.start + olap, s.stop))\n elif s.stop == b.stop:\n intersect_slices.append(slice(s.start, s.stop - olap))\n else:\n intersect_slices.append(s)\n\n return tuple(self.remove_chunk_overlap(chunk, intersect_slices))", "def slice_graph_fwd( startea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\tstartnode = slice_node( startea, 0, reg )\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\ttgt_reg = currslice.get_target_reg()\r\n\t\tif tgt_reg == \"END\":\r\n\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and \\\r\n\t\t\tcurrslice.endea != currslice.get_lines()[-1][0]):\r\n\t\t\t# Nothing much happening here, just proceed to parent bocks\r\n\t\t\tif ua_mnem( currslice.endea ) == \"call\":\r\n\t\t\t\txrefs = get_short_crefs_from( currslice.endea )\r\n\t\t\telse:\r\n\t\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\telse:\r\n\t\t\t# Register was modified, use new register\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = 
newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\treturn [ graph, data_bib ]", "def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n try:\n yield sequence[i:kmer_size+i]\n except StopIteration:\n return", "def segment_cough(x,fs, cough_padding=0.2,min_cough_len=0.2, th_l_multiplier = 0.1, th_h_multiplier = 2):\n \n cough_mask = np.array([False]*len(x))\n \n\n #Define hysteresis thresholds\n rms = np.sqrt(np.mean(np.square(x)))\n seg_th_l = th_l_multiplier * rms\n seg_th_h = th_h_multiplier*rms\n\n #Segment coughs\n coughSegments = []\n padding = round(fs*cough_padding)\n min_cough_samples = round(fs*min_cough_len)\n cough_start = 0\n cough_end = 0\n cough_in_progress = False\n tolerance = round(0.01*fs)\n below_th_counter = 0\n \n for i, sample in enumerate(x**2):\n if cough_in_progress:\n if sample<seg_th_l:\n below_th_counter += 1\n if below_th_counter > tolerance:\n cough_end = i+padding if (i+padding < len(x)) else len(x)-1\n cough_in_progress = False\n if (cough_end+1-cough_start-2*padding>min_cough_samples):\n coughSegments.append(x[cough_start:cough_end+1])\n cough_mask[cough_start:cough_end+1] = True\n elif i == (len(x)-1):\n cough_end=i\n cough_in_progress = False\n if (cough_end+1-cough_start-2*padding>min_cough_samples):\n coughSegments.append(x[cough_start:cough_end+1])\n else:\n below_th_counter = 0\n else:\n if sample>seg_th_h:\n cough_start = i-padding if (i-padding >=0) else 0\n cough_in_progress = True\n \n return coughSegments, cough_mask", "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def make_seg_chopped_array(dataset, fields, make_params, chop_params, include_trials):\n if 'ignored_trials' in make_params:\n logger.warning(\"`ignored_trials` found in `make_params`. 
Overriding with `include_trials`\")\n make_params.pop('ignored_trials')\n if type(fields) != list:\n fields = [fields]\n trial_data = dataset.make_trial_data(ignored_trials=~include_trials, **make_params)\n ci = ChopInterface(**chop_params)\n array_dict = ci.chop(trial_data, fields)\n return array_dict, ci", "def get_partitions(cliques,cut=1):\n cliques.sort(key=len)\n k, m = divmod(len(cliques), cut)\n return list(cliques[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(cut))", "def clip_scaffold_loops(self):\r\n start = 0\r\n index = 0\r\n ie = len(self.walk)\r\n while index < ie:\r\n segment = None\r\n try:\r\n segment = self.walk[index+1]\r\n except IndexError:\r\n self.remove_biggest_loop_in_range(start, index)\r\n return\r\n if segment is None or segment.value == 'RUNG':\r\n # Segment is essential.\r\n if start != index:\r\n ie -= self.remove_biggest_loop_in_range(start, index)\r\n start = index + 2\r\n index += 2", "def inregionCut(self,l,r,g,Nb):\n A1 = np.random.randint(l+1-self.keepCL, r-1-(Nb-1)*(g+1)-(1-self.keepCL))\n return A1 + np.arange(Nb)*(1+g)", "def slice_genome(self, fractions=0.2, at_idx=None):\n if type(at_idx) == int:\n at_idx = [at_idx]\n if type(fractions) == int:\n fractions = [fractions]\n idxs = []\n parts = []\n if fractions == [1]:\n return [GenomePart(self, 0, len(self.data)+1)]\n elif at_idx is None:\n idxs.append(np.random.randint(0, len(self)))\n if fractions is None:\n idxs = at_idx\n else:\n for frac in fractions:\n new_idx = idxs[-1]+int(len(self)*frac)\n if new_idx > len(self):\n new_idx -= len(self)\n idxs.append(new_idx)\n parts.append(GenomePart(self, idxs[-1], idxs[0]))\n for i in range(len(idxs)-1):\n parts.append(GenomePart(self, idxs[i], idxs[i+1]))\n return parts", "def cutPoly(self,geom,startPt,endPt,debug=False):\r\n #if we have disjoint Multi geometry as geom to split we need to iterate over its parts\r\n splittedGeoms=[]\r\n leftFragments=[]\r\n rightFragments=[]\r\n #if self.debug: print \"Number of geoms when slicing: \",str(len(geom.asGeometryCollection()))\r\n for geomPart in geom.asGeometryCollection():\r\n #split the actual part by cut line defined by startPt,endPt\r\n (res,splittedGeomsPart,topo)=geomPart.splitGeometry([startPt,endPt],False)\r\n splittedGeoms+=splittedGeomsPart\r\n #Add the remaining geomPart to the rightFragments or letfFragments\r\n #depending on distance\r\n d=self.signedDistCentroidFromLine(geomPart,startPt,endPt)\r\n if d>0:\r\n rightFragments.append(geomPart)\r\n else:\r\n leftFragments.append(geomPart)\r\n #if self.debug: print j,splittedGeoms\r\n\r\n for fragment in splittedGeoms:\r\n \"\"\"\r\n calculate signed distance of centroid of fragment and the splitline\r\n if signed distance is below zero, the point is to the left of the line\r\n if above zero the point is to the right of the line\r\n \"\"\"\r\n d=self.signedDistCentroidFromLine(fragment,startPt,endPt)\r\n #if debug==True:\r\n #if self.debug: print d\r\n\r\n if d>0:\r\n rightFragments.append(fragment)\r\n else:\r\n leftFragments.append(fragment)\r\n\r\n #if self.debug: print \"Left frags:\",len(leftFragments),\"Right frags:\",len(rightFragments)\r\n leftGeom=self.buildMultiPolygon(leftFragments)\r\n rightGeom=self.buildMultiPolygon(rightFragments)\r\n return leftGeom,rightGeom", "def simple_core(block,cut,laser):\r\n\r\n\tlayers = int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\r\n\t#Since all cuts are square, the offsets are more obvious than in the general linear case.\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * 
laser[\"z_spacing\"]\r\n\tmax_delta = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * 2\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", \"0\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", str(block[\"thickness\"])])\r\n\r\n\tfor a in range(layers):\r\n\t\tx1, y1 = cut[\"final_dimension_x\"]/2 + a*taper, cut[\"final_dimension_y\"]/2 + a*taper\r\n\t\twhile abs(x1-cut[\"final_dimension_x\"]/2) < abs(max_delta):\r\n\t\t\tcutlist.append([\"jump\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tx1, y1 = x1 + laser[\"xy_spacing\"], y1 + laser[\"xy_spacing\"]\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\tmax_delta = max_delta - taper \r\n\treturn json.dumps(cutlist)", "def coord_chop(full_length, size, mode):\n if size > full_length:\n mode = None\n pair_list = []\n if mode is 'exact_size':\n counter = 0\n while counter < full_length-size:\n new_upper = counter+size\n new_pair = (counter, new_upper)\n pair_list.append(new_pair)\n counter = new_upper\n last_pair = (counter, full_length)\n pair_list.append(last_pair)\n elif mode is 'maxsize_bisect':\n pair_list = [(0, full_length)]\n pair_list = recursive_bisector(pair_list, size)\n elif mode is 'maxsize_divisor':\n divisor = full_length/size +1\n approx_size = full_length/divisor\n counter = 0\n origin = 0\n while counter <= divisor:\n new_upper = origin+approx_size\n new_pair = (origin, new_upper)\n pair_list.append(new_pair)\n origin = new_upper\n counter +=1\n last_pair = (origin, full_length)\n pair_list.append(last_pair)\n elif mode is 'count_divisor':\n seg_count = size # using the size arg to pass the desired seg count\n target_size = full_length/seg_count+1\n pair_list = coord_chop(full_length, target_size, 'exact_size')\n else:\n pair_list = [(0, full_length)]\n return pair_list", "def core_slices(self, borders=None):\n if borders is None:\n borders = self.all_borders\n\n core_slices = list(self.slices)\n for border, direction in borders:\n core_slice = core_slices[border]\n if direction < 0:\n core_slice = slice(core_slice.start + self.overlap[border], core_slice.stop)\n else:\n core_slice = slice(core_slice.start, core_slice.stop - self.overlap[border])\n core_slices[border] = core_slice\n\n return tuple(core_slices)", "def k_core(G, k=None, core_number=None):\n\n def k_filter(v, k, c):\n return c[v] >= k\n\n return _core_subgraph(G, k_filter, k, core_number)", "def k_shell(G, k=None, core_number=None):\n\n def k_filter(v, k, c):\n return c[v] == k\n\n return _core_subgraph(G, k_filter, k, core_number)", "def generate_scaffolds(self, dataset, cutoff=0.18):\n mols = []\n for ind, smiles in enumerate(dataset.ids):\n mols.append(Chem.MolFromSmiles(smiles))\n n_mols = len(mols)\n fps = [AllChem.GetMorganFingerprintAsBitVect(x, 2, 1024) for x in mols]\n\n scaffold_sets = ClusterFps(fps, cutoff=cutoff)\n scaffold_sets = sorted(scaffold_sets, key=lambda x: -len(x))\n scaffold_sets = [list(sfd) for sfd in scaffold_sets]\n return scaffold_sets", "def get_core_bonds(core_xyz, inp):\n 
core_bonds = []\n\n if inp.core_en:\n dists = cdist(core_xyz, core_xyz)\n if inp.core_shape != \"shell\":\n logger.info(\"\\tBuilding elastic network based on first neighbors...\")\n close_dists = dists <= (2*inp.bead_radius+0.01)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = np.where(close_dists[i])[0]\n if len(close_ndxs) == 1:\n dists_sorted = np.argsort(dists[i])\n close_ndxs = dists_sorted[[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n core_bonds.append([ndx1, ndx2])\n\n else:\n logger.info(\"\\tBuilding elastic network based on six nearest neighbours and one farthest neighbour...\")\n neighboring_bonds = []\n antipodal_bonds = []\n dists_sorted = np.argsort(dists, axis=1)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = dists_sorted[i,[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n neighboring_bonds.append([ndx1, ndx2])\n antipodal_ndx = dists_sorted[i,-1]\n if antipodal_ndx != i and [ndx1, antipodal_ndx] not in core_bonds and [antipodal_ndx, ndx1] not in core_bonds:\n antipodal_bonds.append([ndx1, antipodal_ndx, \"antipodal\"])\n core_bonds = neighboring_bonds + antipodal_bonds\n\n return core_bonds", "def prune_redundant_blocks(hpo_data):\n\n for hpo, hdat in hpo_data.items():\n if len(hdat['blocks']) == 0:\n continue\n\n # First step: make a graph of all blocks where edges indicate overlapping credible intervals\n G = nx.Graph()\n G.add_nodes_from(hdat['blocks'].keys())\n cs_bt_strs = []\n for bid, bdat in hdat['blocks'].items():\n cs_bt_strs += ['{}\\t{}\\t{}\\t{}\\n'.format(*x, bid) for x in bdat['credset_coords']]\n cs_bt = pbt.BedTool(''.join(cs_bt_strs), from_string=True)\n for hit in cs_bt.sort().merge(c=4, o='distinct'):\n bids = hit[3].split(',')\n if len(bids) > 1:\n for bid_a in bids:\n for bid_b in bids:\n if bid_a != bid_b:\n G.add_edge(bid_a, bid_b)\n\n # Second step: resolve subgraphs with multiple nodes\n for g in nx.connected_components(G):\n if len(g) > 1:\n # Gather evidence for each block (significance level and size)\n criteria = {bid : (hdat['blocks'][bid]['credset_max_sig'], \n np.sum([x.length for x in hdat['blocks'][bid]['credset_bt']])) \\\n for bid in g}\n # Keep blocks with higher significance level (GW over FDR)\n # Break ties by taking larger block\n criteria = {k : v for k, v in sorted(criteria.items(), \n key=lambda x: x[1][1], \n reverse=True)}\n criteria = {k : v for k, v in sorted(criteria.items(), \n key=lambda x: x[1][0].lower(), \n reverse=True)}\n for i, bid in enumerate(criteria.keys()):\n if i > 0:\n hpo_data[hpo]['blocks'].pop(bid)\n\n return hpo_data", "def graphCut(img, center, radius, temp, edge, count, editPoints, padList, theta_width, phi_width):\r\n\r\n\r\n \"\"\"Important note. The labeled image is referred to as temp, or self.temp in the interface.\r\n This stands for template. 
The previously labled image is fed back into the graphcut\"\"\"\r\n \r\n \"\"\"create polar images and cost arrays\"\"\"\r\n \r\n print \"RUNNING GRAPHCUT!\"\r\n img= padImage(img, padList)\r\n temp= padImage(temp, padList)\r\n edge= padImage(edge, padList)\r\n center= padCenter(center, padList)\r\n \r\n polar_img= img2polar(img, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n\r\n \r\n \r\n polar_grad, y, x = np.gradient(np.array(polar_img, dtype='float'))\r\n \"\"\"Lockett 100416 replacement line below to not use gradient when the image has a surface label\"\"\"\r\n \"\"\"polar_grad = -1 * np.array(polar_img, dtype='float')\"\"\"\r\n \r\n \r\n polar_cost = -1 * np.ones(polar_img.shape)\r\n for r in range(1,radius):\r\n polar_cost[r]= polar_grad[r]-polar_grad[r-1]\r\n\r\n \r\n \r\n \"\"\"\r\n flip the cost image upside down. This is so that the base set is at the bottom of the array\r\n since the graphcut cuts from top to bottom, this inversion is necessary.\r\n \"\"\"\r\n polar_cost_inv=polar_cost[::-1,:,:]\r\n\r\n print \"CONSTRUCTING GRAPH EDGES... \"\r\n \r\n \"\"\"construct the graph using PyMaxFlow\"\"\"\r\n g=maxflow.GraphFloat()\r\n nodeids=g.add_grid_nodes(polar_img.shape)\r\n structure=np.zeros((3,3,3))\r\n structure[2]= np.array([[0,10000,0],[10000, 10000, 10000],[0, 10000, 0]])\r\n g.add_grid_edges(nodeids, structure=structure, symmetric=False)\r\n\r\n \r\n \"\"\"convert the previously labeled image (temp) into a polar transform image. Take the labels and\r\n give them high cost edge weights so the segmentation avoids previously labeled objects\"\"\"\r\n polar_lbl_img= img2polar(temp, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n polar_lbl_img_inv= polar_lbl_img[::-1,:]\r\n \r\n lbl_caps= polar_lbl_img_inv>0\r\n self_caps= (polar_lbl_img_inv==count)\r\n lbl_caps-=self_caps\r\n lbl_source_caps= np.zeros(lbl_caps.shape)\r\n lbl_sink_caps= lbl_caps*10000\r\n g.add_grid_tedges(nodeids, lbl_source_caps, lbl_sink_caps)\r\n \r\n structure2= 10000*np.array([[0,0,0],[0,0,1],[0,1,0]])\r\n g.add_grid_edges(nodeids[radius-1], structure=structure2, symmetric=True)\r\n\r\n \"\"\"add terminal edges using two arrays whose elemnts are the costs of the edges from the source and to the\r\n sink\"\"\"\r\n print \"CONSTRUCTING GRAPH TEDGES...\"\r\n sinkcaps= polar_cost_inv * (polar_cost_inv>=0)\r\n sourcecaps = -1 * polar_cost_inv * (polar_cost_inv<0)\r\n g.add_grid_tedges(nodeids, sourcecaps, sinkcaps)\r\n\r\n \r\n\r\n \r\n \"\"\"accounts for edit points. 
Takes every point in the edit point list, converts it to its spherical coordinate, and adds high cost\r\n edges in the column of that edit point inverts the x and y coordinates of the center\"\"\"\r\n center= np.array((center[0], center[2], center[1]))\r\n if len(editPoints)!=0:\r\n for coords in editPoints:\r\n\r\n \r\n rad= math.sqrt((center[0]-coords[0])**2+ (center[1]-coords[2])**2 + (center[2]-coords[1])**2) \r\n theta= math.atan2(center[2]-coords[1], coords[2]-center[1])\r\n print str((coords[0]-center[0])/(rad+1))\r\n phi=math.acos(float(coords[0]-center[0])/(rad+1))\r\n if theta<0:\r\n theta=2*math.pi+ theta\r\n theta= theta_width- theta_width*theta/(2*math.pi)-1\r\n phi= phi_width*phi/(math.pi)-1\r\n rad= radius- rad\r\n print \"POLAR COORDS: \" + str((rad, theta, phi))\r\n\r\n for r in range(0, radius):\r\n if r<=rad:\r\n g.add_tedge(nodeids[r, theta, phi], 0, 10000)\r\n \r\n else:\r\n g.add_tedge(nodeids[r, theta, phi], 10000, 0) \r\n\r\n\r\n\r\n\r\n print \"CUTTING GRAPH...\"\r\n g.maxflow()\r\n\r\n \"\"\"s-t mincut of graph. This is converted to cartesian coordinates with the function img2cart. The\r\n images are also closed to eliminate spotty areas\"\"\"\r\n \r\n print \"STARTING CARTESIAN TRANSFORM...\"\r\n polar_img_seg= np.invert(g.get_grid_segments(nodeids)[::-1,:,:])\r\n\r\n \r\n edge_img= np.zeros(img.shape)\r\n seg_img= ndimage.binary_closing(img2cart(img, polar_img_seg, center, radius, theta_width, phi_width))\r\n \r\n \r\n \"\"\"create an edge image of the segmented object\"\"\"\r\n strel=np.ones((3,3,3))\r\n erode_img=ndimage.binary_erosion(seg_img, strel)\r\n edge_img=np.logical_xor(seg_img, erode_img)\r\n \r\n\r\n \"\"\"shears the segmentation image and edge if padding was applied\"\"\"\r\n \r\n\r\n \"\"\"add the object back on to the template image (and the edge image back on the template edge)\r\n If there was an editpoint involved, remove the previous segmentation of that object and add back\r\n on the edited object\"\"\"\r\n if len(editPoints)!=0:\r\n del_img= (temp==count)*count\r\n temp-=del_img\r\n\r\n del_edge_img= (edge==count)*count\r\n edge-= del_edge_img\r\n\r\n\r\n temp+=seg_img*count\r\n edge+=edge_img*count\r\n\r\n temp= shearImage(temp, padList)\r\n edge= shearImage(edge, padList)\r\n \r\n \r\n\r\n print \"FINISHED!\"\r\n \r\n return temp, edge", "def brh_cogs(DB, species, missing_factor=0.0, seed_sp=None, min_score=0):\n log.log(26, \"Searching BRH orthologs\")\n species = set(map(str, species))\n\n min_species = len(species) - round(missing_factor * len(species))\n \n if seed_sp == \"auto\":\n # seed2size = get_sorted_seeds(seed_sp, species, species, min_species, DB)\n # sort_seeds = sorted([(len(size), sp) for sp, size in seed2size.iteritems()])\n # sp_to_test = [sort_seeds[-1][1]]\n sp_to_test = list(species)\n elif seed_sp == \"largest\":\n cmd = \"\"\"SELECT taxid, size FROM species\"\"\"\n db.seqcursor.execute(cmd)\n sp2size = {}\n for tax, counter in db.seqcursor.fetchall():\n if tax in species: \n sp2size[tax] = counter\n \n sorted_sp = sorted(sp2size.items(), lambda x,y: cmp(x[1],y[1]))\n log.log(24, sorted_sp[:6])\n largest_sp = sorted_sp[-1][0]\n sp_to_test = [largest_sp]\n log.log(28, \"Using %s as search seed. 
Proteome size=%s genes\" %\\\n (largest_sp, sp2size[largest_sp]))\n else:\n sp_to_test = [str(seed_sp)]\n \n # The following loop tests each possible seed if none is\n # specified.\n log.log(28, \"Detecting Clusters of Orthologs groups (COGs)\")\n log.log(28, \"Min number of species per COG: %d\" %min_species)\n cogs_selection = []\n \n for j, seed in enumerate(sp_to_test):\n log.log(26,\"Testing new seed species:%s (%d/%d)\", seed, j+1, len(sp_to_test))\n species_side1 = ','.join(map(quote, [s for s in species if str(s)>str(seed)]))\n species_side2 = ','.join(map(quote, [s for s in species if str(s)<str(seed)]))\n pairs1 = []\n pairs2 = []\n # Select all ids with matches in the target species, and\n # return the total number of species covered by each of\n # such ids.\n if species_side1 != \"\":\n cmd = \"\"\"SELECT seqid1, taxid1, seqid2, taxid2 from ortho_pair WHERE\n taxid1=\"%s\" AND taxid2 IN (%s) \"\"\" %\\\n (seed, species_side1)\n DB.orthocursor.execute(cmd)\n pairs1 = DB.orthocursor.fetchall()\n\n if species_side2 != \"\":\n cmd = \"\"\"SELECT seqid2, taxid2, seqid1, taxid1 from ortho_pair WHERE\n taxid1 IN (%s) AND taxid2 = \"%s\" \"\"\" %\\\n (species_side2, seed)\n\n #taxid2=\"%s\" AND taxid1 IN (%s) AND score >= %s\"\"\" %\\\n #(seed, species_side2, min_score)\n DB.orthocursor.execute(cmd)\n pairs2 = DB.orthocursor.fetchall()\n\n cog_candidates = defaultdict(set)\n for seq1, sp1, seq2, sp2 in pairs1 + pairs2:\n s1 = (sp1, seq1)\n s2 = (sp2, seq2)\n cog_candidates[(sp1, seq1)].update([s1, s2])\n\n all_cogs = [cand for cand in cog_candidates.values() if\n len(cand) >= min_species]\n \n cog_sizes = [len(cog) for cog in all_cogs]\n cog_spsizes = [len(set([e[0] for e in cog])) for cog in all_cogs]\n\n if [1 for i in xrange(len(cog_sizes)) if cog_sizes[i] != cog_spsizes[i]]:\n # for i in xrange(len(cog_sizes)):\n # if cog_sizes[i] != cog_spsizes[i]:\n # print cog_sizes[i], cog_spsizes[i]\n # raw_input()\n raise ValueError(\"Inconsistent COG found\")\n \n if cog_sizes: \n cogs_selection.append([seed, all_cogs])\n log.log(26, \"Found %d COGs\" % len(all_cogs))\n\n def _sort_cogs(cogs1, cogs2):\n cogs1 = cogs1[1] # discard seed info\n cogs2 = cogs2[1] # discard seed info \n cog_sizes1 = [len(cog) for cog in cogs1]\n cog_sizes2 = [len(cog) for cog in cogs2]\n mx1, mn1, avg1 = _max(cog_sizes1), _min(cog_sizes1), round(_mean(cog_sizes1))\n mx2, mn2, avg2 = _max(cog_sizes2), _min(cog_sizes2), round(_mean(cog_sizes2))\n \n # we want to maximize all these values in the following order:\n for i, j in ((mx1, mx2), (avg1, avg2), (len(cogs1), len(cogs2))):\n v = -1 * cmp(i, j)\n if v != 0:\n break\n return v\n \n log.log(26, \"Finding best COG selection...\")\n cogs_selection.sort(_sort_cogs)\n lines = []\n for seed, all_cogs in cogs_selection:\n cog_sizes = [len(cog) for cog in all_cogs]\n mx, mn, avg = max(cog_sizes), min(cog_sizes), round(_mean(cog_sizes))\n lines.append([seed, mx, mn, avg, len(all_cogs)])\n analysis_txt = StringIO()\n print_as_table(lines[:25], stdout=analysis_txt,\n header=[\"Seed\",\"largest COG\", \"smallest COGs\", \"avg COG size\", \"total COGs\"])\n log.log(28, \"Analysis details:\\n\"+analysis_txt.getvalue())\n best_seed, best_cogs = cogs_selection[0]\n cog_sizes = [len(cog) for cog in best_cogs]\n\n # Not necessary since they will be sorted differently later on\n #best_cogs.sort(lambda x,y: cmp(len(x), len(y)), reverse=True)\n \n if max(cog_sizes) < len(species):\n raise ValueError(\"Current COG selection parameters do not permit to cover all species\")\n\n 
recoded_cogs = []\n for cog in best_cogs:\n named_cog = map(lambda x: \"%s%s%s\" %(x[0], GLOBALS[\"spname_delimiter\"],x[1]), cog)\n recoded_cogs.append(named_cog)\n\n return recoded_cogs, analysis_txt.getvalue()", "def crop_segs(segs, cropmin, cropmax):\n cropmin = np.array(cropmin)\n cropmax = np.array(cropmax)\n i = 0\n while i < len(segs)-1:\n s = seg_cropped((segs[i], segs[i+1]), cropmin, cropmax)\n if s is None:\n if i == 0:\n del segs[0]\n else:\n del segs[i+1]\n else:\n segs[i], segs[i+1] = s\n i += 1", "def plotGlassbrainSlices(niftipath, mnipath, ortho='z', nRows=2, nCuts=6,\n threshpos=0, threshneg=0, figLayout='Both',\n showLRannot=True, findOptimalCut=True,\n imageType='svg'):\n\n # Initiation of relevant parameters\n img = nb.load(niftipath)\n lineW = 2. / (nRows + int((figLayout == 'Brain' or figLayout == 'Both')))\n\n # Reduce 4D volume to 3D\n if len(img.shape) == 4:\n data4D = img.get_data()\n data4D = data4D.reshape(data4D.shape[:-1])\n img = Nifti1Image(data4D, img.get_affine())\n\n # Get voxel extend in all directions\n dirMin = np.dot(img.get_affine(), [0, 0, 0, 1])[:3]\n dirMax = np.dot(img.get_affine(),\n np.array(img.shape).tolist() + [1])[:3]\n\n if findOptimalCut:\n # Find cuts automatically\n cut_coords = find_cut_slices(img, direction=ortho, n_cuts=nCuts)\n else:\n # Split orientation in x-equal parts\n cut_coords = getEqualSpacing(dirMin, dirMax, ortho, nCuts)\n\n # Split cuts according nRows\n cut_coords = [cut_coords[int(i * len(cut_coords) / np.float(nRows)):\n int((i + 1) * len(cut_coords) / np.float(nRows))]\n for i in range(nRows)]\n\n # Create Slices\n for i in range(nRows):\n\n # Create axes for plotting\n ax = plt.subplot(nRows + int((figLayout == 'Brain' or\n figLayout == 'Both')),\n 1, i + 1)\n\n # Plot the white background for all slices as a zeros value brain\n # (without it, the view focuses around the first area plotted)\n zerobrain = Nifti1Image(img.get_data() * 0, img.get_affine())\n brain = plot_roi(\n zerobrain, zerobrain, colorbar=False, cut_coords=cut_coords[i],\n display_mode=ortho, alpha=1, draw_cross=False, cmap=plt.cm.gray,\n black_bg=False, axes=ax, annotate=False)\n\n # Plot positive values\n posdata = np.copy(img.get_data())\n posdata[posdata <= threshpos] = 0.001 # = 0 crashes contour function\n posbrain = Nifti1Image(posdata, img.get_affine())\n brain.add_contours(\n posbrain, filled=False, cmap=plt.cm.hot, alpha=1, linewidths=lineW)\n\n # Plot negative values\n negdata = np.copy(img.get_data())\n negdata[negdata >= -threshneg] = 0.001 # = 0 crashes contour function\n negbrain = Nifti1Image(negdata, img.get_affine())\n brain.add_contours(\n negbrain, filled=False, cmap=plt.cm.winter, alpha=1,\n linewidths=lineW)\n\n # Plot outer MNI contours\n brain.add_contours(\n smooth_img(mnipath, 4), alpha=1, filled=False,\n levels=[100], linewidths=lineW, cmap=plt.cm.gray)\n\n # Plot inner MNI contours\n brain.add_contours(\n nb.load(mnipath), alpha=0.8, levels=[5000], linewidths=lineW,\n cmap=plt.cm.gray)\n\n # Add annotation if requested\n if figLayout == 'Both' or figLayout == 'Number':\n brain.annotate(left_right=showLRannot, size=int(12 * lineW))\n\n # Plot overview Brain at the bottom\n if figLayout == 'Brain' or figLayout == 'Both':\n\n # Create axes for overview brain\n ax = plt.subplot(nRows + 1, 1, nRows + 1)\n\n # Find overview view direction\n if ortho == 'z':\n direction = 'x'\n elif ortho == 'x':\n direction = 'z'\n elif ortho == 'y':\n direction = 'z'\n\n # Plot the white backgroundas a zeros value brain\n brain = plot_roi(\n 
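The COG-candidate stage of brh_cogs above reduces to pooling ortholog pairs around each seed-species gene and keeping pools that span enough species. A toy restatement of that pooling; the (seqid, taxid) rows are invented stand-ins for the ortho_pair query results:

    from collections import defaultdict

    pairs = [("gA", "sp1", "gB", "sp2"),
             ("gA", "sp1", "gC", "sp3"),
             ("gX", "sp1", "gY", "sp2")]
    min_species = 3

    cog_candidates = defaultdict(set)
    for seq1, sp1, seq2, sp2 in pairs:
        cog_candidates[(sp1, seq1)].update([(sp1, seq1), (sp2, seq2)])

    # Keep candidates covering at least min_species distinct species:
    # here only the pool seeded on ("sp1", "gA") survives (sp1, sp2, sp3).
    all_cogs = [cog for cog in cog_candidates.values()
                if len({sp for sp, _ in cog}) >= min_species]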
zerobrain, zerobrain, colorbar=False, cut_coords=[0],\n display_mode=direction, alpha=1, draw_cross=False,\n cmap=plt.cm.gray, black_bg=False, axes=ax, annotate=False)\n\n # Plot positive values\n brain.add_contours(\n posbrain, filled=False, cmap=plt.cm.hot, alpha=1, linewidths=lineW)\n\n # Plot negative values\n brain.add_contours(\n negbrain, filled=False, cmap=plt.cm.winter, alpha=1,\n linewidths=lineW)\n\n # Plot outer MNI contours\n brain.add_contours(\n smooth_img(mnipath, 4), alpha=1, filled=False,\n levels=[100], linewidths=lineW, cmap=plt.cm.gray)\n\n # Plot inner MNI contours\n brain.add_contours(\n nb.load(mnipath), alpha=0.8, levels=[5000], linewidths=lineW,\n cmap=plt.cm.gray)\n\n # Plot the line indicating the cut\n for i in np.array(cut_coords).flatten():\n if ortho == 'z' or ortho == 'y':\n ax.plot([-100, 100], [i, i], 'k-', lw=lineW)\n elif ortho == 'x':\n ax.plot([i, i], [-100, 100], 'k-', lw=lineW)\n\n if ortho == 'z':\n ax.axis((-300.0, 300.0, dirMin[2], dirMax[2]))\n elif ortho == 'y':\n ax.axis((-300.0, 300.0, dirMin[1], dirMax[1]))\n elif ortho == 'x':\n stretcher = (nRows + 1) / 2.\n ax.axis((-300.0 * stretcher, 300.0 * stretcher, -100.0, 100.0))\n\n # Add annotation if requested\n if figLayout == 'Both' or figLayout == 'Number':\n brain.annotate(left_right=showLRannot, size=int(12 * lineW))\n\n # Get file prefix\n if niftipath.endswith('.nii'):\n filename = opb(niftipath)[:-4]\n elif niftipath.endswith('.nii.gz'):\n filename = opb(niftipath)[:-7]\n\n # Create output folder\n path2Figure = opj(os.path.split(os.path.realpath(niftipath))[0], 'figures')\n if not os.path.exists(opj(path2Figure)):\n os.makedirs(opj(path2Figure))\n\n # Save figure\n figname = '_'.join([filename, '%s-cut' % ortho])\n plt.savefig(opj(path2Figure, '%s.%s' % (figname, imageType)))\n plt.clf()", "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,174.68+globalconfig.CUTLINE_Y_OFFSET]])\r\n for line in range(0,globalconfig.Y_ARRAY_NUM):\r\n cutlineset.append([[0.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[-3.0+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n 
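plotGlassbrainSlices above spreads its nCuts slice positions over nRows subplot rows with plain integer-index arithmetic. The same split in isolation, with invented coordinates:

    cut_coords = [-40.0, -25.0, -10.0, 5.0, 20.0, 35.0]  # e.g. from find_cut_slices
    nRows = 2
    n = len(cut_coords)
    rows = [cut_coords[int(i * n / nRows):int((i + 1) * n / nRows)]
            for i in range(nRows)]
    # rows == [[-40.0, -25.0, -10.0], [5.0, 20.0, 35.0]]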
cutlineset.append([[171.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET],[174.68+globalconfig.CUTLINE_X_OFFSET,globalconfig.Y_BLANK+line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_Y_OFFSET]])\r\n return cutlineset", "def cut_kmer(sequence, k_mer):\n for i in range(0, len(sequence)-k_mer + 1):\n yield sequence[i:i+k_mer]", "def slice_ontology(rnd, ontology, valid_proportion, test_proportion, zero_shot_triples=[]):\n ont_valid = ConjunctiveGraph()\n ont_test = ConjunctiveGraph()\n ont_train = ConjunctiveGraph()\n valid_size = int(np.floor(valid_proportion * len(ontology)))\n\n test_size = int(np.floor(test_proportion * len(ontology)))\n # add all zero_shot entities to test set and remove from overall ontology\n if len(zero_shot_triples) > 0:\n remove_triples = []\n for zero_shot_triple in zero_shot_triples:\n ont_test.add(zero_shot_triple)\n remove_triples.append(zero_shot_triple)\n for s,p,o in remove_triples:\n ontology.remove((s,p,o))\n n_test = len(ont_test)\n if n_test > test_size:\n print(\"More zero shot triples than test proportion\")\n sys.exit(0)\n # remaining test size\n test_size = test_size - n_test\n # random splits\n slice_indices = rnd.choice(range(0, len(ontology)), valid_size + test_size, replace=False)\n valid_indices = slice_indices[:valid_size]\n test_indices = slice_indices[valid_size:]\n for i, (s, p, o) in enumerate(sorted(ontology.triples((None, None, None)))):\n if i in valid_indices:\n ont_valid.add((s, p, o))\n elif i in test_indices:\n ont_test.add((s, p, o))\n else:\n ont_train.add((s, p, o))\n return ont_train, ont_valid, ont_test", "def cutSec(ppm, X, start, stop, featureMask):\n\tflip=0\n\tif ppm[0]>ppm[-1]:\n\t\tflip=1\n\t\tppm = ppm[::-1]\n\t\tX = X[:, ::-1]\n \n #find first entry in ppm with >='start' valu\n\tstart = (ppm>=start).nonzero()\n\tstart = start[0][0]#first entry\n\tstop = (ppm<=stop).nonzero()\n\tstop = stop[0][-1]#last entry\n\n#currently setting featureMask will get rid of peaks in start:stop region BUT it also marks as excluded so have removed as inaccurately marking for exclusion when all we want to do is remove from intensityData not mark as exluded\n\ttry:\n\t\tfeatureMask[0,start:stop]=False # this may only occur on unit test data, not sure need to check but either way was causing issue\n\texcept:\n\t\tfeatureMask[start:stop]=False\n\tif flip==1:\n\t\tppm = ppm[::-1]\n\t\tX = X[:, ::-1]\n\treturn ppm, X, featureMask\n\tpass", "def get_param_groups(core, selection=\"kep\"):\n if selection == \"all\":\n selection = \"kep_binary_gr_pm_spin_pos_noise_dm_chrom_dmx_fd\"\n kep_pars = [\n \"PB\",\n \"PBDOT\",\n \"T0\",\n \"A1\",\n \"OM\",\n \"E\",\n \"ECC\",\n \"EPS1\",\n \"EPS2\",\n \"EPS1DOT\",\n \"EPS2DOT\",\n \"FB\",\n \"SINI\",\n \"COSI\" \"MTOT\",\n \"M2\",\n \"XDOT\",\n \"X2DOT\",\n \"EDOT\",\n \"KOM\",\n \"KIN\",\n \"TASC\",\n ]\n\n mass_pars = [\"M2\", \"SINI\", \"COSI\", \"PB\", \"A1\"]\n\n noise_pars = [\"efac\", \"ecorr\", \"equad\", \"gamma\", \"A\"]\n\n pos_pars = [\"RAJ\", \"DECJ\", \"ELONG\", \"ELAT\", \"BETA\", \"LAMBDA\", \"PX\"]\n\n spin_pars = [\"F\", \"F0\", \"F1\", \"F2\", \"P\", \"P1\", \"Offset\"]\n\n fd_pars = [\"FD1\", \"FD2\", \"FD3\", \"FD4\", \"FD5\"]\n\n gr_pars = [\n \"H3\",\n \"H4\",\n \"OMDOT\",\n \"OM2DOT\",\n \"XOMDOT\",\n \"PBDOT\",\n \"XPBDOT\",\n \"GAMMA\",\n \"PPNGAMMA\",\n \"DR\",\n \"DTHETA\",\n ]\n\n pm_pars = [\"PMDEC\", \"PMRA\", \"PMELONG\", \"PMELAT\", \"PMRV\", \"PMBETA\", 
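cut_kmer above yields every window of length k_mer; the range bound len(sequence) - k_mer + 1 is what keeps the final window inside the string. A quick check of the generator on a made-up sequence:

    def cut_kmer(sequence, k_mer):
        for i in range(0, len(sequence) - k_mer + 1):
            yield sequence[i:i + k_mer]

    assert list(cut_kmer("ACGTAC", 3)) == ["ACG", "CGT", "GTA", "TAC"]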
\"PMLAMBDA\"]\n\n dm_pars = [\n \"dm_gp_log10_sigma\",\n \"dm_gp_log10_ell\",\n \"dm_gp_log10_gam_p\",\n \"dm_gp_log10_p\",\n \"dm_gp_log10_ell2\",\n \"dm_gp_log10_alpha_wgt\",\n \"n_earth\",\n ]\n\n chrom_gp_pars = [\n \"chrom_gp_log10_sigma\",\n \"chrom_gp_log10_ell\",\n \"chrom_gp_log10_gam_p\",\n \"chrom_gp_log10_p\",\n \"chrom_gp_log10_ell2\",\n \"chrom_gp_log10_alpha_wgt\",\n ]\n\n excludes = [\"lnlike\", \"lnprior\", \"chain_accept\", \"pt_chain_accept\"]\n\n selection_list = selection.split(\"_\")\n plot_params = defaultdict(list)\n for param in core.params:\n split_param = param.split(\"_\")[-1]\n if \"kep\" in selection_list:\n if split_param in kep_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(split_param)\n if \"mass\" in selection_list:\n if split_param in mass_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(split_param)\n if \"pos\" in selection_list:\n if split_param in pos_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(split_param)\n if \"noise\" in selection_list:\n if split_param in noise_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append((\" \").join(param.split(\"_\")[1:]))\n if \"spin\" in selection_list:\n if split_param in spin_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(split_param)\n if \"gr\" in selection_list:\n if split_param in gr_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(split_param)\n if \"pm\" in selection_list:\n if split_param in pm_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(split_param)\n if \"fd\" in selection_list:\n if split_param in fd_pars and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(split_param)\n if \"dm\" in selection_list:\n if (\"_\").join(param.split(\"_\")[1:]) in dm_pars and (\"_\").join(\n param.split(\"_\")[1:]\n ) not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(param) # (\" \").join(param.split(\"_\")[-2:]))\n if \"chrom\" in selection_list:\n if (\"_\").join(param.split(\"_\")[1:]) in chrom_gp_pars and (\"_\").join(\n param.split(\"_\")[1:]\n ) not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(param)\n elif param in dm_pars and param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(param)\n if \"dmx\" in selection_list:\n if \"DMX_\" in param and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append((\"_\").join(param.split(\"_\")[-2:]))\n if \"excludes\" in selection_list:\n if split_param in excludes and split_param not in plot_params:\n plot_params[\"par\"].append(param)\n plot_params[\"title\"].append(param)\n return plot_params", "def socnet_pipeline(self, subset: int = None):\n\n return SOCnet(self.data, self.edgelist[slice(subset)])", "def slice_reads(reads, max_coverage):\n\t\n\tSEED = 448\n\trandom.seed(SEED)\n\tshuffled_indices = list(range(len(reads)))\n\trandom.shuffle(shuffled_indices)\n\n\tposition_list = reads.get_positions()\n\tlogger.info('Found %d SNP positions', len(position_list))\n\n\t# dictionary to map SNP position to its index\n\tposition_to_index = { 
position: index for index, position in enumerate(position_list) }\n\n\t# List of slices, start with one empty slice ...\n\tslices = [IndexSet()]\n\t# ... and the corresponding coverages along each slice\n\tslice_coverages = [CoverageMonitor(len(position_list))]\n\tskipped_reads = 0\n\taccessible_positions = set()\n\tfor index in shuffled_indices:\n\t\tread = reads[index]\n\t\t# Skip reads that cover only one SNP\n\t\tif len(read) < 2:\n\t\t\tskipped_reads += 1\n\t\t\tcontinue\n\t\tfor position, base, allele in read:\n\t\t\taccessible_positions.add(position)\n\t\tfirst_position, first_base, first_allele = read[0]\n\t\tlast_position, last_base, last_allele = read[len(read)-1]\n\t\tbegin = position_to_index[first_position]\n\t\tend = position_to_index[last_position] + 1\n\t\tslice_id = 0\n\t\twhile True:\n\t\t\t# Does current read fit into this slice?\n\t\t\tif slice_coverages[slice_id].max_coverage_in_range(begin, end) < max_coverage:\n\t\t\t\tslice_coverages[slice_id].add_read(begin, end)\n\t\t\t\tslices[slice_id].add(index)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tslice_id += 1\n\t\t\t\t# do we have to create a new slice?\n\t\t\t\tif slice_id == len(slices):\n\t\t\t\t\tslices.append(IndexSet())\n\t\t\t\t\tslice_coverages.append(CoverageMonitor(len(position_list)))\n\tlogger.info('Skipped %d reads that only cover one SNP', skipped_reads)\n\n\tunphasable_snps = len(position_list) - len(accessible_positions)\n\tif position_list:\n\t\tlogger.info('%d out of %d variant positions (%.1d%%) do not have a read '\n\t\t\t'connecting them to another variant and are thus unphasable',\n\t\t\tunphasable_snps, len(position_list),\n\t\t\t100. * unphasable_snps / len(position_list))\n\n\t# Print stats\n\tfor slice_id, index_set in enumerate(slices):\n\t\tlogger.info('Slice %d contains %d reads', slice_id, len(index_set))\n\n\treturn reads.subset(slices[0])", "def cutout(od,\n varList = None,\n YRange = None,\n XRange = None,\n add_Hbdr = False,\n mask_outside = False,\n ZRange = None,\n add_Vbdr = False,\n timeRange = None,\n timeFreq = None,\n sampMethod = 'snapshot',\n dropAxes = False):\n \n # Check\n for wrong_dim in ['mooring', 'station', 'particle']:\n if wrong_dim in od._ds.dims and (XRange is not None or YRange is not None):\n raise ValueError('`cutout` cannot subsample in the horizontal plain oceandatasets with dimension [{}]'.format(wrong_dim))\n \n # Convert variables to numpy arrays and make some check\n if not isinstance(od, _ospy.OceanDataset):\n raise TypeError('`od` must be OceanDataset')\n \n if varList is not None:\n varList = _np.asarray(varList, dtype='str')\n if varList.ndim == 0: varList = varList.reshape(1)\n elif varList.ndim >1: raise TypeError('Invalid `varList`')\n \n if not isinstance(add_Hbdr, (float, int, bool)):\n raise TypeError('`add_Hbdr` must be float, int, or bool')\n \n if not isinstance(mask_outside, bool):\n raise TypeError('`add_Hbdr` must be bool')\n \n if YRange is not None:\n YRange = _np.asarray(YRange, dtype=od._ds['YG'].dtype)\n if YRange.ndim == 0: YRange = YRange.reshape(1)\n elif YRange.ndim >1: raise TypeError('Invalid `YRange`')\n Ymax = od._ds['YG'].max().values\n Ymin = od._ds['YG'].min().values\n if any(YRange<Ymin) or any(YRange>Ymax):\n _warnings.warn(\"\\nThe Y range of the oceandataset is: {}\"\n \"\\nYRange has values outside the oceandataset range.\".format([Ymin, Ymax]), stacklevel=2)\n \n if XRange is not None:\n XRange = _np.asarray(XRange, dtype=od._ds['XG'].dtype)\n if XRange.ndim == 0: XRange = XRange.reshape(1)\n elif XRange.ndim >1: raise 
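slice_reads above is a first-fit greedy pass: each shuffled read goes into the first slice whose running coverage stays below max_coverage, and a new slice is opened whenever none fits. A compact restatement with plain arrays; the (begin, end) read intervals are invented:

    import numpy as np

    reads = [(0, 3), (1, 4), (0, 2), (3, 5)]   # intervals over 5 variant positions
    n_positions, max_coverage = 5, 2

    slices = [[]]
    coverages = [np.zeros(n_positions, dtype=int)]
    for begin, end in reads:
        slice_id = 0
        while coverages[slice_id][begin:end].max() >= max_coverage:
            slice_id += 1
            if slice_id == len(slices):        # no slice fits: open a new one
                slices.append([])
                coverages.append(np.zeros(n_positions, dtype=int))
        coverages[slice_id][begin:end] += 1
        slices[slice_id].append((begin, end))
    # slices == [[(0, 3), (1, 4), (3, 5)], [(0, 2)]]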
TypeError('Invalid `XRange`')\n Xmax = od._ds['XG'].max().values\n Xmin = od._ds['XG'].min().values\n if any(XRange<Xmin) or any(XRange>Xmax):\n _warnings.warn(\"\\nThe X range of the oceandataset is: {}\"\n \"\\nXRange has values outside the oceandataset range.\".format([Xmin, Xmax]), stacklevel=2)\n if ZRange is not None:\n ZRange = _np.asarray(ZRange, dtype=od._ds['Zp1'].dtype)\n if ZRange.ndim == 0: ZRange = ZRange.reshape(1)\n elif ZRange.ndim >1: raise TypeError('Invalid `ZRange`')\n Zmax = od._ds['Zp1'].max().values\n Zmin = od._ds['Zp1'].min().values\n if any(ZRange<Zmin) or any(ZRange>Zmax):\n _warnings.warn(\"\\nThe Z range of the oceandataset is: {}\"\n \"\\nZRange has values outside the the oceandataset range.\".format([Zmin, Zmax]), stacklevel=2)\n \n if timeRange is not None:\n timeRange = _np.asarray(timeRange, dtype=od._ds['time'].dtype)\n if timeRange.ndim == 0: timeRange = timeRange.reshape(1)\n elif timeRange.ndim >1: raise TypeError('Invalid `timeRange`')\n timemax = od._ds['time'].max().values\n timemin = od._ds['time'].min().values\n if any(timeRange<timemin) or any(timeRange>timemax):\n _warnings.warn(\"\\nThe time range of the oceandataset is: {}\"\n \"\\ntimeRange has values outside the the oceandataset range.\".format([timemin, timemax]), stacklevel=2)\n \n if not isinstance(timeFreq, (str, type(None))):\n raise TypeError('`timeFreq` must None or str')\n \n sampMethod_list = ['snapshot', 'mean']\n if sampMethod not in sampMethod_list:\n raise ValueError('[{}] is not an available `sampMethod`.'\n '\\nOptions: {}'.format(sampMethod, sampMethod_list))\n \n if not isinstance(dropAxes, bool):\n dropAxes = _np.asarray(dropAxes, dtype='str')\n if dropAxes.ndim == 0: dropAxes = dropAxes.reshape(1)\n elif dropAxes.ndim >1: raise TypeError('Invalid `dropAxes`')\n axis_error = [axis for axis in dropAxes if axis not in od.grid_coords]\n if len(axis_error)!=0:\n raise ValueError('{} are not in od.grid_coords and can not be dropped'.format(axis_error))\n dropAxes = {d: od.grid_coords[d] for d in dropAxes}\n elif dropAxes is True:\n dropAxes = od.grid_coords\n if YRange is None : dropAxes.pop('Y', None)\n if XRange is None : dropAxes.pop('X', None)\n if ZRange is None : dropAxes.pop('Z', None)\n if timeRange is None: dropAxes.pop('time', None)\n else:\n dropAxes = {}\n \n # Message\n print('Cutting out the oceandataset.')\n \n # Copy\n od = _copy.copy(od)\n \n # Unpack\n ds = od._ds\n periodic = od.grid_periodic\n \n # ---------------------------\n # Horizontal CUTOUT\n # ---------------------------\n \n if add_Hbdr is True:\n add_Hbdr = (_np.mean([_np.fabs(od._ds['XG'].max() - od._ds['XG'].min()),\n _np.fabs(od._ds['YG'].max() - od._ds['YG'].min())]) / \n _np.mean([len(od._ds['X']), len(od._ds['Y'])]))\n elif add_Hbdr is False:\n add_Hbdr = 0\n \n if add_Vbdr is True:\n add_Vbdr = _np.fabs(od._ds['Zp1'].diff('Zp1')).max().values\n elif add_Vbdr is False:\n add_Vbdr = 0\n \n # Initialize horizontal mask\n if XRange is not None or YRange is not None:\n maskH = _xr.ones_like(ds['XG'])\n\n if YRange is not None: \n # Use arrays\n YRange = _np.asarray([_np.min(YRange)-add_Hbdr, _np.max(YRange)+add_Hbdr]).astype(ds['YG'].dtype)\n\n # Get the closest \n for i, Y in enumerate(YRange):\n diff = _np.fabs(ds['YG']-Y)\n YRange[i] = ds['YG'].where(diff==diff.min()).min().values \n maskH = maskH.where(_np.logical_and(ds['YG']>=YRange[0], ds['YG']<=YRange[-1]), 0)\n maskHY = maskH\n\n if XRange is not None:\n # Use arrays\n XRange = _np.asarray([_np.min(XRange)-add_Hbdr, 
_np.max(XRange)+add_Hbdr]).astype(ds['XG'].dtype)\n\n # Get the closest \n for i, X in enumerate(XRange):\n diff = _np.fabs(ds['XG']-X)\n XRange[i] = ds['XG'].where(diff==diff.min()).min().values \n maskH = maskH.where(_np.logical_and(ds['XG']>=XRange[0], ds['XG']<=XRange[-1]), 0)\n\n # Can't be all zeros\n if maskH.sum()==0: raise ValueError('Zero grid points in the horizontal range')\n\n # Find horizontal indexes\n maskH['Yp1'].values = _np.arange(len(maskH['Yp1']))\n maskH['Xp1'].values = _np.arange(len(maskH['Xp1']))\n dmaskH = maskH.where(maskH, drop=True)\n dYp1 = dmaskH['Yp1'].values\n dXp1 = dmaskH['Xp1'].values\n iY = [_np.min(dYp1), _np.max(dYp1)]\n iX = [_np.min(dXp1), _np.max(dXp1)]\n maskH['Yp1'] = ds['Yp1']\n maskH['Xp1'] = ds['Xp1']\n \n # Original length\n lenY = len(ds['Yp1'])\n lenX = len(ds['Xp1']) \n \n # Indexis\n if iY[0]==iY[1]:\n if 'Y' not in dropAxes:\n if iY[0]>0: iY[0]=iY[0]-1\n else: iY[1]=iY[1]+1\n else: dropAxes.pop('Y', None)\n \n\n if iX[0]==iX[1]:\n if 'X' not in dropAxes:\n if iX[0]>0: iX[0]=iX[0]-1\n else: iX[1]=iX[1]+1\n else: dropAxes.pop('X', None)\n \n # Cutout\n ds = ds.isel(Yp1 = slice(iY[0], iY[1]+1),\n Xp1 = slice(iX[0], iX[1]+1))\n \n if 'X' in dropAxes:\n if iX[0]==len(ds['X']):\n iX[0]=iX[0]-1\n iX[1]=iX[1]-1\n ds = ds.isel(X = slice(iX[0], iX[1]+1))\n elif (('outer' in od._grid.axes['X'].coords and od._grid.axes['X'].coords['outer'].name == 'Xp1') or \n ('left' in od._grid.axes['X'].coords and od._grid.axes['X'].coords['left'].name == 'Xp1')):\n ds = ds.isel(X = slice(iX[0], iX[1]))\n elif 'right' in od._grid.axes['X'].coords and od._grid.axes['X'].coords['right'].name =='Xp1':\n ds = ds.isel(X = slice(iX[0]+1, iX[1]+1)) \n \n if 'Y' in dropAxes:\n if iY[0]==len(ds['Y']):\n iY[0]=iY[0]-1\n iY[1]=iY[1]-1\n ds = ds.isel(Y = slice(iY[0], iY[1]+1))\n elif (('outer' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['outer'].name == 'Yp1') or \n ('left' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['left'].name == 'Yp1')):\n ds = ds.isel(Y = slice(iY[0], iY[1]))\n elif 'right' in od._grid.axes['Y'].coords and od._grid.axes['Y'].coords['right'].name =='Yp1':\n ds = ds.isel(Y = slice(iY[0]+1, iY[1]+1))\n \n # Cut axis can't be periodic\n if (len(ds['Yp1']) < lenY or 'Y' in dropAxes) and 'Y' in periodic: periodic.remove('Y')\n if (len(ds['Xp1']) < lenX or 'X' in dropAxes) and 'X' in periodic: periodic.remove('X')\n \n # ---------------------------\n # Vertical CUTOUT\n # ---------------------------\n \n # Initialize vertical mask\n maskV = _xr.ones_like(ds['Zp1'])\n \n if ZRange is not None:\n # Use arrays\n ZRange = _np.asarray([_np.min(ZRange)-add_Vbdr, _np.max(ZRange)+add_Vbdr]).astype(ds['Zp1'].dtype)\n \n # Get the closest \n for i, Z in enumerate(ZRange):\n diff = _np.fabs(ds['Zp1']-Z)\n ZRange[i] = ds['Zp1'].where(diff==diff.min()).min().values \n maskV = maskV.where(_np.logical_and(ds['Zp1']>=ZRange[0], ds['Zp1']<=ZRange[-1]), 0) \n \n # Find vertical indexes\n maskV['Zp1'].values = _np.arange(len(maskV['Zp1']))\n dmaskV = maskV.where(maskV, drop=True)\n dZp1 = dmaskV['Zp1'].values\n iZ = [_np.min(dZp1), _np.max(dZp1)]\n maskV['Zp1'] = ds['Zp1']\n \n # Original length\n lenZ = len(ds['Zp1']) \n \n # Indexis\n if iZ[0]==iZ[1]:\n if 'Z' not in dropAxes:\n if iZ[0]>0: iZ[0]=iZ[0]-1\n else: iZ[1]=iZ[1]+1\n else: dropAxes.pop('Z', None)\n \n # Cutout\n ds = ds.isel(Zp1 = slice(iZ[0], iZ[1]+1))\n if 'Z' in dropAxes:\n if iZ[0]==len(ds['Z']):\n iZ[0]=iZ[0]-1\n iZ[1]=iZ[1]-1\n ds = ds.isel(Z = slice(iZ[0], iZ[1]+1))\n if 'Zu' in 
ds.dims and len(ds['Zu'])>1:\n ds = ds.sel(Zu=ds['Zp1'].values, method='nearest')\n if 'Zl' in ds.dims and len(ds['Zl'])>1:\n ds = ds.sel(Zl=ds['Zp1'].values, method='nearest')\n \n else:\n ds = ds.isel(Z = slice(iZ[0], iZ[1]))\n \n if 'Zu' in ds.dims and len(ds['Zu'])>1:\n ds = ds.sel(Zu = slice(ds['Zp1'].isel(Zp1=0).values, ds['Zp1'].isel(Zp1=-1).values))\n\n if 'Zl' in ds.dims and len(ds['Zl'])>1:\n ds = ds.sel(Zl = slice(ds['Zp1'].isel(Zp1=0).values, ds['Z'].isel(Z=-1).values))\n \n # Cut axis can't be periodic\n if (len(ds['Z']) < lenZ or 'Z' in dropAxes) and 'Z' in periodic: periodic.remove('Z')\n \n # ---------------------------\n # Time CUTOUT\n # ---------------------------\n \n # Initialize vertical mask\n maskT = _xr.ones_like(ds['time']).astype('int')\n \n if timeRange is not None:\n \n # Use arrays\n timeRange = _np.asarray([_np.min(timeRange), _np.max(timeRange)]).astype(ds['time'].dtype)\n \n # Get the closest \n for i, time in enumerate(timeRange):\n if _np.issubdtype(ds['time'].dtype, _np.datetime64):\n diff = _np.fabs(ds['time'].astype('float64') - time.astype('float64'))\n else:\n diff = _np.fabs(ds['time']-time)\n timeRange[i] = ds['time'].where(diff==diff.min()).min().values \n # return maskT, ds['time'], timeRange[0], timeRange[-1]\n maskT = maskT.where(_np.logical_and(ds['time']>=timeRange[0], ds['time']<=timeRange[-1]), 0) \n \n # Find vertical indexes\n maskT['time'].values = _np.arange(len(maskT['time']))\n dmaskT = maskT.where(maskT, drop=True)\n dtime = dmaskT['time'].values\n iT = [min(dtime), max(dtime)]\n maskT['time'] = ds['time']\n \n # Original length\n lenT = len(ds['time'])\n \n # Indexis\n if iT[0]==iT[1]:\n if 'time' not in dropAxes:\n if iT[0]>0: iT[0]=iT[0]-1\n else: iT[1]=iT[1]+1\n else: dropAxes.pop('time', None)\n \n # Cutout\n ds = ds.isel(time = slice(iT[0], iT[1]+1))\n if 'time' in dropAxes:\n if iT[0]==len(ds['time_midp']):\n iT[0]=iT[0]-1\n iT[1]=iT[1]-1\n ds = ds.isel(time_midp = slice(iT[0], iT[1]+1))\n else:\n ds = ds.isel(time_midp = slice(iT[0], iT[1]))\n \n # Cut axis can't be periodic\n if (len(ds['time']) < lenT or 'T' in dropAxes) and 'time' in periodic: periodic.remove('time')\n \n # ---------------------------\n # Horizontal MASK\n # ---------------------------\n \n if mask_outside and (YRange is not None or XRange is not None):\n if YRange is not None: minY = YRange[0]; maxY = YRange[1]\n else: minY = ds['YG'].min().values; maxY = ds['YG'].max().values\n if XRange is not None: minX = XRange[0]; maxX = XRange[1]\n else: minX = ds['XG'].min().values; maxX = ds['XG'].max().values \n \n maskC = _xr.where(_np.logical_and(_np.logical_and(ds['YC']>=minY, ds['YC']<=maxY),\n _np.logical_and(ds['XC']>=minX, ds['XC']<=maxX)), 1,0).persist()\n maskG = _xr.where(_np.logical_and(_np.logical_and(ds['YG']>=minY, ds['YG']<=maxY),\n _np.logical_and(ds['XG']>=minX, ds['XG']<=maxX)), 1,0).persist()\n maskU = _xr.where(_np.logical_and(_np.logical_and(ds['YU']>=minY, ds['YU']<=maxY),\n _np.logical_and(ds['XU']>=minX, ds['XU']<=maxX)), 1,0).persist()\n maskV = _xr.where(_np.logical_and(_np.logical_and(ds['YV']>=minY, ds['YV']<=maxY),\n _np.logical_and(ds['XV']>=minX, ds['XV']<=maxX)), 1,0).persist()\n for var in ds.data_vars:\n if set(['X', 'Y']).issubset(ds[var].dims): ds[var] = ds[var].where(maskC)\n elif set(['Xp1', 'Yp1']).issubset(ds[var].dims): ds[var] = ds[var].where(maskG)\n elif set(['Xp1', 'Y']).issubset(ds[var].dims): ds[var] = ds[var].where(maskU)\n elif set(['X', 'Yp1']).issubset(ds[var].dims): ds[var] = ds[var].where(maskV)\n \n # 
---------------------------\n # TIME RESAMPLING\n # ---------------------------\n # Resample in time\n if timeFreq:\n \n # Infer original frequency\n inFreq=_pd.infer_freq(ds.time.values); \n if timeFreq[0].isdigit() and not inFreq[0].isdigit(): inFreq='1'+inFreq\n \n # Same frequency: Skip\n if timeFreq==inFreq:\n _warnings.warn(\"\\nInput time freq: [{}] = Output time frequency: [{}]:\"\n \"\\nSkip time resampling.\".format(inFreq, timeFreq), stacklevel=2)\n \n else:\n \n # Remove time_midp and warn\n vars2drop = [var for var in ds.variables if 'time_midp' in ds[var].dims]\n if vars2drop:\n _warnings.warn(\"\\nTime resampling drops variables on `time_midp` dimension.\"\n \"\\nDropped variables: {}.\".format(vars2drop), stacklevel=2)\n ds = ds.drop(vars2drop)\n if 'time_midp' in ds.dims: ds = ds.drop('time_midp')\n \n # Snapshot\n if sampMethod=='snapshot': \n # Find new times\n newtime = ds['time'].sel(time=ds['time'].resample(time=timeFreq).first())\n\n # Use slice when possible\n inds = [i for i, t in enumerate(ds['time'].values) if t in newtime.values]\n inds_diff = _np.diff(inds)\n if all(inds_diff==inds_diff[0]): \n ds = ds.isel(time = slice(inds[0], inds[-1]+1, inds_diff[0]))\n else: \n # TODO: is this an xarray bug od just bad chunking/bad coding/bad SciServe compute performances?\n # Make test case and open issue!\n attrs = ds.attrs\n ds = _xr.concat([ds.sel(time = time) for i, time in enumerate(newtime)], dim='time')\n ds.attrs = attrs\n # Mean\n elif sampMethod=='mean':\n\n # Separate time and timeless\n attrs = ds.attrs\n ds_dims = ds.drop([var for var in ds.variables if not var in ds.dims])\n ds_time = ds.drop([var for var in ds.variables if not 'time' in ds[var].dims])\n ds_timeless = ds.drop([var for var in ds.variables if 'time' in ds[var].dims])\n\n # Resample\n ds_time = ds_time.resample(time=timeFreq).mean('time')\n\n # Add all dimensions to ds, and fix attributes\n for dim in ds_time.dims:\n if dim=='time': ds_time[dim].attrs = ds_dims[dim].attrs\n else: ds_time[dim] = ds_dims[dim]\n\n # Merge\n ds = _xr.merge([ds_time, ds_timeless])\n ds.attrs = attrs\n \n # Update oceandataset\n od._ds = ds\n \n # Add time midp\n if timeFreq and 'time' not in dropAxes:\n od = od.set_grid_coords({**od.grid_coords, 'time' : {'time': -0.5}}, add_midp=True, overwrite=True)\n\n # Drop axes\n grid_coords = od.grid_coords\n for coord in list(grid_coords): \n if coord in dropAxes: grid_coords.pop(coord, None)\n od = od.set_grid_coords(grid_coords, overwrite=True)\n \n # Cut axis can't be periodic \n od = od.set_grid_periodic(periodic, overwrite = True)\n \n # Drop variables\n if varList is not None: \n if isinstance(varList, str): varList = [varList]\n \n # Compute missing variables\n od = _compute._add_missing_variables(od, varList)\n \n # Drop useless\n od._ds = od._ds.drop([v for v in od._ds.variables if (v not in od._ds.dims and v not in od._ds.coords and v not in varList)])\n \n return od", "def make_clips(self):\n\n average_messege_count, streamer_messeges_data = self.__do_analysis()\n\n clipworthy_clips = []\n\n #add clipworthy clips\n for entry in streamer_messeges_data:\n if((entry['messeges_count']*entry['messeges_count']) > (average_messege_count*1.8)):\n clipworthy_clips.append(entry)\n\n #combine clips that are next to one another in time\n clip_number = 0\n while(True):\n #print('clip_number = ' + str(clip_number) +' , length of cliparr = ' + str(len(clipworthy_clips)))\n if(clip_number >= (len(clipworthy_clips))-1):\n #at end of clips\n break\n\n if 
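A micro-pattern that recurs throughout cutout above is snapping a requested coordinate range onto the nearest existing grid values before slicing. The numpy core of that snap, stripped of the xarray wrapping (grid values invented):

    import numpy as np

    grid = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])   # e.g. the YG grid values
    requested = np.array([-1.2, 1.7])

    snapped = np.array([grid[np.abs(grid - r).argmin()] for r in requested])
    # snapped == array([-1., 2.])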
(clipworthy_clips[clip_number]['end_time']==clipworthy_clips[clip_number+1]['start_time']):\n #duplicate clip detected\n #print('dublicate clip detected for clip ' + str(clip_number))\n clipworthy_clips[clip_number]['end_time']=clipworthy_clips[clip_number+1]['end_time']\n #print('cliparr length before ridding: ' + str(len(clipworthy_clips)))\n clipworthy_clips.remove(clipworthy_clips[clip_number+1])\n #print('cliparr length after ridding: ' + str(len(clipworthy_clips)))\n #print('')\n else:\n clip_number = clip_number + 1\n\n\n print('clipworthy clips will now be made')\n clipSlicer = ClipSlicer(clipworthy_clips)\n clipSlicer.make_clips()\n\n print(\"clipworthy clips for streamer \"+ self.streamer + \" have been made\")", "def make_doppelganger_vs_clusters(n_clusters_considered,X,X_occam,n_repeats):\n res = []\n for n_clusters in n_clusters_considered:\n res.append([])\n for _ in range(n_repeats):\n X_restricted,restricted_idxs = get_n_random_clusters(X_occam,n_clusters)\n print(X.val.shape)\n print(X_restricted.val.shape)\n evaluator_X = evaluators.EvaluatorWithFiltering(X,X_restricted,leave_out=True,fitter_class=standard_fitter,valid_idxs=valid_idxs[restricted_idxs])\n res[-1].append(evaluator_X.weighted_average) \n return res", "def refine_selection(selection, max_slices, s):\n MAX_ITERS = 8 # refinement iterations\n SUBSET_L = 5 # max length of subsets for candidate removal\n slices = np.array(s)\n\n # try to improve untill there is space or we did not find a way to improve\n available_space = max_slices - sum(slices[selection])\n changed = True\n iters = 0\n while available_space > 0 and changed and iters < MAX_ITERS:\n iters += 1\n changed = False\n\n unordered_pizzas = set(range(len(s))) - set(selection)\n for pizza_type in unordered_pizzas:\n # TODO: any way to prune this?\n\n adding_slices = slices[pizza_type]\n if changed:\n break # we need to recompute unordered_pizzas\n\n # how much space do we need to create?\n remove_slices = adding_slices - available_space\n\n # we can simply insert this pizza\n if remove_slices <= 0:\n selection.append(pizza_type)\n changed = True\n available_space -= adding_slices\n continue\n\n # try to make space for this pizza by removig as less as possible\n current_slices = sum(slices[selection])\n removal_candidates = set(selection)\n\n # prune the removal set\n pruning = set()\n for candidate in removal_candidates:\n if (-slices[candidate] + adding_slices) <= 0:\n pruning.add(candidate)\n for prune in pruning:\n removal_candidates.remove(prune)\n\n bad_subsets = set()\n for candidate_removal_set in arrays_and_powerset(removal_candidates,\n SUBSET_L):\n skip = False\n for bad in bad_subsets:\n if all([b in candidate_removal_set for b in bad]):\n skip = True\n break\n if skip:\n continue\n\n candidate_slices = sum(slices[list(candidate_removal_set)])\n\n if candidate_slices < remove_slices:\n # not enough\n continue\n\n score_delta = -candidate_slices + adding_slices\n if score_delta <= 0:\n # not convenient\n bad_subsets.add(candidate_removal_set)\n continue\n\n # score delta is positive, make the change (even if it's not\n # the optimal change, checking all is too much\n for rem_pizza in candidate_removal_set:\n selection.remove(rem_pizza)\n available_space += slices[rem_pizza]\n selection.append(pizza_type)\n available_space -= adding_slices\n changed = True\n break\n\n # if we did not break then we did not found any candidate set with\n # positive delta, check another type of pizza\n\n return selection", "def cuts(self) -> list[list[int]]:\n if 
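The merge loop in make_clips above joins clips whose end_time touches the next clip's start_time. The same merge is easier to see on bare (start, end) tuples; the times are invented:

    clips = [(0, 10), (10, 20), (25, 30), (30, 40)]

    merged = []
    for start, end in clips:
        if merged and merged[-1][1] == start:   # adjacent: extend the last clip
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    # merged == [(0, 20), (25, 40)]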
self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts", "def gen_distance_subsets(ruggedness,seq_len=5,library=\"ACDEFGHIKL\",seed=None):\n\n land_K2, seq, _ = makeNK(seq_len,ruggedness,library)\n\n if not seed:\n seed = np.array([x for x in \"\".join([library[0] for x in range(seq_len)])])\n\n subsets = {x : [] for x in range(seq_len+1)}\n for seq in land_K2:\n subsets[hamming(seq[0],seed)].append(seq)\n\n return subsets", "def cut(S, T, graph):\n ###TODO\n pass", "def make_slice_gromacs(**kwargs):\n\tspec_in = kwargs.get('spec',None)\n\tif not spec_in: raise Exception('send slice details in a dict called \"spec\"')\n\treq_keys = 'start end skip group'.split()\n\tmissing_keys = [k for k in req_keys if k not in spec_in]\n\tif any(missing_keys): \n\t\traise Exception('slice maker for GROMACS is missing items in kwargs[\\'specs\\']: %s'%missing_keys)\n\t#---prepare specification for the slicer\n\tspec = dict([(k,spec_in[k]) for k in req_keys])\n\t#---get the PBC\n\tspec['pbc'] = spec_in.get('pbc',None)\n\t#---sequence uses the EDR files to figure out which parts we need to slice\n\tspec['sequence'] = kwargs['sequence']\n\tsn_prefixed = kwargs['sn_prefixed']\n\t#---name the slices\n\tpbc_suffix = '' if not spec['pbc'] else '.pbc%s'%spec['pbc']\n\tspec['outkey'] = '%s.%d-%d-%d.%s%s'%(\n\t\tsn_prefixed,spec['start'],spec['end'],spec['skip'],spec['group'],pbc_suffix)\n\tspec['postdir'] = kwargs['postdir']\n\tspec['tpr_keyfinder'] = kwargs['tpr_keyfinder']\n\tspec['traj_keyfinder'] = kwargs['traj_keyfinder']\n\t#---create the group\n\tif spec_in['group']:\n\t\tif spec_in['group']!=kwargs['group_name']:\n\t\t\traise Exception('group_name %s does not match the slice group %s'%(\n\t\t\t\tspec_in['group'],kwargs['group_name']))\n\t\tspec_group = dict(sn=kwargs['sn'],group=spec_in['group'],\n\t\t\tselect=kwargs['group_selection'],simkey=spec['outkey'])\n\t\t#import ipdb;ipdb.set_trace()\n\t\t#---get the latest starting structure\n\t\t#spec['tpr_keyfinder']('EGFR_active_L747P_MD_2', ('s', '01', 'protein'), '0001')\n\t\tgroup_fn = create_group(postdir=kwargs['postdir'],structure=kwargs['last_structure'],**spec_group)\n\t\tspec['group_fn'] = group_fn\n\t#---call the slice maker\n\tslice_trajectory(**spec)\n\t#---return the name for storage in the postdat\n\treturn spec['outkey']", "def planeSliceGnoKDI(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 5000, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for 
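gen_distance_subsets above buckets every sequence of the landscape by Hamming distance from a seed sequence. With a plain Hamming helper, the bucketing looks like this (tiny two-letter alphabet invented):

    def hamming(a, b):
        return sum(x != y for x, y in zip(a, b))

    seqs = ["AAA", "AAB", "ABB", "BBB"]
    seed = "AAA"

    subsets = {d: [] for d in range(len(seed) + 1)}
    for s in seqs:
        subsets[hamming(s, seed)].append(s)
    # subsets == {0: ['AAA'], 1: ['AAB'], 2: ['ABB'], 3: ['BBB']}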
proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n\n cdist = uxmax/(np.abs(50*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # Plots\n fig = plt.figure(figsize = (6, 10))\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n # ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n # rx = np.linspace(-uxmax, uxmax, gsizex)\n # ry = np.linspace(-uymax, uymax, gsizey)\n # ux, uy = np.meshgrid(rx, ry)\n\n # rx2 = np.linspace(xmin, xmax, gsizex)\n # im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n # cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n # cbar.set_label('G', fontsize=16)\n # ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n # cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n # paths = cs.collections[0].get_paths()\n # uppaths = []\n # for p in paths:\n # cuvert = np.array(p.vertices).T\n # upx, upy = mapToUp(cuvert, alp, ax, ay)\n # ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n # ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n # ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n # ax0.set_xlabel(r\"$u'_x$\", fontsize = 16)\n # ax0.set_ylim([-uymax, uymax])\n # ax0.set_xlim([-uxmax, uxmax])\n # ax0.set_ylabel(r\"$u'_y$\", fontsize = 16)\n # ax0.set_title(\"Gain in the u' plane\")\n\n # G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n # G = G - G[-1] + 1\n fig = plt.figure(figsize = (7, 3), dpi = 100)\n ax1 = plt.subplot()\n # ax1.plot(rx2, G, color = 'blue', label = \"Gain from FFT\")\n for caus in 
upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'blue')\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(xmin, xmax)\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 16)\n ax1.set_ylabel('G', fontsize = 16)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n # ax1.legend(loc = 1)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n # grid.tight_layout(fig, pad = 1.5)\n plt.tight_layout()\n plt.show()\n return", "def get_cuckoos(nest, best, Lb, Ub):\n\tn = nest.shape[0]\n\tbeta=3/2;\n\tsigma=(sp.gamma(1+beta)*np.sin(np.pi*beta/2)/(sp.gamma((1+beta)/2)*beta*2**((beta-1)/2)))**(1/beta);\n\tfor j in range(n):\n\t\ts = nest[j,:]\n\t\t#u=np.random.randn(s.shape[0])*sigma\n\t\t#v=np.random.randn(s.shape[0])\n\t\t#step=u/abs(v)**(1/beta)\n\t\t#stepsize=0.01*step*(s-best);\n\t\ts=s+ss.levy.rvs(size=21)*1e-40\n\t\tnest[j,:]=simple_bounds(s, Lb, Ub)\n\treturn nest", "def prune_path(clf, X, y, max_n_leaves=10, n_iter=10,\n test_size=0.1, random_state=None, n_jobs=1):\n \n\n from sklearn.base import clone\n from sklearn.cross_validation import StratifiedShuffleSplit,ShuffleSplit\n from sklearn.metrics import roc_auc_score,mean_squared_error\n from multiprocessing.dummy import Pool as ThreadPool\n from itertools import repeat\n import pandas as pd\n #import copy\n \n #classification score\n def my_auc(estimator, X, y):\n y_score = estimator.predict_proba(X)[:,1] # You could also use the binary predict, but probabilities should give you a more realistic score.\n return roc_auc_score(y, y_score)\n \n #regression score\n def my_nmse(estimator, X, y):\n y_pre = estimator.predict(X) # You could also use the binary predict, but probabilities should give you a more realistic score.\n return -mean_squared_error(y, y_pre)\n \n\n if len(np.unique(y)) == 2: \n scoring_fuc = my_auc\n \n else:\n scoring_fuc = my_nmse\n \n def multip_run(fuction,task_zip,n_jobs = 1):\n\n #Multi-process Run\n\n pool = ThreadPool(processes=n_jobs)\n results = pool.starmap(fuction, task_zip)\n pool.close()\n pool.join()\n return results \n\n def OneFoldCut(clf,X_train, y_train,X_test,y_test,max_n_leaves):\n estimator = clone(clf)\n \n fitted = estimator.fit(X_train, y_train)\n \n if max_n_leaves < get_n_leaves(fitted):\n n_leaves = max_n_leaves\n \n else:\n n_leaves = get_n_leaves(fitted)\n \n print('###### Iters true start leaves is %d #######' % n_leaves)\n \n #cut_num = list(range(2,n_leaves, 1))\n 
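planeSliceGnoKDI above resamples its stitched asymptotics onto a fine grid with scipy's UnivariateSpline, where s = 0 forces interpolation through every sample rather than smoothing. The same call on toy data:

    import numpy as np
    from scipy.interpolate import UnivariateSpline

    x = np.linspace(0.0, 2.0 * np.pi, 40)
    y = np.sin(x)

    interp = UnivariateSpline(x, y, s=0)      # s=0: pass through every sample
    finx = np.linspace(0.0, 2.0 * np.pi, 400)
    finy = interp(finx)                       # dense resampling of the curve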
cut_num = list(range(n_leaves-1,1,-1))\n #n = len(cut_num)\n loc_indexs = []\n loc_scores = []\n for i in cut_num:\n #clf1 = copy.deepcopy(fitted)\n #clf1 = clone(fitted)\n #clf1.prune(i)\n fitted.prune(i)\n onescore = scoring_fuc(fitted,X_test,y_test)\n #onescore = scoring_fuc(clf1,X_test,y_test)\n loc_scores.append(onescore)\n loc_indexs.append(i)\n \n S = pd.DataFrame(loc_scores,index=loc_indexs)\n\n return S\n\n\n #scores = list()\n if len(np.unique(y)) == 2: \n kf = StratifiedShuffleSplit(y,\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n else:\n kf = ShuffleSplit(len(y),\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n \n X_trains = [X[tr] for tr,ts in kf]\n y_trains = [y[tr] for tr,ts in kf]\n \n X_tests = [X[ts] for tr,ts in kf]\n y_tests = [y[ts] for tr,ts in kf]\n \n task_zip = zip(repeat(clf),\n X_trains,\n y_trains,\n X_tests,\n y_tests,\n repeat(max_n_leaves))\n \n scores = multip_run(OneFoldCut,task_zip,n_jobs = n_jobs)\n \n df = pd.concat(scores,axis=1)\n df.columns = range(len(df.columns))\n\n return df #zip(*scores)", "def exclude_nodes_GC(G):\n remove, present = [], []\n # Find giant component\n Gcc = sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)\n G0 = Gcc[0]\n for node in G.nodes():\n if node not in G0.nodes():\n remove.append(node)\n G0.add_node(node,GC= 0)\n else:\n present.append(node)\n G0.add_node(node, GC= 1)\n # Remove nodes not in giant component\n remove_outliers = [node for node in G.nodes() if node not in G0.nodes()]\n G.remove_nodes_from(remove_outliers)\n return G", "def cull(self):\n # genetics.cpp:2716\n num_parents = int(self.pop.survival_thresh * len(self) + 1)\n self.sort_genomes()\n self.genomes = self.genomes[:num_parents]", "def cutdna(dna, *cutsites, crop=False, supfeature=False, product=None, process_name=None, process_description=None, \n pn=None, pd=None, quinable=True, **kwargs):\n \n kwargs.setdefault(\"_sourcefile\", None) \n kwargs.setdefault(\"process_id\", None)\n kwargs.setdefault(\"original_ids\", []) \n _sourcefile = kwargs[\"_sourcefile\"] \n process_id = kwargs[\"process_id\"] \n original_ids = kwargs[\"original_ids\"]\n\n #Set process name, description and ID\n project = None\n project = project if product is None else product\n process_name = pn if process_name is None else process_name\n process_description = pd if process_description is None else process_description\n\n dna = copy.deepcopy(dna)\n def extract(dna, start, end, project=None): \n start_top = start[0] \n start_bottom = start[1] \n start = min(start)\n \n end_top = end[0]\n end_bottom = end[1] \n end = max(end)\n\n if start == 0 and end == len(dna.seq) and dna._topology == \"linear\":\n new_dna = copy.copy(dna)\n new_dna._topology = \"linear\"\n return new_dna\n\n if dna.topology == \"circular\":\n start = len(dna.seq) + start if start < 0 else start\n start = start - len(dna.seq) if start > len(dna.seq) else start\n end = end - len(dna.seq) if end > len(dna.seq) else end\n \n if (start >= end or (start_top == end_top and start_bottom == end_bottom)) and dna.topology == \"circular\":\n subdna1 = extract(dna, [start, start], [len(dna.seq), len(dna.seq)])\n if start == end and start == 0:\n subdna = subdna1\n else:\n subdna2 = extract(dna, [0,0], [end,end])\n subdna = joindna(subdna1, subdna2, quinable=0)\n else:\n if start > end and dna.topology == \"linear\":\n raise ValueError(\"'end' position must be larger than 'start' position.\")\n feats = []\n new_features = []\n \n #Linearize feature 
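prune_path above fans its per-fold pruning out through multiprocessing.dummy, a thread-backed Pool that shares memory with the sklearn estimators. The bare starmap pattern its multip_run helper wraps, with a stand-in worker in place of OneFoldCut:

    from itertools import repeat
    from multiprocessing.dummy import Pool as ThreadPool

    def fold_score(model_name, fold):        # stand-in for OneFoldCut
        return "%s:fold%d" % (model_name, fold)

    tasks = zip(repeat("tree"), range(4))    # repeated args zipped per task
    with ThreadPool(processes=2) as pool:
        results = pool.starmap(fold_score, tasks)
    # results == ['tree:fold0', 'tree:fold1', 'tree:fold2', 'tree:fold3']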
(Split feature covering zero position) \n for feat in dna.dnafeatures:\n strand = feat.strand\n s = feat.start\n e = feat.end\n if s > e:\n if \"_original\" not in feat.__dict__:\n feat._original = dna.printsequence(s, e, feat.location.strand if feat.location.strand !=0 else 1)\n \n if len(feat.location.parts) == 1:\n length = len(dna.seq) - s + e\n locations = [FeatureLocation(s,len(dna.seq)),FeatureLocation(0,e)]\n if strand == -1:\n locations.reverse()\n feat.location = CompoundLocation(locations)\n feat.location.strand = strand\n\n strand = feat.strand\n if len(feat.location.parts) == 2:\n feat1 = copy.deepcopy(feat)\n feat1.location = feat.location.parts[0]\n feat1.location.strand = feat.location.strand\n feat2 = copy.deepcopy(feat)\n feat2.location = feat.location.parts[1]\n feat2.location.strand = feat.location.strand\n\n else:\n feat1 = copy.deepcopy(feat)\n new_locations = []\n for part in feat1.location.parts:\n if part.start.position > part.end.postion:\n new_locations.append(FeatureLocation(part.start.position, len(dna.seq)))\n break\n else:\n new_locations.append(part)\n if strand == -1:\n new_locations.reverse()\n feat1.location = CompoundLocation(new_locations)\n feat1.location.strand = strand\n flag = 0\n feat2 = copy.deepcopy(feat)\n new_locations = []\n for part in feat1.location.parts:\n if part.start.position > part.end.postion:\n new_locations.append(FeatureLocation(0, part.end.position))\n flag = 1\n\n if flag == 1:\n new_locations.append(part)\n\n if strand == -1:\n new_locations.reverse()\n feat2.location = CompoundLocation(new_locations)\n feat2.location.strnad = strand\n\n if \"broken_feature\" not in feat1.qualifiers:\n label = feat1._id\n if feat1.feature_type == \"source\":\n original_seq = \"-\"\n else:\n original_seq = feat1.original\n \n if feat1.feature_type == \"CDS\" and \"translation\" in feat1.qualifiers:\n del feat1.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat1.original), original_seq, s, e))\n if strand >= 0:\n feat1.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, 1, len(dna.seq)-s)]\n else:\n feat1.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(dna.seq)-s, 1)]\n\n else:\n note = feat.qualifiers[\"broken_feature\"]\n note = note[0] if type(note) is list else note \n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s, pos_s + len(dna.seq)-s)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s, pos_s - (len(dna.seq)-s))\n feat1.qualifiers[\"broken_feature\"] = [note]\n\n if \"broken_feature\" not in feat2.qualifiers:\n label = feat2._id\n if feat2.feature_type == \"source\":\n original_seq = \"-\"\n else:\n original_seq = feat2.original\n \n if feat2.feature_type == \"CDS\" and \"translation\" in feat2.qualifiers:\n del feat2.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat2.original), original_seq, s, e))\n if strand >= 0:\n feat2.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(dna.seq)-s+1, len(dna.seq)-s+e)]\n else:\n feat2.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, 
len(dna.seq)-s+e, len(dna.seq)-s+1)]\n\n else:\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4])\n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s + len(dna.seq)-s, pos_e)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\"))\n note = \"{}:{}..{}\".format(label, pos_s - (len(dna.seq)-s), pos_e)\n feat2.qualifiers[\"broken_feature\"] = [note]\n \n new_features.append(feat.__class__(feature=feat1))\n new_features.append(feat.__class__(feature=feat2))\n \n else:\n #print(feat, start, end) \n new_features.append(feat.__class__(feature=feat))\n \n #Cropping\n for feat in new_features:\n strand = feat.strand\n s = feat.start\n e = feat.end\n feat = copy.deepcopy(feat)\n if len(feat.location.parts) == 1 and s <= e:\n if e > start and s < end:\n if \"_original\" not in feat.__dict__:\n feat._original = dna.printsequence(s, e, feat.location.strand if feat.location.strand !=0 else 1) \n if s - start < 0:\n feat.location.parts[0]._start = ExactPosition(0)\n if \"broken_feature\" not in feat.qualifiers:\n label = feat._id\n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat.original), original_seq, s, e))\n if strand >= 0:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, abs(s-start)+1, e-s)] \n else:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(feat.original) - abs(s-start), 1)] \n else:\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) + abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) - abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n feat.qualifiers[\"broken_feature\"] = [note]\n else:\n feat.location.parts[0]._start = ExactPosition(s - start) \n \n feat.location.parts[-1]._end = ExactPosition(e - start) \n if feat.location.parts[-1]._end > end-start:\n feat.location.parts[-1]._end = ExactPosition(end - start)\n if \"broken_feature\" not in feat.qualifiers: \n label = feat._id\n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat.original), original_seq, s, e))\n if strand >= 0: \n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, 1, end-s)]\n else:\n 
feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, len(feat.original), len(feat.original)-(end-s)+1)]\n else:\n s = int(feat.location.parts[0].start.position)\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s + (end-start-s)-1)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s - (end-start-s)+1)\n feat.qualifiers[\"broken_feature\"] = [note]\n \n feat.location.strand = strand\n feats.append(feat.__class__(feature=feat))\n \n else:\n length = e-s\n locations = []\n sflag = 0 \n eflag = 0\n for apart in feat.location.parts:\n s = apart.start.position \n e = apart.end.position\n if e > start and s <= end:\n if \"_original\" not in feat.__dict__:\n feat._original = dna.printsequence(s, e, feat.location.strand if feat.location.strand !=0 else 1) \n _start = ExactPosition(s)\n if s - start <= 0:\n sflag = 1\n _end = ExactPosition(e) \n if _end > end-start:\n eflag = 1\n locations.append([_start,_end,feat.location.strand])\n \n if len(locations) > 0:\n s = int(locations[0][0])\n e = int(locations[-1][1])\n if s - start < 0 and sflag == 1:\n locations[0][0] = ExactPosition(0)\n if \"broken_feature\" not in feat.qualifiers:\n label = feat._id\n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"{}\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, len(feat.original), original_seq, s, e))\n if strand >= 0:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, abs(s-start)+1, e-s)]\n else:\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, e-s, abs(s-start)+1)] \n else:\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) + abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\")) - abs(s-start) \n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_e)\n feat.qualifiers[\"broken_feature\"] = [note]\n else:\n locations[0][0] = ExactPosition(s - start)\n \n if e > end-start and eflag == 1:\n locations[-1][1] = ExactPosition(end-start)\n if \"broken_feature\" not in feat.qualifiers:\n label = feat._id \n if feat.feature_type == \"source\" or len(feat.original) > 10000:\n original_seq = \"-\"\n else:\n original_seq = feat.original\n \n if feat.feature_type == \"CDS\" and \"translation\" in feat.qualifiers:\n del feat.qualifiers[\"translation\"]\n\n label = \"[{}]\".format(\"{}:{}:{}:{}:{}..{}\".format(dna.project, label, 
len(feat.original), original_seq, s, e))\n feat.qualifiers[\"broken_feature\"] = [\"{}:{}..{}\".format(label, 1, end-s)]\n else:\n s = int(locations[0][0])\n note = feat.qualifiers[\"broken_feature\"][0]\n if strand >= 0:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s + (end-start-s)-1)\n else:\n label = \":\".join(note.split(\":\")[:-1])\n length = int(note.split(\":\")[-4]) \n pos_s = int(note.split(\":\")[-1].split(\"..\")[0].replace(\" \",\"\"))\n pos_e = int(note.split(\":\")[-1].split(\"..\")[1].replace(\" \",\"\")) \n note = \"{}:{}..{}\".format(label, pos_s, pos_s - (end-start-s)+1)\n feat.qualifiers[\"broken_feature\"] = [note]\n else:\n locations[-1][1] = ExactPosition(e - start)\n \n if len(locations) == 1:\n feat.location = FeatureLocation(*locations[0])\n else:\n for l in range(len(locations)):\n if l == 0:\n locations[l][1] = locations[l][1] - start\n elif l == len(locations) - 1:\n locations[l][0] = locations[l][0] - start\n else:\n locations[l][0] = locations[l][0] - start\n locations[l][1] = locations[l][1] - start\n locations = [FeatureLocation(*loc) for loc in locations] \n if strand == -1:\n locations.reverse()\n feat.location = CompoundLocation(locations)\n feats.append(feat.__class__(feature=feat))\n \n feats.sort(key=lambda x:(x.location.parts[0].start.position, x.location.parts[-1].end.position))\n subdna = dna.__class__(seq=str(dna.seq[start:end]), quinable=0)\n subdna._history_feature = copy.deepcopy(dna._history_feature) \n subdna._dnafeatures = feats\n subdna._topology = \"linear\"\n \n if start < len(dna._left_end) and dna.topology == \"linear\":\n subdna._left_end = dna._left_end[start:] \n subdna._left_end_top = dna._left_end_top\n subdna._left_end_bottom = dna._left_end_bottom\n else:\n subdna._left_end = subdna.seq[0:20] \n subdna._left_end_top = 1\n subdna._left_end_bottom = 1\n \n if len(dna.seq) - end < len(dna._right_end) and dna.topology == \"linear\":\n subdna._right_end = dna._right_end[:len(dna._right_end) - (len(dna.seq) - end)]\n subdna._right_end_top = dna._right_end_top\n subdna._right_end_bottom = dna._right_end_bottom\n \n else:\n subdna._right_end = subdna.seq[-20:]\n subdna._right_end_top = 1\n subdna._right_end_bottom = 1\n\n subdna.record.annotations[\"topology\"] = subdna.topology\n subdna.record.features = subdna.dnafeatures\n \n if start_top != start_bottom or end_top != end_bottom:\n start_dif = start_top - start_bottom\n if start_dif > 0:\n left = \"-\" * start_dif + \"/\" + \"*\" * start_dif\n elif start_dif < 0:\n left = \"*\" * abs(start_dif) + \"/\" + \"-\" * abs(start_dif) \n else:\n left = \"\"\n \n end_dif = end_top - end_bottom\n if end_dif > 0:\n right = \"*\" * end_dif + \"/\" + \"-\" * end_dif \n elif end_dif < 0:\n right = \"-\" * abs(end_dif) + \"/\" + \"*\" * abs(end_dif) \n else:\n right = \"\"\n subdna = modifyends(subdna, left, right, quinable=0)\n else:\n pass \n \n for dnafeature in subdna.dnafeatures:\n dnafeature.subject = subdna \n \n if start >= end:\n subdna._positions = dna._positions[start:] + dna._positions[:end]\n else:\n subdna._positions = dna._positions[start:end] \n\n return subdna \n \n dnas = [] \n new_positions = [] \n for pos in cutsites:\n if type(pos) is str:\n pos = tuple(map(int,pos.split(\"/\")))\n spos, epos = pos\n spos = spos - len(dna.seq) if spos > 
len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos)) \n \n elif type(pos) is int or (\"__dict__\" in dir(pos) and \"_qint\" in pos.__dict__):\n pos = (pos, pos) \n spos, epos = pos\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos)) \n \n elif type(pos) is SeqFeature or (\"__dict__\" in dir(pos) and \"_dnafeature\" in pos.__dict__):\n strand = pos.location.strand\n if \"cutsite\" not in pos.qualifiers:\n raise ValueError(\"DNAfeature object should hold 'qualifiers:cutsite' attribute.\")\n \n if pos._digestion_topl == \"null\":\n _, _, pos._digestion_topl, pos._digestion_topr, pos._digestion_bottoml, pos._digestion_bottomr = compile_cutsite(pos.qualifiers[\"cutsite\"][0])\n \n if strand != -1:\n if pos._digestion_topl != \"null\":\n spos = pos.start - pos._digestion_topl\n epos = pos.start - pos._digestion_bottoml \n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos))\n \n elif pos._digestion_topr != \"null\": \n spos = pos.end + pos._digestion_topr\n epos = pos.end + pos._digestion_bottomr\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos))\n else:\n if pos._digestion_topr != \"null\":\n spos = pos.start - pos._digestion_bottomr\n epos = pos.start - pos._digestion_topr\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos))\n \n elif pos._digestion_topl != \"null\": \n spos = pos.end + pos._digestion_bottoml\n epos = pos.end + pos._digestion_topl\n spos = spos - len(dna.seq) if spos > len(dna.seq) else spos \n epos = epos + len(dna.seq) if epos < 0 else epos\n new_positions.append((spos,epos))\n \n tmp_positions = new_positions[:]\n tmp_positions.sort() \n top_positions = list(list(zip(*tmp_positions))[0])\n bottom_positions = list(list(zip(*tmp_positions))[1])\n for b in range(len(bottom_positions)-1):\n if bottom_positions[b] <= bottom_positions[b+1]:\n pass\n else:\n raise ValueError(\"Invalid cut pattern.\")\n\n new_positions_original = new_positions[:] \n new_positions_original = [\"{}/{}\".format(*posset) for posset in new_positions_original]\n if crop == True:\n crop_positions = (new_positions[0], new_positions[1])\n \n if dna.topology == \"linear\":\n if (0,0) not in new_positions:\n new_positions = [(0,0)] + new_positions \n else:\n pass\n \n if (len(dna.seq),len(dna.seq)) not in new_positions:\n new_positions = new_positions + [(len(dna.seq), len(dna.seq))] \n new_positions = list(new_positions) \n new_positions.sort() \n \n elif dna.topology == \"circular\":\n new_positions = list(new_positions) \n tmp_positions = new_positions[:]\n new_positions.sort() \n for pindex, pos in enumerate(new_positions):\n if pos == tmp_positions[0]:\n new_positions = new_positions[pindex:] + new_positions[:pindex]\n break \n\n if dna.topology == \"linear\":\n if crop == True:\n dnas.append(extract(dna, crop_positions[0], crop_positions[1], project=project))\n else:\n for i, pos in enumerate(new_positions[0:-1]):\n dnas.append(extract(dna, pos, new_positions[i+1], project=project))\n \n elif dna.topology == \"circular\":\n if crop == True: \n dnas.append(extract(dna, crop_positions[0], crop_positions[1], project=project))\n else:\n for i, pos in 
enumerate(new_positions[0:-1]):\n dnas.append(extract(dna, pos, new_positions[i+1], project=project))\n if new_positions[0] == (0,0):\n dnas.append(extract(dna, new_positions[-1], (len(dna.seq), len(dna.seq)), project=project)) \n else:\n dnas.append(extract(dna, new_positions[-1], new_positions[0], project=project)) \n\n if project is None:\n for subdna in dnas:\n subdna._unique_id = dna._unique_id\n else:\n for subdna in dnas:\n subdna._unique_id = project\n \n if quinable == True:\n products = []\n dna_keys = list(dnas[0].__class__.dna_dict.keys())\n for i in range(len(dnas)):\n dnas[i]._product_id = dnas[i]._unique_id if product is None else product \n products.append(\"QUEEN.dna_dict['{}']\".format(dnas[i]._product_id))\n\n args = [] \n history_features = [dnas[0]._history_feature] \n for pos in cutsites:\n if \"__dict__\" in dir(pos) and \"_dnafeature\" in pos.__dict__:\n qkey = pos._qkey\n for qindex, qfeat in enumerate(dnas[0].__class__.queried_features_dict[qkey]):\n if qfeat._second_id == pos._second_id:\n break\n args.append(\"QUEEN.queried_features_dict['{}'][{}]\".format(qkey, qindex))\n history_features.append(pos.subject._history_feature) \n\n elif \"__dict__\" in dir(pos) and \"_qint\" in pos.__dict__:\n qkey = pos.qkey\n for qindex, qfeat in enumerate(dnas[0].__class__.queried_features_dict[qkey]):\n if qfeat._second_id == pos.parental_id:\n break\n args.append(\"QUEEN.queried_features_dict['{}'][{}].{}\".format(pos.qkey, qindex, pos.name))\n history_features.append(pos.parent.subject._history_feature) \n\n else:\n if type(pos) is int:\n args.append(str(pos))\n else:\n args.append(\"'\" + str(pos) + \"'\")\n \n \n if type(supfeature) in (tuple, list) and type(supfeature[0]) in (tuple, list) and type(supfeature[0][0]) == dict:\n for i, feature_dict_list in enumerate(supfeature): \n for feature_dict in feature_dict_list:\n dnas[i].setfeature(feature_dict) \n \n elif type(supfeature) in (tuple, list) and type(supfeature[0]) == dict:\n for i, feature_dict in enumerate(supfeature): \n dnas[i].setfeature(feature_dict) \n\n elif type(supfeature) == dict:\n dnas[0].setfeature(supfeature)\n\n if crop == True:\n fcrop = \", crop=True\"\n else:\n fcrop = \"\" \n \n project = \"\" \n fsupfeature = \"\" if supfeature == False else \", supfeature={}\".format(str(supfeature))\n fproduct = \"\" if product is None else \", product='\" + product + \"'\"\n process_name = \"\" if process_name is None else \", process_name='\" + process_name + \"'\"\n process_description = \"\" if process_description is None else \", process_description='\" + process_description + \"'\" \n \n if len(products) > 1:\n building_history = \"{} = cutdna(QUEEN.dna_dict['{}'], {}{}{}{}{}{}{})\".format(\", \".join(products), dna._product_id, \", \".join(args), fcrop, fsupfeature, project, fproduct, process_name, process_description) \n else:\n building_history = \"{}, = cutdna(QUEEN.dna_dict['{}'], {}{}{}{}{}{}{})\".format(\", \".join(products), dna._product_id, \", \".join(args), fcrop, project, fsupfeature, fproduct, process_name, process_description)\n \n for subdna in dnas:\n history_feature = _combine_history(subdna, history_features)\n subdna._history_feature = history_feature\n process_id, original_ids = make_processid(subdna, building_history, process_id, original_ids)\n subdna._check_uniqueness() \n add_history(subdna, [building_history, \"positions: {}\".format(\",\".join(list(map(str, new_positions_original)))) + \"; num_products: {}\".format(len(dnas)), \",\".join([process_id] + original_ids)], 
_sourcefile)\n else:\n for subdna in dnas:\n subdna.__dict__[\"_product_id\"] = dna._product_id if \"_product_id\" in dna.__dict__ else dna._unique_id\n \n if product is None:\n pass \n else:\n product = product.replace(\" \",\"\") \n if \",\" in product:\n for name, subdna in zip(product.split(\",\"), dnas):\n dnas[0].__class__._namespace[name] = subdna\n else: \n dnas[0].__class__._namespace[product] = dnas\n\n if crop == True:\n return dnas[0], crop_positions \n else:\n return dnas", "def build_block_cross(self):\n from ambry.geo.util import find_geo_containment, find_containment\n from geoid import civick \n\n lr = self.init_log_rate(3000)\n\n def gen_bound():\n \n boundaries = self.library.dep('blockgroups').partition\n\n # Note, ogc_fid is the primary key. The id column is created by the shapefile. \n for i,boundary in enumerate(boundaries.query(\n \"SELECT AsText(geometry) AS wkt, gvid FROM blockgroups\")):\n lr('Load rtree')\n \n yield i, boundary['wkt'] , boundary['gvid'] \n \n def gen_points():\n\n for row in self.partitions.find(table = 'facilities_addresses').rows:\n if row['longitude'] and row['latitude']:\n yield (row['longitude'], row['latitude']), row['facilities_id']\n\n\n p = self.partitions.find_or_new(table='facilities_geoids')\n p.clean()\n\n with p.inserter() as ins:\n for point, point_o, cntr_geo, cntr_o in find_containment(gen_bound(),gen_points()):\n\n blockgroup_gvid = civick.Blockgroup.parse(cntr_o)\n tract_gvid = blockgroup_gvid.convert(civick.Tract)\n county_gvid = blockgroup_gvid.convert(civick.County)\n \n ins.insert(dict(facilities_id = point_o, \n blockgroup_gvid = str(blockgroup_gvid),\n tract_gvid = str(tract_gvid),\n county_gvid = str(county_gvid)\n ))\n \n lr('Marking point containment')", "def genSubset(L):\n if len(L) == 0:\n return [[]] # list of empty list\n smaller = genSubset(L[:-1]) # the list without last element\n extra = L[-1:] # a list of just the last element\n new = []\n for small in smaller:\n new.append(small + extra)\n return smaller + new", "def genomeslice(input_array, strand, left, right, wrt = '5_to_3'):\n if left > right: # empty slice case\n return np.asarray([])\n elif (strand == 0) or (wrt is 'genome'):\n return input_array[strand,left:right+1]\n elif (strand == 1) and (wrt is '5_to_3'):\n return np.flip(input_array[strand,left:right+1],axis=0)\n else:\n raise ValueError(\"Unhandled strand {0 or 1} or wrt {'genome' or '5_to_3'} value.\")", "def get_all_guides_that_cut_in_cds(self,pam, seq_len_around_cut,\n min_mut_pos_in_guide, max_mut_pos_in_guide,\n excluded_seqs, mapping_cmd, sort_by = '5to3'):\n \n ordered_lefts = self.cds_lefts\n ordered_rights = self.cds_rights\n if (self.is_neg_strand()):\n ordered_lefts = ordered_lefts[::-1]\n ordered_rights = ordered_rights[::-1]\n \n ######\n # search positive strand for pam\n ######\n cur_cds_nt_start = 0\n exon_num = 0\n guides0_chr_pos = np.empty(0,dtype=int)\n guides_cut_chr_pos = np.empty(0,dtype=int)\n guides_cut_gene_dna_pos = np.empty(0,dtype=int)\n guides_exon_num = np.empty(0,dtype=int)\n\n for left,right in zip(ordered_lefts,ordered_rights):\n \n # cut is to the right of the nucleotide\n cur_left_for_pam = left + (self.CRISPR_CUT_INDEX + len(pam) - 1) + (1 * self.is_neg_strand()) \n cur_right_for_pam = right + (self.CRISPR_CUT_INDEX + len(pam) - 1) + (1 * self.is_neg_strand()) \n \n\n \n seq = self.genome_seq[self.chrom].seq[cur_left_for_pam:cur_right_for_pam]\n \n # returns a list of all the positions in that cut in cds\n cur_pam_dists = np.array([m.start() for m in 
re.finditer(\"(?=\"+pam+\")\", str(seq))])\n \n # removing guides that are not entirely in the CDS\n if ( (not np.isnan(min_mut_pos_in_guide)) and (not np.isnan(max_mut_pos_in_guide)) ):\n min_mut_pos_in_guide = int(min_mut_pos_in_guide)\n max_mut_pos_in_guide = int(max_mut_pos_in_guide)\n \n cur_pam_dists = cur_pam_dists[cur_pam_dists >= (-min_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand() ) ]\n cur_pam_dists = cur_pam_dists[cur_pam_dists <= (len(seq) - 1 + len(pam) - 1 ) + ( (-max_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand()) ) ]\n \n \n cur_guides0_chr_pos = (cur_pam_dists-1) + cur_left_for_pam \n \n if (self.is_neg_strand()): # negative\n cur_guides_cut_gene_dna_pos = (len(seq)-1-cur_pam_dists) + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos - (self.CRISPR_CUT_INDEX + 1) # the cut is right of the nt\n else:\n cur_guides_cut_gene_dna_pos = cur_pam_dists + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos - self.CRISPR_CUT_INDEX # the cut is left of the nt\n \n \n cur_guides_exon_num = np.full_like(cur_guides_cut_gene_dna_pos,exon_num)\n \n \n guides0_chr_pos = np.concatenate((guides0_chr_pos,cur_guides0_chr_pos))\n guides_cut_chr_pos = np.concatenate((guides_cut_chr_pos,cur_guides_cut_chr_pos))\n guides_cut_gene_dna_pos = np.concatenate((guides_cut_gene_dna_pos,cur_guides_cut_gene_dna_pos))\n guides_exon_num = np.concatenate((guides_exon_num,cur_guides_exon_num))\n \n \n cur_cds_nt_start = cur_cds_nt_start + (right - left)\n exon_num = exon_num + 1\n \n \n pos_strand_guides_df = self.__guide_positions_to_df(pam, False, seq_len_around_cut, excluded_seqs, \\\n guides0_chr_pos, guides_cut_chr_pos, guides_cut_gene_dna_pos, guides_exon_num) \n \n ######\n # search negative strand for pam\n ######\n cur_cds_nt_start = 0\n exon_num = 0\n guides0_chr_pos = np.empty(0,dtype=int)\n guides_cut_chr_pos = np.empty(0,dtype=int)\n guides0_gene_dna_pos = np.empty(0,dtype=int)\n guides_cut_gene_dna_pos = np.empty(0,dtype=int)\n guides_exon_num = np.empty(0,dtype=int)\n \n for left,right in zip(ordered_lefts,ordered_rights):\n \n \n cur_left_for_pam = int(left) - (self.CRISPR_CUT_INDEX + len(pam)+1) + (1 * self.is_neg_strand())\n cur_right_for_pam = int(right) - (self.CRISPR_CUT_INDEX + len(pam)+1) + (1 * self.is_neg_strand())\n \n seq = self.genome_seq[self.chrom].seq[cur_left_for_pam:cur_right_for_pam]\n \n revcomp_pam = Seq(pam,generic_dna).reverse_complement()\n \n # returns a list of all the positions in that cut in cds\n cur_pam_dists = np.array([m.start() for m in re.finditer(\"(?=\"+str(revcomp_pam)+\")\", str(seq))])\n \n \n if ( (not np.isnan(min_mut_pos_in_guide)) and (not np.isnan(max_mut_pos_in_guide)) ):\n min_mut_pos_in_guide = int(min_mut_pos_in_guide)\n max_mut_pos_in_guide = int(max_mut_pos_in_guide)\n \n cur_pam_dists = cur_pam_dists[cur_pam_dists >= (-min_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand() ) ]\n cur_pam_dists = cur_pam_dists[cur_pam_dists <= (len(seq) - 1 + len(pam) - 1 ) + ( (-max_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand()) ) ]\n \n \n \n cur_guides0_chr_pos = (cur_pam_dists+2) + cur_left_for_pam\n \n if (self.is_neg_strand()): # negative \n cur_guides_cut_gene_dna_pos = (len(seq)-1-cur_pam_dists) + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos + self.CRISPR_CUT_INDEX # the cut is right of the nt\n else: # positive\n cur_guides_cut_gene_dna_pos = cur_pam_dists + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos + 
self.CRISPR_CUT_INDEX + 1 # the cut is left of the nt\n \n \n cur_guides_exon_num = np.full_like(cur_guides_cut_gene_dna_pos,exon_num)\n \n \n guides0_chr_pos = np.concatenate((guides0_chr_pos,cur_guides0_chr_pos))\n guides_cut_chr_pos = np.concatenate((guides_cut_chr_pos,cur_guides_cut_chr_pos))\n guides_cut_gene_dna_pos = np.concatenate((guides_cut_gene_dna_pos,cur_guides_cut_gene_dna_pos))\n guides_exon_num = np.concatenate((guides_exon_num,cur_guides_exon_num))\n \n cur_cds_nt_start = cur_cds_nt_start + (right - left)\n exon_num = exon_num + 1\n \n \n neg_strand_guides_df = self.__guide_positions_to_df(pam, True, seq_len_around_cut, excluded_seqs, \\\n guides0_chr_pos, guides_cut_chr_pos, guides_cut_gene_dna_pos, guides_exon_num)\n \n \n # concating the positive and negative strands guides\n guides_df = pd.concat([pos_strand_guides_df, neg_strand_guides_df])\n \n # adding for each guide its location in the gene (5' -> 3'; fraction)\n guides_df[\"guide_cut_gene_pos_frac\"] = guides_df[\"guide_cut_gene_nt_pos\"] / guides_df[\"CDS_len_nts\"]\n\n \n # if the 'sort' method is onlyStopCodon then leave only guide that cut the stop codon\n if sort_by == 'onlyStopCodon':\n guides_df = guides_df.loc[( ( (guides_df['CDS_len_nts']).values / 3) == ( (guides_df['guide_cut_gene_aa_pos']).values + 1) ) ,:]\n \n # calculating Azimuth score\n #print \"Calculating Azimuth score\"\n guides_df = cal_azimuth_score(guides_df, output_filename_GUIDE_withScores = \"\", guides_PAMm4p3_col_name=\"guide_PAM_m4p3\")\n \n # calculating off targets\n #print \"Testing off targets\"\n guides_df = eval_guides_off_targets(guides_df, self.genome_seq, 'guide_id', 'guide_noPAM', pam, mapping_cmd)\n \n \n return (guides_df) def detect_cut(self, second_iteration=False):\r\n if self.dfg:\r\n # print(\"\\n\\n\")\r\n par_cut = self.detect_parallel_cut()\r\n conc_cut = self.detect_concurrent_cut()\r\n seq_cut = self.detect_sequential_cut()\r\n loop_cut = self.detect_loop_cut()\r\n\r\n if par_cut[0]:\r\n union_acti_comp = set()\r\n for comp in par_cut[1]:\r\n union_acti_comp = union_acti_comp.union(comp)\r\n diff_acti_comp = set(self.activities).difference(union_acti_comp)\r\n\r\n for act in diff_acti_comp:\r\n par_cut[1] = add_to_most_probable_component(par_cut[1], act, self.ingoing, self.outgoing)\r\n\r\n for comp in par_cut[1]:\r\n new_dfg = filter_dfg_on_act(self.dfg, comp)\r\n self.detected_cut = \"parallel\"\r\n self.children.append(Subtree(new_dfg, self.initial_dfg, comp, self.counts, self.rec_depth + 1,\r\n noise_threshold=self.noise_threshold))\r\n else:\r\n if conc_cut[0]:\r\n for comp in conc_cut[1]:\r\n new_dfg = filter_dfg_on_act(self.dfg, comp)\r\n self.detected_cut = \"concurrent\"\r\n self.children.append(Subtree(new_dfg, self.initial_dfg, comp, self.counts, self.rec_depth + 1,\r\n noise_threshold=self.noise_threshold))\r\n else:\r\n if seq_cut[0]:\r\n dfg1 = filter_dfg_on_act(self.dfg, seq_cut[1])\r\n dfg2 = filter_dfg_on_act(self.dfg, seq_cut[2])\r\n self.detected_cut = \"sequential\"\r\n self.children.append(\r\n Subtree(dfg1, self.initial_dfg, seq_cut[1], self.counts, self.rec_depth + 1,\r\n noise_threshold=self.noise_threshold))\r\n self.children.append(\r\n Subtree(dfg2, self.initial_dfg, seq_cut[2], self.counts, self.rec_depth + 1,\r\n noise_threshold=self.noise_threshold))\r\n else:\r\n if loop_cut[0]:\r\n dfg1 = filter_dfg_on_act(self.dfg, loop_cut[1])\r\n dfg2 = filter_dfg_on_act(self.dfg, loop_cut[2])\r\n self.detected_cut = \"loopCut\"\r\n self.children.append(\r\n Subtree(dfg1, self.initial_dfg, 
loop_cut[1], self.counts, self.rec_depth + 1,\r\n noise_threshold=self.noise_threshold))\r\n self.children.append(\r\n Subtree(dfg2, self.initial_dfg, loop_cut[2], self.counts, self.rec_depth + 1,\r\n noise_threshold=self.noise_threshold))\r\n else:\r\n if self.noise_threshold > 0:\r\n if not second_iteration:\r\n self.initialize_tree(self.dfg, self.initial_dfg, None, second_iteration=True)\r\n else:\r\n pass\r\n self.detected_cut = \"flower\"\r\n else:\r\n self.detected_cut = \"base_concurrent\"", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def disable_default_cuts(gmodel):\n gmodel.setParam('PreCrush', 1)\n gmodel.setParam(GRB.Param.CoverCuts,0)\n gmodel.setParam(GRB.Param.CliqueCuts,0)\n gmodel.setParam(GRB.Param.FlowCoverCuts,0)\n gmodel.setParam(GRB.Param.FlowPathCuts,0)\n gmodel.setParam(GRB.Param.GUBCoverCuts,0)\n gmodel.setParam(GRB.Param.ImpliedCuts,0)\n gmodel.setParam(GRB.Param.InfProofCuts,0)\n gmodel.setParam(GRB.Param.MIPSepCuts,0)\n gmodel.setParam(GRB.Param.MIRCuts,0)\n gmodel.setParam(GRB.Param.ModKCuts,0)\n gmodel.setParam(GRB.Param.NetworkCuts,0)\n gmodel.setParam(GRB.Param.ProjImpliedCuts,0)\n gmodel.setParam(GRB.Param.StrongCGCuts,0)\n gmodel.setParam(GRB.Param.SubMIPCuts,0)\n gmodel.setParam(GRB.Param.ZeroHalfCuts,0)\n gmodel.setParam(GRB.Param.GomoryPasses,0)", "def planeSliceGFig2(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = int(len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n if comp == True:\n diff = difference(nreal) # determine number of complex solutions\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 1, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n \n fogain = np.zeros([nzones, npoints])\n zogain = np.zeros([nzones, npoints])\n for i in range(nzones):\n nroots = nreal[i]\n if nroots == 1:\n fogain[i] = np.abs(allfields[i])**2\n zogain[i] = np.abs(allfields[i])**2\n else:\n fogain[i] = np.abs(np.sum(allfields[i], axis = 0))**2\n zog = 0\n for j in range(nroots):\n zog = zog + np.abs(allfields[i][j])**2\n zogain[i] = zog\n \n fogain = fogain.flatten()\n zogain = zogain.flatten()\n\n # Construct uniform asymptotics\n # asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n # interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n # finx = np.linspace(xmin, xmax, 4*npoints)\n # asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(2, 2)\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[:, 0]), plt.subplot(grid[0, 1])\n # ax0, ax2 = plt.subplot(grid[0]), plt.subplot(grid[1])\n ax2 = plt.subplot(grid[1, 1], sharex=ax1)\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n cbar.set_label('G', fontsize = 18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot 
caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\", linewidth = 1.)\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n xaxis = upxvecs.flatten()\n ax1.plot(xaxis, zogain, color = 'red', label = r'$0^{th}$ order GO gain')\n ax1.set_ylim(-cdist, np.max(G) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n # ax1.set_xlabel(r\"$u'_x$\")\n ax1.set_ylabel('G', fontsize = 18)\n ax1.legend(loc = 1, fontsize = 12)\n ax1.tick_params(labelsize = 14)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n \n # Plot gain along observer motion\n ax2.plot(rx2, G, color='blue', label=\"FFT gain\", linewidth=1.)\n for caus in upcross.T[0]:\n ax2.plot([caus, caus], [-10, 1000], ls='dashed', color='black')\n ax2.plot(xaxis, fogain, color='orange', label=r'$1^{st}$ order GO gain')\n ax2.set_ylim(-cdist, np.max(G) + 1.)\n ax2.set_xlim(np.min(rx2), np.max(rx2))\n ax2.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax2.set_ylabel('G', fontsize = 18)\n ax2.legend(loc = 1, fontsize = 12)\n # ax1.set_title(\"Slice Gain\")\n ax2.tick_params(labelsize = 14)\n ax2.grid()\n grid.tight_layout(fig)\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n plt.show()\n return", "def detect_cut(self, second_iteration=False):\n if pkgutil.find_loader(\"networkx\"):\n import networkx as nx\n else:\n msg = \"networkx is not available. 
inductive miner cannot be used!\"\n logging.error(msg)\n raise Exception(msg)\n\n if self.dfg:\n\n this_nx_graph = transform_dfg_to_directed_nx_graph(self.dfg, activities=self.activities)\n conn_components = detection_utils.get_connected_components(self.ingoing, self.outgoing, self.activities)\n strongly_connected_components = [list(x) for x in nx.strongly_connected_components(this_nx_graph)]\n\n xor_cut = self.detect_xor_cut(conn_components, this_nx_graph, strongly_connected_components)\n\n if xor_cut[0]:\n for comp in xor_cut[1]:\n new_dfg = filter_dfg_on_act(self.dfg, comp)\n self.detected_cut = \"xor\"\n self.children.append(\n SubtreeDFGBased(new_dfg, self.master_dfg, self.initial_dfg, comp, self.counts,\n self.rec_depth + 1,\n noise_threshold=self.noise_threshold,\n initial_start_activities=self.initial_start_activities,\n initial_end_activities=self.initial_end_activities))\n else:\n seq_cut = self.detect_sequential_cut(conn_components, this_nx_graph, strongly_connected_components)\n if seq_cut[0]:\n self.detected_cut = \"sequential\"\n for child in seq_cut[1]:\n dfg_child = filter_dfg_on_act(self.dfg, child)\n self.children.append(\n SubtreeDFGBased(dfg_child, self.master_dfg, self.initial_dfg, child, self.counts,\n self.rec_depth + 1,\n noise_threshold=self.noise_threshold,\n initial_start_activities=self.initial_start_activities,\n initial_end_activities=self.initial_end_activities))\n self.put_skips_in_seq_cut()\n else:\n par_cut = self.detect_parallel_cut(conn_components, this_nx_graph, strongly_connected_components)\n if par_cut[0]:\n self.detected_cut = \"parallel\"\n for comp in par_cut[1]:\n new_dfg = filter_dfg_on_act(self.dfg, comp)\n self.children.append(\n SubtreeDFGBased(new_dfg, self.master_dfg, new_dfg, comp, self.counts,\n self.rec_depth + 1,\n noise_threshold=self.noise_threshold,\n initial_start_activities=self.initial_start_activities,\n initial_end_activities=self.initial_end_activities))\n else:\n loop_cut = self.detect_loop_cut(conn_components, this_nx_graph, strongly_connected_components)\n if loop_cut[0]:\n if loop_cut[2]:\n self.detected_cut = \"loopCut\"\n for index_enum, child in enumerate(loop_cut[1]):\n dfg_child = filter_dfg_on_act(self.dfg, child)\n next_subtree = SubtreeDFGBased(dfg_child, self.master_dfg, self.initial_dfg, child,\n self.counts, self.rec_depth + 1,\n noise_threshold=self.noise_threshold,\n initial_start_activities=self.initial_start_activities,\n initial_end_activities=self.initial_end_activities)\n if loop_cut[3]:\n next_subtree.must_insert_skip = True\n self.children.append(next_subtree)\n else:\n self.detected_cut = \"sequential\"\n self.need_loop_on_subtree = True\n for index_enum, child in enumerate(loop_cut[1]):\n dfg_child = filter_dfg_on_act(self.dfg, child)\n next_subtree = SubtreeDFGBased(dfg_child, self.master_dfg, self.initial_dfg, child,\n self.counts, self.rec_depth + 1,\n noise_threshold=self.noise_threshold,\n initial_start_activities=self.initial_start_activities,\n initial_end_activities=self.initial_end_activities)\n self.children.append(next_subtree)\n next_subtree.must_insert_skip = True\n else:\n if self.noise_threshold > 0:\n if not second_iteration:\n self.initialize_tree(self.dfg, self.initial_dfg, None, second_iteration=True)\n else:\n self.detected_cut = \"flower\"\n else:\n self.detected_cut = \"flower\"\n\n else:\n self.detected_cut = \"base_xor\"", "def simplify_and_prune(hucs, rivers, args):\n tol = args.simplify\n \n logging.info(\"\")\n logging.info(\"Simplifying and pruning\")\n 
logging.info(\"========================\")\n logging.info(\"filtering rivers outside of the HUC space\")\n rivers = workflow.hydrography.filter_rivers_to_huc(hucs, rivers, tol)\n if len(rivers) is 0:\n return rivers\n\n logging.info(\"removing rivers with only a few reaches\")\n for i in reversed(range(len(rivers))):\n ltree = len(rivers[i])\n if ltree < args.prune_reach_size:\n rivers.pop(i)\n logging.info(\" removing river with %d reaches\"%ltree)\n else:\n logging.info(\" keeping river with %d reaches\"%ltree)\n if len(rivers) is 0:\n return rivers\n \n logging.info(\"simplifying rivers\")\n workflow.hydrography.cleanup(rivers, tol, tol, tol)\n\n logging.info(\"simplify HUCs\")\n workflow.hucs.simplify(hucs, tol)\n\n # snap\n logging.info(\"snapping rivers and HUCs\")\n rivers = workflow.hydrography.snap(hucs, rivers, tol, 10*tol, args.cut_intersections)\n \n logging.info(\"filtering cut reaches outside the HUC space\")\n rivers = workflow.hydrography.filter_rivers_to_huc(hucs, rivers, -0.1*tol)\n logging.info(\"...done\")\n\n logging.info(\"Resulting info\")\n if len(rivers) is not 0:\n mins = []\n for river in rivers:\n for line in river.dfs():\n coords = np.array(line.coords[:])\n dz = np.linalg.norm(coords[1:] - coords[:-1], 2, -1)\n mins.append(np.min(dz))\n logging.info(\" river min seg length: %g\"%min(mins))\n logging.info(\" river median seg length: %g\"%np.median(np.array(mins)))\n\n mins = []\n for line in hucs.segments:\n coords = np.array(line.coords[:])\n dz = np.linalg.norm(coords[1:] - coords[:-1], 2, -1)\n mins.append(np.min(dz))\n logging.info(\" HUC min seg length: %g\"%min(mins))\n logging.info(\" HUC median seg length: %g\"%np.median(np.array(mins)))\n return rivers", "def gc_blocks(seq, block_size):\n\n # Make all capital\n seq = seq.upper()\n iterations = len(seq) // block_size\n\n # Iterate through finding the GC content\n gc = []\n for i in range(iterations):\n block = seq[i*block_size:(i+1)*block_size]\n gc.append((block.count('G') + block.count('C')) / block_size)\n return tuple(gc)", "def breed_slice(sch1, sch2, way='average'):\n children = []\n for _ in range(2):\n child = []\n\n # part before idx goes from sch1 to sch2.\n idx = randint(0, len(sch1)-1)\n tmp = sch1[:idx]\n sch1[:idx] = sch2[:idx]\n sch2[:idx] = tmp\n return [sch1, sch2]", "def _cut(self, tessellation, limit, unique_id):\n print(\"Preparing limit for edge resolving...\")\n geometry_cut = _split_lines(limit, 100)\n\n print(\"Building R-tree...\")\n sindex = tessellation.sindex\n # find the points that intersect with each subpolygon and add them to points_within_geometry\n print(\"Identifying edge cells...\")\n to_cut = pd.DataFrame()\n for poly in tqdm(geometry_cut, total=(len(geometry_cut))):\n # find approximate matches with r-tree, then precise matches from those approximate ones\n possible_matches_index = list(sindex.intersection(poly.bounds))\n possible_matches = tessellation.iloc[possible_matches_index]\n precise_matches = possible_matches[possible_matches.intersects(poly)]\n to_cut = to_cut.append(precise_matches)\n\n # delete duplicates\n to_cut = to_cut.drop_duplicates(subset=[unique_id])\n subselection = list(to_cut.index)\n\n print(\"Cutting...\")\n for idx, rgeom in tqdm(\n tessellation.loc[subselection].geometry.iteritems(),\n total=tessellation.loc[subselection].shape[0],\n ):\n intersection = rgeom.intersection(limit)\n if intersection.type == \"MultiPolygon\":\n areas = {}\n for p, i in enumerate(intersection):\n area = intersection[p].area\n areas[p] = area\n maximal = 
max(areas.items(), key=operator.itemgetter(1))[0]\n tessellation.loc[idx, \"geometry\"] = intersection[maximal]\n elif intersection.type == \"GeometryCollection\":\n for geom in list(intersection.geoms):\n if geom.type != \"Polygon\":\n pass\n else:\n tessellation.loc[idx, \"geometry\"] = geom\n else:\n tessellation.loc[idx, \"geometry\"] = intersection\n return tessellation, sindex", "def slice_batch(x, n_gpus, part):\n sh = K.shape(x)\n L = sh[0] // n_gpus\n if part == n_gpus - 1:\n return x[part*L:]\n return x[part*L:(part+1)*L]", "def cyclic_subgroups(C):\n ps = C.points()\n Gs = []\n \n for i in ps:\n P = Elliptic_Point(i[0],i[1],C)\n Gs.append(cyclic_subgroup(P))\n\n return Gs", "def get_random_coreset(self, size=30):\n return None", "def test_clumerge_general(\n prng: Generator,\n ndims,\n ds_cg_n,\n ds_ot_n,\n ds_od_n,\n no_clusters_field,\n):\n if ds_cg_n + ds_ot_n + ds_od_n > 0:\n datasets: MutableSequence[NamedTuple | Mapping[str, ArrayLike]] = []\n tclu: int = 0\n tpts: int = 0\n\n # Create data sets with clugen()\n for _ in range(ds_cg_n):\n # clugen() should run without problem\n with warnings.catch_warnings():\n # Check that the function runs without warnings\n warnings.simplefilter(\"error\")\n\n ds_cg = clugen(\n ndims,\n prng.integers(1, high=11),\n prng.integers(1, high=101),\n prng.random(size=ndims),\n prng.random(),\n prng.random(size=ndims),\n prng.random(),\n prng.random(),\n prng.random(),\n allow_empty=True,\n rng=prng,\n )\n\n if no_clusters_field:\n tclu = max(tclu, max(ds_cg.clusters))\n else:\n tclu += len(unique(ds_cg.clusters))\n\n tpts += len(ds_cg.points)\n\n datasets.append(ds_cg)\n\n # Create non-clugen() data sets as named tuples\n for _ in range(ds_ot_n):\n npts = prng.integers(1, high=101)\n nclu = prng.integers(1, high=min(3, npts) + 1)\n ds_ot = _PointsClusters(\n prng.random((npts, ndims)), prng.integers(1, high=nclu + 1, size=npts)\n )\n if no_clusters_field:\n tclu = max(tclu, max(ds_ot.clusters))\n else:\n tclu += len(unique(ds_ot.clusters))\n\n tpts += npts\n\n datasets.append(ds_ot)\n\n # Create non-clugen() data sets as dictionaries\n for _ in range(ds_od_n):\n npts = prng.integers(1, high=101)\n nclu = prng.integers(1, high=min(3, npts) + 1)\n ds_od = {\n \"points\": prng.random((npts, ndims)),\n \"clusters\": prng.integers(1, high=nclu + 1, size=npts),\n }\n if no_clusters_field:\n tclu = max(tclu, max(ds_od[\"clusters\"]))\n else:\n tclu += len(unique(ds_od[\"clusters\"]))\n\n tpts += npts\n\n datasets.append(ds_od)\n\n # Prepare optional keywords parameters\n kwargs: dict[str, Any] = {}\n if no_clusters_field:\n kwargs[\"clusters_field\"] = None\n\n # clumerge() should run without problem\n with warnings.catch_warnings():\n # Check that the function runs without warnings\n warnings.simplefilter(\"error\")\n\n mds: dict[str, NDArray] = clumerge(*datasets, **kwargs)\n\n # Check that the number of points and clusters is correct\n expect_shape = (tpts,) if ndims == 1 else (tpts, ndims)\n\n assert mds[\"points\"].shape == expect_shape\n assert max(mds[\"clusters\"]) == tclu\n assert can_cast(mds[\"clusters\"].dtype, int64)", "def _split_block(block: PruningBlock, list_output_channels: List[int]) -> List[PruningBlock]:\n if len(list_output_channels) == 1:\n raise RuntimeError\n\n dot_product = reduce((lambda x, y: x * y), list_output_channels)\n\n current_size = dot_product\n new_blocks = []\n divided_shapes = filter(lambda x: x != 1, list_output_channels)\n for divided_shape in divided_shapes:\n offset = int(current_size % dot_product)\n 
current_size /= divided_shape\n new_block = copy.copy(block)\n new_block.size = int(current_size)\n new_block.offset = offset\n new_blocks.append(new_block)\n return new_blocks", "def rest_of_ORF(dna):\n num_codons = int(len(dna)/3)\n num = 0\n list_codons = []\n cut_dna = ''\n stop_index = -1\n while num < num_codons:\n num_start = int(num*3)\n num_end = int(num*3 + 3)\n list_codons.append(dna[num_start:num_end])\n num = num + 1\n for element in list_codons:\n if element == 'TAA' or element == 'TAG' or element == 'TGA':\n stop_index = list_codons.index(element)\n break\n\n if stop_index != -1:\n this_ORF = list_codons[0:stop_index]\n for element in this_ORF:\n cut_dna = cut_dna + element\n return cut_dna\n else:\n return dna", "def __filter_vertices(k, coreness, *args, **kwargs):\n return list(filter(lambda i: coreness[i] >= k, range(len(coreness))))", "def inj_seg(self, exclude_coinc_flags=None):\n\n if exclude_coinc_flags is None:\n exclude_coinc_flags = []\n\n tmp_list = segments.segmentlist([])\n for key in self.exc_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.exc_dict[key])\n for key in self.seg_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.seg_dict[key])\n for key in self.bitmask_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.bitmask_dict[key])\n if self.schedule_time:\n seg = segments.segment(self.schedule_time, self.schedule_time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n for time in self.gracedb_time:\n seg = segments.segment(time, time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n return tmp_list", "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def low_index_subgroups(G, N, Y=[]):\n C = CosetTable(G, [])\n R = G.relators()\n # length chosen for the length of the short relators\n len_short_rel = 5\n # elements of R2 only checked at the last step for complete\n # coset tables\n R2 = set([rel for rel in R if len(rel) > len_short_rel])\n # elements of R1 are used in 
inner parts of the process to prune\n # branches of the search tree,\n R1 = set([rel.identity_cyclic_reduction() for rel in set(R) - R2])\n R1_c_list = C.conjugates(R1)\n S = []\n descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y)\n return S def gr(node,cut,bins,partType = None,startFrame=0,endFrame=None,step=1):\n\t# endFrame defaults to the full trajectory; a def-time default cannot reference 'node'\n\tif endFrame is None:\n\t\tendFrame = node.source.num_frames\n\t# If particle type is given delete all other particles\n\tif partType:\n\t\t#node.modifiers.append(SelectTypeModifier(types={partType}))\n\t\tnode.modifiers.append(ExpressionSelectionModifier(expression = 'fast==1'))\n\t\tnode.modifiers.append(InvertSelectionModifier())\n\t\tnode.modifiers.append(DeleteSelectedModifier())\n\tnode.modifiers.append(set_cell)\n\tmodifier = CoordinationAnalysisModifier(cutoff = cut, number_of_bins = bins)\n\tnode.modifiers.append(modifier)\n\trdf = np.zeros((bins,2), float)\n\tcounted = 0\n\tfor frame in range(startFrame,endFrame,step):\n\t\tif frame%10 == 0:\n\t\t\tprint ('Frame: ',frame)\n\t\t# # Compute normalized bond vectors\n\t\tdata = node.compute(frame)\n\t\t#print(data.particles.count)\n\t\trdf+=data.tables['coordination-rdf'].xy()\n\t\tcounted +=1\n\trdf/=counted\n\tr = rdf[:,0]\n\tgr = rdf[:,1]\n\treturn r,gr def brute_force_cow_transport(cows,limit=10):\n trip_options = []\n\n for partition in get_partitions(cows.items()):\n ledger = [] #clear trips ledger between \n \n for trip in partition:\n trip_wt = sum(cow[1] for cow in trip)\n if trip_wt <= limit:\n ledger.append([cow[0] for cow in trip]) #adds list of names to list\n continue\n else: break #next partition but hits next line first...\n \n if len(ledger) == len(partition): #checks if above loop completed vs broke\n trip_options.append(ledger) \n\n return trip_options def generateCutList(cut_configuration):\r\n\t#Check that this line reads json.loads(cut_configuration)\r\n\tinput_json = json.load(cut_configuration)\r\n\r\n\t#Currently only desired_cut and laser_cut_config are required\r\n\ttry:\r\n\t\tblock = input_json[\"block\"]\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\tcut = input_json[\"desired_cut\"]\r\n\t\tlaser = input_json[\"laser_cut_config\"]\r\n\texcept:\r\n\t\traise Exception(\"Either desired_cut or laser_cut_config not provided\")\r\n\r\n\tif cut[\"cut_process\"] == \"line\":\r\n\t\tfinal_list = line(cut[\"x1\"],cut[\"y1\"],cut[\"x2\"],cut[\"y2\"],cut[\"final_dimension_z\"]+laser[\"z_final_overshoot\"],laser)\r\n\telif cut[\"cut_process\"] == \"simple_core\":\r\n\t\tfinal_list = simple_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"vertical_core\":\r\n\t\tfinal_list = vertical_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"oss_stacked\":\r\n\t\tfinal_list = oss_stacked(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"z_focus\":\r\n\t\tfinal_list = z_focus(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"cross\":\r\n\t\tfinal_list = cross(block,cut,laser)\r\n\telse:\r\n\t\traise Exception(\"No such cut exists: Check cut_process\")\r\n\t#print(time_taken(final_list, laser))\r\n\tnow = datetime.now()\r\n\ttimestamp = str(now.strftime(\"%m-%d_%H_%M\"))\r\n\tcomplete_name = os.path.join(save_path, timestamp+\".csv\")\r\n\twith open(complete_name, mode='w',newline ='') as test_data:\r\n\t data_writer = csv.writer(test_data, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\t list_data = json.loads(final_list)\r\n\t for line1 in list_data:\r\n\t \tdata_writer.writerow(line1)\r\n\treturn final_list def core_number(G):\n if nx.number_of_selfloops(G) > 0:\n msg = (\n \"Input graph has self loops which is not permitted; \"\n \"Consider using G.remove_edges_from(nx.selfloop_edges(G)).\"\n )\n raise NetworkXError(msg)\n degrees = dict(G.degree())\n # Sort nodes by degree.\n nodes = sorted(degrees, key=degrees.get)\n bin_boundaries = [0]\n curr_degree = 0\n for i, v in enumerate(nodes):\n if degrees[v] > curr_degree:\n bin_boundaries.extend([i] * (degrees[v] - curr_degree))\n curr_degree = degrees[v]\n node_pos = {v: pos for pos, v in enumerate(nodes)}\n # The initial guess for the core number of a node is its degree.\n core = degrees\n nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}\n for v in nodes:\n for u in nbrs[v]:\n if core[u] > core[v]:\n nbrs[u].remove(v)\n pos = node_pos[u]\n bin_start = bin_boundaries[core[u]]\n node_pos[u] = bin_start\n node_pos[nodes[bin_start]] = pos\n nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]\n bin_boundaries[core[u]] += 1\n core[u] -= 1\n return core def get_blocks(index):\r\n #call with -1 to get full blocklist\r\n #the reason this is a function instead of just a list is that originally\r\n #i had plans to support dynamic tilesets, for example if only a certain\r\n #number of each tile were available. in the end this didnt happen though\r\n all_blocks = [\r\n [[0,0,0],[1,1,1],[0,0,0]], #0 - (horizontal passage)\r\n [[0,1,0],[0,1,0],[0,1,0]], #1 | (vertical passage)\r\n \r\n [[0,0,0],[1,1,0],[0,1,0]], #2 >v various L-junctions\r\n [[0,1,0],[1,1,0],[0,0,0]], #3 >^\r\n [[0,0,0],[0,1,1],[0,1,0]], #4 ^>\r\n [[0,1,0],[0,1,1],[0,0,0]], #5 v>\r\n \r\n [[0,0,0],[0,0,0],[0,0,0]], #6 0 empty\r\n [[0,1,0],[1,1,1],[0,1,0]], #7 + cross\r\n \r\n [[0,1,0],[1,1,1],[0,0,0]], #8 _|_ various T-junctions\r\n [[0,0,0],[1,1,1],[0,1,0]], #9 T\r\n [[0,1,0],[1,1,0],[0,1,0]], #10 -|\r\n [[0,0,0],[1,1,1],[0,0,0]]] #11 |-\r\n \r\n# [[0,1,0],[0,1,0],[0,0,0]], #12 #unused \"dead end\" pieces\r\n# [[0,0,0],[0,1,0],[0,1,0]], #13\r\n# [[0,0,0],[0,1,1],[0,0,0]], #14\r\n# [[0,0,0],[1,1,0],[0,0,0]] ]#15\r\n if index == -1:\r\n return all_blocks\r\n else:\r\n return all_blocks[index] def num_43():\n \n def block(a, rs=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_order=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e def gates_per_clifford(qobj_list, clifford_length, basis, qubits):\n\n #TO DO\n\n pass def newcogfind(ncf_cogs, ncf_twogs, ncf_prots):\n coglijst_hoogste_waarde = list(set([y.split()[0] for y in ncf_cogs]))\n werkgetal = vindhoogstecognummer(coglijst_hoogste_waarde)\n ncf_list = []\n for prot in ncf_prots:\n prot_in_twogs = zoekinlijst(prot, 
ncf_twogs, 1)\n posible_found_twogs = verbrogentwogs(prot, ncf_twogs, prot_in_twogs)\n if posible_found_twogs != list():\n newcog = list(set(\" \".join(posible_found_twogs).split()))\n werkgetal += 1\n for incog in newcog:\n cogzin = \"%s\\t%s\\n\" % ((\"0000000000%s\" % werkgetal)[-8:], incog)\n ncf_list.append(cogzin)\n twogremove(zoekinlijst(incog, ncf_twogs, 0), ncf_twogs)\n voegtoeaancog(ncf_list)", "def CutEdge(self, *args):\n return _BRepAlgo.BRepAlgo_Loop_CutEdge(self, *args)", "def process_component(COMP, G, max_k, min_length, max_CV, SEQS, bamfile, pool, use_scores=False, use_genes=False, num_procs=1):\n\n ###############MOVED FROM OUTER CODE ON WHOLE G\n if use_scores: remove_hi_confidence_chromosome(COMP) ##################################\n\n # initialize shortest path set considered\n path_count = 0\n seen_unoriented_paths = set([])\n paths_set = set([]) #the set of paths found\n\n\n # first look for paths starting from the nodes annotated with plasmid genes\n if use_genes:\n plasmid_gene_nodes = get_plasmid_gene_nodes(COMP)\n potential_plasmid_mass_tuples = [(get_spades_base_mass(COMP,nd),nd) for nd in plasmid_gene_nodes]\n potential_plasmid_mass_tuples.sort(key = lambda n: n[0])\n while potential_plasmid_mass_tuples: # could be removing other nodes from the list\n top_node = potential_plasmid_mass_tuples.pop() # highest mass node\n top_node_name = top_node[1]\n path = get_high_mass_shortest_path(top_node_name,COMP,use_scores,use_genes) #######\n if path is None: continue\n # check coverage variation\n path_CV = get_wgtd_path_coverage_CV(path,G,SEQS,max_k_val=max_k)\n logger.info(\"Plasmid gene path: %s, CV: %4f\" % (str(path),path_CV))\n if path_CV <= max_CV and is_good_cyc(path,G,bamfile):\n logger.info(\"Added plasmid gene path %s\" % (str(path)))\n\n # prevent checking nodes that have been removed\n i = 0\n while i < len(potential_plasmid_mass_tuples):\n if potential_plasmid_mass_tuples[i][1] in path or \\\n rc_node(potential_plasmid_mass_tuples[i][1]) in path:\n potential_plasmid_mass_tuples.pop(i)\n else: i += 1\n\n seen_unoriented_paths.add(get_unoriented_sorted_str(path))\n before_cov, _ = get_path_mean_std(path, G, SEQS, max_k)\n covs = update_path_coverage_vals(path, G, SEQS, max_k)\n update_path_with_covs(path, COMP, covs)\n path_count += 1\n paths_set.add((path,before_cov))\n\n else:\n logger.info(\"Did not add plasmid gene path: %s\" % (str(path)))\n\n # then look for circular paths that start from hi confidence plasmid nodes\n if use_scores:\n potential_plasmid_nodes = get_hi_conf_plasmids(COMP)\n potential_plasmid_mass_tuples = [(get_spades_base_mass(COMP,nd),nd) for nd in potential_plasmid_nodes]\n potential_plasmid_mass_tuples.sort(key = lambda n: n[0])\n while potential_plasmid_mass_tuples: # could be removing other nodes from the list\n top_node = potential_plasmid_mass_tuples.pop() # highest mass node\n top_node_name = top_node[1]\n path = get_high_mass_shortest_path(top_node_name,COMP,use_scores,use_genes)\n if path is None: continue\n # check coverage variation\n path_CV = get_wgtd_path_coverage_CV(path,G,SEQS,max_k_val=max_k)\n logger.info(\"Hi conf path: %s, CV: %4f\" % (str(path),path_CV))\n\n if path_CV <= max_CV and is_good_cyc(path,G,bamfile):\n logger.info(\"Added hi conf path %s\" % (str(path)))\n\n # prevent checking nodes that have been removed\n i = 0\n while i < len(potential_plasmid_mass_tuples):\n if potential_plasmid_mass_tuples[i][1] in path or \\\n rc_node(potential_plasmid_mass_tuples[i][1]) in path:\n 
potential_plasmid_mass_tuples.pop(i)\n else: i += 1\n\n seen_unoriented_paths.add(get_unoriented_sorted_str(path))\n before_cov, _ = get_path_mean_std(path, G, SEQS, max_k)\n #before_cov, _ = get_path_mean_std(path, COMP, SEQS, max_k)\n covs = update_path_coverage_vals(path, G, SEQS, max_k)##########################\n update_path_with_covs(path, COMP, covs) ####################################\n path_count += 1\n paths_set.add((path,before_cov))\n\n else:\n logger.info(\"Did not add hi-conf path: %s\" % (str(path)))\n\n # 3rd step. Run Recycler algorithm that looks for circular high mass shortest\n # paths and accept them as plasmid predictions if the coverages and mate pairs\n # match the required thresholds\n#######################################################################################\n#######################################################################################\n\n\n paths = enum_high_mass_shortest_paths(COMP, pool, use_scores,use_genes,seen_unoriented_paths)\n last_path_count = 0\n last_node_count = 0\n\n # continue as long as you either removed a low mass path\n # from the component or added a new path to final paths\n while(path_count!=last_path_count or\\\n len(COMP.nodes())!=last_node_count):\n\n last_node_count = len(COMP.nodes())\n last_path_count = path_count\n\n # make tuples of (CV, path)\n path_tuples = []\n for p in paths:\n if len(get_seq_from_path(p, SEQS, max_k_val=max_k)) < min_length:\n seen_unoriented_paths.add(get_unoriented_sorted_str(p))\n logger.info(\"Num seen paths: %d\" % (len(seen_unoriented_paths)))\n continue\n path_tuples.append((get_wgtd_path_coverage_CV(p,G,SEQS,max_k_val=max_k), p))\n\n logger.info(\"Num path tuples: %d\" % (len(path_tuples)))\n if(len(path_tuples)==0): break\n\n # sort in ascending CV order\n path_tuples.sort(key=lambda path: path[0])\n\n for pt in path_tuples:\n curr_path = pt[1]\n curr_path_CV = pt[0]\n logger.info(\"Path: %s\" % (\",\".join(curr_path)))\n if get_unoriented_sorted_str(curr_path) not in seen_unoriented_paths:\n\n ## only report if low CV and matches mate pair info\n if (curr_path_CV <= (max_CV) and \\\n is_good_cyc(curr_path,G,bamfile)):\n\n logger.info(\"Added path %s\" % \", \".join(curr_path))\n logger.info(\"\\tCV: %4f\" % curr_path_CV)\n seen_unoriented_paths.add(get_unoriented_sorted_str(curr_path))\n #before_cov, _ = get_path_mean_std(curr_path, COMP, SEQS, max_k)\n before_cov, _ = get_path_mean_std(curr_path, G, SEQS, max_k)\n covs = update_path_coverage_vals(curr_path, G, SEQS, max_k)\n update_path_with_covs(curr_path, COMP, covs)\n path_count += 1\n paths_set.add((curr_path,before_cov))\n break\n\n else:\n logger.info(\"Did not add path: %s\" % (\", \".join(curr_path)))\n logger.info(\"\\tCV: %4f\" % curr_path_CV)\n if curr_path_CV > max_CV:\n break # sorted by CV\n else: # not good mate pairs\n seen_unoriented_paths.add(get_unoriented_sorted_str(curr_path))\n\n # recalculate paths on the component\n print(str(len(COMP.nodes())) + \" nodes remain in component\")\n logger.info(\"Remaining nodes: %d\" % (len(COMP.nodes())))\n paths = enum_high_mass_shortest_paths(COMP, pool, use_scores,use_genes,seen_unoriented_paths)\n\n #end while\n return paths_set", "def slice(self, evidence={}):\n return self.condition(evidence)\n \n \n\n# def eliminate(self, elimVars, elimOp):\n # TODO: awkward way to define this; convert to more direct implementation?\n for v in elimVars:\n if len(self.markovBlanket(v)) > 2: raise ValueError(\"Cannot eliminate {} with {} (>2) 
neighbors\".format(v,len(self.markovBlanket(v))))\n flist = self.factorsWith(v)\n gm_model = GraphModel(flist); print(gm_model); gm_model.eliminate([v],elimOp)\n fnew = gm_model.factors[0]\n self.removeFactors(flist); # doesn't quite work? numerical roundoff issues?\n self.L[v,:] = 0; self.L[:,v] = 0; self.h[v] = 0; # TODO: better to mark as removed? how?\n self.addFactors([fnew])\n # TODO: \"remove\" variable by setting states = 0? \"known value\" = 0?", "def find_optimal_components_subset(contours, edges):\n c_info = props_for_contours(contours, edges)\n c_info.sort(key=lambda x: -x['sum'])\n total = np.sum(edges) / 255\n area = edges.shape[0] * edges.shape[1]\n\n c = c_info[0]\n del c_info[0]\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n crop = this_crop\n covered_sum = c['sum']\n\n while covered_sum < total:\n changed = False\n recall = 1.0 * covered_sum / total\n prec = 1 - 1.0 * crop_area(crop) / area\n f1 = 2 * (prec * recall / (prec + recall))\n # print '----'\n for i, c in enumerate(c_info):\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n new_crop = union_crops(crop, this_crop)\n new_sum = covered_sum + c['sum']\n new_recall = 1.0 * new_sum / total\n new_prec = 1 - 1.0 * crop_area(new_crop) / area\n new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)\n\n # Add this crop if it improves f1 score,\n # _or_ it adds 25% of the remaining pixels for <15% crop expansion.\n # ^^^ very ad-hoc! make this smoother\n remaining_frac = c['sum'] / (total - covered_sum)\n new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1\n if new_f1 > f1 or (\n remaining_frac > 0.25 and new_area_frac < 0.15):\n print('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (\n i, covered_sum, new_sum, total, remaining_frac,\n crop_area(crop), crop_area(new_crop), area, new_area_frac,\n f1, new_f1))\n crop = new_crop\n covered_sum = new_sum\n del c_info[i]\n changed = True\n break\n\n if not changed:\n break\n\n return crop", "def cutting(args):\n import numpy as np\n import h5py\n\n # Read in map data\n with h5py.File(args.pointmap, 'r') as f:\n ptmap = f['map'][...]\n\n if args.threshold > 0:\n cut_map = np.where(ptmap<args.threshold, 0, ptmap)\n else:\n idx = np.unravel_index(np.argmax(ptmap), ptmap.shape) # the index of the max element\n cut_map = np.zeros_like(ptmap)\n cut_map[idx] = ptmap[idx]\n\n # Create output image file name\n if args.outfile:\n out_file = args.outfile\n elif args.threshold > 0:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_' + str(int(args.threshold)) + '.hdf5'\n else:\n out_file = ((args.pointmap.split('/')[-1]).split('.')[0]).replace('sim', 'cut') + '_max.hdf5'\n\n # Save cut data\n with h5py.File(out_file, 'w') as f:\n f.create_dataset('map', data=cut_map)\n\n print 'done!'", "def prep_optics(SetofObjects, epsilon):\n\n for j in SetofObjects._index:\n # Find smallest nonzero distance\n SetofObjects._core_dist[j] = np.sort(SetofObjects.data[j,:])[1]\n print(\n 'Core distances and neighborhoods prepped for ' + str(\n SetofObjects._n) + ' points.')", "def test_conv_mol_deg_slice(self):\n atom_features = np.array([[20, 21, 22, 23], [24, 25, 26, 27],\n [28, 29, 30, 31], [32, 33, 34, 35]])\n adj_list = [[1, 2], [0, 3], [0, 3], [1, 2]]\n mol = ConvMol(atom_features, adj_list)\n\n assert np.array_equal(\n mol.get_deg_slice(),\n # 0 atoms of degree 0\n # 0 atoms of degree 1\n # 4 atoms of degree 2\n # 0 atoms of degree 3\n # 0 atoms of degree 4\n # 0 atoms of degree 5\n # 0 atoms of degree 6\n # 0 atoms of degree 7\n # 0 
atoms of degree 8\n # 0 atoms of degree 9\n # 0 atoms of degree 10\n np.array([[0, 0], [0, 0], [0, 4], [0, 0], [0, 0], [0, 0], [0, 0],\n [0, 0], [0, 0], [0, 0], [0, 0]]))", "def optimize_graph( complete, desired, algo, simi_cutoff ) :\n if (algo == \"trim\") :\n return trim_cluster( desired, desired.nodes(), 2 )", "def neighborhood_sample(solution, ssize):\n n_runs = len(solution)\n n_clusters = len(solution[0])\n \n swaps_per_run = n_clusters * (n_clusters-1) // 2\n max_size = n_runs * swaps_per_run\n \n ssize = min(ssize, max_size)\n \n combinations = list(itertools.combinations(range(n_clusters), 2))\n selection = random.sample(range(max_size), ssize)\n \n nb = [0] * ssize\n \n for i_nb, i_sel in enumerate(selection):\n i_run = i_sel // swaps_per_run\n i_comb = i_sel % swaps_per_run\n \n i1, i2 = combinations[i_comb]\n swapped = solution[i_run][:]\n swapped[i1], swapped[i2] = swapped[i2], swapped[i1]\n nb[i_nb] = solution[:i_run] + [swapped] + solution[i_run+1:]\n \n return selection, nb", "def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers):\r\n\tcutlist = []\r\n\ty_max = abs(y1-y2)\r\n\tfor a in range(layers):\r\n\t\ti = 0\r\n\t\tnew_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y\r\n\t\twhile abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0:\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_y1 = new_y1-delta\r\n\t\t\ti = i + 1\r\n\t\tif a < layers - 1:\r\n\t\t\tcutlist.append([\"z_step\", str(-deltaz)])\r\n\t\ty_max = y_max - taper_straight - taper_y\r\n\r\n\treturn cutlist", "def training_batch_selection(train_set_size, input_img):\n\n input_dims = input_img.shape\n all_data_indices = np.arange(input_dims[0]*input_dims[1])\n all_data_indices = all_data_indices.reshape(input_dims[:-1])\n\n conf = get_config()\n inside = int(np.floor(conf[\"inside_part\"]))\n outside = int(np.floor(conf[\"outside_part\"]))\n cx = ii.image.c_x\n cy = ii.image.c_y\n\n # Find the position of the crop in the image and determine part least affected by radial distortion\n center_x = (conf[\"crop\"][\"left_top\"]['x'] + conf[\"crop\"][\"size\"][\"height\"]/2)*2/ii.image.height-1.\n center_y = (conf[\"crop\"][\"left_top\"]['y'] + conf[\"crop\"][\"size\"][\"width\"]/2)*2/ii.image.width-1.\n\n left_right_center = max(min((center_y-cy)*2, 1), -1)\n top_bot_center = max(min((center_x - cx) * 2, 1), -1)\n\n # DEBUG: set your own center\n # left_right_center = 0\n # top_bot_center = 0\n\n from_x = int(round(inside*(1.-top_bot_center)))\n to_x = min(-int(round(inside*(1.+top_bot_center))), -1)\n from_y = int(round(inside*(1.-left_right_center)))\n to_y = min(-int(round(inside*(1.+left_right_center))), -1)\n\n # Exclude part that is minimally affected by radial distortion\n selection_exclude = all_data_indices[from_x:to_x, from_y:to_y]\n selection_exclude = selection_exclude.reshape(-1)\n\n # Exclude outer border in order to avoid index out of bounds\n selection_include = all_data_indices[outside:-outside,\n outside:-outside]\n selection_include = selection_include.reshape(-1)\n\n selection = [x for x in selection_include if x not in selection_exclude]\n selection = np.random.permutation(selection)\n\n # DEBUG: forcing larger 
training set\n train_set_size = int(train_set_size*2)\n\n selection = selection[:train_set_size]\n\n # DEBUG: display image region for selection\n image = input_img.reshape(-1)\n image[:] = 0\n image[selection_include] = input_img.reshape(-1)[selection_include]\n image[selection_exclude] = 0\n image = image.reshape(input_dims[0], input_dims[1])\n Verbose.imshow(image, Verbose.debug)\n\n return selection", "def get_obstList(self,X,Y,Z):\n #Pipe in - find all points exterior of small\n\tpipe_in = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_in/2)**2)).flatten()\n\tpipe_in_stop = np.array(np.where(Z <= 3 + 0.5*(self.diam_out - self.diam_in))).flatten()\n\tpipe_in = np.intersect1d(pipe_in[:],pipe_in_stop[:])\n\n\t#Expansion - find all points exterior of expansion\n\tr_cone = self.diam_in\n\th_cone = self.diam_in\t\n\texpansion = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (r_cone/h_cone)**2*(Z - 3)**2)).flatten()\n\texpansion_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_out - self.diam_in)))\n\t#expansion_stop = np.array(np.where(Z <= 4)).flatten()\n\texpansion = np.intersect1d(expansion[:],expansion_start[:])\n\t#expansion = np.intersect1d(expansion[:],expansion_stop[:])\n\n\t#Pipe out - final all points exterior of smaller pipe\n\tpipe_out = np.array(np.where((X - 1)**2 + (Y - 1)**2 > (self.diam_out/2)**2)).flatten()\n\tpipe_out_start = np.array(np.where(Z >= 3 + 0.5*(self.diam_in - self.diam_out))).flatten()\n\tpipe_out = np.intersect1d(pipe_out[:],pipe_out_start[:])\n\n\n\t#Put the pieces together\n\n\tpipe = expansion[:]\n\tpipe = np.union1d(expansion[:],pipe_in[:])\n\tpipe = np.union1d(pipe[:],pipe_out[:])\n\n\tobst_list = pipe[:]\n\n \n return list(obst_list[:])" ]
[ "0.55150145", "0.5500191", "0.5455198", "0.5378894", "0.53707415", "0.5246031", "0.5238846", "0.5228055", "0.51568055", "0.51228464", "0.51081675", "0.51019835", "0.5090313", "0.5088919", "0.50825244", "0.5066888", "0.50596964", "0.5026915", "0.5015684", "0.5010477", "0.50010544", "0.49980006", "0.49813336", "0.49644715", "0.495699", "0.49527663", "0.4947649", "0.49471876", "0.49312317", "0.49226144", "0.49141884", "0.48928672", "0.48911062", "0.48602712", "0.48478734", "0.4834149", "0.48133707", "0.48094612", "0.48093885", "0.48066494", "0.47979447", "0.47935945", "0.47857898", "0.4762969", "0.47612348", "0.4760677", "0.4753024", "0.47522882", "0.4752236", "0.47494423", "0.47386533", "0.4732009", "0.47295615", "0.4718848", "0.471418", "0.46992654", "0.4694065", "0.46932712", "0.46932676", "0.46811473", "0.46765152", "0.4667459", "0.46500507", "0.46499527", "0.464955", "0.46432474", "0.46429476", "0.4635794", "0.46345335", "0.463417", "0.46209702", "0.46107376", "0.4609579", "0.4596479", "0.45862156", "0.45847982", "0.45811975", "0.45760527", "0.45756587", "0.45695662", "0.45687905", "0.45678735", "0.45657575", "0.45643803", "0.45599198", "0.4546198", "0.45423433", "0.4537044", "0.45362765", "0.452873", "0.45197123", "0.45166442", "0.45135242", "0.4511159", "0.4509791", "0.4509103", "0.45089307", "0.45057487", "0.45020267", "0.44968855" ]
0.5538502
0
This algorithm takes a cutlist and returns an estimate of the time required to execute it
def time_taken(json_cutlist, laser):
    cutlist = json.loads(json_cutlist)
    time = 0
    coordinate_array = [0, 0]
    for a in cutlist:
        if a[0] == "jump" or a[0] == "mark":
            coordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) - coordinate_array[1]]
            mag = math.sqrt(coordinate_array[0]**2 + coordinate_array[1]**2)
            if a[0] == "jump":
                time += mag/laser["jump_speed"]
            else:
                time += mag/laser["mark_speed"]
            coordinate_array = [float(a[1]), float(a[2])]
        elif a[0] == "z_abs" or a[0] == "z_rel":
            zSet = float(a[1])
        elif a[0] == "c_abs" or a[0] == "c_rel":
            cSet = float(a[1])
        elif a[0] == "a_abs" or a[0] == "a_rel":
            aSet = float(a[1])
        else:
            pass
    return str(datetime.timedelta(seconds=int(time)))
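A minimal usage sketch for the document above, assuming `time_taken` is in scope along with the `json`, `math`, and `datetime` imports its body requires. The cutlist format (a JSON array of `[command, x, y]` string triples) is read off the code itself; the laser speed values below are illustrative assumptions, not real hardware numbers.

import json

# Hypothetical laser config; only the two speed keys the function reads (assumed units per second).
laser = {"jump_speed": 1000.0, "mark_speed": 100.0}

# Jump to (0, 10), mark a 10-unit segment, then a relative z move (ignored in the time total).
cutlist = json.dumps([
    ["jump", "0", "10"],
    ["mark", "10", "10"],
    ["z_rel", "0.5"],
])

print(time_taken(cutlist, laser))  # -> "0:00:00": 0.11 s of motion, truncated to whole seconds by int()

Note that the function accumulates time only for "jump" and "mark" moves; axis commands update local variables but contribute nothing to the estimate, and sub-second totals vanish in the final int() truncation.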
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit_time(tmat,start_list,targ_list,ntraj = 1000, cutoff=1000):\n \n # get state names\n nstates = tmat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n times = list()\n \n for ii in range(ntraj):\n curr = choice(start_list)\n tm = 0\n while curr not in targ_list:\n weights = copy(tmat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n tm += 1\n if tm==cutoff:\n tm = nan\n break\n \n times.append(tm)\n\n return array(times)", "def ATS(trajectory, min_gini):\r\n if len(trajectory) == 0:\r\n return -1\r\n \r\n \r\n \"\"\"\r\n Build partitions by avg velocity\r\n \"\"\"\r\n \r\n partitionTime = 0\r\n simplificationTime = 0\r\n mergeTime = 0\r\n \r\n \r\n tStart = time.time()\r\n \r\n pair_list = get_split_pair(trajectory, min_gini)\r\n \r\n partitionTime = time.time() - tStart\r\n \r\n \r\n simplified_set = set()\r\n \r\n epsilon = 0\r\n \r\n # epsilon_dict = [0.004592016 / 111, 0.01176842 / 111, 0.02649389 / 111, 0.05039507 / 111] #g=0.4\r\n epsilon_dict = [0.003461152 / 111, 0.02017883 / 111, 0.03125521 / 111, 0.08043219 / 111] #g=0.1, MAX Final Version\r\n # epsilon_dict = [0.00502254864192/111, 0.0255854290033/111, 0.040629531118/111, 0.112363957637/111] # g=0.3 max\r\n # epsilon_dict = [0.0129068727529/111,0.0336671793759/111,0.0587065427654/111,0.16344623299/111] # g=0.5 max\r\n # epsilon_dict = [0.0395070706597/111,0.0868617737273/111,0.220840329515/111,0.224195931453/111] # g=0.7 max\r\n # epsilon_dict = [0.0494846202161/111, 0.0942020880901/111, 0.252738388544/111, 0.347107672303/111] # g=0.9 max\r\n \r\n \r\n \r\n \r\n for idx in xrange(len(pair_list)):\r\n \r\n tStartSimplification = time.time()\r\n \r\n start = pair_list[idx][0]\r\n end = pair_list[idx][1]\r\n \r\n sub_trajectory = trajectory[start:end+1]\r\n \r\n velocity_list = get_velocity(sub_trajectory)\r\n avg_velocity = sum(velocity_list) / float(len(velocity_list))\r\n L = label(avg_velocity)\r\n \r\n epsilon = epsilon_dict[L]\r\n \r\n \r\n # S = DP(sub_trajectory, epsilon) \r\n S = EBT(sub_trajectory, epsilon) \r\n\r\n S = [i+start for i in S]\r\n \r\n simplificationTime += time.time() - tStartSimplification\r\n \r\n tStartMerge = time.time()\r\n \r\n map(lambda i: simplified_set.add(i), S)\r\n \r\n mergeTime += time.time() - tStartMerge\r\n\r\n \r\n tStartMerge = time.time()\r\n \r\n simplified_list = list(simplified_set)\r\n simplified_list.sort()\r\n \r\n mergeTime += time.time() - tStartMerge\r\n \r\n ## print processing time in each steps##\r\n # print partitionTime, simplificationTime, mergeTime\r\n \r\n return simplified_list", "def _SD_optimal(t):", "def sim_hits(tmat,start_list,targ_list,ntraj = 1000, cutoff=1000):\n \n # get state names\n nstates = tmat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n trajs = list()\n \n for ii in range(ntraj):\n curr = choice(start_list)\n traj = list()\n traj.append(curr)\n while curr not in targ_list:\n weights = copy(tmat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n traj.append(curr)\n \n if len(traj)>=cutoff:\n # traj=[nan]\n break\n \n trajs.append(array(traj))\n\n return trajs", "def strategy_best(cookies, cps, time_left, build_info):\n items = build_info.build_items()\n result = None\n cost = 0\n overall = cookies + time_left * cps\n for item in items:\n temp_cost = build_info.get_cps(item) / build_info.get_cost(item)\n if temp_cost <= overall and cost < temp_cost:\n result = item\n cost = temp_cost\n return result", "def strategy_cheap(cookies, cps, time_left, build_info):\n items = build_info.build_items()\n result = 
None\n cost = float('+inf')\n overall = cookies + time_left * cps\n for item in items:\n temp_cost = build_info.get_cost(item)\n if temp_cost <= overall and cost > temp_cost:\n result = item\n cost = temp_cost\n return result", "def performance_test(n):\n \n def analyze_result(test_results, start_index, end_index, range_):\n \"\"\"Helper method - analyzes test results of one range (short, mid, or long).\n \n Args:\n test_results: list of test results containing tuples of (euclidean_distance, time_dijkstra, time_idastar)\n start_index: start index of this range in test results list\n end_index: end index of this range in test results list\n range_: Short, Mid, or Long range\n \n Returns:\n String describing analysis of test results in this range\n \"\"\"\n time_dij = 0\n time_ida = 0\n ida_not_finished = 0\n for i in range(start_index, end_index):\n result = test_results[i]\n time_dij += result[1]\n if result[2] == \"no_time\":\n ida_not_finished += 1\n else: # ida finished\n time_ida += result[2]\n result_dij = \"Algorithm: Dijkstra - Average time: \" + str(round(time_dij/(end_index-start_index)*1000, 3)) + \" ms\\n\"\n result_ida = \"Algorithm: IDA* - Average time: \" + str(round(time_ida/(end_index-start_index-ida_not_finished)*1000, 3)) + \" ms [\" + str(ida_not_finished) + \" attempts not finished]\\n\"\n header = range_ + \"-range results [\" + str(end_index-start_index) + \" tests]:\\n\"\n return header + result_dij + result_ida\n \n \n print(\"\\nCalculating...\\n\")\n \n test_results = []\n \n for _ in range(n):\n # random start and end cities\n start = cities[randint(0, len(cities)-1)]\n end = cities[randint(0, len(cities)-1)]\n while start == end:\n end = cities[randint(0, len(cities)-1)]\n # calculate euclidean distance\n x_diff = coordinates[start][0] - coordinates[end][0]\n y_diff = coordinates[start][1] - coordinates[end][1]\n eucl_dist = sqrt(x_diff**2 + y_diff**2)\n \n # Dijkstra\n dist_dij, path_dij, time_dij = dijkstra(start, end, adjlist)\n # IDA*\n dist_ida, path_ida, time_ida = idastar(start, end, adjlist, coordinates)\n \n test_results.append( (eucl_dist, time_dij, time_ida) )\n \n # Analysis of results\n test_results.sort()\n short_index = 0\n mid_index = n//3\n long_index = n//3*2\n \n # Short-range\n print(analyze_result(test_results, short_index, mid_index, \"Short\"))\n # Mid-range\n print(analyze_result(test_results, mid_index, long_index, \"Mid\"))\n # Long-range\n print(analyze_result(test_results, long_index, len(test_results), \"Long\"))", "def source_cut(env, \r\n number, \r\n counter,\r\n generation,\r\n generation_list_come,\r\n generation_list_wait,\r\n generation_list_begin,\r\n generation_list_finish,\r\n df_simtime,\r\n generation_list_name,\r\n sum_cut_number_list):\r\n sum_cut_number = 0\r\n for i in range(number):\r\n sample_j = np.random.choice(df_caltocut_distr['time'])\r\n sum_cut_number += sample_j\r\n for j in range(sample_j):\r\n if j == 0:\r\n if i == 0:\r\n t = generation_list_come[i]#到达时间服从指数分布,此处的t为间隔时间\r\n else:\r\n t = generation_list_come[i] - generation_list_come[i-1]\r\n else:\r\n t = 0\r\n \r\n yield env.timeout(t)\r\n serve_time = np.random.choice(df_simtime['sim_time'])#得到模拟数据\r\n # print(serve_time)\r\n c = document(env, \r\n 'Doc%02d_%02d' %(i,j), \r\n generation,\r\n counter, \r\n time_in_fac,\r\n generation_list_begin,\r\n generation_list_wait,\r\n generation_list_finish,\r\n serve_time,\r\n generation_list_name)\r\n env.process(c)\r\n sum_cut_number_list.append(sum_cut_number)", "def slow_dtw(base_list, test_list, 
extended=False):\r\n\r\n b = base_list.shape[0]\r\n t = test_list.shape[0]\r\n if (b > 0 and t > 0):\r\n DTW = np.zeros((b, t))\r\n cost = np.zeros((b, t))\r\n\r\n DTW[:, 0] = float('inf')\r\n DTW[0, :] = float('inf')\r\n DTW[0, 0] = 0.0\r\n\r\n for i in range(0, b):\r\n for j in range(0, t):\r\n dist = math.sqrt((test_list[j, 0] - base_list[i, 0]) ** 2 + (test_list[j, 1] - base_list[i, 1]) ** 2)\r\n cost[i, j] = dist\r\n if (i > 0 and j > 0):\r\n jminus2 = DTW[i - 1, j - 2] if j > 1 else float('inf')\r\n jminus1 = DTW[i - 1, j - 1]\r\n jeven = DTW[i - 1, j]\r\n minimum = min(jminus2, jminus1, jeven)\r\n DTW[i, j] = dist + minimum\r\n if (extended):\r\n return DTW[b - 1, t - 1], cost, DTW, _traceback(DTW)\r\n else:\r\n return DTW[b - 1, t - 1]", "def compute_profiling_time(key, expected_num_spikes, rate, t_stop, n,\n winlen, binsize, num_rep=10):\n\n time_fast_fca = 0.\n time_fpgrowth = 0.\n for rep in range(num_rep):\n # Generating artificial data\n data = []\n for i in range(n):\n np.random.seed(0)\n data.append(stg.homogeneous_poisson_process(\n rate=rate, t_start=0*pq.s, t_stop=t_stop))\n\n # Extracting Closed Frequent Itemset with FP-Growth\n t0 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = spade._build_context(binary_matrix,\n winlen)\n # Applying FP-Growth\n fim_results = [i for i in spade._fpgrowth(\n transactions,\n rel_matrix=rel_matrix,\n winlen=winlen)]\n time_fpgrowth += time.time() - t0\n\n # Extracting Closed Frequent Itemset with Fast_fca\n t1 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = \\\n spade._build_context(binary_matrix, winlen)\n # Applying FP-Growth\n fim_results = spade._fast_fca(context, winlen=winlen)\n time_fast_fca += time.time() - t1\n\n time_profiles = {'fp_growth': time_fpgrowth/num_rep,\n 'fast_fca': time_fast_fca/num_rep}\n\n # Storing data\n res_path = '../results/{}/{}/'.format(key, expected_num_spikes)\n # Create path is not already existing\n path_temp = './'\n for folder in split_path(res_path):\n path_temp = path_temp + '/' + folder\n mkdirp(path_temp)\n\n np.save(res_path + '/profiling_results.npy', {'results': time_profiles,\n 'parameters': {'rate': rate, 't_stop': t_stop, 'n': n,\n 'winlen': winlen, 'binsize': binsize}})", "def congestionOccurrence(k, routeList, bestTime, extraTime):\n extraTimeTimeOut = 0\n\n while extraTime < 0:\n randomNum = random.randint(0, k - 1)\n routeSelection = routeList[randomNum]\n routeTime = sim.getGlobalRoutePathTime(routeSelection)\n extraTime = routeTime - bestTime\n\n extraTimeTimeOut += 1\n\n if extraTimeTimeOut:\n extraTime = 0\n break\n\n return extraTime", "def time_complexities():\n return \"Best Case: O(n), Average Case: O(n), Worst Case: O(n)\"", "def set_takeoff_time(input_list):\n print(\"========================Start of set_takeoff_time() Method *\")\n if len(input_list) > 0: # if list is not empty\n 
input_list[0].set_actualStart(max(int(input_list[0].get_actualStart()), int(input_list[0].get_reqStart())))\n\n for i in range(1, len(input_list)): # from 2nd item onwards\n wait_period = input_list[i - 1].get_actualStart() + (int(input_list[i - 1].get_reqDuration()))\n input_list[i].set_actualStart(wait_period)\n \n for j in range(len(input_list)):\n input_list[j].set_actualEnd(\n (int(input_list[j].get_reqDuration())+int(input_list[j].get_actualStart())))\n \n print(\"========================finished calc take-off times *\")\n # Testing if we have adjusted actual takeoff start and end times for each flight\n Airport_Driver.display_contents(input_list)\n print(\"========================End of set_takeoff_time() Method *\")\n return input_list", "def strategy_expensive(cookies, cps, time_left, build_info):\n items = build_info.build_items()\n result = None\n cost = float('-inf')\n overall = cookies + time_left * cps\n for item in items:\n temp_cost = build_info.get_cost(item)\n if temp_cost <= overall and cost < temp_cost:\n result = item\n cost = temp_cost\n return result", "def find_Hits(List,tol):\n Hits=[]\n for i in xrange(0,len(List)-1):\n item1 = List[i]\n item2 = List[i+1]\n if item2[0] != 0:\n ratio=item1[0]/item2[0]\n else:\n if item1[0] == 0:\n ratio = 1\n else:\n ratio = 0\n # get the tree node\n node1 = get_top_node(item1)\n node2 = get_top_node(item2)\n if item1[1]!=item2[1] and ratio < tol and ratio > (1/tol) and node1 == node2:\n if item1[1]=='c':\n Hits.append((item1,item2))\n elif item2[1]=='c':\n Hits.append((item2,item1))\n else:\n print \"Error in find_Hits. Neither item labeled c\"\n return Hits", "def efficiency(w, L_t=79.6, p=75, fmax=1e12, p1=database['K+'],\r\n p2=database['pi+'], p3=database['p+'], E=1e6, delta_p=1.6e-2,\r\n n=10000, ng=50, nl=50, nf=100, plot=True, set_freq=5.7e9):\r\n if L_t == 79.6 and p == 75 and fmax == 1e12:\r\n opt = [296.5e9, 25.73, 13.33, 9.21]\r\n if L_t == 641.4-22.8 and p == 75 and fmax == 5.7e9:\r\n opt = [5.7e9, 206.2, 104.1, 134.8]\r\n else:\r\n opt = cavity_gap_comp(L_t, p, fmax, p1, p2, p3, E, False, delta_p, ng, nl,\r\n nf, False, False, set_freq)\r\n d_2 = abs_deflection(ang_deflection(p, opt[0], p1, p2, L_t-opt[1], opt[2], E, delta_p), opt[1])\r\n d_3 = abs_deflection(ang_deflection(p, opt[0], p1, p3, L_t-opt[1], opt[2], E, delta_p), opt[1])\r\n phase_range = np.linspace(0, 2*np.pi, n)\r\n disp_2, disp_3, count_2, count_3 = [], [], 0, 0\r\n for tau in phase_range:\r\n disp_2.append(-d_2*np.cos(tau))\r\n disp_3.append(-d_3*np.cos(tau))\r\n if abs(disp_2[-1]) > w:\r\n count_2 += 1\r\n if abs(disp_3[-1]) > w:\r\n count_3 += 1 \r\n num_K = 6*decay_proportion(L_t, p1, p, None)/Gaussian_integral(w/26.4, plot=False)\r\n purity = (num_K/(num_K+((70*(n-count_2))/n)+((23*(n-count_3))/n)))*100\r\n if plot == True:\r\n fig = plt.figure(figsize=[12, 4])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.plot(phase_range, disp_2, 'r', lw=2, label=r'$\\pi^+$')\r\n ax.plot(phase_range, disp_3, 'b', lw=2, label=r'$p^+$')\r\n blank = np.zeros(n)\r\n ymin, ymax = np.min(disp_2 + disp_3)*1.1, np.max(disp_2 + disp_3)*1.1\r\n ax.fill_between(phase_range, blank+w, blank+ymax, color='k', alpha=0.5)\r\n ax.fill_between(phase_range, blank-w, blank+ymin, color='k', alpha=0.5)\r\n ax.set_xlim(0, 2*np.pi)\r\n ax.set_ylim(ymin, ymax)\r\n ax.set_xlabel('Phase', fontsize=20)\r\n ax.set_ylabel('Displacement', fontsize=20)\r\n ax.set_xticks([])\r\n ax.set_yticks([])\r\n ax.set_title('Phase dependence of unwanted particle absorbance in the beam stopper', fontsize=17)\r\n 
ax.legend(fontsize=20)\r\n plt.show()\r\n fig.savefig('Efficiency_Blank.pdf', bbox_inches='tight')\r\n print('{0} efficiency = {1:.1f}%\\n{2} efficiency = {3:.1f}%\\nBeam purity = {4:.1f}%'.format(p2.name, (count_2/n)*100, p3.name, (count_3/n)*100, purity))\r\n return purity", "def count_times(opList, xCar):\n try:\n if isinstance(opList, str):\n opList = str_list(opList)\n if len(opList) == 1:\n numtimes = 1\n return (numtimes)\n else:\n opList.sort()\n numtimes = 1\n index = 1\n for x in opList:\n if index == 1:\n initTime = float(x[len(xCar) + 1:])\n initStart = initTime\n initEnd = initTime + 300\n index = index + 1\n if index != 1:\n curTime = float(x[len(xCar) + 1:])\n within = curTime < initEnd\n if curTime < initEnd:\n index = index + 1\n elif curTime >= initEnd:\n numtimes = numtimes + 1\n initTime = curTime\n initStart = initTime\n initEnd = curTime + 300\n index = index + 1\n return (numtimes)\n except:\n return(0)", "def generateCutList(cut_configuration):\r\n\t#Check that this line reads json.loads(cut_configuration)\r\n\tinput_json = json.load(cut_configuration)\r\n\r\n\t#Currently only desired_cut and laser_cut_config are required\r\n\ttry:\r\n\t\tblock = input_json[\"block\"]\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\tcut = input_json[\"desired_cut\"]\r\n\t\tlaser = input_json[\"laser_cut_config\"]\r\n\texcept:\r\n\t\traise Exception(\"Either desired_cut or laser_cut_config not provided\")\r\n\r\n\tif cut[\"cut_process\"] == \"line\":\r\n\t\tfinal_list = line(cut[\"x1\"],cut[\"y1\"],cut[\"x2\"],cut[\"y2\"],cut[\"final_dimension_z\"]+laser[\"z_final_overshoot\"],laser)\r\n\telif cut[\"cut_process\"] == \"simple_core\":\r\n\t\tfinal_list = simple_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"vertical_core\":\r\n\t\tfinal_list = vertical_core(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"oss_stacked\":\r\n\t\tfinal_list = oss_stacked(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"z_focus\":\r\n\t\tfinal_list = z_focus(block,cut,laser)\r\n\telif cut[\"cut_process\"] == \"cross\":\r\n\t\tfinal_list = cross(block,cut,laser)\r\n\telse:\r\n\t\traise Exception(\"No such cut exists: Check cut_process\")\r\n\t#print(time_taken(final_list, laser))\r\n\tnow = datetime.now()\r\n\ttimestamp = str(now.strftime(\"%m-%d_%H_%M\"))\r\n\tcomplete_name = os.path.join(save_path, timestamp+\".csv\")\r\n\twith open(complete_name, mode='w',newline ='') as test_data:\r\n\t data_writer = csv.writer(test_data, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\t list_data = json.loads(final_list)\r\n\t for line1 in list_data:\r\n\t \tdata_writer.writerow(line1)\r\n\treturn final_list", "def CalculateSpeedIndex(self):\n time_completeness_list = self.GetTimeCompletenessList()\n prev_completeness = 0.0\n speed_index = 0.0\n prev_time = time_completeness_list[0][0]\n for time, completeness in time_completeness_list:\n # Add the incemental value for the interval just before this event.\n elapsed_time = time - prev_time\n incompleteness = (1.0 - prev_completeness)\n speed_index += elapsed_time * incompleteness\n\n # Update variables for next iteration.\n prev_completeness = completeness\n prev_time = time\n return speed_index", "def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all result with the count as key. 
For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutset by its count\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k result\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] is \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. 
Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)", "def strategy_optimized(cookies, cps, history, time_left, build_info):\n\n items = build_info.build_items()\n costs = [build_info.get_cost(item) for item in items]\n value = [build_info.get_cps(item) / build_info.get_cost(item) for item in items]\n builds = sorted(zip(costs, value, items))\n\n earnable_cookies = cookies + time_left * cps\n\n if builds[0][0] > earnable_cookies:\n return None\n else:\n best_value = builds[0]\n for build in builds:\n if build[0] < earnable_cookies and build[1] > best_value[1]:\n best_value = build\n\n return best_value[2]", "def cutpointStrategy(listOfDict):\n df = pd.DataFrame(listOfDict)\n resultDF = df.copy(deep=True)\n for i in df.columns:\n if(df[i].dtype == np.float64):\n distNumValues = list(set(df.loc[:,i].values))\n distNumValues.sort()\n cutPoints = [(distNumValues[j]+distNumValues[j+1])/2 for j in range(len(distNumValues)-1)]\n del resultDF[i]\n for k in range(len(cutPoints)):\n for j in range(df.shape[0]):\n if(df.loc[j,i] < cutPoints[k]):\n resultDF.loc[j,i+str(cutPoints[k])] = str(min(distNumValues)) + \"..\" + str(cutPoints[k])\n else:\n resultDF.loc[j,i+str(cutPoints[k])] = str(cutPoints[k]) + \"..\" + str(max(distNumValues))\n return resultDF.T.to_dict().values()", "def efficiency_vs_length(w=50, Lmax=1000, Lmin=102.4, p=75, fmax=5.7e9,\r\n p1=database['K+'], p2=database['pi+'],\r\n p3=database['p+'], E=1e6, delta_p=1.6e-2, n=1000,\r\n ng=50, nl=50, nf=100, resolution=0.01, plot=True,\r\n set_freq=5.7e9, present=False, count_L=True):\r\n current_target_rate = 53957518\r\n if count_L == True:\r\n if resolution == None:\r\n if set_freq == None:\r\n est = timing(n*ng*nl*nf*4e-05)\r\n else:\r\n est = timing(n*ng*nl*4e-05)\r\n print('Estimated time: {0}'.format(est))\r\n else:\r\n num = int(round(np.log10(Lmax-Lmin))-1)-np.log10(resolution)+1\r\n if set_freq == None:\r\n est = timing(10*num*ng*nl*nf*4e-05)\r\n else:\r\n est = timing(10*num*ng*nl*4e-05)\r\n print('Estimated maximum time: {0}'.format(est))\r\n start = time.time()\r\n if resolution == None:\r\n L_range = np.linspace(Lmin, Lmax, n)\r\n eff, purity = [], []\r\n for L in L_range:\r\n if count_L == True:\r\n print(L)\r\n eff.append(efficiency(w, L-22.8, p, fmax, p1, p2, p3, E, delta_p,\r\n 10000, ng, nl, nf, False, set_freq))\r\n# purity.append((target_rate(L, p1, p)/current_target_rate)*(6/Gaussian_integral(w/26.4, mu=0, sigma=26.4, A=1, y0=0, plot=False)))\r\n purity.append(100*decay_proportion(L-22.8, p1, p, target_rate=target_rate(L, p1, p))/(750e6*Gaussian_integral(w/26.4, plot=False)))\r\n i = 0\r\n while eff[i] < purity[i] and i < np.size(L_range)-1:\r\n i += 1\r\n if i >= np.size(L_range)-1:\r\n return None\r\n output = [L_range[i], eff[i], purity[i]/6]\r\n else:\r\n if present == True:\r\n n = 10**(int(round(np.log10(Lmax-Lmin))-2))\r\n n1 = 10**(int(round(np.log10(Lmax-Lmin))-2))\r\n else:\r\n n = 10**(int(round(np.log10(Lmax-Lmin))-1))\r\n n1 = 10**(int(round(np.log10(Lmax-Lmin))-1))\r\n Lmin = Lmax - ((int(((Lmax-Lmin)/n))+1)*n)\r\n Lmax = Lmin + ((int(((Lmax-Lmin)/n))+1)*n)\r\n while n >= resolution:\r\n if n == n1 and plot==True:\r\n graph_Lmin = Lmin\r\n graph_Lmax = Lmax - n\r\n graph_L_range = np.arange(Lmin, Lmax, n)\r\n graph_eff, graph_purity, graph_intensity = [], [], []\r\n for L in graph_L_range:\r\n if count_L == True:\r\n 
print(L)\r\n graph_eff.append(efficiency(w, L-22.8, p, fmax, p1, p2,\r\n p3, E, delta_p, 10000, ng, nl,\r\n nf, False, set_freq))\r\n# graph_purity.append((target_rate(L, p1, p)/current_target_rate)*(6/Gaussian_integral(w/26.4, mu=0, sigma=26.4, A=1, y0=0, plot=False)))\r\n graph_purity.append(100*decay_proportion(L-22.8, p1, p, target_rate=target_rate(L, p1, p))/(750e6*Gaussian_integral(w/26.4, plot=False)))\r\n graph_intensity.append((target_rate(L, p1, p)/current_target_rate)/Gaussian_integral(w/26.4, plot=False))\r\n i = 0\r\n while graph_eff[i] < graph_purity[i] and i < np.size(graph_L_range)-1:\r\n i += 1\r\n if i >= np.size(graph_L_range)-1:\r\n output = None\r\n break\r\n Lmin = graph_L_range[i-1]\r\n n = 10**(int(np.log10(n)-1))\r\n Lmax = Lmin + ((int(((Lmax-Lmin)/n))+1)*n)\r\n else:\r\n L_range = np.arange(Lmin, Lmax, n)\r\n eff, purity, intensity = 0, 0, 0\r\n i = 0\r\n while eff <= purity and i < np.size(L_range)-1:\r\n i += 1\r\n L= L_range[i]\r\n if count_L == True:\r\n print(L)\r\n eff = efficiency(w, L-22.8, p, fmax, p1, p2, p3, E, delta_p,\r\n 10000, ng, nl, nf, False, set_freq)\r\n# purity = (target_rate(L, p1, p)/current_target_rate)*(6/Gaussian_integral(w/26.4, mu=0, sigma=26.4, A=1, y0=0, plot=False))\r\n purity = 100*decay_proportion(L-22.8, p1, p, target_rate=target_rate(L, p1, p))/(750e6*Gaussian_integral(w/26.4, plot=False))\r\n intensity = (target_rate(L, p1, p)/current_target_rate)/Gaussian_integral(w/26.4, plot=False)\r\n if i >= np.size(L_range)-1:\r\n return None\r\n output = [L_range[i], eff, intensity]\r\n Lmin = L_range[i-1]\r\n n = 10**(int(np.log10(n)-1))\r\n Lmax = Lmin + ((int(((Lmax-Lmin)/n))+1)*n)\r\n if plot == True:\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax2 = ax.twinx()\r\n eff, purity, intensity, L_range = graph_eff, graph_purity, graph_intensity, graph_L_range\r\n Lmax, Lmin = graph_Lmax, graph_Lmin\r\n line1, = ax.plot(L_range, eff, 'r', lw=2, label='Beam Purity', alpha=0.6)\r\n line2, = ax.plot(L_range, purity, 'b', lw=2, label='Required Purity', alpha=0.6)\r\n line3, = ax2.plot(L_range, intensity, 'g', lw=2, label='Req. 
Intensity', alpha=0.6)\r\n# ax.axhline(24, color='k', label='Kaon percentage = 24%')\r\n ax.set_xlim(Lmin, Lmax)\r\n ax.set_ylim(0, 100)\r\n ax.set_xlabel('Distance between target and decay region / m', fontsize=20)\r\n ax.set_ylabel('Beam purity / %', fontsize=20)\r\n ax2.set_ylabel(r'Intensity / I$_0$', fontsize=20)\r\n ax2.set_ylim(0)\r\n ax2.yaxis.label.set_color(line3.get_color())\r\n ax2.tick_params(axis='y', colors=line3.get_color())\r\n# index = np.argmin(np.abs(eff-24))\r\n# if eff[index] >= 24:\r\n# min_L = L_range[index]\r\n# else:\r\n# min_L = L_range[index+1]\r\n# print(f'{timing(time.time()-start)}')\r\n# print('Minimum target distance required = {0:.1f} m'.format(min_L))\r\n# return min_L\r\n if output != None:\r\n line4 = ax.axvline(output[0], color='k', label='Intercept', dashes=[6, 2])\r\n lines = [line1, line2, line3, line4]\r\n else:\r\n lines = [line1, line2, line3]\r\n ax2.legend(lines, [l.get_label() for l in lines], loc='best', fontsize=15)\r\n ax.minorticks_on()\r\n ax2.minorticks_on()\r\n ax.grid()\r\n ax.set_title('Observed and required beam purities as a function of\\ntarget distance'+f' for E = {int(E*1e-6)} MV/m', fontsize=16)\r\n plt.show()\r\n fig.savefig(f'Purity_vs_Required_p_75_E_1e{int(round(np.log10(E)))}_with_Intensity.pdf', bbox_inches='tight')\r\n if count_L == True:\r\n print(f'{timing(time.time()-start)}')\r\n return output", "def spike_count(spikeTime, start, stop, dt):\n\n\n #Spike time turned into a numpy array\n spikeTime = np.array(spikeTime)\n # print('Spike Times: ', spikeTime)\n\n #Creat interval array - intervals in which to break up the time array - sub time interval array\n duration = stop-start #Total run time\n n = duration/dt #How many subintervals from time horizon results from user defined interval\n splitInterval = np.linspace(0, duration, n+1) #create numpy array of subinterval over which to count spikes\n # print ('split interval: ', splitInterval)\n\n ##Find length over which to iterate in for loop\n length_splitInt = len(splitInterval)\n # print('length splitInterval: ', length_splitInt)\n length_time = len(spikeTime)\n # print('length time: ', length_time)\n length = length_splitInt + ((length_time) - 2)\n # print('length :', length)\n\n i=0 #inex for time array\n j=0 #index for splitInterval array.\n k=0 #index for new matrix that will store the grouped values from the split time array\n counter = 0 #counter variable to keep track of spike count for each subinterval through loop\n SpikeCount = [] #Initialize array to collect the number of spikes occuring wihtin each subinterval\n\n for i in range(length):\n if (i == 0) and (spikeTime[0] == splitInterval[0]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n\n # Spot check\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n elif (spikeTime[k] > splitInterval[j]) and (spikeTime[k] <= splitInterval[j + 1]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', 
counter)\n\n else:\n j += 1\n # Spot check\n SpikeCount.append(counter)\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n\n\n else:\n SpikeCount.append(counter)\n counter = 0\n j += 1\n i += 1\n\n # Spot Check\n # print('else counter: ', counter)\n # print(SpikeCount)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('else j: ', j)\n # print('else i: ', i)\n # print('else k: ', k)\n\n return (SpikeCount, splitInterval)", "def _calc_estimated_chops_from_timepoints(self):\n if self.verbose:\n logger.info(\"Start calc Chop Times: length: {} raw time: {}\".format(self.length,self.times[-1]))\n \n self._chops = None\n self._indices = None\n self._estimated_chops = None\n self._estimated_indices = None\n \n #--- warn if times less than chop length\n if self.times[-1] <= self.length:\n logger.warning(\"<Raw Times> : {} smaler than <Chop Times> : {}\\n\\n\".format(self.times[-1],self.length))\n \n self._estimated_chops,self._estimated_indices = get_chop_times_indices(self.times,\n chop_length=self.length,\n exit_on_error=self.exit_on_error) \n \n if self.verbose:\n self.GetInfo()\n \n return self._estimated_chops,self._estimated_indices", "def estimateTime(numparts, maskpixrad=None):\n\t#min time 60 sec vs. 289 from model\n\t#linear time 0 sec vs. -1.1587 from model\n\t#quadradic time March 14, 2008\n\tx = float(maskpixrad*numparts*2.0)\n\testtime = ( 26.83 + 0.001809 * x + 1.8542e-09 * x**2 )\n\t#ln(y) = -13.182 + 1.531 * ln(x) ==>\n\t#esttime = 1.884e-6 * (x**1.531) + 26.0\n\treturn esttime", "def run_simulation(n_pieces, l, mean, sd):\n inspection_times = []\n prev_arrival = 0\n prev_inspection_end_a = 0\n prev_inspection_end_b = 0\n for piece in range(n_pieces):\n # Random values\n r, u1, u2 = get_n_rands(3)\n exp_t = dist_exponential(l, r)\n norm_t = dist_normal(u1, u2, mean, sd)\n # Arrival vals\n arrival = prev_arrival + exp_t\n if not prev_arrival:\n inspection_start_a = arrival\n inspection_start_b = 0\n prev_inspection_end_a = inspection_start_a + norm_t\n prev_inspection_end_b = 0\n else:\n inspection_start_a = arrival if prev_inspection_end_a > prev_inspection_end_b else min(prev_inspection_end_a, prev_inspection_end_b)\n inspection_start_b = arrival if prev_inspection_end_a < prev_inspection_end_b else min(prev_inspection_end_a, prev_inspection_end_b)\n prev_inspection_end_a = prev_inspection_end_a if inspection_start_a == 0 else inspection_start_a + norm_t\n prev_inspection_end_b = prev_inspection_end_b if inspection_start_b == 0 else inspection_start_b + norm_t\n prev_arrival = arrival\n # Inspection vals\n inspection_times.append(max(inspection_start_a, inspection_start_b) + norm_t - arrival)\n return inspection_times", "def compute_time(size, data1, data2):\n\n print(\"List size:\", size)\n my_list = list(range(size)) # Make list [0, 1, 2, 3, ..., size - 1]\n start = clock() # Start the clock\n ans = is_ascending(my_list) # Compute answer\n elapsed1 = clock() - start # Stop the clock\n print(\" is_ascending: {} Elapsed: {:12.7f}\".format(ans, elapsed1))\n start = clock() # Start the clock\n ans = is_ascending2(my_list) # Compute answer\n elapsed2 = clock() - start # Stop the clockt\n print(\" is_ascending2: {} Elapsed: {:12.7f}\".format(ans, elapsed2))\n print(\" Speedup: {:6.1f}\".format(elapsed1/elapsed2)) # Compute speedup\n print()\n data1.append((size, elapsed1))\n data2.append((size, elapsed2))", "def timer(trainX, trainY, testX, k, condensed=False):\n \n gc.disable() # 
disable garbage collector for uninterrupted timing \n initial = clock()\n if condensed:\n cnn = condenseData(trainX, trainY)\n testY = testknn(trainX[cnn], trainY[cnn], testX, k)\n else:\n testY = testknn(trainX, trainY, testX, k)\n final = clock()\n \n gc.enable() # turn garbage collector back on\n return ((final - initial), testY)", "def _calcExecTime(self, migTask, dPrime):\n #print \"ae\", self\n # Let's start making U = 0.9999 (which probably causes deadline misses).\n # If we force U = 1, we won't be able to use La.\n if self.util() >= 0.9999:\n self._lastCost = 0.0\n return 0.0\n cPrime = (0.9999 - self.util())*migTask.period()\n\n # Temporarily add the slice\n tempSlice = WmSlice(-1, cPrime, dPrime, migTask)\n self._addSlice(tempSlice)\n\n L = self._L()\n min_d = self._minDeadline()\n\n #print \"L\", L\n #print self\n #print \"Calculating cost. dPrime\", dPrime\n\n # QPA\n t = self._lastDeadline(L)\n h = self._h(t)\n #print t\n while round(t,12) >= round(min_d,12): # We are checking demand only for the migratory task\n # We round the checking to 12 decimal places. Otherwise, it could make the algorithm repeat undefinedly, in\n # case new calculated cost is not 100% precise. We do the same when applying floor(). The other comparisons don't\n # need this correction, since they are not so critical.\n if round(h,12) > round(t,12):\n #print \"HIGH. t %.15f\" % t, \"h(t) %.15f\" % h, \". C was\", cPrime\n cPrime = (t - self._h_oth(t, tempSlice)) / floor(round((t + migTask.period() - dPrime)/migTask.period(), 12))\n #print \"New C is\", cPrime\n tempSlice._wcet = cPrime # Update slice cost to fix demand\n\n if cPrime <= 0.0: # Stop if the cost gets negative\n self._removeLastSlice()\n self._lastCost = 0.0\n return 0.0\n\n #print \"OK. t\", t, \"h(t)\",h, \"new t\",\n t = self._lastDeadline(t)\n #print t\n h = self._h(t)\n #print \"OK. 
t\", t, \"h(t)\",h\n\n #print self\n #print \"Final cost\", cPrime\n #if not self._qpa():\n # print self.tasks()\n #assert self._qpa()\n\n self._removeLastSlice()\n self._lastCost = cPrime\n return cPrime", "def strategy_cheap(cookies, cps, history, time_left, build_info):\n pick = None\n cost = float('inf')\n for item in build_info.build_items():\n if build_info.get_cost(item) < cost: \n cost = build_info.get_cost(item)\n if (time_left * cps + cookies) >= cost:\n pick = item\n \n return pick", "def brute_force(L):\n\n max_diff = -float(\"inf\")\n length = len(L)\n for i in range(length - 1):\n start = L[i]\n for j in range(i + 1, length):\n end = L[j]\n diff = end - start\n max_diff = max(max_diff, diff)\n return max_diff", "def strategy_best(cookies, cps, history, time_left, build_info):\n pick = None\n ratio = float('-inf')\n for item in build_info.build_items():\n if build_info.get_cps(item) / build_info.get_cost(item) > ratio: \n cost = build_info.get_cost(item)\n ratio = build_info.get_cps(item) / build_info.get_cost(item)\n if (time_left * cps + cookies) >= cost:\n pick = item\n \n return pick", "def Kendalls_Tau2(xlist, ylist):\n\tif len(xlist) != len(ylist):\n\t\traise StatsError(\"Data sets have different lengths.\")\n\txdata = xlist\n\tydata = ylist\n\t#for i in range(len(xlist)):\n\t#\tif xlist[i] != None and ylist[i] != None:\n\t#\t\txdata.append(xlist[i])\n\t#\t\tydata.append(ylist[i])\n\tassert len(xdata) == len(ydata)\n\t#assert len(xdata) <= len(xlist) - xlist.count(None)\n\t#assert len(ydata) <= len(ylist) - ylist.count(None)\n\t#assert len(ydata) >= len(ylist) - xlist.count(None) - ylist.count(None)\n\tif len(xdata) == 0:\n\t\traise StatsError(\"No valid data entries.\")\n\tn = len(xdata)\n\t# compute the number of concordant and discordant pairs\n\tconc = disc = 0.0 # concordant and discordant pairs\n\tnx = ny = 0.0\n\tupdown = 0\n\tfor i in range(n): # loop over all pairs\n\t\txi = xdata[i]\n\t\tyi = ydata[i]\n\t\tif xi and yi:\n\t\t\tfor j in range(i + 1, n):\n\t\t\t\tif xdata[j] and ydata[j]:\n\t\t\t\t\txd = xi - xdata[j]\n\t\t\t\t\tyd = yi - ydata[j]\n\t\t\t\t\tprod = xd * yd\n\t\t\t\t\tif prod != 0:\n\t\t\t\t\t\tnx += 1\n\t\t\t\t\t\tny += 1\n\t\t\t\t\t\tif prod > 0:\n\t\t\t\t\t\t\tupdown += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tupdown -= 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif xd != 0:\n\t\t\t\t\t\t\tnx += 1\n\t\t\t\t\t\tif yd != 0:\n\t\t\t\t\t\t\tny += 1\n\t# Compute tau\n\tn = float(n)\n\tdenom = math.sqrt(nx*ny)\n\ttry:\n\t\ttau = float(updown) / denom\n\texcept ZeroDivisionError:\n\t\traise StatsError(\"Too few entries: {:d}\".format(n))\n\t# Compute P-value\n\tz = 3.0 * tau * math.sqrt(n * (n - 1.0)) / math.sqrt(2.0 * (2.0 * n + 5.0))\n\tprob = Prob_Z(z)\n\treturn (tau, prob, int(n))", "def strategy_best(cookies, cps, time_left, build_info):\n return_item = None\n highest_icr = float('-inf')\n item_list = build_info.build_items()\n cookies_potential = cookies + time_left * cps\n for item in item_list:\n cost = build_info.get_cost(item)\n curr_icr = build_info.get_cps(item) / cost \n if cookies_potential >= cost and curr_icr > highest_icr:\n return_item = item\n highest_icr = curr_icr\n return return_item", "def long_waiters_die(celllist, tnow):\n survivors = []\n for sublist in celllist:\n newsub = []\n for cell in sublist:\n if tnow - cell.GCentrytime <= cf.tlifeGC:\n newsub.append(cell)\n survivors.append(newsub)\n return survivors", "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= 
l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def SelectWorkingSet(L, q):\n i = 0\n index = 0\n while i < int(q/2):\n if L[index, 1] > 0 and L[index, 1] < C:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == -1 and L[index, 1] <= 0:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == 1 and L[index, 1] == 100:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n index = index + 1\n # print(WorkingSet)\n # print(index)\n\n index = len(y_train) - 1\n while i < int(q):\n j = 0\n while j < (int(q/2)):\n if index == int(WorkingSet[j, 2]):\n # print(\"Hello cunt\")\n # print(index)\n index = index - 1\n # print(index)\n if j > 0:\n j = 0\n else:\n j = j + 1\n if L[index, 1] > 0 + error and L[index, 1] < C:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == 1 and L[index, 1] <= 0:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n elif y_train[int(L[index, 0])] == -1 and L[index, 1] == 100:\n WorkingSet[i, 0] = L[index, 0]\n WorkingSet[i, 1] = L[index, 1]\n WorkingSet[i, 2] = index\n i = i + 1\n index = index - 1\n # print(WorkingSet)\n return WorkingSet", "def Kendalls_Tau(xlist, ylist):\n\tif len(xlist) != len(ylist):\n\t\traise StatsError(\"Data sets have different lengths.\")\n\txdata = []\n\tydata = []\n\tfor i in range(len(xlist)):\n\t\tif xlist[i] != None and ylist[i] != None:\n\t\t\txdata.append(xlist[i])\n\t\t\tydata.append(ylist[i])\n\tassert len(xdata) == len(ydata)\n\tassert len(xdata) <= len(xlist) - xlist.count(None)\n\tassert len(ydata) <= len(ylist) - ylist.count(None)\n\tassert len(ydata) >= len(ylist) - xlist.count(None) - ylist.count(None)\n\tif len(xdata) == 0:\n\t\traise StatsError(\"No valid data entries.\")\n\tn = len(xdata)\n\t# compute the number of concordant and discordant pairs\n\tconc = disc = 0.0 # concordant and discordant pairs\n\tfor i in range(n): # loop over all pairs\n\t\txi = xdata[i]\n\t\tyi = ydata[i]\n\t\tfor j in range(i + 1, n):\n\t\t\txd = xi - xdata[j]\n\t\t\tyd = yi - ydata[j]\n\t\t\tprod = xd * yd\n\t\t\tif prod == 0.0: # this is a tie\n\t\t\t\tcontinue\n\t\t\telif prod > 0.0:\n\t\t\t\tconc += 1\n\t\t\telse:\n\t\t\t\tdisc += 1\n\t# compute the tie correction: sum(t * t - t)\n\txcopy = []\n\tycopy = []\n\tfor i in range(n):\n\t\txcopy.append(xdata[i])\n\t\tycopy.append(ydata[i])\n\txties = yties = 0.0\n\twhile xcopy:\n\t\txi = xcopy[0]\n\t\tt = xcopy.count(xi)\n\t\txties = xties + t * t - t\n\t\twhile xcopy.count(xi) > 0:\n\t\t\txcopy.remove(xi)\n\twhile ycopy:\n\t\tyi = ycopy[0]\n\t\tt = ycopy.count(yi)\n\t\tyties = yties + t * t - t\n\t\twhile ycopy.count(yi) > 0:\n\t\t\tycopy.remove(yi)\n\t# Compute tau\n\tn = float(n)\n\tdenom = math.sqrt((n * n - n - xties) * (n * n - n - yties))\n\ttry:\n\t\ttau = 2.0 * (conc - disc) / denom\n\texcept ZeroDivisionError:\n\t\traise StatsError(\"Too few entries: {:d}.\".format(n))\n\t# Compute P-value\n\tz = 3.0 * tau * math.sqrt(n * (n - 1.0)) / math.sqrt(2.0 * (2.0 * n + 5.0))\n\tprob = Prob_Z(z)\n\treturn (tau, prob, int(n))", "def time_test():\n REPETITIONS = 200 # Larger value helps smooth the plot. 
\n MIN_LENGTH = 100\n MAX_LENGTH = 10001\n STEP = 100\n\n sizes = []\n times = []\n \n for n in range(MIN_LENGTH,MAX_LENGTH,STEP):\n \n total_time = 0\n \n for _ in range(REPETITIONS):\n \n lst = list(range(n))\n my_shuffle(lst)\n ans = lst.pop() if n>0 else 0\n \n start = time()\n res = f1(lst)\n finish = time()\n \n assert ans == res\n \n total_time += (finish-start)\n \n sizes.append(n)\n times.append(total_time)\n \n plt.plot(sizes,times)\n plt.savefig(\"c17p04.png\")\n plt.close()", "def _travel_times(self, trip_list, index=0):\n\n def distance_in_travel_time(dep_secs, arr_secs):\n t_dist = arr_secs - dep_secs\n if t_dist < 0:\n t_dist = self._DUMMY_SEPARATOR # min separation\n return t_dist\n\n if not trip_list:\n return []\n\n if 0 < index < len(trip_list):\n trip = trip_list[index]\n else:\n trip = trip_list[0]\n\n t_dists2 = [distance_in_travel_time(stop[3], tail[2]) for (stop, tail)\n in zip(trip.get_time_stops(), trip.get_time_stops()[1:])]\n return t_dists2", "def strategy_expensive(cookies, cps, history, time_left, build_info):\n pick = None\n cost = float('-inf')\n for item in build_info.build_items():\n if build_info.get_cost(item) > cost: \n cost = build_info.get_cost(item)\n if (time_left * cps + cookies) >= cost:\n pick = item\n \n return pick", "def compute_cost_and_order_cuts(cuts, cost_function):\n\n cost_cuts = np.zeros(len(cuts.values), dtype=float)\n for i_cut, cut in enumerate(cuts.values):\n cost_cuts[i_cut] = cost_function(cut)\n idx = np.argsort(cost_cuts)\n\n cuts.values = cuts.values[idx]\n cuts.costs = cost_cuts[idx]\n if cuts.names is not None:\n cuts.names = cuts.names[idx]\n if cuts.equations is not None:\n cuts.equations = cuts.equations[idx]\n\n return cuts", "def cut_train(self, hits, *args):\n n_days = self.predict_window + self.train_window\n # How much free space we have to choose starting day\n free_space = self.inp.data_days - n_days - self.back_offset - self.start_offset\n if self.verbose:\n lower_train_start = self.inp.data_start + pd.Timedelta(self.start_offset, 'D')\n lower_test_end = lower_train_start + pd.Timedelta(n_days, 'D')\n lower_test_start = lower_test_end - pd.Timedelta(self.predict_window, 'D')\n upper_train_start = self.inp.data_start + pd.Timedelta(free_space - 1, 'D')\n upper_test_end = upper_train_start + pd.Timedelta(n_days, 'D')\n upper_test_start = upper_test_end - pd.Timedelta(self.predict_window, 'D')\n print(f\"Free space for training: {free_space} days.\")\n print(f\" Lower train {lower_train_start}, prediction {lower_test_start}..{lower_test_end}\")\n print(f\" Upper train {upper_train_start}, prediction {upper_test_start}..{upper_test_end}\")\n # Random starting point\n offset = tf.random_uniform((), self.start_offset,self.start_offset + free_space+1, dtype=tf.int32, seed=self.rand_seed)\n end = offset + n_days\n # Cut all the things\n return self.cut(hits, offset, end) + args", "def greedy_binning(t, C, n_bins, maxit= 1000):\n b= n_bins\n n_u= generate_n_u(t)\n d= len(n_u)\n cum_n_u= np.hstack([[0], np.cumsum(n_u)])\n tau= np.unique(t)\n tau= np.hstack([tau, [np.max(tau) + 0.1]])\n \n splits= sorted(np.random.randint(1, d, b-1))\n while len(np.unique(splits)) < b-1:\n splits= sorted(np.random.randint(1, d, b-1)) \n bins= np.array([0] + splits + [d])\n \n sums= np.repeat(0.0, n_bins)\n\n for i in range(n_bins):\n sums[i]= block_sum(i, bins, C, n_u)\n \n ns= np.repeat(0.0, n_bins)\n for i in range(n_bins):\n ns[i]= cum_n_u[bins[i+1]] - cum_n_u[bins[i]]\n \n objective= 0.0\n \n for i in range(n_bins):\n objective+= 
sums[i]/ns[i]\n\n cum_n_u= np.hstack([[0], np.cumsum(n_u)])\n \n it= 0\n while True and it < maxit:\n it+= 1\n \n change_obj, change_idx, step_, new_sum_i, new_sum_im1, new_ns_i, new_ns_im1= 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n \n for i in range(1, n_bins):\n for step in [-1, 0]:\n if ns[i + step] > n_u[bins[i] + step]:\n change, sum_i, sum_im1, ns_i, ns_im1 = changes(i, step*2 + 1, bins, C, n_u, ns, sums)\n if change > change_obj:\n change_obj, change_idx, step_, new_sum_i, new_sum_im1, new_ns_i, new_ns_im1= change, i, step*2 + 1, sum_i, sum_im1, ns_i, ns_im1\n \n if change_obj > 0.0:\n objective= objective + change_obj\n bins[change_idx]+= step_\n sums[change_idx]= new_sum_i\n sums[change_idx-1]= new_sum_im1\n ns[change_idx]= new_ns_i\n ns[change_idx-1]= new_ns_im1\n else:\n break\n \n t_binning= []\n for i in range(len(t)):\n for j in range(len(bins)):\n if t[i] >= tau[bins[j]] and t[i] < tau[bins[j+1]]:\n t_binning.append(j)\n \n return np.array(t_binning)", "def makeDecision(self, entries):\n minGradient = self.getStoredValue(Values.OperatorTrendGradient)\n N = len(entries)\n if not self.checkEntriesSize(entries): return 0\n if not self.checkEntriesTimeWidth(entries): return 0\n t = np.zeros(N)\n v = np.zeros(N)\n for i in range(0, N):\n trend = entries[i]\n t[i] = trend.date.timestamp()\n v[i] = trend.strength - 0.5 # -0.5 <= v <= 0.5\n tmax = np.max(t)\n tmin = np.min(t)\n t = (t - tmin) / (tmax - tmin) # 0 <= t <= 1.0\n f = np.polyfit(t, v, 1) # f = ax + b\n f0 = np.inner(f, np.array([0, 1])) # f0 = b\n f1 = np.inner(f, np.array([1, 1])) # f1 = x + b\n self.logger.debug('Decision calculation finished, ' +\n 'f={f}, f(0)={f0}, f(1)={f1}.'\n .format(f=f, f0=f0, f1=f1))\n strength1 = 0.0\n strength2 = 0.02\n chance = None\n if f[0] > minGradient and \\\n ((f0 < strength1 < f1 and strength1 < v[0]) or \\\n (strength1 < f0 < strength2 and strength1 < v[0])):\n self.logger.warning('Decision is +1(long, open), ' +\n 'f={f}, f(0)={f0:.5f}, f(1)={f1:.5f}, entries=[{e}].'\n .format(f=f, f0=f0, f1=f1,\n e=', '.join(map(str, entries))))\n chance = +1\n if f[0] < -minGradient and \\\n ((f0 > -strength1 > f1 and -strength1 > v[0]) or \\\n (-strength1 > f0 > -strength2 and -strength1 > v[0])):\n self.logger.warning('Decision is -1(down, open), ' +\n 'f={f}, f(0)={f0:.5f}, f(1)={f1:.5f}, entries=[{e}].'\n .format(f=f, f0=f0, f1=f1,\n e=', '.join(map(str, entries))))\n chance = -1\n position = self.getPositionSize()\n variations = self.getPositionVariation()\n amount = position['total']\n self.logger.warning('Positions: amount={a}, variations={v}'.format(a=amount, v=variations))\n if chance is None and f[0] < 0 and amount > 0 and \\\n len(variations['long']) > 0 and variations['long'][-1] > 1.0:\n self.logger.warning('Decision is -1(short, profit), positions oppose trend, ' +\n 'f={f}, f(0)={f0:.5f}, f(1)={f1:.5f}, entries=[{e}].'\n .format(f=f, f0=f0, f1=f1,\n e=', '.join(map(str, entries))))\n chance = -1\n if chance is None and f[0] > 0 and amount < 0 and \\\n len(variations['short']) > 0 and variations['short'][-1] < 1.0:\n self.logger.warning('Decision is +1(long, profit), positions oppose trend, ' +\n 'f={f}, f(0)={f0:.5f}, f(1)={f1:.5f}, entries=[{e}].'\n .format(f=f, f0=f0, f1=f1,\n e=', '.join(map(str, entries))))\n chance = +1\n minGradientLossCut = 0.\n #f0 < 0 and \n if chance is not None and \\\n f[0] < -minGradientLossCut and \\\n f1 < 0 and \\\n len(variations['long']) > 0 and variations['long'][-1] < 1.0:\n self.logger.warning('Decision is -1(short, losscut), positions oppose trend, 
' +\n 'f={f}, f(0)={f0:.5f}, f(1)={f1:.5f}, entries=[{e}].'\n .format(f=f, f0=f0, f1=f1,\n e=', '.join(map(str, entries))))\n chance = -1\n #f0 > 0 and \n if chance is not None and \\\n f[0] > minGradientLossCut and \\\n f1 > 0 and \\\n len(variations['short']) > 0 and variations['short'][-1] > 1.0:\n self.logger.warning('Decision is +1(long, losscut), positions oppose trend, ' +\n 'f={f}, f(0)={f0:.5f}, f(1)={f1:.5f}, entries=[{e}].'\n .format(f=f, f0=f0, f1=f1,\n e=', '.join(map(str, entries))))\n chance = +1\n if chance is None: return 0\n if not self.checkPositionsCount(chance): return 0\n return chance", "def calc_metrics(inputs, model, tracklen=None):\n if len(inputs.shape) != 3:\n raise ValueError('Input array must be 3-dimensional, '\n f'but got {inputs.shape}')\n\n if inputs.shape[1] <= 2:\n raise ValueError(f'Input shape is {inputs.shape}, '\n 'second dimension must be greater than 2')\n\n if tracklen is not None:\n if tracklen <= 2:\n raise ValueError(f'Invalid tracklen, must be >2, got {tracklen} instead')\n if tracklen > inputs.shape[1]:\n raise ValueError('Tracklen can`t be greater than number of hits in the sample '\n f'inputs.shape=[{inputs.shape}], tracklen={tracklen}')\n\n efficiency = 0\n hits_efficiency = 0\n # create tensor and place it to CPU or GPU\n inputs_val = torch.from_numpy(inputs).to(model.device)\n model.eval()\n\n if tracklen is None:\n tracklen = inputs_val.size(1)\n\n # run from the first station to the last\n for i in range(1, tracklen):\n # cut off all track-candidates\n inputs_part = inputs_val[:, :i]\n # x, y coords of the next hit in a track\n target = inputs_val[:, i, :2]\n # get model's prediction\n preds = model(inputs_part)\n\n # get indices of the tracks, which continuation was found\n idx = point_in_ellipse(preds, target)\n # count number of right predictions\n hits_efficiency += np.sum(idx) / len(idx)\n # exclude\n inputs_val = inputs_val[idx]\n\n # count number of track for which we found all points\n efficiency = len(inputs_val) / len(inputs)\n # recompute the hits_efficiency\n hits_efficiency = (hits_efficiency + 2) / tracklen\n return efficiency, hits_efficiency", "def measure(self, A, B, start_index): \n #code modifed from wikipedia\n Dlp = lambda x,y: abs(x-y)\n timeSB = np.arange(1,len(B)+1)\n timeSA = np.arange(1,len(A)+1)\n nu = self.v\n _lambda = self.gamma\n # Reference :\n # Marteau, P.; F. (2009). \"Time Warp Edit Distance with Stiffness Adjustment for Time Series Matching\".\n # IEEE Transactions on Pattern Analysis and Machine Intelligence. 31 (2): 306–318. 
arXiv:cs/0703033\n # http://people.irisa.fr/Pierre-Francois.Marteau/\n\n # Check if input arguments\n if len(A) != len(timeSA):\n print(\"The length of A is not equal length of timeSA\")\n return None, None\n \n if len(B) != len(timeSB):\n print(\"The length of B is not equal length of timeSB\")\n return None, None\n\n if nu < 0:\n print(\"nu is negative\")\n return None, None\n\n # Add padding\n A = np.array([0] + list(A))\n timeSA = np.array([0] + list(timeSA))\n B = np.array([0] + list(B))\n timeSB = np.array([0] + list(timeSB))\n\n n = len(A)\n m = len(B)\n # Dynamical programming\n DP = np.zeros((n, m))\n\n # Initialize DP Matrix and set first row and column to infinity\n DP[0, :] = np.inf\n DP[:, 0] = np.inf\n DP[0, 0] = 0\n\n # Compute minimal cost\n for i in range(1, n):\n for j in range(1, m):\n # Calculate and save cost of various operations\n C = np.ones((3, 1)) * np.inf\n # Deletion in A\n C[0] = (\n DP[i - 1, j]\n + Dlp(A[i - 1], A[i])\n + nu * (timeSA[i] - timeSA[i - 1])\n + _lambda\n )\n # Deletion in B\n C[1] = (\n DP[i, j - 1]\n + Dlp(B[j - 1], B[j])\n + nu * (timeSB[j] - timeSB[j - 1])\n + _lambda\n )\n # Keep data points in both time series\n C[2] = (\n DP[i - 1, j - 1]\n + Dlp(A[i], B[j])\n + Dlp(A[i - 1], B[j - 1])\n + nu * (abs(timeSA[i] - timeSB[j]) + abs(timeSA[i - 1] - timeSB[j - 1]))\n )\n # Choose the operation with the minimal cost and update DP Matrix\n DP[i, j] = np.min(C)\n distance = DP[n - 1, m - 1]\n self.M = DP\n self.decision_scores_.append((start_index, distance))\n return distance", "def count_time():\n repetitions = 1000\n num_of_max_elements = 500\n min_possible_value = -100000\n max_possible_value = 100000\n\n possible_values = list(range(min_possible_value, max_possible_value))\n number_of_elements = [num for num in range(num_of_max_elements)]\n\n all_times_insert = [0] * num_of_max_elements\n all_times_search = [0] * num_of_max_elements\n for _ in range(repetitions):\n red_black_tree = RedBlackTree()\n for cur_number_of_elements in number_of_elements:\n\n start_time_insertion = time.perf_counter()\n red_black_tree.insert(random.choice(possible_values))\n end_time_insertion = time.perf_counter()\n all_times_insert[cur_number_of_elements] += end_time_insertion - start_time_insertion\n\n start_time_search = time.perf_counter()\n red_black_tree.search(random.choice(possible_values))\n end_time_search = time.perf_counter()\n all_times_search[cur_number_of_elements] += end_time_search - start_time_search\n\n all_times_insert = [time_insert / repetitions for time_insert in all_times_insert]\n all_times_search = [time_search / repetitions for time_search in all_times_search]\n\n plt.plot(number_of_elements, all_times_insert)\n plt.plot(number_of_elements, all_times_search)\n plt.xlabel(\"Num of elements\")\n plt.ylabel(\"Time\")\n plt.legend([\"Insertion\", \"Search\"])\n plt.savefig(\"time.png\")\n plt.show()", "def fast_dtw(base_list, test_list, extended=False):\r\n b = base_list.shape[0]\r\n t = test_list.shape[0]\r\n if (b > 0 and t > 0):\r\n DTW = np.full((b, t), float('inf'))\r\n\r\n DTW[0, 0] = 0.0\r\n cost = np.zeros((b, t))\r\n for i in range(b):\r\n cost[i] = np.linalg.norm(test_list - base_list[i], axis=1)\r\n for i in range(1, b):\r\n DTW[i, 1] = cost[i, 1] + min(DTW[i - 1, 0], DTW[i - 1, 1])\r\n for j in range(2, t):\r\n DTW[i, j] = cost[i, j] + min(DTW[i - 1, j - 2], DTW[i - 1, j - 1], DTW[i - 1, j])\r\n if (extended):\r\n return DTW[b - 1, t - 1], cost, DTW, _traceback(DTW)\r\n else:\r\n return DTW[b - 1, t - 1]", "def runCutVals(df, 
eVal=0., windowSize = 2):\n\n dfg = df.groupby(['cpd1'])\n\n eMin = round(eVal - windowSize/2, 2)\n eMax = round(eMin + windowSize, 2)\n dFullPeakE, dFullBkgE = 0, 0\n dCutPeakE, dCutBkgE = 0, 0\n dFullPeakN, dFullBkgN = 0, 0\n dCutPeakN, dCutBkgN = 0, 0\n\n for name, g in dfg:\n valsFull = g['trapENFCal1'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values\n\n valsCut = g['trapENFCal1'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>eMin) & (g['trapENFCal1']<eMax)].values + g['trapENFCal2'].loc[(g['Pass1']==True) & (g['Pass2']==True) & (g['trapENFCal1']>=eMin) & (g['trapENFCal1']<=eMax)].values\n if name in enrDetList:\n dFullPeakE += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakE += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgE += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgE += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n elif name in natDetList:\n dFullPeakN += len(valsFull[(valsFull > 237.28) & (valsFull < 239.46)])\n dCutPeakN += len(valsCut[(valsCut > 237.28) & (valsCut < 239.46)])\n dFullBkgN += len(valsFull[(valsFull > 235) & (valsFull < 237.18)])\n dCutBkgN += len(valsCut[(valsCut > 235) & (valsCut < 237.18)])\n\n return dFullPeakE, dCutPeakE, dFullBkgE, dCutBkgE, dFullPeakN, dCutPeakN, dFullBkgN, dCutBkgN", "def find_cut(events, rates, obstime, feature, low_cut, high_cut, gamma_efficiency):\n\n if events.shape[0] == 0:\n\n if feature == \"gammaness\":\n return low_cut\n else:\n return high_cut\n\n tol = 1000\n\n if feature == \"gammaness\":\n lookfor_cut = high_cut\n alternative_cut = low_cut\n else:\n lookfor_cut = low_cut\n alternative_cut = high_cut\n\n while tol > 1e-6:\n midpoint = (lookfor_cut + alternative_cut) / 2.0\n\n if samesign(diff_events_after_cut(events, rates, obstime, feature, lookfor_cut, gamma_efficiency),\n diff_events_after_cut(events, rates, obstime, feature, midpoint, gamma_efficiency)):\n lookfor_cut = midpoint\n else:\n alternative_cut = midpoint\n\n tol = abs(alternative_cut - lookfor_cut)\n return midpoint", "def strategy_cheap(cookies, cps, time_left, build_info):\n return_item = None\n lowest_cost = float('inf')\n item_list = build_info.build_items()\n cookies_potential = cookies + time_left * cps\n for item in item_list:\n cost = build_info.get_cost(item)\n if cookies_potential >= cost and cost < lowest_cost:\n return_item = item\n lowest_cost = cost\n return return_item", "def findAlternatives(sortedList):\n #zeroing the data below treshold\n global TRESHOLD\n # if THRESHOLD == 0:\n TRESHOLD = readsHistogram(sortedList)\n afterTresholdData = []\n print(len(sortedList))\n for i in range(len(sortedList)):\n if np.mean(sortedList[i].getSamples()) >= TRESHOLD:\n afterTresholdData.append(sortedList[i]) #leaves only the reads only if the mean of the reads above TRESHOLD\n index = 0\n while index < (len(afterTresholdData) - 1):\n counter = 1\n while afterTresholdData[index].getName() == afterTresholdData[index + counter].getName():\n afterTresholdData[index].appendSamples(afterTresholdData[index + counter].getSamples())\n afterTresholdData[index].appendCoordinates(afterTresholdData[index + counter].getCoordinates())\n counter += 1\n index += counter\n alternatives = []\n for item in afterTresholdData:\n if len(item.getSamples().shape) > 1:\n alternatives.append(item)\n print(len(afterTresholdData), len(alternatives))\n return alternatives", "def thickenXYList( list, tester, 
biSectionMax=6, interpolation=xDataEnumsModule.Interpolation.linlin):\n\n def thickenXYList2( interpolation, xl, yl, xu, yu, newList, tester, level ) :\n\n if( level == biSectionMax ) : return\n level += 1\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.loglin:\n xMid = 0.5 * ( xl + xu )\n else :\n xMid = math.sqrt( xl * xu );\n\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.linlog:\n yMid = 0.5 * ( yl + yu )\n else :\n yMid = math.sqrt( yl * yu )\n\n y = tester.evaluateAtX( xMid )\n\n dy = abs( y - yMid )\n if( ( dy > abs( y * tester.relativeTolerance ) ) and ( dy > tester.absoluteTolerance ) ) :\n newList.append( [ xMid, y ] )\n thickenXYList2( interpolation, xl, yl, xMid, y, newList, tester, level )\n thickenXYList2( interpolation, xMid, y, xu, yu, newList, tester, level )\n\n if( len( list ) < 2 ) : raise Exception( \"len( list ) = %d < 2\" % len( list ) )\n newList = []\n for i1, xy in enumerate( list ) :\n x2, y2 = xy\n if( i1 > 0 ) : thickenXYList2( interpolation, x1, y1, x2, y2, newList, tester, 0 )\n newList.append( [ x2, y2 ] )\n x1, y1 = x2, y2\n newList.sort( )\n return( newList )", "def optimal_scale(n,pred,true):\n def ECE(n,pred,true):\n n_bins = n\n bins = [[] for i in range(n_bins)]\n\n # computing the bins\n for i in range(pred.shape[0]):\n for j in range(n_bins):\n if pred[i].max()>j*(1./n_bins) and pred[i].max()<=(j+1)*(1./n_bins):\n bins[j].append(i)\n # computing the average accuracy over the bins\n cum_sum = [0 for i in range(n_bins)]\n for j in range(n_bins):\n for i in range(len(bins[j])):\n if np.argmax(pred[bins[j][i]]) == np.argmax(true[bins[j][i]]):\n cum_sum[j]+= 1./len(bins[j])\n # computing the ECE metric as presented in the paper\n ECE = 0.\n for j in range(n_bins):\n ECE+= abs((j+1./2)*(1./n_bins)-cum_sum[j])*(float(len(bins[j]))/2000.)\n return(ECE)\n\n # the range of temperature for which we evaluate the ECE\n Ts = np.linspace(1,3,30)\n l = []\n for i in range(30):\n print(Ts[i])\n scaled = temperature_scaling(pred,Ts[i])\n l.append(ECE(n,scaled,true))\n l = np.array(l)\n print(l)\n res = temperature_scaling(pred,Ts[np.argmin(l)])\n return(res,Ts[np.argmin(l)])", "def optimal(count):\n\n return _optimal(start, count)[0]", "def LST2timeStamp(self, lst):\n if isinstance(lst, list):\n lst = np.array(lst)\n return (lst-self.lst_start)*3590.*1e6", "def find_best_k(data, anots, neibhours_range):\r\n \r\n best_k = 0\r\n best_acc = 0\r\n for n_neighbors in neibhours_range:\r\n accur = iterate_over_chanels(data, anots, n_neighbors)\r\n mean_acc = accur.mean()\r\n if mean_acc > best_acc:\r\n best_acc = mean_acc\r\n best_k = n_neighbors\r\n return best_k", "def test_accuracy(file, lambs, file_CART, file_OSDT, timelimit=1800):\r\n with open(file_CART, 'a+') as f:\r\n f.write(\";\".join([\"fold\", \"lamb\", \"nleaves\", \"trainaccu_CART\", \"testaccu_CART\"]) + '\\n')\r\n with open(file_OSDT, 'a+') as f:\r\n f.write(\";\".join(\r\n [\"fold\", \"lamb\", \"nleaves\", \"trainaccu_OSDT\", \"testaccu_OSDT\", \"totaltime\", \"time_c\", \"leaves_c\"]) + '\\n')\r\n for lamb in lambs:\r\n for i in range(1, 11): # 10 folds\r\n\r\n file_train = file + '.train' + str(i) + '.csv'\r\n file_test = file + '.test' + str(i) + '.csv'\r\n\r\n data_train = pd.DataFrame(pd.read_csv(file_train, sep=\";\"))\r\n data_test = pd.DataFrame(pd.read_csv(file_test, sep=\";\"))\r\n\r\n X_train = data_train.values[:, :-1]\r\n y_train = data_train.values[:, -1]\r\n\r\n X_test 
= data_test.values[:, :-1]\r\n y_test = data_test.values[:, -1]\r\n\r\n # CART\r\n clf = tree.DecisionTreeClassifier(max_depth=None,\r\n min_samples_split=max(math.ceil(lamb * 2 * len(y_train)), 2),\r\n min_samples_leaf=math.ceil(lamb * len(y_train)),\r\n max_leaf_nodes=math.floor(1 / (2 * lamb)),\r\n min_impurity_decrease=lamb\r\n )\r\n\r\n clf = clf.fit(X_train, y_train)\r\n\r\n nleaves_CART = (clf.tree_.node_count + 1) / 2\r\n trainaccu_CART = clf.score(X_train, y_train)\r\n testaccu_CART = clf.score(X_test, y_test)\r\n\r\n with open(file_CART, 'a+') as f:\r\n f.write(\r\n \";\".join([str(i), str(lamb), str(nleaves_CART), str(trainaccu_CART), str(testaccu_CART)]) + '\\n')\r\n\r\n # OSDT\r\n model = OSDT(lamb=lamb, prior_metric=\"curiosity\", timelimit=timelimit, init_cart=True)\r\n model.fit(X_train, y_train)\r\n _, testaccu_OSDT = model.predict(X_test, y_test)\r\n\r\n # print(\"<<<<<<<<<<<<<<<<< clf1:\", clf)\r\n # print(\">>>>>>>>>>>>>>>>> testaccu_OSDT:\", testaccu_OSDT)\r\n\r\n # assert testaccu_OSDT==testaccu_CART\r\n\r\n with open(file_OSDT, 'a+') as f:\r\n f.write(\";\".join(\r\n [str(i), str(lamb), str(model.nleaves), str(model.accu), str(testaccu_OSDT),\r\n str(model.totaltime), str(model.time_c), str(model.leaves_c)]) + '\\n')", "def prepfold_time_alloc(prepfold_dict, beg, end):\n nopsearch = False\n nopdsearch = False\n nodmsearch = False\n nosearch = False\n if \"nopsearch\" in prepfold_dict:\n nopsearch = True\n if \"nodmsearch\" in prepfold_dict:\n nodmsearch = True\n if \"nopdsearch\" in prepfold_dict:\n nopdsearch = True\n if \"nosearch\" in prepfold_dict:\n nosearch = True\n npfact = prepfold_dict[\"npfact\"]\n ndmfact = prepfold_dict[\"ndmfact\"]\n nbins = prepfold_dict[\"n\"]\n duration = (prepfold_dict[\"end\"] - prepfold_dict[\"start\"]) * (end - beg)\n\n time = 600\n time += nbins\n time += duration\n\n if not nosearch:\n ptime = 1\n pdtime = 1\n dmtime = 1\n if not nopsearch:\n ptime = npfact*nbins\n if not nopdsearch:\n pdtime = npfact*nbins\n if not nodmsearch:\n dmtime = ndmfact*nbins\n time += ((ptime * pdtime * dmtime)/1e4)\n time = time*2 #compute time is very sporadic so just give double the allocation time\n\n return time", "def count_targets(searchList):\n count = 0\n n = len(searchList)\n stop1 = time.time()\n for t in range(-10000, 10001):\n for x in searchList:\n if t - x <= x:\n break\n i = bisect_left(searchList, t - x, hi=n-1)\n if searchList[i] == t - x:\n count += 1\n break\n return count", "def nyquist(self):\n return 1 / (2 * np.median(np.diff(self.lc.time)))", "def get_cream(list_of_things):\n\n return int(len(list_of_things) * 0.2)", "def time_binning(item, T_l):\n\n j = 0\n addresses = item[:, 0]\n timestamps = item[:, 1]\n features = []\n\n while j < timestamps[-1] / T_l:\n feature = np.zeros(64)\n for i in range(64):\n interest = timestamps[addresses == i]\n feature[i] = np.sum((interest < (j + 1) * T_l) * (interest >= j * T_l))\n features.append(feature)\n j += 1\n return np.stack(features)", "def _fast_partition_distance(p1_list, p2_list, n):\r\n p1 = {}\r\n for i, box in enumerate(p1_list):\r\n for x in box:\r\n p1[x] = i\r\n p2 = {}\r\n for i, box in enumerate(p2_list):\r\n for x in box:\r\n p2[x] = i\r\n\r\n k = max(len(p1_list), len(p2_list))\r\n similarity = 0\r\n t = np.empty((k, k), dtype=int)\r\n m = np.zeros(k, dtype=int)\r\n sigma = np.zeros(k, dtype=int)\r\n size_p1 = np.zeros(k, dtype=int)\r\n size_p2 = np.zeros(k, dtype=int)\r\n for x in range(n):\r\n t[p1[x], p2[x]] = 0\r\n for x in range(n):\r\n i = p1[x]\r\n j = p2[x]\r\n 
t[i, j] += 1\r\n size_p1[i] += 1\r\n size_p2[j] += 1\r\n if t[i, j] > m[i]:\r\n m[i] = t[i, j]\r\n sigma[i] = j\r\n for i in range(k):\r\n if m[i] != 0:\r\n if 3*m[i] <= size_p1[i] + size_p2[sigma[i]]:\r\n return None\r\n similarity = similarity + t[i,sigma[i]]\r\n return n - similarity", "def test_calculate_tunneling_factor(self):\n Tlist = np.array([300, 500, 1000, 1500, 2000])\n kexplist = np.array([4.90263, 2.40495, 1.35124, 1.15611, 1.08781])\n for T, kexp in zip(Tlist, kexplist):\n kact = self.tunneling.calculate_tunneling_factor(T)\n self.assertAlmostEqual(kexp, kact, 4)", "def crack_legth_compute(self):\n crack_grad = self.gradient(4,5)\n avegrad = self.average_grad()\n cracklen = self.distance(self.points[4,:],self.points[5,:])\n #print(\"photo_crack_len\", cracklen)\n\n #print(\"Crack_grad:\", crack_grad)\n\n angle_between_crack_and_vertical = np.arctan(np.abs((crack_grad - avegrad)/(1+crack_grad*avegrad)))\n #print(\"angle between crack and vertical: \", np.rad2deg(angle_between_crack_and_vertical),\"degrees\")\n\n\n h_dist = np.sin(angle_between_crack_and_vertical)*cracklen\n h_rel = h_dist/self.hline[2]\n #print('relative horizontal_crack distance:', h_rel)\n v_dist = np.cos(angle_between_crack_and_vertical)*cracklen\n v_rel = v_dist / self.vline[2]\n #print('relative vertical_crack distance:', v_rel)\n\n actual_crack_length = np.sqrt((h_rel*self.cut_breadth)**2 + (v_rel*self.cut_depth)**2)\n\n #print(\"Actual_crack_length\\n,\", actual_crack_length)\n print(actual_crack_length) # This is later writen to a text file and read into the main program\n\n return actual_crack_length", "def runTimingTests(c, startNx, endNx, stepNx, displayResults = False):\n timesArray = []\n nxs = np.empty(shape=[0])\n iteration = 0\n\n for currNx in range(startNx, endNx, stepNx):\n nx = currNx\n nt = nx\n nxs = np.append(nxs, nx)\n _, timesSmooth, _, _ = main(nx, nt, c, displayResults = False)\n timesArray = np.append(timesArray, timesSmooth)\n iteration = iteration+1\n \n timesArray = timesArray.reshape(iteration, len(timesSmooth)) \n timesArray = np.matrix.transpose(timesArray)\n logNxs = np.log10(nxs)\n logTimes = np.log10(timesArray)\n methods = [\"FTBS\", \"CTCS\", \"CNCS\", \"LaxWendroff\"]\n if(display):\n for i in range (0, 4):\n plt.plot(logNxs, logTimes[i], label=methods[i])\n coeff = np.polyfit(logNxs,logTimes[i],1)\n print(\"Estimated order of magnitude time vs nx \"\\\n +methods[i]+\": \"+str(coeff[0]))\n plt.title(\"Log-log plot time of execution in s vs nx\\nc=\"+str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.show()", "def solve_tsp(dist):\n\n # number of nodes\n N = dist.shape[0]\n\n # tsp path for quick calculation of cost\n ii = np.arange(N)\n jj = np.hstack((np.arange(1, N), 0))\n\n # for each node, a sorted list of closest nodes\n dsort = [np.argsort(d) for d in dist]\n dsort = [d[d != i] for i, d in enumerate(dsort)]\n\n # randomly initialize path through graph\n path = np.random.permutation(N)\n idx = np.argsort(path)\n cost = np.sum(dist[path[ii], path[jj]])\n \n # keep track of objective function over time\n cost_hist = [cost]\n\n # optimization loop\n node = 0\n while node < N:\n\n # we'll try breaking the connection i -> j\n i = path[node]\n j = path[(node+1) % N]\n \n # since we are breaking i -> j we can remove the cost of that connection\n c = cost - dist[i, j]\n\n # search over nodes k that are closer to j than i\n for k in dsort[j]:\n # can safely continue if dist[i,j] < dist[k,j] for the remaining k\n if k == i:\n node += 1\n break\n\n 
# break connection k -> p\n # add connection j -> p\n # add connection i -> k\n p = path[(idx[k]+1) % N]\n new_cost = c - dist[k,p] + dist[j,p] + dist[i,k]\n\n # if this swap improves the cost, implement it and move to next i\n if new_cost < cost:\n path = reverse_segment(path, idx[j], idx[k])\n idx = np.argsort(path)\n # make sure that we didn't screw up\n assert np.abs(np.sum(dist[path[ii], path[jj]]) - new_cost) < 1e-6\n cost = new_cost\n # restart from the begining of the graph\n cost_hist.append(cost)\n node = 0\n break\n\n return path, cost_hist", "def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)", "def elapseTime(self, idx):\n newBeliefs = util.Counter()\n for oldPos in self.legalPositions:\n if self.beliefs[idx][oldPos] <= 0:\n continue\n newPosDist = self.getPositionDistribution(oldPos)\n for newPos, prob in newPosDist.items():\n newBeliefs[newPos] += prob * self.beliefs[idx][oldPos]\n newBeliefs.normalize()\n self.beliefs[idx] = newBeliefs", "def get_mean_in_time(trajectories, nb_bins=15, freq_range=[0.4, 0.6]):\n # Create bins and select trajectories going through the freq_range\n time_bins = np.linspace(-950, 2000, nb_bins)\n trajectories = [traj for traj in trajectories if np.sum(np.logical_and(\n traj.frequencies >= freq_range[0], traj.frequencies < freq_range[1]), dtype=bool)]\n\n # Offset trajectories to set t=0 at the point they are seen in the freq_range and adds all the frequencies / times\n # to arrays for later computation of mean\n t_traj = np.array([])\n f_traj = np.array([])\n for traj in trajectories:\n idx = np.where(np.logical_and(traj.frequencies >=\n freq_range[0], traj.frequencies < freq_range[1]))[0][0]\n traj.t = traj.t - traj.t[idx]\n t_traj = np.concatenate((t_traj, traj.t))\n f_traj = np.concatenate((f_traj, traj.frequencies))\n\n # Binning of all the data in the time bins\n filtered_fixed = [traj for traj in trajectories if traj.fixation == \"fixed\"]\n filtered_lost = [traj for traj in trajectories if traj.fixation == \"lost\"]\n freqs, fixed, lost = [], [], []\n for ii in range(len(time_bins) - 1):\n freqs = freqs + [f_traj[np.logical_and(t_traj >= time_bins[ii], t_traj < time_bins[ii + 1])]]\n fixed = fixed + [len([traj for traj in filtered_fixed if traj.t[-1] < time_bins[ii]])]\n lost = lost + [len([traj for traj in filtered_lost if traj.t[-1] < time_bins[ii]])]\n\n # Computation of the mean in each bin, active trajectories contribute their current frequency,\n # fixed contribute1 and lost contribute 0\n mean = []\n for ii in range(len(freqs)):\n mean = mean + [np.sum(freqs[ii]) + fixed[ii]]\n mean[-1] /= (len(freqs[ii]) + fixed[ii] + lost[ii])\n\n nb_active = [len(freq) for freq in freqs]\n nb_dead = [fixed[ii] + lost[ii] for ii in range(len(fixed))]\n\n return 0.5 * (time_bins[1:] + time_bins[:-1]), mean, nb_active, nb_dead", "def compare_cow_transport_algorithms():\n cow_set = load_cows(\"ps1_cow_data.txt\")\n \n def get_run_time(func):\n start = time.time()\n print(func(cow_set))\n end = time.time()\n return end-start\n \n greedy_time = get_run_time(greedy_cow_transport)\n brute_force_time = get_run_time(brute_force_cow_transport)\n \n print(\"---\"*20)\n print(\"greedy runtime: \",greedy_time)\n print(\"brute force runtime: \", brute_force_time)", "def get_max_time_vals(train_results):\n t_tr, t_te, t_lo, t_re = 0, 0, 0, 0\n for tres in train_results:\n t_tr += tres.time_train\n t_te += tres.time_test\n 
t_lo += tres.time_load\n t_re += tres.time_reduce\n n = len(train_results)\n return t_tr/n, t_te/n, t_lo/n, t_re/n", "def n50_counter(input_list):\n input_list.sort()\n half_tot = sum(input_list) / 2\n\n current_count = 0\n for num in input_list:\n current_count += num\n if current_count >= half_tot:\n return num", "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def test_calculate_tunneling_factor(self):\n Tlist = np.array([300, 500, 1000, 1500, 2000])\n kexplist = np.array([1623051., 7.69349, 1.46551, 1.18111, 1.09858])\n for T, kexp in zip(Tlist, kexplist):\n kact = self.tunneling.calculate_tunneling_factor(T)\n self.assertAlmostEqual(kexp, kact, delta=1e-3 * kexp)", "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def calculate_accuracy(actual_list, predicted_list):\n valid_part_len = sum(i == j for i, j in zip(actual_list, predicted_list))\n return (100.0 / len(actual_list)) * valid_part_len", "def strategy_cheap(cookies, cps, history, time_left, build_info):\n min_item = None\n min_cost = float('inf')\n for item in build_info.build_items():\n if build_info.get_cost(item) <= min_cost:\n min_item = item\n min_cost = build_info.get_cost(item)\n\n if (time_left * cps + cookies) < min_cost:\n min_item = None\n\n print \"min item\", min_item\n print \"min_cost:\", min_cost\n print \"CPS:\", cps\n print \"Time Left:\", time_left\n print time_left * cps + cookies\n\n return min_item", "def _c2c_cost(sclst, eclst):\n def _c2c(point):\n _c_sum = 0\n for pt in eclst.points:\n _c_sum += point.frequency(pt)\n return _c_sum\n return int(sum(map(_c2c, sclst.points)))", "def detect_via_cusum_lg(ts, istart=30, threshold_times=5):\n S_h = 0\n S_l = 0\n S_list = np.zeros(istart) # 前面填充的30个空数据\n meanArray = talib.SMA(ts,timeperiod = istart)\n stdArray 
= talib.STDDEV(np.log(ts/meanArray),timeperiod = istart)\n for i in range(istart, len(ts)): # 这里是否应该掐头去尾?\n tslog = np.log(ts[i] / meanArray[i - 1])\n S_h_ = max(0, S_h + tslog - stdArray[i-1])\n S_l_ = min(0, S_l + tslog + stdArray[i-1])\n if S_h_> threshold_times * stdArray[i-1]:\n S_list = np.append(S_list,1) # 该点为上变点\n S_h_ = 0\n elif abs(S_l_)> threshold_times * stdArray[i-1]:\n S_list = np.append(S_list, -1) # 该点为下变点\n S_l_ = 0\n else:\n S_list = np.append(S_list, 0) # 该点无特殊情况\n S_h = S_h_\n S_l = S_l_\n\n return S_list", "def run(data, params):\n start_time = time.process_time()\n\n # 'n' is the number of candidates, also the number of ranks\n n = params['n']\n # 'N' is the total number of voters\n N = params['N']\n # 's0' is the optional ground truth full ranking of the candidates\n # (distribution is drawn off this full ranking)\n s0 = params['s0']\n\n # Order candidates by non-decreasing pair-wise contest wins \n # (ascending order with lexicographic tie-breaking)\n precedenceMatrix = utils.precedenceMatrix(data, n)\n\n # Credits to Sayan-Paul for starter code for merge sort\n # See: https://github.com/Sayan-Paul/Sort-Library-in-Python/blob/master/sortlib.py\n def mergesort(ar):\n if len(ar)<=1:\n return ar\n middle=len(ar)/2\n left =ar[:middle]\n right=ar[middle:]\n left=mergesort(left)\n right=mergesort(right)\n res=merge(left,right)\n return res\n\n def merge(left,right):\n res=[]\n while len(left)+len(right):\n if len(left)*len(right):\n if precedenceMatrix[left[0],right[0]]<=precedenceMatrix[right[0],left[0]]:\n res.append(left[0])\n left=left[1:]\n else:\n res.append(right[0])\n right=right[1:]\n elif len(left):\n res.append(left[0])\n left=left[1:]\n elif len(right):\n res.append(right[0])\n right=right[1:]\n return res\n\n candidates = [i for i in range(n)]\n sortedCandidates = mergesort(candidates)\n\n sigma = tuple(sortedCandidates)\n\n time_elapsed = (time.process_time() - start_time) * 1000\n\n return ALGORITHM_NAME, utils.generalizedKendallTauDistance(data, sigma, n, N, s0), time_elapsed, sigma", "def ensemble_cost(x_tapes):\n per_trajectory_costs = [final_state_cost(x_tape) for x_tape in x_tapes]\n # Return the L1-norm of the per-trajectory-costs, scaled by the number of\n # trajectories.\n return np.linalg.norm(per_trajectory_costs, 1) / len(x_tapes)", "def tim_sort(lst):\n length = len(lst)\n runs, sorted_runs = [], []\n new_run = [lst[0]]\n sorted_array = []\n i = 1\n while i < length:\n if lst[i] < lst[i - 1]:\n runs.append(new_run)\n new_run = [lst[i]]\n else:\n new_run.append(lst[i])\n i += 1\n runs.append(new_run)\n\n for run in runs:\n sorted_runs.append(insertion_sort(run))\n for run in sorted_runs:\n sorted_array = merge(sorted_array, run)\n\n return sorted_array", "def bucket_sort_sorted_list(q: int = 1000, n: int = 1000):\n times = []\n for i in range(q):\n sorted_list = sorted([random.randint(-100000, 100000) for iter in range(n)])\n start_time = time.time()\n bucket_sort(sorted_list)\n times.append(time.time() - start_time)\n return times", "def label_modes(trip_list, silent=True):\n\n\n if silent == False:\n print('Preparing to label modes of travel for ' \\\n + str(len(trip_list)) + ' trips.')\n\n loop_counter = 0\n loop_size = len(trip_list)\n for doc in trip_list:\n\n if silent == False:\n loop_counter = loop_counter + 1\n if loop_counter % 10000 == 0:\n print('Labeling modes. 
Finished ' + str(loop_counter) \\\n + ' trips.')\n\n time_spent_driving = 0\n time_spent_walking = 0\n time_spent_chilling = 0\n time_spent_bogus = 0\n for i in range(1,len(doc['reduction'])):\n if (float(doc['reduction'][i]['velocity']) >= 2.3):\n doc['reduction'][i]['mode'] = 'driving'\n\n elif (float(doc['reduction'][i]['velocity']) < 2.3 and float(doc['reduction'][i]['velocity']) > 0):\n doc['reduction'][i]['mode'] = 'walking'\n\n elif (float(doc['reduction'][i]['velocity']) == 0.0):\n doc['reduction'][i]['mode'] = 'chilling'\n\n if (float(doc['reduction'][i]['velocity']) > 22.22):\n doc['reduction'][i]['mode'] = 'bogus'\n\n\n for i in range(1,len(doc['reduction']) - 1):\n path_length = 0\n\n if (doc['reduction'][i]['mode'] == 'driving'):\n for j in range(i+1,len(doc['reduction'])):\n last_intersection_id = doc['reduction'][j]['IntersectionID']\n if (doc['reduction'][j]['mode'] == 'walking'): path_length = path_length + 1\n elif (doc['reduction'][j]['mode'] == 'driving' or doc['reduction'][j]['mode'] == 'bogus'): break\n\n if (path_length > 5 or last_intersection_id == doc['reduction'][i]['IntersectionID']):\n for k in range(i+1,j):\n if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'walking'\n else :\n for k in range(i+1,j):\n if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'driving'\n\n if (doc['reduction'][i]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n\n if (doc['reduction'][-1]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n\n\n duration_of_trip = float(doc['duration_of_trip'])\n doc['time_percentage_driving'] = str(time_spent_driving/duration_of_trip*100)\n doc['time_percentage_walking'] = str(time_spent_walking/duration_of_trip*100)\n doc['time_percentage_chilling'] = str(time_spent_chilling/duration_of_trip*100)\n doc['time_percentage_bogus'] = str(time_spent_bogus/duration_of_trip*100)\n\n if silent == False:\n print('Done labeling mode of travel. 
Returning list of length ' \\\n + str(len(trip_list)) + '.')\n\n return trip_list", "def main():\n list = FileList(input('Enter data file: '))\n start = time.time()\n median = (median_quick_select(list))\n print('Optimum new store Location:', median)\n print('Sum of distances to new store:', find_sums(list,median),'\\n')\n end = time.time()\n print('elapsed time:', end-start)", "def makedtimes(in_list,bucket,date,radar):\n s = pd.Series(in_list)\n split_s = s.str.split(bucket + date.strftime('/%Y/%m/%d/') + radar + '/' + radar+date.strftime('%Y%m%d_'),expand=True)\n split_s = split_s[1]\n counts = split_s.str.len()\n if date < np.datetime64('2007-12-31'):\n version = '.gz'\n min_count = 9.0\n elif date < np.datetime64('2015-12-31'):\n version = '_V06'\n min_count = 13\n else:\n min_count=10.0\n if radar[0]=='K':\n version = '_V06'\n elif radar[0] =='T':\n version = '_V08'\n split_s = split_s.str.split(version,expand=True)\n split_s = split_s[0]\n dtime = pd.to_datetime(date.strftime('%Y-%m-%d ')+ split_s).values\n df = pd.DataFrame(in_list,index=dtime)\n\n df['counts'] = pd.Series(counts.values,index=dtime)\n df = df.where(df.counts == min_count).dropna()\n df = df.drop('counts',axis=1)\n return df", "def get_expected_cost(self):", "def cheapCycling(SList,CList):\n N = len(CList)\n nodes= list(range(N))\n Udict = dict(zip(nodes,CList)) #dictionary nodes:neighbours\n queue = deque() #things to check\n checked = {} #checked nodes to avoid double checking\n stations=np.zeros((N,2))#node:arrival output\n\n min_arr = 1000000 #initialise fat value\n arr_node=-1\n \n min_dep = 1000000\n dep_node=-1\n\n while Udict:\n #node = Udict.pop(next(iter(Udict))) #extract first node\n node = next(iter(Udict)) #select first node\n queue.append(node) #add node to queue\n while queue: #while queue is non empty\n node = queue.popleft() #set/extract node to element of queue\n Udict.pop(node) #make sure is also removed from overarching dict\n for nb in CList[node]: #neighbours of node\n if nb not in checked:\n if SList[nb][0] <min_arr: #check if new minimum\n min_arr = SList[nb][0]\n arr_node = nb\n if SList[nb][1] <min_dep: #same but for departure\n min_dep = SList[nb][1]\n dep_node= nb\n queue.append(nb)\n checked[nb] = 1\n checked[node]=1\n \n stations[list(checked.keys()),0] = arr_node #dropping in the cheapest arr and dep nodes for all nodes in connected part\n stations[list(checked.keys()),1] = dep_node\n checked={} #reset checked to empty for new connected part IMPORTANT!\n min_arr=1000000 #reset minimum values\n min_dep=1000000 #reset min dep values\n\n return stations", "def preparation_time_in_minutes(number_of_layers):\n return number_of_layers * 2", "def strategy_expensive(cookies, cps, history, time_left, build_info):\n max_item = None\n max_cost = 0.0\n info = build_info\n for item in info.build_items():\n if (info.get_cost(item) >= max_cost and info.get_cost(item) <= (time_left * cps + cookies)):\n max_item = item\n max_cost = info.get_cost(item)\n\n return max_item", "def differenceFinder(self, startIndex, endIndex, list):\n maxDiff = 0\n start = list[startIndex]\n end = list[endIndex]\n for x in range(20):\n diffBefore = abs(list[startIndex] - list[startIndex-x])\n if diffBefore > maxDiff:\n maxDiff = diffBefore\n diffAfter = abs(list[endIndex] - list[endIndex+x])\n if diffAfter > maxDiff:\n maxDiff = diffAfter\n temp = endIndex\n if list[startIndex] < list[endIndex]: #Compare middle with smaller start or end index\n temp = startIndex\n for x in range(startIndex, endIndex):\n diff = abs(list[x] 
- list[temp])\n if diff > maxDiff:\n maxDiff = diff\n return maxDiff", "def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n - k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1", "def mi_from_dm_alt_hq(distance_matrix, ns, nh, spike_train_list=None):\n \n print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n #nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n nearest_neighbours = np.array([np.array(hq.nsmallest(nh, r)) for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n print \"finished sorting\"\n return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I", "def brute_force(city_list):\n start = time.time()*1000\n shortest = exhaustive_search(city_list,6)\n stop = 
time.time()*1000\n print(\"Shortest tour for 6 first cities:\", tour_distance(shortest))\n print (\"Time spent on 6 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,7)\n stop = time.time()*1000\n print(\"Shortest tour for 7 first cities:\", tour_distance(shortest))\n print (\"Time spent on 7 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,8)\n stop = time.time()*1000\n print(\"Shortest tour for 8 first cities:\", tour_distance(shortest))\n print (\"Time spent on 8 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,9)\n stop = time.time()*1000\n print(\"Shortest tour for 9 first cities:\", tour_distance(shortest))\n print (\"Time spent on 9 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,10)\n stop = time.time()*1000\n print(\"Shortest tour for 10 first cities:\", tour_distance(shortest))\n print (\"Time spent on 10 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\" \")" ]
[ "0.627223", "0.5568012", "0.55149806", "0.542661", "0.54003626", "0.5371899", "0.5357149", "0.5351884", "0.5332691", "0.53161633", "0.52837616", "0.52747995", "0.5273911", "0.52718514", "0.52711356", "0.5226085", "0.5219191", "0.51971257", "0.51944643", "0.5190102", "0.51824313", "0.5178413", "0.51422703", "0.5135504", "0.51283586", "0.50829357", "0.5077005", "0.5067348", "0.5055081", "0.50372857", "0.50348127", "0.5033357", "0.5027877", "0.50030285", "0.49933177", "0.4975318", "0.49721488", "0.49646", "0.4962616", "0.49598053", "0.4950164", "0.49212494", "0.4919435", "0.4917571", "0.49055696", "0.4905171", "0.48971167", "0.48910296", "0.48907068", "0.48894352", "0.48871148", "0.48838234", "0.4876615", "0.48734573", "0.486684", "0.48613173", "0.48609135", "0.48591575", "0.48569804", "0.4856494", "0.4854881", "0.48476943", "0.48439634", "0.48394135", "0.48332152", "0.4830649", "0.48213795", "0.4820359", "0.48168054", "0.48139998", "0.48128504", "0.4811784", "0.4810934", "0.48090786", "0.48013988", "0.48002413", "0.4799177", "0.4795202", "0.47940165", "0.47938505", "0.479357", "0.4784864", "0.4775671", "0.47631735", "0.47597402", "0.47597045", "0.4758023", "0.47565866", "0.47487155", "0.47460973", "0.47427502", "0.47282013", "0.47225246", "0.47222874", "0.47147515", "0.47145134", "0.47117373", "0.47116685", "0.4709625", "0.47081608" ]
0.6458329
0
This function takes a cut_configuration JSON object and calls the function corresponding to the desired cut, thereby returning the cutlist.
def generateCutList(cut_configuration): #Check that this line reads json.loads(cut_configuration) input_json = json.load(cut_configuration) #Currently only desired_cut and laser_cut_config are required try: block = input_json["block"] except: pass try: cut = input_json["desired_cut"] laser = input_json["laser_cut_config"] except: raise Exception("Either desired_cut or laser_cut_config not provided") if cut["cut_process"] == "line": final_list = line(cut["x1"],cut["y1"],cut["x2"],cut["y2"],cut["final_dimension_z"]+laser["z_final_overshoot"],laser) elif cut["cut_process"] == "simple_core": final_list = simple_core(block,cut,laser) elif cut["cut_process"] == "vertical_core": final_list = vertical_core(block,cut,laser) elif cut["cut_process"] == "oss_stacked": final_list = oss_stacked(block,cut,laser) elif cut["cut_process"] == "z_focus": final_list = z_focus(block,cut,laser) elif cut["cut_process"] == "cross": final_list = cross(block,cut,laser) else: raise Exception("No such cut exists: Check cut_process") #print(time_taken(final_list, laser)) now = datetime.now() timestamp = str(now.strftime("%m-%d_%H_%M")) complete_name = os.path.join(save_path, timestamp+".csv") with open(complete_name, mode='w',newline ='') as test_data: data_writer = csv.writer(test_data, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) list_data = json.loads(final_list) for line1 in list_data: data_writer.writerow(line1) return final_list
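A minimal usage sketch for the `generateCutList` document above (illustrative only, not part of the dataset row): the function calls `json.load` on its argument, so the configuration must be passed as a file-like object rather than a dict. The key names mirror the fields read inside the function body for the "line" cut process; all values, the completeness of `laser_cut_config`, and the assumption that the module-level `save_path` used for the CSV output is already defined are hypothetical.

    import io
    import json

    # Hypothetical configuration for the "line" cut process; key names mirror
    # those read inside generateCutList, values are illustrative only.
    config = {
        "desired_cut": {
            "cut_process": "line",
            "x1": 0.0, "y1": 0.0, "x2": 10.0, "y2": 5.0,
            "final_dimension_z": 2.0,
        },
        "laser_cut_config": {
            "z_final_overshoot": 0.1,
            # ...any further laser parameters expected by line() would go here...
        },
    }

    # generateCutList expects a readable stream, so wrap the dict in StringIO.
    cutlist = generateCutList(io.StringIO(json.dumps(config)))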
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadCutFolder(config):\n cutfolder = QFramework.TQFolder(\"__baseCut__\") #not a name any user should use but still sort of readable\n cutfolder.setTagString(\".cutExpression\",\"1.\") #pass all events, this is only a common handle for all actual (sub)cuts\n cutfolder.setTagBool(\".skipJobs\",True) #do not execute AnalysisJobs on this cut\n cutdeffiles = config.getTagVString(\"cuts\")\n if len(cutdeffiles) < 1:\n QFramework.BREAK(\"no cuts given!\")\n for cutdeffile in cutdeffiles:\n cutdeffile_abs = ROOT.TString(common.findConfigPath(cutdeffile))\n QFramework.INFO(\"loading cuts from '{:s}'\".format(cutdeffile.Data()))\n ok = False\n if cutdeffile_abs.EndsWith(\".py\"):\n ok = loadPyCuts(config,cutfolder,cutdeffile_abs)\n else:\n ok = cutfolder.importFromTextFile(cutdeffile_abs)\n\n\n if not ok:\n QFramework.BREAK(\"unable to load cuts from '{:s}' \".format(cutdeffile.Data()))\n\n return cutfolder", "def get_cuts(data, args, verbose):\n\n if args['experiment']['cut_finding'] == CutFinding.features:\n\n values = (data.xs == True).T\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.binning:\n\n values, names = binning(xs=data.xs,\n range_answers=args['cut_finding']['range_answers'],\n n_bins=args['cut_finding']['n_bins'])\n return Cuts(values=values, names=names)\n\n if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:\n\n values = kernighan_lin(A=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.kmodes:\n\n values = find_kmodes_cuts(xs=data.xs,\n max_nb_clusters=args['cut_finding']['max_nb_clusters'])\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:\n\n values = fid_mat(xs=data.A,\n nb_cuts=args['cut_finding']['nb_cuts'],\n lb_f=args['cut_finding']['lb_f'],\n seed=args['experiment']['seed'],\n verbose=verbose)\n values = np.unique(values, axis=0)\n return Cuts(values=values)\n\n if args['experiment']['cut_finding'] == CutFinding.linear:\n\n values, equations = linear_cuts(xs=data.xs,\n equations=args['cut_finding']['equations'],\n verbose=verbose)\n\n return Cuts(values=values, equations=equations)\n\n raise ValueError('Wrong name for a cut finding function')", "def loadCuts(config, aliases=QFramework.TQTaggable()):\n\n # if the user has specified a templated cut file to use, use that instead of a regular cut file\n templatedCutsFilepath = str(config.getTagStringDefault(\"templatedCuts.templateFilepath\", \"\"))\n templatedCutsFilepath_abs = str(common.findConfigPath(templatedCutsFilepath, False))\n if templatedCutsFilepath_abs:\n import os.path\n templateDir = os.path.dirname(templatedCutsFilepath_abs)\n templatedCutsInputsDir = str(config.getTagStringDefault(\"templatedCuts.inputsDir\",templateDir + '../inputs/'))\n # TODO: probably there's a cleaner way to do this, but for now we pass in the relative path to the templated cut file\n # but already the fully resolved path to the inputs dir (from the absolute path to the templated cut file)\n # templated cut file again resolved internally\n #Note: when using the templated cuts the 'single entry-cut policy' still applies\n cutfolder = loadTemplatedCutFolder(templatedCutsFilepath, templatedCutsInputsDir)\n cutdefs = cutfolder.getFolder(\"?\")\n else:\n # Use a regular cut file\n cutdefs = 
loadCutFolder(config)\n # retrieve the actual cut definitions from the folder\n #cutdefs = cutfolder.getFolder(\"?\")\n #cutdefs.detachFromBase()\n\n # if no aliases directly provided,\n if aliases.getNTags() < 1:\n # see if there are any in the config\n aliases.importTagsWithoutPrefix(config,\"cutParameters.\")\n aliases.importTagsWithoutPrefix(config,\"aliases.\")\n # create compiled cuts\n cuts = QFramework.TQCut.importFromFolder(cutdefs,aliases)\n if not cuts:\n QFramework.BREAK(\"failed to load cuts from folder '{:s}' - please check input path\".format(cutdefs.GetName()))\n\n # for \"N-1\" plots\n for cut in config.getTagVString(\"nullifyCuts\"):\n c = cuts.getCut(cut)\n if c:\n QFramework.WARN(\"nullifying cut '{:s}'\".format(cut))\n c.setCutExpression(\"1\")\n else:\n QFramework.WARN(\"unable to find cut '{:s}' while trying to nullify\".format(cut))\n\n return cuts", "def loadPyCuts(config, basecut, path):\n\n abs_path = common.findConfigPath(path)\n\n # continue only if there was one match found\n if len(abs_path) == 0:\n QFramework.BREAK(\"No file '{:s}' found, please check your configuration!\".format(abs_path))\n\n import imp\n import inspect\n import os\n\n module_name = os.path.basename(abs_path).rstrip(\".py\")\n try:\n addcuts = imp.load_source(module_name, abs_path)\n\n argspec = inspect.getargspec(addcuts.addCuts)\n if 'config' in argspec.args and 'baseCut' in argspec.args:\n added = addcuts.addCuts(baseCut = PyCut(basecut), config=config)\n elif len(argspec.args) == 2:\n added = addcuts.addCuts(config,PyCut(basecut))\n elif len(argspec.args) == 1:\n added = addcuts.addCuts(PyCut(basecut))\n else:\n QFramework.BREAK(\"unable to add cuts(s) from script '{:s}' - unknown arguments appeared: {:s}\".format(abs_path, str(argspec.args)))\n if not added and not (added is None):\n QFramework.BREAK(\"The cut definition snippet '{:s}' indicated an error, please check potential errors reported above\".format(abs_path))\n except IOError as error:\n QFramework.BREAK(\"unable to open file '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except NameError as error:\n QFramework.BREAK(\"syntax error in cut snippet '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except AttributeError as error:\n QFramework.BREAK(\"attribute error in cut definition '{:s}' - please double-check!\\n\".format(abs_path)+\n \"If the message from python below is\\n'module' object has no attribute 'addCuts'\\nplease make sure that the snippet has the function addCuts(baseCut) or addCuts(config,baseCut) defined.\\n\"\n \"Message from python:\\n\"+str(error))\n return True", "def visualise(cut_list): \r\n\tcutlist = json.load(cut_list)\r\n\tmodified_list =[]\r\n\tz_set = 0\r\n\tc_set = 0\r\n\ta_set = 0\r\n\tcut_num = 0\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\ta.pop(0)\r\n\t\t\ta = list(map(float,a)) + [z_set]\r\n\t\t\t\r\n\t\t\tif a_set != 0 or c_set != 0:\r\n\t\t\t\ta = rotate_a(a_set,a)\r\n\t\t\t\ta = rotate_c(c_set,a_set,a)\r\n\r\n\t\t\ta = a +[f\"a_set {a_set} c_set {c_set} z_set {z_set:.1f} cut_num {cut_num}\"]\r\n\t\t\tmodified_list.append(a)\r\n\r\n\t\telif a[0] == \"z_abs\":\r\n\t\t\tz_set = float(a[1])\r\n\t\t\tcut_num += 1\r\n\t\telif a[0] == \"c_abs\":\r\n\t\t\tc_set = float(a[1])\r\n\t\telif a[0] == \"a_abs\":\r\n\t\t\ta_set = float(a[1])\r\n\r\n\t\telif a[0] == \"z_rel\" or a[0] == \"z_step\":\r\n\t\t\tz_set = z_set + float(a[1])\r\n\t\telif a[0] == \"c_rel\" or a[0] == 
\"c_step\":\r\n\t\t\tc_set = c_set + float(a[1])\r\n\t\telif a[0] == \"a_rel\" or a[0] == \"a_step\":\r\n\t\t\ta_set = a_set + float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\tdf = pd.DataFrame(modified_list, columns = [\"x\",\"y\",\"z\",\"layer\"])\r\n\tfig = px.line_3d(df,\"x\",\"y\",\"z\",color=\"layer\")\r\n\t#fig.update_layout(scene_aspectmode = \"data\")\r\n\tfig.show()", "def main(configuration_path, input_path, output_path, chunksize, verbose):\n log = setup_logging(verbose=verbose)\n\n with open(configuration_path) as f:\n config = yaml.load(f)\n\n selection = config.get(\"selection\", None)\n data_format = config.get(\"data_format\", \"simple\")\n\n if not selection:\n log.info(\"No entries for selection cuts. Just copying files.\")\n copyfile(input_path, output_path)\n log.info(\"Copying finished\")\n return\n\n log.info(data_format)\n if data_format == \"simple\":\n key = config.get(\"events_key\", \"events\")\n n_events = get_number_of_rows_in_table(input_path, key=key)\n if chunksize is None:\n chunksize = n_events + 1\n apply_cuts_h5py_chunked(\n input_path, output_path, selection, chunksize=chunksize, key=key\n )\n n_events_after = get_number_of_rows_in_table(output_path, key=key)\n remaining = n_events_after / n_events\n log.info(f\"Events in file before cuts {n_events}\")\n log.info(\n f\"Events in new file after cuts {n_events_after}. That is {remaining:.2%}\"\n )\n copy_group(input_path, output_path, \"runs\")\n elif data_format == \"CTA\":\n keep_images = config.get(\"keep_images\", True)\n n_before, n_after = apply_cuts_cta_dl1(\n input_path,\n output_path,\n selection,\n keep_images,\n )\n\n log.info(f\"Telescope-events in file before cuts {n_before}\")\n log.info(\n f\"Telescope-events in new file after cuts {n_after}. \"\n f\"That is {(n_after/n_before):.2%}\"\n )", "def writecalls(\n self, config: Dict[str, Dict[str, Dict[str, str]]], scope: str\n ) -> List[str]:\n inputs: List[str] = []\n for quantity in self.input[scope]:\n inputs.extend(quantity.get_leaves_of_scope(scope))\n config[\"nominal\"][\"input\"] = '\"' + '\", \"'.join(inputs) + '\"'\n config[\"nominal\"][\"input_vec\"] = '{\"' + '\",\"'.join(inputs) + '\"}'\n config[\"nominal\"][\"df\"] = \"{df}\"\n try:\n return [\n self.call.format(**config[\"nominal\"])\n ] # use format (not format_map here) such that missing config entries cause an error\n except KeyError as e:\n log.error(\n \"Error in {} Basefilter, key {} is not found in configuration\".format(\n self.name, e\n )\n )\n log.error(\"Call: {}\".format(self.call))\n raise Exception", "def buildcutlineset():\r\n cutlineset=[[[-3.2697,-3.2697],[-4.3304,-4.3304]],[[-3.2697,-4.3304],[-4.3304,-3.2697]]]\r\n cutlineset.extend([[[-3.2697,176.0104],[-4.3304,174.9497]],[[-3.2697,174.9497],[-4.3304,176.0104]]])\r\n cutlineset.extend([[[176.0104,176.0104],[174.9497,174.9497]],[[176.0104,174.9497],[174.9497,176.0104]]])\r\n cutlineset.extend([[[175.4800,-3.05],[175.4800,-4.55]],[[174.7300,-3.8],[176.2300,-3.8]]])\r\n \r\n for cutline in cutlineset:\r\n for pos in cutline:\r\n pos[0]=pos[0]+globalconfig.CUTLINE_X_OFFSET\r\n pos[1]=pos[1]+globalconfig.CUTLINE_Y_OFFSET\r\n \r\n for row in range(0,globalconfig.X_ARRAY_NUM):\r\n cutlineset.append([[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[globalconfig.X_BLANK+row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO)+globalconfig.CUTLINE_X_OFFSET,-3.0+globalconfig.CUTLINE_Y_OFFSET]])\r\n 
def writecalls(
    self, config: Dict[str, Dict[str, Dict[str, str]]], scope: str
) -> List[str]:
    inputs: List[str] = []
    for quantity in self.input[scope]:
        inputs.extend(quantity.get_leaves_of_scope(scope))
    config["nominal"]["input"] = '"' + '", "'.join(inputs) + '"'
    config["nominal"]["input_vec"] = '{"' + '","'.join(inputs) + '"}'
    config["nominal"]["df"] = "{df}"
    try:
        return [
            self.call.format(**config["nominal"])
        ]  # use format (not format_map here) such that missing config entries cause an error
    except KeyError as e:
        log.error(
            "Error in {} Basefilter, key {} is not found in configuration".format(
                self.name, e
            )
        )
        log.error("Call: {}".format(self.call))
        raise Exception

def buildcutlineset():
    cutlineset = [[[-3.2697, -3.2697], [-4.3304, -4.3304]], [[-3.2697, -4.3304], [-4.3304, -3.2697]]]
    cutlineset.extend([[[-3.2697, 176.0104], [-4.3304, 174.9497]], [[-3.2697, 174.9497], [-4.3304, 176.0104]]])
    cutlineset.extend([[[176.0104, 176.0104], [174.9497, 174.9497]], [[176.0104, 174.9497], [174.9497, 176.0104]]])
    cutlineset.extend([[[175.4800, -3.05], [175.4800, -4.55]], [[174.7300, -3.8], [176.2300, -3.8]]])

    for cutline in cutlineset:
        for pos in cutline:
            pos[0] = pos[0] + globalconfig.CUTLINE_X_OFFSET
            pos[1] = pos[1] + globalconfig.CUTLINE_Y_OFFSET

    for row in range(0, globalconfig.X_ARRAY_NUM):
        cutlineset.append([[globalconfig.X_BLANK + row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_X_OFFSET, 0.0 + globalconfig.CUTLINE_Y_OFFSET],
                           [globalconfig.X_BLANK + row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_X_OFFSET, -3.0 + globalconfig.CUTLINE_Y_OFFSET]])
        cutlineset.append([[globalconfig.X_BLANK + row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_X_OFFSET, 171.68 + globalconfig.CUTLINE_Y_OFFSET],
                           [globalconfig.X_BLANK + row*(globalconfig.X_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_X_OFFSET, 174.68 + globalconfig.CUTLINE_Y_OFFSET]])
    for line in range(0, globalconfig.Y_ARRAY_NUM):
        cutlineset.append([[0.0 + globalconfig.CUTLINE_X_OFFSET, globalconfig.Y_BLANK + line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_Y_OFFSET],
                           [-3.0 + globalconfig.CUTLINE_X_OFFSET, globalconfig.Y_BLANK + line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_Y_OFFSET]])
        cutlineset.append([[171.68 + globalconfig.CUTLINE_X_OFFSET, globalconfig.Y_BLANK + line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_Y_OFFSET],
                           [174.68 + globalconfig.CUTLINE_X_OFFSET, globalconfig.Y_BLANK + line*(globalconfig.Y_LENGTH/globalconfig.CENTER_RATIO) + globalconfig.CUTLINE_Y_OFFSET]])
    return cutlineset

def update_cutoffs_from_json(course_key, cutoffs, user):
    descriptor = modulestore('direct').get_course(course_key)
    descriptor.grade_cutoffs = cutoffs

    modulestore('direct').update_item(descriptor, user.id)

    return cutoffs

def createAnalysisSampleVisitor(config, cuts):

    # TODO: warn user if this function is called but 'cutbased' is false? (or no jobs yet booked - must do this first if a cutbased analysis is desired)

    # read the channel definitions
    channels = config.getTagVString("channels")

    CLI = config.getFolder("CLI+")
    # flag indicating to run a robust analysis
    robust = CLI.getTagBoolDefault("robust", False)
    # flag indicating to run a dummy analysis
    dummy = CLI.getTagBoolDefault("dummy", False)

    if not config.getTagBoolDefault("useMultiChannelVisitor", False) or robust or dummy:
        # using regular analysis sample visitor (default)
        visitor = QFramework.TQAnalysisSampleVisitor()
        visitor.setVerbose(True)
        visitor.setBaseCut(cuts)
        visitor.setPrettyPrint(config.getTagBoolDefault("prettyPrint", True))
        visitor.setLineUpdates(config.getTagBoolDefault("lineUpdates", True))
        visitor.setTagDouble("progressInterval", config.getTagDoubleDefault("progressInterval", 5.))
    else:
        # using fast MultiChannel analysis sample visitor
        visitor = QFramework.TQMultiChannelAnalysisSampleVisitor()
        visitor.setVerbose(True)
        visitor.setPrettyPrint(config.getTagBoolDefault("prettyPrint", True))
        visitor.setLineUpdates(config.getTagBoolDefault("lineUpdates", True))
        visitor.setTagDouble("progressInterval", config.getTagDoubleDefault("progressInterval", 5.))

    runtime = config.getFolder("runtime+")
    # TODO: add some protection against not finding the mcasvchannels in the runtime config for whatever reason
    mcasvchannels = runtime.getTagVStandardString("mcasvchannels")

    # TODO: cutlist was defined in runAnalysis.py, but apparently not used
    #cutlist = []
    for channel in mcasvchannels:
        cut = cuts.getClone()
        #cutlist.append(cut)
        visitor.addChannel(channel, cut)
        # TODO: used previously just as a list for cloneObservablesSmart
        # mcvchannels.append(channel)
    if config.getTagBoolDefault("showChannels", False):
        visitor.printChannels()

    # TODO: SmartObservableCloning not yet migrated to CAFExample (or fully implemented?) - Initial author = Carsten
    # Safe for this to go here? i.e. only for MCASV and before analysis algorithms are attached
    cloneObservablesSmart = False
    if config.getTagBoolDefault("reduceMCVObservables", False):
        try:
            from CAFExample.SmartObservableCloning import cloneSetSmart
            cloneObservablesSmart = True
        except ImportError:
            cloneObservablesSmart = False
            QFramework.ERROR("smart observable cloning unavailable, skipping")
    if cloneObservablesSmart:
        for channel in mcasvchannels:
            QFramework.TQObservable.getManager().cloneActiveSet(channel)

    return visitor
def onAddCutToolClicked(self, event):
    i_cube = self.cube_choice.GetSelection()
    i_dimension = self.cut_dimension_choice.GetSelection()

    if i_dimension <= 0:
        dlg_func.openWarningBox(_(u'CUT'), _(u'Cut dimension not selected'))
    else:
        value = self.cut_value_textCtrl.GetValue()
        if not value.strip():
            dlg_func.openWarningBox(_(u'CUT'), _(u'Cut value not specified'))
        else:
            cube = self._OLAP_server.getCubes()[i_cube]
            dimension = cube.getDimensions()[i_dimension - 1]
            row = (dimension.getLabel(), dimension.getName(), value)
            self.appendListCtrlRow(listctrl=self.cut_listCtrl, row=row)

            # After adding, clear the controls
            self.cut_dimension_choice.SetSelection(0)
            self.cut_value_textCtrl.SetValue(u'')

    event.Skip()

def extract_info(config, cut, label):
    # filter() returns an iterator in Python 3, so materialise the match
    # before indexing it
    cfg = [c for c in config['physics']['cuts'] if c['name'] == cut][0]
    text = ""
    if 'max' not in cfg:
        text += "#geq "
    text += str(cfg['min'])
    if 'max' in cfg and cfg['max'] != cfg['min']:
        text += '-' + str(cfg['max']) + ' ' + label + 's'
    elif cfg['min'] != 1:
        text += ' ' + label + 's'
    else:
        text += ' ' + label
    return text
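A quick sketch of extract_info() in action with a made-up cut configuration; "#geq" is the ROOT TLatex spelling of the >= sign used in plot labels.

config = {"physics": {"cuts": [{"name": "jet_n", "min": 2, "max": 4},
                               {"name": "bjet_n", "min": 1}]}}
print(extract_info(config, "jet_n", "jet"))     # -> "2-4 jets"
print(extract_info(config, "bjet_n", "b-jet"))  # -> "#geq 1 b-jet"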
def get_pop_list_for_scaling(bolo_name, d_cut, data_dir, tree_name="t_merged"):

    file_tree = TFile(data_dir + bolo_name + "_lowmass_fond.root")
    tree = file_tree.Get(tree_name)

    # Create background hist directory
    pop_path_name = script_utils.create_directory("../Analyse_" + bolo_name + "/Populations/Pop_for_scaling/")

    # Load the estimator
    d_est = BDT_fh.open_estimator_file(bolo_name)
    d_std_true_events = BDT_fh.open_true_event_FWHM_file(bolo_name)

    # Best estimator for heat: coefficients
    coeff_EC1, coeff_EC2 = str(d_est["HEAT"][:5]), str(1 - float(d_est["HEAT"][:5]))
    coeff_EIA, coeff_EIB = str(d_est["S1"][:5]), str(1 - float(d_est["S1"][:5]))
    coeff_EIC, coeff_EID = str(d_est["S2"][:5]), str(1 - float(d_est["S2"][:5]))

    sigma_IA = str(d_std_true_events["FWIA"])
    sigma_IC = str(d_std_true_events["FWIC"])
    sigma_IB = str(d_std_true_events["FWIB"])
    sigma_ID = str(d_std_true_events["FWID"])

    # Load standard cuts
    TCut_path_name = script_utils.create_directory("../Cut_files/")
    TCut_file_name = "TCuts.txt"
    file_TCut = ""
    # Add an exception if the file does not exist
    try:
        file_TCut = script_utils.open_text_file(TCut_path_name, TCut_file_name, "r")
    except IOError:
        script_utils.print_utility(script_utils.COL("No such file, use get_standard_cuts.py first", "fail"))
        sys.exit()

    # Load the cut values.
    list_file_TCut_lines = [line.rstrip().split(",") for line in file_TCut.readlines()]
    standard_cuts = ""
    # Add a boolean flag to check if the bolo has its cuts in the file
    is_bolo_in_file = False
    for line in list_file_TCut_lines:
        if bolo_name == line[0]:
            standard_cuts = line[1]
            is_bolo_in_file = True
    assert(is_bolo_in_file)

    l_all = TEventList("l_all")
    l_heatonly = TEventList("l_heatonly")

    l_FidGamma = TEventList("l_FidGamma")
    l_S1Gamma = TEventList("l_S1Gamma")
    l_S2Gamma = TEventList("l_S2Gamma")

    l_S1Beta = TEventList("l_S1Beta")
    l_S2Beta = TEventList("l_S2Beta")

    l_S1Pb = TEventList("l_S1Pb")
    l_S2Pb = TEventList("l_S2Pb")

    string_EC = coeff_EC1 + "*EC1_ERA+" + coeff_EC2 + "*EC2_ERA"
    string_EI = coeff_EIB + "*EIB+" + coeff_EID + "*EID"

    standard_cuts = standard_cuts + "&&KTH<1&&KTH>0"
    heat_cut = str(d_cut["ECinf"]) + "<" + string_EC + "&&" + str(d_cut["ECsup"]) + ">" + string_EC + "&& abs(EC1_ERA-EC2_ERA)<1"
    ion_cut = str(d_cut["EIinf"]) + "<" + string_EI + "&&" + str(d_cut["EIsup"]) + ">" + string_EI
    veto_cut = "EIA<" + str(d_cut["sigma_vet"]) + "*" + sigma_IA + "&&" + "EIC<" + str(d_cut["sigma_vet"]) + "*" + sigma_IC

    all_cuts = "&&".join([standard_cuts, heat_cut, ion_cut, veto_cut])

    # print all_cuts.split("&&")
    # raw_input()

    ###############################
    # All
    ###############################
    tree.Draw(">>l_all", all_cuts)
    pop_len = l_all.GetN()
    pop_file_name = bolo_name + "_all_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_all.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()

    ###############################
    # Heatonly
    ###############################
    tree.Draw(">>l_heatonly", all_cuts + " && EIA<2.7*" + sigma_IA + " && EIB<2.7*" + sigma_IB + "&& EIC<2.7*" + sigma_IC + "&& EID<2.7*" + sigma_ID)
    pop_len = l_heatonly.GetN()
    pop_file_name = bolo_name + "_heatonly_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_heatonly.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()

    ##################################
    # G A M M A   E V E N T S
    ##################################
    # Fiducial gammas
    tree.Draw(">>l_FidGamma", all_cuts + " && EIA<2.7*" + sigma_IA + " && EIB>2.7*" + sigma_IB + "&& EIC<2.7*" + sigma_IC + "&& EID>2.7*" + sigma_ID + " &&" + d_est["Q_FID"] + ">0.7")
    pop_len = l_FidGamma.GetN()
    pop_file_name = bolo_name + "_FidGamma_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_FidGamma.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()
    # S1 gammas
    tree.Draw(">>l_S1Gamma", all_cuts + " && EIA>2.7*" + sigma_IA + " && EIB>2.7*" + sigma_IB + "&& EIC<2.7*" + sigma_IC + "&& EID<2.7*" + sigma_ID + " &&" + d_est["Q_S1"] + ">0.65")
    pop_len = l_S1Gamma.GetN()
    pop_file_name = bolo_name + "_S1Gamma_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_S1Gamma.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()
    # S2 gammas
    tree.Draw(">>l_S2Gamma", all_cuts + " && EIA<2.7*" + sigma_IA + " && EIB<2.7*" + sigma_IB + "&& EIC>2.7*" + sigma_IC + "&& EID>2.7*" + sigma_ID + " &&" + d_est["Q_S2"] + ">0.65")
    pop_len = l_S2Gamma.GetN()
    pop_file_name = bolo_name + "_S2Gamma_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_S2Gamma.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()

    ##################################
    # B E T A   E V E N T S
    ##################################
    # S1 beta
    tree.Draw(">>l_S1Beta", all_cuts + " && EIA>2.7*" + sigma_IA + " && EIB>2.7*" + sigma_IB + "&& EIC<2.7*" + sigma_IC + "&& EID<2.7*" + sigma_ID + " &&" + d_est["Q_S1"] + "<0.65 && " + d_est["Q_S1"] + ">0.2")
    pop_len = l_S1Beta.GetN()
    pop_file_name = bolo_name + "_S1Beta_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_S1Beta.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()
    # S2 beta
    tree.Draw(">>l_S2Beta", all_cuts + " && EIA<2.7*" + sigma_IA + " && EIB<2.7*" + sigma_IB + "&& EIC>2.7*" + sigma_IC + "&& EID>2.7*" + sigma_ID + " &&" + d_est["Q_S2"] + "<0.65 && " + d_est["Q_S2"] + ">0.2")
    pop_len = l_S2Beta.GetN()
    pop_file_name = bolo_name + "_S2Beta_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_S2Beta.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()

    ##################################
    # P b   E V E N T S
    ##################################
    # S1 Pb
    tree.Draw(">>l_S1Pb", all_cuts + " && EIA>2.7*" + sigma_IA + " && EIB>2.7*" + sigma_IB + "&& EIC<2.7*" + sigma_IC + "&& EID<2.7*" + sigma_ID + " &&" + d_est["Q_S1"] + "<0.15 &&" + d_est["Q_S1"] + ">0.04")
    pop_len = l_S1Pb.GetN()
    pop_file_name = bolo_name + "_S1Pb_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_S1Pb.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()
    # S2 Pb
    tree.Draw(">>l_S2Pb", all_cuts + " && EIA<2.7*" + sigma_IA + " && EIB<2.7*" + sigma_IB + "&& EIC>2.7*" + sigma_IC + "&& EID>2.7*" + sigma_ID + " &&" + d_est["Q_S2"] + "<0.15 &&" + d_est["Q_S2"] + ">0.04")
    pop_len = l_S2Pb.GetN()
    pop_file_name = bolo_name + "_S2Pb_KTH_cut_full_info.txt"
    pop_file = script_utils.open_text_file(pop_path_name, pop_file_name, "w")
    for k in range(pop_len):
        counter = l_S2Pb.GetEntry(k)
        tree.GetEntry(counter)
        pop_file.write(str(tree.EC1_ERA) + "," + str(tree.EC2_ERA) + "," + str(tree.EIA) + "," + str(tree.EIB) + "," + str(tree.EIC) + "," + str(tree.EID) + "\n")
    pop_file.close()

    list_list = [l_heatonly, l_FidGamma, l_S1Gamma, l_S2Gamma, l_S1Beta, l_S2Beta, l_S1Pb, l_S2Pb]
    list_num = [l.GetN() for l in list_list]
    list_ev = ["heatonly", "FidGamma", "S1Gamma", "S2Gamma", "S1Beta", "S2Beta", "S1Pb", "S2Pb"]
    for ev, num in zip(list_ev, list_num):
        print ev, num

    print "all known", sum(list_num)
    print "all", l_all.GetN()

    del l_all
    del l_heatonly

    del l_FidGamma
    del l_S1Gamma
    del l_S2Gamma

    del l_S1Beta
    del l_S2Beta

    del l_S1Pb
    del l_S2Pb
def parse(self, conf):
    boundaries = process_args(conf,
                              factory=self.factory,
                              str_keys=['type', 'boundary_type'])

    for b in boundaries.values():
        for k, v in b.items():
            if isinstance(v, dict) and 'type' in v:
                f_type = v.pop('type')
                func = self.factory.create_function(f_type, **v)
                b[k] = func

    self.bcs = list(boundaries.values())

def apply_trigger_first(cut_fn):
    def wrapped(arrays, cut):
        arrays = svjflatanalysis.arrayutils.apply_trigger_and_jetpt550(arrays, 2018)
        return cut_fn(arrays, cut)
    return wrapped

def compute_cost_and_order_cuts(cuts, cost_function):

    cost_cuts = np.zeros(len(cuts.values), dtype=float)
    for i_cut, cut in enumerate(cuts.values):
        cost_cuts[i_cut] = cost_function(cut)
    idx = np.argsort(cost_cuts)

    cuts.values = cuts.values[idx]
    cuts.costs = cost_cuts[idx]
    if cuts.names is not None:
        cuts.names = cuts.names[idx]
    if cuts.equations is not None:
        cuts.equations = cuts.equations[idx]

    return cuts
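Minimal usage sketch for compute_cost_and_order_cuts(). The cuts container is assumed to be a simple namespace carrying the .values/.costs/.names/.equations attributes accessed above, and the cost function simply favours balanced bipartitions.

import numpy as np
from types import SimpleNamespace

def balance_cost(cut):
    # distance from a perfect 50/50 split of the points
    return abs(int(np.sum(cut)) - len(cut) / 2.0)

cuts = SimpleNamespace(values=np.array([[1, 1, 1, 0],
                                        [1, 0, 0, 0],
                                        [1, 1, 0, 0]]),
                       names=None, equations=None)
cuts = compute_cost_and_order_cuts(cuts, balance_cost)
print(cuts.values[0], cuts.costs[0])  # [1 1 0 0] 0.0 -- the balanced cut first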
def simple_core(block, cut, laser):

    layers = int(block["thickness"] / laser["z_spacing"])

    # Since all cuts are square, the offsets are more obvious than in the general linear case.
    taper = math.tan(math.radians(laser["kerf_angle"] / 2)) * laser["z_spacing"]
    max_delta = math.tan(math.radians(laser["kerf_angle"] / 2)) * (block["thickness"] + laser["z_final_overshoot"]) * 2

    cutlist = []
    cutlist.append(["a_abs", "0"])
    cutlist.append(["c_abs", str(block["physical_rotation"])])
    cutlist.append(["z_abs", str(block["thickness"])])

    for a in range(layers):
        x1, y1 = cut["final_dimension_x"]/2 + a*taper, cut["final_dimension_y"]/2 + a*taper
        while abs(x1 - cut["final_dimension_x"]/2) < abs(max_delta):
            cutlist.append(["jump", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])])
            cutlist.append(["mark", str(x1 + block["origin_x"]), str(-y1 + block["origin_y"])])
            cutlist.append(["mark", str(-x1 + block["origin_x"]), str(-y1 + block["origin_y"])])
            cutlist.append(["mark", str(-x1 + block["origin_x"]), str(y1 + block["origin_y"])])
            cutlist.append(["mark", str(x1 + block["origin_x"]), str(y1 + block["origin_y"])])
            x1, y1 = x1 + laser["xy_spacing"], y1 + laser["xy_spacing"]
        cutlist.append(["z_step", str(-laser["z_spacing"])])
        max_delta = max_delta - taper
    return json.dumps(cutlist)

def main():
    parser = ArgumentParser(
        description='Category Filter: Filter a List of Categories from a JSON')
    parser.add_argument('json_file_path', help='JSON file path')
    parser.add_argument('out_file', help='Output filename')
    args = parser.parse_args()

    ann_file = open(args.json_file_path)
    category_names = ["sports ball", "cell phone", "couch", "elephant", "tie", "spoon", "skis", "apple", "giraffe", "laptop", "tennis racket", "sink", "dog", "fork", "cat", "teddy bear", "train", "skateboard", "toilet", "sandwich", "bed", "keyboard", "baseball glove", "baseball bat", "airplane", "oven", "hot dog", "refrigerator", "frisbee", "mouse", "fire hydrant", "stop sign", "bear", "snowboard", "parking meter", "toothbrush", "microwave", "scissors", "hair drier", "toaster"]

    json_coco = json.load(ann_file)
    new_json = deepcopy(json_coco)

    for ann in json_coco['annotations']:
        if return_cat_name(json_coco, ann['category_id']) in category_names:
            new_json['annotations'].remove(ann)

    for cat in json_coco['categories']:
        if cat['name'] in category_names:
            new_json['categories'].remove(cat)

    output = open(args.out_file, "w")
    json.dump(new_json, output)
    output.close()

def cut_eval(self, hits, *args):
    end = self.start_offset + self.train_window + self.predict_window
    return self.cut(hits, self.start_offset, end) + args

def setCutFile(self, cutfile):
    self.cutfile = cutfile
    self.cuts = {}
    with open(cutfile) as f:
        for l in f:
            llist = l.strip().split()
            cut = str(int(llist[0])).zfill(2)
            seq = cutinfo11[cut][1]
            self.cuts[cut] = [readQIE.sequences(elem, seq) for elem in llist[1:]]

def writecalls(
    self, config: Dict[str, Dict[str, Dict[str, str]]], scope: str
) -> List[str]:
    basecall = self.call
    calls: List[str] = []
    shifts = ["nominal"]
    if self.output is not None:
        shifts.extend(self.output[0].get_shifts(scope))
    for shift in shifts:
        # check that all config lists (and output if applicable) have same length
        log.debug("self.vec_configs[0]: {}".format(self.vec_configs[0]))
        log.debug("len(self.vec_configs): {}".format(len(self.vec_configs)))
        log.debug("available shifts: {}".format(config.keys()))
        n_versions = len(config[shift][self.vec_configs[0]])
        for key in self.vec_configs:
            if n_versions != len(config[shift][key]):
                log.error(
                    "Following lists in config must have same length: %s, %s"
                    % (self.vec_configs[0], key)
                )
                raise Exception
        if self.output is not None and len(self.output) != n_versions:
            log.error(
                "{} expects either no output or same amount as entries in config lists !".format(
                    self
                )
            )
            log.error("Number of expected outputs: {}".format(n_versions))
            log.error("List of outputs: {}".format(self.output))
            raise Exception
        for i in range(n_versions):
            helper_dict: Dict[Any, Any] = {}
            for key in self.vec_configs:
                helper_dict[key] = config[shift][key][i]
            if self.output is not None:
                helper_dict["output"] = (
                    '"' + self.output[i].get_leaf(shift, scope) + '"'
                )
                helper_dict["output_vec"] = (
                    '{"' + self.output[i].get_leaf(shift, scope) + '"}'
                )
            self.call = basecall.format_map(SafeDict(helper_dict))
            calls.append(self.writecall(config, scope, shift))
    self.call = basecall
    return calls

def test_get_categories_from_json():
    allocator = RecipeAllocator()
    allocator.load_data(
        orders_dir="tests/orders.json", recipes_dir="tests/recipes.json"
    )
    allocator.get_categories_from_json()
    assert list(allocator.portion_categories_dict.keys()) == [
        "two_portions",
        "four_portions",
    ] and list(allocator.recipe_categories_dict.keys()) == [
        "two_recipes",
        "three_recipes",
        "four_recipes",
    ]
def cutflow(self, *names):
    for cut in names:
        if not isinstance(cut, str) or cut not in self._names:
            raise ValueError(
                "All arguments must be strings that refer to the names of existing selections"
            )

    masksonecut, maskscutflow = [], []
    for i, cut in enumerate(names):
        mask1 = self.any(cut)
        mask2 = self.all(*(names[: i + 1]))
        masksonecut.append(mask1)
        maskscutflow.append(mask2)

    if not self.delayed_mode:
        nevonecut = [len(self._data)]
        nevcutflow = [len(self._data)]
        nevonecut.extend(numpy.sum(masksonecut, axis=1))
        nevcutflow.extend(numpy.sum(maskscutflow, axis=1))

    else:
        nevonecut = [dask_awkward.count(self._data, axis=0)]
        nevcutflow = [dask_awkward.count(self._data, axis=0)]
        nevonecut.extend([dask_awkward.sum(mask1) for mask1 in masksonecut])
        nevcutflow.extend([dask_awkward.sum(mask2) for mask2 in maskscutflow])

    return Cutflow(
        names, nevonecut, nevcutflow, masksonecut, maskscutflow, self.delayed_mode
    )

def print_cutflow(cutflow):

    len_column1 = max([len(k) for k in cutflow.keys()])

    print("\nCutflow:")
    print("\tCut" + (len_column1 - 3) * " " + " Abs. eff. [%]")
    nAll = cutflow["noCut"]
    for cut in cutflow.keys():
        print("\t%s%s %.2f" % (cut, (len_column1 - len(cut)) * " ", 100 * cutflow[cut] / nAll))

    return
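print_cutflow() only needs a plain dict of event counts; the "noCut" entry provides the normalisation. Roughly:

cutflow = {"noCut": 1000, "trigger": 800, "pt_cut": 400}
print_cutflow(cutflow)
# Cutflow:
#     Cut     Abs. eff. [%]
#     noCut   100.00
#     trigger 80.00
#     pt_cut  40.00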
def cuts(self) -> list[list[int]]:
    if self._cuts is not None:
        return self._cuts
    width = self.width
    height = self.height
    screen_region = Region(0, 0, width, height)
    cuts_sets = [{0, width} for _ in range(height)]

    if self.map is not None:
        for region, order, clip in self.map.values():
            region = region.intersection(clip)
            if region and (region in screen_region):
                region_cuts = region.x_extents
                for y in region.y_range:
                    cuts_sets[y].update(region_cuts)

    # Sort the cuts for each line
    self._cuts = [sorted(cut_set) for cut_set in cuts_sets]
    return self._cuts

def hxlcut():
    run_script(hxlcut_main)

def polyCut(*args, caching: bool=True, constructionHistory: bool=True, cutPlaneCenter:
            Union[List[float, float, float], bool]=None, cutPlaneCenterX: Union[float,
            bool]=0.0, cutPlaneCenterY: Union[float, bool]=0.0, cutPlaneCenterZ: Union[float,
            bool]=0.0, cutPlaneHeight: Union[float, bool]=0.0, cutPlaneRotate: Union[List[float,
            float, float], bool]=None, cutPlaneRotateX: Union[float, bool]=0.0,
            cutPlaneRotateY: Union[float, bool]=0.0, cutPlaneRotateZ: Union[float, bool]=0.0,
            cutPlaneSize: Union[List[float, float], bool]=None, cutPlaneWidth: Union[float,
            bool]=0.0, cuttingDirection: AnyStr="", deleteFaces: bool=False, extractFaces:
            bool=False, extractOffset: Union[List[float, float, float], bool]=None,
            extractOffsetX: Union[float, bool]=0.0, extractOffsetY: Union[float, bool]=0.0,
            extractOffsetZ: Union[float, bool]=0.0, name: AnyStr="", nodeState: Union[int,
            bool]=0, onObject: bool=True, worldSpace: bool=True, q=True, query=True, e=True,
            edit=True, **kwargs) -> Union[AnyStr, Any]:
    pass

def bookAnalysisJobs(config, cuts, aliases=QFramework.TQTaggable()):

    # boolean variable to keep track of whether we are using cutbased or MVA
    mva = config.getTagVString("MVA")
    cutbased = (len(mva) == 0)

    # if no aliases directly provided,
    if aliases.getNTags() < 1:
        # see if there are any in the config
        aliases.importTagsWithoutPrefix(config, "cutParameters.")
        aliases.importTagsWithoutPrefix(config, "aliases.")

    # TODO: modularize booking of each type of analysis job?

    # book cutflows
    if config.getTagBoolDefault("cutbased.makeCutflow", cutbased):
        QFramework.INFO("booking cutflow")
        cutflowjob = QFramework.TQCutflowAnalysisJob("cutflowJob")
        cuts.addAnalysisJob(cutflowjob, "*")
        cutbased = True

    # TODO: book xAOD skimming here? (worst case just implement in HWW)
    xAODdumpingConfig = QFramework.TQTaggable()
    dumpXAODs = (xAODdumpingConfig.importTagsWithoutPrefix(config, "xAODdumping.") > 0)
    if dumpXAODs:
        try:
            flaggingJob = ROOT.TQEventFlaggingAnalysisJob()
            if xAODdumpingConfig.hasTag("flagName"): flaggingJob.setFlagName(xAODdumpingConfig.getTagStringDefault("flagName", ""))
            flaggingCuts = xAODdumpingConfig.getTagStringDefault("cuts", "")
            print("Booking event flagging jobs at cuts: {:s}".format(flaggingCuts.Data()))
            cuts.addAnalysisJob(flaggingJob, flaggingCuts)
        except NameError:
            QFramework.ERROR("Cannot schedule xAOD dumping, required classes are not in your version of CAFCore. Please consider updating CAFCore")

    # add the event flagging for possible unfolding
    unfoldingConfig = QFramework.TQTaggable()
    unfolding = (unfoldingConfig.importTagsWithoutPrefix(config, "unfolding.") > 0)
    if unfolding:
        # add a suffix to the cut names for the flags. This is needed to prevent cross talk between channels!
        unfoldingFlagSuffix = config.getTagStandardStringDefault("~flagSuffix", "_$(cand)")
        unfoldingCuts = unfoldingConfig.getTagVString("flagcuts")
        for cutName in unfoldingCuts:
            fullCutNames = cuts.getCutNames(cutName)
            for fullCutName in fullCutNames:
                unfoldingJob = ROOT.TQEventFlaggingAnalysisJob()
                flagName = fullCutName + unfoldingFlagSuffix
                unfoldingJob.setFlagName(flagName)
                cuts.addAnalysisJob(unfoldingJob, fullCutName)

    # book histograms (TH1, TH2, TH3, TProfiles,...)
    if (config.hasTag("histograms.0") or config.hasTag("histograms")) and config.getTagBoolDefault("makeHistograms", cutbased):
        QFramework.INFO("booking histograms")
        histofiles = config.getTagVString("histograms")
        histofiles = common.findMultipleConfigPathsFromList(histofiles)
        if QFramework.TQHistoMakerAnalysisJob.importJobsFromTextFiles(histofiles, cuts, aliases, "*", config.getTagBoolDefault("printHistograms", False)) > 0:
            print(QFramework.TQHistoMakerAnalysisJob.getErrorMessage())
        cutbased = True

    # book multi-dim histograms (THn based, i.e., for nDim>3)
    if (config.hasTag("multidimHistograms")) and config.getTagBoolDefault("makeHistograms", cutbased):
        QFramework.INFO("booking multidimensional histograms")
        histofiles = config.getTagVString("multidimHistograms")
        histofiles = common.findMultipleConfigPathsFromList(histofiles)
        if QFramework.TQTHnBaseMakerAnalysisJob.importJobsFromTextFiles(histofiles, cuts, aliases, "*", config.getTagBoolDefault("printHistograms", False)) > 0:
            print(QFramework.TQTHnBaseMakerAnalysisJob.getErrorMessage())
        cutbased = True

    # book graphs
    if (config.hasTag("graphs.0") or config.hasTag("graphs")) and config.getTagBoolDefault("makeGraphs", cutbased):
        QFramework.INFO("booking graphs")
        graphfiles = config.getTagVString("graphs")
        graphfiles = common.findMultipleConfigPathsFromList(graphfiles)
        if QFramework.TQGraphMakerAnalysisJob.importJobsFromTextFiles(graphfiles, cuts, aliases, "*", config.getTagBoolDefault("printGraphs", False)) > 0:
            print(QFramework.TQGraphMakerAnalysisJob.getErrorMessage())
        cutbased = True

    # book event lists
    if (config.hasTag("eventlists.0") or config.hasTag("eventlists")) and config.getTagBoolDefault("makeEventLists", cutbased):
        QFramework.INFO("booking eventlists")
        evtlistfiles = config.getTagVString("eventlists")
        evtlistfiles = common.findMultipleConfigPathsFromList(evtlistfiles)
        if QFramework.TQEventlistAnalysisJob.importJobsFromTextFiles(evtlistfiles, cuts, aliases, "*") > 0:
            print(QFramework.TQEventlistAnalysisJob.getErrorMessage())
        cutbased = True

    # book Ntuple dumping
    if (config.hasTag("ntuples.0") or config.hasTag("ntuples")) and config.getTagBoolDefault("dumpNtuples", cutbased):
        QFramework.INFO("preparing to dump ntuples")
        ntupfiles = config.getTagVString("ntuples")
        ntupfiles = common.findMultipleConfigPathsFromList(ntupfiles)
        if QFramework.TQNTupleDumperAnalysisJob.importJobsFromTextFiles(ntupfiles, cuts, aliases, "*", config.getTagBoolDefault("printNTuples", False)) > 0:
            print(QFramework.TQNTupleDumperAnalysisJob.getErrorMessage())
        cutbased = True

    runtime = config.getFolder("runtime+")
    runtime.setTagBool("cutbased", cutbased)

    return

def callback_freq_cut(val):
    global plot_mode
    global idx_freq
    last_plot_mode = plot_mode
    plot_mode = 'freq_cut'
#    print( 'scale_freq', scale_freq)
    idx_freq = freq_to_idx( val, scale_freq )
    val_freq = idx_freq * scale_freq
#    print( 'val idx_freq val_freq', val, idx_freq, val_freq )
    update_num_shadow(int(sld['neighbors'].val))
    # plot 121
    lcutfreq.set_ydata( [val_freq, val_freq])
    lcuttime.set_alpha( 0.0 )
    lcutfreq.set_alpha( alpha_hm )
    # plot 122
    if plot_mode == last_plot_mode:
        replot_flags = get_replot_flag( idx_freq )
        replot_shadow( replot_flags )
        update_shadow( ~replot_flags )
        update_light()
    else:
        replot_shadow( [True, True])
        replot_light()
    reform_axis()

    fig.canvas.draw_idle()
def drawcutline(f, layernamelist, cutline_entities_count):

    #layernamelist=[layernamelist[0]]
    layercount = 0
    ringlist = [[[-0.215 + globalconfig.CUTLINE_X_OFFSET, 0.0 + globalconfig.CUTLINE_Y_OFFSET], [0.215 + globalconfig.CUTLINE_X_OFFSET, 0.0 + globalconfig.CUTLINE_Y_OFFSET]],
                [[-0.215 + globalconfig.CUTLINE_X_OFFSET, 171.68 + globalconfig.CUTLINE_Y_OFFSET], [0.215 + globalconfig.CUTLINE_X_OFFSET, 171.68 + globalconfig.CUTLINE_Y_OFFSET]],
                [[-0.215 + globalconfig.CUTLINE_X_OFFSET, 175.68 + globalconfig.CUTLINE_Y_OFFSET], [0.215 + globalconfig.CUTLINE_X_OFFSET, 175.68 + globalconfig.CUTLINE_Y_OFFSET]],
                [[171.4650 + globalconfig.CUTLINE_X_OFFSET, 0.0 + globalconfig.CUTLINE_Y_OFFSET], [171.8950 + globalconfig.CUTLINE_X_OFFSET, 0.0 + globalconfig.CUTLINE_Y_OFFSET]],
                [[171.4650 + globalconfig.CUTLINE_X_OFFSET, 171.68 + globalconfig.CUTLINE_Y_OFFSET], [171.8950 + globalconfig.CUTLINE_X_OFFSET, 171.68 + globalconfig.CUTLINE_Y_OFFSET]]]
    flashlist = buildflashlist()
    cutlineset = buildcutlineset()

    f.write("0\nSECTION\n2\nENTITIES\n")

    for layername in layernamelist:
        layercount = layercount + 1
        for polyline in cutlineset:
            cutline_entities_count = cutline_entities_count + 1
            f.write("0\nPOLYLINE\n8\n" + layername + "\n5\n" + hex(cutline_entities_count)[2:])  # begin writing a polyline
            f.write("\n66\n1\n10\n0.0\n20\n0.0\n30\n0.0\n40\n0.08\n41\n0.08\n")
            cutline_entities_count = drawwidthpolyline(polyline, cutline_entities_count, f, layername)
        cutline_entities_count = drawring(ringlist, cutline_entities_count, f, layername)
        cutline_entities_count = drawflash(flashlist, cutline_entities_count, f, layername)
        cutline_entities_count = drawtext(cutline_entities_count, f, layername, layercount)

    return cutline_entities_count

def subcfg2instance(
    cfg: DictConfig,
    keyname: str,
    valueidx: int,
    namespace: list = [],
    **kwargs: dict
):
    # kwargs to variables
    for k, v in kwargs.items():
        globs = globals()
        locs = locals()
        exec(f'{k} = v', globs, locs)
    # multiple or one option
    if type(cfg[keyname]) == ListConfig:
        assert type(valueidx) == int, f'{keyname} has multiple options, but no index is given.'
        c = cfg[keyname][valueidx]
    else:
        raise ValueError(f'expected ListConfig, but got {type(cfg[keyname])} in cfg {keyname} value')
    # if int, return directly
    if type(c) == int or type(c) == float:
        return c
    elif type(c) == ListConfig:
        return list(c)
    # if str enclosed in ", return as str
    elif type(c) == str and c.startswith("'") and c.endswith("'"):
        return c[1:-1]
    # if str, regarded as classname
    elif type(c) == str:
        classname = c
        arguments = {}
    elif type(c) == DictConfig:
        classname = list(c.keys())[0]
        assert type(classname) == str, f'expected str, but got {type(classname)} in {keyname} classname'
        arguments = list(c.values())[0]
        assert type(arguments) == DictConfig, f'expected DictConfig, but got {type(arguments)} in {keyname} arguments'
        globs = globals()
        locs = locals()
        tmp = {}
        for k, v in arguments.items():
            assert type(k) == str, f'expected str, but got {type(k)} in {keyname} argument key: {k}'
            if type(v) == int or type(v) == float or type(v) == bool:
                tmp[k] = v
            elif type(v) == str and v.startswith("'") and v.endswith("'"):
                tmp[k] = v[1:-1]
            elif type(v) == str:
                tmp[k] = eval(v, globs, locs)
            else:
                raise ValueError(f'expected ListConfig, DictConfig or str, but got {type(v)} in {keyname} argument value: {v}')
        arguments = tmp
    else:
        raise ValueError(f'expected DictConfig or str, but got {type(c)} in {c}')
    # get the operation corresponding to the class name
    return _classname2instance(classname, arguments, namespace)
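To make the accepted shapes concrete, here is a hypothetical OmegaConf fragment for subcfg2instance(); the class name and argument values are illustrative only.

# optimizer:                   # cfg["optimizer"] is a ListConfig of options
#   - 0.1                      # valueidx=0 -> returned as the float 0.1
#   - "'adam'"                 # valueidx=1 -> quoted, returned as the string "adam"
#   - mypackage.MyOptimizer:   # valueidx=2 -> instantiated via the namespace
#       lr: 0.01
#       schedule: "'cosine'"
#
# subcfg2instance(cfg, "optimizer", valueidx=2) would then resolve to
# _classname2instance("mypackage.MyOptimizer", {"lr": 0.01, "schedule": "cosine"}, namespace)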
def find_cosmics_in_cut(x, cut_wave, cut_brightest_line, line_wavelength=0.,
                        kernel_median_cosmics=5, cosmic_higher_than=100, extra_factor=1., plot=False, verbose=False):

    gc_bl = signal.medfilt(cut_brightest_line, kernel_size=kernel_median_cosmics)
    max_val = np.abs(cut_brightest_line - gc_bl)

    gc = signal.medfilt(cut_wave, kernel_size=kernel_median_cosmics)
    verde = np.abs(cut_wave - gc) - extra_factor * max_val

    cosmics_list = [i for i, x in enumerate(verde) if x > cosmic_higher_than]

    if plot:
        ptitle = "Cosmic identification in cut"
        if line_wavelength != 0:
            ptitle = "Cosmic identification in cut at " + str(line_wavelength) + " $\mathrm{\AA}$"
        plot_plot(x, verde, ymin=0, ymax=200, hlines=[cosmic_higher_than], ptitle=ptitle,
                  ylabel="abs (cut - medfilt(cut)) - extra_factor * max_val")

    if verbose:
        if line_wavelength == 0:
            print("\n> Identified", len(cosmics_list), "cosmics in fibres", cosmics_list)
        else:
            print("\n> Identified", len(cosmics_list), "cosmics at", str(line_wavelength), "A in fibres", cosmics_list)
    return cosmics_list
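A synthetic sanity check for find_cosmics_in_cut(): a flat cut with one spike far above its median-filtered baseline gets flagged.

import numpy as np

x = np.arange(100)                # fibre indices
cut_wave = np.full(100, 10.0)     # flat cut at the target wavelength
cut_wave[42] += 500.0             # simulated cosmic-ray hit on fibre 42
cut_bright = np.full(100, 50.0)   # brightest-line cut, no spikes

print(find_cosmics_in_cut(x, cut_wave, cut_bright))  # -> [42]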
def run(self, splat_id, graph_dict, infname, outfname, parameter_a, parameter_b, \
        argument1=None, argument2=None, argument3=None):
    (index2no, graph) = self.graph_dict2graph(graph_dict)
    clustering_test_instance = argument1
    clustering_test_instance.reformat(graph, infname, len(index2no))
    return_code = self.call_modes(infname, outfname, len(index2no), parameter_a, parameter_b)
    if return_code != 1:
        # modes' normal exit code is 1
        print 'call modes failed'
        sys.exit(1)
    if self.debug:
        clustering_test_instance.visualize_clusters(outfname, graph, index2no, '/tmp/test.R')
    codense2db_instance = argument2
    curs = argument3
    return self.parse_modes_results(splat_id, outfname, index2no, graph, codense2db_instance, curs)

def _process_chuck(self, c, bounds, events_slice, relevant_dates):
    lower_bound, upper_bound = bounds
    tz = self.args["timezone"]

    # current data chunk has no data for selected events slice
    last_timestamp = convert(c.Timestamp.iloc[-1], mode='timestamp')
    too_early_data = lower_bound > last_timestamp
    first_timestamp = convert(c.Timestamp.iloc[0], mode='timestamp')
    too_late_data = upper_bound < first_timestamp

    if too_early_data or too_late_data:
        return None, pd.DataFrame()

    def search_cond(ts):
        """ Used in binary search to split the ask/bid prices dataframe
        into appropriately sized chunks in accordance with the
        selected events range
        """
        ts = convert(ts, mode='timestamp')
        at = ["year", "month", "day", "hour", "minute"]
        if all(getattr(ts, a) == getattr(upper_bound, a) for a in at):
            return 0
        elif ts < upper_bound:
            return -1
        elif ts > upper_bound:
            return 1

    low, high = 0, len(c.Timestamp) - 1
    ok, idx = binary_search(c, 'Timestamp', search_cond, low, high)

    if ok:
        below, above = c.iloc[:idx], c.iloc[idx:]
    else:
        below, above = c, pd.DataFrame()
    relevant_dates = pd.concat([relevant_dates, below])

    if above.empty:
        return None, relevant_dates

    linked = self._link_data_and_events(
        events_slice, relevant_dates, timezone=tz)

    return linked, above

def process(self, arg: Any, stock: None | StockId = None) -> list[DataPoint]:
    ...

def result(self):
    CutflowResult = namedtuple(
        "CutflowResult",
        ["labels", "nevonecut", "nevcutflow", "masksonecut", "maskscutflow"],
    )
    labels = ["initial"] + list(self._names)
    return CutflowResult(
        labels,
        self._nevonecut,
        self._nevcutflow,
        self._masksonecut,
        self._maskscutflow,
    )

def _json_parse_qc(system_label, json_calc, only_converged=False):
    df_rows = []
    # If possible, check all calculations have converged
    if only_converged and 'cc_converged' in json_calc.keys():
        if not np.all(np.array(json_calc['cc_converged'])): return []
    # Getting row-dependent data.
    qa_lambdas = json_calc['qa_lambdas']
    electronic_energies = json_calc['electronic_energies']
    scf_converged = json_calc['scf_converged']

    # Keys that are not present in every calculation.
    if 'cc_converged' in json_calc.keys():
        cc_converged = json_calc['cc_converged']
    else:
        cc_converged = [None for _ in scf_converged]
    if 'hf_energies' in json_calc.keys():
        hf_energies = json_calc['hf_energies']
    else:
        hf_energies = [None for _ in electronic_energies]
    if 'triples_corrections' in json_calc.keys():
        triples_corrections = json_calc['triples_corrections']
    else:
        triples_corrections = [None for _ in electronic_energies]
    if 'scf_spin_squared' in json_calc.keys():
        scf_spin_squared = json_calc['scf_spin_squared']
    else:
        scf_spin_squared = [None for _ in electronic_energies]
    if 'cc_spin_squared' in json_calc.keys():
        cc_spin_squared = json_calc['cc_spin_squared']
    else:
        cc_spin_squared = [None for _ in electronic_energies]
    if 'broken_symmetry' in json_calc.keys():
        broken_sym = json_calc['broken_symmetry']
    else:
        broken_sym = None

    # Adds df row for every lambda.
    for i in range(len(qa_lambdas)):
        df_dict = {'system': system_label}

        # Checks convergence stuff.
        if scf_converged[i] and (cc_converged[i] is None or cc_converged[i]):
            converged = True
        else:
            converged = False
        if only_converged and not converged:
            continue

        # Adds common information for df rows
        df_dict['atomic_numbers'] = np.array(json_calc['atomic_numbers'])
        df_dict['charge'] = json_calc['molecular_charge']
        df_dict['multiplicity'] = json_calc['molecular_multiplicity']
        df_dict['n_electrons'] = json_calc['n_electrons']
        df_dict['qc_method'] = json_calc['model']['method']
        df_dict['basis_set'] = json_calc['model']['basis']
        df_dict['converged'] = converged

        # Handles energy components for post-HF and DFT methods.
        if hf_energies[i] is not None:
            df_dict['hf_energy'] = hf_energies[i]
            try:
                df_dict['correlation_energy'] = electronic_energies[i] - hf_energies[i]
            except TypeError:
                df_dict['correlation_energy'] = np.nan
        else:
            df_dict['hf_energy'] = electronic_energies[i]
            df_dict['correlation_energy'] = None
        df_dict['cc_spin_squared'] = cc_spin_squared[i]
        df_dict['scf_spin_squared'] = scf_spin_squared[i]
        df_dict['triples_correction'] = triples_corrections[i]
        df_dict['broken_sym'] = broken_sym

        # Important ones go in front and back.
        df_dict['lambda_value'] = float(qa_lambdas[i])
        df_dict['electronic_energy'] = electronic_energies[i]
        if len(df_dict['atomic_numbers']) == 2:
            geo = np.array(json_calc['molecule']['geometry'])
            df_dict['bond_length'] = _calc_distance(geo[0], geo[1])

        df_rows.append(df_dict)

    return df_rows

def reformat_cuts(input_cuts):
    output_cuts = []
    for cut in input_cuts:
        cut = list(cut)
        if cut[1] == None:
            cut[1] = float("-inf")
        if cut[2] == None:
            cut[2] = float("inf")
        cut = tuple(cut)
        output_cuts.append(cut)
    return output_cuts
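reformat_cuts() in action: open-ended bounds become infinities while closed bounds pass through untouched.

cuts_in = [("pt", None, 50), ("eta", -2.5, None)]
print(reformat_cuts(cuts_in))
# [('pt', -inf, 50), ('eta', -2.5, inf)]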
def apply_chart(rows, form_id):
    clear_form(form_id)
    section = None
    sections = []
    section_rows = []

    for row in rows:
        if row['section'].strip():
            if section and section_rows:
                sections.append((section, section_rows))
            section, section_rows = row['section'].strip(), []

        """
        If the title column of the CSV is not blank we assume that
        this row delimits the start of a new chart, so we create a new
        section to represent it using a dummy concept uuid.
        """
        if row['title'].strip():
            sections.append(('[chart_divider]',
                             [{
                                 'label': row['title'].strip(),
                                 'concept': '162169',
                                 'type': 'CHART_DIVIDER',
                                 'format': row['format'].strip()
                             }]))
        if validate_str(row['type']) and validate_ints(row['concept']):
            section_rows.append(row)
    if section and section_rows:
        sections.append((section, section_rows))
    apply_chart_sections(sections, form_id)

def _get_categories_list(self):
    # if the argument list is empty then assume this is in an algorithm file
    args = self.arguments
    if len(args) > 0:
        return args
    else:
        if self.algorithm_version() is not None:
            return self._get_algorithm_categories_list()
        else:
            return self._get_ifunction_categories_list()

def doClassification(self):
    halfIndex = int(len(self.dict) / 2)
    i = 0
    for k, v in sorted(self.dict.items(), key=lambda item: item[1]):
        if i < halfIndex:
            self.lowVolumeStockList.append(k)
        else:
            self.highVolumeStockList.append(k)
        i = i + 1
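doClassification() splits the instruments at the median of the sorted values; with four entries, the two smallest volumes land in the low list. A worked example, assuming an instance whose dict maps symbols to traded volume:

# self.dict = {"AAA": 100, "BBB": 900, "CCC": 50, "DDD": 700}
# after doClassification():
#   self.lowVolumeStockList  == ["CCC", "AAA"]
#   self.highVolumeStockList == ["DDD", "BBB"]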
def load(config: Union[dict, FancyDict]) -> Callable:

    # Get the necessary dataset's things.
    assert config['DATASET'] in KNOWN_DATASETS, f"Dataset {config['DATASET']} is unknown."

    if config['DATASET'] == 'wd50k':
        if config['STATEMENT_LEN'] == 5:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k", subtype="quints")
            else:
                return load_wd50k_quints
        elif config['STATEMENT_LEN'] == 3:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k", subtype="triples")
            else:
                return load_wd50k_triples
        else:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k", subtype="statements", maxlen=config['MAX_QPAIRS'])
            else:
                return partial(load_wd50k_statements, maxlen=config['MAX_QPAIRS'])
    elif config['DATASET'] == 'wikipeople':
        if config['STATEMENT_LEN'] == 5:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wikipeople_statements, subtype="quints")
            else:
                return load_wikipeople_quints
        elif config['STATEMENT_LEN'] == 3:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wikipeople_statements, subtype="triples")
            else:
                return load_wikipeople_triples
        else:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wikipeople_statements, subtype="statements", maxlen=config['MAX_QPAIRS'])
            else:
                return partial(load_wikipeople_statements, maxlen=config['MAX_QPAIRS'])
    elif config['DATASET'] == 'wd50k_100':
        if config['STATEMENT_LEN'] == 5:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_100", subtype="quints")
            else:
                return load_wd50k_100_quints
        elif config['STATEMENT_LEN'] == 3:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_100", subtype="triples")
            else:
                return load_wd50k_100_triples
        else:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_100", subtype="statements", maxlen=config['MAX_QPAIRS'])
            else:
                return partial(load_wd50k_100_statements, maxlen=config['MAX_QPAIRS'])
    elif config['DATASET'] == 'wd50k_33':
        if config['STATEMENT_LEN'] == 5:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_33", subtype="quints")
            else:
                return load_wd50k_33_quints
        elif config['STATEMENT_LEN'] == 3:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_33", subtype="triples")
            else:
                return load_wd50k_33_triples
        else:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_33", subtype="statements", maxlen=config['MAX_QPAIRS'])
            else:
                return partial(load_wd50k_33_statements, maxlen=config['MAX_QPAIRS'])
    elif config['DATASET'] == 'wd50k_66':
        if config['STATEMENT_LEN'] == 5:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_66", subtype="quints")
            else:
                return load_wd50k_66_quints
        elif config['STATEMENT_LEN'] == 3:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_66", subtype="triples")
            else:
                return load_wd50k_66_triples
        else:
            if config['CLEANED_DATASET']:
                return partial(load_clean_wd50k, name="wd50k_66", subtype="statements", maxlen=config['MAX_QPAIRS'])
            else:
                return partial(load_wd50k_66_statements, maxlen=config['MAX_QPAIRS'])
    elif config['DATASET'] == 'jf17k':
        if config['STATEMENT_LEN'] == 5:
            if config['CLEANED_DATASET']:
                return partial(load_clean_jf17k_statements, subtype="quints")
            else:
                return load_jf17k_quints
        elif config['STATEMENT_LEN'] == 3:
            if config['CLEANED_DATASET']:
                return partial(load_clean_jf17k_statements, subtype="triples")
            else:
                return load_jf17k_triples
        elif config['STATEMENT_LEN'] == -1:
            if config['CLEANED_DATASET']:
                return partial(load_clean_jf17k_statements, subtype="statements", maxlen=config['MAX_QPAIRS'])
            else:
                return partial(load_jf17k_statements, maxlen=config['MAX_QPAIRS'])
def get_events_passing_cuts(bolo_name, WIMP_mass, d_cut, analysis_type, MVA_tag, bin_X, min_X, max_X, list_variables, **kwargs):

    try:
        kwargs["weight_dir"]
    except KeyError:
        sys.exit()

    # Get heat fraction
    heat_fraction = kwargs["classifier_name"][13:]

    # Get scaling dict to set the weights
    d_scaling = BDT_fh.open_MVA_scaling_file(bolo_name, analysis_type, MVA_tag)

    d_event_dir = {"S1Pb": "Beta_and_Pb", "S2Pb": "Beta_and_Pb", "S1Beta": "Beta_and_Pb", "S2Beta": "Beta_and_Pb",
                   "S1Gamma": "Gamma", "S2Gamma": "Gamma", "FidGamma": "Gamma",
                   "heatonly_heat_fraction" + heat_fraction: "Heatonly", "WIMP_mass_" + str(WIMP_mass): "WIMP"}
    key_heat = "heatonly_heat_fraction" + heat_fraction

    # Load data
    d_test = dp.get_data_array(bolo_name, 1, analysis_type, MVA_tag, d_event_dir.keys(), 1, list_variables, datasplit=1)

    # Get classifier
    model_dir = script_utils.create_directory("../../Classifier_files/" + bolo_name + "/" + analysis_type + "/" + kwargs["weight_dir"] + "/")
    if kwargs.has_key("classifier_name"):
        modelfile = model_dir + "xgboost_classifier_mass_" + str(WIMP_mass) + "_" + kwargs["classifier_name"] + ".model"
        bst = xgb.Booster({'nthread': 16}, model_file=modelfile)

    # Get predictions on test sample
    d_pred = {}
    d_hist = {}
    d_color = {"S1Pb": kOrange - 8, "S2Pb": kOrange - 9, "S1Beta": kGreen + 2, "S2Beta": kGreen - 3,
               "S1Gamma": kBlue - 7, "S2Gamma": kBlue, "FidGamma": kAzure + 10, key_heat: kRed,
               "WIMP_mass_" + str(WIMP_mass): kGray, "neutron": kMagenta}

    # ROOT out_dir
    root_dir = script_utils.create_directory("./ROOT_files/" + bolo_name + "/" + analysis_type + "/")
    file_root = TFile(root_dir + bolo_name + "_sensi_eff_curves_heat_fraction" + heat_fraction + "_mass_" + str(WIMP_mass) + ".root", "read")

    # Write events that pass cut to a file
    txt_dir = script_utils.create_directory("./Text_files/Simulated_sensitivity/")
    with open(txt_dir + "/simulated_events_passing_cut_heat_fraction" + heat_fraction + "_mass_" + str(WIMP_mass) + ".txt", "w") as fout:

        fout.write("heat_fraction,exposure,num_events_passing_cut\n")

        # Loop over possible exposure values
        for exposure in [10, 50, 100, 500]:
            script_utils.print_utility("Getting events passing cut for exposure of " + str(exposure) + " mass of " + str(WIMP_mass))
            for event_type in d_test.keys():
                d_pred[event_type] = bst.predict(xgb.DMatrix(d_test[event_type].iloc[:, :-3].values))
                d_hist[event_type] = TH1F("h" + event_type + str(exposure), "h" + event_type + str(exposure), bin_X, min_X, max_X)
                PyRPl.fill_TH1(d_hist[event_type], d_pred[event_type])
                PyRPl.process_TH1(d_hist[event_type], use_fill_bool=True, color=d_color[event_type])
                if "WIMP" not in event_type:
                    d_hist[event_type].Scale(float(d_scaling["prop_" + event_type]) * float(d_scaling["exp_per_day"]) * exposure / float(d_hist[event_type].Integral()))
                else:
                    d_hist["WIMP_mass_" + str(WIMP_mass)].Scale(1. / d_hist["WIMP_mass_" + str(WIMP_mass)].Integral())

            list_hist_bckg = [d_hist["S1Pb"], d_hist["S2Pb"], d_hist["S1Beta"], d_hist["S2Beta"],
                              d_hist["S1Gamma"], d_hist["S2Gamma"], d_hist["FidGamma"], d_hist[key_heat]]

            hsum_bckg = TH1F("hsum_bckg" + str(exposure), "hsum_bckg" + str(exposure), bin_X, min_X, max_X)
            for i in range(1, bin_X + 1):
                sumcontent = sum([h.GetBinContent(i) for h in list_hist_bckg])
                hsum_bckg.SetBinContent(i, sumcontent)

            fsensi = file_root.Get("sensitivity_expo_" + str(exposure))
            cut_val = fsensi.GetMinimumX(2, 10)

            # Run Poisson simulations
            list_event_pass_cut = []
            for nsimu in range(100):
                hdatasimu = TH1F("hdatasimu", "hdatasimu", bin_X, min_X, max_X)
                for i in range(1, bin_X + 1):
                    hdatasimu.SetBinContent(i, np.random.poisson(hsum_bckg.GetBinContent(i)))
                bin_cut = hdatasimu.FindBin(cut_val)
                num_entry_cut = int(hdatasimu.Integral(bin_cut, max_X))
                list_event_pass_cut.append(str(num_entry_cut))
                del hdatasimu
            fout.write(heat_fraction[1:] + "," + str(exposure) + "," + ",".join(list_event_pass_cut) + "\n")

def cut_off_distance_calculator(config_data):
    atom_type = ((config_data["atom_id"].unique()).astype(np.int64)).tolist()
    atom_type.sort()
    config_size = config_data["item"].size
    cut_off_distance = []
    for i in range(len(atom_type)):
        for j in range(i + 1, len(atom_type) - 1):
            pair_dist_fun = pdf_calculator_pair(config_data, atom_type[i], atom_type[j])
            cut_off_distance.append(first_min_pdf(pair_dist_fun))
    return cut_off_distance

def call(self) -> List[Dict]:
    ...

def __init__(self, config_yaml):
    configdef = yaml.safe_load(io.StringIO(config_yaml))

    if "filters" not in configdef:
        configdef = dict(filters=[configdef])

    self._configs = []

    for definition in configdef["filters"]:
        config = Bunch(valid_from=None
                       , volume_follows=False
                       , copy_last_price=False
                       , copy_last_volume=False
                       , qualifier_include_filters=[]
                       , qualifier_exclude_filters=[]
                       , exclude_filters=[])

        if "filter" in definition and definition["filter"] != None:
            for exclude_filter in definition["filter"]:
                parts = exclude_filter.split(",")
                if parts[0] == "floor":
                    config.exclude_filters.append(FloorFilter(float(parts[1]), "price"))
                elif parts[0] == "cap":
                    config.exclude_filters.append(CapFilter(float(parts[1]), "price"))
                elif parts[0] == "step":
                    config.exclude_filters.append(StepFilter(int(parts[1]), float(parts[2]), float(parts[3]), "price"))
                else:
                    raise Exception("Unknown filter (%s)" % (parts[0]))

        if "remove" in definition and definition["remove"] != None:
            for exclude_filter in definition["remove"]:
                config.qualifier_exclude_filters.append(QualifierFilter(exclude_filter))

        if "allow" in definition and definition["allow"] != None:
            for include_filter in definition["allow"]:
                config.qualifier_include_filters.append(QualifierFilter(include_filter))

        if "volFollows" in definition: config.volume_follows = definition["volFollows"]
        if "copyLast" in definition and definition["copyLast"] != None:
            config.copy_last_price = definition["copyLast"]
            config.copy_last_volume = definition["copyLast"]
        if "volumeLimit" in definition and definition["volumeLimit"] != None:
            config.exclude_filters.append(CapFilter(definition["volumeLimit"], "volume"))
        if "validFrom" in definition and definition["validFrom"] != None:
            valid_from = datetime.datetime.strptime(definition["validFrom"], "%Y-%m-%d %H:%M:%S")
            valid_from.replace(tzinfo=pytz.utc)
            config.valid_from = common.Time.tick(valid_from)
        if "weekTimezone" in definition and definition["weekTimezone"] != None:
            config.exclude_filters.append(WeekendFilter(definition["weekTimezone"], definition["weekEnd"], definition["weekStart"]))

        self._configs.append(config)

    self._config_index = 0
    self._config_count = len(self._configs)
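A hypothetical YAML document covering the keys that the filter-chain __init__() above parses; all values are made up, and the weekEnd/weekStart formats are assumptions about WeekendFilter.

# filters:
#   - filter:
#       - "floor,0.01"            # drop prices below 0.01
#       - "cap,10000"             # drop prices above 10000
#       - "step,5,0.5,2.0"        # StepFilter(int, float, float) on price
#     remove: ["Z"]               # qualifier exclude filters
#     allow: ["A", "B"]           # qualifier include filters
#     volFollows: true
#     copyLast: true
#     volumeLimit: 1000000
#     validFrom: "2020-01-06 00:00:00"
#     weekTimezone: "America/New_York"
#     weekEnd: "Friday 17:00"
#     weekStart: "Sunday 17:00"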
\"volume\"))\n if \"validFrom\" in definition and definition[\"validFrom\"] != None:\n valid_from = datetime.datetime.strptime(definition[\"validFrom\"], \"%Y-%m-%d %H:%M:%S\")\n valid_from.replace(tzinfo=pytz.utc)\n config.valid_from = common.Time.tick(valid_from)\n if \"weekTimezone\" in definition and definition[\"weekTimezone\"] != None:\n config.exclude_filters.append(WeekendFilter(definition[\"weekTimezone\"], definition[\"weekEnd\"], definition[\"weekStart\"]))\n\n self._configs.append(config)\n \n self._config_index = 0\n self._config_count = len(self._configs)", "def subdMapCut(*args, caching: bool=True, constructionHistory: bool=True, name: AnyStr=\"\",\n nodeState: Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def run_cut(self, expanded, unexpanded) :\n\t\tif not expanded :\n\t\t\treturn self.errormessage(\"Needs some objects ids to cut\")\n\t\tif not self.HasPerms(self.__context, 'View management screens') :\n\t\t\treturn -1\n\t\tstatus = 0\n\t\tobjids = []\n\t\tfor objid in expanded :\n\t\t\tif '/' in objid :\n\t\t\t\tstatus = status + self.errormessage('Paths for objects ids are not allowed at this time: %s' % objid)\n\t\t\telse :\n\t\t\t\tobjids.append(objid)\n\t\ttry :\n\t\t\tself._clipboard = self.__context.manage_cutObjects(ids = objids)\n\t\t\tfor objid in objids :\n\t\t\t\tself.htmlmessage('%s cut to clipboard' % objid)\n\t\texcept AttributeError, msg :\n\t\t\tstatus = status + self.errormessage(\"Object %s doesn't exist\" % msg)\n\t\treturn status", "def cut(S, T, graph):\n ###TODO\n pass", "def z_focus(block,cut,laser):\r\n\tcutlist = []\r\n\titerations = int(cut[\"final_dimension_z\"]/laser[\"z_spacing\"])\r\n\t#Currently x,y is decided to take up a good amount of the block, rather than having set distances and sizes\r\n\ty = cut[\"final_dimension_y\"]/2\r\n\toffset = laser[\"xy_spacing\"]\r\n\tx = 0\r\n\r\n\tcutlist.append([\"z_abs\",\"0\"])\r\n\tfor a in range(iterations):\r\n\t\tcutlist.append([\"jump\", f\"{x:.6f}\", f\"{y:.6f}\"])\r\n\t\tcutlist.append([\"mark\", f\"{x:.6f}\", f\"{-y:.6f}\"])\r\n\t\tcutlist.append([\"z_rel\", str(-laser[\"z_spacing\"])])\r\n\t\tx = x + offset\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)", "def evaluator(*args, clusters: bool=True, configuration: Union[AnyStr, List[AnyStr], bool]=\"\",\n enable: bool=True, info: bool=True, name: Union[AnyStr, bool]=\"\", nodeType:\n Union[AnyStr, List[AnyStr], bool]=\"\", nodeTypeChildren: bool=True, priority:\n Union[int, bool]=0, valueName: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def new_configuration(configuration_list):\n\n if isinstance(configuration_list, dict):\n configuration_list = [configuration_list]\n results = []\n for config_item in configuration_list:\n\n # Special case, if the platform is an expandable, convert to an array\n # of configurations that fit the bill.\n platform = config_item.get(\"platform\")\n if platform is None:\n results.append(Configuration(**config_item))\n else:\n platform_type = PlatformTypes.lookup(platform)\n if platform_type is None:\n raise TypeError(\n \"parameter \\\"platform_type\\\" must be of type PlatformTypes\")\n for item in platform_type.get_expanded():\n config_item[\"platform\"] = item\n results.append(Configuration(**config_item))\n\n # If a single object, pass back as is.\n if len(results) == 1:\n return results[0]\n return 
def apply_cuts(chain, isotope, tree, volume):

    # open file which includes fill levels and fill days
    infile = open("/users/langrock/plotting_macros/Partial_fill/split_level.txt", "r")

    # define root file to save root files to
    outputroot = ROOT.TFile("/data/langrock/PartialFill/Full/root/" + isotope + "_" + chain + "_" + volume + ".root", "recreate")

    # define histograms
    hist = define_histograms.DefineHistograms()

    events_full = 0
    events_pocut = 0
    events_deltatcut = 0
    events_bifidvolcut = 0
    events_deltarcut = 0
    events_bicut = 0
    events_allcut = 0

    # get fill days and fill level from file, loop through each line and perform the cut selection on each day of filling
    for line in infile:
        words = line.split()

        if len(words) != 0:

            d = float(words[0])
            z_level = float(words[1])

            # loop through the events in the root file
            for i in range(tree.GetEntries()):
                # get variables from previous events
                tree.GetEntry(i - 1)
                nhits_prev = tree.nhits
                radius_prev = math.sqrt(tree.posx * tree.posx + tree.posy * tree.posy + tree.posz * tree.posz)
                time_prev = tree.uTNSecs + tree.uTSecs * math.pow(10, 9) + tree.uTDays * 24 * 60 * 60 * math.pow(10, 9)
                energy_prev = tree.energy
                fitValid_prev = tree.fitValid
                x_prev = tree.posx
                y_prev = tree.posy
                z_prev = tree.posz

                # get variables from current events
                tree.GetEntry(i)
                nhits = tree.nhits
                radius = math.sqrt(tree.posx * tree.posx + tree.posy * tree.posy + tree.posz * tree.posz)
                time = tree.uTNSecs + tree.uTSecs * math.pow(10, 9) + tree.uTDays * 24 * 60 * 60 * math.pow(10, 9)
                energy = tree.energy
                fitValid = tree.fitValid
                x = tree.posx
                y = tree.posy
                z = tree.posz

                # get day at which events were simulated
                day = tree.uTDays

                # define time difference and event distance
                delta_t = time - time_prev
                delta_r = math.sqrt(math.pow((x_prev - x), 2) + math.pow((y_prev - y), 2) + math.pow((z_prev - z), 2))

                fidvol_value = 5000

                # if the event was generated on the current day of filling, apply cuts
                if d == day:

                    # fill histograms and count events
                    hist.h_energy_full.Fill(energy)
                    hist.h_nhitspo_full.Fill(nhits)
                    hist.h_nhitsbi_full.Fill(nhits_prev)
                    hist.h_deltat_full.Fill(delta_t)
                    hist.h_deltar_full.Fill(delta_r)
                    hist.h_rfidvolbi_full.Fill(radius_prev)

                    events_full += 1

                    # apply fiducial volume cut
                    if radius > 0 and radius < fidvol_value and z >= z_level + 653:

                        hist.h_energy_pocut.Fill(energy)
                        hist.h_nhitspo_pocut.Fill(nhits)
                        hist.h_nhitsbi_pocut.Fill(nhits_prev)
                        hist.h_deltat_pocut.Fill(delta_t)
                        hist.h_deltar_pocut.Fill(delta_r)
                        hist.h_rfidvolbi_pocut.Fill(radius_prev)

                        events_pocut += 1

                        # bipo212 cut selection
                        if chain == "bipo212":
                            # apply polonium candidate cut
                            if nhits >= 450 and nhits <= 580:

                                hist.h_energy_deltatcut.Fill(energy)
                                hist.h_nhitspo_deltatcut.Fill(nhits)
                                hist.h_nhitsbi_deltatcut.Fill(nhits_prev)
                                hist.h_deltat_deltatcut.Fill(delta_t)
                                hist.h_deltar_deltatcut.Fill(delta_r)
                                hist.h_rfidvolbi_deltatcut.Fill(radius_prev)

                                events_deltatcut += 1

                                # time difference cut
                                if delta_t < 3690:

                                    hist.h_energy_bifidvolcut.Fill(energy)
                                    hist.h_nhitspo_bifidvolcut.Fill(nhits)
                                    hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)
                                    hist.h_deltat_bifidvolcut.Fill(delta_t)
                                    hist.h_deltar_bifidvolcut.Fill(delta_r)
                                    hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)

                                    events_bifidvolcut += 1

                                    # fiducial radius cut on bismuth candidate
                                    if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level + 653:

                                        hist.h_energy_deltarcut.Fill(energy)
                                        hist.h_nhitspo_deltarcut.Fill(nhits)
                                        hist.h_nhitsbi_deltarcut.Fill(nhits_prev)
                                        hist.h_deltat_deltarcut.Fill(delta_t)
                                        hist.h_deltar_deltarcut.Fill(delta_r)
                                        hist.h_rfidvolbi_deltarcut.Fill(radius_prev)

                                        events_deltarcut += 1

                                        # distance cut
                                        if delta_r > 0 and delta_r < 1500:

                                            hist.h_energy_bicut.Fill(energy)
                                            hist.h_nhitspo_bicut.Fill(nhits)
                                            hist.h_nhitsbi_bicut.Fill(nhits_prev)
                                            hist.h_deltat_bicut.Fill(delta_t)
                                            hist.h_deltar_bicut.Fill(delta_r)
                                            hist.h_rfidvolbi_bicut.Fill(radius_prev)

                                            events_bicut += 1

                                            # nhits cut on the bismuth candidate
                                            if nhits_prev >= 100:

                                                hist.h_energy_allcut.Fill(energy)
                                                hist.h_nhitspo_allcut.Fill(nhits)
                                                hist.h_nhitsbi_allcut.Fill(nhits_prev)
                                                hist.h_deltat_allcut.Fill(delta_t)
                                                hist.h_deltar_allcut.Fill(delta_r)
                                                hist.h_rfidvolbi_allcut.Fill(radius_prev)

                                                events_allcut += 1

                        # bipo214 cut selection
                        elif chain == "bipo214":
                            # nhits cut on polonium candidate
                            if nhits >= 290 and nhits <= 450:

                                hist.h_energy_deltatcut.Fill(energy)
                                hist.h_nhitspo_deltatcut.Fill(nhits)
                                hist.h_nhitsbi_deltatcut.Fill(nhits_prev)
                                hist.h_deltat_deltatcut.Fill(delta_t)
                                hist.h_deltar_deltatcut.Fill(delta_r)
                                hist.h_rfidvolbi_deltatcut.Fill(radius_prev)

                                events_deltatcut += 1

                                # time difference cut
                                if delta_t > 3690 and delta_t < 1798788:

                                    hist.h_energy_bifidvolcut.Fill(energy)
                                    hist.h_nhitspo_bifidvolcut.Fill(nhits)
                                    hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)
                                    hist.h_deltat_bifidvolcut.Fill(delta_t)
                                    hist.h_deltar_bifidvolcut.Fill(delta_r)
                                    hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)

                                    events_bifidvolcut += 1

                                    # fiducial volume cut on bismuth candidate
                                    if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level + 653:

                                        hist.h_energy_deltarcut.Fill(energy)
                                        hist.h_nhitspo_deltarcut.Fill(nhits)
                                        hist.h_nhitsbi_deltarcut.Fill(nhits_prev)
                                        hist.h_deltat_deltarcut.Fill(delta_t)
                                        hist.h_deltar_deltarcut.Fill(delta_r)
                                        hist.h_rfidvolbi_deltarcut.Fill(radius_prev)

                                        events_deltarcut += 1

                                        # distance cut
                                        if delta_r > 0 and delta_r < 1500:

                                            hist.h_energy_bicut.Fill(energy)
                                            hist.h_nhitspo_bicut.Fill(nhits)
                                            hist.h_nhitsbi_bicut.Fill(nhits_prev)
                                            hist.h_deltat_bicut.Fill(delta_t)
                                            hist.h_deltar_bicut.Fill(delta_r)
                                            hist.h_rfidvolbi_bicut.Fill(radius_prev)

                                            events_bicut += 1

                                            # nhits cut on the bismuth candidate
                                            if nhits_prev >= 600:

                                                hist.h_energy_allcut.Fill(energy)
                                                hist.h_nhitspo_allcut.Fill(nhits)
                                                hist.h_nhitsbi_allcut.Fill(nhits_prev)
                                                hist.h_deltat_allcut.Fill(delta_t)
                                                hist.h_deltar_allcut.Fill(delta_r)
                                                hist.h_rfidvolbi_allcut.Fill(radius_prev)

                                                events_allcut += 1

    # write all histograms to file
    outputroot.Write()
    outputroot.Close()

    # create string with all event counts
    outputstring = isotope + "\t all events: " + str(events_full) + "\t fiducial volume: " + str(events_pocut) + "\t Po nhits cut: " + str(events_deltatcut) + "\t Delta t cut: " + str(events_bifidvolcut) + "\t fiducial volume: " + str(events_deltarcut) + "\t Delta r cut: " + str(events_bicut) + "\t Bi nhits cut: " + str(events_allcut) + "\n "

    return outputstring

def filter_stories_bypickle(feng, trigger_dict_eng, trigger_dict_span, num_line):
#    trig_story = []
    temp_stories = feng[:num_line]
    for index, story in enumerate(temp_stories):
    #for index in range(num_line):
        print(index)
        if story.get_language() != []:
            if story.get_language()[0] == 'en':
                #print('mapping eng')
                for key, trig in trigger_dict_eng.items():
                    try:
                        #print('mapping step 2 eng')
                        story.set_taxonomy((key, trig.get_args())) if trig.evaluate(story) else ctime()
                    except AttributeError:
                        pass
            if story.get_language()[0] == 'es':
                #print('mapping span')
                for key, trig in trigger_dict_span.items():
                    try:
                        #print('mapping step 2 span')
                        story.set_taxonomy((key, trig.get_args())) if trig.evaluate(story) else ctime()
                    except AttributeError:
                        pass
        print(story.get_taxonomy())
step 2 eng')\n story.set_taxonomy((key,trig.get_args())) if trig.evaluate(story) else ctime()#print('False',end=' ')\n except AttributeError:\n pass\n if story.get_language()[0] == 'es':\n #print('mapping span')\n for key,trig in trigger_dict_span.items():\n try:\n #print('mapping step 2 span')\n story.set_taxonomy((key,trig.get_args())) if trig.evaluate(story) else ctime()#print('False',end=' ')\n except AttributeError:\n pass\n print(story.get_taxonomy())", "def nflstandings(self, irc, msg, args, optlist, optconf, optdiv):\n \n detailed = False\n for (option, arg) in optlist:\n if option == 'detailed':\n detailed = True\n \n optconf = optconf.upper()\n optdiv = optdiv.title()\n \n if optconf != \"AFC\" and optconf != \"NFC\":\n irc.reply(\"Conference must be AFC or NFC.\")\n return\n \n if optdiv != \"North\" and optdiv != \"South\" and optdiv != \"East\" and optdiv != \"West\":\n irc.reply(\"Division must be North, South, East or West.\")\n return\n \n if not detailed:\n url = self._b64decode('aHR0cDovL3MzLmFtYXpvbmF3cy5jb20vbmZsZ2MvZGl2X3N0YW5kaW5nczIuanM=')\n else:\n url = self._b64decode('aHR0cDovL3MzLmFtYXpvbmF3cy5jb20vbmZsZ2MvZGl2X3N0YW5kaW5ncy5qcw==')\n \n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n jsondata = json.loads(html)\n \n standings = jsondata.get('content', None)\n\n if standings is None:\n irc.reply(\"Failed to load standings.\")\n return\n \n teams = [item['teams'] for item in standings if item['conference'] == optconf and item['division'] == optdiv]\n \n if not detailed: # switch for detailed. this is the short-form.\n \n append_list = []\n\n for item in teams: # teams is a list of dicts\n for team in item: # so we recurse\n append_list.append(self._translateTeam('team', 'nid', team['teamId']) + \" \" + team['winLossRecord'] + \"(\" + team['percentage'] + \")\")\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} {1} :: {2}\".format(ircutils.bold(optconf), ircutils.bold(optdiv), descstring)\n irc.reply(output)\n else:\n \n header = \"{0:11} {1:>3} {2:>3} {3:>3} {4:<6} {5:<5} {6:<5} {7:<5} {8:<5} {9:<4} {10:<4} {11:<4} {11:<5}\"\\\n .format(ircutils.underline(optconf + \" \" + optdiv),'W','L','T','PCT','HOME','ROAD','DIV','CONF','PF','PA','DIFF','STRK') \n\n irc.reply(header)\n\n for item in teams: # teams is a list of dicts\n for t in item: # so we recurse\n output = \"{0:9} {1:3} {2:3} {3:3} {4:6} {5:5} {6:5} {7:5} {8:5} {9:4} {10:4} {11:4} {11:5}\".format(t['team']['abbreviation'],\\\n t['wins'], t['losses'], t['ties'], t['percentage'], t['extra']['home_record'], t['extra']['road_record'],\\\n t['extra']['division_record'], t['extra']['conference_record'], t['extra']['points_for'], t['extra']['points_against'],\\\n t['extra']['home_record'], t['extra']['net_points'], t['extra']['last_5_record'])\n \n irc.reply(output)", "def cut_image(self, x, y, r_cut):\n image_cutted_raw = self.image[x - r_cut:x + r_cut + 1, y - r_cut:y + r_cut + 1]\n image_cutted = self.sub_bkg(image_cutted_raw)\n return image_cutted", "def discretizationData(self, trainData, testData, structure, numOfBins, typeOfDiscretization):\n for columnName, value in structure.items():\n if value[\"values\"] == ['Numeric']:\n colIndex = value['index']\n bins = []\n self.sortDataByAscendingOrderOFValuesInColumn(trainData, colIndex)\n if typeOfDiscretization.upper() == \"EQUAL WIDTH\":\n bins = self.createBinsByEqualWidth(trainData, colIndex, numOfBins)\n elif typeOfDiscretization.upper() == \"EQUAL DEPTH\":\n bins = 
self.createBinsByEqualDepth(trainData, colIndex, numOfBins)\n elif typeOfDiscretization.upper() == \"GINI INDEX\":\n bins = self.createBinsByGiniIndex(trainData, structure, colIndex, numOfBins)\n elif typeOfDiscretization.upper() == \"ENTROPY\":\n bins = self.createBinsByEntropy(trainData, structure, columnName, numOfBins)\n else:\n bins = self.createBinsByEntropy(trainData, structure, columnName, numOfBins)\n self.discretizationOFDataByColumn(trainData, colIndex, bins)\n self.discretizationOFDataByColumn(testData, colIndex, bins)\n structure[columnName]['values'] = list(bins.keys())", "def get_data_and_cuts(args):\n\n if args['verbose'] >= 2:\n print(\"Load data\\n\", flush=True)\n data = get_dataset(args)\n\n if args['verbose'] >= 2:\n print(\"Find cuts\", flush=True)\n cuts = get_cuts(data, args, verbose=args['verbose'])\n if args['verbose'] >= 2:\n print(f'\\tI found {len(cuts.values)} cuts\\n')\n\n print(\"Compute cost\", flush=True)\n cost_function = get_cost_function(data, args)\n cuts = compute_cost_and_order_cuts(cuts, cost_function)\n\n cuts = pick_cuts_up_to_order(cuts,\n percentile=args['experiment']['percentile_orders'])\n if args['verbose'] >= 2:\n max_considered_order = cuts.costs[-1]\n print(f\"\\tI will stop at order: {max_considered_order}\")\n print(f'\\tI will use {len(cuts.values)} cuts\\n', flush=True)\n\n if args['plot']['cuts']:\n if args['verbose'] >= 2:\n print(f\"\\tPlotting cuts\")\n\n plot_cuts(data, cuts,\n nb_cuts_to_plot=args['plot']['nb_cuts'],\n path=args['plot_dir'])\n\n return data, cuts", "def polyMapCut(*args, caching: bool=True, constructionHistory: bool=True, moveratio:\n Union[float, bool]=0.0, name: AnyStr=\"\", nodeState: Union[int, bool]=0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def __init__(self, *args):\n _BRepAlgo.BRepAlgo_Cut_swiginit(self,_BRepAlgo.new_BRepAlgo_Cut(*args))", "def test_makeliststep_call_config_file():\n config_file = t_path(\n Path('steps') / 'makelist.cfg'\n )\n results = MakeListStep.call(config_file=config_file)\n assert results == [43.0, 'My hovercraft is full of eels.', False]", "def __operations(self, conf):\n result = \"\"\"## Operations [back to top](#toc)\nThe operations that this API implements are:\n\"\"\"\n ops = \"\\n\"\n\n for op in conf[\"conf_json\"][1:]:\n params = []\n for p in findall(PARAM_NAME, op[\"url\"]):\n p_type = \"str\"\n p_shape = \".+\"\n if p in op:\n p_type, p_shape = findall(\"^\\s*([^\\(]+)\\((.+)\\)\\s*$\", op[p])[0]\n\n params.append(\n \"<em>%s</em>: type <em>%s</em>, regular expression shape <code>%s</code>\"\n % (p, p_type, p_shape)\n )\n result += \"\\n* [%s](#%s): %s\" % (\n op[\"url\"],\n op[\"url\"],\n op[\"description\"].split(\"\\n\")[0],\n )\n ops += \"\"\"<div id=\"%s\">\n<h3>%s <a href=\"#operations\">back to operations</a></h3>\n\n%s\n\n<p class=\"attr\"><strong>Accepted HTTP method(s)</strong> <span class=\"attr_val method\">%s</span></p>\n<p class=\"attr params\"><strong>Parameter(s)</strong> <span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Result fields type</strong><span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Example</strong><span class=\"attr_val\"><a target=\"_blank\" href=\"%s\">%s</a></span></p>\n<p class=\"ex attr\"><strong>Exemplar output (in JSON)</strong></p>\n<pre><code>%s</code></pre></div>\"\"\" % (\n op[\"url\"],\n op[\"url\"],\n markdown(op[\"description\"]),\n \", \".join(split(\"\\s+\", op[\"method\"].strip())),\n \"</li><li>\".join(params),\n \", \".join(\n [\n \"%s 
<em>(%s)</em>\" % (f, t)\n for t, f in findall(FIELD_TYPE_RE, op[\"field_type\"])\n ]\n ),\n conf[\"website\"] + conf[\"base_url\"] + op[\"call\"],\n op[\"call\"],\n op[\"output_json\"],\n )\n return markdown(result) + ops", "def parse_logic(self, logic):\n if 'xxx' in logic.conf:\n # self.function(logic['name'])\n pass", "def rpy_process(args):\n\n\trpy_module = importr('cocalculateur')\n\tloadjson_module = importr('jsonlite')\n\n\tjson_data = read_filter_sync(args)\n\t\n\t#json_data_json = loadjson_module.fromJSON(txt = './input_f01.json',flatten=True)\n\tconfig_data = loadjson_module.fromJSON(txt = args.config ,flatten=True)\n\toutput_data = rpy_module.f_01(json_data,config_data)\n\n\treturn output_data", "def _collectChoicesList(self, statistic, logic, fields=[], filter=None,\n program_field=None, params={}):\n\n choices = []\n if statistic.working_json:\n choices = simplejson.loads(statistic.working_json)\n\n query_filter = {}\n if program_field:\n query_filter = {program_field: statistic.scope}\n\n query = logic.getQueryForFields(filter=query_filter)\n\n next_key = None\n if statistic.next_entity:\n next_key = statistic.next_entity.key()\n query.filter('__key__ > ', next_key)\n\n entities = query.fetch(self.BATCH_SIZE)\n\n if not entities:\n next_entity = None\n result = choices\n json_to_update = 'choices_json'\n else:\n new_choices = []\n\n for entity in entities:\n\n if filter and not filter(entity, params):\n continue\n\n for field in fields:\n entity = entity.__getattribute__(field)\n\n new_choices.append(entity.key().id_or_name())\n\n choices += new_choices\n choices = list(set(choices))\n next_entity = entities[-1]\n result = None\n json_to_update = 'working_json'\n\n properties = {\n 'next_entity': next_entity,\n json_to_update: simplejson.dumps(choices)\n }\n\n self.updateEntityProperties(statistic, properties,\n store=True)\n\n return result", "def setCutRatios(self, cutRatios):\n\t\t# TODO: Check if this is an array\n\t\t# Adjust the ratio according to specifications\n\t\tfor i in range(len(cutRatios)):\n\t\t\tif cutRatios[i] < 0.5:\n\t\t\t\tcutRatios[i] = 1 - cutRatios[i]\n\t\t\telif cutRatios[i] > 1.0:\n\t\t\t\t# Scary\n\t\t\t\t# Maybe raise an error\n\t\t\t\tcutRatios[i] = cutRatios[i] - 1\n\t\tself.cutRatios = cutRatios", "def main(config_file):\n # check that there is a file at the config file location\n assert os.path.isfile(config_file), \"Config file does not exist.\"\n # load the config file with yaml\n with open(config_file, \"r\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n # check that the config file has the right keys\n keys = config.keys()\n required_keys = [\n \"strategy_execution_comparison_in_python\",\n \"platforms_objects\",\n \"folder_execution_results\",\n \"folder_comparison_results\"\n ]\n for req_key in required_keys:\n assert req_key in keys, f\"Config file missing key: {req_key}\"\n\n detectors = []\n\n if \"ks\" in config[\"strategy_execution_comparison_in_python\"]:\n detectors.append(KS_Detector())\n\n # get pairs of dictionary\n platforms = config[\"platforms_objects\"]\n assert len(platforms) == 2, \"There should be two platforms.\"\n platform_A, platform_B = platforms\n # get the execution files\n pairs_of_results = get_execution_files(\n config[\"folder_execution_results\"],\n platform_A, platform_B)\n\n for (result_A, result_B, identifier) in pairs_of_results:\n # get the results\n for detector in detectors:\n statistic, p_value = detector.check(result_A, result_B)\n comparison = {\n \"statistic\": statistic,\n \"p_value\": 
p_value,\n }\n with open(os.path.join(\n config[\"folder_comparison_results\"],\n identifier + \".json\"\n ), \"w\") as f:\n json.dump(comparison, f)", "def _parse_actions_list(self, actions_cfg, dataset_name):\n # iterate over actions and parse\n for i, cfg in enumerate(actions_cfg):\n actions_cfg[i] = self._parse_action(cfg, dataset_name)\n\n return actions_cfg", "def Run(self, args):\n execution_ref = args.CONCEPTS.execution.Parse()\n\n conn_context = connection_context.GetConnectionContext(\n args, flags.Product.RUN, self.ReleaseTrack())\n with serverless_operations.Connect(conn_context) as client:\n self.SetCompleteApiEndpoint(conn_context.endpoint)\n ret = client.ListTasks(execution_ref.Parent(), execution_ref.Name(),\n args.filter_flags or None)\n return sorted(ret, key=lambda x: x.index)", "def _allocate_functions(self, scenario):\n\n func_list = []\n func_tag = []\n\n if self.tacs_proc:\n # Create the list of functions and their corresponding function tags\n for func in scenario.functions:\n if func.analysis_type != \"structural\":\n func_list.append(None)\n func_tag.append(0)\n\n elif func.name.lower() == \"ksfailure\":\n ksweight = 50.0\n if func.options is not None and \"ksweight\" in func.options:\n ksweight = func.options[\"ksweight\"]\n func_list.append(\n functions.KSFailure(self.assembler, ksWeight=ksweight)\n )\n func_tag.append(1)\n\n elif func.name.lower() == \"compliance\":\n func_list.append(functions.Compliance(self.assembler))\n func_tag.append(1)\n\n elif func.name.lower() == \"temperature\":\n func_list.append(\n functions.AverageTemperature(self.assembler, volume=self.vol)\n )\n func_tag.append(1)\n\n elif func.name.lower() == \"heatflux\":\n func_list.append(functions.HeatFlux(self.assembler))\n func_tag.append(1)\n\n elif func.name.lower() == \"xcom\":\n func_list.append(\n functions.CenterOfMass(self.assembler, direction=[1, 0, 0])\n )\n func_tag.append(1)\n\n elif func.name.lower() == \"ycom\":\n func_list.append(\n functions.CenterOfMass(self.assembler, direction=[0, 1, 0])\n )\n func_tag.append(1)\n\n elif func.name.lower() == \"zcom\":\n func_list.append(\n functions.CenterOfMass(self.assembler, direction=[0, 0, 1])\n )\n func_tag.append(1)\n\n elif func.name == \"mass\":\n func_list.append(functions.StructuralMass(self.assembler))\n func_tag.append(-1)\n\n else:\n print(\"WARNING: Unknown function being set into TACS set to mass\")\n func_list.append(functions.StructuralMass(self.assembler))\n func_tag.append(-1)\n\n return func_list, func_tag", "def get_shell_commands_from_config(\n config: Dict[str, Any],\n) -> Dict[str, ExecutorFuncT]:\n shell_commands = config\\\n .get('generic_command_config', {})\\\n .get('shell_commands', {})\n\n command_dispatch_table = {}\n\n for shell_command in shell_commands:\n name = shell_command.get('name')\n command = shell_command.get('command')\n if not name or not command:\n continue\n\n allow_params = shell_command.get('allow_params', False)\n\n logging.debug(\"Loading command %s\", name)\n command_dispatch_table[name] = partial(\n _run_subprocess,\n command,\n allow_params,\n )\n return command_dispatch_table", "async def json_selector(self, url: Url, alias: str,\n json_key: Dict, config_id: int = 0):\n await self.responses[url]\n json_out = loads(self.responses[url])\n\n if isinstance(json_key['filter'], str):\n json_key['filter'] = [json_key['filter']]\n results = json_out\n for fltr in json_key['filter']:\n results = results[fltr]\n\n news_dump = NewsDump(config_id, url, alias)\n\n for story in results:\n 
story_dict = StoryDict()\n for k, val in json_key['attribute'].items():\n if k in ('H0', 'H1', 'H2'):\n new_val = story[val].encode(\n 'ascii', errors='ignore').decode('utf-8')\n else:\n new_val = story[val]\n story_dict.update(**{k: new_val})\n news_dump.add_story(config_id, **story_dict)", "def cutpointStrategy(listOfDict):\n df = pd.DataFrame(listOfDict)\n resultDF = df.copy(deep=True)\n for i in df.columns:\n if(df[i].dtype == np.float64):\n distNumValues = list(set(df.loc[:,i].values))\n distNumValues.sort()\n cutPoints = [(distNumValues[j]+distNumValues[j+1])/2 for j in range(len(distNumValues)-1)]\n del resultDF[i]\n for k in range(len(cutPoints)):\n for j in range(df.shape[0]):\n if(df.loc[j,i] < cutPoints[k]):\n resultDF.loc[j,i+str(cutPoints[k])] = str(min(distNumValues)) + \"..\" + str(cutPoints[k])\n else:\n resultDF.loc[j,i+str(cutPoints[k])] = str(cutPoints[k]) + \"..\" + str(max(distNumValues))\n return resultDF.T.to_dict().values()", "def filter_json(json, param, param_range):\n filtered_json = []\n\n for element in json:\n if element[param]:\n try:\n value = int(element[param])\n if param_range[0] <= value <= param_range[1]:\n filtered_json.append(element)\n except:\n pass\n\n\n return filtered_json", "def get_channel_list(json_path, category):\n with open(json_path) as json_file:\n data = json.load(json_file)\n\n channel_ids_list = data[category]\n\n return channel_ids_list", "def parse_cutadapt_logs(self, f):\n fh = f['f']\n regexes = {\n 'bp_processed': \"Total basepairs processed:\\s*([\\d,]+) bp\",\n 'bp_written': \"Total written \\(filtered\\):\\s*([\\d,]+) bp\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'r_processed': \"Total reads processed:\\s*([\\d,]+)\",\n 'r_with_adapters': \"Reads with adapters:\\s*([\\d,]+)\"\n }\n s_name = None\n for l in fh:\n # New log starting\n if l.startswith('This is cutadapt'):\n s_name = None\n \n # Get sample name from end of command line params\n if l.startswith('Command line parameters'):\n s_name = l.split()[-1]\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name in self.cutadapt_data:\n log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n self.cutadapt_data[s_name] = dict()\n self.cutadapt_length_counts[s_name] = dict()\n self.cutadapt_length_exp[s_name] = dict()\n self.cutadapt_length_obsexp[s_name] = dict()\n \n if s_name is not None:\n # Search regexes for overview stats\n for k, r in regexes.items():\n match = re.search(r, l)\n if match:\n self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))\n\n if 'length' in l and 'count' in l and 'expect' in l:\n # Nested loop to read this section while the regex matches\n for l in fh:\n r_seqs = re.search(\"^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)\", l)\n if r_seqs:\n a_len = int(r_seqs.group(1))\n self.cutadapt_length_counts[s_name][a_len] = int(r_seqs.group(2))\n self.cutadapt_length_exp[s_name][a_len] = float(r_seqs.group(3))\n if float(r_seqs.group(3)) > 0:\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))\n else:\n # Cheating, I know. 
Infinity is difficult to plot.\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2))\n else:\n break\n \n # Calculate a few extra numbers of our own\n for s_name in self.cutadapt_data.keys():\n if 'bp_processed' in self.cutadapt_data[s_name] and 'bp_written' in self.cutadapt_data[s_name]:\n self.cutadapt_data[s_name]['percent_trimmed'] = (float(self.cutadapt_data[s_name]['bp_processed'] - self.cutadapt_data[s_name]['bp_written']) / self.cutadapt_data[s_name]['bp_processed']) * 100", "def evaluate_condition(self, condition):\n df_evaluation_result = pd.read_csv(self.path_budget_evaluation_result, header=None, names=['id', 'dataset_name', 'condition', 'name', 'token', 'comment', 'ip', 'date'])\n df_evaluation_base = pd.read_csv(self.path_budget_evaluation_base)\n df_cleaned_bin = pd.read_csv(self.path_bin)\n df_answers_grouped = pd.read_pickle(self.path_answers_clean_grouped)\n df_actual_metadata = pd.read_csv(self.path_answers_metadata, index_col=0, header=[0, 1])\n df_actual_metadata = df_actual_metadata['actual']\n evaluator = ERNofeaturesEvaluator(df_evaluation_result, df_evaluation_base, df_cleaned_bin, df_actual_metadata=df_actual_metadata, target=self.target, dataset_name=self.dataset_name, df_answers_grouped=df_answers_grouped, bootstrap_n=self.bootstrap_n, repetitions=self.repetitions, replace=False)\n raw_data = evaluator.evaluate(self.feature_range, condition) # raw_data is dict: {CONDITION: {NOFEATURES: [AUCS]}}\n\n print(json.dumps(raw_data[condition]))\n exit()", "def cut_candidates(self, plate_scale, bright_cut=None, separation=None):\n\n # Zero point\n if isinstance(plate_scale, str):\n plate_scale = self.header[plate_scale]\n\n if self.photom is None:\n raise ValueError(\"photom table not built!\")\n\n cands = np.ones(len(self.photom), dtype=bool)\n\n # Cut on brightness?\n if bright_cut is not None:\n good_bright = self.photom[self.filter] > bright_cut\n cands &= good_bright\n\n # Candidate table\n self.candidates = self.photom[cands].copy()\n\n # Add coords\n coords = astropy_wcs.utils.pixel_to_skycoord(\n self.candidates['xcentroid'],\n self.candidates['ycentroid'],\n self.wcs)\n # Insist on ICRS\n coords = coords.transform_to('icrs')\n\n self.candidates['ra'] = coords.ra\n self.candidates['dec'] = coords.dec\n self.candidates['coords'] = coords\n\n # Separation\n seps = self.frb.coord.separation(coords)\n self.candidates['separation'] = seps.to('arcsec')\n\n # Cut on separation?\n if separation is not None:\n cut_seps = seps < separation\n self.candidates = self.candidates[cut_seps]\n\n # Half light\n self.candidates['half_light'] = self.candidates['semimajor_axis_sigma'] * plate_scale", "def max_cut(g):\n # Write your code here.\n return []", "def _apply_clipping(config: ClippingConfig,\n inner_factory: AggregationFactory) -> AggregationFactory:\n if isinstance(config, FixedClippingConfig):\n return clipping_factory.ClippingFactory(config.clip, inner_factory)\n elif isinstance(config, AdaptiveClippingConfig):\n clipping_quantile = _build_quantile_estimation_process(\n config.initial_clip, config.target_quantile, config.learning_rate)\n return clipping_factory.ClippingFactory(clipping_quantile, inner_factory)\n else:\n raise TypeError(f'config is not a supported type of ClippingConfig. 
Found '\n f'type {type(config)}.')", "def cut(value,arg):\n return cut.replace(arg,\"\")", "def __init__(self, config):\n self.cfg = config\n self.var_combinations = [\"tas:tas\", \"pr:pr\", \"pr:tas\"]\n self.seasons = [\"jja\", \"djf\", \"annual\"]\n self.projects = [\"cmip5\", \"cmip6\"]\n self.variables = [\"tas\", \"pr\"]\n self.scenarios = [\"26\", \"45\", \"85\"]\n\n # generate list of candidate bound limits\n small = np.arange(0.1, 1, 0.1)\n medium = np.arange(1, 11)\n high = np.arange(20, 100, 10)\n v_high = np.arange(150, 400, 50)\n self.bound_candidates = np.concatenate(\n (small, medium, high, v_high)) * 5 / 4", "def cut(lines=[],params=\"\"):\n if not core.is_unixy():\n raise(\"cut is only implemented on unix-like systems\")\n cmd = \"cut\"\n if params != \"\":\n cmd = cmd + \" \" + params\n res = act.call(cmd,lines)\n return res", "def apply_chart_sections(sections, form_id):\n clear_form(form_id)\n\n for i, (section, rows) in enumerate(sections):\n if section == '[chart_divider]':\n section_type = 'CHART_DIVIDER'\n else:\n section_type = section[:1] == '[' and 'TILE_ROW' or 'GRID_SECTION'\n field_id = get_field_for_section(\n section, description=json.dumps({'type': section_type}))\n section_id = add_field_to_form(form_id, field_id, None, i + 1)\n\n # Add the fields to the section with the specified names. Concepts\n # must already exist (we can't create them here, because we don't\n # know what data type they should have). For charts, the \"type\"\n # column in the CSV gives the rendering type, not the data type.\n for j, row in enumerate(rows):\n label = row.get('label', '')\n concept_ids = validate_ints(row.get('concept'))\n if section_type == 'GRID_SECTION' and len(concept_ids) != 1:\n raise ValueError('Grid rows must have exactly 1 concept ID')\n for concept_id in concept_ids:\n if not db.get('concept_id', concept_id=concept_id):\n raise ValueError(\n 'Concept %d not found in the existing dictionary'\n 'or any form in this profile' % concept_id)\n\n # Copy everything, but omit 'concept' as we have 'concepts'\n # and omit 'required' because there's a DB column for that.\n required = validate_boolean(row.get('required'))\n config = dict([(normalize_name(key), value.strip())\n for key, value in row.items() if key],\n concepts=concept_ids,\n type=validate_str(row['type']))\n for key in config.keys():\n if key in ['concept', 'required'] or config[key] == '':\n del config[key]\n\n # Put the first concept ID in the 'concept_id' column;\n # serialize all the rest of the details in the description.\n field_id = get_field_for_concept(\n concept_ids[0], label, description=json.dumps(config))\n add_field_to_form(form_id, field_id, section_id,\n field_number=j + 1, required=required)", "def dump_cuts_list(self, file_name):\n assert(file_name is not None)\n with open(file_name, 'w') as fd:\n uids = self._cuts.keys()\n uids.sort()\n for cut_uid in uids:\n cut = self._cuts[cut_uid]\n fd.write(cut.get_cost_var() + \" \" + str(cut.get_cost()) + \"\\n\")\n return", "def list_operations():", "def test_makeliststep_call_from_within_pipeline():\n config_file = t_path(\n Path('steps') / 'makelist_pipeline.cfg'\n )\n results = MakeListPipeline.call(config_file=config_file)\n assert results == [43.0, 'My hovercraft is full of eels.', False]", "def get_binners(config):\n binners = []\n if config[\"binning\"][\"metabat\"]:\n binners.append(\"metabat\")\n if config[\"binning\"][\"concoct\"]:\n binners.append(\"concoct\")\n if config[\"binning\"][\"maxbin\"]:\n binners.append(\"maxbin\")\n return 
binners", "def sub_process(analysis):\n\n #processing as statement\n subsentence = statement(analysis)\n if analysis.aim == 'if':\n return [','] + [analysis.aim] + subsentence\n return [analysis.aim] + subsentence", "def filter_stories(stories,trigger_dict_eng,trigger_dict_span,num_line):\n# trig_story = []\n temp_stories = stories[:num_line]\n for index, story in enumerate(temp_stories):\n print('\\n'+str(index),end=' ')\n #print(story.get_gevent_id(),story.get_dateAdded())\n story.clean_text()\n if story.get_text() == None:\n pass\n else:\n try:\n detect(story.text)\n if detect(story.text) == 'en':\n story.set_language('en')\n #print(type(story.get_language()))\n print(story.get_language())\n for key,trig in trigger_dict_eng.items():\n try:\n story.set_taxonomy((key,trig.get_args())) if trig.evaluate(story) else ctime()#print('False',end=' ')\n except AttributeError:\n pass\n if detect(story.text) == 'es':\n story.set_language('es')\n #print(type(story.get_language()))\n print(story.get_language()) \n for key,trig in trigger_dict_span.items():\n try:\n story.set_taxonomy((key,trig.get_args())) if trig.evaluate(story) else ctime()#print('False',end=' ')\n except AttributeError:\n pass\n except:\n print('error')\n print(story.get_taxonomy())", "def fid_cuts(ptname, etaname):\n cuts = []\n cuts.append(combine_cuts([ptname + ' > 4.5',\n 'TMath::Abs(' + etaname + ') < 1.2']))\n cuts.append(combine_cuts([ptname + ' > 4.0',\n var_selection('TMath::Abs('+etaname+')', 1.2, 1.4)\n ]))\n cuts.append(combine_cuts([ptname + ' > 3.5',\n var_selection('TMath::Abs('+etaname+')', 1.4, 1.6)\n ]))\n return combine_cuts(cuts, ' || ')", "def list(self, config_path: str, results_filter: Optional[ObjectType]) -> List[str]:\n ...", "def select_sources(cat_table, cuts):\n nsrc = len(cat_table)\n full_mask = np.ones((nsrc), bool)\n for cut in cuts:\n if cut == 'mask_extended':\n full_mask *= mask_extended(cat_table)\n elif cut == 'select_extended':\n full_mask *= select_extended(cat_table)\n else:\n full_mask *= make_mask(cat_table, cut)\n\n lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]\n return lout", "def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n #print(list(cows.items()))\n cows_list=list(cows.items())\n curr_list=[[[0]]]\n for i in range(1,len(cows_list)):\n smaller_fun(curr_list,i,limit,cows_list)\n\n ans =sorted(curr_list,key=lambda x:len(x))\n print(ans)\n ansfinal=[]\n for item in ans:\n trip=[]\n for i in range(len(item)):\n trip.append(cows_list[item[i]][0])\n ansfinal.append(trip)\n return ansfinal", "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def cut_path(cutpath, mesh):\n #cutpath = np.array([[x0,0],[x0,2]])\n cutedges = []\n xis, ps = [], []\n for eidx,edge in enumerate(mesh.edges):\n x0 = np.array([mesh.x[edge[0]], mesh.y[edge[0]]])\n x1 = np.array([mesh.x[edge[1]], mesh.y[edge[1]]])\n if intersect(cutpath[0],cutpath[1], x0, x1):\n p, xi = intersection(x0, x1, cutpath[0],cutpath[1])\n xis.append(xi); ps.append(p)\n cutedges.append(eidx)\n cutedges = np.array(cutedges, dtype='int'); ps = np.array(ps)\n # Sort according to y-coordinate\n isort = np.argsort(ps[:,1])\n return cutedges[isort], np.array(xis)[isort]", "def rpy_process_r10(config_file, json_data):\n\n\trpy_module = importr('cocalculateur')\n\tloadjson_module = importr('jsonlite')\n\t\n\t#json_data = 
loadjson_module.fromJSON(txt = './input_f02.json',flatten=True)\n\tconfig_data = loadjson_module.fromJSON(txt = config_file ,flatten=True)\n\toutput_data = rpy_module.f_02(json_data,config_data)\n\n\treturn output_data", "def calculate_CLs(bkgonly_json, signal_patch_json):\n workspace = pyhf.workspace.Workspace(bkgonly_json)\n model = workspace.model(\n measurement_name=None,\n patches=[signal_patch_json],\n modifier_settings={\n \"normsys\": {\"interpcode\": \"code4\"},\n \"histosys\": {\"interpcode\": \"code4p\"},\n },\n )\n result = pyhf.infer.hypotest(\n 1.0, workspace.data(model), model, qtilde=True, return_expected_set=True\n )\n if isinstance(pyhf.tensorlib, pyhf.tensor.pytorch_backend):\n return result[0].tolist()[0], result[-1].tolist()\n else:\n return result[0].tolist()[0], result[-1].ravel().tolist()", "def get_category_data_off(self):\n list_categories_name=[]\n cat = requests.get('https://fr.openfoodfacts.org/categories?json=true')\n cat_data = cat.json()\n tags_list = cat_data['tags']\n print (len(tags_list))\n list_of_random_tags_list = random.sample(tags_list, k=self.view.num_to_select)\n\n for category in list_of_random_tags_list:\n try :\n category_name = category['name']\n print(category_name)\n list_categories_name.append(category_name)\n print (list_categories_name)\n self.list_categories = list_categories_name # list_categories_name is passed in the instance property\n except KeyError:\n pass\n except UnicodeEncodeError:\n pass" ]
[ "0.6149866", "0.5558365", "0.55202043", "0.52206475", "0.5109341", "0.49454144", "0.4927624", "0.49099135", "0.49094042", "0.48717844", "0.4865596", "0.4846522", "0.47562224", "0.471486", "0.47056", "0.46879908", "0.4677999", "0.46680018", "0.46643007", "0.4607456", "0.4546067", "0.44480908", "0.44266537", "0.4383215", "0.43774062", "0.43762055", "0.43481666", "0.43111384", "0.43003875", "0.428989", "0.42732367", "0.42576435", "0.42224836", "0.4216646", "0.42149162", "0.4213796", "0.42133465", "0.42029715", "0.41969165", "0.41683337", "0.41608816", "0.4156808", "0.41544527", "0.4151852", "0.4134239", "0.4134229", "0.41315025", "0.4129227", "0.41254455", "0.4113906", "0.41137922", "0.41014484", "0.40966833", "0.40847313", "0.40846714", "0.40831792", "0.40762067", "0.40721563", "0.4071959", "0.40657383", "0.40644324", "0.40639836", "0.40631795", "0.4061986", "0.40555248", "0.40549877", "0.40514573", "0.4051023", "0.40484113", "0.40457273", "0.40389824", "0.40361446", "0.403218", "0.40188283", "0.40163228", "0.40147614", "0.4012786", "0.4012502", "0.40117675", "0.4008703", "0.4007622", "0.40035215", "0.40017143", "0.40004265", "0.3998816", "0.39961672", "0.39950365", "0.39881912", "0.39769018", "0.39765093", "0.39712974", "0.3964005", "0.39628673", "0.39617503", "0.3960575", "0.39599186", "0.39584413", "0.3958112", "0.39578032", "0.3956112" ]
0.71448123
0
Get updated learning rate.
def get_lr(self): # HACK: We need to check if this is the first time ``self.get_lr()`` was called, # since ``torch.optim.lr_scheduler._LRScheduler`` will call ``self.get_lr()`` # when first initialized, but the learning rate should remain unchanged # for the first epoch. if not self._initialized: self._initialized = True return self.base_lrs step = self.last_epoch + 1 self._cycle_counter = step - self._last_restart lrs = [ self.eta_min + ((lr - self.eta_min) / 2) * ( np.cos( np.pi * (self._cycle_counter % self._updated_cycle_len) / self._updated_cycle_len ) + 1 ) for lr in self.base_lrs ] if self._cycle_counter % self._updated_cycle_len == 0: # Adjust the cycle length. self._cycle_factor *= self.factor self._cycle_counter = 0 self._updated_cycle_len = int(self._cycle_factor * self.t_max) self._last_restart = step return lrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_learning_rate(self):\n config = self.config\n cur_lr = Train_model_pipeline.adjust_learning_rate(\n self.optimizer,\n self.epoch,\n config[\"training\"][\"learning_rate\"],\n decay=config[\"training\"][\"lr_decay_rate\"],\n step=config[\"training\"][\"lr_decay_step\"],\n )\n self.cur_lr = cur_lr\n return cur_lr", "def learning_rate(epoch):\n self.lr = self.lr / 1.00000001\n return self.lr", "def update_learning_rate(self):\n self.scheduler.step()\n lr = self.optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return", "def get_learning_rate():\n return 0.00001", "def getActualLearnRate(self, epoch):\n raise NotImplementedError('getActualLearnRate')", "def update_learning_rate(self):\n for scheduler in self.schedulers:\n if self.opt.lr_policy == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def _get_learning_rate(self) -> float:\n if len(self.errors) > 1 and self.errors[-2] - self.errors[-1] < 0:\n self.learning_rate /= 2\n return self.learning_rate", "def __call__(self, epoch):\n # Compute the new dynamic learning rate, log in onto TensorBoard and\n # return the result for the training process\n learning_rate = self.schedule(epoch)\n tf.summary.scalar('learning rate', data=learning_rate, step=epoch)\n return learning_rate", "def learning_rate_fn():\n start_learning_rate = FLAGS.start_learning_rate\n step = tf.cast(tf.compat.v1.train.get_or_create_global_step(), 'float32')\n effective_step = tf.maximum(step - FLAGS.lr_decay_after_num_steps + 1, 0)\n lr_step_ratio = tf.cast(effective_step, 'float32') / float(\n FLAGS.lr_decay_steps)\n warm_up_factor = tf.cast(tf.minimum(step / float(FLAGS.warm_up_steps), 1.),\n 'float32')\n final_learning_rate = FLAGS.gpu_learning_rate\n # Ease in to final learning rate.\n lr = ((1. 
- warm_up_factor) * start_learning_rate) + (\n warm_up_factor * final_learning_rate)\n lr = tf.cast(lr, 'float32')\n if FLAGS.lr_decay_type == 'none' or FLAGS.lr_decay_steps <= 0:\n return lr\n elif FLAGS.lr_decay_type == 'exponential':\n return lr * 0.5**lr_step_ratio\n else:\n raise ValueError('Unknown lr_decay_type', FLAGS.lr_decay_type)", "def __learning_rate(self, lr0, epoch):\n \n \"\"\"\n Dan's Methos\n \"\"\"\n lrs = lr0 * 0.001\n c = np.power((lrs/lr0), 1.0/self.__maxEpoch)\n \n return lr0*np.power(c, epoch)", "def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)", "def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)", "def __call__(self, num_update):\n if num_update > self.count:\n self.base_lr = self.origin_lr * pow((1 - 1.0*num_update/self.max_samples), self.factor)\n if self.base_lr < self.stop_factor_lr:\n self.base_lr = self.stop_factor_lr\n logging.info(\"Update[%d]: now learning rate arrived at %0.5e, will not \"\n \"change in the future\", num_update, self.base_lr)\n elif num_update % self.show_num == 0:\n logging.info(\"Update[%d]: Change learning rate to %0.8e\",\n num_update, self.base_lr)\n self.count = num_update\n return self.base_lr", "def update_learning_rate(self):\r\n self.scheduler.step(self.clock.epoch)", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def _calc_learning_rate(self, curr_it: int, mode: str) -> float:\n return decreasing_rate(\n self.learning_rate_start,\n self.learning_rate_end,\n iteration_max=self.max_iterations_,\n iteration=curr_it,\n mode=mode,\n )", "def learning_rate(self, t):\n # return self.init_learning_rate * (1 - t)\n return self.init_learning_rate / (1 + t)\n # return self.init_learning_rate * exp(-t)\n # return self.init_learning_rate * (.005 / self.init_learning_rate) ** t", "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr", "def updatelearningrate(self, epoch):\n self.lr = getlearningrate(epoch=epoch, opt=self.opt)\n # update learning rate of model optimizer\n if isinstance(self.model, list):\n count = 0\n for param_group in self.optimzer.param_groups:\n # if type(model) is <list> then update modules with different learning rate\n param_group['lr'] = self.lr\n count += 1\n # print \">>> count is:\", count-1\n else:\n for param_group in self.optimzer.param_groups:\n param_group['lr'] = self.lr", "def update_learning_rate(self, validation_loss=None):\n if validation_loss is None:\n for scheduler in self.schedulers:\n scheduler.step()\n else:\n for scheduler in self.schedulers:\n scheduler.step(validation_loss)\n self.lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = {0:.7f}'.format(self.lr))", "def adjust_learning_rate(self):\n out_base_lr = self.args.base_lr\n for param_group in self.optimizer.param_groups:\n in_lr = param_group[\"initial_lr\"]\n out_lr = in_lr\n if self.args.lr_decay_type == \"cos\": # cosine lr schedule\n out_lr *= 0.5 * (1.0 + np.cos(np.pi * self.epoch / self.args.epochs))\n else: # stepwise lr schedule\n for milestone in self.args.lr_step_schedule:\n out_lr *= 0.1 if self.epoch >= milestone else 1.0\n param_group[\"lr\"] = out_lr\n if in_lr == self.args.base_lr:\n out_base_lr = out_lr\n if self.train_logger is not None:\n 
self.train_logger.scalar_summary(\n                \"metrics/%s/epoch\" % self.full_name, self.epoch, step=self.iteration, increment_counter=False\n            )\n            self.train_logger.scalar_summary(\n                \"metrics/%s/lr\" % self.full_name, out_base_lr, step=self.iteration, increment_counter=False\n            )\n        print(\"Epoch\", self.epoch, \"Learning rate\", out_base_lr)\n        return out_base_lr", "def get_current_rate(self):\n pass", "def rate(self, step=None):\n if step is None:\n step = self._step\n learning_rate = (\n self.factor *\n self.model_size ** (-0.5) *\n min(step ** (-0.5), step * self.warmup ** (-1.5))\n )\n return learning_rate", "def get_learning_rate(self, step):\n rate = 0\n progress = 100 * (step / self.max_steps) # expressed in percent units\n\n if self.policy == \"fixed\":\n rate = self.base_rate\n elif self.policy == \"step\":\n current_step = math.floor(step/self.step_size)\n rate = self.base_rate * math.pow(self.gamma, current_step)\n elif self.policy == \"exp\":\n rate = self.base_rate * math.pow(self.gamma, progress)\n elif self.policy == \"inv\":\n rate = self.base_rate * math.pow(1 + self.gamma * progress, - self.power)\n elif self.policy == \"multistep\":\n if ((self.current_step < self.stepvalue_size) and (step > self.stepvalues_list[self.current_step])):\n self.current_step = self.current_step + 1\n rate = self.base_rate * math.pow(self.gamma, self.current_step)\n elif self.policy == \"poly\":\n rate = self.base_rate * math.pow(1.0 - (step / self.max_steps), self.power)\n elif self.policy == \"sigmoid\":\n rate = self.base_rate * \\\n (1.0 / (1.0 + math.exp(self.gamma * (progress - 100 * self.step_size / self.max_steps))))\n else:\n logging.error(\"Unknown learning rate policy: %s\", self.policy)\n exit(-1)\n return rate", "def get_learning_rate(learning_rate, hidden_size, learning_rate_warmup_steps):\n with tf.name_scope(\"learning_rate\"):\n warmup_steps = tf.to_float(learning_rate_warmup_steps)\n step = tf.to_float(tf.train.get_or_create_global_step())\n\n learning_rate *= (hidden_size ** -0.5)\n # Apply linear warmup\n learning_rate *= tf.minimum(1.0, step / warmup_steps)\n # Apply rsqrt decay\n learning_rate *= tf.rsqrt(tf.maximum(step, warmup_steps)) \n return learning_rate", "def get_learning_rate(training_params):\n\n initial_lr = training_params.initial_lr\n lr_decay_factor = training_params.lr_decay_factor\n lr_decay_steps_str = training_params.lr_decay_steps_str\n if lr_decay_steps_str:\n global_step = tf.train.get_or_create_global_step()\n lr_decay_steps = [int(s) for s in lr_decay_steps_str.split(\",\")]\n\n lr = tf.train.piecewise_constant(\n global_step,\n lr_decay_steps,\n [initial_lr * (lr_decay_factor ** i)\n for i in range(len(lr_decay_steps) + 1)]\n )\n else:\n lr = initial_lr\n return lr", "def update_learning_rate(self, it):\n self.scheduler.step()\n for param_group in self.optimizer.param_groups:\n v = param_group['lr']\n self.tb_logger.add_scalar('train/lr', v, it)", "def get_lr(self):\n return self.optimizer.get_lr()", "def get_lr(self):\n return self.optimizer.get_lr()", "def assign_learning_rate(session, lr_update, lr_placeholder, new_lr):\n session.run(lr_update, feed_dict={lr_placeholder: new_lr})", "def rate(self):\n return self.__rate", "def data_rate(self):\n return self._data_rate", "def get_learning_rate(opt, current, best, counter, learning_rate):\n if current > best:\n best = current\n counter = 0\n elif counter > opt['delay']:\n learning_rate = learning_rate / 10.\n counter = 0\n else:\n counter += 1\n return (best, counter, learning_rate)", "def sample_rate(self):\n return self._sample_rate", "def rate(self):\n return self._rate", "def currentLearningRate(self, currentIteration, lambda1):\n return self.learning_rate * np.exp(-currentIteration / lambda1)", "def build_learning_rate(self):\n\n # TODO(arashwan): Explore if we want to only allow explicit const lr sched.\n if not self._lr_config:\n lr = self._optimizer_config.learning_rate\n else:\n lr = LR_CLS[self._lr_type](**self._lr_config.as_dict())\n\n if self._warmup_config:\n lr = WARMUP_CLS[self._warmup_type](lr, **self._warmup_config.as_dict())\n\n return lr", "def update_learning_rate(self, ppl, epoch):\n if self.start_decay_at is not None and epoch >= self.start_decay_at:\n self.start_decay = True\n if self.last_ppl is not None and ppl > self.last_ppl:\n self.start_decay = True\n\n if self.start_decay:\n self.lr = self.lr * self.lr_decay\n print(\"Decaying learning rate to {}\".format(self.lr))\n\n self.last_ppl = ppl", "def adjust_lr(self):\n learning_rate = self.params.base_lr * (1 - float(self.epoch) / self.params.num_epoch) ** self.params.power\n for param_group in self.opt.param_groups:\n param_group['lr'] = learning_rate\n print('Change learning rate into %f' % (learning_rate))\n self.summary_writer.add_scalar('learning_rate', learning_rate, self.epoch)", "def GetLearnRate(DILr,Epochs):\n\n if(DILr.mode == 'poly'):\n ScheduelLr = PolynomialDecay(maxEpochs=DILr.StepSize,initAlpha=DILr.Lr,power=DILr.factor)\n ScheduelLr.plot(range(1,int(Epochs)+1))\n lrate = LearningRateScheduler(ScheduelLr)\n elif(DILr.mode == 'cycle'):\n lrate = CyclicLR(step_size=DILr.StepSize,mode=DILr.cycle,gamma=DILr.factor,base_lr=DILr.MinLr,max_lr=DILr.Lr)\n elif(DILr.mode == 'drop'):\n ScheduelLr = StepDecay(initAlpha=DILr.Lr, factor=DILr.factor, dropEvery=DILr.StepSize)\n ScheduelLr.plot(range(1,int(Epochs)+1))\n lrate = LearningRateScheduler(ScheduelLr)\n elif(DILr.mode == 'normal'):\n lrate = None\n\n return np.asarray(lrate)", "def update_rate_hz(self) -> float:\n return self._update_rate_hz", "def update_rate(self):\n self._rate = (\n (self._received - self._samples[0]) / float(self.sample_size)\n )\n self._samples.append(self._received)", "def get_lr(self) -> Tensor:\n\n return self.lr_scheduler.get_lr()", "def step(self, epoch, val_loss=None):\n super().step(epoch, val_loss)\n # we don't change the learning rate at epoch boundaries\n return self.optimizer.get_lr()", "def step(self, epoch, val_loss=None):\n super().step(epoch, val_loss)\n # we don't change the learning rate at epoch boundaries\n return self.optimizer.get_lr()", "def LSPRefreshRate(self):\r\n\t\treturn self._get_attribute('lSPRefreshRate')", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def step_update(self, num_updates):\n if num_updates < self.cfg.warmup_updates:\n self.lr = self.warmup_init_lr + num_updates * self.warmup_lr_step\n else:\n curr_updates = num_updates - self.cfg.warmup_updates\n lr_mult = self.lr_decay ** (curr_updates // self.lr_deacy_period)\n self.lr = max(self.max_lr * lr_mult, self.min_lr)\n\n self.optimizer.set_lr(self.lr)\n return self.lr", "def updateLearnRate(\n self, phi, phi_prime, eligibility_trace, discount_factor, nnz, terminal\n ):\n\n if self.learn_rate_decay_mode == \"dabney\":\n # We only update learn_rate if this step is non-terminal; else phi_prime becomes\n # zero and the dot product below becomes very large, 
creating a very\n # small learn_rate\n if not terminal:\n # Automatic learning rate: [Dabney W. 2012]\n # http://people.cs.umass.edu/~wdabney/papers/alphaBounds.pdf\n candid_learn_rate = np.dot(\n discount_factor * phi_prime - phi, eligibility_trace\n )\n if candid_learn_rate < 0:\n self.learn_rate = np.minimum(\n self.learn_rate, -1.0 / candid_learn_rate\n )\n elif self.learn_rate_decay_mode == \"boyan\":\n self.learn_rate = (\n self.initial_learn_rate\n * (self.boyan_N0 + 1.0)\n / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)\n )\n # divide by l1 of the features; note that this method is only called if phi != 0\n self.learn_rate /= np.sum(np.abs(phi))\n elif self.learn_rate_decay_mode == \"boyan_const\":\n # New little change from not having +1 for episode count\n self.learn_rate = (\n self.initial_learn_rate\n * (self.boyan_N0 + 1.0)\n / (self.boyan_N0 + (self.episode_count + 1) ** 1.1)\n )\n elif self.learn_rate_decay_mode == \"const\":\n self.learn_rate = self.initial_learn_rate\n else:\n self.logger.warn(\"Unrecognized decay mode \")", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def get_model_learning_rate(\n learning_policy, base_learning_rate, learning_rate_decay_step,\n learning_rate_decay_factor, training_number_of_steps, learning_power,\n slow_start_step, slow_start_learning_rate):\n global_step = tf.compat.v1.train.get_or_create_global_step()\n if learning_policy == 'step':\n learning_rate = tf.train.exponential_decay(\n base_learning_rate,\n global_step,\n learning_rate_decay_step,\n learning_rate_decay_factor,\n staircase=True)\n elif learning_policy == 'poly':\n learning_rate = tf.compat.v1.train.polynomial_decay(\n base_learning_rate,\n global_step,\n training_number_of_steps,\n end_learning_rate=0,\n power=learning_power)\n else:\n raise ValueError('Unknown learning policy.')\n\n # Employ small learning rate at the first few steps for warm start.\n return tf.where(global_step < slow_start_step, slow_start_learning_rate,\n learning_rate)", "def get_scan_rate(self):\n raise NotImplementedError", "def _update_initial_learning_rate(configs, learning_rate):\n\n optimizer_type = get_optimizer_type(configs[\"train_config\"])\n if optimizer_type == \"rms_prop_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.rms_prop_optimizer\n elif optimizer_type == \"momentum_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.momentum_optimizer\n elif optimizer_type == \"adam_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.adam_optimizer\n else:\n raise TypeError(\"Optimizer %s is not supported.\" % optimizer_type)\n\n learning_rate_type = get_learning_rate_type(optimizer_config)\n if learning_rate_type == \"constant_learning_rate\":\n constant_lr = optimizer_config.learning_rate.constant_learning_rate\n constant_lr.learning_rate = learning_rate\n elif learning_rate_type == \"exponential_decay_learning_rate\":\n exponential_lr = (\n optimizer_config.learning_rate.exponential_decay_learning_rate)\n exponential_lr.initial_learning_rate = learning_rate\n elif learning_rate_type == \"manual_step_learning_rate\":\n manual_lr = optimizer_config.learning_rate.manual_step_learning_rate\n original_learning_rate = manual_lr.initial_learning_rate\n learning_rate_scaling = float(learning_rate) / original_learning_rate\n manual_lr.initial_learning_rate = learning_rate\n for schedule in manual_lr.schedule:\n schedule.learning_rate *= learning_rate_scaling\n elif learning_rate_type == 
\"cosine_decay_learning_rate\":\n cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate\n learning_rate_base = cosine_lr.learning_rate_base\n warmup_learning_rate = cosine_lr.warmup_learning_rate\n warmup_scale_factor = warmup_learning_rate / learning_rate_base\n cosine_lr.learning_rate_base = learning_rate\n cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate\n else:\n raise TypeError(\"Learning rate %s is not supported.\" % learning_rate_type)", "def lr_schedule(epoch,lr):\r\n learning_rate = lr\r\n if epoch > 10:\r\n learning_rate *= 0.1\r\n if epoch > 20:\r\n learning_rate *= 0.1\r\n if epoch > 50:\r\n learning_rate *= 0.01\r\n\r\n # tf.summary.scalar('learning rate', data=learning_rate, step=epoch)\r\n return learning_rate", "def update(self) -> None:\n self.data.update()\n self._state = round(self.data.rate[\"rates\"][self._target], 3)", "def update_learning_rate(self, n_batches):\r\n criterion1 = n_batches < self.C.lr_ramp_up_minibatches\r\n criterion2 = n_batches % (self.C.lrdi + self.C.lr_ramp_up_minibatches * self.C.ramp_up_lr) == 0\r\n\r\n if self.C.ramp_up_lr and criterion1:\r\n # calculate what the \"maximum\" learning rate should be given the\r\n # input params, and ramp up the learning rate\r\n max_lr = self.C.max_rel_lr * self.C.init_lr\r\n lr_ramp_up_factor = np.exp(np.log(max_lr / self.C.init_lr) / self.C.lr_ramp_up_minibatches)\r\n\r\n # learning rate will increase if not `maximum_lr` already\r\n util.update_lr(optimizer=self.optimizer,\r\n scale_factor=lr_ramp_up_factor,\r\n maximum_lr=max_lr)\r\n\r\n elif criterion2:\r\n # decreate the learning rate\r\n min_lr = self.C.min_rel_lr * self.C.init_lr\r\n util.update_lr(optimizer=self.optimizer,\r\n scale_factor=self.C.lrdf**n_batches,\r\n minimum_lr=min_lr)", "def sample_rate(self):\n return self._sample_rate", "def sample_rate(self):\n return self._sample_rate", "def getDataRate(self):\n \n return self.DataRate", "def get_lr(self):\n\n if self.opt is None:\n raise ValueError('No learning rate schedulers initialized')\n else:\n for pg in self.opt.param_groups:\n return pg['lr']", "def LoadRateValue(self):\n\t\treturn self._get_attribute('loadRateValue')", "def get_learning_rate_decay(self):\n\n hparams = self.hparams\n if (hparams.learning_rate_decay_scheme and\n hparams.learning_rate_decay_scheme == \"luong\"):\n start_decay_step = int(hparams.num_train_steps / 2)\n decay_steps = int(hparams.num_train_steps / 10) # decay 5 times\n decay_factor = 0.5\n else:\n start_decay_step = hparams.start_decay_step\n decay_steps = hparams.decay_steps\n decay_factor = hparams.decay_factor\n\n return tf.cond(\n self.global_step < start_decay_step,\n lambda: self.learning_rate,\n lambda: tf.train.exponential_decay(\n self.learning_rate,\n (self.global_step - start_decay_step),\n decay_steps, decay_factor, staircase=True),\n name=\"learning_rate_decay_cond\")", "def sample_rate(self):\r\n return self.config.sample_rate", "def step_update(self, num_updates):\n if self.args['optimization']['warmup_updates'] > 0 and \\\n num_updates <= self.args['optimization']['warmup_updates']:\n self.warmup_factor = num_updates / float(self.args['optimization']['warmup_updates'])\n lr = self.warmup_factor * self.lr\n elif num_updates >= self.total_num_update:\n lr = self.end_learning_rate\n else:\n warmup = self.args['optimization']['warmup_updates']\n lr_range = self.lr - self.end_learning_rate\n pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup)\n lr = lr_range * pct_remaining ** (self.power) + 
self.end_learning_rate\n self.optimizer.set_lr(lr)\n return self.optimizer.get_lr()", "def learning_rate_schedule(current_epoch,\n current_batch,\n batches_per_epoch,\n batch_size):\n del current_batch, batches_per_epoch # not used\n initial_learning_rate = common.BASE_LEARNING_RATE * batch_size / 128\n learning_rate = initial_learning_rate\n for mult, start_epoch in LR_SCHEDULE:\n if current_epoch >= start_epoch:\n learning_rate = initial_learning_rate * mult\n else:\n break\n return learning_rate", "def rate(self):\n return self.brate / FAC", "def adjust_learning_rate(optimizer, epoch):\n lr = opt.lr * (0.5 ** (epoch // opt.step))\n return lr", "def initialize_learning_rate(self):\n\n if (self.FLAGS.learning_rate_decay is \"exponential\"):\n self.learning_rate = tf.train.exponential_decay(\n self.FLAGS.learning_rate,\n self.global_step,\n self.FLAGS.decay_steps,\n self.FLAGS.decay_rate)\n else :\n self.learning_rate = self.FLAGS.learning_rate", "def learning_rate_fn(global_step):\n lr = tf.train.piecewise_constant(\n global_step, boundaries, vals)\n if warmup:\n warmup_steps = int(batches_per_epoch * 5)\n warmup_lr = (\n initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(\n warmup_steps, tf.float32))\n return tf.cond(pred=global_step < warmup_steps,\n true_fn=lambda: warmup_lr,\n false_fn=lambda: lr)\n return lr # , initial_learning_rate, batches_per_epoch, vals", "def yaw_rate(self) -> float:\n return self._state[5]", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def lr(self):\n if self.T is None:\n return self.lr0\n else:\n return linear_decay(self.lr0, self.alpha, self.T, self.step)", "def eager_decay_rate():\n learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(\n np.pi *\n (tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps\n ) / float(total_steps - warmup_steps - hold_base_rate_steps)))\n if hold_base_rate_steps > 0:\n learning_rate = tf.where(\n global_step > warmup_steps + hold_base_rate_steps,\n learning_rate, learning_rate_base)\n if warmup_steps > 0:\n if learning_rate_base < warmup_learning_rate:\n raise ValueError('learning_rate_base must be larger or equal to '\n 'warmup_learning_rate.')\n slope = (learning_rate_base - warmup_learning_rate) / warmup_steps\n warmup_rate = slope * tf.cast(global_step,\n tf.float32) + warmup_learning_rate\n learning_rate = tf.where(global_step < warmup_steps, warmup_rate,\n learning_rate)\n return tf.where(global_step > total_steps, 0.0, learning_rate,\n name='learning_rate')", "def _get_learning_rate_warmup(global_step):\n warmup_steps = FLAGS.warmup_steps\n warmup_scheme = FLAGS.warmup_scheme\n tf.logging.info(\" learning_rate=%g, warmup_steps=%d, warmup_scheme=%s\" %\n (FLAGS.learning_rate, warmup_steps, warmup_scheme))\n\n # Apply inverse decay if global steps less than warmup steps.\n # Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3)\n # When step < warmup_steps,\n # learing_rate *= warmup_factor ** (warmup_steps - step)\n if warmup_scheme != \"t2t\":\n return FLAGS.learning_rate\n else:\n # 0.01^(1/warmup_steps): we start with a lr, 100 times smaller\n warmup_factor = tf.exp(tf.log(0.01) / warmup_steps)\n inv_decay = warmup_factor ** (\n tf.to_float(warmup_steps - global_step))\n return tf.cond(\n global_step < warmup_steps,\n lambda: inv_decay * FLAGS.learning_rate,\n lambda: FLAGS.learning_rate,\n name=\"learning_rate_warmup_cond\")", "def update_params(self, 
learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def adjust_learning_rate(optimizer, lr, step, args):\n # decay = 0.1**(sum(epoch >= np.array(lr_steps)))\n lr = lr * (0.95**(step//args.lr_decay_every))\n print(\"current learning rate: {:.6f}\".format(lr))\n param_group = optimizer.param_groups\n for i in range(len(param_group)):\n param_group[i]['lr'] = lr\n\n return optimizer", "def lr_schedule(self, epoch):\n lr = self.params.learning_rate\n\n if epoch > int(self.params.num_epochs * 0.8):\n lr *= 1e-3\n elif epoch > int(self.params.num_epochs * 0.6):\n lr *= 1e-2\n elif epoch > int(self.params.num_epochs * 0.4):\n lr *= 1e-1\n print('Learning rate: ', lr)\n return lr", "def feedback(self, feedback: _Feedback) -> float:\n self.params, self.opt_state, cur_loss = self.update(self.params,\n self.opt_state,\n feedback)\n return cur_loss", "def get_tickrate(self):\n raise NotImplementedError", "def get_learning_rate_warmup(self):\n\n hparams = self.hparams\n warmup_steps = hparams.learning_rate_warmup_steps\n warmup_factor = hparams.learning_rate_warmup_factor\n\n # Apply inverse decay if global steps less than warmup steps.\n # Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3)\n # When step < warmup_steps,\n # learing_rate *= warmup_factor ** (warmup_steps - step)\n inv_decay = warmup_factor**(\n tf.to_float(warmup_steps - self.global_step))\n\n return tf.cond(\n self.global_step < hparams.learning_rate_warmup_steps,\n lambda: inv_decay * self.learning_rate,\n lambda: self.learning_rate,\n name=\"learning_rate_warump_cond\")", "def sample_rate(self):\n return self.query_float('ENTER Current Sample Rate (Sa/s)')", "def StepIncrementLoadRate(self):\n\t\treturn self._get_attribute('stepIncrementLoadRate')", "def update_lr(self, *args, **kwargs):\n raise NotImplementedError", "def update_lr(self, *args, **kwargs):\n raise NotImplementedError", "def adjust_learning_rate(optimizer, batch):\n lr = learning_rate\n for i in range(len(steps)):\n scale = scales[i] if i < len(scales) else 1\n if batch >= steps[i]:\n lr = lr * scale\n if batch == steps[i]:\n break\n else:\n break\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr / batch_size\n return lr", "def calculateDataRate(self):\n pass", "def _get_lr(self, initial_lr, step, epoch):\n return initial_lr", "def adjust_learning_rate(self, optimizer, epoch):\n if self.args.lr_mode == 'step':\n lr = self.args.lr * (0.1 ** (epoch // self.args.lr_step))\n elif self.args.lr_mode == 'poly':\n lr = self.args.lr * (1 - epoch / self.args.train_epochs) ** 0.9\n else:\n raise ValueError('Unknown lr mode {}'.format(self.args.lr_mode))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def update_weights(x_train, y_train, weights, learning_rate):\r\n predictions = compute_prediction(x_train, weights)\r\n weights_delta = np.dot(x_train.T, y_train - predictions)\r\n m = y_train.shape[0]\r\n weights += learning_rate / float(m) * weights_delta\r\n return weights", "def rate(self):\n if self._rate:\n return self._rate\n else:\n return 
self._wave.getframerate()", "def relative_rate(self) -> \"double\":\n return _beamforming_swig.randomsampler_sptr_relative_rate(self)", "def adjust_learning_rate(optimizer, epoch):\n \n boundary = [args.epochs//2,args.epochs//4*3,args.epochs]\n lr = args.lr * 0.1 ** int(bisect.bisect_left(boundary, epoch))\n print('Learning rate: %f'%lr)\n #print(epoch, lr, bisect.bisect_left(boundary, epoch))\n # lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr", "def lr(self):\n pass", "def update_weights(self):\n\n\n self.w += self.learn_rate * (self.X.T.dot(self.T - self.Y)\n - self.reg_L1 * np.sign(self.w)\n - self.reg_L2 * 2*self.w)", "def decay(self):\n if self.l_rate > self.l_rate_bound[0] and self.l_rate - self.decay_rate > 0.0:\n self.l_rate -= self.decay_rate\n elif self.l_rate - self.decay_rate <= 0.0 or self.l_rate < self.l_rate_bound[0]:\n self.l_rate = self.l_rate_bound[0]\n\n for layer in self.network:\n layer.learning_rate = self.l_rate", "def get_samplerate(self):\n\t\treturn _PM_UPDATE_RATE / self.output_decimation", "def adjust_learning_rate(optimizer, epoch):\n lr = hyper.lr * (0.5 ** (epoch // 10))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def get_learn_rate_and_momentum(self, n_iter):\n return self.learn_rate_schedule.get_var_at_iter(n_iter), \\\n self.momentum_schedule.get_var_at_iter(n_iter)" ]
[ "0.79935455", "0.79565597", "0.78638303", "0.76802355", "0.7612205", "0.7602973", "0.7580131", "0.74478555", "0.7442717", "0.74091333", "0.7381036", "0.7336032", "0.7336032", "0.7331142", "0.72541714", "0.72166234", "0.72166234", "0.71983415", "0.7194819", "0.70593596", "0.70275456", "0.6987145", "0.693764", "0.6934233", "0.6930346", "0.6820161", "0.68073004", "0.6778301", "0.6776443", "0.6645793", "0.6645793", "0.6644867", "0.6633848", "0.6630429", "0.66242087", "0.65737104", "0.6559129", "0.6542309", "0.65230715", "0.65204406", "0.6520111", "0.6515483", "0.650769", "0.6501166", "0.6496104", "0.64753616", "0.64753616", "0.6384109", "0.63830966", "0.6379828", "0.6367304", "0.63473827", "0.63472897", "0.6345628", "0.6338812", "0.6309734", "0.6289677", "0.62740976", "0.62680364", "0.62680364", "0.6260199", "0.6249857", "0.6248683", "0.62400913", "0.623279", "0.6216861", "0.62125707", "0.6197729", "0.6179618", "0.6179413", "0.61756194", "0.61732835", "0.61729354", "0.6165109", "0.6161104", "0.6143663", "0.61363333", "0.61310256", "0.6129968", "0.6127155", "0.6105071", "0.6080613", "0.6076629", "0.6075132", "0.60658324", "0.60658324", "0.60640806", "0.60542774", "0.6052308", "0.6047416", "0.60450363", "0.60438454", "0.60407233", "0.60282177", "0.6010947", "0.60066205", "0.6004406", "0.6001802", "0.6001778", "0.5984309" ]
0.61572826
75
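Most of the learning-rate passages in this record share one pattern: ramp the rate up over a warmup window, then decay it stepwise. A minimal stand-alone sketch of that shared shape, in plain Python; the function name and every default value below are illustrative assumptions, not taken from any snippet above.

def learning_rate_at(step, base_lr=0.1, warmup_steps=1000,
                     decay_steps=10000, decay_rate=0.5):
    # Hypothetical schedule: linear warmup to base_lr, then stepwise
    # exponential decay -- the common shape of the warmup/decay snippets
    # above, not a reimplementation of any one of them.
    if step < warmup_steps:
        return base_lr * step / warmup_steps          # linear ramp-up
    n_decays = (step - warmup_steps) // decay_steps   # completed decay periods
    return base_lr * decay_rate ** n_decays

For example, learning_rate_at(2500) returns the warmed-up base rate (0.1), since fewer than decay_steps updates have elapsed past the warmup window.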
check it has a solution
def has_solution(self) -> bool: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def did_solve(self) -> bool:\n pass", "def is_solved(self):\n if not self._find_empty():\n return True\n else:\n return False", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def has_solution(self) -> bool:\n if self in [self.SATISFIED, self.ALL_SOLUTIONS, self.OPTIMAL_SOLUTION]:\n return True\n return False", "def did_solve(self) -> bool:\n return self._solution.info.status == \"solved\"", "def checkSolution(self):\n movesToEndblock = self.gridSize - self.changeable[0] - 2\n if self.checkMove(0,movesToEndblock) == 0:\n return 0\n return 1", "def is_solution(self):\n # Only need to check the length because the configuration expansion assesses the feasibility.\n return len(self._path) == self._N", "def check():", "def check_if_solvable(self):\n\n self.solvable=True #status of sudoku\n for i in range(0, 9):\n for j in range(0, 9):\n if self.a[i][j]==0:\n continue\n if self.check(i, j)[self.a[i][j]]==0:\n self.solvable=False\n return False", "def checkPuzzle(self):\n print('Got to checkPuzzle')", "def is_legal_solution(self, solution):\r\n if self.sorting_order is ScoresSortingOrder.ASCENDING:\r\n return self.fit_score(solution) == 0\r\n else:\r\n return self.fit_score(solution) == sum(x for x in range(1, 12))", "def test_get_solution(self):\n pass", "def ok(self, solution):\n if self.constraints is not None:\n for constraint in self.constraints:\n if not constraint(solution):\n return False\n return True", "def test_is_solved_when_puzzle_is_solved(self):\n self.assertTrue(self.sudoku.is_solved())", "def test_is_solved(self):\n p = hw.TilePuzzle([[1, 2], [3, 0]])\n self.assertTrue(p.is_solved())\n p = hw.TilePuzzle([[0, 1], [3, 2]])\n self.assertFalse(p.is_solved())", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def answer(self) -> bool:", "def test_is_solved_when_puzzle_is_not_solved(self):\n sudoku = sudolver.Sudoku()\n self.assertFalse(sudoku.is_solved())", "def did_solve(self):\n return self._solution.success", "def noSol(self):\n noSol = False \n\n cost_min_bilet = 100000\n\n for a in self.info.autobuze:\n if a.price < cost_min_bilet:\n cost_min_bilet = a.price\n\n for o in self.info.oameni:\n if o.money < cost_min_bilet and o.remaining_dest != []: \n noSol = True\n break\n \n set_destinatii = set()\n\n for o in self.info.oameni:\n if o.current_loc in set_destinatii:\n noSol = True\n break\n else:\n set_destinatii.add(o.current_loc)\n\n return noSol", "def is_solved(self):\n raise NotImplementedError()", "def did_solve(self) -> bool:\n return self._stats[\"success\"]", "def solveOneStep(self):\n ### Student code goes here\n return True", "def isComplete(self):\n for n in range(9):\n for m in range(9):\n if self.puzzle[n][m] == 0:\n return False\n return True", "def solve(self):\n pass", "def solve(self):\n pass", "def is_exist_another_solution(self):\r\n # prepare data\r\n notes_quantity_min = sum(self.banknote_quantity)\r\n banknote_quantity_max = [int(math.floor(self.money / self.banknotes[i])) for i in range(0, self.n)]\r\n # model\r\n mdl = Model(name='MinSetChecker')\r\n # decision variables\r\n mdl.banknote_quantity = {i: mdl.integer_var(lb=0, ub=banknote_quantity_max[i]) for i in range(0, self.n)}\r\n # decision expressions\r\n money_amount = mdl.sum(mdl.banknote_quantity[i] * self.banknotes[i] for i in range(0, self.n))\r\n notes_quantity = mdl.sum(mdl.banknote_quantity[i] for i in range(0, self.n))\r\n # constraints\r\n 
mdl.add_constraint(money_amount == self.money)\r\n mdl.add_constraint(notes_quantity == notes_quantity_min)\r\n mdl.add_constraint(\r\n mdl.sum(mdl.banknote_quantity[i] == self.banknote_quantity[i] for i in range(0, self.n)) != self.n\r\n )\r\n # solve model: return True if it exists, False if not\r\n if not mdl.solve():\r\n return False\r\n else:\r\n return True", "def solve(self):", "def validate(self, solution: list) -> (bool, float):\n start = time() * 1000\n nodes = self.application.nodes()\n\n if solution is None:\n return False, round(time() * 1000 - start, 3)\n elif len([node for node in list(nodes) if node not in solution]) == 0:\n logging.info(f\"All {len(solution)} nodes got visited\")\n return True, round(time() * 1000 - start, 3)\n else:\n logging.error(f\"{len([node for node in list(nodes) if node not in solution])} nodes were NOT visited\")\n return False, round(time() * 1000 - start, 3)", "def isSolved(self):\n return self.isComplete() and self.isLegal()", "def solution(data):\n\t\tif data:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0", "def solve(self):\n if self.is_solved():\n return True\n else:\n empty_box_coordinates = self._find_empty()\n row, column = empty_box_coordinates\n for i in range(1, 10):\n if self.is_valid_number(i, empty_box_coordinates):\n self.model[row][column] = i\n\n if self.solve():\n return True\n\n self.model[row][column] = 0\n return False", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def solved(self):\n return GOAL_VEHICLE in self.vehicles", "def check_solution(self, solution):\n return isinstance(solution, str)", "def is_solved(self):\n # Iterate through each square of the puzzle\n for row in range(self.sl):\n for col in range(self.sl):\n val = self.puzzle[row][col]\n\n # If any square value is blank (0), not solved, return False\n if val == 0:\n return False\n\n # Trick to keep DRY code: replace each value temporarily with a\n # 0, and use valid_square method with original value to determine\n # if every square is valid\n self.puzzle[row][col] = 0\n valid = self.valid_square(row, col, val)\n self.puzzle[row][col] = val\n \n # If not a valid value for square, return False\n if not valid:\n return False\n return True", "def solve(self):\n ...", "def find_solution(self):\r\n for solution in self.solutions:\r\n if self.fitting_function.is_legal_solution(solution):\r\n return solution\r\n return None", "def update_status(self):\n if len(self.invalid) != 0:\n return False\n for row in self.grid:\n for num in row:\n if num == 0:\n return False\n self.solved = True\n print(\"solved\")\n return True", "def check(self) -> None:", "def is_solved(self):\n self.solved = self.current_pos == self.finish_pos\n return self.solved", "def is_solvable(self):\n self_copy = deepcopy(self)\n return self_copy.solve()", "def is_solved(self):\n return self._start == self._target", "def solved(self):\r\n return self.puzzle.solved", "def solve(self):\n if not self.running or self.state == \"stopping\":\n return False\n\n # Find first empty tile\n target = ()\n for i in range(9**2):\n if self.board[i // 9, i % 9] == 0:\n target = (i // 9, i % 9)\n break\n\n # If there are no empty tiles, the puzzle is solved\n if not target:\n return True\n\n # Tests all possible values\n for value in range(1, 10):\n if not self.isPossibleAssign(target, value):\n continue\n\n self.update_board(target, value)\n\n if self.solve():\n return True\n\n # In case of failure, reset and return False\n self.update_board(target, 
0)\n\n return False", "def solve(self):\n if not self.solvable:\n print('Suduko not Solvable')\n return False\n res=self.back(0, 0)\n # if self.a[0][0]!=0:\n # res=self.back(0, 1)\n # else:\n # for i in range(1, 10):\n # self.a[0][0]=i\n # res=self.back(0, 1)\n # if res:\n # break\n if res:\n self.check_if_solvable()\n print(\"Sudoku Solved!\")\n print(self.a)\n return self.a\n else: print(\"Not Solvable\")\n return False", "def is_solution(self, csp):\n return self.is_consistent(csp.get_constraints()) and self.is_complete(csp.get_variables())", "def check_solution(generation):\n # Calcula restrições para indivíduos da geração\n restrs = [restricoes(ind) for ind in generation]\n\n solution = []\n for i, retr in enumerate(restrs):\n if retr == 0:\n solution.append(generation[i])\n return solution, restrs", "def done(self):\r\n return not self.get_all_closed_cells() or self.unsolvable", "def is_solved(self) -> bool:\n return set(self.boxes) == set(self.storage_locations)", "def check_solutions(eq):\n s = diophantine(eq)\n\n factors = Mul.make_args(eq)\n\n var = list(eq.free_symbols)\n var.sort(key=default_sort_key)\n\n while s:\n solution = s.pop()\n for f in factors:\n if diop_simplify(f.subs(zip(var, solution))) == 0:\n break\n else:\n return False\n return True", "def solve(self):\n\n\t\tempty_spot = self.find_unsettled_spot()\n\t\tif not empty_spot:\n\t\t\treturn True\n\t\telse:\n\t\t\trow, col = empty_spot\n\n\t\t\t# Loop through all the available numbers\n\t\t\tfor number in range(1, 10):\n\t\t\t\t# If the number has no conflicts in its row, column or subgrid\n\t\t\t\tif self.no_conflicts(row, col, number):\n\t\t\t\t\t# Then overwrite the 0 with the new number\n\t\t\t\t\tself.grid[row][col] = number\n\n\t\t\t\t\tif self.solve():\n\t\t\t\t\t\treturn True\n\n\t\t\t\t\t# This is where backtracking happens\n\t\t\t\t\t# Reset the latest position back to 0 and try with new number value\n\t\t\t\t\tself.grid[row][col] = 0\n\n\t\treturn False", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n return True", "def check_answer(answer, solution):\n if answer == solution:\n return True\n else:\n return False", "def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True", "def solve_step(self,puzzle_grid,x,y):\n self.puzzleGrid = puzzle_grid\n if(self.foundStep == False):\n self.targetCell = self.puzzleGrid.grid[x][y]\n if(self.targetCell.isSolved == False):\n self.calculate_possibilities()\n if len(self.targetCell.possibilities) == 1: #README method 1\n self.targetCell.solve()\n return True\n else:\n return self.check_neighbours() #README method 2", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def is_solvable(self) -> bool:\r\n inv_count = 0\r\n arr = self.current_state.flatten()\r\n for i in range(0, 9):\r\n for j in range(i + 1, 9):\r\n if arr[j] and arr[i] and arr[i] > arr[j]:\r\n inv_count += 1\r\n return inv_count % 2 == 0", "def is_solved(self):\n return self.to_grid == self.from_grid", "def is_solved(self):\n\n marker = self._marker\n amount_of_pegs = 0\n for row in marker:\n for i in row:\n if i == \"*\":\n amount_of_pegs += 1\n return amount_of_pegs == 1", "def print_solution():\n pass", "async def check(self):\n\n while not self.solved:\n # Get list of possible numbers this square can have\n possibles = 
self.get_possible_numbers()\n # If there's only once possibility, then use this number...this square is now solved\n if len(possibles) == 1:\n self.num = possibles.pop()\n # If there are no possible squares well...something's wrong, that shouldn't be possible\n # This check is done because we want to be able to guess and check, and figure out if a guess is invalid\n elif len(possibles) == 0:\n raise ValueError(\"Impossible square; no possible numbers based on restrictions\")\n # Otherwise wait a small amount and continue\n else:\n await asyncio.sleep(0.05)", "def correct(self):\n return self._solution == self._alternatives.value", "def solution(self) -> State:", "def is_solved(self):\n marker = self._marker\n\n count = 0\n for row in marker:\n for piece in row:\n if piece == \"*\":\n count += 1\n if count == 1:\n return True\n else:\n return False", "def an_check(self):\n\t\tfor filles in self.xelt:\n\t\t\t# parcours rapide des branches niveau 1\n\t\t\tif search(r'analytic$', filles.tag):\n\t\t\t\treturn True\n\t\treturn False", "def any(self):\n valid = False\n solution = []\n while not valid:\n soln = []\n for dec in self.decisions:\n soln.append(random.randint(dec.low, dec.high))\n valid = self.ok(soln)\n if valid:\n solution = soln\n return solution", "def is_solved(self):\n i = 0\n for row in self._marker:\n for x in row:\n if x == \"*\":\n i += 1\n if i > 1:\n return False\n return True", "def violated(self) -> bool:\n ...", "def test_teacher_check_homework_negative_if_solution_is_not_ok():\n assert not opp_teacher.check_homework(result_3)", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def check_solvability(self, coefficient, result):\r\n if coefficient == 0:\r\n if result == 0:\r\n if not self.has_error:\r\n self.has_error = True\r\n self.error += INFINITE_SOLUTIONS\r\n\r\n else:\r\n if not self.has_error:\r\n self.has_error = True\r\n self.error += NO_SOLUTION", "def check():\n hokusai.check()", "def has_result(self):\n return len(self.__analysis_items) > 0", "def check_needs(self):\n if not self.target:\n lowest_status = 1\n for need in self.needs:\n if need == \"social\":\n continue\n perc = getattr(self, need) / getattr(self, f\"max_{need}\")\n if perc < lowest_status:\n lowest_status = perc\n self.satisfying = need\n\n # First check inventory\n in_inv = list(filter(lambda x: self.satisfying in x.satisfies, self.inventory))\n if in_inv:\n self.use_item(in_inv[0])\n return None\n\n if self.satisfying == \"work\" and self.get_tasks():\n task = self.get_tasks()[0]\n self.target = task.target\n self.target_job = task\n else:\n targets = self.game.find_need(self.satisfying)\n self.target = self.determine_closest(targets)\n\n if not self.target:\n print(f\"{self.name} can't satisfy {self.satisfying}\")\n else:\n self.state = f\"satisfying {self.satisfying}\"\n self.calculate_target_path()", "def IsOk(self):\r\n \r\n return True", "def validateSolution(solution) -> bool:\r\n # Does not use shortcut return, if invalidation found, to print all errors.\r\n isValid = True\r\n\r\n if not validateTeacherTimeConstraints(solution):\r\n logger.debug(\"Solution: %4i, TeacherTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateRoomTimeConstraints(solution):\r\n logger.debug(\"Solution: %4i, RoomTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not 
validateSemesterGroupTimeConstraints(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, SemesterGroupTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateStudyDays(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, StudyDay Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateAllLessonsAsBlockCourses(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, AllLessonsAsBlock Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateBlocksOnlyInSameRoom(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, BlocksOnlyInSameRoom Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateConsecutiveLessons(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, ConsecutiveLessons Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateForenoonLessons(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, ForenoonLessons Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateGivenTimeslots(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, GivenTimeslots Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateLessonTakePlaceOnOneDay(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, LessonTakePlaceOnOneDay Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateLessonTime(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, LessonTime Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLessonsPerDayPerTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLessonsPerDayPerTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLessonsPerDayPerSemesterGroup(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLessonsPerDayPerSemesterGroup Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLecturesPerDayPerTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLecturesPerDayPerTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateMaxLecturesAsBlockForTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, MaxLecturesAsBlockForTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateOneCoursePerDayPerTeacher(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, OneCoursePerDayPerTeacher Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateOnlyOneNotAllInOneBlockLessonPerDay(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, NotAllInOneBlockLessonsPerDay Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateRoomNotAvailableTimes(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, RoomNotAvailable Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateTeacherNotAvailableTimes(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, TeacherNotAvailable Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateSameTimeLessons(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, SameTimeLessons Constraint Fail!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n if not validateTimeslotVarHelperVariables(solution, solution.orm):\r\n logger.debug(\"Solution: %4i, TimeslotBoolVars Wrong 
Values!\" % solution.solutionIndex)\r\n isValid = False\r\n\r\n return isValid", "def is_solved(self):\n peg_count = 0\n for row in self._marker:\n for item in row:\n if item == '*':\n peg_count += 1\n return peg_count == 1", "def isSolvable(self):\n tiles = []\n for i in range(len(self.tiles)):\n for j in range(len(self.tiles)):\n if self.tiles[j][1] * 3 + self.tiles[j][0] + 1 == i + 1:\n tiles.append(j + 1)\n count = 0\n for i in range(len(tiles) - 1):\n for j in range(i + 1, len(tiles)):\n if tiles[i] > tiles[j] and tiles[i] != 9:\n count += 1\n return count % 2 == 0 and count != 0", "def find_solution(self):\n print(\"\\nFinding ICTS Solution...\")\n ######### Fill in the ICTS Algorithm here #########\n result = self.stat_tracker.time(\"time\", lambda: self.bfs())\n if result == -1:\n self.stat_tracker.stats['time'] = -1\n return []\n self.stat_tracker.write_stats_to_file(self.stat_tracker.get_results_file_name())\n return result\n ###################################################", "def test_check_solved():\n game = Game()\n game.word = 'word'\n game.pattern = 'word'\n game.check_solved()\n assert game.solved is True", "def check_validity(self):", "def is_in_solution(self, cell: Cell) -> bool:\n while cell in self.prev_cells:\n cell = self.prev_cells[cell]\n return cell == self.maze.start_cell", "def q1(puzzle):\n mysudoku = build_csp(puzzle)\n solution = mysudoku.backtracking_search()\n return solution, mysudoku", "def check_infeasibility(self):\n self.logger.info(\"Checking infeasibility for manual design.\")\n ccs = list(nx.connected_components(self.optimization_graph.to_undirected(as_view=True)))\n nb_connected_components = len(ccs)\n if nb_connected_components > 1:\n raise DHCOptimizerException(\"The given optimization graph has several connected\"\n \" components (%d)\" % nb_connected_components)", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True", "def isGoal(self):\n for index in range(self.DIM):\n if not self.values('r',index).count(0) is 0:\n return False\n if not self.isValid():\n return False\n return True", "def _create_solution(self) -> bool:\n row = 0\n col = 0\n for i in range(81):\n # current cell\n row = i // 9\n col = i % 9\n\n # if cell is empty we try placing number in it\n if self._grid_sol[row][col] == 0:\n shuffle(NUMLIST)\n for n in NUMLIST:\n\n # if n is viable for placement for cell then place it\n if not SudokuGrid.check_valid_placement(n, row, col,\n self._grid_sol):\n self._grid_sol[row][col] = n\n\n # check if grid is full and return true\n if SudokuGrid.check_grid(self._grid_sol):\n return True\n\n # otherwise recurse to place next cell\n elif self._create_solution():\n return True\n\n # break loop if no valid placement in cell\n break\n\n # will set current cell to 0 and go back to previous recursion\n # to find another valid cell placement combination\n self._grid_sol[row][col] = 0\n return False", "def solveOneStep(self):\n ### Student code goes here\n if self.currentState.state == self.victoryCondition:\n self.visited[self.currentState]=True\n return True\n return self.BFS()", "def checkConflicts(self):\n\t\tapDisplay.printError(\"you did not create a 'checkConflicts' function in your script\")\n\t\traise NotImplementedError()", "def get_sol(self):", "def validate_conn(self, solution):\r\n\r\n active_nodes = [idx for idx, value in enumerate(solution) # remove not included 
nodes in solution\r\n if value != 0 and idx not in self.dead_nodes and self.network.get_node(idx).energy >= cf.COMMUNICATION_ENERGY]\r\n active_nodes.append(-1) # add a sink node \r\n visited = self.DFS(self.network_graph, active_nodes[0], active_nodes)\r\n if len(visited) == len(active_nodes):\r\n return True\r\n else:\r\n return False", "def _check_fitted(self):\n assert self.subspace_basis is not None, \\\n 'You must fit %s before you can project' % self.__class__.__name__", "def _check_fitted(self):\n assert self.subspace_basis is not None, \\\n 'You must fit %s before you can project' % self.__class__.__name__" ]
[ "0.7137686", "0.7136563", "0.7095094", "0.70714545", "0.7049462", "0.7029765", "0.6964778", "0.6960238", "0.6939422", "0.68836486", "0.6876838", "0.6864891", "0.68153703", "0.6815134", "0.6724094", "0.6690701", "0.66641307", "0.6656697", "0.6656075", "0.65969825", "0.65919304", "0.65758735", "0.6547716", "0.6543017", "0.65353775", "0.65353775", "0.6507536", "0.64726526", "0.6444415", "0.6419664", "0.6391716", "0.6368501", "0.63516545", "0.6331601", "0.63271123", "0.6325308", "0.63082695", "0.63021266", "0.629292", "0.62920123", "0.62790847", "0.6276159", "0.6273044", "0.6270922", "0.62449265", "0.62244594", "0.6219936", "0.6153984", "0.6125219", "0.61220324", "0.6120793", "0.611572", "0.6113501", "0.6113501", "0.6113501", "0.6113501", "0.6099087", "0.6052086", "0.60474426", "0.60340834", "0.6021602", "0.6013065", "0.6007345", "0.59949034", "0.59910053", "0.5985295", "0.5984759", "0.59775525", "0.59697676", "0.5965046", "0.5964745", "0.596323", "0.5958595", "0.59512734", "0.5948536", "0.5948536", "0.5948536", "0.5925185", "0.59172815", "0.59101075", "0.5907072", "0.5905568", "0.5900555", "0.5887706", "0.5884249", "0.5871189", "0.5870154", "0.5858173", "0.5853559", "0.5850932", "0.5846748", "0.58448726", "0.5821868", "0.5819312", "0.58103925", "0.58103573", "0.58076006", "0.5805483", "0.58038", "0.58038" ]
0.8243674
0
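The positive document in this record is only a stub, and several of the negatives test solvability by search. For concreteness, here is one plausible way such a feasibility check can be written: a backtracking has_solution over a 9x9 Sudoku-style grid. This is an illustrative sketch, not the implementation the stub belongs to; the grid convention (0 = empty cell) is an assumption borrowed from the Sudoku negatives above.

def has_solution(grid):
    # Sketch only: return True if the partially filled 9x9 grid
    # (0 = empty) admits at least one completion.
    def valid(r, c, n):
        if any(grid[r][j] == n for j in range(9)):
            return False
        if any(grid[i][c] == n for i in range(9)):
            return False
        br, bc = 3 * (r // 3), 3 * (c // 3)
        return all(grid[br + i][bc + j] != n
                   for i in range(3) for j in range(3))

    for r in range(9):
        for c in range(9):
            if grid[r][c] == 0:
                for n in range(1, 10):
                    if valid(r, c, n):
                        grid[r][c] = n
                        if has_solution(grid):
                            grid[r][c] = 0   # restore before returning
                            return True
                        grid[r][c] = 0
                return False                  # empty cell with no candidate
    return True                               # no empty cells: already solved

Because the grid is restored on every backtrack, the caller's grid is left unchanged whether or not a solution exists.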
Define the menu layout
def get_html(self) -> List[ComponentMeta]: menu = dbc.DropdownMenu( children=[ dbc.DropdownMenuItem(_menu_settings["header"], header=True), dbc.DropdownMenuItem( _menu_settings["item-0"][0], _menu_settings["item-0"][1], external_link=True, target="_blank", ), dbc.DropdownMenuItem( _menu_settings["item-1"][0], _menu_settings["item-1"][1], external_link=True, target="_blank", ), ], in_navbar=True, label="Learn More", color="light", right=True, ) return [menu]
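The function above reads from a _menu_settings mapping that is not shown in the record. From the lookups alone one can infer roughly the following shape; every value below is a hypothetical placeholder, not data from the record.

_menu_settings = {
    "header": "Resources",  # dropdown header text (assumed example)
    # "item-N" entries are indexable pairs: index 0 is the item label,
    # index 1 is passed as the next positional argument of
    # dbc.DropdownMenuItem (placeholder URLs, purely illustrative).
    "item-0": ("Documentation", "https://example.com/docs"),
    "item-1": ("Source code", "https://example.com/repo"),
}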
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_menus( self ):", "def create_menu():", "def create_layout() -> None:\n\n st.sidebar.title(\"Menu\")\n app_mode = st.sidebar.selectbox(\"Please select a page\", [' I. Homepage',\n \"II. Download data\" ,\n \"III. Statistic Data\",\n ' IV. AGF Indices',\n ' V. Notes',\n \" VI. Rank of patient\" ])\n \n if app_mode == ' I. Homepage':\n load_homepage() \n elif app_mode == \"III. Statistic Data\":\n leyer.leyer() \n elif app_mode == ' IV. AGF Indices':\n single.AGF_indices() \n elif app_mode == \"II. Download data\":\n download_data.download_data() \n elif app_mode == ' V. Notes':\n text_input.text_input()\n elif app_mode == \" VI. Rank of patient\":\n rank_of_patient.rank_of_patient()", "def create_layout( self ):", "def layout(self):\n pass", "def CreateLayout(self):\n # Defines the title of the Dialog\n self.SetTitle(\"A Custom Dialog with a Top Menu\")\n\n # Flushes all the already existing menu to create our one. The content will be on the left.\n self.MenuFlushAll()\n\n # Creates a Sub menu begin to insert new menu entry\n self.MenuSubBegin(\"Left Menu\")\n\n # Adds a string with a given ID, so it will trigger a call to Command once clicked\n self.MenuAddString(self.ID_LEFT_MENU_FIRST_ITEM, \"Close\")\n\n # Finalizes the Sub Menu\n self.MenuSubEnd()\n\n # Finalizes the menu\n self.MenuFinished()\n\n # Creates a Group in the Menu. The content will be on the right\n if self.GroupBeginInMenuLine():\n # Creates a BitmapButtonCustomGui with the find icon\n settings = c4d.BaseContainer()\n settings[c4d.BITMAPBUTTON_BUTTON] = True\n settings[c4d.BITMAPBUTTON_BORDER] = False\n settings[c4d.BITMAPBUTTON_TOGGLE] = True\n settings[c4d.BITMAPBUTTON_ICONID1] = c4d.RESOURCEIMAGE_SCENEBROWSER_FIND2\n settings[c4d.BITMAPBUTTON_ICONID2] = c4d.RESOURCEIMAGE_SCENEBROWSER_FIND1\n\n self.displayContentButtonDlg = self.AddCustomGui(self.ID_RIGHT_MENU_SHOW_CONTENT,\n c4d.CUSTOMGUI_BITMAPBUTTON, \"\",\n c4d.BFH_CENTER | c4d.BFV_CENTER, 0, 0, settings)\n\n self.GroupEnd()\n\n # Creates a group that will contain the content that will be hidden when the BitmapButton is pressed. 
It's\n # important to have a parent group to the group that needs to be hidden since you need to redraw this parent\n # group after the visibility definition.\n if self.GroupBegin(self.ID_MAIN_GROUP, c4d.BFH_LEFT | c4d.BFV_CENTER):\n\n # The group that will be hidden\n if self.GroupBegin(self.ID_HIDDEN_GROUP, c4d.BFH_LEFT | c4d.BFV_CENTER):\n # Adds the content you want to toggle\n self.AddStaticText(0, c4d.BFH_LEFT | c4d.BFV_CENTER, name=\"test\")\n\n self.GroupEnd()\n self.GroupEnd()\n\n # Adds two buttons, Ok and Cancel\n self.AddDlgGroup(c4d.DLG_OK | c4d.DLG_CANCEL)\n\n return True", "def menuBar(self):\n\n\t\tmenu = Menu(self.master)\n\t\tself.master.config(menu=menu)\n\n\t\t#File Menu\n\t\tfyle = Menu(menu)\n\t\tfyle.add_command(label='New',command=self.newPad)\n\t\tfyle.add_command(label='Open File',command=self.open_file)\n\t\tfyle.add_command(label='Save', command=self.saveFile)\n\t\tfyle.add_command(label='Save As',command=self.saveFileAs)\n\t\tfyle.add_command(label='Exit', command=outer.destroy)\n\t\tmenu.add_cascade(label='File',menu=fyle)\n\n\t\t#Edit Menu\n\t\tedit = Menu(menu)\n\t\tedit.add_command(label='Cut')\n\t\tedit.add_command(label='Copy')\n\t\tedit.add_command(label='Paste')\n\t\tedit.add_command(label='Undo')\n\t\tedit.add_command(label='Redo')\n\t\tmenu.add_cascade(label='Edit',menu=edit)\n\n\t\t#View Menu\n\t\tview = Menu(menu)\n\t\tview.add_command(label='Line Numbers')\n\t\tmenu.add_cascade(label='View', menu=view)\n\n\n\t\t#Help Menu\n\t\thelp = Menu(menu)\n\t\thelp.add_command(label='About')\n\t\tmenu.add_cascade(label='Help',menu=help)", "def make_top_menus(self):\n menubar = tk.Menu(self)\n\n # create a pulldown menu for languages, and add it to the menu bar\n language_menu = tk.Menu(menubar, tearoff=0)\n language_menu.add_command(label=self.translate(\"English\"), command=lambda: self.replace_language('english'))\n language_menu.add_command(label=self.translate(\"Spanish\"), command=lambda: self.replace_language('spanish'))\n language_menu.add_command(label=self.translate(\"Portuguese\"), command=lambda: self.replace_language('portuguese'))\n menubar.add_cascade(label=self.translate(\"Languages\"), menu=language_menu)\n \n # create a pulldown menu for switching context areas, and add it to the menu bar\n context_menu = tk.Menu(menubar, tearoff=0)\n context_menu.add_command(label=self.translate(\"Chile\"), command=lambda: self.switch_context('Chile'))\n context_menu.add_command(label=self.translate(\"Indonesia\"), command=lambda: self.switch_context('Indonesia'))\n context_menu.add_command(label=self.translate(\"Luanda\"), command=lambda: self.switch_context('Luanda'))\n context_menu.add_command(label=self.translate(\"Querétaro\"), command=lambda: self.switch_context('Querétaro'))\n context_menu.add_command(label=self.translate(\"Rio de Janeiro\"), command=lambda: self.switch_context('Rio de Janeiro'))\n context_menu.add_command(label=self.translate(\"Santiago\"), command=lambda: self.switch_context('Santiago'))\n menubar.add_cascade(label=self.translate(\"Locations\"), menu=context_menu)\n \n # create a pulldown menu for arrangment, and add it to the menu bar\n language_menu = tk.Menu(menubar, tearoff=0)\n language_menu.add_command(label=self.translate(\"Graphs-Graphs\"), command=lambda: self.switch_arrangment(['Graph', 'Graph']))\n language_menu.add_command(label=self.translate(\"Graphs-Map\"), command=lambda: self.switch_arrangment(['Graph', 'Map']))\n language_menu.add_command(label=self.translate(\"Map-Graphs\"), command=lambda: 
self.switch_arrangment(['Map', 'Graph']))\n language_menu.add_command(label=self.translate(\"Map-Map\"), command=lambda: self.switch_arrangment(['Map', 'Map']))\n menubar.add_cascade(label=self.translate(\"Arrange\"), menu=language_menu)\n \n # create an exit command that closes the UI\n menubar.add_command(label=self.translate(\"Exit\"), command=self.destroy)\n \n # display the menu\n menubar.config(font=self.small_font)\n self.config(menu=menubar)\n \n return menubar", "def makeMenu(self):\n\t\tself.fileMenu = self.menuBar().addMenu(self.tr(\"&Arquivo\"))\n\t\tself.fileMenu.addAction(self.newAct)\n\t\tself.fileMenu.addAction(self.openAct)\n\t\tself.fileMenu.addAction(self.saveAct)\n\t\tself.fileMenu.addAction(self.exportAct)\n\t\tself.fileMenu.addSeparator() \n\t\tself.fileMenu.addAction(self.exitAct)\n\n\t\tself.editMenu = self.menuBar().addMenu(self.tr(\"&Editar\"))\n\t\t\n\t\tself.helpMenu = self.menuBar().addMenu(self.tr(\"&Ajuda\"))\n\t\tself.helpMenu.addAction(self.aboutAct)", "def init_layout(self):\n super(QtToolButton, self).init_layout()\n for child in self.children():\n if isinstance(child, QtMenu):\n self.widget.setMenu(child.widget)\n break", "def __admin_menu(self):\n log.debug(\"Displaying __admin_menu\")\n self.menu = TelegramMenu(\"config/comunda_admin_menu.bpmn\", self, \"MenuStart\")\n self.menu.admin_menu(\"MenuStart\", \"menu_admin_main_txt\")\n return", "def controls_setup(self):\n self.dashboard = element.Link(self, class_name='menu-dashboard', alias='Dashboard Menu')\n # catalogue and sub-menus\n self.catalogue = element.LinkButton(self, class_name='menu-catalogue', alias='Catalogue Menu')\n self.products = element.Link(self, class_name='submenu-products', alias='Catalogue->Products Menu')\n self.product_types = element.Link(self, class_name='submenu-product-types',\n alias='Catalogue->Product Types Menu')\n self.categories = element.Link(self, class_name='submenu-categories', alias='Catalogue->Categories Menu')\n self.ranges = element.Link(self, class_name='submenu-ranges', alias='Catalogue->Ranges Menu')\n self.stock_alerts = element.Link(self, class_name='submenu-low-stock-alerts',\n alias='Catalogue->Low Stock Alerts Menu')\n # fulfilment and sub-menus\n self.fulfilment = element.LinkButton(self, class_name='menu-fulfilment', alias='Fulfilment Menu')\n self.orders = element.Link(self, class_name='submenu-orders', alias='Fulfilment->Orders Menu')\n self.statistics = element.Link(self, class_name='submenu-statistics', alias='Fulfilment->Statistics Menu')\n self.partners = element.Link(self, class_name='submenu-partners', alias='Fulfilment->Partners Menu')\n # customers and sub-menus\n self.customers = element.LinkButton(self, class_name='menu-customers', alias='Customers Menu')\n self.customers_submenu = element.Link(self, class_name='submenu-customers', alias='Customers->Customers Menu')\n self.deleted_accounts = element.Link(self, class_name='submenu-deleted_accounts',\n alias='Customers->Deleted Accounts Menu')\n self.stock_alert_requests = element.Link(self, class_name='submenu-stock-alert-requests-customers',\n alias='Customers->Stock Alert Requests Menu')\n # offers and sub-menus\n self.offers = element.LinkButton(self, class_name='menu-offers', alias='Offers Menu')\n self.offers_submenu = element.Link(self, class_name='submenu-offers', alias='Offers->Offers Menu')\n self.vouchers = element.Link(self, class_name='submenu-vouchers', alias='Offers->Vouchers Menu')\n # content and sub-menus\n self.content_menu = element.LinkButton(self, 
class_name='menu-content', alias='Content Menu')\n self.pages = element.Link(self, class_name='menu-pages', alias='Content->Pages Menu')\n self.announcements = element.Link(self, class_name='menu-announcements', alias='Content->Announcements Menu')\n self.reviews = element.Link(self, class_name='menu-reviews', alias='Content->Reviews Menu')\n\n self.reports = element.Link(self, class_name='menu-reports', alias='Reports Menu')\n self.store_info = element.LinkButton(self, css_selector='.dropdown.store > a', alias='Store Info Menu')\n self.partner_store = []", "def create_menu_bar(self, parent_layout):\n self.menu_bar = QtWidgets.QMenuBar()\n # all menu bar tabs ===============\n # File -------------------\n file_menu = self.menu_bar.addMenu(\"File\")\n file_menu.setTearOffEnabled(1)\n\n # Menu bar actions ===============\n # File\n file_menu.addAction(self.export_settings_action)\n file_menu.addAction(self.import_settings_action)\n\n # Adding to the Layout ===============\n parent_layout.setMenuBar(self.menu_bar)", "def layout(self):\n menu = self.menuBar()\n menu.setNativeMenuBar(False)\n\n file_menu = menu.addMenu(\"File\")\n fitting_commands = menu.addMenu(\"Fitting\")\n help_menu = menu.addMenu(\"Help\")\n\n prog_info = QAction(\"About\", self)\n prog_info.triggered.connect(self.version)\n help_menu.addAction(prog_info)\n\n doc_info = QAction(\"Documentation\", self)\n doc_info.triggered.connect(self.docs)\n help_menu.addAction(doc_info)\n\n fit_exp = QAction(\"Fit Experiments\", self)\n fit_exp.setShortcut(\"Ctrl+F\")\n fit_exp.triggered.connect(self.fit_exp)\n fitting_commands.addAction(fit_exp)\n\n add_exp = QAction(\"Add Experiment\", self)\n add_exp.setShortcut(\"Ctrl+Shift+N\")\n add_exp.triggered.connect(self.add_file)\n file_menu.addAction(add_exp)\n\n save_exp = QAction(\"Export Results\", self)\n save_exp.setShortcut(\"Ctrl+S\")\n save_exp.triggered.connect(self.save_file)\n file_menu.addAction(save_exp)\n\n file_menu.addSeparator()\n\n new_exp = QAction(\"New Session\", self)\n new_exp.setShortcut(\"Ctrl+N\")\n new_exp.triggered.connect(self.new_exp)\n file_menu.addAction(new_exp)\n\n close_window = QAction(\"Close Window\", self)\n close_window.setShortcut(\"Ctrl+W\")\n close_window.triggered.connect(self.close_program)\n file_menu.addAction(close_window)\n\n # add shortcut actions to main window, for qt5 bug\n self.addAction(add_exp)\n self.addAction(fit_exp)\n self.addAction(save_exp)\n self.addAction(new_exp)\n self.addAction(close_window)\n self.addAction(doc_info)\n self.addAction(prog_info)\n\n self._exp = Splitter(self)\n self.setCentralWidget(self._exp)\n\n self.resize(1000, 600)\n self.move(QApplication.desktop().screen().rect().center()-self.rect().center())\n self.setWindowTitle('pytc')\n self.show()", "def defaultLayout():\n return ['OverlayDisplayToolBar',\n 'OrthoToolBar',\n 'OverlayListPanel',\n 'LocationPanel']", "def init_layout(self):\n pass", "def createMenus(self):\n\n self.fileMenu = QMenu(\"&File\", self)\n self.fileMenu.addAction(self.openAct)\n self.fileMenu.addAction(self.addAct)\n self.fileMenu.addSeparator()\n # self.fileMenu.addAction(self.showSessionAct)\n self.fileMenu.addAction(self.exitAct)\n\n self.helpMenu = QMenu(\"&Help\", self)\n self.helpMenu.addAction(self.aboutAct)\n self.helpMenu.addAction(self.aboutQtAct)\n\n self.viewMenu = QMenu(\"&View\", self)\n\n self.sortMenu = QMenu(\"Sort by\", self.viewMenu, enabled=False)\n self.groupMenu = QMenu(\"Group by\", self.viewMenu, enabled=False)\n\n self.showGroupMenu = QMenu(\"Load Group\", 
self.fileMenu, enabled=False)\n self.addGroupDataMenu = QMenu('Add Group', self.fileMenu, enabled=False)\n self.fileMenu.addMenu(self.showGroupMenu)\n self.fileMenu.addMenu(self.addGroupDataMenu)\n self.fileMenu.addAction(self.seeAllGroupAct)\n self.viewMenu.addMenu(self.groupMenu)\n self.viewMenu.addMenu(self.sortMenu)\n\n # Add filters to \"Sort by\"\n self.create_sort_menu()\n self.sortMenu.addAction(self.ageSortAct)\n self.sortMenu.addAction(self.sexSortAct)\n self.sortMenu.addAction(self.genotypeSortAct)\n self.sortMenu.addAction(self.speciesSortAct)\n self.sortMenu.addAction(self.subjectIDSortAct)\n self.sortMenu.addAction(self.weightSortAct)\n self.sortMenu.addAction(self.birthSortAct)\n self.sortMenu.addSeparator()\n\n self.sortMenu.addAction(self.fluorescenceSortAct)\n self.sortMenu.addAction(self.imagesegSortAct)\n self.sortMenu.addAction(self.rasterSortAct)\n\n # Add filters to \"Group by\"\n self.create_group_menu()\n self.groupMenu.addAction(self.ageGroupAct)\n self.groupMenu.addAction(self.sexGroupAct)\n self.groupMenu.addAction(self.genotypeGroupAct)\n self.groupMenu.addAction(self.speciesGroupAct)\n self.groupMenu.addAction(self.subjectIDGroupAct)\n self.groupMenu.addAction(self.weightGroupAct)\n self.groupMenu.addAction(self.birthGroupAct)\n\n self.groupMenu.addSeparator()\n\n self.groupMenu.addAction(self.fluorescenceGroupAct)\n self.groupMenu.addAction(self.imagesegGroupAct)\n self.groupMenu.addAction(self.rasterGroupAct)\n\n self.menuBar().addMenu(self.fileMenu)\n self.menuBar().addMenu(self.viewMenu)\n self.menuBar().addMenu(self.helpMenu)", "def init_layout(self):\n\t\tself.pack_start(self.edit, expand=True)\n\t\tself.pack_start(self.button, expand=False)\n\t\tself.show_all()", "def _generate_layout(self):\n\n pass", "def menu():\n return render_template('menu.html')", "def menu(self):\n menu = list()\n \n \n menu.extend([\n {\n 'title': 'Bootstrap Demo',\n 'href': self.request.route_url('bootstrap_demo'),\n 'icon': \"fa fa-twitter-square\"\n },\n {\n 'title': 'Jade Demo',\n 'href': self.request.route_url('jade_demo'),\n 'icon': \"fa fa-indent\"\n },\n ])\n if self.user:\n menu.extend([\n {\n 'title': 'Entities',\n 'icon': \"fa fa-bar-chart\",\n 'dropdown': [\n {\n 'title': 'All entities',\n 'href': self.request.route_url(\n 'entities',\n ext='html',\n _query={\n 'renderer': 'datatable',\n 'options': 'serverside-columnsearch'\n }\n ),\n 'icon': \"fa fa-bar-chart\"},\n {\n 'title': 'CPTs',\n 'href': self.request.route_url(\n 'cpts',\n ext='html',\n _query={\n 'renderer': 'datatable',\n 'options': 'columnsearch'\n }\n ),\n }\n ]\n }\n ]),\n if self.user.has_admin:\n menu.append(\n {\n 'title': \"User Management\",\n 'icon': \"fa fa-users\",\n 'dropdown': [\n {\n 'title': 'User Overview',\n 'href': self.request.route_url(\n 'users',\n ext='html',\n _query={\n 'renderer': 'datatable',\n 'options': 'serverside-columnsearch'\n }\n ),\n 'icon': 'fa fa-users',\n },\n {\n 'title': 'Add User',\n 'href': self.request.route_url('user_create'),\n 'icon': 'fa fa-user-plus',\n }\n ]\n }\n )\n\n return menu", "def createMenubar(self):\r\n # Create menubar\r\n self.menubar = tk.Menu(tearoff=False)\r\n self.root.config(menu=self.menubar)\r\n filemenu = tk.Menu(self.menubar,tearoff=False)\r\n filemenu.add_command(label=\"Edit Video/fNIRS Sources\",command=self.launchImportWindow)\r\n filemenu.add_command(label=\"Synchronise Video/fNIRS\",command=self.launchSyncToolWindow)\r\n filemenu.add_command(label=\"Help\",command=self.launchHelpWindow)\r\n 
filemenu.add_command(label=\"Quit\",command=self.quit)\r\n self.menubar.add_cascade(label=\"Project\",menu=filemenu)", "def create_menu(self, root):\n menubar = Menu(root)\n root['menu'] = menubar\n\n menu_file = Menu(menubar)\n menu_run = Menu(menubar)\n menu_folders = Menu(menubar)\n menu_links = Menu(menubar)\n menu_help = Menu(menubar)\n menu_beta = Menu(menubar)\n menubar.add_cascade(menu=menu_file, label='File')\n menubar.add_cascade(menu=menu_run, label='Run')\n menubar.add_cascade(menu=menu_folders, label='Folders')\n menubar.add_cascade(menu=menu_links, label='Links')\n menubar.add_cascade(menu=menu_help, label='Help')\n menubar.add_cascade(menu=menu_beta, label='Experimental')\n\n menu_file.add_command(\n label='Re-load param set', command=self.load_params,\n accelerator='Ctrl+L')\n menu_file.add_command(\n label='Re-save param set', command=self.save_params,\n accelerator='Ctrl+S')\n menu_file.add_command(\n label='Output log', command=lambda: LogWindow(self.root))\n if sys.platform != 'darwin':\n menu_file.add_command(\n label='Exit', command=self.exit_program, accelerator='Alt+F4')\n root.bind_all('<Control-l>', lambda e: self.load_params())\n root.bind_all('<Control-s>', lambda e: self.save_params())\n\n menu_run.add_command(\n label='Dwarf Fortress', command=self.lnp.run_df,\n accelerator='Ctrl+R')\n menu_run.add_command(\n label='Init Editor', command=self.run_init, accelerator='Ctrl+I')\n root.bind_all('<Control-r>', lambda e: self.lnp.run_df())\n root.bind_all('<Control-i>', lambda e: self.run_init())\n\n menu_folders.add_command(\n label='Savegame Folder', command=self.lnp.open_savegames)\n menu_folders.add_command(\n label='Utilities Folder', command=self.lnp.open_utils)\n menu_folders.add_command(\n label='Graphics Folder', command=self.lnp.open_graphics)\n menu_folders.add_separator()\n menu_folders.add_command(\n label='Main Folder', command=self.lnp.open_main_folder)\n menu_folders.add_command(\n label='LNP Folder', command=self.lnp.open_lnp_folder)\n menu_folders.add_command(\n label='Dwarf Fortress Folder', command=self.lnp.open_df_folder)\n menu_folders.add_command(\n label='Init Folder', command=self.lnp.open_init_folder)\n\n menu_links.add_command(\n label=\"DF Homepage\", command=self.lnp.open_df_web)\n menu_links.add_command(label=\"DF Wiki\", command=self.lnp.open_wiki)\n menu_links.add_command(label=\"DF Forums\", command=self.lnp.open_forums)\n\n menu_help.add_command(\n label=\"Help\", command=self.show_help, accelerator='F1')\n menu_help.add_command(\n label=\"About\", command=self.show_about, accelerator='Alt+F1')\n root.bind_all('<F1>', lambda e: self.show_help())\n root.bind_all('<Alt-F1>', lambda e: self.show_about())\n root.createcommand('tkAboutDialog', self.show_about)\n\n menu_beta.add_command(\n label='Toggle graphics pack patching', command=self.toggle_patching)", "def main_aqa_layout():\n\n layout = html.Div(\n [\n # html.Div(\n # id=server.config['NAVBAR_CONTAINER_ID'],\n # children=[\n # navbar\n # ],\n # # id=\"header\",\n # # children=[\n # # # Header(),\n # # html.Div(\n # # id=server.config['NAVBAR_CONTAINER_ID'],\n # # children=navbar\n # # ),\n # # ]\n # ),\n dcc.Location(id=\"url\", pathname=\"/\"),\n navbar,\n dbc.Container(id=\"content\", style={\"padding\": \"20px\"}),\n ]\n )\n\n return layout", "def _do_layout(self):\n return", "def _create_menu(self):\n menubar = Menu(self.root)\n # Game dropdown menu\n gamemenu = Menu(menubar, tearoff=0)\n gamemenu.add_command(\n label='New Game', command=self.new_game)\n 
gamemenu.add_separator()\n gamemenu.add_command(label='Exit', command=self.root.quit)\n menubar.add_cascade(label='Game', menu=gamemenu)\n\n # Options dropdown menu\n optionmenu = Menu(menubar, tearoff=0)\n optionmenu.add_command(label='Scattershot', command=self.game.spray)\n optionmenu.add_command(label='All Ships Attack', command=None)\n menubar.add_cascade(label='Options', menu=optionmenu)\n\n # Help dropdown menu\n helpmenu = Menu(menubar, tearoff=0)\n helpmenu.add_command(label='Help Index', command=None)\n helpmenu.add_command(label='About...', command=None)\n menubar.add_cascade(label='Help', menu=helpmenu)\n\n self.root.config(menu=menubar)", "def contextMenuEvent(self, event):\n\n cmenu = QMenu(self)\n quitAct = cmenu.addAction(\"Quit\")\n fullScreenAct = cmenu.addAction(\"Toggle fullscreen\")\n stretchAct = cmenu.addAction(\"Toggle stretch\")\n coordAct = cmenu.addAction(\"Show/Hide coordinates\")\n fixedAct = cmenu.addAction(\"Show/Hide fix state\")\n labelAct = cmenu.addAction(\"Show/Hide labels\")\n layoutMenu = cmenu.addMenu(\"Layout\")\n\n if self.layoutManager.layout.adjustNumberAllowed:\n entries = []\n maxMenu = cmenu.addMenu(\"Max Cams\")\n entries.extend([i for i in range(1, 1 + len(self.layoutManager.camIds))])\n entries.append(\"Unlimited\")\n for e in entries:\n a = maxMenu.addAction(str(e))\n a.name = \"limit\"\n a.value = e\n\n entries = self.layoutManager.repository.getAllLayoutIds()\n for e in entries:\n a = layoutMenu.addAction(str(e))\n a.name = \"layout\"\n a.value = e\n\n action = cmenu.exec_(self.mapToGlobal(event.pos()))\n if action == quitAct:\n qApp.quit()\n elif action == fullScreenAct:\n if self.isFullScreen():\n self.showNormal()\n self.setCursor(Qt.ArrowCursor)\n else:\n self.showFullScreen()\n self.setCursor(Qt.BlankCursor)\n elif action == stretchAct:\n current = self.layoutManager.view.stretch\n self.layoutManager.setStretchMode(not current)\n elif action == labelAct:\n current = self.layoutManager.view.showLabels\n self.layoutManager.setLabelMode(not current)\n elif action == coordAct:\n current = self.layoutManager.view.showCoords\n self.layoutManager.setLabelCoordMode(not current)\n elif action == fixedAct:\n current = self.layoutManager.view.showFixed\n self.layoutManager.setLabelFixedMode(not current)\n elif hasattr(action, 'name'):\n if action.name == \"limit\":\n v = action.value\n self.layoutManager.setMaxCams(0 if v == \"Unlimited\" else v)\n elif action.name == \"layout\":\n self.layoutManager.setLayout(action.value)\n self.updateStatusBar()", "def menuBarLayout(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, childArray: bool=True, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dropCallback:\n Script=None, enable: bool=True, enableBackground: bool=True,\n enableKeyboardFocus: bool=True, exists: bool=True, fullPathName: bool=True,\n height: Union[int, bool]=0, highlightColor: Union[List[float, float, float],\n bool]=None, isObscured: bool=True, manage: bool=True, menuArray: bool=True,\n menuBarVisible: bool=True, menuIndex: List[AnyStr, int]=None, noBackground:\n bool=True, numberOfChildren: bool=True, numberOfMenus: bool=True,\n numberOfPopupMenus: bool=True, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, preventOverride: bool=True, statusBarMessage:\n AnyStr=\"\", useTemplate: AnyStr=\"\", visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True,\n e=True, 
edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def Crearmenu():\n layoutM = [\n [sg.T(\"ScrabbleAR\", size=(16, 1), justification=\"center\",\n font=(\"Times New Roman\", 25))],\n [sg.T(\" Bienvenido a ScrabbleAR!, el juego donde \")],\n [sg.T(\" hay que armar palabras para ganar \")],\n [sg.B(\"Iniciar nuevo juego\", size=(17, 1), key=\"inicio\"),\n sg.B(\"Configuracion\", size=(17, 1), key=\"config\")],\n [sg.B(\"Puntuaciones\", size=(17, 1), key=\"puntos\"),\n sg.B(\"Salir\", size=(17, 1), key=\"exit\")]\n ]\n\n if(os.path.isfile(\"Guardado.json\")):\n layoutM += [[sg.B(\"Continuar partida\", size=(36, 1), key=\"continue\")]]\n\n window = sg.Window(\"ScrabbleAR - Menu\", layoutM)\n\n return window", "def setLayout():\n global layout\n\n if p.GetString(\"Layout\") == \"Grid\":\n layoutFlow.setEnabled(False)\n layoutGrid.setEnabled(True)\n layoutStretch.setEnabled(True)\n layout = layoutGrid\n else:\n layoutGrid.setEnabled(False)\n layoutStretch.setEnabled(False)\n layoutFlow.setEnabled(True)\n layout = layoutFlow", "def __createLayout(self):\r\n self.__createCanvas()\r\n self.__createButton()\r\n self.__createInputFunction()\r\n self.__createLimits()\r\n self.__styleLayout()", "def show_menus(self, type_):\n if type_ == self._current:\n # do nothing\n pass\n else:\n if self._current == self.TYPE_VOIGT:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n elif self._current == self.TYPE_GISO:\n # Plot menus are visible; hide them.\n plot_menu_labels = [menu.label for menu in self._plot_menus]\n\n for menu in self.top_level_menus:\n if menu.label in plot_menu_labels:\n self.Remove(self.FindMenu(menu.label))\n\n # Rebuild the view menu by deleting everything from it and then \n # reappending the appropriate items.\n while self.view_menu.GetMenuItemCount():\n #self.view_menu.DeleteItem(self.view_menu.FindItemByPosition(0))\n self.view_menu.Delete(self.view_menu.FindItemByPosition(0))\n\n _append_items(self._main, self.view_menu, self._menu_data[type_])\n\n if type_ == self.TYPE_VOIGT:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n elif type_ == self.TYPE_GISO:\n # add plot menus\n for menu in self._plot_menus[::-1]:\n self.Insert(_PLOT_MENU_INSERT_INDEX, menu, menu.label)\n # Under wxPython 2.9, the menus I add with this call to \n # Insert() don't have their label set. I think it's a bug,\n # but I can't recreate it outside of this app. 
Manually\n # setting the label here is a workaround.\n self.SetMenuLabel(_PLOT_MENU_INSERT_INDEX, menu.label)\n\n\n self._current = type_", "def initMenu(self):\n self.fileMenu = self.menuBar().addMenu(self.tr(\"&File\"))\n self.fileMenu.addAction(self.createProjectAction)\n self.fileMenu.addAction(self.openProjectAction)\n\n #TODO : problem displaying submenu\n #self.recentMenu = self.fileMenu.addMenu(self.tr(\"Open &recent\"))\n #for recentProject in self._controller.getSession().recentProjects():\n #recentAction = QtGui.QAction(self.tr(str(recentProject.getPath())), self)\n #self.connect(recentAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"openRecent(recentProject.getPath())\"))\n #self.recentMenu.addAction(recentAction)\n\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.importVideoAction)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.saveProjectAction)\n\n self.helpMenu = self.menuBar().addMenu(self.tr(\"&Help\"))\n self.helpMenu.addAction(self.aboutAction)", "def footer_nav(self):\r\n buttons = [NamedButton(\"help\", False, nocname=True),\r\n NamedButton(\"blog\", False, nocname=True),\r\n NamedButton(\"stats\", False, nocname=True),\r\n NamedButton(\"feedback\", False),\r\n NamedButton(\"bookmarklets\", False),\r\n NamedButton(\"socialite\", False),\r\n NamedButton(\"buttons\", True),\r\n NamedButton(\"widget\", True),\r\n NamedButton(\"code\", False, nocname=True),\r\n NamedButton(\"mobile\", False, nocname=True),\r\n NamedButton(\"store\", False, nocname=True),\r\n NamedButton(\"ad_inq\", False, nocname=True),\r\n ]\r\n\r\n return NavMenu(buttons, base_path = \"/\", type = \"flatlist\")", "def controls_setup(self):\n\n self.new_items = element.TopMenuLink(self, alias='New Items Menu',\n css_selector='#browse > li:nth-child(1) > a')\n if self.fixed:\n self.jewelry = element.TopMenuLink(self, alias='Jewelry Menu',\n css_selector='#browse > li:nth-child(2) > a')\n self.artwork = element.TopMenuLink(self, alias='Artwork Menu',\n css_selector='#browse > li:nth-child(3) > a')\n self.homeware = element.TopMenuLink(self, alias='Homeware Menu',\n css_selector='#browse > li:nth-child(4) > a')\n self.clothing = element.TopMenuLink(self, alias='Clothing Menu',\n css_selector='#browse > li:nth-child(5) > a')\n self.vintage = element.TopMenuLink(self, alias='Vintage Menu',\n css_selector='#browse > li:nth-child(6) > a')\n self.food = element.TopMenuLink(self, alias='Food Menu',\n css_selector='#browse > li:nth-child(7) > a')\n self.rings = element.SubMenuLink(self, alias='Jewelry->Rings Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(1) > a')\n self.necklaces = element.SubMenuLink(self, alias='Jewelry->Necklaces Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(2) > a')\n self.bracelets = element.SubMenuLink(self, alias='Jewelry->Bracelets Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(3) > a')\n self.earrings = element.SubMenuLink(self, alias='Jewelry->Earrings Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(4) > a')\n self.broaches = element.SubMenuLink(self, alias='Jewelry->Broaches and Pins Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(5) > a')\n self.wall_art = element.SubMenuLink(self, alias='Artwork->Wall Art Submenu',\n css_selector='#browse > li:nth-child(3) > ul > li:nth-child(1) > a')\n self.ceramics = element.SubMenuLink(self, alias='Jewelry->Ceramics Submenu',\n css_selector='#browse > li:nth-child(3) > ul > 
li:nth-child(2) > a')\n self.sculpture = element.SubMenuLink(self, alias='Jewelry->Sculpture Submenu',\n css_selector='#browse > li:nth-child(3) > ul > li:nth-child(3) > a')\n self.living = element.SubMenuLink(self, alias='Homeware->Living Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(1) > a')\n self.furniture = element.SubMenuLink(self, alias='Homeware->Furniture Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(2) > a')\n self.kitchen = element.SubMenuLink(self, alias='Homeware->Kitchen Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(3) > a')\n self.lighting = element.SubMenuLink(self, alias='Homeware->Lighting Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(4) > a')\n self.outdoor = element.SubMenuLink(self, alias='Homeware->Outdoor Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(5) > a')\n self.organize = element.SubMenuLink(self, alias='Homeware->Organize Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(6) > a')\n self.party = element.SubMenuLink(self, alias='Homeware->Party Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(7) > a')\n self.sub_food = element.SubMenuLink(self, alias='Homeware->Food Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(8) > a')\n self.pets = element.SubMenuLink(self, alias='Homeware->For Pets Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(9) > a')\n self.women = element.SubMenuLink(self, alias='Clothing->Women Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(1) > a')\n self.men = element.SubMenuLink(self, alias='Clothing->Men Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(2) > a')\n self.kids = element.SubMenuLink(self, alias='Clothing->Kids Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(3) > a')\n self.pet_lovers = element.SubMenuLink(self, alias='Clothing->Pet Lovers Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(4) > a')\n self.antiques = element.SubMenuLink(self, alias='Vintage->Antiques Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(1) > a')\n self.accessories = element.SubMenuLink(self, alias='Vintage->Accessories Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(2) > a')\n self.sub_clothing = element.SubMenuLink(self, alias='Vintage->Clothing Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(3) > a')\n self.sub_homeware = element.SubMenuLink(self, alias='Vintage->Homeware Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(4) > a')\n self.toys = element.SubMenuLink(self, alias='Vintage->Toys Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(5) > a')\n self.books = element.SubMenuLink(self, alias='Vintage->Books Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(6) > a')\n else:\n self.menus = []", "def _createDisplayMenu(ned, menu):\n pass", "def initMenu(self, menu):\n menu.clear()\n \n self.subMenus = []\n \n adminMenu = QMenu(self.tr(\"Administration\"), menu)\n adminMenu.setTearOffEnabled(True)\n adminMenu.addAction(self.gitShowConfigAct)\n adminMenu.addAction(self.gitRepoConfigAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitReflogBrowserAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateIgnoreAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateArchiveAct)\n 
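# separator, then repository statistics and housekeeping actions\n 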
adminMenu.addSeparator()\n adminMenu.addAction(self.gitStatisticsAct)\n adminMenu.addAction(self.gitVerifyAct)\n adminMenu.addAction(self.gitHouseKeepingAct)\n self.subMenus.append(adminMenu)\n \n bundleMenu = QMenu(self.tr(\"Bundle Management\"), menu)\n bundleMenu.setTearOffEnabled(True)\n bundleMenu.addAction(self.gitBundleAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleVerifyAct)\n bundleMenu.addAction(self.gitBundleListHeadsAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleApplyFetchAct)\n bundleMenu.addAction(self.gitBundleApplyPullAct)\n self.subMenus.append(bundleMenu)\n \n patchMenu = QMenu(self.tr(\"Patch Management\"), menu)\n patchMenu.setTearOffEnabled(True)\n patchMenu.addAction(self.gitCheckPatchesAct)\n patchMenu.addAction(self.gitApplyPatchesAct)\n patchMenu.addSeparator()\n patchMenu.addAction(self.gitShowPatcheStatisticsAct)\n self.subMenus.append(patchMenu)\n \n bisectMenu = QMenu(self.tr(\"Bisect\"), menu)\n bisectMenu.setTearOffEnabled(True)\n bisectMenu.addAction(self.gitBisectStartAct)\n bisectMenu.addAction(self.gitBisectStartExtendedAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectGoodAct)\n bisectMenu.addAction(self.gitBisectBadAct)\n bisectMenu.addAction(self.gitBisectSkipAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectResetAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectLogBrowserAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectCreateReplayAct)\n bisectMenu.addAction(self.gitBisectEditReplayAct)\n bisectMenu.addAction(self.gitBisectReplayAct)\n self.subMenus.append(bisectMenu)\n \n tagsMenu = QMenu(self.tr(\"Tags\"), menu)\n tagsMenu.setIcon(UI.PixmapCache.getIcon(\"vcsTag.png\"))\n tagsMenu.setTearOffEnabled(True)\n tagsMenu.addAction(self.vcsTagAct)\n tagsMenu.addAction(self.gitTagListAct)\n tagsMenu.addAction(self.gitDescribeTagAct)\n self.subMenus.append(tagsMenu)\n \n branchesMenu = QMenu(self.tr(\"Branches\"), menu)\n branchesMenu.setIcon(UI.PixmapCache.getIcon(\"vcsBranch.png\"))\n branchesMenu.setTearOffEnabled(True)\n branchesMenu.addAction(self.gitBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitBranchListAct)\n branchesMenu.addAction(self.gitMergedBranchListAct)\n branchesMenu.addAction(self.gitNotMergedBranchListAct)\n branchesMenu.addAction(self.gitShowBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitDeleteRemoteBranchAct)\n self.subMenus.append(branchesMenu)\n \n changesMenu = QMenu(self.tr(\"Manage Changes\"), menu)\n changesMenu.setTearOffEnabled(True)\n changesMenu.addAction(self.gitUnstageAct)\n changesMenu.addAction(self.vcsRevertAct)\n changesMenu.addAction(self.vcsMergeAct)\n changesMenu.addAction(self.gitCommitMergeAct)\n changesMenu.addAction(self.gitCancelMergeAct)\n \n remotesMenu = QMenu(self.tr(\"Remote Repositories\"), menu)\n remotesMenu.setTearOffEnabled(True)\n remotesMenu.addAction(self.gitRemotesShowAct)\n remotesMenu.addAction(self.gitRemoteShowAct)\n remotesMenu.addSeparator()\n remotesMenu.addAction(self.gitRemoteAddAct)\n remotesMenu.addAction(self.gitRemoteRenameAct)\n remotesMenu.addAction(self.gitRemoteChangeUrlAct)\n remotesMenu.addAction(self.gitRemoteCredentialsAct)\n remotesMenu.addAction(self.gitRemoteRemoveAct)\n remotesMenu.addAction(self.gitRemotePruneAct)\n \n cherrypickMenu = QMenu(self.tr(\"Cherry-pick\"), menu)\n cherrypickMenu.setIcon(UI.PixmapCache.getIcon(\"vcsGraft.png\"))\n cherrypickMenu.setTearOffEnabled(True)\n 
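# cherry-pick workflow: start, continue, quit, abort\n 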
cherrypickMenu.addAction(self.gitCherryPickAct)\n cherrypickMenu.addAction(self.gitCherryPickContinueAct)\n cherrypickMenu.addAction(self.gitCherryPickQuitAct)\n cherrypickMenu.addAction(self.gitCherryPickAbortAct)\n \n stashMenu = QMenu(self.tr(\"Stash\"), menu)\n stashMenu.setTearOffEnabled(True)\n stashMenu.addAction(self.gitStashAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBrowserAct)\n stashMenu.addAction(self.gitStashShowAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashApplyAct)\n stashMenu.addAction(self.gitStashPopAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBranchAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashDropAct)\n stashMenu.addAction(self.gitStashClearAct)\n \n submodulesMenu = QMenu(self.tr(\"Submodules\"), menu)\n submodulesMenu.setTearOffEnabled(True)\n submodulesMenu.addAction(self.gitSubmoduleAddAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesInitAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateInitAct)\n submodulesMenu.addAction(self.gitSubmodulesDeinitAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesUpdateAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateRemoteAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateOptionsAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesSyncAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesListAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesStatusAct)\n submodulesMenu.addAction(self.gitSubmodulesSummaryAct)\n \n act = menu.addAction(\n UI.PixmapCache.getIcon(\n os.path.join(\"VcsPlugins\", \"vcsGit\", \"icons\", \"git.png\")),\n self.vcs.vcsName(), self._vcsInfoDisplay)\n font = act.font()\n font.setBold(True)\n act.setFont(font)\n menu.addSeparator()\n \n menu.addAction(self.gitFetchAct)\n menu.addAction(self.gitPullAct)\n menu.addSeparator()\n menu.addAction(self.vcsCommitAct)\n menu.addAction(self.gitPushAct)\n menu.addSeparator()\n menu.addMenu(changesMenu)\n menu.addMenu(stashMenu)\n menu.addSeparator()\n menu.addMenu(cherrypickMenu)\n menu.addSeparator()\n menu.addMenu(bundleMenu)\n menu.addMenu(patchMenu)\n menu.addSeparator()\n menu.addMenu(remotesMenu)\n menu.addMenu(submodulesMenu)\n menu.addSeparator()\n menu.addMenu(tagsMenu)\n menu.addMenu(branchesMenu)\n menu.addSeparator()\n menu.addAction(self.gitLogBrowserAct)\n menu.addSeparator()\n menu.addAction(self.vcsStatusAct)\n menu.addSeparator()\n menu.addAction(self.vcsDiffAct)\n menu.addAction(self.gitExtDiffAct)\n menu.addSeparator()\n menu.addAction(self.vcsSwitchAct)\n menu.addSeparator()\n menu.addMenu(bisectMenu)\n menu.addSeparator()\n menu.addAction(self.vcsCleanupAct)\n menu.addSeparator()\n menu.addAction(self.vcsCommandAct)\n menu.addSeparator()\n menu.addMenu(adminMenu)\n menu.addSeparator()\n menu.addAction(self.gitEditUserConfigAct)\n menu.addAction(self.gitConfigAct)\n menu.addSeparator()\n menu.addAction(self.vcsNewAct)\n menu.addAction(self.vcsExportAct)", "def do_layout(self):\n self.define_panel_structure()\n self.layout_selection()\n self.layout_data_list()\n self.layout_batch()\n self.layout_button()", "def menu(self):\n try:\n return get_template('{}/menu.html'.format(self.label))\n except TemplateDoesNotExist:\n return Template('')", "def Adjust_Menu( self, menuoptions = 0):\r\n pass\r\n #base_tree = 6\r\n #profile_tree = 7\r\n #if( menuoptions == 0 ):\r\n # self.treeview_menu.entryconfig( 
base_tree , state=\"active\" )\r\n # self.treeview_menu.entryconfig( profile_tree , state=\"disabled\" )\r\n # self.menu.entryconfig( 4 , state=\"active\" )\r\n # self.menu.entryconfig( 5 , state=\"disabled\" )\r\n #elif(menuoptions == 1):\r\n # self.treeview_menu.entryconfig(base_tree ,state=\"disabled\")\r\n # self.treeview_menu.entryconfig(profile_tree ,state=\"active\")\r\n # self.menu.entryconfig(4 ,state=\"disabled\")\r\n # self.menu.entryconfig(5 ,state=\"active\")\r", "def initialise_menu_bar(self):\n # Set up options to change SLAM type.\n slam_type_menu = self.menuBar().addMenu('SLAM Type')\n slam_type_ag = QtWidgets.QActionGroup(self, exclusive=True)\n slam_type_scan_matching = slam_type_ag.addAction(QtWidgets.QAction('Scan Matching', self, checkable=True))\n slam_type_hough = slam_type_ag.addAction(QtWidgets.QAction('Hough Landmarks', self, checkable=True))\n slam_type_ransac = slam_type_ag.addAction(QtWidgets.QAction('RANSAC Landmarks', self, checkable=True))\n slam_type_naive = slam_type_ag.addAction(QtWidgets.QAction('Naive', self, checkable=True))\n slam_type_scan_matching.triggered.connect(lambda x: self.set_slam_type(SlamMode.SCAN_MATCHING))\n slam_type_hough.triggered.connect(lambda x: self.set_slam_type(LandmarkMode.HOUGH))\n slam_type_ransac.triggered.connect(lambda x: self.set_slam_type(LandmarkMode.RANSAC))\n slam_type_naive.triggered.connect(lambda x: self.set_slam_type(SlamMode.NAIVE))\n slam_type_menu.addAction(slam_type_scan_matching)\n slam_type_menu.addAction(slam_type_hough)\n slam_type_menu.addAction(slam_type_ransac)\n slam_type_menu.addAction(slam_type_naive)\n self.menuBar().addMenu(slam_type_menu)\n\n # Set up miscellaneous options.\n options_menu = QtWidgets.QMenu(\"Options\", self)\n options_automatic = QtWidgets.QAction(\"Automatic\", options_menu, checkable=True)\n options_automatic.triggered.connect(self.set_automatic)\n options_menu.addAction(options_automatic)\n self.menuBar().addMenu(options_menu)\n\n # Set up options for the display.\n display_menu = QtWidgets.QMenu(\"Display Mode\", self)\n display_ag = QtWidgets.QActionGroup(self, exclusive=True)\n display_map = display_ag.addAction(QtWidgets.QAction('Map Distribution', self, checkable=True))\n display_prob = display_ag.addAction(QtWidgets.QAction('Probability Distribution', self, checkable=True))\n display_map.triggered.connect(lambda x: self.set_map_mode(MapMode.DIST))\n display_prob.triggered.connect(lambda x: self.set_map_mode(MapMode.PROB))\n display_menu.addAction(display_map)\n display_menu.addAction(display_prob)\n self.menuBar().addMenu(display_menu)\n\n # Set up options for how the robot is followed.\n tracking_menu = QtWidgets.QMenu(\"Tracking Mode\", self)\n tracking_ag = QtWidgets.QActionGroup(self, exclusive=True)\n tracking_free = tracking_ag.addAction(QtWidgets.QAction('Free', self, checkable=True))\n tracking_adjusted = tracking_ag.addAction(QtWidgets.QAction('Adjusted Robot', self, checkable=True))\n tracking_raw = tracking_ag.addAction(QtWidgets.QAction('Raw Robot', self, checkable=True))\n tracking_free.triggered.connect(lambda x: self.set_tracking_mode(TrackingMode.FREE))\n tracking_adjusted.triggered.connect(lambda x: self.set_tracking_mode(TrackingMode.ADJUSTED))\n tracking_raw.triggered.connect(lambda x: self.set_tracking_mode(TrackingMode.STATE))\n tracking_menu.addAction(tracking_raw)\n tracking_menu.addAction(tracking_free)\n tracking_menu.addAction(tracking_adjusted)\n self.menuBar().addMenu(tracking_menu)\n\n # Set up the map display mode.\n map_menu = 
QtWidgets.QMenu(\"Map Display Mode\", self)\n map_ag = QtWidgets.QActionGroup(self, exclusive=True)\n map_local = map_ag.addAction(QtWidgets.QAction('Local', self, checkable=True))\n map_global = map_ag.addAction(QtWidgets.QAction('Global', self, checkable=True))\n map_local.triggered.connect(lambda x: self.set_display_mode(self.grid.view_mode.LOCAL))\n map_global.triggered.connect(lambda x: self.set_display_mode(self.grid.view_mode.ADJUSTED))\n map_menu.addAction(map_local)\n map_menu.addAction(map_global)\n self.menuBar().addMenu(map_menu)\n\n # Set up the probability display mode.\n probability_menu = QtWidgets.QMenu(\"Probability Mode\", self)\n probability_ag = QtWidgets.QActionGroup(self, exclusive=True)\n probability_prior = probability_ag.addAction(QtWidgets.QAction('Prior Probabilities', self, checkable=True))\n probability_slam = probability_ag.addAction(QtWidgets.QAction('SLAM Probabilities', self, checkable=True))\n probability_combined = probability_ag.addAction(\n QtWidgets.QAction('Combined Probabilities', self, checkable=True))\n probability_prior.triggered.connect(\n lambda x: self.set_probability_mode(self.grid.probability_mode.PRIOR_PROBABILITIES))\n probability_slam.triggered.connect(\n lambda x: self.set_probability_mode(self.grid.probability_mode.SLAM_PROBABILITIES))\n probability_combined.triggered.connect(\n lambda x: self.set_probability_mode(self.grid.probability_mode.COMBINED_PROBABILITIES))\n probability_menu.addAction(probability_prior)\n probability_menu.addAction(probability_slam)\n probability_menu.addAction(probability_combined)\n self.menuBar().addMenu(probability_menu)\n\n # Set up the probability slider.\n probability_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n probability_slider.setMinimum(0)\n probability_slider.setMaximum(100)\n probability_slider.setValue(50)\n probability_slider.setTickPosition(QtWidgets.QSlider.TicksBelow)\n probability_slider.setTickInterval(5)\n probability_slider.valueChanged.connect(self.set_probability_alpha)\n\n # Add the slider, status bar and title.\n self.statusBar.addWidget(probability_slider)\n self.setWindowTitle(\"SLAM\")\n self.setStatusBar(self.statusBar)", "def __init__(self, *args, **kwargs):\n\n super(MainMenu, self).__init__(*args, **kwargs)\n\n #TODO: Move these out to a BoxLayout. 
So they can be centered\n # Easier.\n\n self.mod_list_container = ModListContainer()\n self.add_widget(self.mod_list_container)\n \n #Creates the Exit Button and sets it as a child of self.\n self._exit_button = ExitButton()\n self._exit_button.text = \"Exit\"\n self.add_widget(self._exit_button)", "def loadMenu(self):\r\n show_empty_root_items = pos.config['menu', 'show_empty_root_items']\r\n show_disabled_items = pos.config['menu', 'show_disabled_items']\r\n self.mainToolbook.AssignImageList(pos.menu.il)\r\n \r\n for root in pos.menu.main.items:\r\n if not root.enabled and not show_disabled_items:\r\n continue\r\n enabled_children = [i for i in root.children if i.enabled]\r\n if show_disabled_items:\r\n children = root.children\r\n else:\r\n children = enabled_children\r\n # Hide empty menu root items\r\n if len(children) == 0 and not show_empty_root_items:\r\n continue\r\n page = self.getToolbookPage(children)\r\n self.mainToolbook.AddPage(imageId=root.image, page=page, select=False, text=root.label)\r\n page.Enable(root.enabled)# and len(enabled_children) != 0)\r", "def menus(self):\r\n return []", "def header_nav(self):\r\n\r\n menu_stack = PaneStack()\r\n\r\n # Ensure the default button is the first tab\r\n #default_button_name = c.site.default_listing\r\n\r\n main_buttons = [\r\n ExpandableButton('main', dest = '/promoted', sr_path = False, sub_menus =\r\n [ NamedButton('posts', dest = '/promoted', sr_path = False),\r\n NamedButton('comments', dest = '/comments', sr_path = False)]),\r\n ExpandableButton('discussion', dest = \"/r/discussion/new\", sub_reddit = \"/r/discussion/\", sub_menus =\r\n [ NamedButton('posts', dest = \"/r/discussion/new\", sr_path = False),\r\n NamedButton('comments', dest = \"/r/discussion/comments\", sr_path = False)])\r\n ]\r\n\r\n menu_stack.append(NavMenu(main_buttons, title = _('Filter by'), _id='nav', type='navlist'))\r\n\r\n\r\n if self.header_sub_nav:\r\n menu_stack.append(NavMenu(self.header_sub_nav, title = _('Filter by'), _id='filternav', type='navlist'))\r\n\r\n return menu_stack", "def create_menu():\n MenuData = [\n (\"&Draw Variables\",drawable.ask),\n (\"&Show Variables\",printall),\n (\"&Print Variables\",printval),\n (\"&Edit Variable\",edit),\n (\"&Rename Variable\",rename),\n (\"&Forget Variables\",forget),\n (\"---\",None),\n (\"&Create Plane\",\n [(\"Coordinates\", \n [(\"Point and normal\", createPlaneCoordsPointNormal),\n (\"Three points\", createPlaneCoords3Points),\n ]), \n (\"Visually\", \n [(\"Three points\", createPlaneVisual3Points),\n ]),\n ]),\n (\"&Select Plane\",planes.ask),\n (\"&Draw Selection\",planes.draw),\n (\"&Forget Selection\",planes.forget),\n (\"---\",None),\n (\"&Pick Actors\",pick_actors),\n (\"&Pick Elements\",pick_elements),\n (\"&Pick Points\",pick_points),\n (\"&Pick Edges\",pick_edges),\n (\"---\",None),\n ('&Selection',\n [('&Create Report',report_selection),\n ('&Set Property',setprop_selection),\n ('&Grow',grow_selection),\n ('&Partition',partition_selection),\n ('&Get Partition',get_partition),\n ('&Export',export_selection),\n ]),\n (\"---\",None),\n ('&Query',\n [('&Actors',query_actors),\n ('&Elements',query_elements),\n ('&Points',query_points),\n ('&Edges',query_edges),\n ('&Distances',query_distances),\n ]),\n (\"---\",None),\n (\"&Close\",close_menu),\n ]\n return widgets.Menu('Tools',items=MenuData,parent=GD.gui.menu,before='help')", "def layoutDefault(self): # real signature unknown; restored from __doc__\n pass", "def init_layout(self):\n\t\tbox1 = 
gtk.VBox()\n\t\tbox1.pack_start(self.labelName)\n\t\tbox1.pack_start(self.labelDirectory)\n\t\tif self.labelDefaults is not None:\n\t\t\tbox1.pack_start(self.labelDefaults)\n\n\t\tbox2 = gtk.HBox()\n\t\tbox2.pack_start(self.directorySelector.edit, expand=True)\n\t\tbox2.pack_start(self.directorySelector.button, expand=False)\n\n\t\tbox3 = gtk.VBox()\n\t\tbox3.pack_start(self.editName)\n\t\tbox3.pack_start(box2)\n\t\tif self.comboDefaults is not None:\n\t\t\tbox3.pack_start(self.comboDefaults)\n\n\t\tbox4 = gtk.HBox()\n\t\tbox4.pack_start(box1, expand=False)\n\t\tbox4.pack_start(box3)\n\n\t\tself.vbox.pack_start(box4, expand=False)\n\t\tself.vbox.pack_start(gtk.VBox())\n\n\t\tself.show_all()", "def _setupSideMenu(self):\r\n dataGroup = QGroupBox(\"Select Data\")\r\n dataGroup.setLayout(self._setupDataGroup())\r\n self.addWidget(dataGroup, 1)\r\n\r\n modelsGroup = QGroupBox(\"Select Hazard Functions\")\r\n modelsGroup.setLayout(self._setupModelsGroup())\r\n self.addWidget(modelsGroup, 2)\r\n\r\n metricsGroup = QGroupBox(\"Select Covariates\")\r\n metricsGroup.setLayout(self._setupMetricsGroup())\r\n self.addWidget(metricsGroup, 2)\r\n\r\n self.runButton = QPushButton(\"Run Estimation\")\r\n self.runButton.clicked.connect(self._emitRunModelSignal)\r\n self.addWidget(self.runButton, 1)\r\n\r\n self.addStretch(1)\r\n\r\n # signals\r\n self.sheetSelect.currentIndexChanged.connect(self._emitSheetChangedSignal) # when sheet selection changed\r", "def misc_menu(self):\n # info needed to separate edit and view widgets in self.widget_classes\n name_test_current = [\n (\"Editor\", lambda x: x.lep_type == 'EDITOR', self.edit_widget.__class__),\n (\"Viewer\", lambda x: x.lep_type != 'EDITOR', self.view_widget.__class__),\n ]\n\n menu = QtWidgets.QMenu()\n for name, is_one, current in name_test_current:\n # list Editor widgets, then Viewer widgets\n for widget_class in [i for i in self.widget_classes if is_one(i)]:\n\n def cb(checked, widget_class=widget_class):\n self.set_widget(widget_class=widget_class)\n\n act = QAction(f\"{name}: {widget_class.lep_name}\", self)\n act.setCheckable(True)\n act.setChecked(widget_class == current)\n act.triggered.connect(cb)\n menu.addAction(act)\n\n button = self.control_menu_button\n point = button.position().toPoint() if isQt6 else button.pos() # Qt6 documentation is wrong.\n global_point = button.mapToGlobal(point)\n menu.exec_(global_point)", "def create_menu(self):\n self.menubar = wx.MenuBar()\n menu_file = wx.Menu()\n menu_help = wx.Menu()\n # Add save and exit to File menu\n menu_save = menu_file.Append(-1, '&Save plot\\tCtrl-S',\n 'Save plot to a file')\n menu_file.AppendSeparator()\n menu_exit = menu_file.Append(-1, '&Exit\\tCtrl-X',\n 'Exit the program')\n self.Bind(wx.EVT_MENU, self.on_save, menu_save)\n self.Bind(wx.EVT_MENU, self.on_exit, menu_exit)\n # Add an about in the Help menu. 
Will update later.\n help_about = menu_help.Append(-1, '&About',\n 'About the program')\n menu_help.AppendSeparator()\n self.Bind(wx.EVT_MENU, self.on_about, help_about)\n\n # Add them both to menubar\n self.menubar.Append(menu_file, '&File')\n self.menubar.Append(menu_help, '&Help')\n self.SetMenuBar(self.menubar)", "def __setupMenu(self):\n quit_action = QAction(\"&Exit\", self)\n quit_action.setShortcut('Ctrl+Q')\n quit_action.triggered.connect(self.close)\n\n sign_out_action = QAction(\"Sign out\", self)\n sign_out_action.setShortcut('Ctrl+L')\n sign_out_action.triggered.connect(lambda: (self.app.logOut(), self.hide(), self.requestCredentials()))\n\n change_password_action = QAction(\"Change password\", self)\n change_password_action.triggered.connect(self.requestPasswordChange)\n\n about_action = QAction(\"About\", self)\n about_action.triggered.connect(lambda: QMessageBox.about(self, \"About\", u'© ' + __author__ + ' 2013'))\n\n self.file_menu = self.menuBar().addMenu(\"&File\")\n self.file_menu.addAction(quit_action)\n\n self.account_menu = self.menuBar().addMenu(\"&Account\")\n self.account_menu.addAction(sign_out_action)\n self.account_menu.addAction(change_password_action)\n\n self.help_menu = self.menuBar().addMenu(\"&Help\")\n self.help_menu.addAction(about_action)", "def main_menu(self):\n\n # Set the window background\n self.palette = QPalette()\n self.pixmap = QPixmap('./pictures/menu_cat.png').scaled(860, 640)\n self.palette.setBrush(QPalette.Background, QBrush(self.pixmap))\n self.setPalette(self.palette)\n\n for item in self.mainmenu_items:\n item.show()\n for item in self.mapmenu_items:\n item.hide()", "def present_menu (self, menu, groupName = 'main'):\n \n if not hasattr (cherrypy.request, 'nav'):\n cherrypy.request.nav = {}\n\n if not groupName in cherrypy.request.nav:\n cherrypy.request.nav [groupName] = []\n \n for item in menu.items:\n cherrypy.request.nav [groupName].append (item)", "def get_menus():\n\n pass", "def _setupTab1(self):\r\n horizontalLayout = QHBoxLayout() # main layout\r\n\r\n self.sideMenu = SideMenu1()\r\n horizontalLayout.addLayout(self.sideMenu, 15)\r\n self.plotAndTable = PlotAndTable(\"Plot\", \"Table\")\r\n horizontalLayout.addWidget(self.plotAndTable, 85)\r\n\r\n self.setLayout(horizontalLayout)", "def _create_layout(self):\n # Level Size\n self._level_size_lay = PySide2.QtWidgets.QHBoxLayout()\n self._level_size_lay.addWidget(self._level_size_lbl)\n self._level_size_lay.addSpacing(10)\n self._level_size_lay.addWidget(self._level_size_x_lbl)\n self._level_size_lay.addWidget(self._level_size_x_spinbox)\n self._level_size_lay.addSpacing(5)\n self._level_size_lay.addWidget(self._level_size_y_lbl)\n self._level_size_lay.addWidget(self._level_size_y_spinbox)\n self._level_size_lay.addSpacing(5)\n self._level_size_lay.addWidget(self._level_size_z_lbl)\n self._level_size_lay.addWidget(self._level_size_z_spinbox)\n self._level_size_lay.addStretch()\n\n # Minimum Length\n self._minimum_length_lay = PySide2.QtWidgets.QHBoxLayout()\n self._minimum_length_lay.addWidget(self._minimum_length_checkbox)\n self._minimum_length_lay.addSpacing(5)\n self._minimum_length_lay.addWidget(self._minimum_length_spinbox)\n self._minimum_length_lay.addStretch()\n\n # Maximum Length\n self._maximum_length_lay = PySide2.QtWidgets.QHBoxLayout()\n self._maximum_length_lay.addWidget(self._maximum_length_checkbox)\n self._maximum_length_lay.addSpacing(5)\n self._maximum_length_lay.addWidget(self._maximum_length_spinbox)\n self._maximum_length_lay.addStretch()\n\n # Seed\n 
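# row pairing the seed checkbox with its line edit\n 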
self._seed_lay = PySide2.QtWidgets.QHBoxLayout()\n self._seed_lay.addWidget(self._seed_checkbox)\n self._seed_lay.addSpacing(5)\n self._seed_lay.addWidget(self._seed_le)\n\n # Generator Settings Group Box\n self._generator_lay = PySide2.QtWidgets.QVBoxLayout()\n self._generator_lay.addLayout(self._level_size_lay)\n self._generator_lay.addLayout(self._minimum_length_lay)\n self._generator_lay.addLayout(self._maximum_length_lay)\n self._generator_lay.addLayout(self._seed_lay)\n self._generator_group_box.setLayout(self._generator_lay)\n\n # Block Size\n self._block_size_lay = PySide2.QtWidgets.QHBoxLayout()\n self._block_size_lay.addWidget(self._block_size_lbl)\n self._block_size_lay.addSpacing(10)\n self._block_size_lay.addWidget(self._block_size_x_lbl)\n self._block_size_lay.addWidget(self._block_size_x_spinbox)\n self._block_size_lay.addSpacing(5)\n self._block_size_lay.addWidget(self._block_size_y_lbl)\n self._block_size_lay.addWidget(self._block_size_y_spinbox)\n self._block_size_lay.addSpacing(5)\n self._block_size_lay.addWidget(self._block_size_z_lbl)\n self._block_size_lay.addWidget(self._block_size_z_spinbox)\n self._block_size_lay.addStretch()\n\n # Group Name\n self._group_name_lay = PySide2.QtWidgets.QHBoxLayout()\n self._group_name_lay.addWidget(self._group_name_lbl)\n self._group_name_lay.addSpacing(10)\n self._group_name_lay.addWidget(self._group_name_le)\n\n # Maya Scene Group Box\n self._scene_lay = PySide2.QtWidgets.QVBoxLayout()\n self._scene_lay.addLayout(self._block_size_lay)\n self._scene_lay.addLayout(self._group_name_lay)\n self._scene_group_box.setLayout(self._scene_lay)\n\n # Object Blocks\n for blk_type in VALID_BLOCK_TYPES:\n # Path\n self._object_blocks[blk_type][\"pth_lay\"] = PySide2.QtWidgets.QHBoxLayout()\n self._object_blocks[blk_type][\"pth_lay\"].addWidget(self._object_blocks[blk_type][\"pth_lbl\"])\n self._object_blocks[blk_type][\"pth_lay\"].addSpacing(10)\n self._object_blocks[blk_type][\"pth_lay\"].addWidget(self._object_blocks[blk_type][\"pth_le\"])\n \n # Weight\n self._object_blocks[blk_type][\"weight_lay\"] = PySide2.QtWidgets.QHBoxLayout()\n self._object_blocks[blk_type][\"weight_lay\"].addWidget(self._object_blocks[blk_type][\"weight_lbl\"])\n self._object_blocks[blk_type][\"weight_lay\"].addSpacing(10)\n self._object_blocks[blk_type][\"weight_lay\"].addWidget(self._object_blocks[blk_type][\"weight_spinbox\"])\n self._object_blocks[blk_type][\"weight_lay\"].addStretch()\n\n # Object Block Group\n self._object_blocks[blk_type][\"group_lay\"] = PySide2.QtWidgets.QVBoxLayout()\n self._object_blocks[blk_type][\"group_lay\"].addSpacing(15)\n self._object_blocks[blk_type][\"group_lay\"].addLayout(self._object_blocks[blk_type][\"pth_lay\"])\n self._object_blocks[blk_type][\"group_lay\"].addLayout(self._object_blocks[blk_type][\"weight_lay\"])\n self._object_blocks[blk_type][\"group\"].setLayout(self._object_blocks[blk_type][\"group_lay\"])\n\n # Object Block Group Box\n self._block_lay = PySide2.QtWidgets.QVBoxLayout()\n for blk_type in VALID_BLOCK_TYPES:\n self._block_lay.addWidget(self._object_blocks[blk_type][\"group\"])\n self._block_group_box.setLayout(self._block_lay)\n\n # Buttons\n self._button_lay = PySide2.QtWidgets.QHBoxLayout()\n self._button_lay.addWidget(self._cancel_btn)\n self._button_lay.addSpacing(5)\n self._button_lay.addWidget(self._generate_btn)\n\n # Main\n self._main_lay = PySide2.QtWidgets.QVBoxLayout()\n self._main_lay.addWidget(self._generator_group_box)\n self._main_lay.addWidget(self._scene_group_box)\n 
self._main_lay.addWidget(self._block_group_box)\n self._main_lay.addLayout(self._button_lay)\n\n # Set the layout\n self.setLayout(self._main_lay)", "def __init__(self, handlerClass):\n\t\tsuper(Layout, self).__init__()\n\t\tself.set_property('orientation', Gtk.Orientation.VERTICAL)\n\t\tself.handler = handlerClass\n\t\tself.init_layout()\n\t\tself.show_all()", "def layoutCMakeLists(self):\n self.cmakelists.layout()", "def draw_main_menu():\n draw_cover()\n draw_menu_buttons()\n draw_border()", "def get_root_layout(self):\n layout = dbc.Container(\n fluid=True,\n children=[\n self.get_nav_bar_layout(),\n dbc.Row(\n [\n # Real time update database\n dcc.Interval(\n id='real_time_db_update',\n interval=300000, # in milliseconds\n n_intervals=0\n ),\n dbc.Col(children=self.get_sidebar_layout(), md=2),\n dbc.Col(children=self.get_potential_deal_table_layout(), md=10)\n\n ]\n ),\n dbc.Row(\n dbc.Label(children=\"Made with ❤️ in India\"),\n className=\"justify-content-center\"\n )\n ]\n )\n return layout", "def _create_menu(self):\n menu = ImageMenu(0.04, 0.16, 0.23, self.y_for_x(0.23) * 0.7)\n menu.items_per_col = 2\n menu.visible_rows = 2\n menu.visible_cols = 4\n\n clips = self.video_library.get_video_clips()\n clips_list = [[clip.thumbnail_url, clip] for clip in clips]\n menu.async_add_clips(clips_list)\n\n # Create list indicator\n self.list_indicator = ListIndicator(0.7, 0.8, 0.2, 0.045,\n ListIndicator.HORIZONTAL)\n self.list_indicator.set_maximum(len(clips))\n self.list_indicator.show()\n self.add(self.list_indicator)\n\n # Create information labels\n self.clip_title = Label(0.042, \"title\", 0.15, 0.77, \"\",\n font_weight=\"bold\")\n self.clip_title.set_ellipsize(pango.ELLIPSIZE_END)\n self.clip_title.set_line_wrap(False)\n self.clip_title.width = 0.5\n self.add(self.clip_title)\n\n self.clip_info = Label(0.034, \"subtitle\", 0.15, 0.85, \"\")\n self.clip_info.set_ellipsize(pango.ELLIPSIZE_END)\n self.clip_info.set_line_wrap(False)\n self.clip_info.width = 0.5\n self.add(self.clip_info)\n\n return menu", "def main_menu(self):\n menu_string = \"Main menu\\n\"\n menu_string += \"\\t1. Modify a list\\n\"\n menu_string += \"\\t2. Grade submenu\\n\"\n menu_string += \"\\t3. Search for something\\n\"\n menu_string += \"\\t4. Get a statistic\\n\"\n menu_string += \"\\t5. Undo/Redo\\n\"\n menu_string += \"\\t0. 
Exit\\n\"\n stop = False\n\n while not stop:\n command_list = \\\n {'0': self.__no_command,\n '1': self.__modify_submenu,\n '2': self.__grade_submenu,\n '3': self.__search_submenu,\n '4': self.__statistics_submenu,\n '5': self.__undo_submenu\n }\n command = self.__ui_read_command(menu_string)\n\n if command in command_list.keys():\n if command == '0':\n return\n else:\n command_list[command]()\n\n else:\n print(\"Invalid command!\")", "def menu(self):\n from mainmenu import Menu\n gm = Menu(self.screen)\n gm.run()", "def initViewMenu(self):\n menu = QMenu(QCoreApplication.translate('ViewManager', '&View'),\n self.ui)\n menu.setTearOffEnabled(True)\n menu.addActions(self.viewActGrp.actions())\n menu.addSeparator()\n menu.addActions(self.viewFoldActGrp.actions())\n menu.addSeparator()\n menu.addAction(self.previewAct)\n menu.addAction(self.astViewerAct)\n menu.addSeparator()\n menu.addAction(self.unhighlightAct)\n menu.addSeparator()\n menu.addAction(self.newDocumentViewAct)\n if self.canSplit():\n menu.addAction(self.newDocumentSplitViewAct)\n menu.addSeparator()\n menu.addAction(self.splitViewAct)\n menu.addAction(self.splitOrientationAct)\n menu.addAction(self.splitRemoveAct)\n menu.addAction(self.nextSplitAct)\n menu.addAction(self.prevSplitAct)\n \n return menu", "def set_ui(self):\n\n self.setLayout(self.horizon_layout)\n self.setWindowTitle(\"数据采集\")\n self.setWindowIcon(self.Icon)\n self.setWindowState(Qt.WindowMaximized)\n # self.resize(self._size_of_x, self._size_of_y)\n\n # //-set left\n self.horizon_left_layout1.addWidget(self.ECG)\n self.horizon_left_layout1.addWidget(self.ECGWin)\n self.horizon_left_layout2.addWidget(self.Respiration)\n self.horizon_left_layout2.addWidget(self.RespirationWin)\n self.horizon_left_layout3.addWidget(self.PulseWave)\n self.horizon_left_layout3.addWidget(self.PulseWaveWin)\n # self.horizon_left_layout4.addWidget(self.SpO2)\n # self.horizon_left_layout4.addWidget(self.SpO2Win)\n\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout1)\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout2)\n # self.vertical_left_layout.addStretch(1)\n self.vertical_left_layout.addLayout(self.horizon_left_layout3)\n # self.vertical_left_layout.addStretch(1)\n # self.vertical_left_layout.addLayout(self.horizon_left_layout4)\n # self.vertical_left_layout.addStretch(1)\n\n # //-set right\n # self.vertical_right_layout.addStretch(1)\n self.vertical_right_layout.addWidget(self.save)\n self.vertical_right_layout.addWidget(self.clear)\n self.vertical_right_layout.addWidget(self.receive)\n self.vertical_right_layout.addStretch(1)\n self.vertical_right_layout.addWidget(self.exit)\n # self.vertical_right_layout.addStretch(1)\n\n # //-set layout\n # self.horizon_layout.addStretch(0)\n self.horizon_layout.addLayout(self.vertical_left_layout)\n # self.horizon_layout.addStretch(0)\n # self.horizon_layout.addWidget(self.dataWin)\n self.horizon_layout.addLayout(self.vertical_right_layout)", "def makeMenuBar(self):\n\n # Make a file menu with Hello and Exit items\n fileMenu = wx.Menu()\n # The \"\\t...\" syntax defines an accelerator key that also triggers\n # the same event\n helloItem = fileMenu.Append(-1, \"&保存日志\",\n \"Help string shown in status bar for this menu item\")\n newItem = fileMenu.Append(wx.ID_NEW,\"打开\")\n fileMenu.AppendSeparator()\n # When using a stock ID we don't need to specify the menu item's\n # label\n exitItem = fileMenu.Append(wx.ID_EXIT)\n\n # Now a help 
menu for the about item\n helpMenu = wx.Menu()\n aboutItem = helpMenu.Append(wx.ID_ABOUT)\n\n # Make the menu bar and add the two menus to it. The '&' defines\n # that the next letter is the \"mnemonic\" for the menu item. On the\n # platforms that support it those letters are underlined and can be\n # triggered from the keyboard.\n menuBar = wx.MenuBar()\n menuBar.Append(fileMenu, \"&File\")\n menuBar.Append(helpMenu, \"&Help\")\n\n # Give the menu bar to the frame\n self.SetMenuBar(menuBar)\n\n # Finally, associate a handler function with the EVT_MENU event for\n # each of the menu items. That means that when that menu item is\n # activated then the associated handler function will be called.\n self.Bind(wx.EVT_MENU, self.OnHello, helloItem)\n self.Bind(wx.EVT_MENU, self.OnExit, exitItem)\n self.Bind(wx.EVT_MENU, self.OnAbout, aboutItem)\n self.Bind(wx.EVT_MENU, self.OnOpenFileDir,newItem)", "def drawMenu(self):\r\n menuText, menuSize = self.menuView.draw() \r\n self.drawCenteredText(menuText, menuSize, .5, 11.0/16)", "def add_default_menu(self):\n data = get_default_menu()\n\n for i, link in enumerate(data):\n link[\"position\"] = i\n self.menu.create(**link)", "def menu_org(cls):\n\n OM = S3OrgMenuLayout\n return OM()", "def build(self):\n self.title = 'Digital Image Processing'\n self.main_layout = MainLayout()\n return self.main_layout", "def create_menu(self):\n self.create_actions()\n self._menu = QtGui.QMenu('QA measurements')\n for action in self._menu_actions:\n self._menu.addAction(action)\n menuBar = util.get_main_win().menuBar()\n for action in menuBar.actions():\n if action.menu().title() == \"QA measurements\":\n menuBar.removeAction(action)\n menuBar.addMenu(self._menu)", "def main_menu_toolbar():\n\n pass", "def gui_layout(self) -> List[List[sg.Element]]:\n tabs = []\n if self.label != \"new\":\n tabs.append(sg.Tab(\"View\", self.gui_layout_view(), key=self.key_gen(\"view\")))\n\n tabs.append(sg.Tab(\"Edit\", self.gui_layout_edit(), key=self.key_gen(\"edit\")))\n\n return [[sg.TabGroup([tabs], key=\"controller_%s_tabs\" % self.label)]]", "def createMainMenu(self, menuConfig):\n menu = tk.Menu(self.root)\n self._addMenuChilds(menu, menuConfig)\n self.root.config(menu=menu)\n return menu", "def _initializeMenus(self):\n menubar = wx.MenuBar()\n\n fileMenuData = [(wx.ID_NEW, 'New Experiment', None, self._onNew),\n (wx.ID_OPEN, 'Open Experiment', None, self._onOpen),\n (ID_PREMADE, 'Open Premade', None, self._onPremade),\n (wx.ID_EXIT, 'Exit', None, self._onExit)]\n gh.createMenu(self, menubar, '&File', fileMenuData)\n \n toolsMenu = wx.Menu()\n controllerMenu = wx.Menu()\n keys = list(self.controllerIDs.keys())\n keys.sort()\n for key in keys:\n currId = self.controllerIDs[key].GetValue()\n controllerMenu.Append(currId, key)\n self.Bind(wx.EVT_MENU, self._onController, id=currId)\n toolsMenu.AppendSubMenu(controllerMenu, 'Controllers')\n toolsMenu.AppendSeparator()\n toolsMenu.Append(ID_SETTINGS, 'Settings', 'Edit software settings')\n toolsMenu.Append(ID_USERS.GetValue(), 'Users', 'Edit the list of users')\n self.btnUserSettings = toolsMenu.Append(ID_USER_SETTINGS, \n 'User settings', \n 'Edit the preferences for the currently-selected user')\n self.Bind(wx.EVT_MENU, self._onSettings, id=ID_SETTINGS)\n self.Bind(wx.EVT_MENU, self._onUsers, id=ID_USERS)\n self.Bind(wx.EVT_MENU, self._onUserSettings, id=ID_USER_SETTINGS)\n menubar.Append(toolsMenu, '&Tools')\n\n helpMenuData = [(wx.ID_HELP, 'Help', 'View software help',\n self.onHelp),\n (wx.ID_ABOUT, 'About', 'About', 
self.onAbout),\n (ID_CHANGELOG, 'Change log', 'View revision history',\n self._onChangeLog)]\n gh.createMenu(self, menubar, '&Help', helpMenuData)\n\n self.SetMenuBar(menubar)", "def main_menu ( self ):\n\t\tif self.style == 'qt':\n\t\t\tp = Process( target=self.qt_main_menu )\n\t\t\tp.start()\n\t\t\tself.menus.append( p )", "def start_menu(self, kwargs=None):\n\n # Create sublayout\n start_layout = QGridLayout()\n\n # Frame over the objects\n frame = QLabel()\n frame.setFrameStyle(QFrame.Box | QFrame.Raised)\n frame.setLineWidth(0)\n frame.setMidLineWidth(2)\n\n self.layout.addWidget(\n frame, self.Start_posy, self.Start_posx, self.Start_ysize, self.Start_xsize\n )\n\n # Adding variables to the default dict\n # self.variables.default_values_dict[\"settings\"].update({\"End_time\": \"NaN\", \"Start_time\": \"NaN\", \"Bad_strips\": 0})\n # self.variables.default_values_dict[\"settings\"].update({\"Measurement_running\": False, \"Alignment\": True, \"Environment_status\": True})\n\n # Orders\n @raise_exception\n def exit_order(kwargs=None):\n reply = QMessageBox.question(\n None,\n \"Warning\",\n \"Are you sure you want to quit?\",\n QMessageBox.Yes,\n QMessageBox.No,\n )\n if reply == QMessageBox.Yes:\n result = QMessageBox.question(\n None,\n \"Save session?\",\n \"Do you want to save the current session?\",\n QMessageBox.Yes,\n QMessageBox.No,\n )\n if result == QMessageBox.Yes:\n self.variables.message_to_main.put({\"SAVE_SESSION\": True})\n self.variables.message_to_main.put({\"CLOSE_PROGRAM\": True})\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(\n \"The program is shutting down; depending on the number of instruments attached to the PC this can take a while...\"\n )\n # msg.setInformativeText(\"This is additional information\")\n msg.setWindowTitle(\"Shutdown in progress\")\n # msg.setDetailedText(\"The details are as follows:\")\n msg.exec_()\n\n @raise_exception\n def Start_order(kwargs=None):\n if self.variables.default_values_dict[\"settings\"][\n \"Current_filename\"\n ] and os.path.isdir(\n self.variables.default_values_dict[\"settings\"][\"Current_directory\"]\n ):\n if not self.variables.default_values_dict[\"settings\"][\n \"Measurement_running\"\n ]:\n self.variables.reset_plot_data()\n\n # Ensures that the new values are in the state machine\n self.variables.ui_plugins[\"Settings_window\"].load_new_settings()\n\n additional_settings = {\n \"Save_data\": True,\n \"Filepath\": self.variables.default_values_dict[\"settings\"][\n \"Current_directory\"\n ],\n \"Filename\": self.variables.default_values_dict[\"settings\"][\n \"Current_filename\"\n ],\n \"Project\": self.variables.default_values_dict[\"settings\"][\n \"Current_project\"\n ],\n \"Sensor\": self.variables.default_values_dict[\"settings\"][\n \"Current_sensor\"\n ],\n \"environment\": True, # whether environment surveillance should be done\n \"skip_init\": False,\n } # warning: this prevents the device init\n\n self.job.generate_job(additional_settings)\n\n # order = {\"Measurement\": {\"Save_data\": True,\"Filepath\": self.variables.default_values_dict[\"settings\"][\"Current_directory\"],\"Filename\": self.variables.default_values_dict[\"settings\"][\"Current_filename\"], \"Longterm_IV\": {\"StartVolt\": self.variables.default_values_dict[\"settings\"][\"Longterm_IV\"][0],\"EndVolt\": self.variables.default_values_dict[\"settings\"][\"Longterm_IV\"][1],\"Longterm_IV_time\": self.variables.default_values_dict[\"settings\"][\"Longterm_IV_time\"],\"Steps\": 10}}} # just for now\n # 
self.variables.message_from_main.put(order)\n\n else:\n reply = QMessageBox.information(\n None,\n \"Warning\",\n \"Please enter a valid filepath and filename.\",\n QMessageBox.Ok,\n )\n\n @raise_exception\n def Stop_order(kwargs=None):\n order = {\"ABORT_MEASUREMENT\": True} # just for now\n self.variables.message_to_main.put(order)\n\n @raise_exception\n def Load_order(kwargs=None):\n \"\"\"This function loads an old measurement file and displays it if no measurement is currently being conducted\"\"\"\n\n if not self.variables.default_values_dict[\"settings\"][\n \"Measurement_running\"\n ]:\n fileDialog = QFileDialog()\n file = fileDialog.getOpenFileName()\n\n if file[0]:\n pass\n else:\n reply = QMessageBox.information(\n None,\n \"Warning\",\n \"You cannot load measurement files while data taking is in progress.\",\n QMessageBox.Ok,\n )\n\n @raise_exception\n def create_statistic_text(kwargs=None):\n try:\n bias = (\n \"Bias Voltage: \"\n + str(\n en.EngNumber(\n float(\n self.variables.default_values_dict[\"settings\"][\n \"bias_voltage\"\n ]\n )\n )\n )\n + \"V\"\n + \"\\n\\n\"\n )\n except:\n bias = \"Bias Voltage: NONE V\" + \"\\n\\n\"\n starttime = (\n \"Start time: \"\n + str(\n self.variables.default_values_dict[\"settings\"].get(\n \"Start_time\", None\n )\n )\n + \"\\n\\n\"\n )\n eastend = (\n \"Est. end time: \"\n + str(\n self.variables.default_values_dict[\"settings\"].get(\"End_time\", None)\n )\n + \"\\n\\n\"\n )\n striptime = (\n \"Strip meas. time: \"\n + str(\n round(\n float(\n self.variables.default_values_dict[\"settings\"].get(\n \"strip_scan_time\", 0\n )\n ),\n 2,\n )\n )\n + \" sec\"\n + \"\\n\\n\"\n )\n badstrips = (\n \"Bad strips: \"\n + str(\n self.variables.default_values_dict[\"settings\"].get(\n \"Bad_strips\", None\n )\n )\n + \"\\n\\n\"\n )\n currentstrip = (\n \"Current strip: \"\n + str(\n self.variables.default_values_dict[\"settings\"].get(\n \"current_strip\", None\n )\n )\n + \"\\n\\n\"\n )\n\n return str(\n starttime + eastend + striptime + currentstrip + badstrips + bias\n )\n\n # Exit button\n\n qbtn = QPushButton(\"Quit\")\n qbtn.clicked.connect(exit_order)\n qbtn.resize(qbtn.sizeHint())\n start_layout.addWidget(qbtn, 1, 1)\n\n # Start button\n\n qbtn = QPushButton(\"Start\")\n qbtn.clicked.connect(Start_order)\n qbtn.resize(qbtn.sizeHint())\n start_layout.addWidget(qbtn, 0, 0)\n\n # Stop button\n\n qbtn = QPushButton(\"Stop\")\n qbtn.clicked.connect(Stop_order)\n qbtn.resize(qbtn.sizeHint())\n start_layout.addWidget(qbtn, 1, 0)\n\n # Load button\n\n qbtn = QPushButton(\"Load\")\n qbtn.clicked.connect(Load_order)\n qbtn.resize(qbtn.sizeHint())\n start_layout.addWidget(qbtn, 0, 1)\n\n # Error log\n\n textbox_label = QLabel()\n textbox_label.setText(\"Event Log\")\n textbox_label.setFont(self.font)\n\n # Appearance of the Error Log\n self.errors = QLabel()\n self.errors.setStyleSheet(\"background : rgb(245,245,245)\")\n self.errors.setFrameStyle(QFrame.Panel | QFrame.Sunken)\n self.errors.setText(\"No errors =)\")\n self.errors.setMinimumHeight(80)\n\n start_layout.addWidget(textbox_label, 2, 0)\n start_layout.addWidget(self.errors, 3, 0, 4, 2)\n\n # Stats text\n\n # Label of the statistics box\n textbox_label = QLabel()\n textbox_label.setText(\"Statistics\")\n textbox_label.setFont(self.font)\n\n # Appearance of the stats\n textbox = QLabel()\n textbox.setStyleSheet(\"background : rgb(245,245,245)\")\n textbox.setFrameStyle(QFrame.Panel | QFrame.Raised)\n textbox.setFont(self.font)\n textbox.setText(create_statistic_text())\n 
textbox.setAlignment(QtCore.Qt.AlignVCenter)\n # textbox.setMinimumHeight(80)\n # textbox.setMinimumWidth(250)\n # textbox.setMaximumHeight(210)\n # textbox.setMaximumWidth(250)\n\n # start_layout.addWidget(textbox_label, 0, 2)\n start_layout.addWidget(textbox, 0, 2, 6, 1)\n\n # Stats textbox\n\n # Appearance of the stats led\n textbox_led = QLabel()\n textbox_led.setMidLineWidth(4)\n textbox_led.setStyleSheet(\"background : rgb(128,128,128); border-radius: 25px\")\n font = QtGui.QFont()\n font.setPointSize(10)\n textbox_led.setFont(font)\n textbox_led.setAlignment(QtCore.Qt.AlignCenter)\n # textbox.setMinimumHeight(70)\n # textbox.setMaximumHeight(70)\n\n start_layout.addWidget(textbox_led, 6, 2)\n\n # Update functions\n\n def error_update():\n last_errors = self.variables.event_loop_thread.error_log\n error_text = \"\"\n for error in reversed(last_errors[-100:]):\n error_text += \": \".join(error) + \"\\n\"\n\n if self.errors.text() != error_text: # Only update text if necessary\n self.errors.setText(error_text)\n\n def stat_update():\n new_text = create_statistic_text()\n if textbox.text() != new_text:\n textbox.setText(new_text)\n\n def led_update():\n\n current_state = self.variables.default_values_dict[\"settings\"][\n \"current_led_state\"\n ]\n alignment = self.variables.default_values_dict[\"settings\"][\"Alignment\"]\n running = self.variables.default_values_dict[\"settings\"][\n \"Measurement_running\"\n ]\n environment = self.variables.default_values_dict[\"settings\"][\n \"Environment_status\"\n ]\n\n if current_state != \"running\" and running:\n self.variables.default_values_dict[\"settings\"][\n \"current_led_state\"\n ] = \"running\"\n textbox_led.setStyleSheet(\n \"background : rgb(0,0,255); border-radius: 25px\"\n )\n textbox_led.setText(\"Measurement running\")\n return\n\n elif current_state != \"Alignment\" and not alignment and not running:\n self.variables.default_values_dict[\"settings\"][\n \"current_led_state\"\n ] = \"Alignment\"\n textbox_led.setStyleSheet(\n \"background : rgb(255,0,0); border-radius: 25px\"\n )\n textbox_led.setText(\"Alignment missing\")\n return\n\n elif (\n current_state != \"environment\"\n and not environment\n and alignment\n and not running\n ):\n self.variables.default_values_dict[\"settings\"][\n \"current_led_state\"\n ] = \"environment\"\n textbox_led.setStyleSheet(\n \"background : rgb(255,153,51); border-radius: 25px\"\n )\n textbox_led.setText(\"Environment status not ok\")\n return\n\n if current_state != \"ready\" and alignment and not running and environment:\n self.variables.default_values_dict[\"settings\"][\n \"current_led_state\"\n ] = \"ready\"\n textbox_led.setStyleSheet(\n \"background : rgb(0,255,0); border-radius: 25px\"\n )\n textbox_led.setText(\"Ready to go\")\n return\n\n # Adding update functions\n self.variables.add_update_function(error_update)\n self.variables.add_update_function(stat_update)\n self.variables.add_update_function(led_update)\n\n start_layout.setContentsMargins(8, 8, 8, 8) # Adds a margin around the layout\n\n # Add the layout to the main layout\n self.layout.addLayout(\n start_layout,\n self.Start_posy,\n self.Start_posx,\n self.Start_ysize,\n self.Start_xsize,\n )", "def create_menu(self, menu_items):\n self.menu = Gtk.Menu()\n\n # loop through the menu items\n for k, v in enumerate(menu_items):\n # menu item\n first_item = Gtk.MenuItem(v['title'])\n first_item.connect('activate', self.open_news_url, v['url'])\n self.menu.append(first_item)\n\n # separator item\n separator = 
Gtk.SeparatorMenuItem()\n self.menu.append(separator)\n\n # settings item\n settings_item = Gtk.MenuItem('Settings')\n settings_item.connect('activate', self.on_settings)\n self.menu.append(settings_item)\n\n # about item\n about_item = Gtk.MenuItem('About')\n about_item.connect('activate', self.on_about)\n self.menu.append(about_item)\n\n # exit item\n exit_item = Gtk.MenuItem('Exit')\n exit_item.connect('activate', self.stop)\n self.menu.append(exit_item)\n\n self.menu.show_all()\n self.indicator.set_menu(self.menu)\n\n return self.menu", "def init_helpmenu(self):\n self.menubar[\"helpmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"helpmenu\"].add_command(label=\"Help Index\", command=todo)\n self.menubar[\"helpmenu\"].add_command(label=\"About...\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Help\", menu=self.menubar[\"helpmenu\"])", "def create_menu_bar(self):\n\n\t\t# menus\n\t\tself.__app_menu = QMenu('App')\n\t\t# actions\n\t\tself.__clr_logs = QAction('Clear Logs')\n\t\tself.__quit = QAction('Quit')\n\t\tself.__hide = QAction('Hide')\n\n\t\tself.__quit.triggered.connect(self.exit_app)\n\t\tself.__hide.triggered.connect(self.set_visible)\n\n\t\tself.__app_menu.addActions([self.__clr_logs, self.__hide, self.__quit])\n\t\tself.menuBar().addMenu(self.__app_menu)", "def create(self):\n # Positioning background and pointer indicator for main menu\n self.surface.blit(self.main_menu_background, (0, 0))\n self.surface.blit(self.main_menu_greets, self.main_menu_greets_position)\n self.show_mouse_position_with_px()\n self.main_menu_buttons()", "def create_menu_set(name):\n\t\t\tmenu = getattr(self, name + \"_menu\")\n\t\t\ttv = getattr(self, \"tv_\" + name)\n\t\t\tcid_index = getattr(self, \"cid_\" + name)\n\n\t\t\t# bind menu helper\n\t\t\tdef bind_menu(label):\n\t\t\t\tdef bind_menu_inner(func):\n\t\t\t\t\tmenu.add_command(label=label, command=func)\n\t\t\t\t\treturn func\n\t\t\t\treturn bind_menu_inner\n\n\t\t\t# add commands...\n\t\t\t@bind_menu(\"Delete\")\n\t\t\tdef tvdelete():\n\t\t\t\tif messagebox.askyesno(\"Comic Crawler\", \"Are you sure you want to delete?\"):\n\t\t\t\t\tselected = tv.selection()\n\t\t\t\t\tself.remove(name, *[cid_index[cid] for cid in selected])\n\n\t\t\t@bind_menu(\"Move to top\")\n\t\t\tdef tvlift():\n\t\t\t\tselected = tv.selection()\n\t\t\t\tself.downloader.mission_manager.lift(name, *[cid_index[cid] for cid in selected])\n\n\t\t\t@bind_menu(\"Move to bottom\")\n\t\t\tdef tvdrop():\n\t\t\t\tselected = tv.selection()\n\t\t\t\tself.downloader.mission_manager.drop(name, *[cid_index[cid] for cid in selected])\n\n\t\t\t@bind_menu(\"Rename\")\n\t\t\tdef tvchangetitle():\n\t\t\t\tselected = tv.selection()\n\t\t\t\tmission = cid_index[selected[0]]\n\t\t\t\tselect_title(self.root, mission)\n\n\t\t\t@bind_menu(\"Reselect episodes\")\n\t\t\tdef tvReselectEP():\n\t\t\t\ts = tv.selection()\n\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\tfor mission in missions:\n\t\t\t\t\treselect_episodes(self.root, mission)\n\n\t\t\t@bind_menu(\"Open folder\")\n\t\t\tdef tvOpen():\n\t\t\t\ts = tv.selection()\n\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\tsavepath = setting[\"savepath\"]\n\t\t\t\tfor mission in missions:\n\t\t\t\t\tfolder = os.path.join(savepath, safefilepath(mission.title))\n\t\t\t\t\tos.startfile(os.path.expanduser(folder))\n\n\t\t\t@bind_menu(\"Open web page\")\n\t\t\tdef tvOpenBrowser():\n\t\t\t\ts = tv.selection()\n\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\tfor mission in missions:\n\t\t\t\t\twebbrowser.open(mission.url)\n\n\t\t\tif name == 
\"view\":\n\t\t\t\t@bind_menu(\"Add to library\")\n\t\t\t\tdef tvAddToLib():\n\t\t\t\t\ts = tv.selection()\n\t\t\t\t\tmissions = [ cid_index[i] for i in s ]\n\t\t\t\t\ttitles = [ m.title for m in missions ]\n\t\t\t\t\tself.downloader.mission_manager.add(\"library\", *missions)\n\t\t\t\t\tsafeprint(\"Added to library: {}\".format(\", \".join(titles)))\n\n\t\t\t# menu call\n\t\t\tdef tvmenucall(event):\n\t\t\t\tmenu.post(event.x_root, event.y_root)\n\t\t\ttv.bind(\"<Button-3>\", tvmenucall)", "def _init_default_layout(self):\n self._main_v_layout_ = QVBoxLayout(self)\n self._main_v_layout_.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self._main_v_layout_)", "def create_menus( self ):\n # XXX: for whatever reason, closing the window via menu action\n # when there are more than two QMainWindow instances causes\n # a hard crash. Left in for now since shortcut works fine,\n # but a refactor is necessary to get Record/Art viewer to be\n # QWidget descendants, rather than QMainWindow.\n self.closeAct = QAction( \"C&lose\", self, shortcut=\"Ctrl+W\",\n triggered=self.close )\n\n self.commitAct = QAction( \"&Commit\", self, shortcut=\"Ctrl+S\",\n triggered=self.commit_record )\n\n self.windowMenu = QMenu( \"&Window\", self )\n self.windowMenu.addAction( self.commitAct )\n self.windowMenu.addAction( self.closeAct )\n\n self.newArtistAct = QAction( \"New &Artist\", self, shortcut=\"Ctrl+A\",\n triggered=self.new_artist )\n self.databaseMenu = QMenu( \"&Database\", self )\n self.databaseMenu.addAction( self.newArtistAct )\n\n self.menuBar().addMenu( self.windowMenu )\n self.menuBar().addMenu( self.databaseMenu )", "def set_menus(self):\n #---MENU\n self.menubar = wx.MenuBar() # initialize a menu bar at the top\n\n fileMenu = wx.Menu() # initialize a menu and add items to it\n newItem = fileMenu.Append(wx.ID_NEW, '&New Project', 'New')\n openItem = fileMenu.Append(wx.ID_OPEN, '&Open', 'Open Existing Input File')\n saveItem = fileMenu.Append(wx.ID_SAVE, '&Save', 'Write Project')\n quitItem = fileMenu.Append(wx.ID_EXIT, '&Quit', 'Quit')\n\n #toolMenu = wx.Menu() # initialize another menu and add stuff to it\n #pngItem = toolMenu.Append(wx.ID_ANY, '&Build LaTeX Formulas', 'LaTeX Formulas')\n\n nmlMenu = wx.Menu() # initialize another menu\n\n # purposefully do *not* bind this with anything just yet\n nmlItem = nmlMenu.Append(wx.ID_ANY, '--No File Loaded--', '--No File Loaded--')\n\n # add the menu(s) to the menu bar\n self.menubar.Append(fileMenu, '&File')\n #self.menubar.Append(toolMenu, '&Tools')\n self.menubar.Append(nmlMenu, '&Namelists')\n\n self.nml_menu_index = 1 # index into menubar that returns the Namelist menu\n\n # finalize/build the menubar\n self.SetMenuBar(self.menubar)\n\n # direct GUI what to do when something is selected\n self.Bind(wx.EVT_MENU, self.buttons.OnNew, newItem)\n self.Bind(wx.EVT_MENU, self.buttons.OnOpen, openItem)\n self.Bind(wx.EVT_MENU, self.buttons.OnSave, saveItem)\n self.Bind(wx.EVT_MENU, self.buttons.OnQuit, quitItem)\n\n #self.Bind(wx.EVT_MENU, self.buttons.OnBuildPNGs, pngItem)\n\n #---TOOLBAR\n toolbar = self.CreateToolBar() # build a toolbar\n\n # build tools\n file_dir = os.path.dirname(os.path.abspath(__file__)) # directory of current file\n image_dir = os.path.join(file_dir, 'images')\n ntool = toolbar.AddTool(wx.ID_ANY, 'New', wx.Bitmap(os.path.join(image_dir, 'new_file.png')))\n otool = toolbar.AddTool(wx.ID_ANY, 'Open', wx.Bitmap(os.path.join(image_dir, 'open_folder.png')))\n stool = toolbar.AddTool(wx.ID_ANY, 'Save', wx.Bitmap(os.path.join(image_dir, 'filesave.png')))\n\n # 
direct GUI what to do when something is selected\n self.Bind(wx.EVT_TOOL, self.buttons.OnNew, ntool)\n self.Bind(wx.EVT_TOOL, self.buttons.OnOpen, otool)\n self.Bind(wx.EVT_TOOL, self.buttons.OnSave, stool)\n\n # finalize/build the toolbar\n toolbar.Realize()", "def menu_indentation(self, event=None):\n self.parentPanel.indentation_guides(event)", "def main_layout(modValues, mod_units, header=''):\n sg.theme('DarkAmber')\n divider = '---------------------------------------------------------------'\\\n '---------------------------------------------------------------'\n spacer = ' '\\\n ' '\n help_text = ' Help '\n\n tab1_layout = [[sg.Text(spacer+' '),\n sg.Button(help_text, key='bioprocess_help')],\n\n [sg.Button(modValues['side1'], key='side1'), sg.Text(' --> '),\n sg.Button(modValues['sub1'], key='sub1'), sg.Text(' --> '),\n sg.Button(modValues['proc1'], key='proc1'), sg.Text(' --> '),\n sg.Button(modValues['prod1'], key='prod1')\n ],\n\n [sg.Text(' | ')],\n\n [sg.Button(modValues['material'], key='material'), sg.Text(' --> '),\n sg.Button(modValues['substrate'], key='substrate'), sg.Text(' --> '),\n sg.Button(modValues['process'], key='process'), sg.Text(' --> '),\n sg.Button(modValues['product'], key='product')\n ],\n\n [sg.Text(' | ')],\n\n [sg.Button(modValues['side2'], key='side2'), sg.Text(' --> '),\n sg.Button(modValues['sub2'], key='sub2'), sg.Text(' --> '),\n sg.Button(modValues['proc2'], key='proc2'), sg.Text(' --> '),\n sg.Button(modValues['prod2'], key='prod2')\n ],\n\n [sg.Text(divider, key='changeTEXT')],\n\n # add spaces to prevent disappearing-text bug\n [sg.Text('Change ____: ', key='changeMod')],\n\n [sg.Combo(values=[''], key='changeOptions', size=(20, 1)),\n sg.Button('Apply Change')],\n\n [sg.Text('\\n\\n\\n')],\n\n [sg.Text(spacer),\n sg.Button(' UNDO ', key='undo'),\n sg.Button('Load Preset', key='load'),\n sg.Button('Save & Quit', key='exit')]\n\n ]\n\n tab2_layout = [[sg.T(spacer+' '),\n sg.Button(help_text, key='details_help')],\n\n [sg.Text('See details for:')],\n\n [sg.Combo(values=mod_units,\n key='detailOptions', size=(20, 1)),\n sg.Button('Enter', key='Detail Chosen')]]\n\n # TODO:\n # TRY TO UPDATE TAB2 TEXT INSTEAD OF USING A POPUP\n # [sg.Text('Details for _______: \\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n',\n # key='detailText')]]\n\n tab3_layout = [[sg.T(spacer+' '),\n sg.Button(help_text, key='custom_help')],\n\n [sg.Text('Select conversion type:')],\n\n [sg.Combo(values=['material', 'side', 'substrate'],\n key='customTypes', size=(20, 1)),\n sg.Button('Launch', key='customType Chosen')], ]\n\n layout = [[sg.TabGroup([[sg.Tab('Bioprocess', tab1_layout),\n sg.Tab('Details', tab2_layout),\n sg.Tab('Custom', tab3_layout)]])]]\n\n return layout", "def _initialize_main_window_menu(self, lang='en-US'):\n\n self.option_add('*tearOff', FALSE)\n self.menubar = Menu(self)\n self.config(menu=self.menubar)\n self.filemenu = Menu(self.menubar)\n self.menubar.add_cascade(label=LOCALIZED_TEXT[lang]['File'], \\\n menu=self.filemenu)\n self.filemenu.add_command(label=\\\n LOCALIZED_TEXT[lang]['Load project settings'], \\\n command=self._on_click_f0_next)\n self.filemenu.add_command(label=LOCALIZED_TEXT[lang]['Save'], \\\n command=self._on_save_project)\n self.filemenu.add_command(label=\\\n LOCALIZED_TEXT[lang]['Delete project settings'], \\\n command=self._on_del_project)\n self.filemenu.add_separator()\n self.filemenu.add_command(label=LOCALIZED_TEXT[lang]['Exit'], \\\n command=self.quit)\n\n self.helpmenu = Menu(self.menubar)\n 
self.menubar.add_cascade(label=LOCALIZED_TEXT[lang]['Help'], \\\n menu=self.helpmenu)\n self.helpmenu.add_command(label=LOCALIZED_TEXT[lang]['Read Me'], \\\n command=self._on_read_me)\n self.helpmenu.add_command(label=LOCALIZED_TEXT[lang]['About...'], \\\n command=on_copyright)", "def init_mainmenu(self):\n\n self.play = QPushButton('PLAY!', self)\n self.play.setFixedSize(312, 144)\n self.play.adjustSize()\n self.play.setStyleSheet('''\n background-image: url(./pictures/console_button.png);\n border: none;\n ''')\n self.play.move(520, 180)\n self.play.clicked.connect(self.map_menu)\n self.mainmenu_items.append(self.play)\n\n self.quit = QPushButton('', self)\n self.quit.setFixedSize(192, 192)\n self.quit.setStyleSheet('''\n background-image: url(./pictures/quit.png);\n border: none;\n ''')\n self.quit.move(575, 400)\n self.quit.clicked.connect(exit)\n self.mainmenu_items.append(self.quit)\n\n self.show()", "def createMenu():\n mType = -1\n if auth.is_logged_in() and auth.has_membership('administrador',auth.user.id):\n return menuAdmin\n elif auth.is_logged_in():\n return menuUser\n else:\n return menuPublic", "def _setup_layout(self):\r\n\t\tbtn_toggle_server = Button(self, \\\r\n\t\t\ttext = \"啟動伺服器\", command = self._toggle_server, \\\r\n\t\t\tname = \"btn_toggle_server\")\r\n\t\tbtn_toggle_server.pack(side = LEFT)\r\n\r\n\t\tlabel_IP = Label(self, text = \"IP: \", anchor = W)\r\n\t\tlabel_IP.pack(side = LEFT)\r\n\t\tentry_IP = Entry(self, width = 15, name = \"entry_IP\")\r\n\t\tentry_IP.pack(side = LEFT)\r\n\t\tlabel_port = Label(self, text = \"Port: \", anchor = W)\r\n\t\tlabel_port.pack(side = LEFT)\r\n\t\tentry_port = Entry(self, width = 5, name = \"entry_port\")\r\n\t\tentry_port.pack(side = LEFT)\r\n\r\n\t\tlabel_connections = Label(self, text = \"連接數: -/-\", \\\r\n\t\t\tname = \"label_connections\")\r\n\t\tlabel_connections.pack(side = LEFT)", "def __showMenuSetup(self):\n self.__showMenuBtn = QtWidgets.QPushButton(\"Shows \",self)\n self.__showMenuBtn.setIcon(QtGui.QIcon(\":show.png\"))\n self.__showMenuBtn.pressed.connect(self.__showMenuCheck) # pylint: disable=no-member\n self.__toolbar.addWidget(self.__showMenuBtn)\n\n self.__showMenu = QtWidgets.QMenu(self)\n self.__showMenuBtn.setMenu(self.__showMenu)\n self.__showMenuBtn.setFocusPolicy(QtCore.Qt.NoFocus)\n self.__showMenu.setFont(cuegui.Constants.STANDARD_FONT)\n self.__showMenu.triggered.connect(self.__showMenuHandle) # pylint: disable=no-member\n self.app.facility_changed.connect(self.__showMenuUpdate)\n\n self.__showMenuUpdate()", "def setupMenus(self):\n \n exit = QAction(QIcon('gui/icons/close.png'), 'Exit', self)\n exit.setShortcut('Ctrl+Q')\n exit.setStatusTip('Exit application')\n self.connect(exit, SIGNAL('triggered()'), SLOT('close()'))\n \n menubar = self.menuBar()\n file = menubar.addMenu('&File')\n self.getMessagesMenu = file.addMenu('&Get Messages')\n \n file.addAction(exit)\n \n toolbar = self.addToolBar('Exit')\n toolbar.addAction(exit)", "def init_mapmenu(self):\n\n self.level1 = QPushButton('', self)\n self.level1.setFixedSize(192, 192)\n self.level1.setStyleSheet('''\n color: black;\n background-image: url(./pictures/level1.png);\n border: none;\n ''')\n self.level1.move(80, 60)\n self.level1.clicked.connect(self.start_level1)\n self.mapmenu_items.append(self.level1)\n\n self.level2 = QPushButton('', self)\n self.level2.setFixedSize(192, 192)\n self.level2.setStyleSheet('''\n color: black;\n background-image: url(./pictures/level2.png);\n border: none;\n ''')\n self.level2.move(330, 70)\n 
self.level2.clicked.connect(self.start_level2)\n self.mapmenu_items.append(self.level2)\n\n self.level3 = QPushButton('', self)\n self.level3.setFixedSize(192, 192)\n self.level3.setStyleSheet('''\n color: black;\n background-image: url(./pictures/level3.png);\n border: none;\n ''')\n self.level3.move(580, 70)\n self.level3.clicked.connect(self.start_level3)\n self.mapmenu_items.append(self.level3)\n\n self.level_n = QPushButton('', self)\n self.level_n.setFixedSize(192, 192)\n self.level_n.setStyleSheet('''\n color: black;\n background-image: url(./pictures/level_n.png);\n border: none;\n ''')\n self.level_n.move(80, 330)\n self.level_n.clicked.connect(self.start_level_n)\n self.mapmenu_items.append(self.level_n)\n\n self.cancel = QPushButton('Cancel', self)\n self.cancel.setFixedSize(256, 128)\n self.cancel.setStyleSheet('''\n background-image: url(./pictures/cancel.png);\n border: none;\n ''')\n self.cancel.move(578, 470)\n self.cancel.clicked.connect(self.main_menu)\n self.mapmenu_items.append(self.cancel)", "def hlp(self):\r\n \r\n self.menubar.entryconfig(\"Help\", state = 'disabled')\r\n self.menubar.entryconfig(\"File\", state = 'disabled')\r\n self.hlpframe = LabelFrame(self.master, text = \"Help & About\", font = ('impact', 10), width = 580, height = 662)\r\n self.hlpframe.place(x = 5, y = 5)\r\n self.abt = LabelFrame(self.hlpframe, width = 566, height = 111)\r\n self.abt.place(x = 5)\r\n self.abtproj = Label(self.abt, text = \"A Program for citating web pages using APA 6th edition, commonly used in social sciences:\")\r\n self.abtproj.place(x=35, y = 20)\r\n self.projtitle = Label(self.abt, text = \"APA 6th Edition Web Citation Generator\", font = ('impact', 25))\r\n self.projtitle.place(x = 16, y = 40)\r\n self.how2use = LabelFrame(self.hlpframe, text = \"How to use?\", font = ('impact', 8), width = 566, height = 400)\r\n self.how2use.place(x = 5, y = 110)\r\n self.inst1 = Label(self.how2use, text = \"◾ Enter the required details first and click \\\"Create\\\":\", justify = LEFT)\r\n self.inst1.place(x = 1, y = 1)\r\n self.inst1auth = Label(self.how2use, text = \"▸ You can choose if the article's author is one or more author or a group of authors.\", justify = LEFT)\r\n self.inst1auth.place(x = 25, y = 20)\r\n self.inst1auth2 = Label(self.how2use, text = \"▸ By adding another author, enter the name of the first author to activate the \\\"+\\\" button to add \\n another one.\", justify = LEFT)\r\n self.inst1auth2.place(x=25, y = 40)\r\n self.inst1dates = Label(self.how2use, text = \"▸ Click the dropdown menus of Months, Days, and Years to choose for the article's date of \\n publication and the date of when did you retrieved it.\",justify = LEFT)\r\n self.inst1dates.place(x=25, y= 75)\r\n self.inst1datestip = Label(self.how2use, text = \"▸ If the article does not have a date of publication, just leave it blank or you can set it to none, \\n same goes for retrieval date if you don't want to include it.\",justify = LEFT)\r\n self.inst1datestip.place(x = 25, y = 113)\r\n self.inst2edit = Label(self.how2use, text = \"◾ In Editable result you can make changes on your created citation:\", justify = LEFT)\r\n self.inst2edit.place(x = 1, y = 160)\r\n self.inst2conf = Label(self.how2use, text = \"▸ Click \\\"Confirm\\\" to save changes and move on to Final result or \\\"Discard\\\" to go back to creating \\n a new citation.\",justify = LEFT)\r\n self.inst2conf.place(x=25, y = 180)\r\n self.inst3final = Label(self.how2use, text = \"◾ The Final result adds indents and handles the citations 
alphabetically:\",justify = LEFT)\r\n self.inst3final.place(x = 1, y = 225)\r\n self.inst3add = Label(self.how2use, text = \"▸ To add another citation to the reference list click \\\"Add another citation!\\\".\",justify = LEFT)\r\n self.inst3add.place(x = 25, y = 245)\r\n self.inst3dis = Label(self.how2use, text = \"▸ If you clicked \\\"Clear all\\\" it'll prompt you to clear the reference list, once confirmed it cannot \\n be undone.\", justify = LEFT)\r\n self.inst3dis.place(x = 25, y = 265)\r\n self.inst4sav = Label(self.how2use, text = \"◾ Saving the reference list by \\\".txt\\\" or \\\".doc\\\" type:\",justify = LEFT)\r\n self.inst4sav.place(x = 1, y = 310)\r\n self.inst4how = Label(self.how2use, text = \"▸ You can save the reference list by going to \\\"File\\\", \\\"Save as...\\\".\", justify=LEFT)\r\n self.inst4how.place(x = 25, y = 330)\r\n self.inst4note = Label(self.how2use, text = \"▸ Once the file has been saved, you can't make changes on it using the program.\", justify = LEFT)\r\n self.inst4note.place(x = 25, y = 350)\r\n self.lbl = LabelFrame(self.hlpframe, width = 566, height = 55)\r\n self.lbl.place(x = 5, y = 515)\r\n self.built = Label(self.lbl, text = \"Built using Python\", font = 'impact')\r\n self.built.place(x = 225)\r\n self.jp = Label(self.lbl, text = \"John Paul G. Zoleta Bachelor of Science in Computer Science College of Mary Immaculate\")\r\n self.jp.place(x =25, y = 25)\r\n\r\n self.ok = Button(self.hlpframe, text = \"Okay ⌐■_■\",activebackground = 'gray', command = self.hlpframeclear, width = 79, height = 3)\r\n self.ok.place(x = 6, y = 575)", "def _create_menu(self):\n\n self.quit_item.connect('activate', gtk.main_quit, gtk)\n\n self.menu.append(self.quit_item)\n self.status_icon.connect('popup-menu', show_menu, self.menu)", "def add_menus_and_status(self):\r\n# Adding the statusbar and menubar\r\n self.statusBar().showMessage(\"\")\r\n menubar = self.menuBar()\r\n# Adding the first menu to the menubar\r\n menu = menubar.addMenu(\"Menu\")\r\n# Adding the first \"choise\" in the menu\r\n contact_action = QAction(\"Contact\", self)\r\n contact_action.setStatusTip(\"Contact PyCrypt\")\r\n contact_action.triggered.connect(self.email)\r\n menu.addAction(contact_action)\r\n# Adding a separator line to the menu\r\n menu.addSeparator()\r\n# Adding the second \"choise\" to the menu\r\n exit_action = QAction(\"Exit\", self) # Create an exit action\r\n exit_action.setStatusTip(\"Click to exit the application\")\r\n exit_action.triggered.connect(self.close) # Close application when clicked\r\n exit_action.setShortcut(\"Ctrl+Q\") # Keyboard shortcut to exit app\r\n menu.addAction(exit_action)", "def create_menus( self ):\n\n self.saveAct = QAction( \"&Save\", self, shortcut=\"Ctrl+S\",\n triggered=self.save_database )\n self.exitAct = QAction( \"E&xit\", self, shortcut=\"Ctrl+Q\",\n triggered=self.close )\n\n self.aboutAct = QAction( \"&About\", self, triggered=self.about )\n\n self.aboutQtAct = QAction( \"About &Qt\", self,\n triggered=QApplication.instance().aboutQt )\n\n self.fileMenu = QMenu( \"&File\", self )\n self.fileMenu.addAction( self.saveAct )\n self.fileMenu.addAction( self.exitAct )\n\n self.helpMenu = QMenu( \"&Help\", self )\n self.helpMenu.addAction( self.aboutAct )\n self.helpMenu.addAction( self.aboutQtAct )\n\n self.menuBar().addMenu( self.fileMenu )\n self.menuBar().addMenu( self.helpMenu )", "def add_menubar(self):\n extractAction = QtGui.QAction(\"&Exit\", self)\n #extractAction.setShortcut(\"Ctrl+Q\")\n #extractAction.setStatusTip('Leave The 
App')\n extractAction.triggered.connect(self.close_application)\n\n self.mainMenu = self.menuBar()\n self.fileMenu = self.mainMenu.addMenu('&File')\n self.fileMenu.addAction(extractAction)\n\n databaseManager = QtGui.QAction(\"&Start Database Manager\", self)\n self.databaseMenu = self.mainMenu.addMenu('&Database')\n self.databaseMenu.addAction(databaseManager)\n\n self.motionSelectorMenu = self.mainMenu.addMenu('&Motion Selector')\n motionSelector = QtGui.QAction(\"&Select Motion ROI\", self)\n self.choose_ROI = self.motionSelectorMenu.addAction(motionSelector)\n motionSelector.triggered.connect(self.video_view.choose_ROI)", "def main_menu(self):\n return [SitemapEntry(self.config.options.mount_label.title(), '.')]" ]
[ "0.7228054", "0.7096764", "0.70145935", "0.6930015", "0.6892374", "0.68760407", "0.67835313", "0.66314065", "0.66196865", "0.65105206", "0.6458277", "0.6445279", "0.6445256", "0.6438403", "0.64325565", "0.63978887", "0.6381949", "0.637196", "0.63675284", "0.6361078", "0.63495255", "0.6296995", "0.6281093", "0.62478787", "0.62308383", "0.62206644", "0.62155646", "0.621453", "0.6193968", "0.61187065", "0.6083228", "0.607818", "0.6072228", "0.6066607", "0.6062008", "0.6051792", "0.6044734", "0.60443443", "0.6030706", "0.60302114", "0.6024032", "0.60111326", "0.59909517", "0.5989682", "0.59893507", "0.5986302", "0.59834737", "0.598176", "0.59731317", "0.59652835", "0.5965033", "0.5946077", "0.59354836", "0.5929277", "0.5925962", "0.5922356", "0.5898102", "0.58954513", "0.5893424", "0.5880681", "0.5856653", "0.58564913", "0.58457", "0.5843046", "0.5842748", "0.58424234", "0.58381176", "0.58354366", "0.58270717", "0.5819555", "0.5806265", "0.5804588", "0.5803803", "0.5790518", "0.5789496", "0.577788", "0.5768094", "0.5764783", "0.5759694", "0.5759462", "0.57516557", "0.5750583", "0.57397527", "0.57384896", "0.5738312", "0.5733133", "0.5731302", "0.5720719", "0.5710078", "0.57100105", "0.5709685", "0.5696106", "0.5681528", "0.56803375", "0.5679431", "0.5676677", "0.5673173", "0.56721604", "0.5666384", "0.56577957", "0.5646318" ]
0.0
-1
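Editor's note: the three bare values above close out the preceding row — a list of per-negative similarity scores, then the positive document's own score and rank (`-1` presumably meaning the positive did not appear in the ranked candidate list). A minimal sketch, assuming the dump has been parsed into ordinary Python values, of pairing negatives with their scores and ordering them hardest-first; the field names are assumptions, not taken from the dump itself:

```python
# Hypothetical parsed row tail: scores are stored as strings in the dump.
negative_scores = ["0.7228054", "0.7096764", "0.70145935"]  # truncated sample
document_score = 0.0   # assumed meaning: score assigned to the positive document
document_rank = -1     # assumed meaning: positive absent from the ranked list

# Order negative indices by score, highest (hardest) first.
ranked = sorted(range(len(negative_scores)),
                key=lambda i: float(negative_scores[i]),
                reverse=True)
print(ranked)  # -> [0, 1, 2] for this truncated sample
```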
Initialize the navigation bar
def get_html(self) -> List[ComponentMeta]: nav = dbc.Navbar( className="penn-medicine-header px-0", children=html.Div( className="d-flex align-items-center w-100", children=[ html.Div( className="px-3", style={"width": "320px"}, children=html.A( href="https://www.pennmedicine.org", className="penn-medicine-header__logo", title="Go to the Penn Medicine home page", ), ), html.Div( className="flex-fill", children=dbc.Container( children=[ dbc.NavbarBrand( children=html.H1( style={"font": "inherit", "margin": "0"}, children=_brand_text, ), href="/", ) ] + self.menu.component ), ), ], ), dark=True, fixed="top", color="", ) return [nav]
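Editor's note: the positive document for this query builds a fixed-top Bootstrap navbar with Dash. A minimal, self-contained sketch of mounting a similar `dbc.Navbar` in a Dash app; the brand text, colors, and layout below are illustrative placeholders rather than values from the source:

```python
# Assumes dash and dash-bootstrap-components are installed.
import dash
import dash_bootstrap_components as dbc
from dash import html

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])

navbar = dbc.Navbar(
    dbc.Container(
        dbc.NavbarBrand(
            html.H1("Brand", style={"font": "inherit", "margin": "0"}),
            href="/",
        )
    ),
    dark=True,
    fixed="top",   # same fixed-top pattern as the snippet above
    color="dark",
)

# Leave top margin so the fixed navbar does not cover the page body.
app.layout = html.Div([navbar, html.Div("page body", style={"marginTop": "80px"})])

if __name__ == "__main__":
    app.run_server(debug=True)  # app.run(...) on newer Dash releases
```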
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
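Editor's note: per the metadata, this row is intended for triplet-style objectives over (query, document, negatives). A minimal sketch, assuming a sentence-transformers-style training setup (the dump itself does not name a framework), of expanding one row into training triplets:

```python
# Assumption: sentence-transformers is the consumer; the dump only
# declares the (query, document, negatives) triplet objective.
from sentence_transformers import InputExample

def row_to_triplets(row):
    """Yield one (anchor, positive, negative) example per stored negative."""
    for neg in row["negatives"]:
        yield InputExample(texts=[row["query"], row["document"], neg])
```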
[ "def initialize(self):\n super(QtMainWindow, self).initialize()\n self.update_menu_bar()", "def init_menu_bar(self):\n # Load from file action\n load_action = QtGui.QAction(QtGui.QIcon('open.png'), '&From File...', self)\n load_action.setShortcut('Ctrl+L')\n load_action.setStatusTip('Load image from file to scan')\n load_action.triggered.connect(self._scan_file_image)\n\n # Continuous scanner mode\n live_action = QtGui.QAction(QtGui.QIcon('open.png'), '&Camera Capture', self)\n live_action.setShortcut('Ctrl+W')\n live_action.setStatusTip('Capture continuously from camera')\n live_action.triggered.connect(self._start_live_capture)\n\n # Exit Application\n exit_action = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)\n exit_action.setShortcut('Ctrl+Q')\n exit_action.setStatusTip('Exit application')\n exit_action.triggered.connect(QtGui.qApp.quit)\n\n # Open options dialog\n options_action = QtGui.QAction(QtGui.QIcon('exit.png'), '&Options', self)\n options_action.setShortcut('Ctrl+O')\n options_action.setStatusTip('Open Options Dialog')\n options_action.triggered.connect(self._open_options_dialog)\n\n # Create menu bar\n menu_bar = self.menuBar()\n file_menu = menu_bar.addMenu('&File')\n file_menu.addAction(exit_action)\n\n scan_menu = menu_bar.addMenu('&Scan')\n scan_menu.addAction(load_action)\n scan_menu.addAction(live_action)\n\n option_menu = menu_bar.addMenu('&Option')\n option_menu.addAction(options_action)", "def initialise_menu_bar(self):\n # Set up options to change SLAM type.\n slam_type_menu = self.menuBar().addMenu('SLAM Type')\n slam_type_ag = QtWidgets.QActionGroup(self, exclusive=True)\n slam_type_scan_matching = slam_type_ag.addAction(QtWidgets.QAction('Scan Matching', self, checkable=True))\n slam_type_hough = slam_type_ag.addAction(QtWidgets.QAction('Hough Landmarks', self, checkable=True))\n slam_type_ransac = slam_type_ag.addAction(QtWidgets.QAction('RANSAC Landmarks', self, checkable=True))\n slam_type_naive = slam_type_ag.addAction(QtWidgets.QAction('Naive', self, checkable=True))\n slam_type_scan_matching.triggered.connect(lambda x: self.set_slam_type(SlamMode.SCAN_MATCHING))\n slam_type_hough.triggered.connect(lambda x: self.set_slam_type(LandmarkMode.HOUGH))\n slam_type_ransac.triggered.connect(lambda x: self.set_slam_type(LandmarkMode.RANSAC))\n slam_type_naive.triggered.connect(lambda x: self.set_slam_type(SlamMode.NAIVE))\n slam_type_menu.addAction(slam_type_scan_matching)\n slam_type_menu.addAction(slam_type_hough)\n slam_type_menu.addAction(slam_type_ransac)\n slam_type_menu.addAction(slam_type_naive)\n self.menuBar().addMenu(slam_type_menu)\n\n # Set up miscellaneous options.\n options_menu = QtWidgets.QMenu(\"Options\", self)\n options_automatic = QtWidgets.QAction(\"Automatic\", options_menu, checkable=True)\n options_automatic.triggered.connect(self.set_automatic)\n options_menu.addAction(options_automatic)\n self.menuBar().addMenu(options_menu)\n\n # Set up options for the display.\n display_menu = QtWidgets.QMenu(\"Display Mode\", self)\n display_ag = QtWidgets.QActionGroup(self, exclusive=True)\n display_map = display_ag.addAction(QtWidgets.QAction('Map Distribution', self, checkable=True))\n display_prob = display_ag.addAction(QtWidgets.QAction('Probability Distribution', self, checkable=True))\n display_map.triggered.connect(lambda x: self.set_map_mode(MapMode.DIST))\n display_prob.triggered.connect(lambda x: self.set_map_mode(MapMode.PROB))\n display_menu.addAction(display_map)\n display_menu.addAction(display_prob)\n 
self.menuBar().addMenu(display_menu)\n\n # Set up options for how the robot is followed.\n tracking_menu = QtWidgets.QMenu(\"Tracking Mode\", self)\n tracking_ag = QtWidgets.QActionGroup(self, exclusive=True)\n tracking_free = tracking_ag.addAction(QtWidgets.QAction('Free', self, checkable=True))\n tracking_adjusted = tracking_ag.addAction(QtWidgets.QAction('Adjusted Robot', self, checkable=True))\n tracking_raw = tracking_ag.addAction(QtWidgets.QAction('Raw Robot', self, checkable=True))\n tracking_free.triggered.connect(lambda x: self.set_tracking_mode(TrackingMode.FREE))\n tracking_adjusted.triggered.connect(lambda x: self.set_tracking_mode(TrackingMode.ADJUSTED))\n tracking_raw.triggered.connect(lambda x: self.set_tracking_mode(TrackingMode.STATE))\n tracking_menu.addAction(tracking_raw)\n tracking_menu.addAction(tracking_free)\n tracking_menu.addAction(tracking_adjusted)\n self.menuBar().addMenu(tracking_menu)\n\n # Set up the map display mode.\n map_menu = QtWidgets.QMenu(\"Map Display Mode\", self)\n map_ag = QtWidgets.QActionGroup(self, exclusive=True)\n map_local = map_ag.addAction(QtWidgets.QAction('Local', self, checkable=True))\n map_global = map_ag.addAction(QtWidgets.QAction('Global', self, checkable=True))\n map_local.triggered.connect(lambda x: self.set_display_mode(self.grid.view_mode.LOCAL))\n map_global.triggered.connect(lambda x: self.set_display_mode(self.grid.view_mode.ADJUSTED))\n map_menu.addAction(map_local)\n map_menu.addAction(map_global)\n self.menuBar().addMenu(map_menu)\n\n # Set up the probability display mode.\n probability_menu = QtWidgets.QMenu(\"Probability Mode\", self)\n probability_ag = QtWidgets.QActionGroup(self, exclusive=True)\n probability_prior = probability_ag.addAction(QtWidgets.QAction('Prior Probabilities', self, checkable=True))\n probability_slam = probability_ag.addAction(QtWidgets.QAction('SLAM Probabilities', self, checkable=True))\n probability_combined = probability_ag.addAction(\n QtWidgets.QAction('Combined Probabilities', self, checkable=True))\n probability_prior.triggered.connect(\n lambda x: self.set_probability_mode(self.grid.probability_mode.PRIOR_PROBABILITIES))\n probability_slam.triggered.connect(\n lambda x: self.set_probability_mode(self.grid.probability_mode.SLAM_PROBABILITIES))\n probability_combined.triggered.connect(\n lambda x: self.set_probability_mode(self.grid.probability_mode.COMBINED_PROBABILITIES))\n probability_menu.addAction(probability_prior)\n probability_menu.addAction(probability_slam)\n probability_menu.addAction(probability_combined)\n self.menuBar().addMenu(probability_menu)\n\n # Set up the probability slider.\n probability_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n probability_slider.setMinimum(0)\n probability_slider.setMaximum(100)\n probability_slider.setValue(50)\n probability_slider.setTickPosition(QtWidgets.QSlider.TicksBelow)\n probability_slider.setTickInterval(5)\n probability_slider.valueChanged.connect(self.set_probability_alpha)\n\n # Add the slider, status bar and title.\n self.statusBar.addWidget(probability_slider)\n self.setWindowTitle(\"SLAM\")\n self.setStatusBar(self.statusBar)", "def initialize_navigation(self):\n # dynamic navigation\n if not self.is_static:\n self.add_uniform(\"scale\", vartype=\"float\", ndim=2, data=(1., 1.))\n self.add_uniform(\"translation\", vartype=\"float\", ndim=2, data=(0., 0.))\n \n self.add_vertex_header(\"\"\"\n // Transform a position according to a given scaling and translation.\n vec2 transform_position(vec2 position, vec2 scale, vec2 
translation)\n {\n return scale * (position + translation);\n }\n \"\"\")\n \n if not self.is_static: \n pos = \"transform_position(%s.xy, scale, translation)\" % self.position_attribute_name\n else:\n pos = \"%s.xy\" % self.position_attribute_name\n \n if self.is_position_3D:\n vs = \"\"\"gl_Position = vec4(%s, %s.z, 1.);\"\"\" % (pos,\n self.position_attribute_name)\n else:\n vs = \"\"\"gl_Position = vec4(%s, 0., 1.);\"\"\" % (pos)\n \n if self.depth is not None:\n vs += \"\"\"gl_Position.z = %.4f;\"\"\" % self.depth\n \n self.add_vertex_main(vs, position='last', name='navigation')", "def _init(self):\n self.wx_menu = wx.Menu()", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def setNavigation(*args):", "def controls_setup(self):\n\n self.inbox = element.NavigationTab(self, css_selector='.messaging a.nav-inbox', alias='Inbox Tab')\n self.sent = element.NavigationTab(self, css_selector='.messaging a.nav-sent', alias='Sent Tab')\n self.write = element.NavigationTab(self, css_selector='.messaging a.nav-write', alias='Write Tab')\n self.archives = element.NavigationTab(self, css_selector='.messaging a.nav-archive', alias='Archives Tab')\n self.trash = element.NavigationTab(self, css_selector='.messaging a.nav-trash', alias='Trash Tab')", "def initialize_default(self):\n self.initialize_navigation()\n self.initialize_viewport()", "def _setup_status_bar(self):\n self.status = NSStatusBar.systemStatusBar().statusItemWithLength_(NSVariableStatusItemLength)\n self.status.setMenu_(self.status_menu)\n self.status.setTitle_(\"Core\")\n self.status.setHighlightMode_(objc.YES)", "def initMenu(self):\n self.fileMenu = self.menuBar().addMenu(self.tr(\"&File\"))\n self.fileMenu.addAction(self.createProjectAction)\n self.fileMenu.addAction(self.openProjectAction)\n\n #TODO : problem displaying submenu\n #self.recentMenu = self.fileMenu.addMenu(self.tr(\"Open &recent\"))\n #for recentProject in self._controller.getSession().recentProjects():\n #recentAction = QtGui.QAction(self.tr(str(recentProject.getPath())), self)\n #self.connect(recentAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"openRecent(recentProject.getPath())\"))\n #self.recentMenu.addAction(recentAction)\n\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.importVideoAction)\n self.fileMenu.addSeparator()\n self.fileMenu.addAction(self.saveProjectAction)\n\n self.helpMenu = self.menuBar().addMenu(self.tr(\"&Help\"))\n self.helpMenu.addAction(self.aboutAction)", "def _initMenuBar(self):\n\t\tmenubar = wx.MenuBar()\n\t\tfileMenu = wx.Menu()\n\t\t\n\t\t# File -> Open\n\t\topenItem = wx.MenuItem(fileMenu, wx.ID_OPEN)\n\t\tfileMenu.Append(openItem)\n\t\tself.Bind(wx.EVT_MENU, self.OnOpen, openItem)\n\n\t\t# File -> Save\n\t\t#saveItem = wx.MenuItem(fileMenu, wx.ID_SAVE)\n\t\t#fileMenu.Append(saveItem)\n\t\t#self.Bind(wx.EVT_MENU, self.OnSave, saveItem)\n\t\t\n\t\tfileMenu.AppendSeparator()\n\t\t\n\t\t# File -> Quit\n\t\tquitItem = wx.MenuItem(fileMenu, wx.ID_EXIT)\n\t\tfileMenu.Append(quitItem)\n\t\tself.Bind(wx.EVT_MENU, self.OnQuit, quitItem)\n\t\t\n\t\t# Connect the menu bar\n\t\tmenubar.Append(fileMenu, \"&File\")\n\t\tself.SetMenuBar(menubar)", "def header_nav(self):\r\n\r\n menu_stack = PaneStack()\r\n\r\n # Ensure the default button is the first tab\r\n #default_button_name = 
c.site.default_listing\r\n\r\n main_buttons = [\r\n ExpandableButton('main', dest = '/promoted', sr_path = False, sub_menus =\r\n [ NamedButton('posts', dest = '/promoted', sr_path = False),\r\n NamedButton('comments', dest = '/comments', sr_path = False)]),\r\n ExpandableButton('discussion', dest = \"/r/discussion/new\", sub_reddit = \"/r/discussion/\", sub_menus =\r\n [ NamedButton('posts', dest = \"/r/discussion/new\", sr_path = False),\r\n NamedButton('comments', dest = \"/r/discussion/comments\", sr_path = False)])\r\n ]\r\n\r\n menu_stack.append(NavMenu(main_buttons, title = _('Filter by'), _id='nav', type='navlist'))\r\n\r\n\r\n if self.header_sub_nav:\r\n menu_stack.append(NavMenu(self.header_sub_nav, title = _('Filter by'), _id='filternav', type='navlist'))\r\n\r\n return menu_stack", "def init_helpmenu(self):\n self.menubar[\"helpmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"helpmenu\"].add_command(label=\"Help Index\", command=todo)\n self.menubar[\"helpmenu\"].add_command(label=\"About...\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Help\", menu=self.menubar[\"helpmenu\"])", "def controls_setup(self):\n\n self.to_homepage = element.Link(self, class_name='nav-home', alias='Return to site Link')\n self.account = element.Link(self, class_name='nav-account', alias='Account Profile Link')\n self.logout = element.Link(self, class_name='nav-logout', alias='Logout Link')", "def controls_setup(self):\n\n self.login = element.Link(self, class_name='nav-login', alias='Navbar->Login Link')\n self.register = element.Link(self, class_name='nav-register', alias='Navbar->Register Link')\n self.logout = element.Link(self, class_name='nav-logout', alias='Navbar->Logout Link')\n self.be_a_merchant = element.Link(self, class_name='nav-partner-join',\n alias='Navbar->Be A Merchant Link')\n self.wishlist = element.Link(self, class_name='nav-wishlist', alias='Navbar->Wishlist Icon Button')\n self.cart = element.Link(self, class_name='nav-cart', alias='Navbar->Cart Icon Button')\n self.account = element.Link(self, css_selector='a.nav-account', alias='Navbar->Account Link')\n self.messages = element.Link(self, class_name='nav-messages', alias='Navbar->Messages Link')\n self.dashboard = element.Link(self, class_name='nav-dashboard', alias='Navbar->Dashboard Link')\n self.messages_count = element.Element(self, dom_id='postman_unread_count',\n alias='Navbar->Unread Messages Count Label')\n\n self.search_query = element.TextBox(self, name='q', alias='Navbar->Search Box')\n self.search_button = element.Button(self, css_selector='form.search button[type=submit]',\n alias='Navbar->Search Icon Button')", "def __init__(self):\r\n super().__init__()\r\n self._setupSideMenu()", "def navigation(self):\r\n self._navigation = [\r\n ('home', 'Home', self.request.route_path('home'),),\r\n None,\r\n \"Database contents\",\r\n ('list_parts', 'List of parts', self.request.route_path('list_parts', page=1),),\r\n ('list_packages', 'List of packages', self.request.route_path('list_packages', page=1),),\r\n ('list_manufacturers', 'List of manufacturers', self.request.route_path('list_manufacturers', page=1),),\r\n None,\r\n \"Administrative\",\r\n ('add_part', 'Add part', self.request.route_path('add_part'),),\r\n ('storage', 'Storage', self.request.route_path('storage'),),\r\n ]\r\n\r\n self.navigation_hook()\r\n return self._navigation", "def menuBar(self):\n\n\t\tmenu = Menu(self.master)\n\t\tself.master.config(menu=menu)\n\n\t\t#File Menu\n\t\tfyle = 
Menu(menu)\n\t\tfyle.add_command(label='New',command=self.newPad)\n\t\tfyle.add_command(label='Open File',command=self.open_file)\n\t\tfyle.add_command(label='Save', command=self.saveFile)\n\t\tfyle.add_command(label='Save As',command=self.saveFileAs)\n\t\tfyle.add_command(label='Exit', command=outer.destroy)\n\t\tmenu.add_cascade(label='File',menu=fyle)\n\n\t\t#Edit Menu\n\t\tedit = Menu(menu)\n\t\tedit.add_command(label='Cut')\n\t\tedit.add_command(label='Copy')\n\t\tedit.add_command(label='Paste')\n\t\tedit.add_command(label='Undo')\n\t\tedit.add_command(label='Redo')\n\t\tmenu.add_cascade(label='Edit',menu=edit)\n\n\t\t#View Menu\n\t\tview = Menu(menu)\n\t\tview.add_command(label='Line Numbers')\n\t\tmenu.add_cascade(label='View', menu=view)\n\n\n\t\t#Help Menu\n\t\thelp = Menu(menu)\n\t\thelp.add_command(label='About')\n\t\tmenu.add_cascade(label='Help',menu=help)", "def set_navigation(self):\n self.start_button.controlUp(self.osc_button)\n self.start_button.controlLeft(self.osc_button)\n self.osc_button.controlDown(self.start_button)\n self.osc_button.controlRight(self.start_button)\n self.start_button.controlDown(self.close_button)\n self.close_button.controlLeft(self.start_button)\n\t # Set initial focus.\n self.setFocus(self.start_button)", "def open_navbar(self, title: Optional[str] = None):\n self._open_layouting.append(\"AppBar\")\n self._client.set_appbar(AppBar(title=Text(title)))", "def navigation_hook(self):\r\n pass", "def set_navigation(self):\n self.start_button.controlRight(self.close_button)\n self.start_button.controlLeft(self.htsp_button)\n self.start_button.controlUp(self.htsp_button)\n self.htsp_button.controlRight(self.tvh_button)\n self.tvh_button.controlLeft(self.htsp_button)\n self.htsp_button.controlDown(self.start_button)\n self.tvh_button.controlDown(self.start_button)\n self.close_button.controlLeft(self.start_button)\n\t # Set initial focus.\n self.setFocus(self.start_button)", "def initialize(self):\n self.setWindowTitle(\"Playlist Maker\")\n self.setGeometry(0,0, 800, 494)\n self.mbox = QVBoxLayout()\n self.hbox = QHBoxLayout()\n self.hbtnbox = QHBoxLayout()", "def __setupMenu(self):\n quit_action = QAction(\"&Exit\", self)\n quit_action.setShortcut('Ctrl+Q')\n quit_action.triggered.connect(self.close)\n\n sign_out_action = QAction(\"Sign out\", self)\n sign_out_action.setShortcut('Ctrl+L')\n sign_out_action.triggered.connect(lambda: (self.app.logOut(), self.hide(), self.requestCredentials()))\n\n change_password_action = QAction(\"Change password\", self)\n change_password_action.triggered.connect(self.requestPasswordChange)\n\n about_action = QAction(\"About\", self)\n about_action.triggered.connect(lambda: QMessageBox.about(self, \"About\", u'© ' + __author__ + ' 2013'))\n\n self.file_menu = self.menuBar().addMenu(\"&File\")\n self.file_menu.addAction(quit_action)\n\n self.account_menu = self.menuBar().addMenu(\"&Account\")\n self.account_menu.addAction(sign_out_action)\n self.account_menu.addAction(change_password_action)\n\n self.help_menu = self.menuBar().addMenu(\"&Help\")\n self.help_menu.addAction(about_action)", "def controls_setup(self):\n\n self.new_items = element.TopMenuLink(self, alias='New Items Menu',\n css_selector='#browse > li:nth-child(1) > a')\n if self.fixed:\n self.jewelry = element.TopMenuLink(self, alias='Jewelry Menu',\n css_selector='#browse > li:nth-child(2) > a')\n self.artwork = element.TopMenuLink(self, alias='Artwork Menu',\n css_selector='#browse > li:nth-child(3) > a')\n self.homeware = element.TopMenuLink(self, 
alias='Homeware Menu',\n css_selector='#browse > li:nth-child(4) > a')\n self.clothing = element.TopMenuLink(self, alias='Clothing Menu',\n css_selector='#browse > li:nth-child(5) > a')\n self.vintage = element.TopMenuLink(self, alias='Vintage Menu',\n css_selector='#browse > li:nth-child(6) > a')\n self.food = element.TopMenuLink(self, alias='Food Menu',\n css_selector='#browse > li:nth-child(7) > a')\n self.rings = element.SubMenuLink(self, alias='Jewelry->Rings Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(1) > a')\n self.necklaces = element.SubMenuLink(self, alias='Jewelry->Necklaces Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(2) > a')\n self.bracelets = element.SubMenuLink(self, alias='Jewelry->Bracelets Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(3) > a')\n self.earrings = element.SubMenuLink(self, alias='Jewelry->Earrings Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(4) > a')\n self.broaches = element.SubMenuLink(self, alias='Jewelry->Broaches and Pins Submenu',\n css_selector='#browse > li:nth-child(2) > ul > li:nth-child(5) > a')\n self.wall_art = element.SubMenuLink(self, alias='Artwork->Wall Art Submenu',\n css_selector='#browse > li:nth-child(3) > ul > li:nth-child(1) > a')\n self.ceramics = element.SubMenuLink(self, alias='Jewelry->Ceramics Submenu',\n css_selector='#browse > li:nth-child(3) > ul > li:nth-child(2) > a')\n self.sculpture = element.SubMenuLink(self, alias='Jewelry->Sculpture Submenu',\n css_selector='#browse > li:nth-child(3) > ul > li:nth-child(3) > a')\n self.living = element.SubMenuLink(self, alias='Homeware->Living Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(1) > a')\n self.furniture = element.SubMenuLink(self, alias='Homeware->Furniture Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(2) > a')\n self.kitchen = element.SubMenuLink(self, alias='Homeware->Kitchen Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(3) > a')\n self.lighting = element.SubMenuLink(self, alias='Homeware->Lighting Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(4) > a')\n self.outdoor = element.SubMenuLink(self, alias='Homeware->Outdoor Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(5) > a')\n self.organize = element.SubMenuLink(self, alias='Homeware->Organize Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(6) > a')\n self.party = element.SubMenuLink(self, alias='Homeware->Party Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(7) > a')\n self.sub_food = element.SubMenuLink(self, alias='Homeware->Food Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(8) > a')\n self.pets = element.SubMenuLink(self, alias='Homeware->For Pets Submenu',\n css_selector='#browse > li:nth-child(4) > ul > li:nth-child(9) > a')\n self.women = element.SubMenuLink(self, alias='Clothing->Women Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(1) > a')\n self.men = element.SubMenuLink(self, alias='Clothing->Men Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(2) > a')\n self.kids = element.SubMenuLink(self, alias='Clothing->Kids Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(3) > a')\n self.pet_lovers = element.SubMenuLink(self, alias='Clothing->Pet Lovers Submenu',\n css_selector='#browse > li:nth-child(5) > ul > li:nth-child(4) > a')\n self.antiques 
= element.SubMenuLink(self, alias='Vintage->Antiques Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(1) > a')\n self.accessories = element.SubMenuLink(self, alias='Vintage->Accessories Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(2) > a')\n self.sub_clothing = element.SubMenuLink(self, alias='Vintage->Clothing Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(3) > a')\n self.sub_homeware = element.SubMenuLink(self, alias='Vintage->Homeware Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(4) > a')\n self.toys = element.SubMenuLink(self, alias='Vintage->Toys Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(5) > a')\n self.books = element.SubMenuLink(self, alias='Vintage->Books Submenu',\n css_selector='#browse > li:nth-child(6) > ul > li:nth-child(6) > a')\n else:\n self.menus = []", "def set_navigation(self):\n self.start_button.controlRight(self.close_button)\n self.close_button.controlLeft(self.start_button)\n\t # Set initial focus.\n self.setFocus(self.start_button)", "def define_nav_elements(self):\n return [\n TabTip(app=self.app),\n TabIris(app=self.app),\n InstructionsTab(app=self.app),\n ]", "def __init__(self):\n self.window = Tk() # The main window\n self.__initialize_variables__() # Initialize the variables\n self.__initialize_menu__() # Initialize the Menu\n self.__initialize_status_bar__()\n self.__initialize_gui__() # Initialize the GUI widgets", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def __init__(self,parent,pageNames=[],**kw):\n Frame.__init__(self, parent, kw)\n self.grid_location(0,0)\n self.columnconfigure(0,weight=1)\n self.rowconfigure(1,weight=1)\n self.tabBar=Frame(self)\n self.tabBar.grid(row=0,column=0,sticky=EW)\n self.activePage=StringVar(self)\n self.defaultPage=''\n self.pages={}\n for name in pageNames:\n self.AddPage(name)", "def set_navigation(self):\n self.close_button.controlUp(self.nos_button)\n self.nos_button.controlDown(self.madeira_button)\n self.nos_button.controlRight(self.nowo_button)\n self.nowo_button.controlDown(self.madeira_button)\n self.nowo_button.controlLeft(self.nos_button)\n self.madeira_button.controlUp(self.nos_button)\n self.madeira_button.controlDown(self.close_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def nav(self):\n if self._nav is None:\n self._loads()\n return self._nav", "def initialize(self):\n super(QtTreeView, self).initialize()\n self.set_header_visible(self.shell_obj.header_visible)", "def controls_setup(self):\n self.dashboard = element.Link(self, class_name='menu-dashboard', alias='Dashboard Menu')\n # catalogue and sub-menus\n self.catalogue = element.LinkButton(self, class_name='menu-catalogue', alias='Catalogue Menu')\n self.products = element.Link(self, class_name='submenu-products', alias='Catalogue->Products Menu')\n self.product_types = element.Link(self, class_name='submenu-product-types',\n alias='Catalogue->Product Types Menu')\n self.categories = element.Link(self, class_name='submenu-categories', alias='Catalogue->Categories Menu')\n self.ranges = element.Link(self, class_name='submenu-ranges', alias='Catalogue->Ranges Menu')\n self.stock_alerts = element.Link(self, class_name='submenu-low-stock-alerts',\n alias='Catalogue->Low Stock Alerts Menu')\n # fulfilment and sub-menus\n self.fulfilment = element.LinkButton(self, class_name='menu-fulfilment', alias='Fulfilment Menu')\n self.orders = 
element.Link(self, class_name='submenu-orders', alias='Fulfilment->Orders Menu')\n self.statistics = element.Link(self, class_name='submenu-statistics', alias='Fulfilment->Statistics Menu')\n self.partners = element.Link(self, class_name='submenu-partners', alias='Fulfilment->Partners Menu')\n # customers and sub-menus\n self.customers = element.LinkButton(self, class_name='menu-customers', alias='Customers Menu')\n self.customers_submenu = element.Link(self, class_name='submenu-customers', alias='Customers->Customers Menu')\n self.deleted_accounts = element.Link(self, class_name='submenu-deleted_accounts',\n alias='Customers->Deleted Accounts Menu')\n self.stock_alert_requests = element.Link(self, class_name='submenu-stock-alert-requests-customers',\n alias='Customers->Stock Alert Requests Menu')\n # offers and sub-menus\n self.offers = element.LinkButton(self, class_name='menu-offers', alias='Offers Menu')\n self.offers_submenu = element.Link(self, class_name='submenu-offers', alias='Offers->Offers Menu')\n self.vouchers = element.Link(self, class_name='submenu-vouchers', alias='Offers->Vouchers Menu')\n # content and sub-menus\n self.content_menu = element.LinkButton(self, class_name='menu-content', alias='Content Menu')\n self.pages = element.Link(self, class_name='menu-pages', alias='Content->Pages Menu')\n self.announcements = element.Link(self, class_name='menu-announcements', alias='Content->Announcements Menu')\n self.reviews = element.Link(self, class_name='menu-reviews', alias='Content->Reviews Menu')\n\n self.reports = element.Link(self, class_name='menu-reports', alias='Reports Menu')\n self.store_info = element.LinkButton(self, css_selector='.dropdown.store > a', alias='Store Info Menu')\n self.partner_store = []", "def add_navnodes(self):\n pass", "def create_menu_bar(self):\n\n\t\t# menus\n\t\tself.__app_menu = QMenu('App')\n\t\t# actions\n\t\tself.__clr_logs = QAction('Clear Logs')\n\t\tself.__quit = QAction('Quit')\n\t\tself.__hide = QAction('Hide')\n\n\t\tself.__quit.triggered.connect(self.exit_app)\n\t\tself.__hide.triggered.connect(self.set_visible)\n\n\t\tself.__app_menu.addActions([self.__clr_logs, self.__hide, self.__quit])\n\t\tself.menuBar().addMenu(self.__app_menu)", "def init_mainmenu(self):\n\n self.play = QPushButton('PLAY!', self)\n self.play.setFixedSize(312, 144)\n self.play.adjustSize()\n self.play.setStyleSheet('''\n background-image: url(./pictures/console_button.png);\n border: none;\n ''')\n self.play.move(520, 180)\n self.play.clicked.connect(self.map_menu)\n self.mainmenu_items.append(self.play)\n\n self.quit = QPushButton('', self)\n self.quit.setFixedSize(192, 192)\n self.quit.setStyleSheet('''\n background-image: url(./pictures/quit.png);\n border: none;\n ''')\n self.quit.move(575, 400)\n self.quit.clicked.connect(exit)\n self.mainmenu_items.append(self.quit)\n\n self.show()", "def set_navigation(self):\n self.close_button.controlUp(self.hispasat_button)\n self.hispasat_button.controlDown(self.close_button)\n self.hispasat_button.controlRight(self.astra_button)\n self.astra_button.controlRight(self.hotbird_button)\n self.astra_button.controlDown(self.close_button)\n self.hotbird_button.controlLeft(self.astra_button)\n self.hotbird_button.controlDown(self.close_button)\n self.astra_button.controlLeft(self.hispasat_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def add_menubar(self):\n extractAction = QtGui.QAction(\"&Exit\", self)\n #extractAction.setShortcut(\"Ctrl+Q\")\n #extractAction.setStatusTip('Leave The App')\n 
extractAction.triggered.connect(self.close_application)\n\n self.mainMenu = self.menuBar()\n self.fileMenu = self.mainMenu.addMenu('&File')\n self.fileMenu.addAction(extractAction)\n\n databaseManager = QtGui.QAction(\"&Start Database Manager\", self)\n self.databaseMenu = self.mainMenu.addMenu('&Database')\n self.databaseMenu.addAction(databaseManager)\n\n self.motionSelectorMenu = self.mainMenu.addMenu('&Motion Selector')\n motionSelector = QtGui.QAction(\"&Select Motion ROI\", self)\n self.choose_ROI = self.motionSelectorMenu.addAction(motionSelector)\n motionSelector.triggered.connect(self.video_view.choose_ROI)", "def _initializeMenus(self):\n menubar = wx.MenuBar()\n\n fileMenuData = [(wx.ID_NEW, 'New Experiment', None, self._onNew),\n (wx.ID_OPEN, 'Open Experiment', None, self._onOpen),\n (ID_PREMADE, 'Open Premade', None, self._onPremade),\n (wx.ID_EXIT, 'Exit', None, self._onExit)]\n gh.createMenu(self, menubar, '&File', fileMenuData)\n \n toolsMenu = wx.Menu()\n controllerMenu = wx.Menu()\n keys = list(self.controllerIDs.keys())\n keys.sort()\n for key in keys:\n currId = self.controllerIDs[key].GetValue()\n controllerMenu.Append(currId, key)\n self.Bind(wx.EVT_MENU, self._onController, id=currId)\n toolsMenu.AppendSubMenu(controllerMenu, 'Controllers')\n toolsMenu.AppendSeparator()\n toolsMenu.Append(ID_SETTINGS, 'Settings', 'Edit software settings')\n toolsMenu.Append(ID_USERS.GetValue(), 'Users', 'Edit the list of users')\n self.btnUserSettings = toolsMenu.Append(ID_USER_SETTINGS, \n 'User settings', \n 'Edit the preferences for the currently-selected user')\n self.Bind(wx.EVT_MENU, self._onSettings, id=ID_SETTINGS)\n self.Bind(wx.EVT_MENU, self._onUsers, id=ID_USERS)\n self.Bind(wx.EVT_MENU, self._onUserSettings, id=ID_USER_SETTINGS)\n menubar.Append(toolsMenu, '&Tools')\n\n helpMenuData = [(wx.ID_HELP, 'Help', 'View software help',\n self.onHelp),\n (wx.ID_ABOUT, 'About', 'About', self.onAbout),\n (ID_CHANGELOG, 'Change log', 'View revision history',\n self._onChangeLog)]\n gh.createMenu(self, menubar, '&Help', helpMenuData)\n\n self.SetMenuBar(menubar)", "def set_navigation(self):\n self.close_button.controlUp(self.wp_button)\n self.wp_button.controlDown(self.close_button)\n self.wp_button.controlRight(self.wp2_button)\n self.wp2_button.controlDown(self.close_button)\n self.wp2_button.controlLeft(self.wp_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def onInit(self):\n self.list_control = self.getControl(6)\n self.getControl(1).setLabel(self.window_title)\n self.getControl(3).setVisible(False)\n try:\n self.getControl(7).setLabel(xbmc.getLocalizedString(222))\n except Exception:\n pass\n\n self.getControl(5).setVisible(False)\n\n # add our items to the listing and focus the control\n self.list_control.addItems(self.listing)\n self.setFocus(self.list_control)", "def get_nav_bar(session):\n result = '<nav>'\n result += ' <ul>'\n result += ' <li><h1 id=\"title\">Beelance2</h1></li>'\n result += ' <li><a href=\"/\">Home</a></li>'\n if session.username:\n result += ' <li><a href=\"logout\">Logout</a></li>'\n result += ' <li><a href=\"new_project\">New</a></li>'\n result += ' <li><a href=\"change_password\">Change Password</a></li>'\n else:\n result += ' <li><a href=\"register\">Register</a></li>'\n result += ' <li><a href=\"login\">Login</a></li>'\n result += ' <li><a href=\"open_projects\">Projects</a></li>'\n result += ' </ul>'\n result += '</nav>'\n return result", "def init_toolbar(self):\n raise NotImplementedError", "def create_status_bar(self):\n 
self.statusbar = self.CreateStatusBar()", "def _initialize_main_window_menu(self, lang='en-US'):\n\n self.option_add('*tearOff', FALSE)\n self.menubar = Menu(self)\n self.config(menu=self.menubar)\n self.filemenu = Menu(self.menubar)\n self.menubar.add_cascade(label=LOCALIZED_TEXT[lang]['File'], \\\n menu=self.filemenu)\n self.filemenu.add_command(label=\\\n LOCALIZED_TEXT[lang]['Load project settings'], \\\n command=self._on_click_f0_next)\n self.filemenu.add_command(label=LOCALIZED_TEXT[lang]['Save'], \\\n command=self._on_save_project)\n self.filemenu.add_command(label=\\\n LOCALIZED_TEXT[lang]['Delete project settings'], \\\n command=self._on_del_project)\n self.filemenu.add_separator()\n self.filemenu.add_command(label=LOCALIZED_TEXT[lang]['Exit'], \\\n command=self.quit)\n\n self.helpmenu = Menu(self.menubar)\n self.menubar.add_cascade(label=LOCALIZED_TEXT[lang]['Help'], \\\n menu=self.helpmenu)\n self.helpmenu.add_command(label=LOCALIZED_TEXT[lang]['Read Me'], \\\n command=self._on_read_me)\n self.helpmenu.add_command(label=LOCALIZED_TEXT[lang]['About...'], \\\n command=on_copyright)", "def createMenubar(self):\r\n # Create menubar\r\n self.menubar = tk.Menu(tearoff=False)\r\n self.root.config(menu=self.menubar)\r\n filemenu = tk.Menu(self.menubar,tearoff=False)\r\n filemenu.add_command(label=\"Edit Video/fNIRS Sources\",command=self.launchImportWindow)\r\n filemenu.add_command(label=\"Synchronise Video/fNIRS\",command=self.launchSyncToolWindow)\r\n filemenu.add_command(label=\"Help\",command=self.launchHelpWindow)\r\n filemenu.add_command(label=\"Quit\",command=self.quit)\r\n self.menubar.add_cascade(label=\"Project\",menu=filemenu)", "def navigation(self, **args):\n return self.pageConfig['navigation'] % self.pageConfig", "def init_widget(self):\n self._build_config()\n self._raw_toolbar.initToolbar(self.config)", "def set_navigation(self):\n self.close_button.controlUp(self.kvimvtv_button)\n self.kvimvtv_button.controlDown(self.close_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def _initUI(self) -> None:\n self._createActions()\n self._addActionsToMoveButtons()\n self._createToolBar()\n self._createStatusBar()\n self._createMainContextMenu()", "def initUI(self):\n\n self.wid = RosGenWidget()\n self.setCentralWidget(self.wid)\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&Файл')\n editMenu = menubar.addMenu('&Редактирование')\n self.create_menu_par('Менеджер подписчиков и издателей', self.wid.show_manager, fileMenu, 'Ctrl+M')\n self.create_menu_par('Очистить', self.wid.clear_all_lines, editMenu, 'Ctrl+D')\n self.create_menu_par('Загрузить данные из...', self.wid.open_fileDialog, fileMenu, 'Ctrl+F')\n self.create_menu_par('Сохранить как...', self.wid.save_fileDialog, fileMenu, 'Ctrl+S')\n self.create_menu_par('Выход', self.exit_app, fileMenu, 'Esc')\n self.statusbar = self.statusBar()\n self.statusbar.showMessage('Ожидание данных')\n self.wid.msg2Statusbar[str].connect(self.statusbar.showMessage)\n self.setGeometry(600, 200, 700, 400)\n self.setWindowTitle('Генератор шаблонов ROS-приложения')\n self.show()", "def create_menu_bar(self, parent_layout):\n self.menu_bar = QtWidgets.QMenuBar()\n # all menu bar tabs ===============\n # File -------------------\n file_menu = self.menu_bar.addMenu(\"File\")\n file_menu.setTearOffEnabled(1)\n\n # Menu bar actions ===============\n # File\n file_menu.addAction(self.export_settings_action)\n file_menu.addAction(self.import_settings_action)\n\n # Adding to the Layout ===============\n 
parent_layout.setMenuBar(self.menu_bar)", "def set_up_menu(self):\n self.app.title = \"work\"\n self.timer.start()", "def set_navigation(self):\n self.close_button.controlUp(self.tdt_button)\n self.tdt_button.controlDown(self.close_button)\n self.tdt_button.controlRight(self.meo_button)\n self.meo_button.controlRight(self.vodafone_button)\n self.meo_button.controlDown(self.close_button)\n self.vodafone_button.controlLeft(self.meo_button)\n self.vodafone_button.controlDown(self.close_button)\n self.meo_button.controlLeft(self.tdt_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def makeMenu(self):\n\t\tself.fileMenu = self.menuBar().addMenu(self.tr(\"&Arquivo\"))\n\t\tself.fileMenu.addAction(self.newAct)\n\t\tself.fileMenu.addAction(self.openAct)\n\t\tself.fileMenu.addAction(self.saveAct)\n\t\tself.fileMenu.addAction(self.exportAct)\n\t\tself.fileMenu.addSeparator() \n\t\tself.fileMenu.addAction(self.exitAct)\n\n\t\tself.editMenu = self.menuBar().addMenu(self.tr(\"&Editar\"))\n\t\t\n\t\tself.helpMenu = self.menuBar().addMenu(self.tr(\"&Ajuda\"))\n\t\tself.helpMenu.addAction(self.aboutAct)", "def main_nav():\n return render_template('home.html')", "def set_navigation(self):\n self.close_button.controlUp(self.k1plus_button)\n self.k1plus_button.controlDown(self.k1pro_button)\n self.k1pro_button.controlDown(self.close_button)\n self.k2pro_button.controlDown(self.k1pro_button)\n self.k3pro_button.controlDown(self.k1pro_button)\n self.k1pro_button.controlUp(self.k2pro_button)\n self.k1plus_button.controlRight(self.k2pro_button)\n self.k2pro_button.controlRight(self.k3pro_button)\n self.k3pro_button.controlLeft(self.k2pro_button)\n self.k3pro_button.controlRight(self.k1pro_button)\n self.k2pro_button.controlLeft(self.k1plus_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def set_navigation(self):\n self.close_button.controlUp(self.browse_button)\n self.browse_button.controlDown(self.next_button)\n self.browse_button.controlRight(self.next_button)\n self.next_button.controlRight(self.close_button)\n self.next_button.controlLeft(self.browse_button)\n self.close_button.controlLeft(self.next_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def initialize(self):\n # Notebook holds all the tabs\n n = ttk.Notebook(self)\n f1 = RectTab(self)\n f2 = LTab(self)\n f3 = CircTab(self)\n f4 = BuminTab(self)\n f5 = LorentzTab(self)\n n.add(f1, text='Rectangle')\n n.add(f2, text='L')\n n.add(f3, text='Circle ')\n n.add(f4, text='Buminovich')\n n.add(f5, text='Lorentz')\n # need to pack for the Notebook to display\n n.pack()", "def InitUI(self):\n\t\tself._initMenuBar()\n\t\tself._initLayout()\n\t\t\n\t\t# Bindings\n\t\tself.Bind(wx.EVT_BUTTON, self.OnButtonClicked)\n\t\t\n\t\t# We can't even start without an input file\n\t\tself.OnOpen(None)", "def update_menu_bar(self):\n menu_bar = self.shell_obj.menu_bar\n if menu_bar is not None:\n self.widget.setMenuBar(menu_bar.toolkit_widget)\n else:\n self.widget.setMenuBar(None)", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def __init__(self):\n self.stack = QWidget()", "def __init__(self, menu_name):\n self.menu_name = menu_name", "def __loadMenuStructure(self):\r\n try:\r\n f = open(os.path.join(core.FW_conf['settings_dir'],'navigate.settings'), 'r')\r\n data = f.read()\r\n f.close()\r\n self.menu = json.loads(data)\r\n\r\n except Exception as e:\r\n debug.err('Error loading 
navigation data: %s' % e.message)", "def addToNavBar(des, tipe, Xrc):\n title = des.split(\"/\")[-1].replace(\".html\", \"\")\n new_nav = \"<li id=\\\"$ID$\\\"><a style=\\\"cursor: pointer\\\" onclick=\\\"document.getElementById(\\'Xdisplay\\').contentWindow.location.replace(\\'$LINK$\\'); updateExplorer(\\'$LINK$\\')\\\">$TITLE$</a></li>\\n\\t\\t\\t\\t\\t\\t\\t<!-- $XBOOKS_NAV$ -->\"\n nav = \"<!-- $XBOOKS_NAV$ -->\"\n\n with open(\"Xblog/docs/index.html\", 'r') as f:\n index = f.read()\n f.close()\n\n with open(\"Xblog/docs/index.html\", 'w') as f:\n if tipe == \"Xbook\":\n index = index.replace(nav, new_nav.replace(\"$ID$\", title).replace('$TITLE$', title).replace('$LINK$', '\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + 'notebooks/' + title + '/index.html'))\n if tipe == \"Xpage\":\n index = index.replace(nav, new_nav.replace(\"$ID$\", title).replace('$TITLE$', title).replace('$LINK$', '\\\\\\\\' + Xrc[\"gh_repo_name\"] + '/' + 'notebooks/' + title + '.html'))\n f.write(index)\n f.close()\n ccc.success(\"adding \" + title + \" to navigation pallete\")", "def __init_UI(self):\r\n\r\n ## Setting up the vertical bar\r\n # self.bar = self.verticalScrollBar()\r\n\r\n # Create the inner widget of the scroll area\r\n self.inner_widget = QWidget(self)\r\n self.setWidget(self.inner_widget)\r\n\r\n # Create a vertical layout inside the previous widget\r\n self.__layout = QVBoxLayout(self)\r\n self.inner_widget.setLayout(self.__layout)\r\n\r\n # More settings\r\n self.setWidgetResizable(True)", "def init_gui(self):\n # Choose a layout.\n main_vb = QtGui.QVBoxLayout(self)\n\n # Add a list or tree view.\n self.list_view = QtGui.QListWidget()\n\n # Add the buttons.\n load_btn = QtGui.QPushButton('Load Selected')\n cancel_btn = QtGui.QPushButton('Cancel')\n load_btn.clicked.connect(self.update_list_view)\n cancel_btn.clicked.connect(self.close)\n\n # Connect the list/tree view with a method appropriate for user interaction.\n self.list_view.currentItemChanged['QListWidgetItem*', 'QListWidgetItem*'].connect(self.set_current_name)\n self.list_view.itemChanged['QListWidgetItem*'].connect(self.change_name)\n\n # Add the widgets to the layout.\n btn_hb = QtGui.QHBoxLayout()\n btn_hb.addWidget(load_btn)\n btn_hb.addWidget(cancel_btn)\n main_vb.addWidget(self.list_view)\n main_vb.addLayout(btn_hb)\n\n # Show the GUI.\n self.setGeometry(300, 300, 450, 300)\n self.setWindowTitle('Hello World')\n img_icon = 'C:/Users/caj150430/code/so_much_win.png'\n self.setWindowIcon(QtGui.QIcon(img_icon))\n self.show()", "def awakeFromNib(self):\n self._setup_status_bar()", "def initStatusBar(self):\n self.statusBar().showMessage(self.tr(\"Ready\"))", "def set_navigation(self):\n self.close_button.controlUp(self.wetek_button)\n self.wetek_button.controlDown(self.close_button)\n self.wetek_button.controlRight(self.k_button)\n self.k_button.controlRight(self.khadas_button)\n self.k_button.controlDown(self.close_button)\n self.khadas_button.controlRight(self.generic_button)\n self.khadas_button.controlDown(self.close_button)\n self.generic_button.controlLeft(self.khadas_button)\n self.generic_button.controlDown(self.close_button)\n self.k_button.controlLeft(self.wetek_button)\n self.khadas_button.controlLeft(self.k_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def init_editmenu(self):\n self.menubar[\"editmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"editmenu\"].add_command(label=\"Undo\", command=todo)\n self.menubar[\"editmenu\"].add_separator()\n 
self.menubar[\"editmenu\"].add_command(label=\"Cut\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Copy\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Paste\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Delete\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Select All\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Edit\", menu=self.menubar[\"editmenu\"])", "def init_filemenu(self):\n self.menubar[\"filemenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"filemenu\"].add_command(label=\"New\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Open\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Save\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Save as...\", command=todo)\n self.menubar[\"filemenu\"].add_command(label=\"Close\", command=todo)\n self.menubar[\"filemenu\"].add_separator()\n self.menubar[\"menubar\"].add_cascade(\n label=\"File\", menu=self.menubar[\"filemenu\"])", "def set_navigation(self):\n self.close_button.controlUp(self.reader1_button)\n self.close_button.controlLeft(self.next_button)\n self.reader1_button.controlRight(self.reader2_button)\n self.reader2_button.controlRight(self.reader3_button)\n self.reader3_button.controlRight(self.reader4_button)\n self.reader4_button.controlRight(self.reader5_button)\n self.reader1_button.controlDown(self.next_button)\n self.reader2_button.controlDown(self.next_button)\n self.reader3_button.controlDown(self.next_button)\n self.reader4_button.controlDown(self.next_button)\n self.reader5_button.controlDown(self.next_button)\n self.next_button.controlUp(self.reader5_button)\n self.next_button.controlRight(self.close_button)\n self.reader5_button.controlLeft(self.reader4_button)\n self.reader4_button.controlLeft(self.reader3_button)\n self.reader3_button.controlLeft(self.reader2_button)\n self.reader2_button.controlLeft(self.reader1_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Ciné Club\")\n self.setup_ui() # Ajout des Widgets.\n self.populate_movies()\n self.setup_connexions() # Création des connexion entre widgets.", "def setupMenus(self):\n \n exit = QAction(QIcon('gui/icons/close.png'), 'Exit', self)\n exit.setShortcut('Ctrl+Q')\n exit.setStatusTip('Exit application')\n self.connect(exit, SIGNAL('triggered()'), SLOT('close()'))\n \n menubar = self.menuBar()\n file = menubar.addMenu('&File')\n self.getMessagesMenu = file.addMenu('&Get Messages')\n \n file.addAction(exit)\n \n toolbar = self.addToolBar('Exit')\n toolbar.addAction(exit)", "def setup(self):\n self.ui_manager.purge_ui_elements()\n\n button = buttons.MenuButton(\n 'Menu',\n center_x=self.window.width // 2,\n center_y=self.window.height // 9,\n width=200,\n user=self.user.text\n )\n button.set_style_attrs(\n bg_color_hover=(159, 255, 233),\n bg_color_press=(51, 201, 166),\n )\n self.ui_manager.add_ui_element(button)", "def make_top_menus(self):\n menubar = tk.Menu(self)\n\n # create a pulldown menu for languages, and add it to the menu bar\n language_menu = tk.Menu(menubar, tearoff=0)\n language_menu.add_command(label=self.translate(\"English\"), command=lambda: self.replace_language('english'))\n language_menu.add_command(label=self.translate(\"Spanish\"), command=lambda: self.replace_language('spanish'))\n language_menu.add_command(label=self.translate(\"Portuguese\"), command=lambda: 
self.replace_language('portuguese'))\n menubar.add_cascade(label=self.translate(\"Languages\"), menu=language_menu)\n \n # create a pulldown menu for switching context areas, and add it to the menu bar\n context_menu = tk.Menu(menubar, tearoff=0)\n context_menu.add_command(label=self.translate(\"Chile\"), command=lambda: self.switch_context('Chile'))\n context_menu.add_command(label=self.translate(\"Indonesia\"), command=lambda: self.switch_context('Indonesia'))\n context_menu.add_command(label=self.translate(\"Luanda\"), command=lambda: self.switch_context('Luanda'))\n context_menu.add_command(label=self.translate(\"Querétaro\"), command=lambda: self.switch_context('Querétaro'))\n context_menu.add_command(label=self.translate(\"Rio de Janeiro\"), command=lambda: self.switch_context('Rio de Janeiro'))\n context_menu.add_command(label=self.translate(\"Santiago\"), command=lambda: self.switch_context('Santiago'))\n menubar.add_cascade(label=self.translate(\"Locations\"), menu=context_menu)\n \n # create a pulldown menu for arrangment, and add it to the menu bar\n language_menu = tk.Menu(menubar, tearoff=0)\n language_menu.add_command(label=self.translate(\"Graphs-Graphs\"), command=lambda: self.switch_arrangment(['Graph', 'Graph']))\n language_menu.add_command(label=self.translate(\"Graphs-Map\"), command=lambda: self.switch_arrangment(['Graph', 'Map']))\n language_menu.add_command(label=self.translate(\"Map-Graphs\"), command=lambda: self.switch_arrangment(['Map', 'Graph']))\n language_menu.add_command(label=self.translate(\"Map-Map\"), command=lambda: self.switch_arrangment(['Map', 'Map']))\n menubar.add_cascade(label=self.translate(\"Arrange\"), menu=language_menu)\n \n # create an exit command that closes the UI\n menubar.add_command(label=self.translate(\"Exit\"), command=self.destroy)\n \n # display the menu\n menubar.config(font=self.small_font)\n self.config(menu=menubar)\n \n return menubar", "def set_navigation(self):\n self.no_button.controlLeft(self.yes_button)\n self.no_button.controlRight(self.close_button)\n self.close_button.controlUp(self.no_button)\n self.yes_button.controlRight(self.no_button)\n self.yes_button.controlDown(self.close_button)\n\t # Set initial focus.\n self.setFocus(self.no_button)", "def main_menu_toolbar():\n\n pass", "def navbar_list(cls):\n return cls.objects.filter(status=0).filter(is_nav=True)[:10]", "def set_navigation(self):\n self.close_button.controlUp(self.wplnb1_button)\n self.wplnb1_button.controlRight(self.wplnb2_button)\n self.wplnb2_button.controlRight(self.wplnboth_button)\n self.wplnb1_button.controlDown(self.close_button)\n self.wplnb2_button.controlDown(self.close_button)\n self.wplnboth_button.controlDown(self.close_button)\n self.wplnb1_button.controlLeft(self.wplnboth_button) \n self.wplnb2_button.controlLeft(self.wplnb1_button) \n self.wplnboth_button.controlLeft(self.wplnb2_button)\n self.wplnboth_button.controlRight(self.wplnb1_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)", "def __init__(self, test_object, fixed=True, **kwargs):\n\n self.fixed = fixed\n super(CategoriesNavbar, self).__init__(test_object, **kwargs)", "def initUI(self):\n \n self.setWindowTitle(\"Intecol Flir camera\")\n self.setGeometry(300, 100, 1012, 622)", "def setup(self):\n self.ui_manager.purge_ui_elements()\n\n button = buttons.MenuButton(\n 'Menu',\n center_x=self.window.width // 2,\n center_y=self.window.height // 6,\n width=200,\n user=self.user.text\n )\n button.set_style_attrs(\n bg_color=(255, 153, 204),\n bg_color_hover=(255, 
102, 178),\n bg_color_press=(204, 0, 102),\n )\n self.ui_manager.add_ui_element(button)", "def DebugMenuProviderMixin_on_init(self):\n BaseMenuProviderMixin.BaseMenuProviderMixin_on_init(self)\n # Define the core object\n self.activeMenuReference = PhoUIContainer.init_from_dict({'top_level_menu': None, 'actions_dict': {}, 'menu_provider_obj': None})", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def __init__(self):\r\n super().__init__()\r\n self._setupTab1()", "def initMenu(self, menu):\n menu.clear()\n \n self.subMenus = []\n \n adminMenu = QMenu(self.tr(\"Administration\"), menu)\n adminMenu.setTearOffEnabled(True)\n adminMenu.addAction(self.gitShowConfigAct)\n adminMenu.addAction(self.gitRepoConfigAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitReflogBrowserAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateIgnoreAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitCreateArchiveAct)\n adminMenu.addSeparator()\n adminMenu.addAction(self.gitStatisticsAct)\n adminMenu.addAction(self.gitVerifyAct)\n adminMenu.addAction(self.gitHouseKeepingAct)\n self.subMenus.append(adminMenu)\n \n bundleMenu = QMenu(self.tr(\"Bundle Management\"), menu)\n bundleMenu.setTearOffEnabled(True)\n bundleMenu.addAction(self.gitBundleAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleVerifyAct)\n bundleMenu.addAction(self.gitBundleListHeadsAct)\n bundleMenu.addSeparator()\n bundleMenu.addAction(self.gitBundleApplyFetchAct)\n bundleMenu.addAction(self.gitBundleApplyPullAct)\n self.subMenus.append(bundleMenu)\n \n patchMenu = QMenu(self.tr(\"Patch Management\"), menu)\n patchMenu.setTearOffEnabled(True)\n patchMenu.addAction(self.gitCheckPatchesAct)\n patchMenu.addAction(self.gitApplyPatchesAct)\n patchMenu.addSeparator()\n patchMenu.addAction(self.gitShowPatcheStatisticsAct)\n self.subMenus.append(patchMenu)\n \n bisectMenu = QMenu(self.tr(\"Bisect\"), menu)\n bisectMenu.setTearOffEnabled(True)\n bisectMenu.addAction(self.gitBisectStartAct)\n bisectMenu.addAction(self.gitBisectStartExtendedAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectGoodAct)\n bisectMenu.addAction(self.gitBisectBadAct)\n bisectMenu.addAction(self.gitBisectSkipAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectResetAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectLogBrowserAct)\n bisectMenu.addSeparator()\n bisectMenu.addAction(self.gitBisectCreateReplayAct)\n bisectMenu.addAction(self.gitBisectEditReplayAct)\n bisectMenu.addAction(self.gitBisectReplayAct)\n self.subMenus.append(bisectMenu)\n \n tagsMenu = QMenu(self.tr(\"Tags\"), menu)\n tagsMenu.setIcon(UI.PixmapCache.getIcon(\"vcsTag.png\"))\n tagsMenu.setTearOffEnabled(True)\n tagsMenu.addAction(self.vcsTagAct)\n tagsMenu.addAction(self.gitTagListAct)\n tagsMenu.addAction(self.gitDescribeTagAct)\n self.subMenus.append(tagsMenu)\n \n branchesMenu = QMenu(self.tr(\"Branches\"), menu)\n branchesMenu.setIcon(UI.PixmapCache.getIcon(\"vcsBranch.png\"))\n branchesMenu.setTearOffEnabled(True)\n branchesMenu.addAction(self.gitBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitBranchListAct)\n branchesMenu.addAction(self.gitMergedBranchListAct)\n branchesMenu.addAction(self.gitNotMergedBranchListAct)\n 
branchesMenu.addAction(self.gitShowBranchAct)\n branchesMenu.addSeparator()\n branchesMenu.addAction(self.gitDeleteRemoteBranchAct)\n self.subMenus.append(branchesMenu)\n \n changesMenu = QMenu(self.tr(\"Manage Changes\"), menu)\n changesMenu.setTearOffEnabled(True)\n changesMenu.addAction(self.gitUnstageAct)\n changesMenu.addAction(self.vcsRevertAct)\n changesMenu.addAction(self.vcsMergeAct)\n changesMenu.addAction(self.gitCommitMergeAct)\n changesMenu.addAction(self.gitCancelMergeAct)\n \n remotesMenu = QMenu(self.tr(\"Remote Repositories\"), menu)\n remotesMenu.setTearOffEnabled(True)\n remotesMenu.addAction(self.gitRemotesShowAct)\n remotesMenu.addAction(self.gitRemoteShowAct)\n remotesMenu.addSeparator()\n remotesMenu.addAction(self.gitRemoteAddAct)\n remotesMenu.addAction(self.gitRemoteRenameAct)\n remotesMenu.addAction(self.gitRemoteChangeUrlAct)\n remotesMenu.addAction(self.gitRemoteCredentialsAct)\n remotesMenu.addAction(self.gitRemoteRemoveAct)\n remotesMenu.addAction(self.gitRemotePruneAct)\n \n cherrypickMenu = QMenu(self.tr(\"Cherry-pick\"), menu)\n cherrypickMenu.setIcon(UI.PixmapCache.getIcon(\"vcsGraft.png\"))\n cherrypickMenu.setTearOffEnabled(True)\n cherrypickMenu.addAction(self.gitCherryPickAct)\n cherrypickMenu.addAction(self.gitCherryPickContinueAct)\n cherrypickMenu.addAction(self.gitCherryPickQuitAct)\n cherrypickMenu.addAction(self.gitCherryPickAbortAct)\n \n stashMenu = QMenu(self.tr(\"Stash\"), menu)\n stashMenu.setTearOffEnabled(True)\n stashMenu.addAction(self.gitStashAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBrowserAct)\n stashMenu.addAction(self.gitStashShowAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashApplyAct)\n stashMenu.addAction(self.gitStashPopAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashBranchAct)\n stashMenu.addSeparator()\n stashMenu.addAction(self.gitStashDropAct)\n stashMenu.addAction(self.gitStashClearAct)\n \n submodulesMenu = QMenu(self.tr(\"Submodules\"), menu)\n submodulesMenu.setTearOffEnabled(True)\n submodulesMenu.addAction(self.gitSubmoduleAddAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesInitAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateInitAct)\n submodulesMenu.addAction(self.gitSubmodulesDeinitAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesUpdateAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateRemoteAct)\n submodulesMenu.addAction(self.gitSubmodulesUpdateOptionsAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesSyncAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesListAct)\n submodulesMenu.addSeparator()\n submodulesMenu.addAction(self.gitSubmodulesStatusAct)\n submodulesMenu.addAction(self.gitSubmodulesSummaryAct)\n \n act = menu.addAction(\n UI.PixmapCache.getIcon(\n os.path.join(\"VcsPlugins\", \"vcsGit\", \"icons\", \"git.png\")),\n self.vcs.vcsName(), self._vcsInfoDisplay)\n font = act.font()\n font.setBold(True)\n act.setFont(font)\n menu.addSeparator()\n \n menu.addAction(self.gitFetchAct)\n menu.addAction(self.gitPullAct)\n menu.addSeparator()\n menu.addAction(self.vcsCommitAct)\n menu.addAction(self.gitPushAct)\n menu.addSeparator()\n menu.addMenu(changesMenu)\n menu.addMenu(stashMenu)\n menu.addSeparator()\n menu.addMenu(cherrypickMenu)\n menu.addSeparator()\n menu.addMenu(bundleMenu)\n menu.addMenu(patchMenu)\n menu.addSeparator()\n menu.addMenu(remotesMenu)\n menu.addMenu(submodulesMenu)\n menu.addSeparator()\n 
menu.addMenu(tagsMenu)\n menu.addMenu(branchesMenu)\n menu.addSeparator()\n menu.addAction(self.gitLogBrowserAct)\n menu.addSeparator()\n menu.addAction(self.vcsStatusAct)\n menu.addSeparator()\n menu.addAction(self.vcsDiffAct)\n menu.addAction(self.gitExtDiffAct)\n menu.addSeparator()\n menu.addAction(self.vcsSwitchAct)\n menu.addSeparator()\n menu.addMenu(bisectMenu)\n menu.addSeparator()\n menu.addAction(self.vcsCleanupAct)\n menu.addSeparator()\n menu.addAction(self.vcsCommandAct)\n menu.addSeparator()\n menu.addMenu(adminMenu)\n menu.addSeparator()\n menu.addAction(self.gitEditUserConfigAct)\n menu.addAction(self.gitConfigAct)\n menu.addSeparator()\n menu.addAction(self.vcsNewAct)\n menu.addAction(self.vcsExportAct)", "def set_navigation(self):\n self.close_button.controlUp(self.dvbc_button)\n self.dvbc_button.controlDown(self.close_button)\n self.dvbc_button.controlRight(self.dvbs_button)\n self.dvbs_button.controlRight(self.dvbt_button)\n self.dvbs_button.controlDown(self.close_button)\n self.dvbt_button.controlLeft(self.dvbs_button)\n self.dvbt_button.controlDown(self.close_button)\n self.dvbs_button.controlLeft(self.dvbc_button)\n\t # Set initial focus.\n self.setFocus(self.close_button)" ]
[ "0.68171155", "0.6107692", "0.6052145", "0.60315436", "0.60225713", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6012156", "0.6007548", "0.5998332", "0.59736603", "0.59092045", "0.5872221", "0.58312136", "0.5814118", "0.5792713", "0.57674927", "0.5748322", "0.57369053", "0.5715075", "0.5661508", "0.5645447", "0.5634402", "0.56327146", "0.5615507", "0.5603622", "0.5601747", "0.5591947", "0.558912", "0.5565005", "0.5535212", "0.5518953", "0.5510145", "0.5508734", "0.55040926", "0.5474946", "0.54745585", "0.54640245", "0.5460302", "0.5442251", "0.5441814", "0.54334295", "0.5419367", "0.5405158", "0.54034543", "0.5397711", "0.5397469", "0.53964525", "0.5392674", "0.5388263", "0.5374395", "0.53605986", "0.5353351", "0.5348337", "0.53389287", "0.533014", "0.5317819", "0.53174186", "0.5316028", "0.5307394", "0.530648", "0.5301261", "0.5299045", "0.5297928", "0.5296906", "0.52901185", "0.5289143", "0.52712345", "0.5260929", "0.52599853", "0.52591264", "0.52547723", "0.5253548", "0.5252623", "0.5250307", "0.5246659", "0.52449906", "0.5243231", "0.5240096", "0.5239651", "0.52389425", "0.5237134", "0.5221186", "0.5213828", "0.5211044", "0.52046794", "0.5187182", "0.5184076", "0.51828885", "0.5182564", "0.51819474", "0.51738447", "0.5173576" ]
0.0
-1
Reads images in base directory DIR using 'classes' (computed from subdirectories)
def preprocess_from_dir(DIR, classes=None, IMG_SIZE=(224,224), channels=3, per_class_size=None, normalize_train=False, mean_subtraction=None, isShuffle=True, save_data=False, destination_filename=None, verbose=1):
    return_classes_flag = False
    data = []

    if not exists(DIR):
        raise ValueError('The specified directory does not exist')

    if IMG_SIZE is None:
        raise ValueError('IMG_SIZE must be specified')

    if not isinstance(IMG_SIZE, tuple) or len(IMG_SIZE) != 2:
        raise ValueError('IMG_SIZE must be a tuple of size 2 (width,height)')

    if verbose in [0,1]:
        if verbose == 0:
            display_count = False
        else:
            display_count = True
    else:
        raise ValueError('verbose flag must be either 1 (display progress to terminal) or 0 otherwise')

    if not isinstance(save_data, bool):
        raise ValueError('save_data must be a boolean (True/False)')

    if classes is None:
        return_classes_flag = True
    else:
        if not isinstance(classes, list):
            raise ValueError('"classes" must be a list')

    if save_data:
        if destination_filename is None:
            raise ValueError('Specify a destination file name')
        elif not ('.npy' in destination_filename or '.npz' in destination_filename):
            raise ValueError('Specify the correct numpy destination file extension (.npy or .npz)')

    if not save_data and destination_filename is not None:
        destination_filename = None

    # Loading from Numpy Files
    if destination_filename is not None and exists(destination_filename):
        print('[INFO] Loading from Numpy Files')
        since = time.time()
        data = np.load(destination_filename, allow_pickle=True)
        end = time.time()
        took = end - since
        print('----------------------------------------------')
        print(f'[INFO] Loaded in {took:.0f}s from Numpy Files')
        return data

    # Extracting image data and adding to `data`
    else:
        if destination_filename is not None:
            print(f'[INFO] Could not find {destination_filename}. Generating the training data')
        else:
            print('[INFO] Could not find a file to load from. Generating the training data')
        print('----------------------------------------------')

        # Starting timer
        since_preprocess = time.time()

        if classes is None:
            classes = get_classes_from_dir(DIR)

        # Removing false folders
        classes = _check_for_false_folders(DIR, classes)

        if per_class_size is None:
            per_class_size = len(listdir(minijoin(DIR, classes[0]), verbose=0))

        if mean_subtraction is not None:
            # Checking if 'mean_subtraction' values are valid. Returns boolean value
            subtract_mean = _check_mean_sub_values(mean_subtraction, channels)

        for item in classes:
            class_path = minijoin(DIR, item)
            class_label = classes.index(item)
            count = 0
            tens_list = list_images(class_path, use_fullpath=True, verbose=0)
            for image_path in tens_list:
                if count != per_class_size:
                    # image_path = minijoin(class_path, image)
                    # Returns the resized image (ignoring aspect ratio since it isn't relevant for Deep Computer Vision models)
                    tens = imread(image_path, target_size=IMG_SIZE, rgb=True)
                    if tens is None:
                        continue

                    # Gray
                    if channels == 1:
                        tens = to_gray(tens)

                    # Normalizing
                    if normalize_train:
                        tens = normalize(tens)

                    # Subtracting Mean
                    # Mean must be calculated ONLY on the training set
                    if mean_subtraction is not None and subtract_mean:
                        mean_subtract = MeanProcess(mean_subtraction, channels)
                        tens = mean_subtract.mean_preprocess(tens, channels)

                    # Appending to train set
                    data.append([tens, class_label])
                    count += 1
                    if display_count is True:
                        _printTotal(count, item)
                else:
                    break

        # Shuffling the Training Set
        if isShuffle is True:
            data = shuffle(data)

        # Converting to Numpy
        data = to_array(data)

        # Saves the Data set as a .npy file
        if save_data:
            # Converts to Numpy and saves
            if destination_filename.endswith('.npy'):
                print('[INFO] Saving as .npy file')
            elif destination_filename.endswith('.npz'):
                print('[INFO] Saving as .npz file')

            # Saving
            since = time.time()
            np.save(destination_filename, data)
            end = time.time()
            time_elapsed = end - since
            minu_elapsed = time_elapsed // 60
            sec_elapsed = time_elapsed % 60
            print(f'[INFO] {destination_filename} saved! Took {minu_elapsed:.0f}m {sec_elapsed:.0f}s')

        # Returns Training Set
        end_preprocess = time.time()
        time_elapsed_preprocess = end_preprocess - since_preprocess
        minu = time_elapsed_preprocess // 60
        sec = time_elapsed_preprocess % 60
        print('----------------------------------------------')
        print(f'[INFO] {len(data)} files preprocessed! Took {minu:.0f}m {sec:.0f}s')

        if return_classes_flag:
            return data, classes
        else:
            return data
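# A minimal usage sketch (not part of the original function). The directory
# name below is an assumption for illustration only; with classes=None the
# call returns both the data and the inferred class list, and each entry of
# `data` is a [image_tensor, class_index] pair.
if __name__ == '__main__':
    data, classes = preprocess_from_dir(
        'datasets/animals',          # hypothetical base dir: one sub-directory per class
        IMG_SIZE=(224, 224),
        channels=3,
        normalize_train=True,
        isShuffle=True,
    )
    first_tens, first_label = data[0]
    print(classes, first_tens.shape, first_label)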
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def _get_filenames_and_classes(dataset_dir):\n # print 'DATASET DIR:', dataset_dir\n # print 'subdir:', [name for name in os.listdir(dataset_dir)]\n # dataset_main_folder_list = []\n # for name in os.listdir(dataset_dir):\n # \tif os.path.isdir(name):\n # \t\tdataset_main_folder_list.append(name)\n dataset_main_folder_list = [name for name in os.listdir(dataset_dir) if os.path.isdir(os.path.join(dataset_dir,name))]\n dataset_root = os.path.join(dataset_dir, dataset_main_folder_list[0])\n directories = []\n class_names = []\n for filename in os.listdir(dataset_root):\n path = os.path.join(dataset_root, filename)\n if os.path.isdir(path):\n directories.append(path)\n class_names.append(filename)\n \n count = 0\n #print(directories)\n for directory in directories:\n #print(directory)\n #continue\n for filename in os.listdir(directory):\n print(filename)\n path = os.path.join(directory, filename)\n\n im = Image.open(path)\n imResize = im.resize((28,28), Image.ANTIALIAS)\n imResize.save(path, 'bmp')\n print(count)\n count = count + 1\n \n\n\n \n return", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def get_instances_of_class(cls, folder):\n data = list()\n for _, _, filenames in os.walk(folder):\n for filename in filenames:\n if filename.endswith(\".jpg\"):\n last = filename.split(\"/\")[-1]\n if re.match(cls, last):\n data.append(last)\n return data", "def load_paths_and_labels(self,classes):\n\t\tim_paths , im_labels = [], [] \n\n\t\tfor image_type in classes:\n\t\t\tmypath = self.data_path + self.dataset + '/' + image_type\n\t\t\tonlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]\n\t\t\tclass_support = 0\n\t\t\tfor file_name in onlyfiles:\n\t\t\t\t#print file_name\n\t\t\t\tif file_name != '.DS_Store':\n\t\t\t\t\tim_path = mypath = self.data_path + self.dataset + '/' + image_type + '/' + file_name\n\t\t\t\t\tim_paths.append(im_path)\n\t\t\t\t\tim_labels.append(image_type)\n\t\t\t\tclass_support += 1\n\t\t\t\tif self.support_per_class != None and class_support == self.support_per_class:\n\t\t\t\t\tbreak\n\n\t\tcombined = zip(im_paths, im_labels)\n\t\trandom.shuffle(combined)\n\t\t\n\t\tim_paths[:], im_labels[:] = zip(*combined)\n\n\t\treturn im_paths,im_labels", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def read_from_folder(args, n_values=50):\n images = []\n img_id = 0\n basedir = str(args['input_train'])\n class_dirs = os.listdir(basedir)\n # load images from base directory\n for class_dir in class_dirs:\n image_files = glob.glob(os.path.join(basedir, class_dir, \"*\"))\n\n # test case\n if args['test']:\n image_files = image_files[0:n_values]\n\n for image_file in image_files:\n img = image.OCRImage(pil_image=Image.open(image_file),\n img_id=img_id,\n img_class=class_dir,\n img_hex=image_file[:-4][-4:])\n images.append(img)\n img_id += 1\n\n return images", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, 
\"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n 
print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n print('found %d images' % len(self.files))", "def __init__(self, data_dir, file_prefix, num_images):\n print(file_prefix)\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n print('found %d images' % len(self.files))", "def _locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def readImages(imgFolder='img/'):\n #Each image in images is a numpy array of shape 192x168(x1) (heightxwidth)\n #images datatype is a regular numpy list\n filenames = os.listdir(imgFolder)\n if imgFolder == 'img/':\n images = [imageio.imread('img/'+fn+'/image0.jpg')[::,::].astype(np.float32)/255. for fn in filenames]#glob.glob(imgFolder+'*.jpg')]\n else:\n images = [imageio.imread(imgFolder+fn)[::,::].astype(np.float32)/255. 
for fn in filenames]\n return images", "def load_images(test_data_dir, image_size = (300, 300)):\n # loop over the input images\n images_data = []\n labels = []\n imagePaths = sorted(list(paths.list_images(test_data_dir)))\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, image_size)\n image = img_to_array(image)\n images_data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n return images_data, sorted(labels)", "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def load_sample_images():\n # Try to import imread from scipy. We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n ROOT_Dir = os.getcwd()\n module_path = os.path.join(ROOT_Dir, \"images\")\n with open(os.path.join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [os.path.join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)", "def __init__(self, data_dir):\n self.data_dir = data_dir\n\n # reading in the images present\n self.files = os.listdir(self.data_dir)", "def getimgs():", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n self.images = []\n self.display_match = False\n self.useBlending = False\n print('found %d images' % len(self.files))", "def __init__(self, data_dir, pairs_filepath, img_ext, num_random_images_per_folder):\n self.data_dir = data_dir\n self.pairs_filepath = pairs_filepath\n self.img_ext = img_ext\n self.num_random_images_per_folder = num_random_images_per_folder\n\n if os.name == 'nt':\n self.separator = \"\\\\\"\n else:\n self.separator = \"/\"\n\n self.remaining = []\n for name in os.listdir(self.data_dir):\n if os.path.isdir(os.path.join(self.data_dir, name)):\n self.remaining.append(name)", "def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images", "def get_images(path, ext=\".jpg\"):\n return get_files(path, ext)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n existing_dirs = [os.path.basename(dir) for dir in os.listdir(FLAGS.output_dir)]\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.JPEG')):\n with tf.gfile.Open(filepath, 'rb') as f:\n image = np.array(Image.open(f).resize([FLAGS.image_height, FLAGS.image_width]).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are 
normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n if os.path.basename(os.path.normpath(input_dir))=='*':\n head, tail = os.path.split(filepath)\n dirname=os.path.basename(head)\n if dirname in existing_dirs:\n continue\n filename = os.path.join(dirname, tail)\n else:\n filename = os.path.basename(filepath)\n filenames.append(filename)\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' + ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images", "def grab_files(local_list=local_list,\n acts=acts,\n class_dict=class_dict,\n verbose=verbose,\n imagenet_root='/storage/data/imagenet_2012/',\n in_class_sub_dirs=True):\n selected_image_list = []\n found_classes = []\n for selected_point in local_list:\n # grab filename\n selected_file = acts.get_file_name(selected_point).decode('UTF-8')\n if verbose:\n pass\n #print(selected_file)\n class_dir_label = selected_file.split('_')[0]\n if in_class_sub_dirs:\n # we've assumed files are in folders labelled by class!\n selected_image_list.append(imagenet_root + class_dir_label + '/' + selected_file)\n else:\n selected_image_list.append(imagenet_root + selected_file)\n class_no = class_dict[selected_file.split('_')[0]]\n if not class_no in found_classes:\n found_classes.append(class_no)\n return selected_image_list", "def _get_filenames_and_classes(dataset_dir):\n directories = []\n class_names = []\n for filename in os.listdir(dataset_dir):\n path = os.path.join(dataset_dir, filename)\n if os.path.isdir(path):\n directories.append(path)\n class_names.append(filename)\n image_filenames = []\n\n for directory in directories:\n for filename in os.listdir(directory):\n path = os.path.join(directory, filename)\n image_filenames.append(path)\n\n return image_filenames, class_names", "def read_images(folder):\n distinct_frames = DistinctFrames()\n\n for file in sorted(sorted(os.listdir(folder)),\n key=len): # sorting files on basis of 1) length and 2) numerical order\n '''\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.pkl\n 2. image22.pkl\n 3. 
image21.pkl\n firstly sort them to image100.pkl,image21.pkl,image22.pkl then according to length to image21.pkl,image22.pkl,image100.pkl\n '''\n try:\n img_obj = load_from_memory(file, folder)\n time_stamp = img_obj.get_time()\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for debug purpose\n except:\n # exception will occur for files like .DS_Store and jpg directory\n continue\n\n if distinct_frames.no_of_frames() != 0:\n distinct_frames.calculate_time()\n\n return distinct_frames", "def _getImagesFromDirectory(self, directoryPath):\n files = [f for f in listdir(directoryPath)\n if isfile(join(directoryPath, f))]\n for filePath in files:\n self._imageDictionary[filePath] = image.load(\n self._formatPath(directoryPath, filePath))", "def populate_image_lists():\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_a.append(path.path)\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_b.append(path.path)", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def read_images(path, image_size=None):\n c = 0\n X = []\n y = []\n folder_names = []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n folder_names.append(subdirname)\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n #try:\n im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)\n # resize to given size (if given)\n if (image_size is not None):\n im = cv2.resize(im, image_size)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n #except IOError, (errno, strerror):\n # print \"I/O error({0}): {1}\".format(errno, strerror)\n # except:\n # print \"Unexpected error:\", sys.exc_info()[0]\n # raise\n c = c+1\n return [X,y,folder_names]", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n 
filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 1.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def read_raw_img(kind):\n\n mypath = RAW_DIR_PATH[kind]\n files = [f for f in listdir(mypath) if isfile(join(mypath, f))\n and f[0] != '.']\n random.shuffle(files)\n\n if kind == 'bad':\n files *= 3\n\n for img in files:\n yield Image.open(mypath + img)", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load(self, dirname):\n loaded_filenames = set()\n ini_filename = os.path.join(dirname, \"xpresser.ini\")\n if os.path.exists(ini_filename):\n config = ConfigParser.ConfigParser()\n config.read(ini_filename)\n for section_name in config.sections():\n if section_name.startswith(\"image \"):\n image_name = section_name.split(None, 1)[1]\n try:\n image_filename = config.get(section_name, \"filename\")\n except ConfigParser.NoOptionError:\n raise ImageDirError(\"Image %s missing filename option\"\n % image_name)\n image_filename = os.path.join(dirname, image_filename)\n if not os.path.exists(image_filename):\n raise ImageDirError(\"Image %s file not found: %s\" %\n (image_name, image_filename))\n try:\n image_similarity = config.getfloat(section_name,\n 
\"similarity\")\n except ConfigParser.NoOptionError:\n image_similarity = None\n except ValueError:\n value = config.get(section_name, \"similarity\")\n raise ImageDirError(\"Image %s has bad similarity: %s\"\n % (image_name, value))\n \n try:\n value = config.get(section_name, \"focus_delta\")\n match = CLICK_POSITION_RE.match(value)\n if not match:\n raise ImageDirError(\"Image %s has invalid click \"\n \"position: %s\" %\n (image_name, value))\n image_focus_delta = (int(match.group(\"x\")),\n int(match.group(\"y\")))\n except ConfigParser.NoOptionError:\n image_focus_delta = None\n image = Image(name=image_name,\n filename=image_filename,\n similarity=image_similarity,\n focus_delta=image_focus_delta)\n self._images[image_name] = image\n loaded_filenames.add(image_filename)\n\n # Load any other images implicitly with the default arguments.\n for basename in os.listdir(dirname):\n filename = os.path.join(dirname, basename)\n if filename not in loaded_filenames:\n ftype, fencoding = mimetypes.guess_type(filename)\n if ftype and ftype.startswith(\"image/\"):\n image_name = os.path.splitext(basename)[0]\n self._images[image_name] = Image(name=image_name,\n filename=filename)", "def get_data(self):\n data_str = get_cls_img(root=self.root, suffix=self.suffix)\n\n if not self.load_images:\n return data_str\n\n cls_img_data = dict.fromkeys(data_str.keys())\n for cls_ in data_str:\n temp = [0] * len(data_str[cls_])\n for i, img_name in enumerate(data_str[cls_]):\n img = _load_image(\n img_url=os.path.join(self.root, cls_, img_name),\n expand_dim=self.expand_dim\n )\n temp[i] = img\n cls_img_data[cls_] = list(temp)\n\n return cls_img_data", "def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def get_images_of_folder(folder):\n\n Settings.dev_print(\"getting images of folder: {}\".format(folder.get_title()))\n if not folder: return []\n imgs = []\n files = []\n valid_images = [\".jpg\",\".gif\",\".png\",\".tga\",\".jpeg\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_images:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"image path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... 
: {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def get_cls_img(root: str, suffix: str) -> dict:\n cls_img = dict.fromkeys(list_dir(root=root))\n for dir_ in cls_img:\n cls_img[dir_] = list_files(root=os.path.join(root, dir_), suffix=suffix)\n\n return cls_img", "def load_images_from_directory(input_dir, batch_shape):\n def input_filenames(input_dir):\n all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n all_files.sort()\n return all_files\n\n\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n\n for filepath in input_filenames(input_dir):\n with tf.gfile.Open(filepath, mode='rb') as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n\n # This is a partial batch left over at end.\n # Note that images will still have the proper size.\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape=[2000,299,299,3]):\n \n filenames = []\n idx = 0\n filepaths=tf.gfile.Glob(os.path.join('./', '*.png'))\n print(len(filepaths))\n print(filepaths)\n batch_shape[0]=len(filepaths)\n batch_size = batch_shape[0]\n print(batch_shape)\n print(\"ZZZ\")\n images = np.zeros(batch_shape, dtype=np.float32)\n \n for filepath in filepaths:\n# with tf.gfile.Open(filepath) as f:\n# image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n image = np.array(scipy.misc.imresize(scipy.misc.imread(filepath),(299,299)),dtype=np.float32)/255\n \n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image -0.5 #* 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n return filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n return filenames, images", "def process(directory):\n files = []\n\n options = [\"Load\", \"Create\"]\n choice = options[int(ui.prompt(options=options))]\n\n for item in os.listdir(directory):\n if os.path.isfile(os.path.join(directory, item)):\n filename = os.path.join(directory, item)\n if choice == \"Load\" and item.endswith(\".png\"):\n files.append(filename)\n elif choice == \"Create\" and item.endswith(\".file\"):\n files.append(filename)\n\n filenames, pageNames = imagePages(files, choice)\n \n targets = [name.split('/')[-1][:5] for name in filenames]\n return pageNames, targets, filenames", "def load_images(image_types=None,\n directory=None,\n images_per_type=None,\n image_size=224,\n process=False,\n model=mobilenet_v2):\n\n images_numpy = []\n images_class = []\n\n for image_type in image_types:\n images_path = os.path.join(directory, image_type, '*.jpg')\n for i, filename in enumerate(glob.glob(images_path)):\n try:\n if i == images_per_type:\n break\n loaded_image = load_img(filename, target_size=(image_size, image_size))\n images_numpy.append(img_to_array(loaded_image))\n images_class.append(image_type)\n except Exception as e:\n print('TypeError: {}'.format(e))\n\n if process:\n image_batch = np.expand_dims(images_numpy, axis=0)\n images_processed = 
model.preprocess_input(image_batch.copy())\n images_class_processed = process_images_class(images_class)\n\n return images_processed[0], images_class_processed\n\n else:\n return images_numpy, images_class", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def _get_filenames_and_classes(dataset_dir):\n image_root = os.path.join(dataset_dir)\n directories = []\n class_names = []\n for filename in os.listdir(image_root):\n path = os.path.join(image_root, filename)\n if os.path.isdir(path):\n directories.append(path)\n class_names.append(filename)\n\n photo_filenames = []\n for directory in directories:\n for filename in os.listdir(directory):\n path = os.path.join(directory, filename)\n photo_filenames.append(path)\n\n return photo_filenames, sorted(class_names)", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def load_images(image_name_to_label):\n images = []\n labels = []\n\n image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)\n\n # Remove directories\n image_names.remove(\"COVID-19\")\n image_names.remove(\"Normal\")\n image_names.remove(\"ViralPneumonia\")\n\n # Load images from specific image directories (COVID-19, normal, viral pneumonia)\n def load_directory(directory):\n notifier.send(\" Loading from directory: \" + directory + \"...\")\n directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory\n directory_image_names = os.listdir(directory_path)\n for i, image_name in enumerate(directory_image_names):\n base_image_name = get_base_image_name(image_name)\n query_name = directory + \"/\" + base_image_name\n query_name = query_name.lower().replace(\" \", \"\")\n if query_name in image_name_to_label:\n print(f\" {i / len(directory_image_names) * 100}% - [{image_name}]\")\n image_path = directory_path + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[query_name])\n load_directory(\"COVID-19\")\n load_directory(\"Normal\")\n load_directory(\"ViralPneumonia\")\n\n # Load images from default directory\n if LOAD_ALL_IMAGES:\n notifier.send(\" Loading from directory: default...\")\n for i, image_name in enumerate(image_names):\n base_image_name = get_base_image_name(image_name)\n if base_image_name in image_name_to_label:\n print(f\" {i / len(image_names) * 100}% - [{image_name}]\")\n image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[base_image_name])\n\n return images, labels", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in 
tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n if(FLAGS.checkpoint_file_name==\"vgg_16.ckpt\")or(FLAGS.checkpoint_file_name==\"vgg_19.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_50.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_101.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_152.ckpt\"):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float)\n images[idx, :, :, :] = image\n else:\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def getfiles_from_dir(self,dir):\n assert not os.path.isdir(dir),\"Invalid dir format\"+str(dir)\n print(\"-----Read Dir :\",dir)\n self.files=glob.glob(os.path.join(dir,\"./*.tif\"))", "def dataset(self):\n for d in dirlist(os.path.join(self.datadir)):\n for f in imlist(d):\n yield ImageDetection(filename=f).category(filebase(d))", "def read_images(path, sz=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n try:\n im = Image.open(os.path.join(subject_path, filename))\n im = im.convert(\"L\")\n # resize to given size (if given)\n if (sz is not None):\n im = im.resize(sz, Image.ANTIALIAS)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n c = c+1\n return [X,y]", "def read_imgs(path):\n dirs = os.listdir(path)\n imgs = []\n for fn in dirs:\n img_path = path + '/' + fn\n img = cv2.imread(img_path, 1)\n img = np.float32(cv2.resize(img, (224, 224))) / 255\n imgs.append(img)\n imgs = np.array(imgs)\n return imgs", "def create_image_lists(image_dir):\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in os.walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n print('in sub loop')\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(image_dir)\n print(\"Looking for images in '\" + image_dir + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' 
+ extension)\n file_list.extend(glob.glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n testing_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n testing_images.append(base_name)\n return testing_images", "def fastset(self):\n for d in dirlist(os.path.join(self.datadir)):\n for f in imlist(d):\n yield ImageDetection(filename=f, category=filebase(d))", "def get_images_from_dir(src_folder, n_images=None, shuffle=False):\n\tvalid_extensions = set([\"bmp\", \"jpeg\", \"jpg\", \"png\", \"tif\", \"tiff\"])\n\tsrc_contents = os.walk(src_folder)\n\tdirpath, _, fnames = src_contents.next()\n\timg_dir = os.path.split(dirpath)[-1]\n\timg_files = [os.path.join(dirpath, name) for name in fnames]\n\tif shuffle:\n\t\trandom.shuffle(img_files)\n\tif n_images:\n\t\timg_files = img_files[:n_images]\n\timages = [cv2.imread(name, cv2.IMREAD_GRAYSCALE) for name in \n\t\t\t img_files[:n_images] if os.path.splitext(name)[-1][1:].lower() \n\t\t\t in valid_extensions]\n\tif shuffle:\n\t\trandom.shuffle(images)\n\treturn images", "def load_from_folder(path):\n images = []\n files = os.listdir(path)\n files.sort()\n for file in tqdm(files):\n images.append(io.imread(path + file))\n return images", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def list_images(img_dir) -> Iterable[str]:\n extensions = (\".png\", \".jpg\", \".jpeg\", \".tif\", \".tiff\")\n\n paths = Path(img_dir).glob(\"**/*\")\n paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)\n return (str(p) for p in paths)", "def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, 
labels)", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def load_set(directName, n = np.inf):\n # Loaded a set of images\n\n files = os.listdir(directName)\n n = min(n, len(files))\n #n = len(files)\n print(\"Loading \" + str(n) + \" images\")\n imgs = [mpimg.imread(directName + files[i]) for i in range(n)]\n\n return imgs", "def loadImagesFromDirectory(self, directoryPath):\n if isdir(directoryPath):\n self._getImagesFromDirectory(directoryPath)\n else:\n print(directoryPath + \" does not exists\")", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def readImages(image_dir):\n images = {}\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' 
+ ext) for ext in extensions]\n image_files = sorted(reduce(list.__add__, map(glob, search_paths)))\n for f in image_files:\n images[f[f.rfind(\"/\") + 1:f.rfind(\".\")]] = cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR)\n\n return images", "def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels", "def get_data_loaders(img_dir, img_height, img_width, batch_size=8):\n total_count = sum([len(files) for r, d, files in os.walk(img_dir)])\n\n data_transform = torchvision.transforms.Compose(\n [\n transforms.Resize((img_height, img_width)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]\n )\n \n # build a dataset of images from the img_dir directory\n im_folder = torchvision.datasets.ImageFolder(img_dir, transform=data_transform)\n model_dataset = td.datasets.WrapDataset(im_folder)\n\n dataset_loader = torch.utils.data.DataLoader(model_dataset, batch_size=batch_size)\n\n return dataset_loader, total_count", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')", "def search_images(\n current_dir: str,\n exts={\"jpg\", \"png\", \"jpeg\", \"gif\"}\n) -> typing.Iterable[typing.Tuple[str, str]]:\n for root, _, 
files in os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield os.path.join(root, file_name), file_name", "def process_images(image_folder: Path) -> List[Dict]:\n images = []\n files = image_folder.glob(\"*.jpg\")\n\n for file_path in files:\n file_name = file_path.name\n file_id = file_name.split(\".jpg\")[0]\n file_id = file_id.split(\"in\")[-1]\n file_id = int(file_id)\n file_id = f\"{file_path.parent.parent.name}_{str(file_id)}\"\n\n width, height = imagesize.get(str(file_path))\n\n image_data = {\"id\": file_id,\n \"width\": width,\n \"height\": height,\n \"filename\": str(file_path)}\n images.append(image_data)\n\n return images", "def split_dir(dirr, output_dir, dirs=['train', 'validation', 'test'], split=(.5,.25,.25)):\n\n # get all image paths\n image_paths = []\n for filepath in pathlib.Path(dirr).glob('**/*'):\n image_paths.append(filepath.absolute())\n\n # organize into {class_name:[class_image_paths, ...], ...}\n class_dict = {}\n for i in image_paths:\n fname = str(i).split(\"/\")\n file_name = fname[len(fname)-1]\n class_name = fname[len(fname)-2]\n if class_name not in class_dict.keys():\n class_dict[class_name] = []\n class_dict[class_name].append(str(i))\n\n del class_dict['images'] #I don't know why\n\n # organize into {class_name:[[train_paths],[validation_paths],[test_paths]], ...}\n # by given\n for k in class_dict.keys():\n paths = class_dict[k]\n\n train_split = int(len(paths)*split[0])\n validation_split = int(len(paths)*split[1])\n\n train_paths = paths[train_split:]\n validation_paths = paths[train_split:validation_split+train_split]\n test_paths = paths[validation_split+train_split:]\n\n class_dict[k] = [train_paths, validation_paths, test_paths]\n\n # make output dirs\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n os.makedirs(output_dir+\"/\"+dirs[0])\n os.makedirs(output_dir+\"/\"+dirs[1])\n os.makedirs(output_dir+\"/\"+dirs[2])\n\n # move everything\n for k in class_dict.keys():\n for d_i,d in enumerate(dirs):\n\n if not os.path.exists(output_dir+\"/\"+d+\"/\"+k):\n os.makedirs(output_dir+\"/\"+d+\"/\"+k)\n\n for path in class_dict[k][d_i]:\n file_name = path.split(\"/\")\n file_name = file_name[len(file_name)-1]\n copyfile(path, output_dir+\"/\"+d+\"/\"+k+\"/\"+file_name)", "def __init__(self, directory='Wang_Data'):\n self.directory = directory\n self._image_names = []\n for i in range(1000):\n self._image_names.append(str(i) + '.jpg')", "def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels", "def load_sprites(dir=\"/home/robin/workspace/python/ipt/chess/sprites\"):\n arr = []\n chdir(dir)\n for i in range(12):\n img = mimg.imread(\"sprite_\"+\"{:0>2d}\".format(i)+\".png\")\n arr.append(img)\n return arr", "def read_images(fs, img_path_batch, mode=\"rb\"):\n result = []\n logging.info(\"Start to read images at {}\".format(socket.gethostname()))\n for (label, img_path) in img_path_batch:\n img = read_image(fs, img_path, mode)\n result.append((label, img))\n logging.info(\"Finish the reading of {} images on {}\".format(\n len(result), socket.gethostname()))\n return result", "def 
_load_images(paths):\n assert isinstance(paths, list)\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n\n # allocate memory\n images = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 3],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)):\n img = sio.imread(paths[i])\n\n # resize images\n img = sresize(img, (FLAGS.target_height, FLAGS.target_width, 3),\n mode='constant', preserve_range=True)\n\n # store images\n images[i] = img.astype(np.float32)\n pbar.update(i)\n\n # mean removal\n images -= [_R_MEAN, _G_MEAN, _B_MEAN]\n return images", "def __init__(self, root_dir, dataset='LSUN17', transform=None):\n self.root_dir = root_dir\n self.dataset = dataset\n self.images = [f for f in os.listdir(self.root_dir) if f.endswith(('.jpg', '.jpeg', '.png'))]\n self.images.sort()\n self.transform = transform", "def _iter_images(self):\n raise NotImplementedError", "def list_images(path=['.']):\n for image_dir in set(path):\n if not os.path.isdir(image_dir):\n continue\n for filename in os.listdir(image_dir):\n bname, ext = os.path.splitext(filename)\n if ext.lower() not in VALID_IMAGE_EXTS:\n continue\n\n filepath = os.path.join(image_dir, filename)\n yield strutils.decode(filepath)", "def _load_generator(self, sub_folder: str, include_labels: bool)\\\n -> Generator[Union[Tuple[List[np.ndarray], np.ndarray], List[np.ndarray]], None, None]:\n paths: List[str] = self._get_iter_list(os.path.join(self._fileRoot, sub_folder))\n print(\"loading generator for \" + str(len(paths)) + \" images\")\n\n def iter_paths():\n grays: List[np.ndarray] = []\n labels: List[np.ndarray] = []\n classifiers: List[np.ndarray] = []\n current_batch: int = 0\n\n for f in paths:\n image: np.ndarray = cv2.cvtColor(\n cv2.resize(cv2.imread(f), (config.IMAGE_SIZE, config.IMAGE_SIZE)),\n cv2.COLOR_RGB2Lab\n )\n\n gray: np.ndarray = np.reshape(image[:, :, 0], (config.IMAGE_SIZE, config.IMAGE_SIZE, 1))\n classifier: np.ndarray = np.reshape(cv2.resize(gray, (112, 112)), (112, 112, 1))\n\n current_batch += 1\n if include_labels:\n labels.append(image[:, :, 1:])\n grays.append(gray)\n classifiers.append(classifier)\n\n # batch complete, yield\n if current_batch == config.BATCH_SIZE:\n if include_labels:\n yield [np.asarray(grays) / 255.0, np.asarray(classifiers) / 255.0], np.asarray(labels) / 255\n else:\n yield [np.asarray(grays) / 255.0, np.asarray(classifiers) / 255.0]\n\n current_batch = 0\n if include_labels:\n labels.clear()\n grays.clear()\n classifiers.clear()\n\n if len(labels) > 0:\n # unfinished batch remaining\n if include_labels:\n yield [np.asarray(grays) / 255.0, np.asarray(classifiers) / 255.0], np.asarray(labels) / 255\n else:\n yield [np.asarray(grays) / 255.0, np.asarray(classifiers) / 255.0]\n\n if include_labels:\n for _ in range(config.NUM_EPOCHS):\n for i in iter_paths():\n yield i\n else:\n for i in iter_paths():\n yield i", "def read_images(path, fileNameFilter=FileNameFilter(\"None\"), sz=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n if fileNameFilter(filename):\n try:\n im = Image.open(os.path.join(subject_path, filename))\n im = im.convert(\"L\")\n # resize to given size (if given)\n if (sz is not None):\n im = im.resize(sz, Image.ANTIALIAS)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): 
{1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise \n c = c+1\n return [X,y]", "def readImages(respository,*rescale):\n record = []\n onlyfiles = [f for f in listdir(respository) if isfile(join(respository, f))]\n for image in onlyfiles:\n record = record+[readImage(join(respository, image),[0,1,2],rescale)]\n return record\n pass" ]
[ "0.7321848", "0.68896234", "0.6826946", "0.66698617", "0.6657406", "0.6633416", "0.65999025", "0.6553935", "0.6523276", "0.6521498", "0.64866954", "0.64619726", "0.6459432", "0.6443935", "0.6438473", "0.64238954", "0.64133865", "0.6406419", "0.6398897", "0.63965183", "0.63886374", "0.6383361", "0.6368225", "0.63544637", "0.6320698", "0.6311417", "0.62830853", "0.6246722", "0.6242717", "0.6240772", "0.62385917", "0.62215436", "0.61970884", "0.6188395", "0.6166824", "0.61304903", "0.6127415", "0.6115946", "0.6109745", "0.6106774", "0.6102908", "0.6078958", "0.6071749", "0.60713696", "0.60709393", "0.6070571", "0.60678303", "0.6059203", "0.60557383", "0.6054674", "0.60492885", "0.6042284", "0.6040529", "0.60315585", "0.60268205", "0.60259885", "0.602589", "0.6020271", "0.6014905", "0.60131973", "0.601201", "0.6011653", "0.6008764", "0.6007971", "0.6004781", "0.60034996", "0.60014224", "0.59795904", "0.59741634", "0.5971065", "0.5966052", "0.59657985", "0.59637594", "0.5962156", "0.5961304", "0.59568864", "0.59501904", "0.5938393", "0.59376234", "0.5934045", "0.59262633", "0.5916534", "0.59075725", "0.5897926", "0.5893281", "0.5890652", "0.5886388", "0.58862054", "0.5873979", "0.5869494", "0.58660334", "0.5864274", "0.5855832", "0.58541", "0.5852954", "0.5849211", "0.5847712", "0.5842059", "0.5837448", "0.5835919" ]
0.5859994
92
Normalizes the data to mean 0 and standard deviation 1
def normalize(x, dtype='float32'):
    # x /= 255.0 raises a TypeError
    # x = x/255.0
    # Converting to float32 and normalizing (float32 saves memory)
    x = x.astype(dtype) / 255
    return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data", "def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std", "def standardize(data):\r\n mean = data.mean(axis=0)\r\n std = data.std(axis=0)\r\n return (data - mean)/std", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def normalize_mean0std1(data,data_mean=None,data_std=None,tol=1e-6):\n if data_mean is None:\n data_mean=np.mean(data,axis=0)\n data_mean.reshape((1,data_mean.shape[0]))\n if data_std is None:\n data_std=np.std(data,axis=0)\n data_std.reshape((1,data_std.shape[0]))\n #tol=0#1e-8\n return (data-data_mean)/(data_std+tol),data_mean,data_std", "def standardize(data):\n stddev = data.std()\n #if stddev == 0.:\n # sys.exit(\"data.std() == 0. 
!\")\n if stddev != 0.:\n data = (data - data.mean()) / (data.std())\n\n return data", "def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def standardize_data(data):\n return (data - np.mean(data, axis=0)) / (np.std(data, axis=0) + 10 ** -16)", "def transform(self, data):\n data -= self.mean\n if 0.0 in self.std:\n self.std = np.where(self.std == 0.0, 1.0, self.std)\n data /= self.std\n return data", "def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s", "def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X", "def feature_normalize(X):\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n X_std[0, 0] = 1\n X_normalize = (X - X_mean) / X_std\n X_normalize[:, 0] = 1.0\n return X_normalize, X_mean, X_std", "def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)", "def standardize(X):\n X_std = X\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n for col in range(np.shape(X)[1]):\n if std[col]:\n X_std[:, col] = (X_std[:, col] - mean[col]) / std[col]\n # X_std = (X - X.mean(axis=0)) / X.std(axis=0)\n return X_std", "def normalize_data(data=None):\n # Data pre-processing\n n = data.shape[0]\n for i in range(n):\n xx = data[i,:,:]\n xx -= np.mean(xx) # Centering in 0\n xx /= np.linalg.norm(xx) # Normalizing to 1\n data[i] = xx # Affect value\n return data", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _standardize(self):\n deviation = np.std(self.series)\n self.series = (self.series - np.mean(self.series)) / (deviation if deviation != 0 else 1)", "def standardize(x, mean=None, std=None): \n \n mean = mean if mean is not None else x.mean(axis=0)\n std = std if std is not None else x.std(axis=0) \n \n return (x - mean) / std, mean, std", "def normalization_stats(completeData):\n data_mean = np.mean(completeData, axis=0)\n data_std = np.std(completeData, axis=0)\n\n dimensions_to_ignore = []\n dimensions_to_use = []\n\n dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))\n dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))\n\n data_std[dimensions_to_ignore] = 1.0\n\n return data_mean, data_std, dimensions_to_ignore, dimensions_to_use", "def normal_(self, mean, std):\n if mean != 0:\n raise ValueError(f'Currently only mean=0 is supported, but got mean={mean}')", "def normal_(self, mean=0, std=1):\n if mean != 0:\n raise 
ValueError(f'Currently only mean=0 is supported, but got mean={mean}')", "def denormalize(x, std, mean):\n out = x * std + mean\n return out.clamp(0, 1)", "def norm_data(self):\n if (self.nrows, self.ncolumns) < self.data.shape:\n self.data = self.data[0:self.nrows, 0:self.ncolumns]\n if self.data.dtype != np.float64:\n self.data = self.data.astype(np.float64)\n self.meanval = self.data.mean()\n self.stdval = self.data.std()", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)", "def standardize_data(Xtrain,Xtest):\n \n ### Import modulates\n import numpy as np\n\n Xmean = np.nanmean(Xtrain,axis=0)\n Xstd = np.nanstd(Xtrain,axis=0)\n Xtest = (Xtest - Xmean)/Xstd\n Xtrain = (Xtrain - Xmean)/Xstd\n \n stdVals = (Xmean,Xstd)\n stdVals = stdVals[:]\n \n return Xtrain,Xtest,stdVals", "def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data", "def normalize(df, mean=None, std=None):\n if mean is None:\n mean = df.mean(0)\n if std is None:\n std = df.std(0)\n\n # ensure we don't divide by zero in columns with zero std (all entries identical)\n try:\n # if df was a 1d array or pd.Series to begin with, std will be a\n # non-subscriptable float, so we handle that case in except\n std[std == 0] = 1\n except TypeError:\n std = std if std > 0 else 1\n\n # return mean and std to be able to revert normalization later\n return (df - mean) / std, [mean, std]", "def normalize_feature(df):\n return df.apply(lambda column: (column - column.mean()) / column.std())", "def samele_wise_normalization(data):\n if np.max(data) == np.min(data):\n return np.ones_like(data, dtype=np.float32) * 1e-6\n else:\n return 1.0 * (data - np.min(data)) / (np.max(data) - np.min(data))", "def standardization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the mean values and the standard deviations of the input numpy array along the axis \n Mean = np.mean(input_data, axis = 0)\n Std = np.std(input_data, axis = 0)\n\n # Standardization \n standardized_input_data = (input_data - Mean) / (Std + sys.float_info.min)\n\n # Return standardized input data\n return standardized_input_data", "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def normalize(self, mean=None, std=None):\n if mean is None:\n mean = self.mean\n if std is None:\n std = self.std\n\n new = self.copy()\n new.data = (new.data - mean) / std\n return new", "def normalize(arr, stats=False):\n arr = np.array(arr)\n mean = arr.mean()\n std = arr.std()\n normed = (arr - mean) / std\n if not stats:\n return normed\n return normed, mean, std", "def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data", "def normalization_test(x_test, meanV, stdV): 
\n eps = np.finfo(float).eps \n x_test_post = (x_test - meanV)/(stdV + eps) \n \n return x_test_post", "def normalize(self):\n self._data /= self.norm()", "def normalizeData(pre_signal):\n\n if sp.any(sp.isnan(pre_signal)):\n print('there are NaNs in the data matrix, making them zero')\n\n pre_signal[sp.isnan(pre_signal)] = 0\n mean_vector = sp.mean(pre_signal, axis=0, keepdims=True)\n normed_signal = pre_signal - mean_vector\n norm_vector = sp.linalg.norm(normed_signal, axis=0, keepdims=True)\n norm_vector[norm_vector == 0] = 1e-116\n normed_signal = normed_signal / norm_vector\n\n return normed_signal, mean_vector, norm_vector", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def normalize_features(df):\r\n mu = df.mean()\r\n sigma = df.std()\r\n \r\n if (sigma == 0).any():\r\n raise Exception(\"One or more features had the same value for all samples, and thus could \" + \\\r\n \"not be normalized. Please do not include features with only a single value \" + \\\r\n \"in your model.\")\r\n df_normalized = (df - df.mean()) / df.std()\r\n\r\n return df_normalized, mu, sigma", "def normalize_features(df):\r\n mu = df.mean()\r\n sigma = df.std()\r\n \r\n if (sigma == 0).any():\r\n raise Exception(\"One or more features had the same value for all samples, and thus could \" + \\\r\n \"not be normalized. Please do not include features with only a single value \" + \\\r\n \"in your model.\")\r\n df_normalized = (df - df.mean()) / df.std()\r\n\r\n return df_normalized, mu, sigma", "def normalise(self,data,take_logs:bool=False):\n\n # Normalise vector to sum up to 1\n normalised_vector = data/np.sum(data)\n\n # If take logs is selected, take logs\n if take_logs:\n return np.log(normalised_vector)\n else:\n return normalised_vector", "def standardize(x, mean_x=None, std_x=None):\n if mean_x is None:\n mean_x = np.mean(x, axis=0)\n x = x - mean_x\n if std_x is None:\n std_x = np.std(x, axis=0)\n x[:, std_x > 0] = x[:, std_x > 0] / std_x[std_x > 0]\n\n tx = np.hstack((np.ones((x.shape[0], 1)), x))\n return tx, mean_x, std_x", "def standardize(x, axis=-1):\n stds_avg = np.std(x, axis=axis, keepdims=True)\n x -= np.mean(x, axis=axis, keepdims=True)\n x /= (stds_avg + 1e-8)\n return x", "def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean", "def z_normalize(ts):\n\n ts -= np.mean(ts)\n std = np.std(ts)\n\n if std == 0:\n raise ValueError(\"The Standard Deviation cannot be zero\")\n\n #ts /= std\n return ts / std", "def normalize(X, mu, sigma):\n return (X - mu) / sigma", "def standardise(x):\n mean_x = np.mean(x, axis=0)\n x = x - mean_x\n std_x = np.std(x, axis=0)\n x = x / std_x\n return x, mean_x, std_x", "def featureNormalization(X):\n mean=np.hstack(np.mean(X[:,0]),np.mean(X[:,1]),np.mean(X[:,2]))\n std=np.hstack(np.std(X[:,0]),np.std(X[:,1]),np.std(X[:,2]))\n \n X_norm = (X - mean)/std\n \n return X_norm", "def standardize(self, inputData):\n\n return (inputData - self.mean) / self.std", "def standardize(x, mean_x=None, std_x=None):\n if mean_x is None:\n mean_x = np.mean(x,axis=0)\n x = x - mean_x\n if std_x is None:\n std_x = np.std(x,axis=0)\n x = x / std_x\n 
return x, mean_x, std_x", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def vis_normalize(a, s=0.1):\n return s * (a - a.mean()) / (max(a.std(), 1e-4)) + 0.5", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def standardize(X, axis=0, ddof=0):\n\n # Modified from scikit-learn.preprocessing.scale()!\n\n #X = np.asarray(X)\n X = np.asarray(X, dtype=np.float) # XXX: what about dtype? convert to float64? for higher precision? let client decide?\n Xr = np.rollaxis(X, axis) # view on X to enable broadcasting on the axis we are interested in\n \n mean_ = Xr.mean(axis=0)\n std_ = Xr.std(axis=0, ddof=ddof)\n std_[std_ == 0.0] = 1.0 # avoid NaNs due to div/zero\n\n # center mean on zero\n Xr -= mean_\n\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n mean_1 = Xr.mean(axis=0)\n if not np.allclose(mean_1, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n mean_ += mean_1\n\n # scale to unit variance\n Xr /= std_\n\n # If mean_2 is not 'close to zero', it comes from the fact that\n # std_ is very small so that mean_2 = mean_1/std_ > 0, even if\n # mean_1 was close to zero. The problem is thus essentially due\n # to the lack of precision of mean_. A solution is then to\n # substract the mean again.\n mean_2 = Xr.mean(axis=0)\n if not np.allclose(mean_2, 0.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0.\")\n Xr -= mean_2\n mean_ += mean_2\n\n # Additional check if variances are 'close to one'\n std_1 = Xr.std(axis=0, ddof=ddof)\n if not np.allclose(std_1, 1.0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. 
Standard deviation \"\n \"not close to one after scaling.\")\n\n return X, mean_, std_", "def norm_data(data):\n return (data-np.min(data))/(np.max(data)-np.min(data))", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def standardize(X):\n\n scaler = StandardScaler()\n X_scaled = scaler.fit_transform(X)\n return X_scaled", "def featureNormalize(X):\n\n mu = np.mean(X, axis=0)\n sigma = np.std(X, axis=0)\n\n X_normalized = (X - mu) / sigma\n\n return X_normalized, mu, sigma", "def normalize_X(X):\n scaler = preprocessing.StandardScaler()\n X = scaler.fit_transform(X)\n return X", "def normalize(D, ntype=0, means=None, stds=None):\n \n if (not isinstance(D,np.ndarray)) or (len(D.shape) > 2):\n raise AssertionError(\"Input D must be derivative of numpy.ndarray and have less than 3 dimensions.\")\n \n (D,initial_shape) = ensure_column(D)\n \n n_rows = D.shape[0] \n \n if means is None:\n means = bn.nanmean(D, axis= 0) \n \n tmp = D - np.tile( means, (n_rows,1) ) # temporary result. Data with \n # substracted mean \n \n if stds is None:\n if (ntype == 0): \n stds = bn.nanstd(tmp,axis=0, ddof=1 ) # one degree of freadom as matlab default\n \n elif (ntype == 1):\n stds = bn.nanmax(np.abs(tmp), axis=0)\n \n elif (ntype == 2): \n stds = np.sqrt( bn.nansum( np.power(tmp,2) , axis = 0) ) \n \n elif (ntype == 3): \n stds = np.ones( (D.shape[1],) )\n \n else:\n raise ValueError(\"Normalization type %s is unknown\" % ntype)\n \n # result = np.dot( tmp , np.diagflat( 1./stds ) )\n result = np.divide( tmp, stds ) \n \n result = rev_ensure_column(result,initial_shape)\n D = rev_ensure_column(D,initial_shape) \n \n return (result,means,stds)", "def normalize_features(df):\n mu = df.mean()\n sigma = df.std()\n\n if (sigma == 0).any():\n raise Exception(\"One or more features had the same value for all samples, and thus could \" +\n \"not be normalized. 
Please do not include features with only a single value \" +\n \"in your model.\")\n df_normalized = (df - df.mean()) / df.std()\n\n return df_normalized, mu, sigma", "def normalise(da):\n return (da - da.min()) / (da.max() - da.min())", "def normalize(self, df):\n return (df - df.mean()) / (df.max() - df.min())", "def scale_data(self, data):\n return (data - self.mean)/self.std", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))", "def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x", "def normalize(array, inplace=False):\n if inplace:\n array -= ds_mean\n array /= ds_std\n else:\n array = (array - ds_mean) / ds_std\n return array", "def normalize(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_NORM) )\n\n ret_df = df.copy()\n t = ret_df[comm_keys]\n ret_df[comm_keys] = (t - t.mean()) / t.std()\n\n return ret_df", "def normalize(data):\n # normalize data and return\n # https://stackoverflow.com/questions/29661574/normalize-numpy-array-columns-in-python\n return (data - data.min(axis=0)) / data.ptp(axis=0)", "def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value", "def normal_init(m, mean, std):\n if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):\n m.weight.data.normal_(mean, std)\n m.bias.data.zero_()", "def normalize(x):\n MEAN_VALUES = np.array([104, 117, 123])\n means = theano.shared(MEAN_VALUES.astype(\"float32\"))\n return x[:, ::-1, :, :] - means[np.newaxis, :, np.newaxis, np.newaxis]", "def to_norm(data):\n print('The dtgeostats.utils.to_norm function is under construction - use with caution...')\n mu = np.mean(data)\n sd = np.std(data)\n z = (data - mu) / sd\n bins = len(z)\n\n # Get cumulative probability and normal-score values\n counts, bin_edges = np.histogram(z, bins=bins, normed=True)\n cprob = np.cumsum(counts)/sum(counts)*0.99 # = f[:, 1] or inv[:, 0]\n nscore_value = (bin_edges[:-1] + bin_edges[1:]) / 2 # = f[:, 0] or inv[:, 1]\n\n # Apply to data\n z = st.norm(0, 1).ppf(cprob)\n z = np.where(z == np.inf, np.nan, z)\n z = np.where(np.isnan(z), np.nanmax(z), z)\n return z, cprob, nscore_value", "def normalization_constants(X):\n\n mean = np.mean(X, axis=0)\n standard_deviation = np.std(X, axis=0)\n\n return mean, standard_deviation", "def partial_flatten_and_normalize(x):\n x = np.reshape(x, (x.shape[0], -1))\n return (x - np.mean(x)) / np.std(x)", "def unscale_data(self, data):\n return (data + self.mean)*self.std", "def gaussianNorm(df, mean = None, std = None):\n mean = mean if isinstance(mean, pd.Series) else df.mean(axis=0)\n std = std if isinstance(std, pd.Series) else df.std(axis=0)\n return (df - mean) / std", "def normalization_constants(X):\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n return (mean, std)", "def unnormalize_multivariate_data(normed_data, scaling_values):\n data = np.zeros(normed_data.shape, dtype=normed_data.dtype)\n for i in range(normed_data.shape[-1]):\n data[:, :, :, i] = normed_data[:, :, :, i] * scaling_values.loc[i, \"std\"] + scaling_values.loc[i, \"mean\"]\n return data", "def normalize_features(array):\n \n array_normalized = 
(array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma", "def standardiser(self):\n # Select only numeric features first\n\n #self.X = self.data.loc[:, self.data.columns != self.target].values\n numeric_columns = []\n for col in self.X.columns:\n if self.X[col].dtype!='object':\n numeric_columns.append(col)\n scaler = preprocessing.StandardScaler().fit(self.X[numeric_columns]) \n # Now we can standardise\n self.X[numeric_columns] = scaler.transform(self.X[numeric_columns])", "def _normalize(a: np.ndarray, u: float=0, s: float=1) -> np.ndarray:\n a_norm = (a - np.mean(a)) / (np.std(a) + STABILITY)\n a_rescaled = a_norm * s + u\n\n return a_rescaled", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize_multivariate_data(data, scaling_values=None):\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]):\n scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, :, :, i].mean(), data[:, :, :, i].std()]\n normed_data[:, :, :, i] = (data[:, :, :, i] - scaling_values.loc[i, \"mean\"]) / scaling_values.loc[i, \"std\"]\n return normed_data, scaling_values", "def normalize_multivariate_data(data, scaling_values=None):\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]):\n scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, :, :, i].mean(), data[:, :, :, i].std()]\n normed_data[:, :, :, i] = (data[:, :, :, i] - scaling_values.loc[i, \"mean\"]) / scaling_values.loc[i, \"std\"]\n return normed_data, scaling_values", "def standardizeData(tdata,vdata):\n tmean = tdata.mean(axis=0)\n tstd = tdata.std(axis=0)\n tdataNew = (tdata - tmean)/tstd\n vdataNew = (vdata - tmean)/tstd\n return tdataNew, vdataNew", "def gaussianNormalised(data, mu, sigma):\n data = data - mu\n g = exp ( - data**2 / (2*sigma**2) )\n gSum = np.sum(g)\n \n if gSum == 0:\n print \"Warning gaussianNormalised:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g)\n else:\n return (g / gSum)", "def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))", "def normalize(self, X):\n return X - X.mean()" ]
[ "0.8334346", "0.8145812", "0.8115416", "0.80993706", "0.80993706", "0.8077115", "0.7967708", "0.78944945", "0.78458416", "0.78052527", "0.7734523", "0.7707518", "0.770103", "0.76728106", "0.76322484", "0.75377613", "0.75027645", "0.74743336", "0.7383516", "0.7299807", "0.72801566", "0.7273466", "0.72678435", "0.72524214", "0.72298557", "0.72298557", "0.72189116", "0.7208859", "0.7185113", "0.7177867", "0.7176174", "0.7170348", "0.7160823", "0.7159511", "0.7156391", "0.7155928", "0.71266663", "0.711195", "0.7110814", "0.71014315", "0.7095076", "0.7073178", "0.70585257", "0.70574635", "0.70542663", "0.7033648", "0.7029224", "0.7023796", "0.70123917", "0.70123917", "0.7004796", "0.7004722", "0.69932395", "0.6979652", "0.69685054", "0.6965548", "0.6959201", "0.6932794", "0.69270843", "0.69085276", "0.6906606", "0.69056535", "0.68983227", "0.68970716", "0.68954915", "0.6893305", "0.6880912", "0.68689495", "0.68659884", "0.6862422", "0.68614924", "0.6835252", "0.6819378", "0.6814307", "0.68140775", "0.6791162", "0.6789252", "0.67884886", "0.67524755", "0.6751134", "0.67495483", "0.6747014", "0.67446965", "0.6739871", "0.673759", "0.6737098", "0.67265105", "0.67094225", "0.67089885", "0.6705295", "0.6703051", "0.6697444", "0.6696376", "0.66898847", "0.66898847", "0.66862386", "0.66862386", "0.66823953", "0.6680393", "0.6677602", "0.6676945" ]
0.0
-1
Step O must be finished before step C can begin.
def convert_input_text(text):
    steps = defaultdict(list)
    predecessors = set()
    for line in text:
        regex = search(r"Step (.) must be finished before step (.) can begin.", line)
        # steps[step] = [list of predecessors]
        steps[regex.group(2)].append(regex.group(1))
        predecessors.add(regex.group(1))
    for key in predecessors - set(steps):
        steps[key] = []
    return steps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_step(self) -> None:", "def _step(self) -> None:", "def _step(self):\n pass", "def step(self):\n #1. Time progresses\n self.time_operator.step()\n \n #2. Form and dissolve relationships\"\n self.relationship_operator.step()\n\n #3. HIV transmission\n self.infection_operator.step()", "def step(self):\n\n pass", "def step(self):\r\n raise NotImplementedError", "def step_forward(self):", "def _step(self, whence):\n pass", "def step(self):\n raise NotImplementedError", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def step(self):\n while self.state != STATE_TERMINAL:\n self.step_strategies[self.state]()", "def step(self, action):", "def step_B(self, *args, **kwargs):\n if kwargs.get(\"complete_quest_B\", False):\n self.quester.msg(\"Completed step B of quest!\")\n self.quester.db.test_quest_counter = 0\n self.current_step = \"C\"\n self.progress()", "def perform_step(self) -> None:\n pass", "def after_step():\n raise NotImplementedError", "def stepo_cmd(cmd, cnt, args):\n #log(\"stepo\"+str(args[1:]))\n cpu.set_break(cpu.pc+4)\n cpu.list_breaks()\n go_cmd(cmd, cnt, args)\n cpu.clear_break(cpu.pc+4)\n cpu.list_breaks()", "def _step(self, action):\n\n # action is generated from the action_policy (external to the environment)\n if len(action) == 4:\n object_index, new_location, action_means, action_stds = action\n if len(action) == 2:\n \"\"\"\n Action is not generated from a Gaussian distribution\n \"\"\"\n object_index, new_location = action\n action_means = action_stds = None\n \n position = new_location[:2]\n rotation = new_location[2]\n\n prev_transform = self.e.objects[object_index].transform\n\n if len(self.action_storage) > 0:\n last_progress = self.action_storage[-1][4]\n else:\n last_progress = 0\n\n info = {}\n if self.e.act(object_index, Command(position, rotation)):\n # print ('Action accepted')\n cur_transform = self.e.objects[object_index].transform\n # I need to call self.action_storage.append before get_observation_and_progress\n self.action_storage.append( [object_index, prev_transform, cur_transform, None, None, True, action_means, action_stds] )\n observation, progress = self.get_observation_and_progress()\n self.action_storage[-1][3:5] = [observation, progress]\n\n info['action_accepted'] = True\n else:\n \"\"\"\n Action failed\n We can reduce the progress to avoid falling out of the table\n \"\"\"\n if len(self.action_storage) > 0:\n # Just return observation and progress of last action\n _, _, _, observation, progress, _, _, _ = self.action_storage[-1]\n progress -= self.config.failed_action_penalty\n else:\n # First action failed\n observation, _ = self.get_observation_and_progress()\n progress = -self.config.failed_action_penalty\n \n self.action_storage.append( [object_index, prev_transform, prev_transform, observation, progress, False, action_means, action_stds] )\n\n \n info['action_accepted'] = False\n\n # Typical threshold approach\n if progress > self.progress_threshold:\n # Finish action\n done = True\n else:\n done = False\n \n reward = progress - last_progress\n #print ('Progress = %.2f ; reward = %.2f' % (progress, reward))\n\n return (observation, reward, done, info)", "def run_one_step(self):\n pass", "def step(self, state):", "def step_A(self, *args, **kwargs):\n # note - this could be done with a direct db query instead to avoid a loop, for a\n # unit test it's fine though\n if any(obj for obj in self.quester.contents if obj.tags.has(\"QuestA\", 
category=\"quests\")):\n self.quester.msg(\"Completed step A of quest!\")\n self.current_step = \"B\"\n self.progress()", "def next_step(self):\n self.proceed()\n self.execute_current()", "def step(self, memories):\n return", "def step(self):\n #for the first step, we calculate only with the previous state\n if self.isFirstStep:\n F = (self.pot.f)(self.x, self.U)\n self.isFirstStep = False\n #else, we calculate with the previous step and the one before to keep the second order accuracy\n else:\n F = 3/2*(self.pot.f)(self.x, self.U)-1/2*(self.pot.f)(self.x, self.oldU)\n self.oldU = np.copy(self.U)\n C = self.B.dot(self.U)+self.dt*F\n C[0] = 0\n C[-1] = 0\n self.U = lin.solve_banded((1,1), self.A, C)", "def step(self):\n self.latent.step()", "def _step(self):\n self.sort()\n selection = self._select()\n offspring = self._crossover(selection)\n self._mutate(offspring)\n\n self.sort()\n if self.elite_num > 0:\n offspring[:self.elite_num] = self.population[:self.elite_num]\n\n self.population[:] = offspring\n\n self.sort()\n if self.cull_num > 0:\n self.population[-self.cull_num:] = self._initialize(self.cull_num)", "def _monte_carlo_step(self):\n # Take a random step. Make a copy of x because the step_taking\n # algorithm might change x in place\n x_after_step = np.copy(self.x)\n x_after_step = self.step_taking(x_after_step)\n\n # do a local minimization\n minres = self.minimizer(x_after_step)\n x_after_quench = minres.x\n energy_after_quench = minres.fun\n if not minres.success:\n self.res.minimization_failures += 1\n if self.disp:\n print(\"warning: basinhopping: local minimization failure\")\n\n if hasattr(minres, \"nfev\"):\n self.res.nfev += minres.nfev\n if hasattr(minres, \"njev\"):\n self.res.njev += minres.njev\n if hasattr(minres, \"nhev\"):\n self.res.nhev += minres.nhev\n\n # accept the move based on self.accept_tests. If any test is False,\n # then reject the step. If any test returns the special string\n # 'force accept', then accept the step regardless. 
This can be used\n # to forcefully escape from a local minimum if normal basin hopping\n # steps are not sufficient.\n accept = True\n for test in self.accept_tests:\n testres = test(f_new=energy_after_quench, x_new=x_after_quench,\n f_old=self.energy, x_old=self.x)\n if testres == 'force accept':\n accept = True\n break\n elif testres is None:\n raise ValueError(\"accept_tests must return True, False, or \"\n \"'force accept'\")\n elif not testres:\n accept = False\n\n # Report the result of the acceptance test to the take step class.\n # This is for adaptive step taking\n if hasattr(self.step_taking, \"report\"):\n self.step_taking.report(accept, f_new=energy_after_quench,\n x_new=x_after_quench, f_old=self.energy,\n x_old=self.x)\n\n return accept, minres", "def phase(self):\n pass", "def step(self, action):\n raise NotImplementedError", "def end_phase():\n pass", "def step(self, action):\n obs, r, done, info = self.env.step(action)\n obs = self.get_observation(obs)\n return obs, r, self.is_done(), info", "def step(self):\n if not self.is_done():\n actions = [ agent.program(self.percept(agent)) for agent in self.agents ]\n for agent, action in zip(self.agents, actions):\n self.execute_action(agent, action)\n\n self.exogenous_change()", "def step(self, step=None):\n pass", "def step(self):\n try:\n self.tiempos.siguiente()\n except StopIteration:\n return", "def step(self, **kwargs):\n pass", "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "def step(self, action):\n raise NotImplementedError()", "def step(self, move):", "def step(self):\n self.function()", "def TestOneStep(self):\n pass", "def step(self, action: CARLAAction, *args: Any, **kwargs: Any) -> Transition:\n observation, reward, done, info = self.env.step(action)\n if observation[\"collision\"] > 0:\n logging.debug(\"A collision occured\")\n done = True\n reward = -1.0\n return observation, reward, done, info", "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n self.microgridPolicy.improveAction(action);\n\n self.microgrid.update();\n\n self.updateState();\n done = self.microgridPolicy.verifyStopConditions();\n reward = self.microgridPolicy.computeReward(done)\n if done: \n if self.steps_beyond_done is None:\n self.steps_beyond_done = 0\n else:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. 
You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n self.clock.increaseTimeStep();\n return self.state, reward, done, {}", "def step(self, action):\n\n # ==\n # Transition, reward and termination\n done = False\n reward = self.get_current_reward(self.state)\n\n # Leaf and absorbing nodes\n if self.state <= 1:\n done = True\n if self.state == 1:\n self.state = 0 # go to absorbing\n else:\n self.state = int(self.state // 2)\n\n # ==\n # Features\n phi = self.state_2_features(self.state)\n\n return phi, reward, done, {}", "def _worker(self):\n player = self.first_player # first player\n q = [None, self.q1, self.q2]\n states = [None, 'reset', 'reset']\n not_ret = None\n while True:\n op, a, ret = q[player].get()\n # print(f'worker: player: {player} op: {op}')\n try:\n assert op == states[\n player], f'state {op} expects {states[player]}'\n\n if op == 'reset':\n if player == self.first_player:\n # first time case\n s = self.env.reset()\n # set trace\n self.trace.set_init(self.env)\n legal_moves = self.env.legal_moves()\n ret.set((s, None, None, None, legal_moves))\n states[player] = 'step'\n else:\n states[player] = 'step'\n\n # switch player\n if player == 1:\n player = 2\n else:\n player = 1\n not_ret = ret\n elif op == 'step':\n # handle error\n s, r, done, info = self.env.step(a)\n # keep trace\n self.trace.step(a)\n\n if done:\n ret.set((s, r, done, info, []))\n not_ret.set((s, -r, done, info, []))\n # end\n self.done = True\n break\n else:\n legal_moves = self.env.legal_moves()\n not_ret.set((s, -r, done, info, legal_moves))\n\n # switch player\n if player == 1:\n player = 2\n else:\n player = 1\n not_ret = ret\n else:\n raise NotImplementedError()\n except Exception as e:\n # exception\n # return as exception\n ret.set(e)\n if not_ret is not None:\n not_ret.set(\n Exception('your opponent did something unexpected'))\n self._exception(\n Exception('something else terminated the environment'))\n break", "def step(self, action):\n # print(\"############################\")\n # print(\"action: {}\".format(action))\n\n self.movement_complete.data = False\n\n # 1) Read last joint positions by getting the observation before acting\n old_observation = self.get_obs()\n\n # 2) Get the new joint positions according to chosen action (actions here are the joint increments)\n if self._joint_increment is None:\n next_action_position = action\n else:\n next_action_position = self.get_action_to_position(action, old_observation[1:7])\n\n # 3) Move to position and wait for moveit to complete the execution\n self.publisher_to_moveit_object.pub_joints_to_moveit(next_action_position)\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(next_action_position, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n # time.sleep(s\n\n \"\"\"\n #execute action as long as the 
current position is close to the target position and there is no invalid collision and time spend in the while loop is below 1.2 seconds to avoid beeing stuck touching the object and not beeing able to go to the desired position \n time1=time.time()\n while np.linalg.norm(np.asarray(self.joints_state.position)-np.asarray(next_action_position))>0.1 and self.get_collisions()==False and time.time()-time1<0.1: \n rospy.loginfo(\"Not yet reached target position and no collision\")\n \"\"\"\n # 4) Get new observation and update min_distance after performing the action\n new_observation = self.get_obs()\n if new_observation[0] < self.min_distace:\n self.min_distace = new_observation[0]\n # print(\"observ: {}\".format( np.around(new_observation[1:7], decimals=3)))\n\n # 5) Convert Observations into state\n state = U.get_state(new_observation)\n\n # 6) Check if its done, calculate done_reward\n done, done_reward, invalid_contact = self.is_done(new_observation)\n\n # 7) Calculate reward based on Observatin and done_reward and update the accumulated Episode Reward\n reward = UMath.compute_reward(new_observation, done_reward, invalid_contact)\n\n ### TEST ###\n if done:\n joint_pos = self.joints_state.position\n print(\"Joint in step (done): {}\".format(np.around(joint_pos, decimals=3)))\n ### END of TEST ###\n\n self.accumulated_episode_reward += reward\n\n self.episode_steps += 1\n\n return state, reward, done, {}", "def step(self, action):\n r, is_valid_action = -1, False\n # Action 0: Nichts\n if action == 0:\n r, is_valid_action = 0, True\n # Action 1: Laden\n if action == 1 and self.residual < 0:\n r = 1#abs(self.residual)*100\n is_valid_action = self.el_storage.charge(abs(self.residual), 15, self.time)\n # Action 2: Entladen\n if action == 2 and self.residual > 0:\n r = 1#abs(self.residual)*100\n is_valid_action = self.el_storage.discharge(abs(self.residual), 15, self.time)\n # Fehler- und Rewardüberprüfung\n if not is_valid_action:\n r = -1\n # Bereite den nächsten state vor\n self.residual = self.loadprofile.valueForTimestamp(self.time) - self.pv.valueForTimestamp(self.time)*10\n if self.pv.valueForTimestamp(self.time) > self.max_pv:\n self.max_pv = self.pv.valueForTimestamp(self.time)\n state = np.hstack([self.calc_time_waves(), self.el_storage.stateOfCharge/self.el_storage.capacity, self.residual])\n self.time += 1\n done = self.time >= self.rand_start + self.EP_LEN\n self.cum_r += r\n return state, r, done, self.cum_r", "def step(self, action: list) -> None:\n self._input = np.array(\n [self._thrust_surge(action[0]), self._moment_steer(action[1])]\n )\n w, q = odesolver45(\n self._state_dot, self._state, self.config.simulation.t_step_size\n )\n\n self._state = q\n self._state[2] = geom.princip(self._state[2])\n\n self._prev_states = np.vstack([self._prev_states, self._state])\n self._prev_inputs = np.vstack([self._prev_inputs, self._input])\n\n self._step_counter += 1", "def step(self) -> bool:\n raise NotImplementedError()", "def step_C(self, *args, **kwargs):\n if self.quester.db.test_quest_counter and self.quester.db.test_quest_counter > 5:\n self.quester.msg(\"Quest complete! 
Get XP rewards!\")\n self.quester.db.xp += 10\n self.complete()", "def proceed(self):\n if self.current_step is None or self.step_position == StepPosition.Before:\n return\n\n for condition, transition in self.current_step.conditions:\n if condition.satisfied():\n new_proc = transition.procedure\n self.current_procedure_id = new_proc\n self.current_step = self._suite[new_proc].steps[transition.step]\n self.step_position = StepPosition.Before\n break", "def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action):\n pass", "def step(self, action):\n pass", "def step(self, action):\n action = self.randomization.action_randomizer.randomize(\n action, self._random_state\n )\n\n robot_exception = None\n try:\n self._act(action)\n except RobotException as re:\n logger.error(\n f\"Robot raised exception: {str(re)}. 
This will finish the current episode.\"\n )\n robot_exception = re\n\n if not self.constants.physical:\n # We don't need to do stepping for physical roll out.\n self.mujoco_simulation.step()\n\n self._synchronize_step_time()\n self.t += 1\n\n obs, reward, done, info = self.get_observation(robot_exception=robot_exception)\n obs, reward, done, info = self.step_finalize(obs, reward, done, info)\n return obs, reward, done, info", "def step(self, action: ActionType) -> None:\n raise NotImplementedError", "def moveSpecialOb(self):\n\t\tfor obJ in self.special:\n\t\t\tobJ.moveStep()", "def task3(self):\n\n pass", "def step(self, action):\n\n state, reward, done = super(GridWorld, self).step(action)\n\n done = (state == self.absorbing_state) or done\n\n return state, reward, done", "def _prey_step(self):\n raise NotImplementedError()", "def pre_step(self,status):\n self.t0 = time.time()\n pass", "def compute_step(X):\n return MOVING_STEP", "def step(self):\n if self.model.schedule.steps < self.model.residential_steps:\n residential_move = True\n else:\n residential_move = False\n\n\n if residential_move:\n # only step the agents if the number considered is not exhausted\n if self.model.total_considered < self.model.residential_moves_per_step:\n # move residential\n U_res = self.get_res_satisfaction(self.pos)\n self.model.res_satisfaction.append(U_res)\n\n # print(\"U_res\",U_res)\n if U_res < self.T:\n\n # todo: implement different move schemes, for now only random\n # find all empty places\n # rank them\n # take one with boltzmann probability.\n self.evaluate_move(U_res, school=False)\n\n else:\n self.model.res_happy += 1\n\n self.model.total_considered += 1\n #print(\"considered\",self.model.total_considered)\n\n\n else:\n if self.model.total_considered < self.model.school_moves_per_step:\n # school moves\n # satisfaction in current school\n U = self.get_school_satisfaction(self.school, self.dist_to_school)\n self.model.satisfaction.append(U)\n\n # If unhappy, compared to threshold move:\n if U < self.T:\n #print('unhappy')\n self.evaluate_move(U, school=True)\n\n else:\n self.model.happy += 1\n if self.model.total_considered>0:\n self.model.percent_happy = np.ma(self.model.happy/self.model.total_considered)", "def run(self, p):\n while self.state < 3:\n self.__step(p)", "def step(self):\n self.world.slosh_oceans()\n self.world.transfer_energy_vertically()\n self.world.transfer_energy_horizontally()\n self.world.absorb_energy_from_core()\n self.world.absorb_energy_from_sun(self.sun)", "def step(self, action: CARLAAction, *args: Any, **kwargs: Any) -> Transition:\n observation, reward, done, info = self.env.step(action)\n if observation[\"lane_invasion\"] > 0:\n logging.debug(\"A lane was invaded\")\n done = True\n reward = -1.0\n return observation, reward, done, info", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def main():\n aoc_input = aoc_01_input.get_input()\n\n current_direction = 'N'\n steps_north = 0\n steps_east = 0\n\n # For part 
2: Store all the coords visited in a list\n all_coords_list = []\n # A variable to save HQ coordinates in\n hq_coords = None\n\n for instruction in aoc_input:\n # One instruction is eg 'R2' or 'L44'\n input_turn = instruction[0]\n input_steps = int(instruction[1:])\n\n current_direction = change_direction(current_direction, input_turn)\n\n if current_direction == 'N':\n\n for k in range(input_steps):\n current_coords = [steps_north + k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north += input_steps\n\n elif current_direction == 'E':\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east + k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east += input_steps\n\n elif current_direction == 'S':\n\n for k in range(input_steps):\n current_coords = [steps_north - k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north -= input_steps\n\n else:\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east - k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east -= input_steps\n\n current_coords = [steps_north, steps_east]\n\n total_distance = abs(steps_north) + abs(steps_east)\n\n total_distance_part2 = abs(hq_coords[0]) + abs(hq_coords[1])\n\n print('Part 1: {}'.format(total_distance))\n print('Part 2: {}'.format(total_distance_part2))\n\n # print('Part 1: {}'.format(get_root(aoc_input[:])['name']))\n # print('Part 2: {}'.format(find_imbalance(aoc_input[:])))", "def test_step_end(self, output: Optional[_STEP_OUTPUT_TYPE]) -> \\\n Optional[_STEP_OUTPUT_TYPE]:\n output = to_cpu(output)\n\n return super().test_step_end(output)", "def step(self,inp): ## function responsible for exciting the machine with a SINGLE INPUT VALUE\n (s, o) = self.getNextValues(self.state,inp)\n # will store the state and return the output\n self.state =s\n return o", "def endOfTestcase(self):\n pass # nothing to do here. 
Hence pass statement is called.", "def step(self, action): # action is nb-cops-sized or 1-sized\n reward = 0\n done = False\n\n action = np.array(action)\n\n def old_pos(set=None):\n if set is None:\n return self.cops_pos if self.is_cops_turn else self.rob_pos\n else:\n if self.is_cops_turn:\n self.cops_pos = action\n else:\n self.rob_pos = action\n\n invalids = []\n\n if self.is_first_turn:\n self.graph.set_cr(action, self.is_cops_turn)\n else:\n edges = self.graph.get_rep()[old_pos(), action]\n invalids = edges != 1\n invalids[action == old_pos()] = False\n invalids = np.where(invalids == True)[0]\n if invalids.shape[0] != 0:\n action[invalids] = old_pos()[invalids] # correct action\n self.graph.set_cr(action, self.is_cops_turn)\n\n old_pos(action)\n if not self.is_cops_turn and self.is_first_turn:\n self.is_first_turn = False\n self.is_cops_turn = not self.is_cops_turn\n if self.rob_pos is not None and self.rob_pos[0] in self.cops_pos:\n print(\"Cops won\")\n done = True\n reward += (1 if self.is_cops_turn else -1) * REWARD_END_WL\n\n reward += (-1 if self.is_cops_turn else +1) * REWARD_STEP_WL\n reward -= len(invalids) * REWARD_INVALID\n\n observation = self.graph.get_attr()\n\n if self.is_cops_turn:\n self.cops_rew += reward\n else:\n self.rob_rew += reward\n\n if not done:\n if self.is_cops_turn and self.cops is not None:\n observation, _, done, _ = self.step(self.cops.act(observation))\n elif not self.is_cops_turn and self.robber is not None:\n observation, _, done, _ = self.step(self.robber.act(observation))\n return observation, reward, done, {}", "def step_async(self, actions):", "def test_progress(self):\n # A requires a certain object in inventory\n self._fulfillA()\n self.character.quests.progress()\n self.assertEqual(self._get_quest().current_step, \"B\")\n\n # B requires progress be called with specific kwarg\n # should not step (no kwarg)\n self.character.quests.progress()\n self.assertEqual(self._get_quest().current_step, \"B\")\n\n # should step (kwarg sent)\n self.character.quests.progress(complete_quest_B=True)\n self.assertEqual(self._get_quest().current_step, \"C\")\n\n # C requires a counter Attribute on char be high enough\n self._fulfillC()\n self.character.quests.progress()\n self.assertEqual(self._get_quest().current_step, \"C\") # still on last step\n self.assertEqual(self._get_quest().is_completed, True)", "def step(self):\n try:\n self.agents.sort(key=lambda x: x.dist)\n except Exception as e:\n print(e)\n\n for agent in self.agents:\n try:\n agent.step()\n except Exception as e:\n print(e)\n\n\n # Removes agents if they reach exit\n for exit in self.model.exits:\n x, y = exit.pos[0] * 6 + 1, exit.pos[1] * 6 + 1\n if agent.node == (x, y):\n try:\n agent.saved()\n except Exception as e:\n print(e)", "def step(self, model):\n pass", "def step(self, model):\n pass", "def step(self, action):\n\n ob = self.sendCmd(self.url, self.actions_list[action])\n print(\"ob -> {}\".format(ob['obsequium']))\n\n self.obsequium = int(ob[\"obsequium\"])\n self.bonus = int(ob[\"bonus\"])\n\n # we need to check is make sense finish it\n if self.is_game_done:\n raise RuntimeError(\"Episode is done\")\n self.curr_step += 1\n self._take_action(action)\n\n reward = self._get_reward()\n\n # TODO: revisar ob = self._get_state()\n\n return ob, reward, self.is_game_done, {}", "def perform_step(self, action):\n pass", "def solveOneStep(self):\n ### Student code goes here\n return True", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, 
y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def step(self):\n self.generation += 1\n Membrane.step(self)", "def step(self, action):\n\n if not self._is_action_legal(action):\n return self.current_state, self.reward_illegal_action, self._is_terminal_state(), None\n else:\n # Change action passed if environment should behave random\n if self.stochastic:\n if not np.random.choice([True, False], 1, p=[self.p, 1 - self.p]):\n action = np.random.choice(self.possible_actions)\n\n # Needed for reward calculation (must be done before updating data structures)\n number_of_shifts = self._get_number_of_shifts(action)\n is_cargo_mandatory = int(self.vehicle_data[2][action] == 1)\n\n slot = self.end_of_lanes[self.current_Lane]\n self.loading_sequence += \"{}. Load Vehicle Type \\t {} \\t in Lane: \\t {} \\t Row: \\t {} \\n\" \\\n .format(self.sequence_no, action, self.current_Lane, slot)\n\n self.end_of_lanes[self.current_Lane] += self.vehicle_data[4][action]\n\n if self.vehicle_data[1][action] == -1 or \\\n self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action]:\n self.number_of_vehicles_loaded[action] += 1\n\n self.loaded_vehicles[self.current_Lane][self.vehicle_Counter[self.current_Lane]] = action\n self.vehicle_Counter[self.current_Lane] += 1\n\n # Update grids\n for i in range(self.vehicle_data[4][action]):\n self.grid.T[self.current_Lane][slot + i] = self.sequence_no\n self.grid_destination.T[self.current_Lane][slot + i] = self.vehicle_data[3][action]\n self.grid_vehicle_type.T[self.current_Lane][slot + i] = self.vehicle_data[0][action]\n\n # Update lowest destination data structure\n if self.vehicle_data[3][action] < self.lowest_destination[self.current_Lane]:\n self.lowest_destination[self.current_Lane] = self.vehicle_data[3][action]\n\n self.sequence_no += 1\n # Update according to lane selection heuristic\n self.current_Lane = self._get_minimal_lanes()[0]\n\n self.possible_actions = self.get_possible_actions_of_state()\n self.current_state = self._get_current_state()\n\n if self._is_terminal_state():\n # Calculate reward for terminal state\n free_spaces = np.sum(self._get_free_capacity()) / np.sum(self.total_capacity)\n mandatory_vehicles_left_to_load = np.sum(self.vehicle_data[1][self.mandatory_cargo_mask]\n - self.number_of_vehicles_loaded[self.mandatory_cargo_mask])\n reward_features = np.array(\n [is_cargo_mandatory, number_of_shifts, free_spaces, mandatory_vehicles_left_to_load])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, True, {}\n else:\n # Calculate reward\n reward_features = np.array([is_cargo_mandatory, number_of_shifts, 0, 0])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, False, {}", "def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)", "def Advance():\n warp.step()", "def step(self):\n #_increment timers\n for agent in self.agents:\n agent.tick()\n\n # choose agent pair\n agentA, agentB = self.choose()\n\n # interact\n agentA.step(agentB)\n agentB.step(agentA)\n\n # log results\n self.logger.log(agentA, agentB)\n\n # increment counters\n self.steps += 1\n self.time += 1", "def step(self):\n # No need for epsilon as exploration is controlled by c\n\n\n # Step in time (choose an action) \n n = 
np.sum(self.action_count)\n if n > 0: # Condition to evaluate on first iteration, because np.log(0) = -inf\n mask = self.action_count > 0 # Mask to avoid division by 0 on the formula for upper confidence uncertainties\n uncertainties = np.zeros(self.action_count.shape)\n uncertainties[mask] = self.c*np.sqrt(np.log(n)/self.action_count[mask])\n uncertainties[~mask] = float('inf') # We increment uncertainty of actions we've never chosen\n else:\n uncertainties = np.array(np.repeat(float('inf'), len(self.action_count))) \n optimals = self.Q + uncertainties # Uncertainty rises the value of less chosen actions, hence promoting exploration\n \n max_actions = np.argwhere(optimals == np.amax(optimals)).flatten() # greedy actions (max value)\n if len(max_actions) == 1:\n self.last_action = max_actions\n else:\n self.last_action = np.random.choice(max_actions)\n\n return self.last_action", "def horde_step(self, observation):", "def c_test_step_inp(self, particles, best_state, best_fitness, run_locals):\r\n return 1", "def step(self, action_code):\n # Decision for 1 unit or city\n self.learning_agent.take_action(action_code,\n self.game,\n unit=self.last_observation_object[0],\n city_tile=self.last_observation_object[1],\n team=self.last_observation_object[2]\n )\n\n self.current_step += 1\n\n # Get the next observation\n is_new_turn = True\n is_game_over = False\n is_game_error = False\n try:\n (unit, city_tile, team, is_new_turn) = next(self.match_generator)\n\n obs = self.learning_agent.get_observation(self.game, unit, city_tile, team, is_new_turn)\n self.last_observation_object = (unit, city_tile, team, is_new_turn)\n except StopIteration:\n # The game episode is done.\n is_game_over = True\n obs = None\n except GameStepFailedException:\n # Game step failed, assign a game lost reward to not incentivise this\n is_game_over = True\n obs = None\n is_game_error = True\n\n # Calculate reward for this step\n reward = self.learning_agent.get_reward(self.game, is_game_over, is_new_turn, is_game_error)\n\n return obs, reward, is_game_over, {}", "def step(self, action_code):\n # Decision for 1 unit or city\n self.learning_agent.take_action(action_code,\n self.game,\n unit=self.last_observation_object[0],\n city_tile=self.last_observation_object[1],\n team=self.last_observation_object[2]\n )\n\n self.current_step += 1\n\n # Get the next observation\n is_new_turn = True\n is_game_over = False\n is_game_error = False\n try:\n (unit, city_tile, team, is_new_turn) = next(self.match_generator)\n\n obs = self.learning_agent.get_observation(self.game, unit, city_tile, team, is_new_turn)\n self.last_observation_object = (unit, city_tile, team, is_new_turn)\n except StopIteration:\n # The game episode is done.\n is_game_over = True\n obs = None\n except GameStepFailedException:\n # Game step failed, assign a game lost reward to not incentivise this\n is_game_over = True\n obs = None\n is_game_error = True\n\n # Calculate reward for this step\n reward = self.learning_agent.get_reward(self.game, is_game_over, is_new_turn, is_game_error)\n\n return obs, reward, is_game_over, {}", "def step5(self):\n\t\tself.j = self.k\n\t\tif self.b[self.k] == 'e':\n\t\t\ta = self.m()\n\t\t\tif a > 1 or (a == 1 and not self.cvc(self.k-1)):\n\t\t\t\tself.k = self.k - 1\n\t\tif self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:\n\t\t\tself.k = self.k -1", "def step(self, action: int):\n assert self.action_space.contains(action)\n loc = action\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n # 
update bord\n self.board[loc] = to_code(self.mark)\n\n # check if game has ended\n status = check_game_status(self.board)\n if status >= 0:\n self.done = True\n if status in [1, 2]:\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # update mark\n self.mark = next_mark(self.mark)\n\n return self._get_obs(), reward, self.done, None", "def step(self, actions):\n observations = []\n rewards = []\n dones = []\n self.perform_action(self.car1, actions[0])\n self.perform_action(self.car2, actions[1])\n p.stepSimulation()\n observations = self._get_obs()\n for i in range(2):\n rewards.append(self._get_rewards(i, observations[i]))\n self.step_counter+=1\n self.reduce = self.reduce * 0.9993\n for i in range(2):\n done, cause = self._check_done(observations[i])\n dones.append(done)\n if(done == 1):\n if(cause==1):\n rewards[1] += 72000\n rewards[0] -= 72000\n elif(cause==0):\n rewards[0] += 72000\n rewards[1] -= 72000\n elif(cause==2):\n rewards[i] -= 72000\n return observations, rewards, dones, None", "def build_step(self):\n pass", "def build_step(self):\n pass", "def checkStep(rc, steps, run_status, prog_args):\n\n if (rc == FAILURE) or (rc == EXCEPTION):\n buildException(run_status, 'previous command failed')\n else:\n defer.maybeDeferred(lambda x: startNextStep(x,\n run_status, prog_args), steps)", "def exceptional_scenario_3a(self, browser, actor, context):\n preconditions(browser, actor)\n self.start_up(browser, context)\n step_1(browser)\n self.step_2(browser)\n step_3a(browser) # The actor fills in the fields but omit mandatory fields\n self.step_4a(browser) # system warn, (back to the step 3)" ]
[ "0.63544285", "0.63411146", "0.6261979", "0.61237234", "0.59484553", "0.58107793", "0.5798005", "0.5778751", "0.57389134", "0.56745815", "0.56745815", "0.56745815", "0.5651764", "0.56497353", "0.5640354", "0.5634729", "0.5625408", "0.55929816", "0.55789346", "0.5559355", "0.55388045", "0.5526125", "0.5516983", "0.55084795", "0.55039537", "0.5500527", "0.54764193", "0.54727966", "0.547013", "0.5457454", "0.5457387", "0.54484355", "0.54376733", "0.54362226", "0.5433301", "0.5430933", "0.5417969", "0.54120344", "0.5395339", "0.5385263", "0.5379066", "0.537667", "0.53476524", "0.5337586", "0.5335381", "0.5329588", "0.53094083", "0.5282767", "0.5248223", "0.52445024", "0.5244144", "0.52432626", "0.5220421", "0.52203166", "0.5199522", "0.5199522", "0.51987267", "0.51982975", "0.5195788", "0.51921475", "0.5191453", "0.5181557", "0.5176014", "0.5159043", "0.5158437", "0.5158423", "0.5157877", "0.5156096", "0.5155544", "0.5155544", "0.51533395", "0.5152461", "0.5146654", "0.5144447", "0.5143695", "0.5140005", "0.5139892", "0.5130857", "0.5129892", "0.5129892", "0.5125759", "0.5123937", "0.51189494", "0.5116102", "0.5096466", "0.50823563", "0.50805634", "0.5077729", "0.50758564", "0.50729024", "0.5064175", "0.5063658", "0.5061823", "0.5061823", "0.5058768", "0.5057576", "0.5052826", "0.5051477", "0.5051477", "0.50466007", "0.50398475" ]
0.0
-1
Make sure that we can pop from the dictionary's value.
def test_remove_predecessors():
    assert remove_predecessors({"A": ["B", "C"]}, "B") == {"A": ["C"]}
    assert remove_predecessors({"A": ["B", "C"]}, "D") == {"A": ["B", "C"]}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self, key, *args):\n return super(ReadOnlyDict, self).pop(key, *args) # pragma: no cover", "def popitem(self):\n return super(ReadOnlyDict, self).popitem()", "def pop(self, key):\n pass", "def remove_value(self, thing_key, dkey):\n if thing_key in self.things_dict:\n dic = self.things_dict[thing_key]\n if type(dic) != type({}):\n return\n dic.pop(dkey, None)", "def _map_pop(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n ret = self[key]\n del self[key]\n return ret", "def popitem(self):\n if self.used == 0:\n raise KeyError(\"empty dictionary\")\n entry0 = self.table[0]\n entry = entry0\n i = 0\n if entry0.value is None:\n # The first entry in the table's hash is abused to hold the index to\n # the next place to look for a value to pop.\n i = entry0.hash\n if i >= self.size or i < i:\n i = 1\n entry = self.table[i]\n while entry.value is None:\n i += 1\n if i >= self.size:\n i = 1\n entry = self.table[i]\n res = entry.key, entry.value\n self._del(entry)\n # Set the next place to start.\n entry0.hash = i + 1\n return res", "def pop(self):\r\n it = iter(self)\r\n try:\r\n value = next(it)\r\n except StopIteration:\r\n raise KeyError\r\n self.discard(value)\r\n return value", "def pop(self, k, d=None): # real signature unknown; restored from __doc__\n pass", "def pop(self, key):\n return self.__data_dict.pop(key)", "def popitem(self):\r\n result = super(EmittingWeakKeyDefaultDict, self).popitem()\r\n if self.emitter:\r\n self.emitter.emit()\r\n return result", "def remove(self, value) -> None:\n key = getattr(value, self.keyattr)\n if callable(key):\n key = key()\n with suppress(ValueError):\n self.data[key].remove(value)\n self.size -= 1", "def test_delete(self):\n mute_map = MutableMap(**VALUE)\n del mute_map.str_val\n del mute_map['dict_val']\n\n assert not mute_map.get('str_val')\n assert not mute_map.get('dict_val')", "def dict_pop(d, key):\n return d.pop(key)", "def popitem(self):\n return self.__data_dict.popitem()", "def pop(self, key, *args):\r\n result = super(EmittingWeakKeyDefaultDict, self).pop(key, *args)\r\n if self.emitter:\r\n self.emitter.emit()\r\n return result", "def pop(self, key, *args):\n if len(args) > 1:\n raise TypeError('pop expected at most 2 arguments, got %s' %\n (len(args) + 1))\n if key in self:\n val = self[key]\n del self[key]\n else:\n try:\n val = args[0]\n except IndexError:\n raise KeyError(key)\n return val", "def pop(self, key, *args):\r\n try:\r\n return self.data.pop(key)()\r\n except (KeyError, SleekRefDied):\r\n if args:\r\n (default,) = args\r\n return default\r\n raise KeyError(key)", "def popitem(self):\r\n while True:\r\n key, value = self.data.popitem()\r\n o = key()\r\n if o is not None:\r\n return o, value", "def pop(self, key, d=None):\n if self._can_del(key):\n r = dict.pop(self, key, d)\n self._post_del(key)\n return r\n else:\n raise Exception('Cannot `pop`, deletion of key \"{}\" failed.'.format(key))", "def pop(self, prefix: str, value: Any = None) -> Any:\n try:\n value = self[prefix]\n del self[prefix]\n except KeyError:\n if value is None:\n raise\n return value", "def pop(self, key: T) -> Optional[U]:\n if key in self._store:\n return self._store.pop(key)\n return None", "def pop(self, key, *args):\r\n return self.data.pop(ref(key), *args)", "def pop(self, k, d=None):\n try:\n answer = self[k]\n del self[k]\n return answer\n except KeyError:\n return d", "def 
popitem(self):\r\n while 1:\r\n key, value = self.data.popitem()\r\n o = key()\r\n if o is not None:\r\n return o, value", "def test_splittable_pop_pending(self):\n a, b = self.make_shared_key_dict(2)\n\n a['a'] = 4\n with self.assertRaises(KeyError):\n b.pop('a')", "def pop(self):\n pass", "def pop(self):\n pass", "def popitem(self):\r\n while True:\r\n key, sleek_ref = self.data.popitem()\r\n try:\r\n return key, sleek_ref()\r\n except SleekRefDied:\r\n pass", "def pop(self, key, *args):\r\n return self.data.pop(IdentityRef(key), *args)", "def _dpop(dictionary, key, default=None):\n try:\n ret = dictionary[key]\n del dictionary[key]\n except KeyError:\n ret = default\n\n return ret", "def pop(self):\n\n value = self.values[0]\n if len(self.values) == 1:\n self.values = []\n else:\n self.populate(self.values[1:])\n return value", "def pop(self):", "def pop(self):", "def pop():", "def __delitem__(self, key):\n super(ReadOnlyDict, self).__delitem__(key)", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def remove(self, val):\n if val in self.dic:\n i = self.dic[val]\n if i<len(self.data)-1:\n self.data[i]=self.data[-1]\n self.dic[self.data[i]]=i\n self.data.pop()\n self.dic.pop(val,0)\n return True\n else:\n return False", "def popitem(self):\n pass", "def __delitem__(self, key):\n if not self._set:\n raise TypeError('This dict is read-only')\n return self._set(key, None)", "def popitem(self):\n try:\n return self._maps[0].popitem()\n except KeyError:\n raise KeyError('No keys found in the last mapping.')", "def __exit__(self, type, value, traceback):\n self.pop()", "def testPopEntry(self):\n cache = ActionCache()\n z = self.ArbitraryAction('x', 'y')\n w= self.ArbitraryAction('x', 'y')\n cache.append(z)\n cache.append(w)\n self.failUnless(cache.pop(w) == z)\n self.failUnless(cache.get(w) == w)\n self.failUnless(cache.pop(z) == w)\n self.failUnless(cache.pop(z) == None)\n self.failIf(cache.contains(w))", "def remove(self, key: str) -> None:\n thekey = self._gethash(key)\n if self.HashMap[thekey] is not None:\n if len(self.HashMap[thekey]) == 2:\n self.HashMap[\n self._gethash(key)\n ] = None # Keep the location but set the value to None\n else:\n hashkey = self._gethash(key)\n idx = self._find_if_hashclash(key, hashkey, \"i\")\n self.HashMap[hashkey].pop(idx)\n self.HashMap[hashkey].pop(idx)\n self.length -= 1", "def pop_and_restore(hsh, key, default=None):\n if key in hsh:\n value = hsh.pop(key)\n was_there = True\n else:\n value = default\n was_there = False\n\n yield value\n\n if was_there:\n hsh[key] = value\n else:\n hsh.pop(key, None)", "def pop(self):\n raise NotImplementedError", "def remove(self, value):\r\n if value not in self:\r\n raise KeyError(value)\r\n self.discard(value)", "def popitem(self):\n with self.__plock:\n if len(self._keys) == 0:\n raise KeyError('Empty')\n\n key = self._keys[-1]\n val = self[key]\n del self[key]\n\n return (key, val)", "def heap_pop(self, value):\n if value is None or self.get_size() == 0:\n return\n\n if self.find(value) is not None:\n # end of list\n position = self.find(value)\n last = self.get_size() - 1\n\n # pop element and percolate down\n self.swap(position, last)\n self.table.pop()\n self.percolate_down(position)\n return", "def pop(self, key, default=NOT_GIVEN):\n if key in self:\n ret = self[key]\n del self[key]\n return ret\n elif default is NOT_GIVEN:\n raise KeyError(key)\n else:\n return default", "def pop(self):\n if self.items:\n return self.items.pop()\n return None", "def remove(self, val):\n 
if val in self.dict_val:\n list_index = self.dict_val[val]\n last_ele_index = len(self.list_val) -1\n if list_index == last_ele_index:\n self.dict_val.pop(val)\n self.list_val.pop()\n else:\n self.dict_val[self.list_val[last_ele_index]] = list_index\n self.list_val[list_index], self.list_val[last_ele_index] = self.list_val[last_ele_index], self.list_val[list_index]\n self.dict_val.pop(val)\n self.list_val.pop()\n # for index in range(list_index, len(self.list_val)):\n # self.dict_val[self.list_val[index]] -= 1\n # self.dict_val.pop(val)\n # self.list_val.pop(list_index)\n return True\n else:\n return False", "def pop(self) -> Any:\n # TODO: Implement this method\n ...", "def _map_popitem(self):\n if len(self) == 0:\n raise KeyError('key not found')\n key = self.keys()[0]\n return (key, self.pop(key))", "def __delitem__(self, key):\n try:\n del self._maps[0][key]\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))", "def remove(self, e):\n try:\n del self.vals[e]\n except:\n return", "def popitem(self): # real signature unknown; restored from __doc__\n pass", "def _unbox(self, value):\n try:\n return value.getKey()\n except AttributeError:\n return value", "def test_pop(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 100, sched)\n self.assertEqual(inst_map.pop(\"tmp\", 100), sched)\n self.assertFalse(inst_map.has(\"tmp\", 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(\"tmp\"), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:\n value = self[key]\n new_dict = dict(self._dict)\n new_dict.pop(key)\n new_self = type(self)(new_dict)\n return new_self, value", "def poplast(self, k=_MISSING, default=_MISSING):\n if k is _MISSING:\n if self:\n k = self.root[PREV][KEY]\n else:\n raise KeyError('empty %r' % type(self))\n try:\n self._remove(k)\n except KeyError:\n if default is _MISSING:\n raise KeyError(k)\n return default\n values = super(OrderedMultiDict, self).__getitem__(k)\n v = values.pop()\n if not values:\n super(OrderedMultiDict, self).__delitem__(k)\n return v", "def __delitem__(self, key):\n try:\n kvp = self.keyvaluepair_set.get(key=key)\n except KeyValuePair.DoesNotExist:\n raise KeyError\n else:\n kvp.delete()", "def __value_del(self):\n self.delete()", "def remove(self, key: int) -> None:\n if key in self.keys:\n idx = self.keys.index(key)\n self.keys.pop(idx)\n self.values.pop(idx)", "def remove_value(self, key: keyType, value: valueType) -> None:\n self.validate(key, value)\n head_node_index, chain_node_index = self.exist_key(key)\n if head_node_index == -1:\n raise Exception\n if value not in self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values:\n raise Exception\n if self.hashTable[head_node_index].count == 1:\n self.hashTable[head_node_index] = HeadNode()\n elif self.hashTable[head_node_index].count > 1:\n values_number = len(self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values)\n if 1 == values_number:\n self.hashTable[head_node_index].count -= 1\n self.hashTable[head_node_index].singlyLinkedList.pop(chain_node_index)\n elif values_number > 1:\n self.hashTable[head_node_index].count -= 1\n self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values.remove(value)\n else:\n raise Exception\n else:\n raise Exception", "def pop_(self):\n\n return self.items.pop()", "def remove_value(self, 
value: Hashable) -> bool:\n\t\treturn self.remove_values([value])", "def remove_value(self, key: str) -> None:\n raise NotImplementedError", "def del_value(self):\n return self.list.pop()", "def pop(self, key, default=None):\n with self.lock:\n try:\n item = dict.__getitem__(self, key)\n del self[key]\n return item[0]\n except KeyError:\n return default", "def _valueRemoved(self):\n self.__changed = 1\n if self.size() == 0:\n raise ConsistencyFailure()", "def test_splittable_popitem(self):\n a, b = self.make_shared_key_dict(2)\n\n orig_size = sys.getsizeof(a)\n\n item = a.popitem() # split table is combined\n self.assertEqual(item, ('z', 3))\n with self.assertRaises(KeyError):\n del a['z']\n\n self.assertGreater(sys.getsizeof(a), orig_size)\n self.assertEqual(list(a), ['x', 'y'])\n self.assertEqual(list(b), ['x', 'y', 'z'])", "def pop(self):\n try:\n self.mappings.pop()\n except IndexError:\n raise RuntimeError('stack is empty')", "def remove(self, data, key, value):\n if key in data:\n if not value: # value is empty or false, just remove it\n data.pop(key, None) # delete\n elif isinstance(value, type(data[key])): # if same type\n if isinstance(value, list): # if it's a list, like modules\n data[key] = list(set(data[key]) - set(value))\n elif isinstance(\n value, dict\n ): # if it's a dict, difference of the keys and rebuild dict\n for k, v in value.items():\n data[key][k] = self.remove(data[key], k, v)\n else:\n raise TypeError(\n f\"Value of {key} is {type(value)} and\"\n f\" the imported {key} is {type(data[key])}. Type mismatch.\"\n )\n return data[key]", "def pop(self):\n\n if self.items:\n return self.items.pop()\n\n return None", "def remove(self):\n if LongObjectHashMap.self.modCount != self.expectedModCount:\n raise ConcurrentModificationException()\n if self.lastReturned == self.EMPTY_KEY:\n raise IllegalStateException()\n self.count -= 1\n LongObjectHashMap.self.remove(self.lastReturned)\n self.lastReturned = self.EMPTY_KEY\n self.expectedModCount = LongObjectHashMap.self.modCount", "def pop(self) -> T:\n pass", "def stack_pop(self, key):\n task = Task.current_task()\n try:\n context = task._context_stack\n except AttributeError:\n raise KeyError('pop from empty stack') from None\n value = context[key]\n stack_value = value.pop()\n if not value:\n context.pop(key)\n return stack_value", "def pop(self): ##################### <-\n value = self.top.value\n self.top = self.top.next\n return value", "def pop(self): ##################### <-\n value = self.top.value\n self.top = self.top.next\n return value", "def pop(self): ##################### <-\n value = self.top.value\n self.top = self.top.next\n return value", "def get(self, key):\n try: \n value = self.pop(key) \n self[key] = value \n return value \n\n except KeyError: \n raise KeyError", "def pop(self):\n if not self.value:\n return\n s = []\n while len(self.value) > 1:\n s.append(self.value.pop())\n peek = self.value.pop()\n while s:\n self.value.append(s.pop())\n return peek", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def remove(self, val):\n if self.lookup.get(val, 0) > 0:\n self.lookup[val] = self.lookup.get(val, 0) - 1", "def remove(self, val: int) -> bool:\n if val in self.dict:\n last_element, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_element] = last_element, idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val 
in self.dict:\n last_element, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_element] = last_element, idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n        if val in self.hashmap:\n            temp=self.list[-1]\n            self.list[-1],self.list[self.hashmap[val]]=self.list[self.hashmap[val]],self.list[-1]\n            self.hashmap[temp]=self.hashmap[val]\n            self.list.pop()\n            del self.hashmap[val]\n            return True\n        return False", "def pop(self, key, *args): # pylint: disable=arguments-differ\n try:\n return self._maps[0].pop(key, *args)\n except KeyError:\n raise KeyError(\n 'Key not found in the last mapping: {!r}'.format(key))", "def _put_to_back(self, key, value):\n\n pass", "def pop(self):\n\n def sub_pop():\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heap = heaps[keys]\n pop = heap.pop()\n return pop\n\n try:\n val = sub_pop()\n except IndexError:\n self._remove_key()\n val = sub_pop()\n\n return val", "def pop(self, *args):\n have_default = len(args) == 2\n try:\n v = self[args[0]]\n except KeyError:\n if have_default:\n return args[1]\n raise\n else:\n del self[args[0]]\n return v", "def clear(self):\n super(ReadOnlyDict, self).clear() # pragma: no cover", "def remove(self, val):\n ind = self.table.pop(val, None)\n if ind is None:\n return False\n key = self.ls.pop()\n if len(self.ls)!=0 and len(self.ls) != ind:\n self.ls[ind] = key\n self.table[key] = ind\n return True", "def pop(self):\n pass", "def __delitem__(self, key):\r\n key = self.key(key)\r\n if key in self.data_with_same_key:\r\n if len(self.data_with_same_key[key]) == 1:\r\n self.data[key] = self.data_with_same_key.pop(key)[0]\r\n else:\r\n self.data[key] = self.data_with_same_key[key].pop(-1)\r\n else:\r\n del self.data[key]", "def discard_value(collection, key, value):\n try:\n values = collection[key]\n except KeyError:\n pass\n else:\n values.discard(value)\n if not values:\n del collection[key]", "def remove(self, val: int) -> bool:\n if val not in self.map:\n return False\n index = self.map[val]\n del self.map[val]\n \n if index+1 != len(self.keys):\n var = self.keys[-1]\n self.keys[index] = self.keys[-1]\n self.map[var] = index\n self.keys = self.keys[:-1]\n # print('removing. ', self.map)\n return True", "def __delitem__(self, key):\n pass", "def __delitem__(self, key):\n pass", "def remove(self, val: int) -> bool:\n if val in self.dict:\n idx, last_elem = self.dict[val], self.list[-1]\n self.list[idx] = last_elem\n self.dict[last_elem] = idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val in self.dict:\n idx, last_elem = self.dict[val], self.list[-1]\n self.list[idx] = last_elem\n self.dict[last_elem] = idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False" ]
[ "0.7131473", "0.7101309", "0.67752934", "0.67436886", "0.6718935", "0.6602139", "0.6528528", "0.64869654", "0.648454", "0.6478647", "0.6470268", "0.6445865", "0.64151543", "0.64034253", "0.6395446", "0.62999696", "0.6270913", "0.62059563", "0.6182744", "0.61714023", "0.6167908", "0.61415315", "0.6140226", "0.6126592", "0.61261946", "0.6114957", "0.6114957", "0.61119896", "0.61117166", "0.6106291", "0.6089665", "0.6081835", "0.6081835", "0.60810757", "0.6035018", "0.6030156", "0.6010946", "0.6009054", "0.5985446", "0.59793895", "0.5979265", "0.59573895", "0.59156424", "0.5906184", "0.5894558", "0.5881515", "0.58809805", "0.5878347", "0.58744764", "0.58633095", "0.58618194", "0.58592004", "0.58538306", "0.583762", "0.58216923", "0.58164567", "0.581597", "0.58027816", "0.5800376", "0.57961863", "0.5793141", "0.579183", "0.5791451", "0.57893854", "0.5778523", "0.5778376", "0.577532", "0.57710785", "0.5763651", "0.57620937", "0.5741654", "0.5738875", "0.57368726", "0.5733314", "0.5728577", "0.5726279", "0.5719833", "0.57154655", "0.57154655", "0.57154655", "0.57082087", "0.57078326", "0.57059693", "0.57047486", "0.57003635", "0.57003635", "0.5696381", "0.5696279", "0.56956035", "0.56925374", "0.56870794", "0.5678642", "0.5677438", "0.56689596", "0.56541383", "0.5641899", "0.5640469", "0.56378365", "0.56378365", "0.5623214", "0.5623214" ]
0.0
-1
Compare the sort to the example.
def test_find_sequential_ordering():
    example = {
        "C": [],
        "A": ["C"],
        "F": ["C"],
        "B": ["A"],
        "D": ["A"],
        "E": ["B", "D", "F"],
    }
    assert find_sequential_ordering(example) == "CABDFE"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sort_array(self):\r\n self.assertEqual(sort_array([6, 4, 9, 10]), [4, 6, 9, 10])", "def test_insertSort(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_1[0]),self.test_1[1])", "def test_sort(self):\n expected = [\n self.TDTT(when=self.dt_when - (3*self.SORT_DELTA)),\n self.TDTT(when=self.dt_when - self.SORT_DELTA),\n self.TDTT(when=self.dt_when),\n self.TDTT(when=self.dt_when + self.SORT_DELTA),\n self.TDTT(when=self.dt_when + (2*self.SORT_DELTA)),\n ]\n self.assertTrue(self.is_sorted_ascending_by_when(expected))\n\n unsorted = [\n expected[3], expected[2], expected[4], expected[1], expected[0],\n ]\n self.assertFalse(self.is_sorted_ascending_by_when(unsorted))\n self.assertNotEquals(\n [str(dt) for dt in expected],\n [str(dt) for dt in unsorted])\n\n now_sorted = self.TDTT.sort(unsorted)\n self.assertTrue(self.is_sorted_ascending_by_when(now_sorted))\n self.assertEquals(\n [str(dt) for dt in expected],\n [str(dt) for dt in now_sorted])", "def test_insertSort2(self):\n\t\tsortObj=insertSort()\n\t\tself.assertNotEqual(sortObj.run_sort(self.test_2[0]),self.test_2[1])", "def test_sort(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"b\", \"a\", \"C\", \"B\", \"A\"],\n args=[],\n kwargs={},\n expect=[\"A\", \"B\", \"C\", \"a\", \"b\"],\n ),\n Case(\n description=\"lists of objects with key\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[{\"title\": \"Baz\"}, {\"title\": \"bar\"}, {\"title\": \"foo\"}],\n ),\n Case(\n description=\"lists of objects with missing key\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"heading\": \"Baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[{\"title\": \"bar\"}, {\"title\": \"foo\"}, {\"heading\": \"Baz\"}],\n ),\n Case(\n description=\"empty list\",\n val=[],\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"too many arguments\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"baz\"}],\n args=[\"title\", \"heading\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"value not an array\",\n val=123,\n args=[],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"undefined argument\",\n val=[{\"z\": \"z\", \"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=FilterValueError,\n ),\n ]\n\n self._test(Sort, test_cases)", "def test_sort_sorted():\n test_data = [1, 2, 3]\n sorted_data = bubble_sort(test_data)\n assert sorted_data == test_data", "def test_12(self):\n num_elements = np.random.randint(1, 11)\n\n input_array = np.random.normal(size=num_elements)\n\n # We first check the sorting implementation.\n py = sorted(input_array)\n f90 = fort_debug.wrapper_sorted(input_array, num_elements)\n assert_equal(py, f90)\n\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n edu_spec, optim_paras, num_types = dist_class_attributes(\n respy_obj, \"edu_spec\", \"optim_paras\", \"num_types\"\n )\n\n args = (edu_spec[\"start\"], edu_spec[\"share\"], edu_spec[\"max\"])\n f90 = fort_debug.wrapper_sort_edu_spec(*args)\n py = sort_edu_spec(edu_spec)\n for i, label in enumerate([\"start\", \"share\", \"max\"]):\n assert_equal(py[label], f90[i])\n\n py = sort_type_info(optim_paras, num_types)\n f90 = 
fort_debug.wrapper_sort_type_info(optim_paras[\"type_shares\"], num_types)\n for i, label in enumerate([\"order\", \"shares\"]):\n assert_equal(py[label], f90[i])", "def testSort(self):\n numlist = [6,4.78,1.2,5]\n numlist.sort()\n self.assertEqual([1.2,4.78,5,6],numlist)\n \n strlist = [\"kgb\",\"mss\",\"cheka\"]\n strlist.sort()\n self.assertEqual([\"cheka\",\"kgb\",\"mss\"],strlist) \n \n # ------------ reverse sort\n numlist.sort(reverse = True)\n self.assertEqual([6,5,4.78,1.2],numlist)", "def test_sort_by_similarity(self):\n expected_ids = [id_ for id_, weight in sorted(self.id_weight_map.items(),\n key=lambda item: item[1])\n if weight >= Assessment.similarity_options[\"threshold\"]]\n\n query = [{\n \"object_name\": \"Assessment\",\n \"type\": \"ids\",\n \"order_by\": [{\"name\": \"__similarity__\"}],\n \"filters\": {\n \"expression\": {\n \"op\": {\"name\": \"similar\"},\n \"object_name\": \"Assessment\",\n \"ids\": [str(self.assessment.id)],\n },\n },\n }]\n response = self.client.post(\n \"/query\",\n data=json.dumps(query),\n headers={\"Content-Type\": \"application/json\"},\n )\n\n # note that in our test data every similar object has a different weight;\n # the order of objects with same weight is undefined after sorting\n self.assertListEqual(\n json.loads(response.data)[0][\"Assessment\"][\"ids\"],\n expected_ids,\n )", "def run_mergesort(original, expected):\n mergesort(original)\n assert original == expected", "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )", "def test_insertSort3(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_3[0]),self.test_3[1])", "def testSorting(self):\n if self.sorting in tools.SORTINGS:\n self.assertEqual(\n self.sorting,\n self.config.sorting\n )\n else:\n self.assertNotEqual(\n self.sorting,\n self.config.sorting\n )\n self.assertEqual(\n tools.SORTING_DEFAULT,\n self.config.sorting\n )", "def test_sort_all_equal():\n assert bubble_sort([1, 1, 1, 3, 4, 10, 2, 3]) == [1, 1, 1, 2, 3, 3, 4, 10]", "def test():\n testList1 = [54,26,93,17,77,31,44,55,20]\n testList2 = [54,26,93,17,77,31,44,55,20]\n \n shellSort(testList1)\n testList2.sort()\n \n assert testList1 == testList2\n \n print(\"Test: Success\")", "def test_sort_all_equal():\n test_data = [1, 1, 1]\n sorted_data = bubble_sort(test_data)\n assert sorted_data == [1, 1, 1]", "def sortby(self):\n ...", "def test_sort_sorted():\n assert bubble_sort([1, 2, 3, 4, 5, 6]) == [1, 2, 3, 4, 5, 6]", "def testArrayOfTwoSorted():\n arr = [1, 2]\n sort(arr)\n expectedArr = [1, 2]\n assert isEqual(arr, expectedArr)", "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items", "def pass_test(sort_func, arr):\n sort_array = sorted(arr[:])\n # Print accordingly\n if sort_func(arr[:]) == sort_array:\n print(\"Test Passed\")\n else:\n print(\"Error: Test not passed\")", "def sort(self):\r\n\t\treturn sorted(self.sample)", "def reorder_examples(self):\n self.example_wise_shrink(Ordering, key=sort_key)", "def test_bogo(self):\n integers = bogo_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_merge(self):\n integers = merge_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def 
test_selection(self):\n integers = bubble_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_calc_sort_with_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object._calc_sort_value(after_object=self.test.datum_type1,\n sort_base_length=3,\n increment=1,\n sort_prefix_parts=[test_object.datum_group.sort]\n )\n expected = 10101\n self.assertEqual(expected, actual)", "def test_sorted_is_not_original():\n test_data = [3, 2, 1]\n sorted_data = bubble_sort(test_data)\n assert sorted_data != test_data", "def sort_results(self):\n pass", "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def test_single():\n assert bubble_sort([1]) == [1]", "def test_single():\n assert bubble_sort([1]) == [1]", "def assert_sorted_features(features, **sort_args):\n for _ in xrange(10): # tests with a few shuffles\n shuffled = features[:]\n random.shuffle(shuffled)\n sort_features(shuffled, **sort_args)\n eq_(shuffled, features)", "def test_sorting(sort=bubble_sort, num_items=20, max_value=50):\n # Create a list of items randomly sampled from range [1...max_value]\n items = random_ints(num_items, 1, max_value)\n print('Initial items: {!r}'.format(items))\n print('Sorted order? {!r}'.format(is_sorted(items)))\n\n # Change this sort variable to the sorting algorithm you want to test\n sort = selection_sort\n print('Sorting items with {}(items)'.format(sort.__name__))\n sort(items)\n print('Sorted items: {!r}'.format(items))\n print('Sorted order? {!r}'.format(is_sorted(items)))", "def test_three_identical():\n run_mergesort([3, 3, 3], [3, 3, 3])", "def test_for_different_input_sizes_asc(self):\n for size in range(1, 50):\n c = [random.randint(1, 1000) for _ in range(size)]\n\n copy = c\n\n # sort using mergeSort and using builtin sort\n sort.asc(c)\n copy.sort()\n\n assert c == copy", "def test_sort():\n data = [\"filename_{}.py\".format(i) for i in range(200)]\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)", "def test_sorter_functions(func, array_to_sort, expected_sorted_array):\n assert func(array_to_sort) == expected_sorted_array", "def test_quick(self):\n integers = quick_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_insertion(self):\n integers = insertion_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_calc_sort_without_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object._calc_sort_value(sort_base_length=3,\n increment=1,\n sort_prefix_parts=[test_object.datum_group.sort]\n )\n expected = 10101\n self.assertEqual(expected, actual)", "def test_sorting():\n circles = [Circle(i) for i in range(10, 1, -1)] \n sorted_circles = sorted(circles, key=Circle.sort_key)\n assert circles != sorted_circles", "def testSorting(self):\n mtt.makeTempDirParent()\n shuffledTargets = list(g_targetBlocks)\n for i in xrange(0, 200):\n tmpDir = os.path.abspath(mtt.makeTempDir('sorting'))\n random.shuffle(g_nonTargetBlocks)\n random.shuffle(shuffledTargets)\n shuffledBlocks = list(shuffledTargets)\n lower = 0\n for j in xrange(0, len(g_nonTargetBlocks)):\n # randomly insert the non target blocks, but keep a record\n # of their relative order.\n index = random.randint(lower, len(shuffledBlocks))\n shuffledBlocks.insert(index, g_nonTargetBlocks[j])\n lower = index + 1\n testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n ''.join(shuffledBlocks), g_headers)\n parent = 
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafSorter'))]\n cmd += ['--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n '--seq', 'hg18.chr7']\n outpipes = [os.path.abspath(os.path.join(tmpDir, 'sorted.maf'))]\n mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)\n mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)\n self.assertTrue(mafIsSorted(os.path.join(tmpDir, 'sorted.maf')))\n mtt.removeDir(tmpDir)", "def test_sort(self):\n # Create a new REANATemplate with an empty workflow specification and\n # a list of five parameters\n template = REANATemplate(\n workflow_spec={},\n parameters=[\n pd.parameter_declaration('A', index=1),\n pd.parameter_declaration('B'),\n pd.parameter_declaration('C'),\n pd.parameter_declaration('D', index=2),\n pd.parameter_declaration('E', index=1)\n ],\n validate=True\n )\n # Get list of sorted parameter identifier from listing\n keys = [p.identifier for p in template.list_parameter()]\n self.assertEqual(keys, ['B', 'C', 'A', 'E', 'D'])", "def test_sort_sorted():\n sorted_data = [1, 2, 3, 4, 5]\n sorted_list = bubble_sort(sorted_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large", "def test_three_element_input(self):\n res = merge_sort([2, 1, 3])\n self.assertEqual(res, [1, 2, 3])", "def __cmp__(self,o):\n\t\treturn cmp(self.weight,o.weight)", "def testArrayOfOne():\n arr = [1]\n sort(arr)\n expectedArr = [1]\n assert isEqual(arr, expectedArr)", "def test1_3():\n print(\"\\t-sort students on their GPA.\")\n tc = unittest.TestCase()\n students = [ Student('Josh', 3.0), Student('Angela', 2.5), Student('Vinesh', 3.8), Student('Jia', 3.5) ]\n sortedstudents = mysort(students, lambda x,y: 0 if x.gpa == y.gpa else (-1 if x.gpa < y.gpa else 1))\n expected = [ Student('Angela', 2.5), Student('Josh', 3.0), Student('Jia', 3.5), Student('Vinesh', 3.8) ]\n tc.assertEqual(sortedstudents, expected)", "def test1_3():\n print(\"\\t-sort students on their GPA.\")\n tc = unittest.TestCase()\n students = [ Student('Josh', 3.0), Student('Angela', 2.5), Student('Vinesh', 3.8), Student('Jia', 3.5) ]\n sortedstudents = mysort(students, lambda x,y: 0 if x.gpa == y.gpa else (-1 if x.gpa < y.gpa else 1))\n expected = [ Student('Angela', 2.5), Student('Josh', 3.0), Student('Jia', 3.5), Student('Vinesh', 3.8) ]\n tc.assertEqual(sortedstudents, expected)", "def test_original_unchanged():\n test_data = [3, 2, 1]\n sorted_data = bubble_sort(test_data)\n assert test_data == [3, 2, 1]", "def test_bubble(self):\n integers = bubble_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]", "def __cmp__(self, _other):\n return cmp(self.fitness(), _other.fitness())", "def test_benchmark_sorted(benchmark, benchmark_items_fixture):\n do_benchmark(benchmark_items_fixture, sorted, benchmark)", "def compare_instances(self, inst1, inst2):\n for skey, sdir in zip(self._sort_keys, self._sort_dirs):\n resultflag = 1 if sdir == 'desc' else -1\n if inst1[skey] < inst2[skey]:\n return resultflag\n elif inst1[skey] > inst2[skey]:\n return resultflag * -1\n return 0", "def compare(self) -> int:", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def test_sort_all_equal():\n equal_data = [1, 1, 1, 1, 1]\n sorted_list = 
bubble_sort(equal_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large", "def test_single():\n test_data = [randint(0, 10)]\n assert len(bubble_sort(test_data)) == 1", "def test_one():\n run_mergesort([1], [1])", "def __cmp__(self, other):\n return cmp((self.benchmark, self.name), (other.benchmark, other.name))", "def test_get_sort_value_with_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object.get_sort_value(after_object=self.test.datum_type1)\n expected = 10101\n self.assertEqual(expected, actual)", "def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))", "def test_for_different_input_sizes_desc(self):\n for size in range(1, 50):\n c = [random.randint(1, 1000) for _ in range(size)]\n\n copy = c\n\n # sort using mergeSort and using builtin sort\n sort.desc(c)\n copy.sort(reverse=True)\n\n assert c == copy", "def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)", "def test_calc_sort_multiple_parts(self):\n test_object = self.test.datum_object1\n sort_parts = [test_object.datum_group.sort,\n test_object.datum_type.sort\n ]\n actual = self.test.datum_object1._calc_sort_value(sort_base_length=2,\n increment=1,\n sort_prefix_parts=sort_parts\n )\n expected = 101010010\n self.assertEqual(expected, actual)", "def test_two_element_input(self):\n res = merge_sort([2, 1])\n self.assertEqual(res, [1, 2])", "def testArrayOfTwoNotSorted():\n arr = [2, 1]\n sort(arr)\n expectedArr = [1, 2]\n assert isEqual(arr, expectedArr)", "def test_sort_cards(a_list, result):\n assert sort_cards(a_list) == result", "def test_sort_and_fill_taxa_summaries_same(self):\r\n exp = [(['Even7', 'Even8'], ['Eukarya'], array([[1.0, 1.0]])),\r\n (['Even7', 'Even8'], ['Eukarya'], array([[1.0, 1.0]]))]\r\n obs = _sort_and_fill_taxa_summaries([self.taxa_summary3,\r\n self.taxa_summary3])\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n exp = [(['Even7', 'Even8'], ['Eukarya'], array([[1.0, 1.0]])),\r\n (['Even1', 'Even2'], ['Eukarya'], array([[0.5, 0.6]]))]\r\n obs = _sort_and_fill_taxa_summaries([self.taxa_summary3,\r\n self.taxa_summary4])\r\n self.compare_multiple_level_array(obs, exp)\r\n\r\n # Test the other direction.\r\n exp = [(['Even1', 'Even2'], ['Eukarya'], array([[0.5, 0.6]])),\r\n (['Even7', 'Even8'], ['Eukarya'], array([[1.0, 1.0]]))]\r\n obs = _sort_and_fill_taxa_summaries([self.taxa_summary4,\r\n self.taxa_summary3])\r\n self.compare_multiple_level_array(obs, exp)", "def sort():\n return -1", "def test_categories_are_sorted(self):\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def sort(self):\r\n return self.sort_targets([self])", "def test_get_sort_value_without_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object.get_sort_value()\n expected = 10101\n self.assertEqual(expected, actual)", "def test_compare(self):", "def cmp(cls, first, second):\n return first == second", "def test_properties_xsorted(things, reverse):\n assert_property_xsorted_is_the_same_as_sorted(xsorted, things, reverse)", "def test_sorted_is_not_original():\n first_list = [3, 2, 1]\n new_list = bubble_sort(first_list)\n assert new_list != first_list", "def test_sort_success_return_sortedArray(self):\n\n # prepare\n unsortedArray = [12.0, 13.5, 1.0, 5.5,\n 9.0, 19.5, 12.0, 23.5, 
5.0, 51.0]\n expectedResult = [1.0, 5.0, 5.5, 9.0,\n 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]\n\n # execute\n actuatlResponse = PSPQuickSortProcess.sort(unsortedArray)\n\n # assert\n self.assertEqual(expectedResult, actuatlResponse)", "def test_heap_sort(self):\n integers = heap_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_shell(self):\n integers = shell_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_get_sort_info(self):\n ars = self.ar[2009][11]['day']\n self.assertEqual(ars.get_sort_info(), (31, 'key', True))", "def __cmp__(self, other):\n assert isinstance(other, Caption)\n if self.score == other.score:\n return 0\n elif self.score < other.score:\n return -1\n else:\n return 1", "def test_random_lst():\n from quick_sort import quick_sort\n lst_sort = sorted(rand_lst)\n assert quick_sort(rand_lst) == lst_sort", "def test_sort_otu_table(self):\r\n\r\n actual = sort_otu_table(parse_biom_table_str(self.otu_table1),\r\n ['NA', 'Key', 'Fing'])\r\n expected = parse_biom_table_str(self.age_sorted_otu_table1)\r\n self.assertEqual(actual, expected)", "def compare(self, other):\n # First, compare sections\n if (self.section != \"\" or other.section != \"\") and self.section != other.section:\n if self.section == \"\" and other.section != \"\":\n return -1\n elif self.section != \"\" and other.section == \"\":\n return 1\n else:\n if self.section > other.section:\n return 1\n else:\n return -1\n\n # Next, compare topics\n if self.topic != other.topic:\n stopic = _split(self.topic)\n otopic = _split(other.topic)\n if stopic[0] != otopic[0]:\n if stopic[0] > otopic[0]:\n return 1\n else:\n return -1\n if float(stopic[1]) > float(otopic[1]):\n return 1\n else:\n return -1\n\n # Then sub-topics\n if self.sub_topic != other.sub_topic:\n result = _compare(self.sub_topic, other.sub_topic)\n if result != 0:\n return result\n\n # Then cutters\n if self.cutter != other.cutter:\n result = _compare(self.cutter, other.cutter)\n if result != 0:\n return result\n\n # Then normal after-effects in V-Y-O-C priority\n if self.version != other.version:\n if self.version > other.version:\n return 1\n return -1\n\n if self.year != other.year:\n if self.year > other.year:\n return 1\n return -1\n\n # We must take the work letter into account\n if self.work_letter != other.work_letter:\n if self.work_letter > other.work_letter:\n return 1\n return -1\n\n # If any unknown additions are present, try to guess at those.\n if self.other != other.other:\n # TODO: Try to guess numbers vs words and such\n if self.other > other.other:\n return 1\n return -1\n\n # Copy is always evaluated last\n if self.copy != other.copy:\n if self.copy > other.copy:\n return 1\n return -1\n\n return 0 # All else fails, we must be equal.", "def test_cmp(self, tmpdir, treantclass):\n with tmpdir.as_cwd():\n c1 = treantclass('a')\n c2 = treantclass('b')\n c3 = treantclass('c')\n\n assert sorted([c3, c2, c1]) == [c1, c2, c3]\n assert c1 <= c2 < c3\n assert c3 >= c2 > c1", "def test_20el_sorted_array(self):\n res = merge_sort([1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])\n self.assertEqual(res, [1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])", "def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")", "def __cmp__(self, 
other):\n _, _ = self, other\n return 0", "def test_dotted_sorting(self):\n assert natsort(['1.5', '1.0']) == ['1.0', '1.5']", "def __cmp__(self, other):\n if self.weight > other.weight:\n return 1\n elif self.weight < other.weight:\n return -1\n else:\n return 0", "def test_two_ordered():\n run_mergesort([1, 2], [1, 2])", "def test_short_bubble_sort(self):\n self.assertEqual(\n short_bubble_sort([5, 4, 3, 2, 1]),\n [1, 2, 3, 4, 5]\n )\n self.assertEqual(\n short_bubble_sort([54, 26, 93, 17, 77, 31, 44, 55, 20]),\n [17, 20, 26, 31, 44, 54, 55, 77, 93]\n )", "def assert_property_xsorted_is_the_same_as_sorted(_xsorted, things, reverse):\n expected = list(sorted(things, reverse=reverse))\n actual = list(_xsorted(things, reverse=reverse))\n assert actual == expected", "def sorted(self): \n pass", "def test_one_element_input(self):\n res = merge_sort([1])\n self.assertEqual(res, [1])", "def isort(context):\n exec_cmd = \"isort . --check --diff\"\n run_cmd(context, exec_cmd)", "def validate_sort_order(filter, main_field):\n\n # The tiebreaker fields are always in the same order, but\n # if the main sort field is one of the tiebreaker fields,\n # it's removed from the list -- there's no need to sort on\n # that field a second time.\n default_sort_fields = [\n {x: \"asc\"} for x in ['sort_author', 'sort_title', 'work_id']\n if x != main_field\n ]\n assert default_sort_fields == filter.sort_order[1:]\n return filter.sort_order[0]" ]
[ "0.67170554", "0.6628306", "0.6544598", "0.65049016", "0.64927745", "0.64916337", "0.64914834", "0.6488707", "0.64275587", "0.64019525", "0.6396425", "0.6384852", "0.6351696", "0.6299925", "0.6294106", "0.62695056", "0.62650406", "0.62421703", "0.6240233", "0.6208013", "0.62055814", "0.6196684", "0.6190443", "0.6161362", "0.6154642", "0.6144998", "0.61388254", "0.6123305", "0.61231714", "0.6090602", "0.6086295", "0.6086295", "0.6085445", "0.6073969", "0.60463977", "0.60435706", "0.6042163", "0.6039403", "0.6037857", "0.603186", "0.60213304", "0.60092133", "0.5996817", "0.599424", "0.5992938", "0.59902084", "0.59893227", "0.59793603", "0.59723186", "0.59723186", "0.59674644", "0.59289587", "0.59280723", "0.592568", "0.5918451", "0.591195", "0.5906282", "0.58973324", "0.58900857", "0.5885482", "0.5873144", "0.5868777", "0.5865916", "0.58633184", "0.586133", "0.58589596", "0.58556473", "0.5848615", "0.58473283", "0.5841354", "0.58410704", "0.5840029", "0.5830574", "0.58254975", "0.58198756", "0.5819052", "0.5805925", "0.58005536", "0.57962096", "0.57915986", "0.5770998", "0.5764344", "0.5744785", "0.57259625", "0.57054573", "0.57044256", "0.57041585", "0.57031476", "0.5701245", "0.56934106", "0.5674514", "0.567116", "0.56658053", "0.5659388", "0.565685", "0.5650227", "0.56461245", "0.5643203", "0.5640868", "0.56388503", "0.56284" ]
0.0
-1
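One of the sorting passages quoted above ends by asserting mafIsSorted(...) on the mafSorter output without showing the helper itself. A minimal sketch of such a check, assuming the standard MAF layout in which each alignment block carries 's <src> <start> <size> <strand> <srcSize> <text>' sequence lines, could look like the following; the function name maf_is_sorted and the single-target-sequence restriction are assumptions for illustration, not the helper's actual implementation:

def maf_is_sorted(path, seq='hg18.chr7'):
    # Collect the start coordinate of `seq` from every block, in file order,
    # and verify that the sequence of starts never decreases.
    starts = []
    with open(path) as maf:
        for line in maf:
            fields = line.split()
            if fields and fields[0] == 's' and fields[1] == seq:
                starts.append(int(fields[2]))
    return all(a <= b for a, b in zip(starts, starts[1:]))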
Compare the duration to the example.
def test_find_parallel_duration():
    pt2_example = {
        "C": [],
        "A": ["C"],
        "F": ["C"],
        "B": ["A"],
        "D": ["A"],
        "E": ["B", "D", "F"],
    }
    assert find_parallel_duration(pt2_example, 2, 0) == 15
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_duration(self):\n for duration_, _, _ in self.test_cases:\n self.assertEqual(Rest(duration_).duration, duration_)", "def test_duration_property(self):\n recording_dt = 0.1\n recording_shape = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n expected_duration = recording_shape['no_timesteps'] * recording_dt\n test_rec = rt.Recording(\n np.zeros(\n [\n recording_shape['no_channels'],\n recording_shape['no_timesteps'],\n recording_shape['no_sweeps'],\n ]\n ),\n dt=recording_dt,\n )\n npt.assert_almost_equal(\n test_rec.duration,\n expected_duration,\n err_msg='Expected {} for `duration` property; got {} instead.'.format(\n expected_duration, test_rec.duration\n ),\n )", "def is_duration_consistent(self):\n return (self._duration is None or\n self._duration == total_minutes(self.duration))", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def testHrtDuration(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"duration\")\n\n self.util.stringPropertyTest(self, attr, \"duration\")", "def check_diff(self,game,wanted_diff,wanted_starting_time=''):\n return True", "def test_parse_duration(\n test_input: int,\n expected: datetime.timedelta,\n):\n assert tvmaze.parsers.parse_duration(test_input) == expected", "def inner_test(param: datetime.timedelta):\n self.assertEqual(param, datetime.timedelta(\n days=3, hours=2, minutes=5, seconds=43\n ))", "def duration(self):\r\n return self.t2 - self.t1", "def after(self, time2):\r\n return self.to_seconds() > time2.to_seconds()", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def check_last_cycle_duration(self):\n min_pm_time = timedelta(seconds=self.args.min_pm_time)\n max_pm_time = timedelta(seconds=self.args.max_pm_time)\n if self.args.pm_timestamp:\n pm_timestamp = datetime.fromtimestamp(self.args.pm_timestamp)\n now = datetime.now()\n pm_time = now - pm_timestamp\n if pm_time < min_pm_time:\n raise TestFailed(\n \"{0} time less than expected: {1} < {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, min_pm_time\n )\n )\n if pm_time > max_pm_time:\n raise TestFailed(\n \"{0} time greater than expected: {1} > {2}\".format(\n self.args.pm_operation.capitalize(), pm_time, max_pm_time\n )\n )\n\n logging.info(\n \"{0} time: {1}\".format(self.args.pm_operation.capitalize(), pm_time)\n )", "def duration(self):\n pass", "def duration(self):\n pass", "def after(self, time2):\n return self.to_seconds() > time2.to_seconds()", "def test_details_time(self):\n self.assertLess(self.details.time, datetime.now(timezone.utc))", "def calc_time_match(song: Song, result: Result) -> float:\n\n if result.duration > song.duration:\n return 100 - (result.duration - song.duration)\n\n return 100 - (song.duration - result.duration)", "def verify_event_timing(self, event, item):\n return True", "def test_initialization_of_homework_result_created():\n expected = datetime.datetime.now()\n assert result_1.created - expected < datetime.timedelta(seconds=1)", "def test_accurate(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n times = self.G.pattern.nonzero()[1]*self.dt\n self.assertTrue(np.allclose(sorted(times), M.t))\n for (i, t) in zip(M.i, M.t):\n self.assertTrue(self.G.pattern[i, int_r(t/self.dt)])", 
"def __check_total_duration(self, duration: int) -> bool:\n available_duration = N_EIGHTHS_PER_MEASURE * (self.n_measures - 1)\n return self.current_time_in_eighths + duration <= available_duration", "def test_case(self):\n expected = dict(seconds=1)\n self.assertEqual(expected, util.parse_relative_time_string(\"+1s\"))\n self.assertEqual(expected, util.parse_relative_time_string(\"+1S\"))", "def test__put_duration_into():\n for input_value, defaults, expected_output in (\n (0.0, False, {}),\n (0.0, True, {'duration_sec': 0.0}),\n (1.0, False, {'duration_sec': 1.0}),\n ):\n data = put_duration_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def test_with_now_minus_1_day(self):\n self.assertEqual(ageid(self.now - timedelta(1)), 'age2')", "def test_with_now_minus_2_days(self):\n self.assertEqual(ageid(self.now - timedelta(2)), 'age3')", "def test_datetime(self):\n diff = self.machine_date - self.actual_date < datetime.timedelta(0, 20, 0)", "def test_run_now(curent_time,state):\n date = datetime(2020,5,5,12,0)\n duration_in_minutes = 65\n run = Run(date, duration_in_minutes/60)\n\n assert run.run_now(curent_time) == state", "def test_analyze_time(self):\n self.ph5validate.analyze_time()\n self.assertEqual(self.ph5validate.das_time.keys(), [('12183', 1, 500)])\n Dtime = self.ph5validate.das_time[('12183', 1, 500)]\n\n # 3 different deploy time\n self.assertEqual(len(Dtime['time_windows']), 5)\n\n # station 9001\n self.assertEqual(Dtime['time_windows'][0],\n (1550849950, 1550850034, '9001'))\n self.assertEqual(Dtime['time_windows'][1],\n (1550849950, 1550850034, '9001'))\n self.assertEqual(Dtime['time_windows'][2],\n (1550849950, 1550850034, '9001'))\n # station 9002\n self.assertEqual(Dtime['time_windows'][3],\n (1550850043, 1550850093, '9002'))\n # station 9003\n self.assertEqual(Dtime['time_windows'][4],\n (1550850125, 1550850187, '9003'))\n\n self.assertEqual(Dtime['min_deploy_time'],\n [1550849950,\n 'Data exists before deploy time: 7 seconds.'])", "def match(self, dt):\n raise NotImplemented", "def test_duration_argument_is_working_properly(self):\n d = DurationMixin(duration=10)\n self.assertEqual(10, d.duration)", "async def test_source_up_to_dateness(self):\n response = await self.collect(get_request_json_return_value={\"timestamp\": \"1565284457173\"})\n expected_age = days_ago(datetime_fromtimestamp(1565284457173 / 1000.0))\n self.assert_measurement(response, value=str(expected_age))", "def within_threshold(self, other):\n if abs(other.ts - self.ts) < TIME_THRESHOLD:\n return True\n return False", "def validity_by_time(self):\n conn = psycopg2.connect(self.conn)\n permissable_maximum_age_secs = 600 # 600s = 10mins\n query = \"SELECT time FROM steve_sense_sensor_logs ORDER BY time DESC LIMIT 1\"\n cur = conn.cursor()\n cur.execute(query)\n queryResult = cur.fetchall()\n age_seconds = (datetime.datetime.now(\n timezone.utc) - queryResult[0][0]).seconds\n cur.close()\n conn.close()\n if age_seconds > permissable_maximum_age_secs:\n print(\"Check Sensor, last sample is \"+str(age_seconds)+\" old\")\n return False\n else:\n return True", "def __cmp__(self, other):\n return (self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def _checkDT(self):\r\n dt = np.diff(self.tsec)\r\n \r\n dt_unique = np.unique(dt)\r\n \r\n if np.size(dt_unique) == 1:\r\n self.isequal = True\r\n else:\r\n self.isequal = False\r\n \r\n try:\r\n self.dt = dt[1]\r\n except:\r\n self.dt = 0.0", "def test_with_now_minus_3_days(self):\n 
self.assertEqual(ageid(self.now - timedelta(3)), 'age4')", "def test_all_repetition_frequency_have_timedelta(self):\n for value in EventRepetitionFrequency:\n if value is EventRepetitionFrequency.not_repeated:\n self.assertIsNone(value.to_timedelta())\n else:\n self.assertIsNotNone(value.to_timedelta())", "def test_time_dicts():\n dmd = DMD()\n dmd.fit(X=sample_data_1, Y=sample_data_2)\n expected_dict = {\"dt\": 1, \"t0\": 0, \"tend\": 13}\n np.testing.assert_equal(dmd.original_time, expected_dict)\n np.testing.assert_equal(dmd.dmd_time, expected_dict)", "def test_durations_per_type(self):\n sim = ss.Simulation()\n assert type(sim.durations_per_type()) == dict", "def __eq__(self, other):\n return self.times == other.times", "def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False", "def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False", "def duration(self) -> int:\n return 0", "def duration(self):\n self._duration = self.lib.iperf_get_test_duration(self._test)\n return self._duration", "def inner_test(param: datetime.timedelta):\n pass", "def test_time(self):\r\n pass", "def test_time_signature(score1, score2, measure = 0, part = 0):\n\n\n\n\n\tdiff = ScoreDiff(score1, score2, path)\n\treturn diff.have_same_time_signature(measure, part)", "def test_updated_ts_lessthan_equal(self):\n survey = SurveyFactory.create()\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n 'question_text': 'how was lunch?',\n 'variation_id': '1',\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 201\n\n # Test the less-than case.\n data['updated_ts'] = self.timestamp(offset=-10)\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n resp_data = json.loads(resp.content)\n assert (\n resp_data['errors']['updated_ts'] ==\n 'updated timestamp is same or older than existing data'\n )\n\n # Test the equal case.\n data['updated_ts'] = self.timestamp()\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 400\n resp_data = json.loads(resp.content)\n assert (\n resp_data['errors']['updated_ts'] ==\n 'updated timestamp is same or older than existing data'\n )", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def assert_interval_to_seconds(self, expected, *inputs):\n for value in inputs:\n self.assertEqual(interval_to_seconds(value), expected)", "async def test_source_up_to_dateness_with_default(self):\n response = await self.collect()\n self.assert_measurement(response, value=str((datetime.now() - datetime(2020, 1, 1)).days))", "def compare_pairs(pair_1: StudentPair, pair_2: StudentPair) -> bool:\n if pair_1[\"time\"].duration() == pair_2[\"time\"].duration():\n return pair_1[\"dates\"] < pair_2[\"dates\"]\n\n return pair_1[\"time\"].duration() < pair_2[\"time\"].duration()", "async def test_duration_only(self):\n ctx = MockContext()\n channel, duration = self.cog.parse_silence_args(ctx, 
15, 10)\n\n self.assertEqual(ctx.channel, channel)\n self.assertEqual(15, duration)", "def test_get_between_datetime_same_microseconds(self):\n now = datetime.datetime.utcnow()\n start_dt = testdata.get_past_datetime(now)\n stop_dt = testdata.get_between_datetime(start_dt, now)\n self.assertGreater(stop_dt, start_dt)", "def _matchTime(self, time: float):\n return self._comparator['Time'] < time", "def not_before_duration(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"not_before_duration\")", "def _is_available_by_duration(self) -> bool:\n current_time = datetime.datetime.utcnow()\n if self.time_start is not None and self.time_start > current_time:\n return False\n if self.time_stop is not None and self.time_stop < current_time:\n return False\n return True", "def test_equals_with_different_scales(self):\n measurement_1 = Measurement(self.metric(), {\"count\": {\"status\": \"target_met\"}})\n measurement_2 = Measurement(self.metric(), {\"count\": {\"status\": \"target_not_met\"}})\n self.assertFalse(measurement_1.equals(measurement_2))", "def __eq__(self, other):\n return self.semitone_interval == other.semitone_interval", "def _isBasisDuration(self):\n invdur = 1/self.getDurationNoDot()\n if invdur % 1 > 0:\n return False\n else:\n return True", "def test_training_duration_unit(\n self,\n train_dataloader: DataLoader,\n model: ComposerModel,\n unit: TimeUnit,\n ):\n\n # Construct the trainer\n event_counter_callback = EventCounterCallback()\n trainer = Trainer(\n model=model,\n train_dataloader=train_dataloader,\n callbacks=[event_counter_callback],\n )\n\n # Get the batch size\n batch_size = train_dataloader.batch_size\n assert batch_size is not None\n\n # Get the dataloader length\n dataloader_len = trainer.state.dataloader_len\n assert dataloader_len is not None\n dataloader_len = int(dataloader_len)\n\n # Get the dataset size\n assert train_dataloader.dataset is not None\n assert isinstance(train_dataloader.dataset, collections.abc.Sized)\n num_samples_per_epoch = len(train_dataloader.dataset)\n assert num_samples_per_epoch % batch_size == 0, 'This test assumes no drop_last'\n\n # Determine the duration (given the unit) and the number of calls to .fit()\n # to train 1 epoch\n if unit == TimeUnit.SAMPLE:\n duration = Time.from_sample(batch_size)\n num_steps_per_epoch = num_samples_per_epoch // batch_size\n elif unit == TimeUnit.BATCH:\n duration = Time.from_batch(1)\n num_steps_per_epoch = dataloader_len\n elif unit == TimeUnit.EPOCH:\n duration = Time.from_epoch(1)\n num_steps_per_epoch = 1\n else:\n raise ValueError(f'Unsupported unit: {unit}')\n\n current_epoch_time = datetime.timedelta(seconds=0)\n\n # Train for one epoch, incrementally in steps of size `duration`\n for i in range(num_steps_per_epoch):\n # Train for `duration`\n trainer.fit(duration=duration)\n\n # Determine the number of batches trained\n if unit in (TimeUnit.SAMPLE, TimeUnit.BATCH):\n num_batches_trained = i + 1\n else:\n num_batches_trained = dataloader_len\n\n # Validate the time\n assert trainer.state.timestamp.batch == num_batches_trained\n assert trainer.state.timestamp.sample == num_batches_trained * batch_size\n assert trainer.state.timestamp.token == 0 # tokens not tracked\n assert trainer.state.timestamp.token_in_epoch == 0 # tokens not tracked\n assert trainer.state.timestamp.total_wct > current_epoch_time\n\n # Validate the event counter callback\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_START] == 1\n assert event_counter_callback.event_to_num_calls[Event.BATCH_START] == 
num_batches_trained\n assert event_counter_callback.event_to_num_calls[Event.BATCH_END] == num_batches_trained\n assert event_counter_callback.event_to_num_calls[Event.BATCH_CHECKPOINT] == num_batches_trained\n\n if num_batches_trained < num_steps_per_epoch:\n # Not yet finished the epoch\n assert trainer.state.timestamp.epoch == 0\n assert trainer.state.timestamp.batch_in_epoch == num_batches_trained\n assert trainer.state.timestamp.sample_in_epoch == num_batches_trained * batch_size\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 0\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 0\n assert trainer.state.timestamp.epoch_wct > current_epoch_time\n assert trainer.state.timestamp.epoch_wct == trainer.state.timestamp.total_wct\n if i > 0:\n assert trainer.state.timestamp.epoch_wct > trainer.state.timestamp.batch_wct\n else:\n assert trainer.state.timestamp.epoch_wct == trainer.state.timestamp.batch_wct\n else:\n # Finished the epoch\n assert trainer.state.timestamp.epoch == 1\n assert trainer.state.timestamp.batch_in_epoch == 0\n assert trainer.state.timestamp.sample_in_epoch == 0\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 1\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 1\n assert trainer.state.timestamp.epoch_wct == datetime.timedelta(seconds=0)\n assert trainer.state.timestamp.batch_wct == datetime.timedelta(seconds=0)\n\n current_epoch_time = trainer.state.timestamp.total_wct\n\n # Train for a second epoch\n # Validate that batch_in_epoch / sample_in_epoch are reset properly\n for i in range(num_steps_per_epoch):\n # Train for `duration`\n trainer.fit(duration=duration)\n\n # Determine the number of batches trained in the epoch\n if unit in (TimeUnit.SAMPLE, TimeUnit.BATCH):\n num_batches_trained = i + 1\n else:\n num_batches_trained = dataloader_len\n\n # Validate the time\n assert trainer.state.timestamp.batch == dataloader_len + num_batches_trained\n assert trainer.state.timestamp.sample == num_samples_per_epoch + num_batches_trained * batch_size\n assert trainer.state.timestamp.token == 0 # tokens not tracked\n assert trainer.state.timestamp.token_in_epoch == 0 # tokens not tracked\n assert trainer.state.timestamp.total_wct > trainer.state.timestamp.batch_wct\n assert trainer.state.timestamp.total_wct > trainer.state.timestamp.epoch_wct\n\n # Validate the event counter callback\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_START] == 2\n assert event_counter_callback.event_to_num_calls[Event.BATCH_START] == dataloader_len + num_batches_trained\n assert event_counter_callback.event_to_num_calls[Event.BATCH_END] == dataloader_len + num_batches_trained\n assert event_counter_callback.event_to_num_calls[\n Event.BATCH_CHECKPOINT] == dataloader_len + num_batches_trained\n\n if num_batches_trained < num_steps_per_epoch:\n # Not yet finished the epoch\n assert trainer.state.timestamp.epoch == 1\n assert trainer.state.timestamp.batch_in_epoch == num_batches_trained\n assert trainer.state.timestamp.sample_in_epoch == num_batches_trained * batch_size\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 1\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 1\n else:\n # Finished the epoch\n assert trainer.state.timestamp.epoch == 2\n assert trainer.state.timestamp.batch_in_epoch == 0\n assert trainer.state.timestamp.sample_in_epoch == 0\n assert event_counter_callback.event_to_num_calls[Event.EPOCH_END] == 2\n assert 
event_counter_callback.event_to_num_calls[Event.EPOCH_CHECKPOINT] == 2", "def assertEqualDates(self, dt1, dt2, seconds=None):\n if seconds is None:\n seconds = self.date_tolerance\n\n if dt1 > dt2:\n diff = dt1 - dt2\n else:\n diff = dt2 - dt1\n if not diff < datetime.timedelta(seconds=seconds):\n raise AssertionError('%r and %r are not within %r seconds.' %\n (dt1, dt2, seconds))", "def test_calculate_time_difference():\n first_position = 1\n second_position = 1235\n difference = race.calculate_time_difference(first_position, second_position)\n assert type(difference) == str\n assert \":\" in difference and \".\" in difference\n first_position_random = random.randint(10000, 99999)\n second_position_random = random.randint(10000, 99999)\n difference_random = race.calculate_time_difference(first_position_random, second_position_random)\n assert type(difference_random) == str\n assert \":\" in difference_random and \".\" in difference_random", "def test_optional_milliseconds(self, Signal):\n blk = ElapsedTime()\n config = {\n 'milliseconds': '{{ $ms }}',\n 'units': {\n 'days': True,\n 'hours': True,\n 'minutes': True,\n 'seconds': True,\n },\n 'timestamp_a': '1984-05-03T00:00:00.999Z',\n 'timestamp_b': '1984-05-03T00:00:01.001Z',\n }\n self.configure_block(blk, config)\n\n # process a list of signals\n blk.start()\n blk.process_signals([\n Signal({\n 'ms': False,\n }),\n ])\n blk.stop()\n\n # check output\n # milliseconds are truncated BEFORE comparing timestamps\n self.assert_last_signal_list_notified([\n Signal({\n 'ms': False,\n 'days': 0,\n 'hours': 0,\n 'minutes': 0,\n 'seconds': 1,\n }),\n ])\n # check that seconds was cast to int\n seconds = self.last_notified[DEFAULT_TERMINAL][0].seconds\n self.assertTrue(isinstance(seconds, int))", "def utt_no_shorter_than(path, duration, unit='second'):\n pbase = os.path.splitext(path)[0]\n phonseq = phnread(pbase+'.PHN')\n dur = sum(te-ts if isspeech(p) else 0 for (ts, te), p in phonseq)\n if unit == 'second':\n return dur >= duration * audioinfo(path).samplerate\n elif unit == 'sample':\n return dur >= duration\n else:\n raise ValueError(f\"Unsupported unit: {unit}.\")", "def not_before_duration(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"not_before_duration\")", "def not_before_duration(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"not_before_duration\")", "def diff_log(self, other):\n if self._seq_list != other._seq_list:\n print(\"Fuzzing sequence lists do not match.\")\n return False\n return True", "def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))", "def duration(self):\r\n return self.stop - self.start", "def __cmp__(a,b):\n td = b.duedate - a.duedate\n return td.days * 24*60*60 + td.seconds", "def is_time(self) -> bool:\n return self.times > 1", "def test_second_equal(self):\n self.assertEqual(heaviest_word(\"what time are we climbing up to the volcano\"), \"volcano\")", "def set_duration(self, year=0, month=0, day=0, hour=0, minute=0):\n self.stop.delta(year, month, day, hour, minute)\n if self.stop.before(self.start):\n raise TimeError(\"The stop Moment is before the start Moment\")\n else:\n return True", "def test_interval(self):\n\n\t\tself.assertEqual(self.interval, self.tracker.server_class.interval)", "async def test_evaluate_different_response_time(self):\n self.set_source_parameter(\"response_time_to_evaluate\", 
\"min_response_time\")\n self.set_source_parameter(\"target_response_time\", \"45\")\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"1\", entities=self.expected_entities[:1])", "def test_total_time_with_start_and_end_times(time_record_factory):\n ts = datetime.datetime(2018, 10, 1, 9, 0)\n te = datetime.datetime(2018, 10, 1, 17, 0)\n\n t = time_record_factory(time_start=ts, time_end=te)\n\n expected = te-ts\n\n assert t.total_time == expected", "def toc(self,timestamp):\n return self._timestamp > timestamp", "def test_run_print():\n date = datetime(2020,5,5,12,0)\n duration_in_minutes = 60\n run = Run(date, duration_in_minutes/60)\n\n assert \"{}\".format(run) == \"<Run(start={}, stop={}, duration=1.0)>\".format(datetime(2020,5,5,12,0),datetime(2020,5,5,13,0))", "def test_basedif_times(self):\n test1 = BaseModel()\n sleep(0.5)\n test2 = BaseModel()\n self.assertNotEqual(test1.created_at, test2.created_at)\n self.assertNotEqual(test1.updated_at, test2.updated_at)", "def T_elapsed(T_amount: BlockHeight) -> bool:\n T_now = getCurrentBlockHeight()\n return T_now - T_init >= T_amount", "def test_search_date_true(self):\n test = self.ec.search_date()\n self.assertEqual(self.ecm.test_minute(), test)", "def main():\n assert how_many_seconds(2) == 7200\n assert how_many_seconds(10) == 36000\n assert how_many_seconds(24) == 86400\n assert how_many_seconds(36) == 129600\n print('Passed.')", "def test_lengthen_synthetic_interval(self):\n now = datetime.now()\n now_utc = now.utcnow()\n\n three_hours_before = now - timedelta(hours=3)\n four_hours_before = now - timedelta(hours=4)\n five_hours_before = now - timedelta(hours=5)\n\n three_hours_before_utc = now_utc - timedelta(hours=3)\n four_hours_before_utc = now_utc - timedelta(hours=4)\n five_hours_before_utc = now_utc - timedelta(hours=5)\n\n self.t.configure_exclusions((four_hours_before.time(), three_hours_before.time()))\n\n self.t(\"start {:%Y-%m-%dT%H:%M:%S}\".format(five_hours_before))\n\n self.t(\"lengthen @2 5min\")\n\n j = self.t.export()\n\n self.assertEqual(len(j), 2)\n self.assertClosedInterval(j[0],\n expectedStart=\"{:%Y%m%dT%H%M%S}Z\".format(five_hours_before_utc),\n expectedEnd=\"{:%Y%m%dT%H%M%S}Z\".format(four_hours_before_utc + timedelta(minutes=5)),\n expectedTags=[],\n description=\"lengthened interval\")\n self.assertOpenInterval(j[1],\n expectedStart=\"{:%Y%m%dT%H%M%S}Z\".format(three_hours_before_utc),\n expectedTags=[],\n description=\"unmodified interval\")", "def ended(self):\n return self.dur <= 0", "def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6", "def get_duration(self):\n try:\n if self.is_skipped:\n return \"00:00\"\n assert self.start_time\n assert self.stop_time\n if self.stop_time < self.start_time:\n return \"XX:XX\"\n return(\n f\"{str(int(self.stop_time - self.start_time) // 60).zfill(2)}:\"\n f\"{str(int(self.stop_time - self.start_time) % 60).zfill(2)}\")\n\n except Exception: # pylint: disable=broad-except\n self.__logger.error(\"Please run test before getting the duration\")\n return \"XX:XX\"", "def check_time(start, message):\n\n logger.info(\" {} -> took {}\".format(message, clock() - start))", "def test_with_now_minus_4_days(self):\n self.assertEqual(ageid(self.now - timedelta(4)), 'age5')", "def test_status_target_met(self):\n measurement = self.measurement(\n self.metric(),\n sources=[\n {\"source_uuid\": SOURCE_ID, \"value\": \"0\", \"total\": \"100\", \"parse_error\": None, 
\"connection_error\": None},\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"0\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertEqual(\"target_met\", measurement.status())", "def testSeconds(self):\n c = task.Clock()\n self.assertEqual(c.seconds(), 0)", "def __lt__(self, other) -> bool:\n if self.timestamp is None:\n raise NoTimestampOfMeasurementSetError(f'{self.name} has no timestamp of measurement.')\n else:\n return True if self.timestamp < other.timestamp else False", "def testSeconds(self):\n c = task.Clock()\n self.assertEquals(c.seconds(), 0)", "def test_same_distances(self):\n \n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tprint(distances)", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def test_repeated_unit(self):\n self.assertEqual(\n dict(seconds=3), util.parse_relative_time_string(\"+3s +3seconds\"))\n with self.assertRaises(ValueError):\n util.parse_relative_time_string(\"+3s +4seconds\")", "def test_time_successful(self):\n\n url = '/%s/jobs/%i/input_files/?started=%s&ended=%s&time_field=%s' % (self.api, self.job.id,\n '2016-01-10T00:00:00Z',\n '2016-01-13T00:00:00Z',\n 'source')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n results = result['results']\n self.assertEqual(len(results), 2)\n for result in results:\n self.assertTrue(result['id'] in [self.file3.id, self.file4.id])", "def timeAfter(self):\n return self.timeStamp + self.duration", "def test_equals(self):\n measurement_1 = Measurement(self.metric())\n measurement_2 = Measurement(self.metric())\n self.assertTrue(measurement_1.equals(measurement_2))" ]
[ "0.6993373", "0.64634085", "0.6261238", "0.6072691", "0.6072691", "0.60668623", "0.60171825", "0.58592576", "0.58530915", "0.58014023", "0.56802076", "0.5668855", "0.5660802", "0.55880255", "0.55880255", "0.55513406", "0.5545389", "0.5539826", "0.55272925", "0.55245674", "0.5523045", "0.5518289", "0.55062854", "0.5488691", "0.5471861", "0.5471041", "0.54345274", "0.5429674", "0.54287744", "0.542481", "0.54101026", "0.5395752", "0.5381767", "0.5361745", "0.5351282", "0.53219384", "0.53205615", "0.5314802", "0.53095466", "0.5300332", "0.52957916", "0.52903056", "0.52903056", "0.52886695", "0.5286869", "0.5275895", "0.5273165", "0.52641296", "0.5263516", "0.525683", "0.5255367", "0.5253975", "0.52532744", "0.52508247", "0.5245322", "0.52450246", "0.5237658", "0.52311945", "0.5218365", "0.52167743", "0.521165", "0.5210601", "0.52089727", "0.5201707", "0.52006036", "0.5199266", "0.5187445", "0.5187445", "0.5184129", "0.5179743", "0.5173504", "0.51661855", "0.5165875", "0.5164231", "0.5162841", "0.51580113", "0.5157834", "0.5154887", "0.5149264", "0.51414675", "0.51392996", "0.5134422", "0.5130505", "0.5130318", "0.51217085", "0.5119995", "0.5119341", "0.51190287", "0.5112791", "0.5112387", "0.5109667", "0.5103422", "0.50982016", "0.50978255", "0.50971293", "0.5079553", "0.5079054", "0.5077375", "0.50711596", "0.5068823" ]
0.5158579
75
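The record above shows only the test; the function under test is not included. A sketch of find_parallel_duration that satisfies the assertion, treating the dict as step -> prerequisites and charging each step base + its alphabetical index (1 for 'A') seconds across the given number of workers, might read as follows; this is one plausible implementation, assuming an acyclic dependency graph:

def find_parallel_duration(dependencies, workers, base):
    # step -> set of unfinished prerequisites
    remaining = {step: set(reqs) for step, reqs in dependencies.items()}
    in_progress = {}  # step -> time at which it finishes
    time = 0
    while remaining or in_progress:
        # Retire work finishing now and release its dependents.
        for step in [s for s, end in in_progress.items() if end == time]:
            del in_progress[step]
            for reqs in remaining.values():
                reqs.discard(step)
        # Hand ready steps to idle workers, lowest letter first.
        ready = sorted(s for s, reqs in remaining.items() if not reqs)
        for step in ready[:workers - len(in_progress)]:
            in_progress[step] = time + base + ord(step) - ord('A') + 1
            del remaining[step]
        if in_progress:
            # Jump straight to the next completion event.
            time = min(in_progress.values())
    return time

With the example graph, two workers and a base of 0, the schedule finishes at t = 15 (C, then A and F in parallel, then B, D and E), matching the assertion.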
obtain a time duration between the recent events of the same bizLocation
import logging

from pymongo import DESCENDING

logger = logging.getLogger(__name__)


def obtain_time_duration(collection, new_document):
    # Obtain the two most recent documents for the incoming bizLocation,
    # sorted by eventTime in descending order. The first in the list is the
    # newly inserted document detected by Change Streams; the second is the
    # one of interest.
    prev_documents = collection.find(
        {'epcList.epc': new_document['epcList'][0]['epc']}
    ).limit(2).sort([("eventTime", DESCENDING)])
    prev_doc_list = list(prev_documents)
    if not prev_doc_list:
        # find() returns a (possibly empty) cursor, never None, so emptiness
        # is the condition to test for.
        logger.info('No previous information of the event found')
        return None
    if len(prev_doc_list) == 1:
        logger.info('Only a single entry exists for the product: it is a new '
                    'product with no previous events.')
        return None
    logger.debug('Previous BizLocation of Product: {}, Present BizLocation of Product: {}'.format(
        prev_doc_list[1]['bizLocation']['id'], new_document['bizLocation']['id']))
    logger.debug('Time Duration: From {} to {}'.format(
        prev_doc_list[1]['eventTime'], new_document['eventTime']))
    # Build the dictionary to return.
    duration = {
        'bizLocation': {
            'prev': prev_doc_list[1]['bizLocation']['id'],
            'present': new_document['bizLocation']['id']
        },
        'from_time': prev_doc_list[1]['eventTime'].isoformat(timespec='milliseconds') + 'Z',
        'to_time': new_document['eventTime'].isoformat(timespec='milliseconds') + 'Z'
    }
    return duration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def duration(self):\r\n return self.t2 - self.t1", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def duration(self):\n return self._end - self._begin", "def GetDuration(self):\n return _gmat_py.LocatedEvent_GetDuration(self)", "def compute_go_duration(self, units='seconds'):\n go_duration = 0\n for trial in self.trials:\n max_time = 0\n for event in trial.events:\n if self.stop > max_time:\n max_time = self.stop\n\n go_duration += max_time\n\n self.go_duration = (go_duration, units)", "def duration(self):\n return self.end_time - self.start_time", "def duration(self) -> float:\n return self.endTime()-self.startTime()", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def _distance_to_last_event(self, step):\n if self.last_off is None:\n raise ValueError('No events in the stream')\n return step - self.offset - self.last_off", "def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))", "def get_duration(self):\n\n return self.endtime - self.starttime", "def duration(self):\n return self.end - self.start", "def timediff(start, event):\n print(\"{:35} {:5.0f}s\".format(event + \" after\", time.time() - start))", "def duration(self):\r\n return self.stop - self.start", "def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])", "def duration(self):\r\n\t\treturn (self.globEnd - self.globStart)", "def _compute_duration(self):\n diff_float = 0\n for ts_line in self:\n if ts_line.x_start_date:\n st_datetime = fields.Datetime.from_string(\n ts_line.x_start_date)\n # autocomplete date from start date\n st_date_tz = fields.Datetime.context_timestamp(\n self, st_datetime).date()\n ts_line.date = st_date_tz\n\n # autocomplete name from start date\n st_datetime_tz = fields.Datetime.context_timestamp(\n self, st_datetime)\n string_st_dt_tz = fields.Datetime.to_string(st_datetime_tz)\n ts_line.name = ts_line.user_id.name + '/' + string_st_dt_tz\n\n en_datetime = fields.Datetime.from_string(\n ts_line.x_end_date)\n diff = en_datetime - st_datetime\n if(time(1, 00) <= st_datetime.time() <= time(5, 00)):\n if(time(6, 00) <= en_datetime.time() <= time(10, 00)):\n # del 1 hour for breaking lunch\n diff_float = round(diff.total_seconds() / 3600.0, 2)-1\n else:\n diff_float = round(diff.total_seconds() / 3600.0, 2)\n ts_line.unit_amount = diff_float", "def _get_dur(inst):\n for fil, sig in inst['localization'].items():\n ke = sorted([int(i) for i in sig.keys()], key=int)\n if (len(ke) != 2):\n log(0, \"Error: Instance has two ranges\\n%s\" % (str(inst)))\n exit(1)\n dur = ke[1] - ke[0]\n assert dur > 0, \"Duration <= 0\"\n return(dur)", "def end_time(self) -> float:\r\n ...", "def get_duration(self):\n return self.source_df.end_timestamp.astype(\"datetime64\") - self.source_df.start_timestamp.astype(\"datetime64\")", "def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60", "def duration(self):\n return (self.fcip_doc[\"latest_timestamp\"] - self.fcip_doc[\"packet_timestamps\"][0])", "def get_duration(self, current_time):\n return current_time - self.slam.get_data(node_name=self.last_point_name)['time']", "def duration(self):\n return self.end_abs - self.start", "def get_times(self):\n times = []\n for i in range(1, len(self.events)):\n times.append(self.events[i-1].elapsed_time(self.events[i]))\n return times", "def timeAfter(self):\n return self.timeStamp + self.duration", "def break_time(self):\n\t\ts = 
timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def UTC_times(times, \n trace, \n diff_thres = 30.0):\n # set times values to seconds\n \n #AUTOMATE THIS SECTION!\n #CHECK THAT THIS IS CORRECT\n times = times / trace.stats.sampling_rate\n #remove unwanted parts of times numpy array \n times = times[:,0]\n \n #remove the first instance of time because it is \n #somehow always of the wrong format!\n #times = np.delete(times, 0) \n \n event_times = []\n event = [times[0]]\n \n start_time = trace.stats.starttime\n \n #for item in times:\n # print start_time + item\n\n for i in range(1, len(times)):\n \n # check if two events in times array have a difference < diff_thres, \n #if not, run average of those times, if so append that events to a \n #new events_times list\n \n #time_diff = times[i + 1] - times[i]\n \n time_diff = times[i] - times[i-1]\n\n #save info until events are far enough apart! \n if time_diff < diff_thres:\n\n event.append(times[i])\n \n \n #raise conditional for if events are far enough apart! \n else:\n\n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n\n event_times.append([event_start, event_end])\n \n event = [] \n \n event.append(times[i])\n\n #if event still contains something for any reason, add it to event times\n if len(event) > 0: \n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n event_times.append([event_start, event_end])\n event = [] \n \n\n\n #if len(event_times) == 0 and len(event) > 0 or time_diff > diff_thres and len(event) > 0:\n \n #event_times.append(sum(event) / len(event))\n \n # event_start = event[0] - 2 #minus 5 seconds\n # event_end = event[-1] + 2 #add 5 seconds\n \n # event_times.append([event_start, event_end])\n \n # event = []\n \n #event_times.append(times[i])\n \n # else:\n # event.append(times[i])\n \n\n UTC_events = []\n\n #earthquake length threshold is 10 seconds and above!\n eq_len = 0#5.0\n\n for i in event_times:\n estart = start_time + i[0]\n eend = start_time + i[1]\n \n if eend - estart > eq_len:\n UTC_events.append([estart, eend])\n \n #UTC_events = np.unique(np.asarray(UTC_events))\n\n \n return UTC_events", "def receive_and_probing_time(self):\r\n latest_completion = 0\r\n for probe in self.__probes.values():\r\n\t\t \t if probe.complete():\r\n\t\t\t \t\t latest_completion = max(latest_completion, probe.completion_time)\r\n return latest_completion - self.__arrival_time", "def getElapseTimes( self ):\n\n pars\t= ( _EVENT_ELAPSED_TIME,0, 0, 0 )\n values = self.adbGetEvent( pars )\n return values[2]", "def getDuration(self):\n #return np.sum(self.subintinfo['TSUBINT']) #This is constant.\n return np.sum(self.getSubintinfo('TSUBINT')) #This is constant.", "def get_duration(self, obj):\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None # can't compute yet", "def get_duration(self, obj):\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None", "def duration(self):\n return total_seconds(self.timestamp - self.start_timestamp)", "def total_duration(self):\r\n # XXX: bug in duration after slicing - attr_onread should be reset\r\n # after slicing\r\n #return self.duration.sum()\r\n 
return (self.stop - self.start).sum()", "def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25", "def duration(self):\n pass", "def duration(self):\n pass", "def endTime(self) -> float:\n try: return self.times[-1]\n except IndexError: return 0.0", "def get_duration(self):\n duration = 0\n\n for entry in self.entries:\n duration += entry.get_duration()\n return duration", "def get_region_updated_time(self):", "def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration", "def elapsed(timestamp):\n return repoze.timeago.get_elapsed(timestamp)", "def duration(self):\n started = self.started_at\n finished = self.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None # can't compute yet", "def _compute_duration_overtime(self):\n diff_float = 0\n for ts_line in self:\n if ts_line.x_start_date:\n st_datetime = fields.Datetime.from_string(\n ts_line.x_start_date)\n en_datetime = fields.Datetime.from_string(\n ts_line.x_end_date)\n diff = en_datetime - st_datetime\n if not ts_line.is_overtime:\n if(time(1, 00) <= st_datetime.time() <= time(5, 00)):\n if(time(6, 00) <= en_datetime.time() <= time(10, 00)):\n # del 1 hour for breaking lunch\n diff_float = round(\n diff.total_seconds() / 3600.0, 2)-1\n else:\n diff_float = round(diff.total_seconds() / 3600.0, 2)\n ts_line.x_is_per_diem = False\n ts_line.unit_amount = diff_float", "def _getUpTime(self):\n diff = (datetime.datetime.now() - self._startTime).__str__()\n return diff[:diff.find('.')]", "def time_between_updates(self):\r\n if 'last_updated' not in self._original:\r\n return 0\r\n last_update = self._original['last_updated']\r\n this_update = self.last_updated\r\n return this_update - last_update", "def get_last_run(events: List[dict]) -> dict:\n\n last_timestamp = events[-1]['@timestamp']\n last_time = last_timestamp / 1000\n next_fetch_time = datetime.fromtimestamp(last_time) + timedelta(\n seconds=1\n )\n return {'after': next_fetch_time.isoformat()}", "def totalEventDuration(self):\r\n if self.events == None:\r\n raise Exception('event detector has not been called for a timeseries')\r\n return sum([event.duration() for event in self.events])", "def duration(self):\n return int(\n (self.finish_checkpoint - self.start_checkpoint) * 1000000\n )", "def duration(self):\n\t\tif self.status():\n\t\t\t# Currently on, return time since session was started\n\t\t\treturn self.length()\n\t\telse:\n\t\t\t# Otherwise return time until last bit of work\n\t\t\t# Check that this isn't an empty session\n\t\t\tif not self.toggles: return timedelta()\n\t\t\treturn self.toggles[-1] - self.toggles[0]", "def obs_length(self):\n return self.lc.time[-1] - self.lc.time[0]", "def duration(self):\n window_length = self.window_length\n if self.window_length is None:\n warnings.warn(\n \"spectrogram must have window_length attribute to\"\n \" accurately calculate duration. 
Approximating duration.\"\n )\n return self.times[-1]\n else:\n return self.times[-1] + window_length / 2", "def _end_event(self):\n self.end_event_time = datetime.utcnow()\n self.event_properties[\"ElapsedTime\"] = str(\n self.end_event_time - self.start_event_time\n )", "def getLaps(self):\n\n if self.pj[OBSERVATIONS][self.observationId][\"type\"] in [LIVE]:\n\n if self.liveObservationStarted:\n now = QTime()\n now.start() # current time\n memLaps = Decimal(str(round(self.liveStartTime.msecsTo(now) / 1000, 3)))\n return memLaps\n else:\n return Decimal(\"0.0\")\n\n if self.pj[OBSERVATIONS][self.observationId][\"type\"] in [MEDIA]:\n\n if self.playerType == VIEWER:\n return Decimal(0)\n\n if self.playerType == VLC:\n if self.playMode == FFMPEG:\n # cumulative time\n memLaps = Decimal(self.FFmpegGlobalFrame * (1000 / list(self.fps.values())[0]) / 1000).quantize(\n Decimal(\".001\"))\n return memLaps\n elif self.playMode == VLC:\n # cumulative time\n memLaps = Decimal(\n str(round((sum(self.duration[0: self.media_list.index_of_item(self.mediaplayer.get_media())]) +\n self.mediaplayer.get_time()) / 1000, 3)))\n return memLaps", "def duration(self):\n if self._exc_end and self._inc_begin:\n return self._exc_end - self._inc_begin\n return 0", "def get_elapsed_time(self):\n if hasattr(self, 'starttime'):\n return monotonic() - self.starttime\n else:\n return 0", "def getTimes():", "def getTimes():", "def getTimes():", "def distanceFunction(value1,value2):\n time1=value1[0]\n time2=value2[0]\n event1=value1[1]\n event2=value2[1]\n \n #difference for events is as good as 60 mins time difference\n distance=0\n if event1!=event2:\n distance+=60\n \n #difference for times is absolute difference between times\n distance+=abs(time1-time2)\n return distance", "def end(self):\r\n assert self._active and self._start_time > 0\r\n\r\n # Calculate difference\r\n end_time = time.time()\r\n time_difference = end_time - self._start_time\r\n\r\n # Record timing (and trim history)\r\n self._timings.append(time_difference)\r\n if len(self._timings) > self._average_over_last_n_timings:\r\n self._timings = self._timings[-self._average_over_last_n_timings:]\r\n\r\n # Reset\r\n self._start_time = -1\r\n self._active = False\r\n\r\n return time_difference", "def GetTimeAndDurationOfTripSinceDeparture(PathInfo):\r\n\tif not PathInfo:\r\n\t\treturn None \r\n\tif len(PathInfo) < 2: return None \r\n\r\n\tdeparture_first_station = PathInfo[1][ConnInfoInd['departure_hour']]*60 + PathInfo[1][ConnInfoInd['departure_min']]\r\n\r\n\tarrival_last_station = PathInfo[-1][ConnInfoInd['arrival_hour']]*60 + PathInfo[-1][ConnInfoInd['arrival_min']]\r\n\tTotalDuration = arrival_last_station - departure_first_station\r\n\treturn (TotalDuration, departure_first_station, arrival_last_station)", "def duration(self) -> float:\n return self._stop - self._start if self._stop is not None else None", "def last_5_mins(conn,from_time):\n durTot = 0\n time = '{}'.format(from_time)\n query = ''' SELECT sum(duration) FROM events WHERE event_type = 'Cycle End' AND unix_time > ?'''\n c = conn.cursor()\n c.execute(query,(time,))\n (data, ) = c.fetchone()\n try:\n \t durTot = round(data,2)\n except:\n\tpass\n return durTot", "def time_difference(sorted_upd):\n keys = []\n values = []\n for i in range(len(sorted_upd) - 1):\n difference = sorted_upd[i + 1]['timestamp'] - sorted_upd[i]['timestamp']\n if difference > float(Config.BOUNDARY):\n keys.append((sorted_upd[i]['timestamp'], sorted_upd[i + 1]['timestamp']))\n values.append(difference)\n time_diff = 
dict(zip(keys, values))\n Config.ANALYSIS.write(f\"regular time greater than {Config.BOUNDARY}s: {time_diff}\\n\\n\")\n return time_diff", "def duration(self) -> float:\n return self.delta_t * len(self)", "def duration(self) -> float:\n return self.delta_t * len(self)", "async def test_source_up_to_dateness(self):\n response = await self.collect(get_request_json_return_value={\"timestamp\": \"1565284457173\"})\n expected_age = days_ago(datetime_fromtimestamp(1565284457173 / 1000.0))\n self.assert_measurement(response, value=str(expected_age))", "def _self_time(self):\r\n return self.duration() - sum([child.duration() for child in self.children])", "def cal_end(self):\n if self.event.datetime_setup_complete:\n return self.event.datetime_setup_complete\n else:\n return self.event.datetime_start", "def _gather_durations(ret, minion_id):\n if isinstance(ret.data, dict) and isinstance(\n ret.data.get(minion_id, None), dict\n ):\n duration = 0\n for _, state_ret in ret.data[minion_id].items():\n try:\n duration += state_ret[\"duration\"]\n except KeyError:\n break\n else:\n return duration\n pytest.skip(\"Something went wrong with the states, skipping.\")", "def get_ruuvitag_scan_time(self):\n results = {}\n\n with psycopg.connect(create_db_conn_string(self._config['db'])) as conn:\n with conn.cursor() as cursor:\n for location in self._config['ruuvitag']['Location'].split(','):\n cursor.execute(\"\"\"SELECT recorded FROM ruuvitag_observations WHERE\n location = %s ORDER BY recorded DESC LIMIT 1\"\"\", (location,))\n\n result = cursor.fetchone()\n results[location] = result[0] if result else datetime.now()\n\n return results", "def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(\"{} {} took {} hours and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, hours, minutes, unit))\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(\"{} {} took {} minutes and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, minutes, seconds, unit))\n else:\n print(\"{} {} took {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, elapsed, unit))", "def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(self.name, \"took\", str(hours), \"hours and\", \"{0:.2f}\".format(minutes), unit, \"to complete\")\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(self.name, \"took\", str(minutes), \"minutes and\", \"{0:.2f}\".format(seconds), unit, \"to complete\")\n else:\n print(self.name, \"took\", \"{0:.2f}\".format(elapsed), unit, \"to complete\")", "def _get_duration(self):\n durations = [series_episode.duration for series_episode in SeriesEpisode.objects.filter(series=self)]\n return reduce(lambda x, y: x + y, durations) if len(durations) > 0 else 0", "def GetDurationUntilDeparture(NextConnectionInfo, PathInfo):\r\n\tdeparture_first_station = PathInfo[0][ConnInfoInd['arrival_hour']]*60 + PathInfo[0][ConnInfoInd['arrival_min']]\r\n\tdeparture_next_station = NextConnectionInfo[ConnInfoInd['departure_hour']]*60 + NextConnectionInfo[ConnInfoInd['departure_hour']]\r\n\treturn (departure_next_station - departure_first_station)", "def _get_end_time(self):\n return self.__end_time", 
"def diff(self):\n return datetime.datetime.now() - self.stamp", "def duration(self):\n\n ended = time.time() if self.ended is None else self.ended\n return ended - self.started", "def _get_time(self, state: State) -> int:\n benchmark_time = {\n 'resnet': state.timestamp.epoch.value,\n 'bert': state.timestamp.sample.value,\n }\n return benchmark_time[self.benchmark]", "def duration(self):\n if not self.started:\n return None\n start = self.started\n end = self.completed\n if not end:\n end = datetime.utcnow()\n return end - start", "def business_time_seconds(self):\n hours_per_day = self._end_hour - self._start_hour\n return ((self.business_time.days * hours_per_day * 60 * 60) + \n (self.business_time.seconds))", "def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))", "def performance_length(self):\n first_touch = self.touches[:1].index[0].to_pydatetime()\n last_touch = self.touches[-1:].index[0].to_pydatetime()\n return (last_touch - first_touch).total_seconds()", "def lookback_duration(self) -> str:\n return pulumi.get(self, \"lookback_duration\")", "def when(self):\n\n # current UTC time\n now = datetime.datetime.utcnow()\n # calculate timedelta and return\n return now - self.creation_time", "def _arrival_time(self):\n \n return self.mkt_time + timedelta(0, 0, self.latency)", "def duration(examiners_data):\n max_end = 0\n for jury in examiners_data:\n examiner_number, exams = jury[\"Number\"], jury[\"Exams\"]\n for student_number, exam_time in exams.items():\n if exam_time + durations[examiner_number] > max_end:\n max_end = exam_time + durations[examiner_number]\n\n return max_end", "def mark(self):\n now = time.time_ns()\n total_diff = (now - self.start) / 1_000_000_000\n split_diff = (now - self.last) / 1_000_000_000\n self.last = now\n return total_diff, split_diff", "def get_end_time(self):\n start = datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n delta = timedelta(\n seconds=float(self.get_handler().SOURCE_ACQ_DURATION)\n )\n return start + delta", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def CalcLastLapDuration(session: gps_pb2.Session) -> float:\n if len(session.laps) == 1:\n first_point = session.laps[0].points[0]\n last_point = session.laps[0].points[-1]\n return GetTimeDelta(first_point, last_point)\n prior_lap = session.laps[-2]\n current_lap = session.laps[-1]\n first_point = current_lap.points[0]\n last_point = current_lap.points[-1]\n delta = GetTimeDelta(first_point, last_point)\n prior_after = CalcTimeAfterFinish(prior_lap)\n current_after = CalcTimeAfterFinish(current_lap)\n return int(delta - current_after * 1e9 + prior_after * 1e9)", "def projectDuration(listActivities):\n lastAct = max(listActivities, key=lambda activity: activity.startTime)\n return lastAct.startTime + lastAct.duration", "def minutesSinceLastUpdate(self):\n if self.seenTimes == []:\n return 0\n latestTime = max(self.seenTimes)\n return int(self.timeCode())-int(latestTime)", "def getEndTime(self):\n assert self.isFinished(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__rawInfo.endTime", "def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 
1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")", "def get_length(self) -> int:\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()", "def get_data(self):\n# epoch_from = 1301641200\n# epoch_to = epoch_from+60*60*24\n \"\"\"\n letting runs finish for 2 more hours\n ideally, want to make this a function of time from schedule plus some\n variation, like 1 hour just in case\n \"\"\" \n# epoch_to_adjusted = epoch_to + 7200\n conn = self.connect_to_mongo()\n db = conn.muni\n \n# print \"==== Collecting starting runs from %s to %s ====\"\\\n# % (str(time.ctime(epoch_from)), str(time.ctime(epoch_to)))\n \"\"\"\n > db.location.find({loc:{$within:{$center:[[37.80241, -122.4364],\n 0.01]}}})\n > db.location.find({loc:{$within:{$center:[[37.76048, -122.38895],\n 0.002]}}})\n \"\"\"\n bus_ids = db.location.find({'route':self.route_name}).distinct(\"bus_id\")\n for bus_id in bus_ids:\n c_start = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.start_lat, self.start_lon],\n self.start_prec]}}\n }).sort(\"cur_time\", DESCENDING)\n self.massage_start_data(c_start)\n \"\"\"\n TODO: the end point seems to be too nice to Muni, need to tighten\n the circle a little\n \"\"\"\n c_end = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.end_lat, self.end_lon],\n self.end_prec]}}\n }).sort(\"cur_time\", ASCENDING)\n self.massage_end_data(c_end)\n if self.to_log:\n print self.start_bus_ids_to_times\n print self.end_bus_ids_to_times\n \n return self.start_bus_ids_to_times, self.end_bus_ids_to_times", "def get_time(self):\n return len(self.pos_history)" ]
[ "0.6247451", "0.6066179", "0.59149534", "0.587591", "0.58055454", "0.57777333", "0.57665247", "0.5748646", "0.5732722", "0.572424", "0.571313", "0.5707432", "0.56868815", "0.5675801", "0.565217", "0.5647044", "0.56372106", "0.56028074", "0.5601279", "0.5599238", "0.5544639", "0.5523867", "0.55219376", "0.5520458", "0.5506709", "0.5497663", "0.54963726", "0.54713005", "0.546432", "0.5443263", "0.5435863", "0.5434491", "0.54338676", "0.542592", "0.5409899", "0.5408504", "0.5405748", "0.5405748", "0.5404294", "0.5403364", "0.539028", "0.5377622", "0.5374485", "0.5354869", "0.53537905", "0.5347988", "0.53437084", "0.53424406", "0.5329521", "0.5328787", "0.5326491", "0.53162634", "0.5312008", "0.531168", "0.5306278", "0.53021455", "0.52983993", "0.5265113", "0.5265113", "0.5265113", "0.5261618", "0.52520454", "0.5246694", "0.5240939", "0.52381146", "0.5235876", "0.5232176", "0.5232176", "0.5231063", "0.5228501", "0.52220184", "0.52189696", "0.5214211", "0.521269", "0.5207691", "0.52051145", "0.5202783", "0.52015156", "0.52008075", "0.51792526", "0.5175854", "0.51735634", "0.51713455", "0.51686853", "0.5159908", "0.51588166", "0.51539165", "0.5149119", "0.51346195", "0.51300746", "0.51221186", "0.51191217", "0.5119099", "0.51183236", "0.5104185", "0.51040566", "0.5103606", "0.5090464", "0.5088767", "0.50883394" ]
0.6420903
0
Objective function to maximize total score over matches.
def objective_score(me, other, turns, noise, repetitions, match_attributes=None):
    match = axl.Match((me, other), turns=turns, noise=noise,
                      match_attributes=match_attributes)
    if not match._stochastic:
        repetitions = 1
    scores_for_this_opponent = []

    for _ in range(repetitions):
        match.play()
        scores_for_this_opponent.append(match.final_score_per_turn()[0])
    return scores_for_this_opponent
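A minimal usage sketch, assuming the Axelrod library is what the `axl` alias above refers to; the strategy choices, turn count, and noise level are illustrative assumptions, not part of the record. The function returns one mean-score-per-turn value for `me` per repetition.

import axelrod as axl  # assumed to be the library behind the `axl` alias

me = axl.TitForTat()   # illustrative focal strategy
other = axl.Random()   # illustrative opponent
# noise > 0 makes the match stochastic, so all 10 repetitions are played
scores = objective_score(me, other, turns=200, noise=0.05, repetitions=10)
mean_score = sum(scores) / len(scores)  # average score per turn vs. this opponent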
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def evaluateObjective(posts, threshold):\n partialSum = 0\n for post in posts:\n partialSum += max(np.sign(post[\"similarity\"] - threshold) * post[\"score\"], 0)\n return partialSum", "def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1", "def objective(self):\n pass", "def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def worst_score(self):\r\n pass", "def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def personal_best(scores):\n return max(scores)", "def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def score_solution(g, s):\n pass", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def score(self):", "def max_score(self):\n return self.raw_possible", "def optimal_max(board):\n # Board full?\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is negative infinity\n global_optimum = [None, -math.inf]\n\n # For each move, what would opponent do next? 
Update best move.\n for action in available_actions:\n # Anticipates optimal adversarial moves\n local_optimum = optimal_min(result(board, action))\n\n # Compares local vs global optima\n if global_optimum[1] <= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def match_score(self):\n return self._match_score", "def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score", "def get_score(self, solution: np.array) -> float:\n pass", "def scoring(self):\n pass", "def personal_best(scores: list) -> int:\n return max(scores)", "def custom_score_3(game, player):\n \"\"\"custom_score_3 heuristic function aims at maximizing win chances of my agent\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = 1.0 * len(game.get_legal_moves(player))#Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves with oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(length_my_player_moves/length_opp_payer_moves)", "def best_action(self):\n child_score = self.child_Q() + self.mcts.c_puct * self.child_U()\n masked_child_score = child_score\n return np.argmax(masked_child_score)", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def maximize(self):\n raise NotImplementedError", "def max_score(self):\n return self.points", "def get_max_score(self):\r\n maxscore = 0\r\n for responder in self.responders.values():\r\n maxscore += responder.get_max_score()\r\n return maxscore", "def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus", "def objective(self, x):\n pass", "def objective(self, x):\n pass", "def max_v_greedy():\n\n S1=Spectrum.Spectrum()\n S1.add_peak(50.4,16)\n S1.add_peak(50.7,36)\n S1.add_peak(74.8,25)\n S1.add_peak(96.2,23)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.6,49)\n S2.add_peak(50.9,25)\n S2.add_peak(74.6,9)\n S2.add_peak(102.4,17)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n g_score,g_peaks=similarity.cosine_score_greedy(S1,S2)\n\n assert score>=g_score, \"Maximum weighted method did not get higher score than greedy method\"\n assert peaks>=g_peaks, \"Maximum weighted method did not match more peaks than greedy method\"\n\n assert peaks==3, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,0.73), \"Incorrect score with greedy method\"\n\n assert g_peaks==2, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(g_score,0.57), \"Incorrect score with maximum weighted method\"", "def evaluate_score(self,word_id):\r\n total_best = 0\r\n assigned_to_return = {}\r\n for possible_word in self.satisfiers[word_id].keys():\r\n words_to_iterate = []\r\n iterated_word_ids = []\r\n # print()\r\n for connected_word_id in self.satisfiers[word_id][possible_word].keys():\r\n words_to_iterate.append(self.satisfiers[word_id][possible_word][connected_word_id])\r\n # print(\"word_id: {}, possible_word: {}, connected_id: {}, 
words: {}\".format(word_id,possible_word, connected_word_id,self.satisfiers[word_id][possible_word][connected_word_id]))\r\n iterated_word_ids.append(connected_word_id)\r\n \r\n # print(possible_word)\r\n # print(\"\\nPossible word:\",possible_word)\r\n for comb in itertools.product(*words_to_iterate):\r\n assigned_words = {}\r\n assigned_words[word_id] = possible_word\r\n for i in range(len(iterated_word_ids)):\r\n assigned_words[iterated_word_ids[i]] = comb[i]\r\n # print(\"word_id: {} comb: {}\".format(word_id,comb))\r\n # print(\"\\nword_id: {}, assigned words: {}\".format(word_id,assigned_words))\r\n new_assigned, current_max = self.get_max_score(word_id,assigned_words)\r\n # print(\"new_assigned: {}, current_max: {}\".format(new_assigned, current_max))\r\n if current_max > total_best:\r\n total_best = current_max\r\n assigned_to_return = {}\r\n assigned_to_return = new_assigned\r\n return assigned_to_return, total_best", "def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]", "def alpha_beta_search(self, maxDepth, board, player, best_move):\r\n \"\"\"j refers to the list where j[0] = score and j[1] = move\"\"\"\r\n start = time()\r\n #playerPiece = core.BLACK\r\n #if player == \"BLACK\":\r\n #print(\"hi\")\r\n #playerPiece = core.WHITE\r\n self.max_value(board, -1*float(\"inf\"), float(\"inf\"), player, 0, maxDepth, best_move)\r\n endTime = time()\r\n #print(endTime - start)\r\n #sort by square weights\r\n #how many flips each move will make\r\n #change the scoring matrix\r\n #dictionary\r\n #time limits\r\n #return j[1]\r", "def maximizer(evaluate):\n def strategy(player, board):\n def score_move(move):\n return evaluate(player, Othello.make_move(move, player, list(board)))\n return max(Othello.legal_moves(player, board), key=score_move)\n return strategy", "def evaluate(self, solution, total = 0):\n for objective in self.objectives:\n total = total + objective(solution)\n return total", "def alphabeta_maximize_play(self, game, legal_moves, depth, alpha, beta):\n highest_score, selected_move = (float('-inf'), (-1, -1))\n for move in legal_moves:\n score, _ = self.alphabeta(game.forecast_move(move), depth - 1, alpha, beta, False)\n if score > alpha:\n alpha = score\n highest_score, selected_move = score, move\n if alpha >= beta:\n break\n return (highest_score, selected_move)", "def objective(self):\n return self._objective", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def get_observed_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_observed_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def max_score(self):\n return max(self._extract_set('score') or [0])", "def mirkin_match_coeff(self, normalize=True):\n max_score = self.grand_total ** 2\n score = max_score - self.mirkin_mismatch_coeff(normalize=False)\n if normalize:\n score = _div(score, max_score)\n return score", "def max_score(self):\r\n return self.lcp.get_max_score()", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product", "def get_estimated_score(match_data: dict) -> float:\n \n auto_high = {match_data['auto_HighClose']: 
match_data['auto_conInnerClose'],\n match_data['auto_HighFrontCP']: match_data['auto_conInnerFrontCP'],\n match_data['auto_HighLine']: match_data['auto_conInnerLine']\n }\n auto_low = match_data['auto_Low']\n auto_line = match_data['auto_leftSectorLine']\n \n tele_high = {match_data['tele_HighClose']: match_data['tele_conInnerClose'],\n match_data['tele_HighFrontCP']: match_data['tele_conInnerFrontCP'],\n match_data['tele_HighLine']: match_data['tele_conInnerLine'],\n match_data['tele_HighBackCP']: match_data['tele_conInnerBackCP']\n }\n tele_low = match_data['tele_Low']\n climbed = match_data['tele_Climbed']\n parked = match_data['tele_UnderSG']\n \n score = 0\n \n # Gives autonomous points\n for x in auto_high:\n score += (4.3, 4.8)[auto_high[x]] * x\n score += auto_low * 2\n if auto_line: score += 5\n \n # Gives teleop points\n for x in tele_high:\n score += (2.15, 2.4)[tele_high[x]] * x\n score += tele_low\n \n # Gives endgame points\n if climbed: score += 25\n if parked: score += 5\n \n return score", "def objective(self, args: Dict[str, Any]) -> float:\n pass", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n \"\"\"\n #Heuristic 1: Aggressive Improved Score\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n return float(own_moves - 2*opp_moves)\n\n \"\"\"\n\n \"\"\"\n #Heuristic 2: Border/Non-Border Differentiated Moves Scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n own_score = 0\n opp_score = 0\n for each_move in game.get_legal_moves(player):\n if each_move in border_moves:\n own_score = own_score + 1\n else:\n own_score = own_score + 1.5\n\n for each_move in game.get_legal_moves(game.get_opponent(player)):\n if each_move in border_moves:\n opp_score = opp_score + 1\n else:\n opp_score = opp_score + 1.5\n\n return float(own_score - opp_score)\n \"\"\"\n\n #Heuristic 3: Advanced Differentiated Board scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n\n next_to_border_moves = [(1,1), (1,2), (1,3), (1,4), (1,5), (2,1),\n (2,5), (3,1), (3,5), (4,1), (4,5),\n (5,1), (5,2), (5,3), (5,4), (5,5)]\n\n own_score = 0\n opp_score = 0\n\n for move in game.get_legal_moves(player):\n if move in border_moves:\n own_score += 1\n elif move in next_to_border_moves:\n own_score += 1.2\n else:\n own_score += 1.5\n\n for move in game.get_legal_moves(game.get_opponent(player)):\n if move in border_moves:\n opp_score += 1\n elif move in next_to_border_moves:\n opp_score += 1.2\n else:\n opp_score += 1.5\n\n return float(own_score - opp_score)", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def _get_lip_best(self) -> float:\n pass", "def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * 
len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)", "def target_score(self, g, w):\n A = bool(max(self.multi_score(g.target_industry, w.target_industry, self.indicator)))\n B = bool(max(self.multi_score(g.target_organization, w.target_organization, self.indicator)))\n if g.event_type == 'malicious-email':\n C = max(self.multi_score(g.target_entity, w.target_entity, self.sim_recip))\n else:\n C = max(self.multi_score(g.target_entity, w.target_entity, self.sim_ld))\n\n if C is None:\n return self.n_score((A, B))\n else:\n return self.n_score((A, (B, C)))", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n\n if currentGameState.getFood().asList() == []: # Null list catch if there is no food on the board\n return currentGameState.getScore()\n else:\n return max([manhattanDistance(currentGameState.getPacmanPosition(),x) * -1\n for x in currentGameState.getFood().asList()]) + currentGameState.getScore()", "def max_value(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD: # Timeout check\n raise SearchTimeout()\n\n if game.is_loser(self) or game.is_winner(self) or depth == 0: # Terminal test, checks base cases\n return self.score(game,self) # returns the score, UTILITY of the current state\n legal_moves = game.get_legal_moves() # obtain all legal moves for game, ACTIONs that can be taken\n best_score = -math.inf # abstraction assignment of neg. infinity(lowest possible value for MAX score)\n for m in legal_moves: # iterate through all available actions\n new_state = game.forecast_move(m) # for each available move, forecast the resulting state from that ACTION\n # RESULT of ACTION\n score = self.max_value(new_state, depth - 1) # recursively uses the new state\n best_score = max(best_score,score) # calculates the minimizing score between the states\n return best_score # propagates minimizing score for given state", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def worst_atom(self, g_u, g_v, active_set):\n\n max_w = None\n max_m_w = None\n max_n_w = None\n max_score = -float('inf')\n\n for w in active_set:\n m_w, n_w = self.polytope.vertex(w)\n score_w = np.sum(g_u * m_w) + np.sum(g_v * n_w)\n\n if score_w > max_score:\n max_w = w\n max_m_w = m_w\n max_n_w = n_w\n max_score = score_w\n\n return max_w, max_m_w, max_n_w", "def get_score(self, solution: np.array) -> float:\n score = 0\n for vehicle_count, vehicle_solution in enumerate(solution):\n distances = self.distance_matrix[vehicle_solution[0:-1], vehicle_solution[1:]]\n costs = distances * self.selected_transportation_cost[vehicle_count]\n score += np.sum(costs)\n return score", "def fn(i):\n if i < 0: return 0 # boundary condition \n return scores[i] + max((fn(ii) for ii in range(i) if ages[ii] == ages[i] or scores[ii] <= scores[i]), default=0)", "def custom_score(game, player):\n # return penalize_corners_heuristic(game, player)\n # return favor_run_away_heuristic(game, player)\n return look_ahead_heuristic(game, player)", "def new_evaluate(board):\n\n #Logic for new_evaluate function:\n #1)Traverse through each of the columns\n #2)For each of the columns, find the top most element.\n\t #If the topmost element = Current Player\n\t\t \t#3)Find the possible number of continuous elements of the same type in all the 4 directions from that 
cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a POSITIVE value\n\t #Else\n\t\t \t#4)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a NEGATIVE value\n #5)Sort these Positive and Negative scores\n #6)IF the highest negative score is greater than the highest positive score, then it means that the opposition has MORE chances to WIN.\n #So, that has to be blocked and so we will return that HIGHEST NEGATIVE value as the score for that board\n #7)ELSE we go ahead and return the HIGHEST POSITIVE value as the score for that board\n #->This logic has increasing the AGGRESSION of the player a lot and it makes senses we hope.\n\n posdict = {}\n negdict = {}\n for col in range(7):\n if(board.get_top_elt_in_column(col)==board.get_current_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = board._max_length_from_cell(rowValue,col)\n posdict[col]=score\n elif(board.get_top_elt_in_column(col)==board.get_other_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = -(board._max_length_from_cell(rowValue,col))\n negdict[col]=score\n\n\n sorted(posdict.values(),reverse= True)\n sorted(negdict.values())\n if((bool(posdict))and (bool(negdict))):\n if(abs(negdict.values()[0]) >= ((posdict.values()[0]))):\n return negdict[negdict.keys()[0]]\n else:\n return posdict[posdict.keys()[0]]\n elif(bool(posdict)):\n return posdict[posdict.keys()[0]]\n elif(bool(negdict)):\n return negdict[negdict.keys()[0]]\n else:\n return 0", "def pwm_max_score(self):\n if self.max_score is None:\n score = 0\n for row in self.pwm:\n score += log(max(row) / 0.25 + 0.01)\n self.max_score = score\n \n return self.max_score", "def objective(\n self,\n parameters: object\n ) -> float:\n pass", "def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # Opponent\n opponent = game.get_opponent(player)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n board_score = no_moves - opp_moves\n score = board_score/rem_spaces\n\n return float(score)", "def similarity_score(self, lhs, rhs):\n pass", "def minimax_maximize_play(self, game, legal_moves, depth):\n highest_score, selected_move = (float('-inf'), (-1, -1))\n for move in legal_moves:\n score, _ = self.minimax(game.forecast_move(move), depth - 1, False)\n highest_score, selected_move = max((highest_score, selected_move), (score, move))\n return (highest_score, selected_move)", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result 
) )\r\n return( result )", "def alpha_beta_search(self, game_state, depth):\r\n alpha = float(\"-inf\")\r\n beta = float(\"inf\")\r\n best_score = float(\"-inf\")\r\n best_move = None\r\n for a in game_state.actions():\r\n vv = self.min_value(game_state.result(a), alpha, beta, depth)\r\n alpha = max(alpha, vv)\r\n if vv > best_score:\r\n best_score = vv\r\n best_move = a\r\n return best_move", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def objective(self, x):\n rvs = frozenset(map(frozenset, self._rvs))\n joint = self.construct_joint(x)\n joint = joint.sum(axis=self._others, keepdims=True)\n crv = joint.sum(axis=tuple(flatten(rvs)))\n\n H_crv = h(crv.ravel())\n H = h(joint.ravel()) - H_crv\n\n def I_P(part):\n margs = [ joint.sum(axis=tuple(flatten(rvs - p))) for p in part ]\n a = sum(h(marg.ravel()) - H_crv for marg in margs)\n return (a - H)/(len(part) - 1)\n\n parts = [p for p in partitions(map(frozenset, rvs)) if len(p) > 1]\n\n caekl = min(I_P(p) for p in parts)\n\n return caekl", "def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)", "def test_score_matrix_objective():\n\n # Test for a really bad objective\n tp = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 
0])\n fp = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n\n res = optimize.score_matrix_objective(tp, fp)\n\n assert res == 0.0\n\n # Test for a perfect objective\n tp = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n fp = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n res = optimize.score_matrix_objective(tp, fp)\n\n assert res == 4.0\n\n # Tests for an objective that is half right\n tp = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])\n fp = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n\n res = optimize.score_matrix_objective(tp, fp)\n\n assert res == 2.0\n\n # Test for objective where the two are equal lines\n tp = np.arange(0, 10)\n fp = np.arange(0, 10)\n\n res = optimize.score_matrix_objective(tp, fp)\n\n assert res == 0.7", "def fitness(self):\n # TO BE DECIDED\n return 1", "def _objective_cost(self):\n\n def obj_expression_simple(model):\n total = model.A_total + model.A2_total + model.A3_total + \\\n model.A4_total\n return -total\n\n def obj_expression(model):\n total = model.A_total + model.A2_total + model.A3_total + \\\n model.A4_total\n total += model.Completion_total\n total += model.Affinity_cognitive_total\n total += model.CTu_total + model.CTl_total + model.S_total\n return -total\n\n # self.model.exp_cost = Expression(rule=obj_expression)\n # self.model.obj_cost = Objective(rule=self.model.exp_cost)\n # self.model.obj_cost = Objective(rule=obj_expression_simple)\n self.model.obj_cost = Objective(rule=obj_expression)", "def _match_rate_goal(self, goal, booked_entity, domains=None):\n if domains is None:\n domains = self.belief_domains\n score = []\n for domain in domains:\n if 'book' in goal[domain]:\n tot = 0\n for key, value in goal[domain].items():\n if value != '?':\n tot += 1\n entity = booked_entity[domain]\n if entity is None:\n score.append(0)\n continue\n if domain in ['taxi', 'hospital', 'police']:\n score.append(1)\n continue\n match = 0\n for k, v in goal[domain].items():\n if v == '?':\n continue\n if k in ['dest', 'depart', 'name'] or k not in self.mapping[domain]:\n tot -= 1\n elif k == 'leave':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['leaveAt'].split(':')[0]) * 100 + int(entity['leaveAt'].split(':')[1])\n if v_constraint <= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n elif k == 'arrive':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['arriveBy'].split(':')[0]) * 100 + int(entity['arriveBy'].split(':')[1])\n if v_constraint >= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n else:\n if v.strip() == entity[self.mapping[domain][k]].strip():\n match += 1\n if tot != 0:\n score.append(match / tot)\n return score", "def _match_rate_goal(self, goal, booked_entity, domains=None):\n if domains is None:\n domains = self.belief_domains\n score = []\n for domain in domains:\n if 'book' in goal[domain]:\n tot = 0\n for key, value in goal[domain].items():\n if value != '?':\n tot += 1\n entity = booked_entity[domain]\n if entity is None:\n score.append(0)\n continue\n if domain in ['taxi', 'hospital', 'police']:\n score.append(1)\n continue\n match = 0\n for k, v in goal[domain].items():\n if v == '?':\n continue\n if k in ['dest', 'depart', 'name'] or k not in self.mapping[domain]:\n tot -= 1\n elif k == 'leave':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['leaveAt'].split(':')[0]) * 100 + int(entity['leaveAt'].split(':')[1])\n if v_constraint <= v_select:\n 
match += 1\n except (ValueError, IndexError):\n match += 1\n elif k == 'arrive':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['arriveBy'].split(':')[0]) * 100 + int(entity['arriveBy'].split(':')[1])\n if v_constraint >= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n else:\n if v.strip() == entity[self.mapping[domain][k]].strip():\n match += 1\n if tot != 0:\n score.append(match / tot)\n return score", "def score(self):\n xg, yg = self.goal\n xe, ye = self.empty_node()\n score = len(self.history) + 4*(xg + yg)\n if xg == 1:\n score -= 3\n if ye > 1:\n score += ye - 1\n dx = abs(xe - xg + 1)\n if xg and dx:\n score += dx\n return score", "def get_n_best(self):\n pass", "def custom_score_3(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # center\n width = game.width / 2\n height = game.height / 2\n\n # Opponent\n opponent = game.get_opponent(player)\n y_coord, x_coord = game.get_player_location(player)\n x_eval = (width - float(x_coord)) ** 2\n y_eval = (height - float(y_coord)) ** 2\n center_eval = float(x_eval + y_eval)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n opp_score = opp_moves - center_eval\n score = no_moves - opp_score\n return float(score)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Aim to maximise your own available moves vs the opponent (Factor 2)\n\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newFood = newFood.asList()\n min_distance_f = -1\n for food in newFood:\n distance = util.manhattanDistance(newPos, food)\n if min_distance_f >= distance or min_distance_f == -1:\n min_distance_f = distance\n\n g_distance = 1\n prox_ghost = 0\n for g_state in currentGameState.getGhostPositions():\n distance = util.manhattanDistance(newPos, g_state)\n g_distance += distance\n if distance <= 1:\n prox_ghost += 1\n newCapsule = currentGameState.getCapsules()\n numCapsules = len(newCapsule)\n\n newScore = currentGameState.getScore() + (1 / float(min_distance_f)) - (1 / float(g_distance)) - prox_ghost - numCapsules\n return newScore", "def score_partial(self, X):\n score = self.model.decision_function([X])[0]\n\n return score", "def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves 
for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)", "def _fitness_model__(self, solution=None, minmax=0):\n return self.objective_func(solution) if minmax == 0 else 1.0 / (self.objective_func(solution) + self.EPSILON)", "def cost(self) -> float:", "def max_score(self):\r\n max_score = None\r\n if self.check_if_done_and_scored():\r\n max_score = self._max_score\r\n return max_score", "def optimize(cls, trials, score, evals_rounds, mon_cons, categorical):\n raise NotImplementedError", "def find_optimal_score(scores, path_label='label', criteria=['length'],\n weights=None, minimize=True):\n\n if type(minimize) in (list, np.array):\n minimize = np.array(minimize)\n else:\n minimize = np.ones(len(criteria)).astype(bool)\n\n if type(criteria) is not list:\n criteria = [criteria]\n\n # if no weights are specified, use equal weighting\n if not weights:\n weights = np.ones(len(criteria)) / (1. * len(criteria))\n\n # min/max normalization\n scores = (scores - scores.min()) / (scores.max() - scores.min())\n # reverse if necessary\n scores[:, ~minimize] = 1. - scores[:, ~minimize]\n #apply weight to scores \n scores = (scores * weights).sum(axis=1)\n\n return scores.argmin()", "def custom_score_2(game, player):\n # TODO: finish this function!\n if game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf\n if game.is_winner(player):\n #print(\"You win\")\n return math.inf\n\n # center\n width = game.width / 2\n height = game.height / 2\n\n # Opponent\n opponent = game.get_opponent(player)\n opp_y_coord, opp_x_coord = game.get_player_location(opponent)\n opp_x_eval = (width - float(opp_x_coord)) ** 2\n opp_y_eval = (height - float(opp_y_coord)) ** 2\n opp_center_eval = float(opp_x_eval + opp_y_eval)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n opp_score = opp_moves * 2 - opp_center_eval\n score = no_moves - opp_score/rem_spaces\n return float(score)", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Longest Path Heuristic (used towards end game)\n\n game_phase = len(game.get_blank_spaces()) # high if early, low if late in game\n max_phase = game.width*game.height\n\n def longestPath(player,game,path=0,longest=0):\n moves = game.get_legal_moves(player)\n if path > longest:\n longest = path\n if len(moves) == 0:\n path = 0\n for move in moves:\n new_board = game.forecast_move(move)\n longestPath(player,new_board,path+1,longest)\n return longest\n\n if (game_phase<15): # only feasible to calculate late-game\n game_phase = abs(game_phase-max_phase) # low if early, high if late in game\n return (longestPath(player,game)-longestPath(game.get_opponent(player),game))\n else:\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))", "def custom_score(game, player):\n \"\"\" custom_score 
heuristic function idea is to implement aggressive heuristic function \n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) # Calculate length of myPlayer moves\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player))) # Calculate length of opposite player moves same as custom score 2\n return float(length_my_player_moves - 1.5*length_opp_payer_moves)", "def _calculate_score_with_threshold(self):\n\n clue_number = 0\n positive_score, negative_score = 0, 0\n negative_number = 0\n total_score = 0\n\n # find largest negative score\n largest_negative_score = -1.\n for ix, (card, score) in enumerate(self.sorted_card_score_pairs):\n # find maximum score of negative word\n if card.color not in [self.team, \"DOUBLE\"]:\n largest_negative_score = score\n break\n\n # add scores higher than threshold + largest negative score to positive_score\n for card, score in self.sorted_card_score_pairs:\n if (score > (self.delta+largest_negative_score)\n and card.color in [self.team, \"DOUBLE\"]):\n clue_number += 1\n positive_score += score\n elif card.color not in [self.team, \"DOUBLE\"]:\n negative_score += score\n negative_number += 1\n else:\n continue\n\n if not self.penalize_negative:\n self.logger.info(\"negative score set to 0.\")\n negative_score = 0\n\n # if threshold(delta) is large, there will be no clues.\n # try to give at least one clue\n # select the positive card with score larger than largest_negative_score.\n if clue_number == 0:\n self.logger.debug(\"clue number: 0.\")\n for card, score in self.sorted_card_score_pairs:\n if card.color in [self.team, \"DOUBLE\"]:\n positive_score = score\n clue_number += 1\n self.cropped_threshold = score - largest_negative_score\n else:\n positive_score = 0\n break\n\n if self.normalize_negative:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score / negative_number\n else:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score\n self.logger.debug(\"word: {}, positive_score: {}, negative_score: {}, total_score: {}\".format(self.clue, positive_score, negative_score, total_score))\n return total_score, clue_number", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def test_max_score(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=True)\n self.assertTrue(all( score <= 1 for score in scores.values() ))", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def get_target_per_score(self):\n pass", "def maxValue(gameState, depth, a, b):\n ultimateMove = None # The best move the agent can do\n if gameState.isWin() or gameState.isLose(): # if terminal node return gamescore\n return self.evaluationFunction(gameState)\n bestValue = 
-math.inf\n # for all of max agents moves\n for action in gameState.getLegalActions(0):\n value = minValue(gameState.generateSuccessor(\n 0, action), depth, 1, a, b) # save value from call to minimizing agent\n if value > bestValue: # if value has increased, update parametres\n bestValue = value\n ultimateMove = action\n a = max(a, bestValue) # updates a=alpha if bestvalue > alpha\n if bestValue > b: # if value is greater than connected branch, do prune\n return bestValue\n if depth == self.depth: # if top node, return the best move\n return ultimateMove\n else:\n # if not top node (and no pruning) return best value\n return bestValue", "def ImprovedScore(tgtName, yourName, teamModels, featureImprove, teamTgtPipe, teamYourPipe, Imp = 0.1):\n\n ## Put featureImprove into target model - need to reverse _op and non _op\n coef = teamModels[tgtName]\n # Get stats of Team A's win and lose matches - only 20 features are saved\n features = list(coef['features'])\n # Get stats of Team B: Revert the features of A to retrieve features for B\n featureYour = []\n featureTgt = []\n for ii in features:\n if '_op' in ii:\n featureYour.append(ii[:-3])\n else:\n featureTgt.append(ii)\n\n dfTgt = teamTgtPipe[teamTgtPipe['season'] == 1415].ix[:, featureTgt]\n dfYour = teamYourPipe[teamYourPipe['season'] == 1415].ix[:, featureYour]\n dfYour.columns = dfYour.columns + '_op'\n\n # Get mean and reorder into the original feature order\n bb = pd.concat([dfTgt.mean(), dfYour.mean()])\n bb = bb.reindex(features)\n model = coef['model']\n\n for ii in bb.iteritems():\n if ((ii[0] + '_op') in featureImprove) or ((ii[0][:-3]) in featureImprove):\n if model.coef_[0][features.index(ii[0])] < 0:\n bb[ii[0]] *= 1 + Imp\n else:\n bb[ii[0]] *= 1 - Imp\n\n probTgt = model.predict_proba(bb)[0][1]\n\n ## Put featureImprove into your model\n coef = teamModels[yourName]\n # Get stats of Team A's win and lose matches - only 20 features are saved\n features = list(coef['features'])\n # Get stats of Team B: Revert the features of A to retrieve features for B\n featureYour = []\n featureTgt = []\n for ii in features:\n if '_op' in ii:\n featureTgt.append(ii[:-3])\n else:\n featureYour.append(ii)\n\n dfTgt = teamTgtPipe[teamTgtPipe['season'] == 1415].ix[:, featureTgt]\n dfYour = teamYourPipe[teamYourPipe['season'] == 1415].ix[:, featureYour]\n dfTgt.columns = dfTgt.columns + '_op'\n\n # Get mean and reorder into the original feature order\n bb = pd.concat([dfTgt.mean(), dfYour.mean()])\n bb = bb.reindex(features)\n\n model = coef['model']\n for ii in bb.iteritems():\n if ii[0] in featureImprove:\n if model.coef_[0][features.index(ii[0])] > 0:\n bb[ii[0]] *= 1 + Imp\n else:\n bb[ii[0]] *= 1 - Imp\n\n probYour = model.predict_proba(bb)[0][1]\n\n return round(probYour / (probYour + probTgt), 2)", "def custom_score_5(game, player):\n \"\"\"custom_score_5 heuristic function defines chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)" ]
[ "0.6647923", "0.6647923", "0.6601076", "0.653043", "0.6438808", "0.63390124", "0.6311056", "0.62663805", "0.62656355", "0.6254761", "0.6223245", "0.62063557", "0.6162101", "0.61386317", "0.6135826", "0.61111224", "0.6108157", "0.61070305", "0.60964113", "0.608142", "0.607549", "0.60599554", "0.6050272", "0.6029108", "0.6013635", "0.60036415", "0.5959946", "0.59505695", "0.5948227", "0.593859", "0.593859", "0.5935639", "0.59306246", "0.5917594", "0.5914099", "0.58841914", "0.5879676", "0.58749473", "0.587307", "0.5862609", "0.58585846", "0.5838417", "0.5837286", "0.5828333", "0.5821844", "0.580487", "0.58030874", "0.5786796", "0.5779911", "0.57682097", "0.5767375", "0.57668954", "0.576129", "0.5760723", "0.57596266", "0.5759403", "0.575117", "0.5739768", "0.5731598", "0.5714046", "0.57094854", "0.5704124", "0.57025677", "0.56965154", "0.5682861", "0.5681956", "0.5677612", "0.5673392", "0.56679845", "0.5665964", "0.56659347", "0.56604326", "0.56573725", "0.56545156", "0.5647806", "0.5638364", "0.5638364", "0.56378126", "0.5635069", "0.56320745", "0.5625045", "0.56229144", "0.5618494", "0.56110215", "0.56067365", "0.56042343", "0.560166", "0.560087", "0.55968875", "0.5582111", "0.5571864", "0.55663043", "0.5561372", "0.5560325", "0.5558324", "0.5555419", "0.55540407", "0.55507493", "0.55479676", "0.5547055" ]
0.6370619
5
Objective function to maximize total score difference over matches.
def objective_score_diff(me, other, turns, noise, repetitions, match_attributes=None):
    match = axl.Match((me, other), turns=turns, noise=noise,
                      match_attributes=match_attributes)
    if not match._stochastic:
        repetitions = 1
    scores_for_this_opponent = []

    for _ in range(repetitions):
        match.play()
        final_scores = match.final_score_per_turn()
        score_diff = final_scores[0] - final_scores[1]
        scores_for_this_opponent.append(score_diff)
    return scores_for_this_opponent
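A minimal sketch of reading the result, under the same assumed Axelrod setup; the Defector/Cooperator pairing is illustrative. Each list entry is a per-repetition score difference per turn (focal player minus opponent), so positive values mean the focal player outscored the opponent.

import axelrod as axl  # assumed to be the library behind the `axl` alias

diffs = objective_score_diff(axl.Defector(), axl.Cooperator(),
                             turns=100, noise=0.1, repetitions=5)
mean_diff = sum(diffs) / len(diffs)  # > 0: focal player wins on average per turn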
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life", "def evaluateObjective(posts, threshold):\n partialSum = 0\n for post in posts:\n partialSum += max(np.sign(post[\"similarity\"] - threshold) * post[\"score\"], 0)\n return partialSum", "def worst_score(self):\r\n pass", "def match_score(self):\n return self._match_score", "def get_expected_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_expected_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def objective_score(me, other, turns, noise, repetitions, match_attributes=None):\n match = axl.Match((me, other), turns=turns, noise=noise,\n match_attributes=match_attributes)\n if not match._stochastic:\n repetitions = 1\n scores_for_this_opponent = []\n\n for _ in range(repetitions):\n match.play()\n scores_for_this_opponent.append(match.final_score_per_turn()[0])\n return scores_for_this_opponent", "def get_score(self, solution: np.array) -> float:\n pass", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)", "def _calculate_score_with_threshold(self):\n\n clue_number = 0\n positive_score, negative_score = 0, 0\n negative_number = 0\n total_score = 0\n\n # find largest negative score\n largest_negative_score = -1.\n for ix, (card, score) in enumerate(self.sorted_card_score_pairs):\n # find maximum score of negative word\n if card.color not in [self.team, \"DOUBLE\"]:\n largest_negative_score = score\n break\n\n # add scores higher than threshold + largest 
negative score to positive_score\n for card, score in self.sorted_card_score_pairs:\n if (score > (self.delta+largest_negative_score)\n and card.color in [self.team, \"DOUBLE\"]):\n clue_number += 1\n positive_score += score\n elif card.color not in [self.team, \"DOUBLE\"]:\n negative_score += score\n negative_number += 1\n else:\n continue\n\n if not self.penalize_negative:\n self.logger.info(\"negative score set to 0.\")\n negative_score = 0\n\n # if threshold(delta) is large, there will be no clues.\n # try to give at least one clue\n # select the positive card with score larger than largest_negative_score.\n if clue_number == 0:\n self.logger.debug(\"clue number: 0.\")\n for card, score in self.sorted_card_score_pairs:\n if card.color in [self.team, \"DOUBLE\"]:\n positive_score = score\n clue_number += 1\n self.cropped_threshold = score - largest_negative_score\n else:\n positive_score = 0\n break\n\n if self.normalize_negative:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score / negative_number\n else:\n total_score = (1-self.alpha) * positive_score - self.alpha * negative_score\n self.logger.debug(\"word: {}, positive_score: {}, negative_score: {}, total_score: {}\".format(self.clue, positive_score, negative_score, total_score))\n return total_score, clue_number", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def score(self):", "def mirkin_match_coeff(self, normalize=True):\n max_score = self.grand_total ** 2\n score = max_score - self.mirkin_mismatch_coeff(normalize=False)\n if normalize:\n score = _div(score, max_score)\n return score", "def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1", "def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore", "def distance(self):\n\t\tif len(self._scores < 2):\n\t\t\treturn 0.0\n\n\t\treturn self[-1] - self[-2]", "def similarity_score(self, lhs, rhs):\n pass", "def best_action(self):\n child_score = self.child_Q() + self.mcts.c_puct * self.child_U()\n masked_child_score = child_score\n return np.argmax(masked_child_score)", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def ImprovedScore(tgtName, yourName, teamModels, featureImprove, teamTgtPipe, teamYourPipe, Imp = 0.1):\n\n ## Put featureImprove into target model - need to reverse _op and non _op\n coef = teamModels[tgtName]\n # Get stats of Team A's win and lose matches - only 20 features are saved\n features = list(coef['features'])\n # Get stats of Team B: Revert the features of A to retrieve features for B\n featureYour = []\n featureTgt = []\n for ii in features:\n if '_op' in ii:\n featureYour.append(ii[:-3])\n else:\n featureTgt.append(ii)\n\n dfTgt = teamTgtPipe[teamTgtPipe['season'] == 1415].ix[:, featureTgt]\n dfYour = 
teamYourPipe[teamYourPipe['season'] == 1415].ix[:, featureYour]\n dfYour.columns = dfYour.columns + '_op'\n\n # Get mean and reorder into the original feature order\n bb = pd.concat([dfTgt.mean(), dfYour.mean()])\n bb = bb.reindex(features)\n model = coef['model']\n\n for ii in bb.iteritems():\n if ((ii[0] + '_op') in featureImprove) or ((ii[0][:-3]) in featureImprove):\n if model.coef_[0][features.index(ii[0])] < 0:\n bb[ii[0]] *= 1 + Imp\n else:\n bb[ii[0]] *= 1 - Imp\n\n probTgt = model.predict_proba(bb)[0][1]\n\n ## Put featureImprove into your model\n coef = teamModels[yourName]\n # Get stats of Team A's win and lose matches - only 20 features are saved\n features = list(coef['features'])\n # Get stats of Team B: Revert the features of A to retrieve features for B\n featureYour = []\n featureTgt = []\n for ii in features:\n if '_op' in ii:\n featureTgt.append(ii[:-3])\n else:\n featureYour.append(ii)\n\n dfTgt = teamTgtPipe[teamTgtPipe['season'] == 1415].ix[:, featureTgt]\n dfYour = teamYourPipe[teamYourPipe['season'] == 1415].ix[:, featureYour]\n dfTgt.columns = dfTgt.columns + '_op'\n\n # Get mean and reorder into the original feature order\n bb = pd.concat([dfTgt.mean(), dfYour.mean()])\n bb = bb.reindex(features)\n\n model = coef['model']\n for ii in bb.iteritems():\n if ii[0] in featureImprove:\n if model.coef_[0][features.index(ii[0])] > 0:\n bb[ii[0]] *= 1 + Imp\n else:\n bb[ii[0]] *= 1 - Imp\n\n probYour = model.predict_proba(bb)[0][1]\n\n return round(probYour / (probYour + probTgt), 2)", "def get_estimated_score(match_data: dict) -> float:\n \n auto_high = {match_data['auto_HighClose']: match_data['auto_conInnerClose'],\n match_data['auto_HighFrontCP']: match_data['auto_conInnerFrontCP'],\n match_data['auto_HighLine']: match_data['auto_conInnerLine']\n }\n auto_low = match_data['auto_Low']\n auto_line = match_data['auto_leftSectorLine']\n \n tele_high = {match_data['tele_HighClose']: match_data['tele_conInnerClose'],\n match_data['tele_HighFrontCP']: match_data['tele_conInnerFrontCP'],\n match_data['tele_HighLine']: match_data['tele_conInnerLine'],\n match_data['tele_HighBackCP']: match_data['tele_conInnerBackCP']\n }\n tele_low = match_data['tele_Low']\n climbed = match_data['tele_Climbed']\n parked = match_data['tele_UnderSG']\n \n score = 0\n \n # Gives autonomous points\n for x in auto_high:\n score += (4.3, 4.8)[auto_high[x]] * x\n score += auto_low * 2\n if auto_line: score += 5\n \n # Gives teleop points\n for x in tele_high:\n score += (2.15, 2.4)[tele_high[x]] * x\n score += tele_low\n \n # Gives endgame points\n if climbed: score += 25\n if parked: score += 5\n \n return score", "def objective(self):\n pass", "def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score", "def PredictMatch(yourName, tgtName, teamModels, db):\n\n teamTgtPipe, teamYourPipe = QueryTeamData(tgtName, yourName, db)\n featureCoefTgt, probTgt = PredictOp(teamTgtPipe, teamYourPipe, tgtName, teamModels)\n featureCoefYour, probYour = PredictOp(teamYourPipe, teamTgtPipe, yourName, teamModels)\n odds = round(probYour / (probYour + probTgt), 2)\n\n # In featureCoefYour, you want INCREASE those with POSTIVE COEF, DECREASE those with NEGATIVE COEF\n # In featureCoefTgt, you want to do the opposite\n\n # reverse both the sign of the coef, and '_op' in features so as to be the same with featureCoefYour\n featureCoefTgt['coef'] = - featureCoefTgt['coef']\n 
featureCoefTgt.features = [ii[:-3] if \"_op\" in ii else ii + '_op' for ii in featureCoefTgt.features]\n\n # Combine only the most important 10 features\n # featureBoth = featureCoefTgt[11:].append(featureCoefYour[11:])\n\n # Combine only all the most important features\n featureBoth = featureCoefTgt.append(featureCoefYour)\n\n # get action recommendations\n # Somehow the pandas here uses a deprecated para cols, instaed of the new one subset\n #featureBoth.drop_duplicates(subset = 'features', take_last = True, inplace = True)\n featureBoth.drop_duplicates(cols = 'features', take_last = True, inplace = True)\n actions, featureImprove = GetActions(featureBoth)\n Imp = 0.1\n oddsNew = ImprovedScore(tgtName, yourName, teamModels, featureImprove, teamTgtPipe, teamYourPipe, Imp)\n\n return odds, oddsNew, actions", "def score_solution(g, s):\n pass", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def max_v_greedy():\n\n S1=Spectrum.Spectrum()\n S1.add_peak(50.4,16)\n S1.add_peak(50.7,36)\n S1.add_peak(74.8,25)\n S1.add_peak(96.2,23)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.6,49)\n S2.add_peak(50.9,25)\n S2.add_peak(74.6,9)\n S2.add_peak(102.4,17)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n g_score,g_peaks=similarity.cosine_score_greedy(S1,S2)\n\n assert score>=g_score, \"Maximum weighted method did not get higher score than greedy method\"\n assert peaks>=g_peaks, \"Maximum weighted method did not match more peaks than greedy method\"\n\n assert peaks==3, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,0.73), \"Incorrect score with greedy method\"\n\n assert g_peaks==2, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(g_score,0.57), \"Incorrect score with maximum weighted method\"", "def depiction_score(self):\n\n collision_penalty = 1\n degenerated_penalty = 0.4\n\n bond_collisions = self.count_bond_collisions()\n degenerated_atoms = self.count_suboptimal_atom_positions(0.0, 0.5)\n\n score = (\n collision_penalty * bond_collisions\n + degenerated_penalty * degenerated_atoms\n )\n\n return round(score, 1)", "def scoring(self):\n pass", "def custom_score_3(game, player):\n \"\"\"custom_score_3 heuristic function aims at maximizing win chances of my agent\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = 1.0 * len(game.get_legal_moves(player))#Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves with oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(length_my_player_moves/length_opp_payer_moves)", "def max_diffs(state):\n # your code here\n return best_action(state, pig_actions, Q_pig, win_diff)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Aim to maximise your own available moves vs the opponent (Factor 2)\n\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 
0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def compute_gain(self, video, server):\n \n if video in server.videos:\n return -2\n \n if server.space < video.size:\n return -1\n \n score_gain = 0\n\n for request in self.affected_requests[video.id][server.id]:\n if request.video == video:\n current_latency = request.endpoint.latencies[request.server.id] if request.server else request.endpoint.Ld\n new_latency = request.endpoint.latencies[server.id]\n if new_latency < current_latency:\n # current_request_score = request.n * (endpoint.Ld - current_latency)\n # new_request_score = request.n * (endpoint.Ld - new_latency)\n # score_gain += new_request_score - current_request_score\n score_gain += request.n * (current_latency - new_latency) # faster\n\n return score_gain", "def _get_lip_best(self) -> float:\n pass", "def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)", "def max_score(self):\n return self.raw_possible", "def best(self):\n alpha = -1\n beta = +1\n move = self.__negamax(alpha, beta, tt=DictTT())\n return move[1]", "def personal_best(scores):\n return max(scores)", "def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union", "def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def get_observed_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_observed_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def _match_rate_goal(self, goal, booked_entity, domains=None):\n if domains is None:\n domains = self.belief_domains\n score = []\n for domain in domains:\n if 'book' in goal[domain]:\n tot = 0\n for key, value in goal[domain].items():\n if value != '?':\n tot += 1\n entity = booked_entity[domain]\n if entity is None:\n score.append(0)\n continue\n if domain in ['taxi', 'hospital', 'police']:\n score.append(1)\n continue\n match = 0\n for k, v in goal[domain].items():\n if v == '?':\n continue\n if k in ['dest', 'depart', 'name'] or k not in self.mapping[domain]:\n tot -= 1\n elif k == 'leave':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + 
int(v.split(':')[1])\n v_select = int(entity['leaveAt'].split(':')[0]) * 100 + int(entity['leaveAt'].split(':')[1])\n if v_constraint <= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n elif k == 'arrive':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['arriveBy'].split(':')[0]) * 100 + int(entity['arriveBy'].split(':')[1])\n if v_constraint >= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n else:\n if v.strip() == entity[self.mapping[domain][k]].strip():\n match += 1\n if tot != 0:\n score.append(match / tot)\n return score", "def _match_rate_goal(self, goal, booked_entity, domains=None):\n if domains is None:\n domains = self.belief_domains\n score = []\n for domain in domains:\n if 'book' in goal[domain]:\n tot = 0\n for key, value in goal[domain].items():\n if value != '?':\n tot += 1\n entity = booked_entity[domain]\n if entity is None:\n score.append(0)\n continue\n if domain in ['taxi', 'hospital', 'police']:\n score.append(1)\n continue\n match = 0\n for k, v in goal[domain].items():\n if v == '?':\n continue\n if k in ['dest', 'depart', 'name'] or k not in self.mapping[domain]:\n tot -= 1\n elif k == 'leave':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['leaveAt'].split(':')[0]) * 100 + int(entity['leaveAt'].split(':')[1])\n if v_constraint <= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n elif k == 'arrive':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['arriveBy'].split(':')[0]) * 100 + int(entity['arriveBy'].split(':')[1])\n if v_constraint >= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n else:\n if v.strip() == entity[self.mapping[domain][k]].strip():\n match += 1\n if tot != 0:\n score.append(match / tot)\n return score", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def get_score(self, solution: np.array) -> float:\n score = 0\n for vehicle_count, vehicle_solution in enumerate(solution):\n distances = self.distance_matrix[vehicle_solution[0:-1], vehicle_solution[1:]]\n costs = distances * self.selected_transportation_cost[vehicle_count]\n score += np.sum(costs)\n return score", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def optimal_max(board):\n # Board full?\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is negative infinity\n global_optimum = [None, -math.inf]\n\n # For each move, what would opponent do next? 
Update best move.\n for action in available_actions:\n # Anticipates optimal adversarial moves\n local_optimum = optimal_min(result(board, action))\n\n # Compares local vs global optima\n if global_optimum[1] <= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def max_diffs(state):\n return best_action(state, pig_actions, Q_pig, win_diff)", "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n \"\"\"\n #Heuristic 1: Aggressive Improved Score\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n return float(own_moves - 2*opp_moves)\n\n \"\"\"\n\n \"\"\"\n #Heuristic 2: Border/Non-Border Differentiated Moves Scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n own_score = 0\n opp_score = 0\n for each_move in game.get_legal_moves(player):\n if each_move in border_moves:\n own_score = own_score + 1\n else:\n own_score = own_score + 1.5\n\n for each_move in game.get_legal_moves(game.get_opponent(player)):\n if each_move in border_moves:\n opp_score = opp_score + 1\n else:\n opp_score = opp_score + 1.5\n\n return float(own_score - opp_score)\n \"\"\"\n\n #Heuristic 3: Advanced Differentiated Board scoring\n border_moves = [(0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6),\n (1,0), (1,6), (2,0), (2,6), (3,0), (3,6), (4,0),\n (4,6), (5,0), (5,6), (6,0), (6,1), (6,2), (6,3),\n (6,4), (6,5), (6,6)]\n\n next_to_border_moves = [(1,1), (1,2), (1,3), (1,4), (1,5), (2,1),\n (2,5), (3,1), (3,5), (4,1), (4,5),\n (5,1), (5,2), (5,3), (5,4), (5,5)]\n\n own_score = 0\n opp_score = 0\n\n for move in game.get_legal_moves(player):\n if move in border_moves:\n own_score += 1\n elif move in next_to_border_moves:\n own_score += 1.2\n else:\n own_score += 1.5\n\n for move in game.get_legal_moves(game.get_opponent(player)):\n if move in border_moves:\n opp_score += 1\n elif move in next_to_border_moves:\n opp_score += 1.2\n else:\n opp_score += 1.5\n\n return float(own_score - opp_score)", "def max_score(self):\n return self.points", "def local_aligner_score(s1, s2, gap_penalty=-1, gap_opening_penalty=-10, edit_function=utils.sub_matrices_distance, matrix=MatrixInfo.pam120):\n\n n_row = len(s1) + 1\n n_col = len(s2) + 1\n # Creates a matrix where the partial scores are stored.\n S = np.zeros((n_row, n_col))\n # Creates a matrix (stored as DataFrame) where the optimal movements are\n # stored.\n backtrack_matrix = pd.DataFrame(\"\", index=np.arange(n_row), columns=np.arange(n_col))\n\n # Initialize the first column and row of the matrices.\n # In the local aligner, we stop when a 0 is encountered, which corresponds to an \"X\"\n for i in range(n_row):\n backtrack_matrix.set_value(i, 0, \"X\")\n\n for j in range(n_col):\n backtrack_matrix.set_value(0, j, \"X\")\n \n # small optimization: keep track of the maximum score encountered so far, and its indices.\n score_max = 0\n i_max = 0\n j_max = 0\n \n for i in range(1, n_row):\n for j in range(1, n_col):\n # Compute the possible movements, and then keeps the best.\n s1_gap = max([S[i - k, j] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, i+1)])\n s2_gap = max([S[i, j - k] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, j+1)])\n mut = S[i - 1, j - 1] + edit_function(s1[i 
- 1], s2[j - 1], matrix=matrix)\n # In the local aligner, don't accept negative scores!\n S[i, j] = max(s1_gap, s2_gap, mut, 0)\n\n if S[i, j] >= score_max:\n score_max = S[i, j]\n i_max = i\n j_max = j\n # Write in the matrix the movement that lead to that cell, as a string.\n # e.g. \"HV\" means that horizontal and vertical movements were the\n # best.\n # In local alignment, \"X\" means that 0 was the maximum value, and all the movements gave a negative score.\n # The backtracking will stop when an \"X\" is encountered.\n backtrack_matrix.set_value(i, j, \"\".join(check_argmax([s1_gap, s2_gap, mut, 0])))\n \n return [score_max, S, backtrack_matrix, i_max, j_max]", "def cost(self) -> float:", "def custom_score_3(game, player):\n \n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n player_legal_move_count, opponent_legal_move_count = \\\n len(player_legal_moves), len(opponent_legal_moves)\n move_count_difference = player_legal_move_count - opponent_legal_move_count\n # Find coordinates of center box\n h, w = get_center_coordinates(game)\n # Retrieve player's coordinates\n y, x = game.get_player_location(player)\n # Obtain coordinate further, closest to origin\n furthest_coord, closest_coord = max(h - y, w -x), min(h - y, w - x)\n # Return weighted, vector-valued length from origin / sum of weights\n weighted_distance_from_center = \\\n math.sqrt((closest_coord**2 + 2*(furthest_coord**2)))/3\n feature_vector = (move_count_difference, weighted_distance_from_center)\n \n weight_vector = (1.0,0.1)\n \n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(weight_vector, feature_vector)) \n \n return float(weighted_difference_dot_product)", "def calculScore(affectation, carac, dicoCaracServeur):\n \n # Initialisation du score minimal\n score = capaciteGarantie(affectation, 0, carac, dicoCaracServeur)\n \n # rq : l'initialisation etant faite, on part de 1 et non de zero\n for p in range(1, carac[\"P\"]):\n tmpScore = capaciteGarantie(affectation, p, carac, dicoCaracServeur)\n if tmpScore < score:\n score = tmpScore\n \n return score", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def get_max_score(self):\r\n maxscore = 0\r\n for responder in self.responders.values():\r\n maxscore += responder.get_max_score()\r\n return maxscore", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward", "def alpha_beta_search(self, maxDepth, board, player, best_move):\r\n \"\"\"j refers to the list where j[0] = score and j[1] = move\"\"\"\r\n start = time()\r\n #playerPiece = core.BLACK\r\n #if player == \"BLACK\":\r\n #print(\"hi\")\r\n #playerPiece = core.WHITE\r\n self.max_value(board, -1*float(\"inf\"), float(\"inf\"), player, 0, maxDepth, best_move)\r\n endTime = time()\r\n #print(endTime - start)\r\n #sort by square weights\r\n #how many flips each move will make\r\n #change the scoring matrix\r\n #dictionary\r\n #time limits\r\n #return j[1]\r", "def evaluate_score(self,word_id):\r\n total_best = 0\r\n assigned_to_return = {}\r\n for possible_word 
in self.satisfiers[word_id].keys():\r\n words_to_iterate = []\r\n iterated_word_ids = []\r\n # print()\r\n for connected_word_id in self.satisfiers[word_id][possible_word].keys():\r\n words_to_iterate.append(self.satisfiers[word_id][possible_word][connected_word_id])\r\n # print(\"word_id: {}, possible_word: {}, connected_id: {}, words: {}\".format(word_id,possible_word, connected_word_id,self.satisfiers[word_id][possible_word][connected_word_id]))\r\n iterated_word_ids.append(connected_word_id)\r\n \r\n # print(possible_word)\r\n # print(\"\\nPossible word:\",possible_word)\r\n for comb in itertools.product(*words_to_iterate):\r\n assigned_words = {}\r\n assigned_words[word_id] = possible_word\r\n for i in range(len(iterated_word_ids)):\r\n assigned_words[iterated_word_ids[i]] = comb[i]\r\n # print(\"word_id: {} comb: {}\".format(word_id,comb))\r\n # print(\"\\nword_id: {}, assigned words: {}\".format(word_id,assigned_words))\r\n new_assigned, current_max = self.get_max_score(word_id,assigned_words)\r\n # print(\"new_assigned: {}, current_max: {}\".format(new_assigned, current_max))\r\n if current_max > total_best:\r\n total_best = current_max\r\n assigned_to_return = {}\r\n assigned_to_return = new_assigned\r\n return assigned_to_return, total_best", "def get_target_per_score(self):\n pass", "def crowding_distance_assignment(scores: dict) -> dict:\n\n distance = {i: 0 for i in scores}\n\n try:\n M = len(_peek_any(scores.values())) # number of objectives\n for m in range(M):\n # Sort using each objective value\n inds = tuple(sorted(scores.keys(), key=lambda i: scores[i][m]))\n\n fm_min, fm_max = scores[inds[0]][m], scores[inds[-1]][m]\n\n for i in inds:\n if scores[i][m] in [fm_min, fm_max]:\n distance[i] = float('inf')\n\n for i in range(1, len(inds) - 1):\n try:\n earlier = scores[inds[i + 1]]\n later = scores[inds[i - 1]]\n space = (earlier[m] - later[m])\n normalized = (fm_max - fm_min)\n distance[inds[i]] += space / normalized\n except ArithmeticError:\n distance[inds[i]] = float('inf')\n\n except IndexError:\n # _peek_any failed: we don't have any individuals\n pass\n\n return distance", "def penalty(self):\n return 0", "def scoring_function(times):\n sorted_times = sorted(times)\n \n diffs = []\n for i in range(len(sorted_times)-1):\n diff = sorted_times[i+1]- sorted_times[i]\n \n if diff == 0.0: # overlaps cannot happen score with a large penalty\n diffs.append(-100)\n elif diff <= 1.0: # punish small differences\n diffs.append(-2)\n elif diff > 4.0: # Gaps greater than 4 are large enough and considered OK\n diffs.append(4.0)\n else:\n diffs.append(diff)\n \n return sum(diffs)", "def marginal_score(self,):\n score = 0\n visited = set()\n for edge in self.edges:\n if edge not in visited:\n visited.add(edge)\n visited.add(edge.reverse)\n if len(edge.cars) == 1:\n score += edge.original_distance\n return score", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def _get_reward(self, player_score, opponent_score):\n return player_score - opponent_score", "def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus", "def personal_best(scores: list) -> int:\n return max(scores)", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n\n if currentGameState.getFood().asList() == []: # Null list catch if there is no food on the board\n return currentGameState.getScore()\n else:\n return 
max([manhattanDistance(currentGameState.getPacmanPosition(),x) * -1\n for x in currentGameState.getFood().asList()]) + currentGameState.getScore()", "def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)", "def get_improved_score_factor(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def state_score_naive(self, game_state, player, weights):\n # walls score\n other_players = [p for p in game_state.players if p != player]\n my_walls = player.num_walls\n their_walls = max([p.num_walls for p in other_players])\n walls_diff = (my_walls - their_walls)\n # path length score\n my_path = len(game_state.get_shortest_path_player(player))\n their_path = min([len(game_state.get_shortest_path_player(p)) for p in other_players])\n paths_diff = their_path - my_path\n \n return weights[0]*walls_diff + weights[1]*paths_diff", "def score(self, s):\n fv = s.feature_vector\n product = fv.dot(self.params.T)[0, 0]\n return s.score(lmwt=self.lmwt) + product", "def scoreR(self) :\n if self.leafR() :\n return self.leafScore(), self\n else :\n games = self.R()\n min_g = games[0]\n min_score = min_g.scoreL()\n for g in games[1:] :\n score = g.scoreL()\n if score[0] < min_score[0] :\n min_g = g\n min_score = score\n return (min_score+(min_g,))", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def _best_matching_movie(movie):\n return (Levenshtein.distance(movie_name, movie.get('title').lower()),\n (0 - movie.get('votes', 0)),\n )", "def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)", "def test_max_score(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=True)\n self.assertTrue(all( score <= 1 for score in scores.values() ))", "def score(self, predictions):\n return 0.", "def _run():\n matching_terms = {'a', 'b'}\n source_counts = {'a': 10, 'b': 50, 'c': 25}\n target_counts = {'a': 4, 'b': 73, 'c': 15}\n source_chunk = ['a', 'b']\n target_chunk = ['a', 'c', 'b']\n source_distance = score.find_distance(\n matching_terms, source_chunk, source_counts)\n target_distance = score.find_distance(\n matching_terms, target_chunk, target_counts)\n match_score = score.vanilla(\n matching_terms, source_distance, target_distance, source_counts,\n target_counts)\n print('Calculated score:', match_score)", "def adjusted_score(self) -> Tuple[List[str], np.ndarray]:\n nb_agents = self.game.configuration.nb_agents\n current_scores = np.zeros((1, nb_agents), dtype=np.float32)\n\n eq_agent_states = dict(\n (\n agent_pbk,\n AgentState(\n self.game.initialization.eq_money_holdings[i],\n [int(h) for h in self.game.initialization.eq_good_holdings[i]],\n self.game.initialization.utility_params[i],\n ),\n )\n for agent_pbk, i in 
zip(\n self.game.configuration.agent_pbks,\n range(self.game.configuration.nb_agents),\n )\n ) # type: Dict[str, AgentState]\n\n result = np.zeros((1, nb_agents), dtype=np.float32)\n\n eq_scores = np.zeros((1, nb_agents), dtype=np.float32)\n eq_scores[0, :] = [\n eq_agent_state.get_score() for eq_agent_state in eq_agent_states.values()\n ]\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # initial scores\n initial_scores = np.zeros((1, nb_agents), dtype=np.float32)\n scores_dict = temp_game.get_scores()\n initial_scores[0, :] = list(scores_dict.values())\n keys = list(scores_dict.keys())\n current_scores = np.zeros((1, nb_agents), dtype=np.float32)\n current_scores[0, :] = initial_scores[0, :]\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n scores_dict = temp_game.get_scores()\n current_scores[0, :] = list(scores_dict.values())\n\n result[0, :] = np.divide(\n np.subtract(current_scores, initial_scores),\n np.subtract(eq_scores, initial_scores),\n )\n result = np.transpose(result)\n\n return keys, result", "def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)", "def worst_atom(self, g_u, g_v, active_set):\n\n max_w = None\n max_m_w = None\n max_n_w = None\n max_score = -float('inf')\n\n for w in active_set:\n m_w, n_w = self.polytope.vertex(w)\n score_w = np.sum(g_u * m_w) + np.sum(g_v * n_w)\n\n if score_w > max_score:\n max_w = w\n max_m_w = m_w\n max_n_w = n_w\n max_score = score_w\n\n return max_w, max_m_w, max_n_w", "def new_evaluate(board):\n\n #Logic for new_evaluate function:\n #1)Traverse through each of the columns\n #2)For each of the columns, find the top most element.\n\t #If the topmost element = Current Player\n\t\t \t#3)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a POSITIVE value\n\t #Else\n\t\t \t#4)Find the possible number of continuous elements of the same type in all the 4 directions from that cell(Horizontal,vertical and two diagonals)\n\t\t\t #Take the max of these lengths and this becomes the score for that column and it will stored as a NEGATIVE value\n #5)Sort these Positive and Negative scores\n #6)IF the highest negative score is greater than the highest positive score, then it means that the opposition has MORE chances to WIN.\n #So, that has to be blocked and so we will return that HIGHEST NEGATIVE value as the score for that board\n #7)ELSE we go ahead and return the HIGHEST POSITIVE value as the score for that board\n #->This logic has increasing the AGGRESSION of the player a lot and it 
makes senses we hope.\n\n posdict = {}\n negdict = {}\n for col in range(7):\n if(board.get_top_elt_in_column(col)==board.get_current_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = board._max_length_from_cell(rowValue,col)\n posdict[col]=score\n elif(board.get_top_elt_in_column(col)==board.get_other_player_id()) :\n rowValue = board.get_height_of_column(col)\n score = -(board._max_length_from_cell(rowValue,col))\n negdict[col]=score\n\n\n sorted(posdict.values(),reverse= True)\n sorted(negdict.values())\n if((bool(posdict))and (bool(negdict))):\n if(abs(negdict.values()[0]) >= ((posdict.values()[0]))):\n return negdict[negdict.keys()[0]]\n else:\n return posdict[posdict.keys()[0]]\n elif(bool(posdict)):\n return posdict[posdict.keys()[0]]\n elif(bool(negdict)):\n return negdict[negdict.keys()[0]]\n else:\n return 0", "def custom_score(game, player):\n # return penalize_corners_heuristic(game, player)\n # return favor_run_away_heuristic(game, player)\n return look_ahead_heuristic(game, player)", "def score(self):\n xg, yg = self.goal\n xe, ye = self.empty_node()\n score = len(self.history) + 4*(xg + yg)\n if xg == 1:\n score -= 3\n if ye > 1:\n score += ye - 1\n dx = abs(xe - xg + 1)\n if xg and dx:\n score += dx\n return score", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newFood = newFood.asList()\n min_distance_f = -1\n for food in newFood:\n distance = util.manhattanDistance(newPos, food)\n if min_distance_f >= distance or min_distance_f == -1:\n min_distance_f = distance\n\n g_distance = 1\n prox_ghost = 0\n for g_state in currentGameState.getGhostPositions():\n distance = util.manhattanDistance(newPos, g_state)\n g_distance += distance\n if distance <= 1:\n prox_ghost += 1\n newCapsule = currentGameState.getCapsules()\n numCapsules = len(newCapsule)\n\n newScore = currentGameState.getScore() + (1 / float(min_distance_f)) - (1 / float(g_distance)) - prox_ghost - numCapsules\n return newScore", "def quick_e_score(self, n1, n2):\n if n1.needs_update:\n n1._update()\n if n2.needs_update:\n n2._update()\n dists = cdist(n1.mat, n2.mat)\n return -np.max(dists)", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def scoreTeams(curTeams, oppTeam, pokedex, league, minDistWanted):\n battleData, htmlData = loadBattleData(league)\n similarities = loadSims() \n \n #If not given an opponent team then simply randomly choose losers from the dataset to compare to.\n if len(oppTeam) == 0:\n picks = set([])\n while (len(picks) < NUMLOSINGTEAMS and (not len(picks) == len(battleData))):\n picks.add(random.randint(0,len(battleData)-1))\n\n losers = []\n loserDict = {}\n for i in picks:\n entry = battleData[i]\n winner,loser = determineWinner(entry)\n loserDict[str(loser)] = [winner]\n losers.append( (loser,0) )\n\n #Given opponent team then find similar teams\n else:\n oppTeam = [getSimPokemon(opp,similarities) for opp in oppTeam]\n\n #create dictionary from losers team to the team that beat them.\n loserDict = {}\n sims = []\n for d in battleData:\n winner, loser = determineWinner(d)\n\n wTeam = teamToArray(winner,pokedex)\n lTeam = np.array(teamToArray(loser, pokedex))\n\n score = 0\n for oppNp in oppTeam:\n score+= np.amax(lTeam*oppNp) \n\n if 
str(loser) in loserDict:\n loserDict[str(loser)].append(winner)\n else:\n #new to dictonary\n loserDict[str(loser)] = [winner]\n\n sims.append((loser, score))\n\n\n sims = sorted(sims, key = lambda x : x[1], reverse = True)\n\n cutoff = min(len(sims),NUMLOSINGTEAMS)\n losers = sims[:cutoff]\n\n #Gather winners to losing teams\n winnersComp = []\n for loser,_ in losers:\n for winner in loserDict[str(loser)]:\n winnersComp.append(teamToArray(winner,pokedex))\n \n topScore = len(winnersComp)*6 #pkmn team size\n\n results = []\n inverted_idx = {}\n\n existsSet = []\n\n #Creates inverted index for teams, while simoultaneously weeding out any teams that are exactly similar.\n for i in range(len(curTeams)):\n team = curTeams[i]\n results.append((team,0))\n sTeam = set(team)\n if not (sTeam in existsSet):\n existsSet.append(sTeam)\n for pkm in team:\n if pkm != EMPTY:\n if pkm in inverted_idx:\n inverted_idx[pkm].append(i)\n else:\n inverted_idx[pkm] = [i]\n \n #Giving the similiarity scores to the winners based off of the inverted index.\n for pkm in inverted_idx:\n for winner in winnersComp:\n wArr = np.array(winner)\n #tArr = getSimPokemon(pkm,similarities)\n tArr = similarities[pkm]\n \n vals = wArr * tArr\n\n score = np.amax(vals)\n\n for i in inverted_idx[pkm]:\n results[i] = (results[i][0],results[i][1]+(score/topScore))\n\n results = sorted(results, key = lambda x : x[1], reverse = True)\n\n if len(results) < NUMTEAMSRETURN:\n if len(results) == 0:\n returnTeams = [[] for x in range(NUMTEAMSRETURN)]\n teamScores = [0 for x in range(NUMTEAMSRETURN)]\n\n else:\n returnTeams = [result[0] for result in results]\n teamScores = [result[1] for result in results]\n else:\n firstResult, firstScore = results[0]\n returnTeams = [firstResult]\n teamScores = [round(firstScore*100,1)]\n returnSets = [set(firstResult)]\n \n i = 1\n\n #Loops through results and adds teams with the proper edit distance away.\n while(len(returnTeams) < NUMTEAMSRETURN and minDistWanted > 0):\n teamToConsider,teamToConsiderScore = results[i]\n \n considerSet = set(teamToConsider)\n add = True\n ##checks the edit distance of teams is above wanted\n for team in returnSets:\n if len(team.union(considerSet)) < len(team)+minDistWanted:\n add = False\n\n ##If indeed above wanted levels then add\n if add:\n returnTeams.append(teamToConsider)\n returnSets.append(considerSet)\n teamScores.append(round(teamToConsiderScore*100,1))\n \n i+=1\n\n if i >= len(results):\n i = 1\n minDistWanted -= 1 \n \n winHtmls = []\n if htmlData != None:\n for team,_ in losers:\n for winner in loserDict[str(team)]:\n winHtmls.extend(htmlData[str(sorted(winner))])\n \n\n return returnTeams, teamScores, winHtmls", "def max_score(self):\r\n return self.lcp.get_max_score()", "def get_max_score(self,word_id, assigned_words):\r\n def find_max(possible_word_dict,word_id2):\r\n max_score = 0\r\n new_word_to_assign = '*' * self.words[word_id2].length\r\n for possible_word in possible_word_dict.keys():\r\n score = 0\r\n for element in assigned_words.keys():\r\n if element != word_id2:\r\n if element in self.satisfiers[word_id2][possible_word].keys():\r\n if assigned_words[element] in self.satisfiers[word_id2][possible_word][element]:\r\n score += 1\r\n if score >= max_score:\r\n max_score = score\r\n new_word_to_assign = possible_word\r\n return (new_word_to_assign, max_score), max_score\r\n if word_id[1] == 'A':\r\n words = self.across\r\n elif word_id[1] == 'D':\r\n words = self.down\r\n total_score = 0\r\n new_assigned_words = {}\r\n for word_id2 in 
words.keys():\r\n if word_id2 != word_id:\r\n max_w, max_s = find_max(self.satisfiers[word_id2],word_id2)\r\n total_score += max_s\r\n new_assigned_words[word_id2] = max_w\r\n return new_assigned_words, total_score", "def alpha_beta_search(self, game_state, depth):\r\n alpha = float(\"-inf\")\r\n beta = float(\"inf\")\r\n best_score = float(\"-inf\")\r\n best_move = None\r\n for a in game_state.actions():\r\n vv = self.min_value(game_state.result(a), alpha, beta, depth)\r\n alpha = max(alpha, vv)\r\n if vv > best_score:\r\n best_score = vv\r\n best_move = a\r\n return best_move", "def update_score():\n pass", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def target_score(self, g, w):\n A = bool(max(self.multi_score(g.target_industry, w.target_industry, self.indicator)))\n 
B = bool(max(self.multi_score(g.target_organization, w.target_organization, self.indicator)))\n if g.event_type == 'malicious-email':\n C = max(self.multi_score(g.target_entity, w.target_entity, self.sim_recip))\n else:\n C = max(self.multi_score(g.target_entity, w.target_entity, self.sim_ld))\n\n if C is None:\n return self.n_score((A, B))\n else:\n return self.n_score((A, (B, C)))" ]
[ "0.6714346", "0.6714346", "0.65570205", "0.64613044", "0.6316741", "0.62868387", "0.6236504", "0.62348974", "0.6215959", "0.6197578", "0.61927944", "0.61494", "0.61126816", "0.6108994", "0.61036104", "0.60992247", "0.60669684", "0.6046158", "0.60387737", "0.6037669", "0.6031612", "0.60174817", "0.5989667", "0.5960635", "0.59584385", "0.5956511", "0.5948506", "0.5936665", "0.591363", "0.58977294", "0.5891589", "0.5889424", "0.5884047", "0.58821595", "0.5881148", "0.5875169", "0.58734107", "0.58723783", "0.5845125", "0.583674", "0.5834095", "0.5822758", "0.58012646", "0.58012646", "0.5799988", "0.5794752", "0.57881814", "0.57815844", "0.5779712", "0.57752925", "0.5775233", "0.57680297", "0.57627255", "0.57626605", "0.5754227", "0.5750032", "0.5749373", "0.5744915", "0.5740042", "0.57399386", "0.57362103", "0.5732662", "0.57322484", "0.5697283", "0.56936234", "0.56920433", "0.5687629", "0.56853265", "0.5684496", "0.56758916", "0.5673712", "0.56623834", "0.56565", "0.5650552", "0.5648711", "0.5645329", "0.5642264", "0.5633084", "0.562774", "0.5620624", "0.56184155", "0.5610457", "0.560787", "0.5606764", "0.5606084", "0.55980676", "0.5596282", "0.5592679", "0.55887896", "0.5578601", "0.5575377", "0.5571871", "0.5571546", "0.5569357", "0.5567672", "0.555931", "0.5556213", "0.5555775", "0.5553629", "0.55532384" ]
0.6550279
3
Objective function to maximize Moran fixations over N=4 matches
def objective_moran_win(me, other, turns, noise, repetitions, N=5, match_attributes=None): population = [] for _ in range(N): population.append(me.clone()) population.append(other.clone()) mp = axl.MoranProcess(population, turns=turns, noise=noise) scores_for_this_opponent = [] for _ in range(repetitions): mp.reset() mp.play() if mp.winning_strategy_name == str(me): scores_for_this_opponent.append(1) else: scores_for_this_opponent.append(0) return scores_for_this_opponent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_mindmatch(\n A: np.array, n_trim: int = None,\n n_match: int = 6, cois: list = None\n):\n # setting distance in the diagonal\n A[np.arange(len(A)), np.arange(len(A))] = -1000 \n\n # if conflict of interest (COIs) is available, add to the matrix\n cois = [(c1, c2) for (c1, c2) in cois\n if c1 <= len(A) and c2 <= len(A)] # make sure a given cois is in range\n A[np.array(cois)] = -1000\n\n # trimming affinity matrix to reduce the problem size\n if n_trim != 0:\n A_trim = []\n for r in range(len(A)):\n a = A[r, :]\n a[np.argsort(a)[0:n_trim]] = 0\n A_trim.append(a)\n A_trim = np.vstack(A_trim)\n else:\n A_trim = A\n\n # solving matching problem\n print('Solving a matching problem...')\n v, K, d = create_lp_matrix(A_trim, \n min_reviewers_per_paper=n_match, max_reviewers_per_paper=n_match,\n min_papers_per_reviewer=n_match, max_papers_per_reviewer=n_match)\n x_sol = linprog(v, K, d)['x']\n b = create_assignment(x_sol, A_trim)\n\n if (b.sum() == 0):\n print('Seems like the problem does not converge, try reducing <n_trim> but not too low!')\n else:\n print('Successfully assigned all the match!')\n return b", "def max_v_greedy():\n\n S1=Spectrum.Spectrum()\n S1.add_peak(50.4,16)\n S1.add_peak(50.7,36)\n S1.add_peak(74.8,25)\n S1.add_peak(96.2,23)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.6,49)\n S2.add_peak(50.9,25)\n S2.add_peak(74.6,9)\n S2.add_peak(102.4,17)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n g_score,g_peaks=similarity.cosine_score_greedy(S1,S2)\n\n assert score>=g_score, \"Maximum weighted method did not get higher score than greedy method\"\n assert peaks>=g_peaks, \"Maximum weighted method did not match more peaks than greedy method\"\n\n assert peaks==3, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,0.73), \"Incorrect score with greedy method\"\n\n assert g_peaks==2, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(g_score,0.57), \"Incorrect score with maximum weighted method\"", "def MAXED(N, sigma2, R, f_def, params):\n\n # pull out algorithm-specific parameters\n Omega = params['Omega']\n\n # create the function that we will maximize, Z\n def Z(lam, N, sigma2, R, f_def, Omega):\n \"\"\"A function, the maximization of which is equivalent to the\n maximization of \"\"\"\n\n A = - np.sum(f_def * np.exp(- np.sum((lam * R.T).T, axis=0)))\n B = - (Omega * np.sum(lam**2 * sigma2))**(0.5)\n C = - np.sum(N * lam)\n\n # negate because it's a minimization\n return - (A + B + C)\n\n # create a lambda\n lam = np.ones(len(N))\n\n # apply the simulated annealing to the Z\n mk = {'args': (N, sigma2, R, f_def, Omega)}\n lam = basinhopping(Z, lam, minimizer_kwargs=mk).x\n\n # back out the spectrum values from the lam\n return f_def * np.exp(-np.sum((lam * R.T).T, axis=0))", "def kuhn_munkres(G): # maximum profit bipartite matching in O(n^4)\n assert len(G) == len(G[0])\n n = len(G)\n mu = [None] * n # Empty matching\n mv = [None] * n\n lu = [max(row) for row in G] # Trivial labels\n lv = [0] * n\n for u0 in range(n):\n if mu[u0] is None: # Free node\n while True:\n au = [False] * n # Empty alternating tree\n av = [False] * n\n if improve_matching(G, u0, mu, mv, au, av, lu, lv):\n break\n improve_labels(G, au, av, lu, lv)\n return (mu, sum(lu) + sum(lv))", "def ransac(matches, kp1, kp2, s=4, threshold=3, maxIterations=2000, returnMatches=False, inlierRatio=0.05, ransacRatio=0.6):\n\n sizes_kp1 = 
[kp1[dt[0].queryIdx].size for dt in matches]\n sizes_kp2 = [kp1[dt[0].trainIdx].size for dt in matches]\n tup_matches_kp1 = [kp1[dt[0].queryIdx].pt for dt in matches]\n tup_matches_kp2 = [kp2[dt[0].trainIdx].pt for dt in matches]\n matches_kp1 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp1])\n matches_kp2 = np.array([[h for h in kp] + [1] for kp in tup_matches_kp2])\n\n cnt_matches = len(matches)\n\n max_matches = []\n max_p1, max_p2 = [], []\n max_p1_sizes, max_p2_sizes = [], []\n max_total = 0\n\n for iter in range(maxIterations):\n # Find Homography based on random sample\n data = random.sample(matches, s)\n data_p1 = np.array([matches_kp1[dt[0].queryIdx] for dt in data])\n data_p2 = np.array([matches_kp2[dt[0].trainIdx] for dt in data])\n homography = homomat(data_p1[:, :2], data_p2[:, :2])\n\n # Find P1 projection from the homography matrix\n projected_p2 = np.dot(homography, matches_kp1.transpose())\n projected_p2 = projected_p2[0:3] / projected_p2[2] # make sure w' is 1\n projected_p2 = projected_p2.transpose()\n\n # Initialize Current Matches\n current_matches = []\n current_p1, current_p2 = [], []\n current_p1_sizes, current_p2_sizes = [], []\n current_total = 0\n\n # Check for inliers and outliers for each matches\n for i, (match) in enumerate(matches):\n # normalize the error\n error = np.linalg.norm(matches_kp2[i] - projected_p2[i])\n\n # Check for inliers\n if error < threshold:\n current_matches.append([cv.DMatch(current_total, current_total, match[0].distance)])\n current_p1.append(matches_kp1[i][0:2])\n current_p2.append(matches_kp2[i][0:2])\n current_p1_sizes.append(sizes_kp1[i])\n current_p2_sizes.append(sizes_kp2[i])\n current_total += 1\n\n # If\n if current_total > max_total and current_total >= np.round(inlierRatio*cnt_matches):\n max_matches = current_matches\n max_p1 = current_p1\n max_p2 = current_p2\n max_p1_sizes = current_p1_sizes\n max_p2_sizes = current_p2_sizes\n max_total = current_total\n\n # # we are done in case we have enough inliers\n if current_total > cnt_matches * ransacRatio:\n break\n\n\n # Re-evaluate the Homography based on the best inliers\n max_homography = homomat(np.array(max_p1), np.array(max_p2))\n\n if returnMatches:\n max_kp1 = [cv.KeyPoint(d[0], d[1], max_p1_sizes[i]) for i, d in enumerate(max_p1)]\n max_kp2 = [cv.KeyPoint(d[0], d[1], max_p2_sizes[i]) for i, d in enumerate(max_p2)]\n return max_homography, max_matches, max_kp1, max_kp2\n\n return max_homography", "def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = 
hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result", "def minimal_weight_matching(X, Y, population_size= 12, nr_iterations=50,\n save_best_nr=1, setup_replacement = 'delete-all', norm_order=2, m_rate=0.05):\n # to save best members\n history = np.zeros((nr_iterations, save_best_nr))\n history_avg = np.zeros((nr_iterations, save_best_nr))\n best_tuple = [None, 0]\n # --------------------- Initialization\n population = initial_population(len(X), population_size)\n # todo\n stagnation_counter = 0\n for ite in range(nr_iterations):\n #print(population)\n # --------------------- Evaluation\n # get list of fitness scores for memebrs in population\n fitness_scores = evaluation(X, Y, population, norm_order)\n #print(fitness_scores)\n # get the values of the best save_best_nr members and save it in the\n # history\n maxmax = max(fitness_scores)\n if maxmax > best_tuple[1]:\n best_tuple = population[fitness_scores.index(maxmax)], maxmax\n history[ite] = sorted(fitness_scores)[-save_best_nr:]\n history_avg[ite] = np.mean(fitness_scores)\n # todo: if we dont move for 10 steps, half the mutation rate\n if history[ite] == history[ite-1]:\n stagnation_counter += 1\n if stagnation_counter == 10:\n m_rate *= 0.25\n stagnation_counter = 0\n else:\n stagnation_counter = 0\n if m_rate <= 0.001:\n m_rate = 0.05\n print(\"--- Iteration {} --> Best:[{}] ||| Avg:[{}] ||| Stagnation Level {} ||| MR {}\".format(ite+1, history[ite], history_avg[ite], stagnation_counter, m_rate))\n # --------------------- Selection\n # select members based on roulette_wheel_selection\n #selected_indx = roulette_wheel_selection(fitness_scores, population_size//2)\n selected_indx = simple_selection(fitness_scores, population_size//2)\n # given indexes, get the selecter members (POPULATION_SIZE//2)\n selected_members = population[selected_indx]\n #print(selected_members)\n # shuffle\n np.random.shuffle(selected_members)\n # create empty array to save children in\n children = np.empty((population_size//2, population.shape[1])).astype(int)\n\n # --------------------- Crossover\n for i in range(0, population_size//2, 2):\n # parent one is in selected_members in row 1, parent two in\n # row 2 ...\n off1, off2 = k_point_crossover(selected_members[i], selected_members[i+1])\n\n # --------------------- Mutation\n # save created children in children array\n children[i], children[i+1] = \\\n mutation(off1, p_m=m_rate), mutation(off2, p_m=m_rate)\n\n\n # ---------------------- Replacement\n population = replacement(population, children, mode=setup_replacement, n=children.shape[0],\n based_on_fitness=True, fitness_old=fitness_scores, fitness_new=evaluation(X, Y, children, norm_order)).astype(int)\n\n # add the best to the population at place 1 [not good...]\n population[0] = best_tuple[0]\n\n return population, best_tuple, history, history_avg", "def execMaxpTabu(y, w, threshold=100.0, maxit=2, tabuLength=5, typeTabu=\"exact\"):\n print(\"Running max-p-regions model (Duque, Anselin and Rey, 2010)\")\n print(\"Local search method: Tabu Search\")\n print(\"Number of areas: \", len(y))\n print(\"threshold value: \", threshold)\n distanceType = \"EuclideanSquared\"\n distanceStat = \"Centroid\";\n objectiveFunctionType = \"SS\";\n selectionType = \"Minimum\";\n numRegionsType = 
\"EndogenousThreshold\";\n\n # CONSTRUCTION PHASE 1: GROWING FEASIBLE REGIONS\n\n start = tm.time()\n\n # print w\n # print y\n\n am = AreaManager(w, y, distanceType)\n maxP = 0\n bestCandidates = {}\n for i in range(maxit):\n\n # print \"**** Iteration %d of %d ...\"%(i+1,maxit)\n\n rm = RegionMaker(am,\n distanceType = distanceType,\n distanceStat = distanceStat,\n selectionType = selectionType,\n objectiveFunctionType = objectiveFunctionType,\n numRegionsType = numRegionsType,\n threshold = threshold)\n numRegions = len(rm.feasibleRegions)\n rm.getObj()\n\n # print \"rm.feasibleRegions\",rm.feasibleRegions\n # print \"obj\",rm.getObj()\n\n if numRegions > maxP:\n bestCandidates = {}\n maxP = numRegions\n obj = rm.objInfo\n bestCandidates[obj] = rm.feasibleRegions\n if numRegions == maxP:\n obj = rm.objInfo\n if obj in bestCandidates:\n pass\n else:\n bestCandidates[obj] = rm.feasibleRegions\n else:\n pass\n\n # print \"bestCandidates\", bestCandidates\n\n ofValues = list(bestCandidates.keys())\n basicMemory = BasicMemory()\n while len(ofValues) >= 1:\n\n # RECREATE SOLUTION\n\n rm.resetNow()\n minOfValue = min(ofValues)\n ofValues.remove(minOfValue)\n partialSolution = bestCandidates[minOfValue]\n\n # print \"ASSIGNING ENCLAVES\"\n # print partialSolution\n\n regionId = 0\n for growReg in partialSolution:\n seedGrowReg = partialSolution[growReg][0]\n rm.assignSeeds(seedGrowReg, regionId)\n partialSolution[growReg].remove(seedGrowReg)\n if len(partialSolution[growReg]) >= 1:\n for areaInGrow in partialSolution[growReg]:\n rm.assignArea(areaInGrow, regionId)\n regionId += 1\n\n # CONSTRUCTION PHASE 2: ENCLAVES ASSIGNATION\n\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n rm.newExternal = set(rm.unassignedAreas)\n if len(rm.unassignedAreas) != 0:\n rm.constructionStage = \"enclaves\"\n while len(rm.unassignedAreas) != 0:\n rm.constructRegions()\n rm.objInfo = rm.getObjective(rm.region2Area)\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n\n # print \"ASSIGNED SOLUTION\"\n # print \"OBJ: \", rm.getObjective(rm.region2Area), rm.returnRegions()\n\n rm.calculateRegionValueThreshold()\n\n # LOCAL SEARCH\n\n rm.calcObj()\n convTabu = min(10,old_div(len(y),maxP)) # convTabu=230*numpy.sqrt(maxP)\n\n # print \"###ENTERING TABU\",rm.objInfo,rm.returnRegions()\n\n rm.tabuMove(tabuLength, convTabu = convTabu, typeTabu=typeTabu)\n rm.calcObj()\n\n # print \"***** AFTER TABU\",rm.objInfo,rm.returnRegions()\n # EVALUATE SOLUTION\n\n if rm.objInfo < basicMemory.objInfo:\n basicMemory.updateBasicMemory(rm)\n time = tm.time() - start\n Sol = basicMemory.regions\n Of = basicMemory.objInfo\n print(\"FINAL SOLUTION: \", Sol)\n print(\"FINAL OF: \", Of)\n output = { \"objectiveFunction\": Of,\n \"runningTime\": time,\n \"algorithm\": \"maxpTabu\",\n \"regions\": len(Sol),\n \"r2a\": Sol,\n \"distanceType\": distanceType,\n \"distanceStat\": distanceStat,\n \"selectionType\": selectionType,\n \"ObjectiveFuncionType\": objectiveFunctionType}\n print(\"Done\")\n return output", "def all_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.5,234/2)\n S2.add_peak(54.8,585/2)\n S2.add_peak(61.0,773/2)\n S2.add_peak(65.4,387/2)\n S2.add_peak(88.0,546/2)\n S2.add_peak(104.3,598/2)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n 
score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,1.0), \"Incorrect score with greedy method\"\n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(score,1.0), \"Incorrect score with maximum weighted method\"", "def standardComposition_Max(self):\n temp = np.fmax(self.rulesList[0], self.rulesList[1])\n for r in self.rulesList[2:]:\n temp = np.fmax(temp, r)\n\n self.fuzzy_output = temp", "def try_optimal_solution(module, n_of_players_with_vote):\n\n nonlocal all_lineups\n nonlocal final_field\n nonlocal malus\n\n # For each candidate\n for candidate in all_lineups:\n\n # We create the list where each player in the combination has only\n # 1 role\n candidates_single_role = all_lineups_single_role(candidate)\n\n # And test each of these combinations\n for new_cand in candidates_single_role:\n\n # If we find a solution we store the result\n if find_solution(new_cand, module, n_of_players_with_vote):\n final_field = new_cand\n break\n\n # And stop the iteration over the other condidates\n if final_field:\n malus = 0\n break", "def reduction_size_one_autarkies(self):\n done = False\n while not done:\n current_N = self.A.shape[1]\n max_projection = self.A @ np.ones(current_N, dtype=int)\n inprods = self.A.transpose() @ max_projection\n A_y = self.A.transpose() @ self.y\n indices = []\n vals = []\n for i in range(current_N):\n if A_y[i] <= 0.5 * self.m:\n indices.append(i)\n vals.append(0)\n elif A_y[i] >= inprods[i] - 0.5 * self.m:\n indices.append(i)\n vals.append(1)\n no_reductions = len(indices)\n for i in range(no_reductions - 1, -1, -1): #reverse order\n self.problem_reduction_single(indices[i], vals[i])\n if no_reductions == 0:\n done = True", "def epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon):\n\n new_policy = epsilon * np.ones((nS, nA)) / nA # = epsilon / m, where m is the number of Actions, nA\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: IF TWO ACTIONS HAVE THE SAME MAXIMUM Q VALUE, THEY MUST BOTH BE EXECUTED EQUALLY LIKELY.\n # THIS IS IMPORTANT FOR EXPLORATION. This might prove useful:\n # https://stackoverflow.com/questions/17568612/how-to-make-numpy-argmax-return-all-occurrences-of-the-maximum\n \n # print(\"new_policy = {0}\".format(new_policy))\n \n for s_t in range (0, nS):\n # print(\"old_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n # print(\"Q_value[{0}] = {1}\".format(s_t, Q_value[s_t]))\n Q_list = np.argwhere(Q_value[s_t] == np.amax(Q_value[s_t])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n # print(\"Q_list: \" + str(Q_list))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. 
Picking each index is equally likely.\n # print(\"max_Q: \" + str(max_Q))\n \n # A_star = new_policy[s_t][max_Q]\n # print(\"A_star: \" + str(A_star))\n \n new_policy[s_t][max_Q] += 1 - epsilon # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n # print(\"new_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n \n # for a_t in range (0, nA):\n # if a_t in Q_list:\n # new_policy[s_t][a_t] += (1 - epsilon) / len(Q_list)\n\n ############################\n # print(\"new_policy = {0}\".format(new_policy))\n return new_policy

def minimum_spanning_arborescence(sol):

def judge(self):\n self.bounds = 0.0\n self.best = self.lives[0]\n for life in self.lives:\n life.score = self.matchFun(life)\n self.bounds += life.score\n if self.best.score < life.score:\n self.best = life

def solve_astar(n_aliens, preference_matrix, time_start, max_seconds=60, noise=0.2):\r\n # Sum rows and columns for each alien. Aliens with lowest scores\r\n # must be seated at corner since it is not liked and does not like\r\n # others.\r\n row = np.sum(preference_matrix, axis=1)\r\n col = np.sum(preference_matrix, axis=0)\r\n scores = []\r\n for i in range(len(row)):\r\n scores.append(row[i] + col[i])\r\n worst_aliens = np.argsort(scores)\r\n party = Dinner(n_aliens, preference_matrix)\r\n # Start the candidate solutions.\r\n solution = {\"max_score\": -500}\r\n # Starting with the lowest score alien, sit them at a corner and try\r\n # seating the remaining aliens.\r\n for bad_alien in worst_aliens:\r\n party.seat_alien_at(bad_alien, 0)\r\n adj_locations = party.get_empty_adjacent_locations(0)\r\n solution = seat_next_alien(\r\n party=party,\r\n locations=adj_locations,\r\n time_start=time_start,\r\n max_seconds=max_seconds,\r\n solution=solution,\r\n noise=noise,\r\n )\r\n party.unseat_location(0)\r\n # Make sure to stop if time runs out.\r\n if (datetime.now() - time_start).total_seconds() >= max_seconds:\r\n break\r\n max_score = solution.pop(\"max_score\")\r\n return max_score, sorted(solution.items(), key=lambda x: x[1], reverse=False)

def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - 
SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved = False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p", "def nms(dets, thresh=0.5, mode=\"Union\"):\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n if mode == \"Union\":\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == \"Minimum\":\n ovr = inter / np.minimum(areas[i], areas[order[1:]])\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1]\n \n #step 2: filter the word space \n inds = range(len(x1))\n keep_ori = keep\n for k in keep_ori:\n inds_exp = list(set(inds) - set([k]))\n xx1 = np.maximum(x1[k], x1[inds_exp])\n yy1 = np.maximum(y1[k], y1[inds_exp])\n xx2 = np.minimum(x2[k], x2[inds_exp])\n yy2 = np.minimum(y2[k], y2[inds_exp])\n \n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[k] + areas[inds_exp] - inter)\n ind_max = np.argmax(ovr)\n if ovr[ind_max] > thresh:\n keep.append(inds_exp[ind_max])\n\n #step 3: merge \n retain = []\n for i in range(len(keep) - 1):\n xx1 = np.maximum(x1[keep[i]], x1[keep[i+1:]])\n yy1 = np.maximum(y1[keep[i]], y1[keep[i+1:]])\n xx2 = np.maximum(x2[keep[i]], x2[keep[i+1:]])\n yy2 = np.maximum(y2[keep[i]], y2[keep[i+1:]])\n\n \n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (areas[keep[i]] + areas[keep[i+1:]] - inter)\n inds = np.where(ovr<0.2)[0]\n for j in inds:\n retain.append(keep[i+1+j])\n return dets[retain]", "def fit_greedy(data, nnbr=10, threshold=0.05, refit=refit_pll):\n n,m = data.shape;\n L = np.zeros((n,n)) # initialize parameters\n scores = np.zeros(n) \n data = data.astype(int)\n for i in range(n):\n Ni = []\n while (len(Ni)<nnbr):\n Vi = (0*data[i,:] + sum(data[j,:]*(2**jj) for jj,j in enumerate(Ni))).astype(int)\n Vsz = int(Vi.max()+1)\n for j in range(n):\n if j==i or j in Ni: scores[j]=0.; continue\n pIJV = Factor( [Var(0,2),Var(1,2),Var(2,Vsz)] , 0.)\n # pIJV[data[i,:],data[j,:],Vi] += 1. 
# Test??\n for k in range(m): pIJV[data[i,k],data[j,k],Vi[k]] += 1.\n pV = pIJV.marginal([2]); pV /= (pV.sum()+1e-20);\n pIJV /= (pIJV.sum([0])+1e-20)\n scores[j] = ((pIJV.condition({0:1,1:1})-pIJV.condition({0:1,1:0})).abs()*pV).sum()\n jmax = int(np.argmax(scores))\n if scores[jmax] < threshold: break\n Ni.append(jmax)\n # TODO: prune back each list?\n #print(i,\" : \",Ni)\n L[i,Ni] = 1.\n L = L*L.T # \"and\" connectivity: keep only if edges (i,j) and (j,i) present?\n model = Ising(L);\n refit(model,data)\n return model", "def doMatching(self):\n\n self.alltrans = []\n curdir = os.getcwd()\n os.chdir(self.astromdir)\n\n for cat in self.projcats:\n self.logfile.write(\"Beginning match attempt for %s\" %cat)\n nobj = max(80,self.Nrefobjs)\n base = 'match %s 1 2 3 %s 1 2 3 identity recalc nobj=%d medtf medsigclip=2.7 '\\\n %(cat,self.GSCmatchin,nobj)\n converged=0\n mrad=4.5\n xsh=0.\n ysh=0.\n Retry=0\n while not converged and mrad<10 and Retry<11:\n ferr,mdict=self._runmatch(base,mrad,xsh,ysh)\n if ferr:\n # match not found, make radius bigger and retry\n mrad += 0.5\n Retry += 1\n self.logfile.write(\"Retry: %d ... set mrad: %s\" %(Retry,mrad))\n continue\n\n dx = mdict['dx']\n dy = mdict['dy']\n sigx = mdict['sigx']\n sigy = mdict['sigy']\n\n # if the extra shift is too big compared to the\n # match radius, update input shift and retry\n if max(abs(dx-xsh),abs(dy-ysh)) > min(0.4, mrad/10.):\n xsh=dx\n ysh=dy\n Retry += 1\n continue\n\n # if sigma seems too big, shrink mrad\n if (max(sigx,sigy) > 0.7 and mrad>2.9):\n mrad -= 0.5\n Retry += 1\n continue\n\n # otherwise, it looks like we have a good fit\n # but we want to tune it up with smaller matchrad\n # Don't increment retry in this final case.\n if mrad > 3.9 or (mrad>2.9 and max(sigx,sigy)>0.4):\n mrad = max(mrad-1,2.5)\n continue\n\n self.alltrans.append(mdict)\n converged=1\n self.logfile.write(\"\"\"%s trans:\n mdx,mdy = %.4f %.4f\n edx,edy = %.4f %.4f\n sigx,sigy = %.4f %.4f\n \"\"\"%(cat, mdict['dx'],mdict['dy'],mdict['dx_err'],mdict['dy_err'],\\\n mdict['sigx'],mdict['sigy']))\n if len(self.alltrans)<1:\n errtxt = \"WARNING: No Image successfully matched against extended GSC!\"\n self.logfile.write(errtxt)\n self.errorList.append((self.modName,errtxt))\n return -1\n\n os.chdir(curdir)\n # finally, choose best match transform and calculate\n # the new CRVAL's\n self._pickBestrans()\n\n return 0", "def fn(i, m):\n if i + 2*m >= len(piles): return prefix[-1] - prefix[i]\n ans = -inf \n for ii in range(1, 2*m+1): \n if i+ii < len(prefix): \n ans = max(ans, prefix[i+ii] - prefix[i] - fn(i+ii, max(m, ii)))\n return ans", "def max_power_candidate_solar_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.Q_S[g, y, s, t] * sum(m.x_c[g, j] for j in m.Y if j <= y)) <= 0", "def fn(mask, j):\n ans = 0 \n for i in range(m): \n if not mask & (1<<i): \n ans = max(ans, fn(mask^(1<<i), j-1) + score[i][j])\n return ans", "def maximum_bipartite_matching_optimization(G):\n # remove nodes with no edges as it messes up bipartite splitter\n left_set = [n for n in G.nodes if G.nodes[n]['bipartite'] == 0]\n model = GEKKO()\n variable_dict = {}\n for node in left_set:\n neighbors = list(G.neighbors(node))\n # print(\"Node is \", node)\n # print(\"Neighbors are \", neighbors)\n for neighbor in neighbors:\n # make variables\n # lower bound 0 upper bound 1\n variable_dict[(node, neighbor)] = model.Var(lb=0, ub=1, integer=True)\n\n tuples = [(node, i) for i in neighbors]\n # print(tuples)\n # Constraint that no node can be in matching twice\n if 
len(tuples) > 0:\n model.Equation(sum([variable_dict[tup] for tup in tuples]) <= 1.0)\n\n # Objective\n model.Obj(-1 * sum([variable_dict[variable] for variable in variable_dict]))\n\n # Integer Solver\n model.options.SOLVER = 1\n\n # Solve\n model.solve(disp=True)\n\n matching = {}\n # Add variables to matching\n for variable in variable_dict.items():\n print(variable)\n if int(variable[1][0]) == 1:\n matching[variable[0][0]] = variable[0][1]\n matching[variable[0][1]] = variable[0][0]\n return matching", "def solve(self):", "def find_N(config, data, imatch=I_DEAD, itarget=10):\n\n np.random.seed(1)\n # Parse parameters\n # get just 1 base case sample corresponding to average\n\n # results = np.zeros((len(m_prior), 13, len(fwd_args['time'])))\n\n i_mod = o_calmodes[imatch]\n i_obs = i_calmodes[imatch]\n obsdead = data[:, i_obs]\n time_delay = config['time_delay']\n obsdead_index = np.where(obsdead > itarget)[0][0] + time_delay\n found = False\n icount = 0\n ncountmax = 50\n nnew = 1000\n\n ndata = np.size(data[:, 0])\n m_prior, fwd_args = parse_config(config, ndata, mode='mean')\n m_prior = reshape_prior(m_prior)\n param = m_prior[0]\n\n while not found and icount < ncountmax:\n fwd_args['locked']['n'] = nnew\n res = base_seir_model(param, fwd_args)\n moddead = res[i_mod, :]\n moddead_index = np.where(moddead > itarget)\n\n print('moddead index, obsdead index ', moddead_index[0][0], obsdead_index)\n found = moddead_index[0][0] >= obsdead_index\n if not found:\n icount += 1\n nnew = fwd_args['locked']['n'] * 2\n fwd_args['locked']['n'] = nnew\n\n return nnew", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def solvePostNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n 
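# Attacker-side dummy padding: each attacker type also gets a zero reward and zero penalty entry, so attacking a dummy target is worth nothing to any attacker type.\n 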
_aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def score_solution(g, s):\n pass", "def large_neighborhood_search(facilities: list, towns: list, hazards: list, max_iteration: int = 50, max_improvement: int = 10) -> tuple:\r\n\r\n # counters\r\n max_attempts_per_swappable_facility = 10\r\n attempts_per_swappable_facility = 0\r\n improvements = 0\r\n iterations = 0\r\n\r\n # list of the opened facilities\r\n solutions = list(filter(lambda o_facility: o_facility.is_open, facilities.copy()))\r\n\r\n while True:\r\n\r\n # list of the closed facilities\r\n neighborhood = list(filter(lambda c_facility: not c_facility.is_open, facilities.copy()))\r\n\r\n # get the town with the maximum hazard perceived\r\n max_hazard_town, max_hazard = get_town_with_max_hazard(facilities, towns, hazards)\r\n\r\n # get a random opened facility to close\r\n swappable_facility = random.choice(solutions)\r\n\r\n while True:\r\n # get a random closed facility to test\r\n test_facility = random.choice(neighborhood)\r\n\r\n # check capacity constraint (ADMISSIBILITY)\r\n if test_facility.capacity >= swappable_facility.capacity:\r\n # destroy & repair the solution\r\n swappable_facility.is_open = False\r\n test_facility.is_open = True\r\n\r\n # calculate the new max hazard and the respective town\r\n new_max_hazard_town, new_max_hazard = get_town_with_max_hazard(facilities, towns, hazards)\r\n\r\n # check if the new 
max hazard is an improvement (SOLUTION)\r\n if new_max_hazard < max_hazard:\r\n # reassign the chosen facility to the towns whose were associated with the swappable_facility\r\n for town in towns:\r\n if town.facility == swappable_facility:\r\n town.facility = test_facility\r\n\r\n # reset the attempts per town counter because now there is a new solution to iterate on\r\n improvements += 1\r\n\r\n # rebuild the solution\r\n solutions = list(filter(lambda o_facility: o_facility.is_open, facilities.copy()))\r\n\r\n # exit the loop because a solution was found\r\n break\r\n\r\n else:\r\n # restore of the current solution's data structures\r\n swappable_facility.is_open = True\r\n test_facility.is_open = False\r\n\r\n # skip the chosen facility\r\n neighborhood.pop(neighborhood.index(test_facility))\r\n\r\n else:\r\n # skip the chosen facility\r\n neighborhood.pop(neighborhood.index(test_facility))\r\n\r\n # increment the counter for swappable facility attempts to swap, then check the limit and maybe break\r\n attempts_per_swappable_facility += 1\r\n # forbid the current swappable facility to be taken in a count for the next iteration\r\n if len(neighborhood) == 0 or attempts_per_swappable_facility == max_attempts_per_swappable_facility:\r\n solutions.pop(solutions.index(swappable_facility))\r\n break\r\n\r\n # if there are no more facilities in solution to be taken in account, exit the loop\r\n if len(solutions) == 0:\r\n break\r\n\r\n # check the number of improvement\r\n if improvements == max_improvement:\r\n break\r\n\r\n iterations += 1\r\n # check the number of iteration\r\n if iterations == max_iteration:\r\n break\r\n\r\n return iterations, improvements", "def dual_objective_expression_rule(_m):\r\n\r\n # Build limits\r\n t_1 = sum(- (m.mu_2[z, y] * m.SOLAR_BUILD_LIMITS[z]) - (m.mu_3[z, y] * m.WIND_BUILD_LIMITS[z]) - (\r\n m.mu_4[z, y] * m.STORAGE_BUILD_LIMITS[z]) for z in m.Z for y in m.Y)\r\n\r\n # Min power output\r\n t_2 = sum(\r\n m.sigma_1[g, y, s, t] * m.P_MIN[g] for g in m.G.difference(m.G_STORAGE) for y in m.Y for s in m.S for t\r\n in m.T)\r\n\r\n # Max power - existing generators\r\n t_3 = sum(\r\n - m.sigma_2[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_THERM for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Max power - existing wind\r\n t_4 = sum(\r\n - m.sigma_4[g, y, s, t] * m.Q_W[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_WIND for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing solar\r\n t_5 = sum(\r\n - m.sigma_6[g, y, s, t] * m.Q_S[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_SOLAR for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing hydro\r\n t_6 = sum(\r\n - m.sigma_8[g, y, s, t] * m.P_H[g, y, s, t] * (1 - m.F[g, y]) for g in m.G_E_HYDRO for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max charging power - existing storage\r\n t_7 = sum(\r\n - m.sigma_11[g, y, s, t] * m.P_IN_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max discharging power - existing storage\r\n t_8 = sum(\r\n - m.sigma_13[g, y, s, t] * m.P_OUT_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max energy - existing storage units\r\n t_9 = sum(\r\n - m.sigma_16[g, y, s, t] * m.Q_MAX[g] for g in m.G_E_STORAGE for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Min energy - interval end\r\n t_10 = sum(m.sigma_18[g, y, s] * m.Q_END_MIN[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Max 
energy - interval end\r\n t_11 = sum(- m.sigma_19[g, y, s] * m.Q_END_MAX[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Ramp-up constraint - generators\r\n t_12 = sum(\r\n - m.sigma_20[g, y, s, t] * m.RR_UP[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-up constraint - initial power output - generators\r\n t_13 = sum(\r\n - m.sigma_20[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y\r\n for s in m.S)\r\n\r\n # Ramp-down constraint - generators\r\n t_18 = sum(\r\n - m.sigma_23[g, y, s, t] * m.RR_DOWN[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-down constraint - initial power output - generators\r\n t_19 = sum(\r\n m.sigma_23[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for\r\n s in m.S)\r\n\r\n # Min powerflow\r\n t_24 = sum(m.sigma_27[l, y, s, t] * m.POWERFLOW_MIN[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Max powerflow\r\n t_25 = sum(\r\n - m.sigma_28[l, y, s, t] * m.POWERFLOW_MAX[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Demand\r\n t_26 = sum(m.lamb[z, y, s, t] * m.DEMAND[z, y, s, t] for z in m.Z for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Initial storage unit energy\r\n t_27 = sum(m.zeta_1[g, y, s, m.T.first()] * m.Q0[g, y, s] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n return (t_1 + t_2 + t_3 + t_4 + t_5 + t_6 + t_7 + t_8 + t_9 + t_10 + t_11 + t_12 + t_13 + t_18 + t_19 + t_24\r\n + t_25 + t_26 + t_27)", "def solveExCompact(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n #\n # NEEDS ATTACKER AVG PER ATTACKER\n #\n \"\"\"In this game the attacker and defender reason ex-ante\n (they choose to follow signals or not before a signal is sent).\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n omegaKeys = [(t,d,tPrime,lam) for t in targetRange for d in defenders for tPrime in targetRange for lam in aTypes]\n omegaKeys2 = [(t,lam) for t in targetRange for lam in aTypes]\n\n # Build the model\n model = Model('ExAnteWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, name=\"w\")\n w2 = model.continuous_var_dict(keys=omegaKeys2, lb=0, name=\"w2\")\n objectiveFunction = sum([q[lam] * sum([w[t,d,t,lam] * _dRewards[d][t] for t in targetRange for d in defenders]) for lam in aTypes]) \\\n + sum([q[lam] * sum([(w2[t,lam] - sum([w[t,d,t,lam] for d in defenders])) * sum([_dPenalties[d][t] for d in defenders]) for t in targetRange]) for lam in aTypes]) \\\n + sum([q[lam] * sum([sum([w[t,d,tPrime,lam] for t in targetRange]) * _dCosts[d][tPrime] for tPrime in targetRange for d in defenders]) for lam in aTypes])\n\n # Define the constraints\n # Attacker\n attackerConstraints = [sum([w[t,d,t,lam] * _aPenalties[lam][t] for t in targetRange for d in defenders]) \\\n + sum([(w2[t,lam] - sum([w[t,d,t,lam] for d in defenders])) * 
_aRewards[lam][t] for t in targetRange]) \\\n >= sum([w[t,d,tPrime,lam] * _aPenalties[lam][tPrime] for t in targetRange for d in defenders]) \\\n + sum([(w2[t,lam] - sum([w[t,d,tPrime,lam] for d in defenders])) * _aRewards[lam][tPrime] for t in targetRange]) \\\n for lam in aTypes for tPrime in targetRange]\n # Defender\n defenderConstraints = [sum([q[lam] * sum([w[t,d,t,lam] * _dRewards[d][t] for t in targetRange]) for lam in aTypes]) \\\n + sum([q[lam] * sum([(w2[t,lam] - sum([w[t,d,t,lam] for d in defenders])) * _dPenalties[d][t] for t in targetRange]) for lam in aTypes]) \\\n + sum([q[lam] * sum([sum([w[t,d,tPrimePrime,lam] for t in targetRange]) * _dCosts[d][tPrimePrime] for tPrimePrime in targetRange]) for lam in aTypes]) \\\n >= sum([q[lam] * w2[tPrime,lam] * _dRewards[d][tPrime] for lam in aTypes]) \\\n + sum([q[lam] * sum([sum([w[t,dPrime,t,lam] for dPrime in defenders if dPrime != d]) * _dRewards[d][t] for t in targetRange if t != tPrime]) for lam in aTypes]) \\\n + sum([q[lam] * sum([w[t,d,t,lam] * _dPenalties[d][t] for t in targetRange if t != tPrime]) for lam in aTypes]) \\\n + sum([q[lam] * sum([(w2[t,lam] - sum([w[t,dPrime,t,lam] for dPrime in defenders])) * _dPenalties[d][t] for t in targetRange if t != tPrime]) for lam in aTypes]) \\\n + _dCosts[d][tPrime] \\\n for d in defenders for tPrime in targetRange]\n # Proposition constraints\n p11Constraints = [sum([w2[t,lam] for t in targetRange]) == 1 for lam in aTypes]\n p12Constraints = [sum([w[t,d,tPrime,lam] for tPrime in targetRange]) == w2[t,lam] for lam in aTypes for d in defenders for t in targetRange]\n p13Constraints = [sum([w[t,d,tPrime,lam] for d in defenders]) <= w2[t,lam] for lam in aTypes for tPrime in targetRange for t in targetRange]\n # Add the constraints\n attackerConstraints = model.add_constraints(attackerConstraints)\n defenderConstraints = model.add_constraints(defenderConstraints)\n p11Constraints = model.add_constraints(p11Constraints)\n p12Constraints = model.add_constraints(p12Constraints)\n p13Constraints = model.add_constraints(p13Constraints)\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n print(model.get_solve_status())\n return model.solution.get_objective_value(), model, None

def no_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.2,234)\n S2.add_peak(53.8,585)\n S2.add_peak(61.3,773)\n S2.add_peak(66.2,387)\n S2.add_peak(88.1,546)\n S2.add_peak(103.9,598)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==0, \"Incorrect number of peaks matched with maximum weighted method\"\n assert score==0, \"Incorrect score with maximum weighted method\"\n \n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==0, \"Incorrect number of peaks matched with greedy method\"\n assert score==0, \"Incorrect score with greedy method\"

def maximize(self, budget, optimizer):\n\n\t\tpass

def greedy(self, u=None, niters_max=1000):\n\n niters = 1\n if (u is None):\n u = self.u\n\n utmp = np.copy(u)\n iuncovered = ~np.any(self.a[:,self.s], axis=1)\n \n score = np.zeros(self.ncols)\n while (np.count_nonzero(iuncovered) > 0) and (niters <= niters_max):\n # It's 5 times faster without indexing, the advantage is made possible by csc_matrix.dot\n mu = (self.a_csc.dot((iuncovered).astype(int))).astype(float) \n 
mu[mu<=_smallnumber] = _smallnumber\n\n utmp[~iuncovered] = 0\n gamma = (self.c - self.a_csc.dot(utmp))\n select_gamma = (gamma>=0)\n\n if (np.count_nonzero(select_gamma)>0):\n score[select_gamma] = gamma[select_gamma]/mu[select_gamma]\n if (np.count_nonzero(~select_gamma)>0):\n score[~select_gamma] = gamma[~select_gamma]*mu[~select_gamma]\n\n inewcolumn = (np.nonzero(~self.s)[0])[np.argmin(score[~self.s])]\n self.s[inewcolumn] = True\n iuncovered = ~np.logical_or(~iuncovered, self.a[:,inewcolumn])\n niters = niters+1\n if (niters == niters_max): \n warnings.warn(\"Iteration in Greedy reaches maximum = {0}\".format(niters_max))\n return self.total_cost

def mr(A, n_iterations, stop=False):\n assert len(A.sizes) == 2\n assert A.sizes[0] == A.sizes[1]\n M = A.same_shape()\n n = A.sizes[0]\n @for_range(n)\n def _(i):\n e = sfix.Array(n)\n e.assign_all(0)\n e[i] = 1\n M[i] = solve_linear(A, e, n_iterations, stop=stop)\n return M.transpose()

def endgame_score_connectfour_faster(board, is_current_player_maximizer) :\n chains=sorted(board.get_all_chains(), key=lambda x: len(x))\n if len(chains[-1])>=4:\n score = 1000+23*(42-board.count_pieces());\n if not is_current_player_maximizer:\n return score\n else:\n\n return -score\n return 0;

def fn(i, j, mv):\n if not (0 <= i < m and 0 <= j < n): return 1 \n if mv == 0: return 0\n return (fn(i-1, j, mv-1) + fn(i, j-1, mv-1) + fn(i, j+1, mv-1) + fn(i+1, j, mv-1)) % 1_000_000_007

def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # this matrix gives the value of moving a piece onto a chosen cell\r\n # it is added to the average of the weights accumulated in the weights list\r\n\r\n # the middle of the board is the most vulnerable spot\r\n # while the side columns are safe, and the back row turns a piece into a king\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # count the discs of each colour\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # if it is a normal piece\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # how close the piece is to becoming a king (number of rows on the board - how many it still has until the last row)\r\n\r\n # the closer the piece gets to becoming a king, the higher the score (black becomes a king at i=0, white at i=7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # if it is a king\r\n weights[0] += 8\r\n\r\n # how close the king is to the other pieces\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # find an opponent piece on the diagonal in direction d, if one exists\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # pieces the player can capture; a king is worth a higher score\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # pieces that can be captured; likewise, if it is a king the score is reduced more\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # add the cell value to the averaged sum so that, on equal scores,\r\n # the AI picks the piece that positions me better\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s

def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass

def solvePostOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains a dummy target for defenders and attackers\"\"\"\n # Add the extra dummy target\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n placements = getPlacements(defenders, targetNumWithDummies)\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('PrimalWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[s,a,lam] * defenderSocialUtility(s,a,defenders,_dRewards,_dCosts,_dPenalties) for s in placements for a in attackerActions]) for lam in aTypes])\n c1 = [sum([w[s,a,lam] * aUtility(s,a,lam,_aPenalties,_aRewards) for s in placements]) \\\n >= sum([w[s,a,lam] * aUtility(s,b,lam,_aPenalties,_aRewards) for s in placements])\n for lam in aTypes for a in attackerActions for b in attackerActions if a != b]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints([sum([q[lam] * sum([w[s,a,lam] * utilityM(d,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[s,a,lam] * utilityM(e,s,a,m,_dRewards,_dPenalties,_dCosts) for a in attackerActions for s in placements if s[m] == d]) for lam in aTypes])\n for m in defenders for d in targetRange for e in targetRange if d!=e])\n c3 = model.add_constraints([sum([w[(s,a,lam)] for s in placements for a in attackerActions]) == 1 for lam in aTypes])\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam 
= k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n\n return utilityPerDefender, utilityPerAttacker, None", "def evaluate_match_quality(matches, thresh=.7):\n marr = np.zeros((len(matches), 2))\n quality = np.zeros((len(matches), 1))\n for i in range(len(matches)):\n marr[i,0] = matches[i][0].distance\n marr[i,1] = matches[i][1].distance\n quality[i] = marr[i, 0] < thresh*marr[i, 1]", "def mewe(M,N,m,n,target):\r\n\t\r\n\toutput = []\r\n\tfor k in range(0,M):\r\n\t\tprint(k)\r\n\t\t# Allocate space for output\r\n\t\tmewe_store = np.zeros((len(n),target['thetadim']))\r\n\t\tmewe_runtimes = np.zeros(len(n))\r\n\t\tmewe_evals = np.zeros(len(n))\r\n\t\t\r\n\t\t# generate all observations and sets of randomness to be used\r\n\t\t\r\n\t\tobs_rand = target['generate_randomness'](np.max(n))\r\n\t\tobs_all = target['observation'](target['true_theta'], false_theta, epsilon,obs_rand)\r\n\t\t\r\n\t\t# generate the synthetic randomness, sort.\r\n\t\t\r\n\t\trandomness = [target['generate_randomness'](m) for i in range(N)]\r\n\t\t\r\n\t\tfor i in range(0,len(n)):\r\n\t\t\t# subset observations and sort\r\n\t\t\tobs = obs_all[:n[i]]\r\n\t\t\tsort_obs = np.sort(obs)\r\n\t\t\tsort_obs_mult = np.repeat(sort_obs, m / n[i], axis = 0)\r\n\t\t\t\r\n\t\t\t# Define the objective to be minimized to find the MEWE\r\n\t\t\t\r\n\t\t\tdef obj1(theta):\r\n\t\t\t\tif(theta[1] < 0 ):\r\n\t\t\t\t\tout = 10e6\r\n\t\t\t\telse :\r\n\t\t\t\t\twass_dists = [target['dist'](sort_obs_mult, np.sort(target['robservation'](theta, x))) for x in randomness]\r\n\t\t\t\t\tout = np.mean(wass_dists)\r\n\t\t\t\t\r\n\t\t\t\treturn out\r\n\t\t\t\t\r\n\t\t\t# Optimization\r\n\t\t\t\r\n\t\t\tt_mewe = time.process_time()\r\n\t\t\tmewe = minimize(fun = obj1, x0 = true_theta)\r\n\t\t\tt_mewe = time.process_time() - t_mewe\r\n\t\t\t\r\n\t\t\t# Save the results\r\n\t\t\tmewe_store[i] = mewe.x\r\n\t\t\tmewe_runtimes[i] = t_mewe\r\n\t\t\tmewe_evals[i] = mewe.nit\r\n\t\t\r\n\t\toutput_cbind = np.c_[mewe_store, mewe_runtimes, mewe_evals, n, np.arange(len(n))]\r\n\t\toutput.append(output_cbind)\r\n\t\t\r\n\treturn output", "def Repeater(algorithm, runs, nationtxt, schemeIn):\n\n scores = {}\n\n # Make sure appropriate range is used for scores\n\n scoreRange = range(0, 10000)\n\n # score range has to be between these two numbers\n for i in scoreRange:\n scores.update({i : 0})\n\n #~ print \"Running \" + str(algorithm)[0:-18] + \"> \" + str(runs) + \" times...\\n\"\n\n\n minScore = 10**40\n\n\n scheme = schemeIn\n avg = (scheme[0] + scheme[1] + scheme[2] + scheme[3] + scheme[4] + scheme[5] + scheme[6]) / 7.\n p0 = (scheme[0] - avg)**2\n p1 = (scheme[1] - avg)**2\n p2 = (scheme[2] - avg)**2\n p3 = (scheme[3] - avg)**2\n p4 = (scheme[4] - avg)**2\n p5 = (scheme[5] - avg)**2\n p6 = (scheme[6] - avg)**2\n var = (p0 + p1 + p2 + p3 + p4 + p5 + p6) / 7.\n sDev = var**0.5\n\n\n q0 = scheme[1] - scheme[0]\n q1 = scheme[2] - scheme[1]\n q2 = scheme[3] - scheme[2]\n q3 = scheme[4] - scheme[3]\n q4 = scheme[5] - scheme[4]\n q5 = scheme[6] - scheme[5]\n\n for i in range(runs):\n nation = algorithm(nationtxt)\n\n score = randScoreFunction(nation, scheme)\n scores[score] += 1\n\n # keep track of best scores and nation\n if score < minScore:\n minScore = score\n bestNation = nation\n\n maxFreq = 0\n\n scoreCount = 0\n\n for score in scores:\n if scores[score] > maxFreq:\n maxFreq = scores[score]\n maxFreqScore = 
score\n if score == minScore:\n minScoreFreq = scores[score]\n if scores[score] >= 1:\n scoreCount += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in bestNation:\n\n if bestNation[province][1] == 1:\n one += 1\n if bestNation[province][1] == 2:\n two += 1\n if bestNation[province][1] == 3:\n three += 1\n if bestNation[province][1] == 4:\n four += 1\n if bestNation[province][1] == 5:\n five += 1\n if bestNation[province][1] == 6:\n six += 1\n if bestNation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if scheme[3] != scheme[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n\n return minScore, minScoreFreq, scheme, fivePlus, fivePlusNoDuplicate, usedTrans, scoreCount, sDev, q0, q1, q2, q3, q4, q5, avg", "def _goalFunction(D, G, U, K, n, m):\n J = 0\n for k in range(K):\n for i in range(n):\n u = U[i][k] ** m\n d = _extendedDissimilarity(D, G[k], i)\n J += u * d\n return J", "def solve(self):\n while self.counter[-1] != len(self.sequences[-1]) + 1:\n basepair = self.generatebasepairs(self.counter) # Get the combination for the current coordination\n moves = self.generatemoves(basepair) # Get all possible ways to get to this current coordination\n\n maxscore = -100000000 # set the maxscore to a value which is always lower than possible scores\n bestmove = None\n\n # FOr each move calculate score\n for move in moves:\n coordinates = self.generatecoordinates(move, self.counter) # generate the origin coordinate for the current move\n score = self.retrievematrixelement(coordinates).score # Get the score at the origin coordinate\n pairs = self.getallpairs(move) # Get all pairs possible for the current move\n scores = [self.scorePair(u) for u in pairs] # Generate scores for all pairs\n newscore = score + sum(scores) # Add generated scores to origin score\n if newscore > maxscore:\n maxscore = newscore\n bestmove = coordinates\n\n self.enterelement(self.counter, Score(bestmove, maxscore))\n self.increase()", "def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None", "def auxmaxrho1(x,m_ind):\n \n cc_sum = auxmaxrho2(x,m_ind) \n f = cc_sum + auxmax_cc_piece(x,0,m_ind) \n cfg.max_piece[m_ind] = 0 # max_piece should be ok here. 
We do not solve aux and real problem at the same time.\n \n for k_ind in range(1,cfg.nomax):\n \n f_tmp = cc_sum + auxmax_cc_piece(x,k_ind,m_ind) \n if f_tmp > f: \n f = f_tmp\n cfg.max_piece[m_ind] = k_ind\n \n return f

def compute_maximisation( self, X, Z, O ):\n\n raise NotImplementedError

def mewe_misspecified(M,N,m,n,target):\r\n\toutput = []\r\n\tfor k in tqdm(range(0,M)):\r\n\t\t# Allocate space for output\r\n\t\tmewe_store = np.zeros((len(n),target['thetadim']))\r\n\t\tmewe_runtimes = np.zeros(len(n))\r\n\t\tmewe_evals = np.zeros(len(n))\r\n\t\t\r\n\t\t# generate all observations and sets of randomness to be used\r\n\t\t\r\n\t\tif target[\"observed_law\"] == \"Gamma\":\r\n\t\t\tobs_all = np.random.gamma(true_theta[0], true_theta[1],np.max(n))\r\n\t\telif target[\"observed_law\"] == \"Cauchy\":\r\n\t\t\tobs_all = np.random.standard_cauchy(np.max(n))\r\n\t\telse : \r\n\t\t\treturn(\"Not implemented law\")\r\n\t\t# the line above was modified to generate a contaminated sample\r\n\t\t\r\n\t\t# generate the synthetic randomness, sort.\r\n\t\t\r\n\t\trandomness = [target['generate_randomness'](m) for i in range(N)]\r\n\t\t\r\n\t\tfor i in range(0,len(n)):\r\n\t\t\t# subset observations and sort\r\n\t\t\tobs = obs_all[:n[i]]\r\n\t\t\tsort_obs = np.sort(obs)\r\n\t\t\tsort_obs_mult = np.repeat(sort_obs, m / n[i], axis = 0)\r\n\t\t\t\r\n\t\t\t# Define the objective to be minimized to find the MEWE\r\n\t\t\t\r\n\t\t\tdef obj1(theta):\r\n\t\t\t\tif(theta[1] < 0 ):\r\n\t\t\t\t\tout = 10e6\r\n\t\t\t\telse :\r\n\t\t\t\t\twass_dists = [target['dist'](sort_obs_mult, np.sort(target['simulation'](theta, x))) for x in randomness]\r\n\t\t\t\t\tout = np.mean(wass_dists)\r\n\t\t\t\t\r\n\t\t\t\treturn out\r\n\t\t\t\t\r\n\t\t\t# Optimization\r\n\t\t\t\r\n\t\t\tt_mewe = time.process_time()\r\n\t\t\tmewe = minimize(fun = obj1, x0 = true_theta)\r\n\t\t\tt_mewe = time.process_time() - t_mewe\r\n\t\t\t\r\n\t\t\t# Save the results\r\n\t\t\tmewe_store[i] = mewe.x\r\n\t\t\tmewe_runtimes[i] = t_mewe\r\n\t\t\tmewe_evals[i] = mewe.nit\r\n\t\t\r\n\t\toutput_cbind = np.c_[mewe_store, mewe_runtimes, mewe_evals, n, np.arange(len(n))]\r\n\t\toutput.append(output_cbind)\r\n\t\t\r\n\treturn output

def motor_inferencia(x):\n\n # Define the fuzzy operators\n AND = min # Also called conjunction or intersection\n OR = max # Also called disjunction or union\n # FUERZA = min # Choose the conjunction. The disjunction can also be used\n\n # --------------------------------------------------------\n # - COMPUTE THE MEMBERSHIP VALUE OF THE ANTECEDENTS -\n # --------------------------------------------------------\n\n # Store the antecedents in these variables\n A_MN = []\n A_N = []\n A_Z = []\n A_P = []\n A_MP = []\n\n # Row 0: P is MN and\n A_MP.append(AND(x[0], x[5])) # V is MN # then F is MP\n A_MP.append(AND(x[0], x[6])) # V is N # then F is MP\n A_MP.append(AND(x[0], x[7])) # V is Z # then F is MP\n A_MP.append(AND(x[0], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[0], x[9])) # V is MP # then F is MP\n\n # Row 1: P is N and\n A_MN.append(AND(x[1], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[1], x[6])) # V is N # then F is MN\n A_N.append(AND(x[1], x[7])) # V is Z # then F is N\n A_N.append(AND(x[1], x[8])) # V is P # then F is N\n A_N.append(AND(x[1], x[9])) # V is MP # then F is N\n\n # Row 2: P is Z and\n A_MN.append(AND(x[2], x[5])) # V is MN # then F is MN\n A_N.append(AND(x[2], x[6])) # V is N # then F is N\n A_Z.append(AND(x[2], x[7])) # V is Z # then F is Z\n A_P.append(AND(x[2], x[8])) # V is P # then F is P\n A_MP.append(AND(x[2], x[9])) # V is MP # then F is MP\n\n # Row 3: P is P and\n A_P.append(AND(x[3], x[5])) # V is MN # then F is P\n A_P.append(AND(x[3], x[6])) # V is N # then F is P\n A_P.append(AND(x[3], x[7])) # V is Z # then F is P\n A_MP.append(AND(x[3], x[8])) # V is P # then F is MP\n A_MP.append(AND(x[3], x[9])) # V is MP # then F is MP\n\n # Row 4: P is MP and\n A_MN.append(AND(x[4], x[5])) # V is MN # then F is MN\n A_MN.append(AND(x[4], x[6])) # V is N # then F is MN\n A_MN.append(AND(x[4], x[7])) # V is Z # then F is MN\n A_MN.append(AND(x[4], x[8])) # V is P # then F is MN\n A_MN.append(AND(x[4], x[9])) # V is MP # then F is MN\n\n # ------------------------------------------------------------------------------------------\n # - COMBINE THE ANTECEDENTS AND RESOLVE THE IMPLICATION -\n # ------------------------------------------------------------------------------------------\n\n # [ F_MN, F_N, F_Z, F_P, F_MP ]\n F = [OR(A_MN), OR(A_N), OR(A_Z), OR(A_P), OR(A_MP)]\n\n return F

def _solve_puzzle_parts(self):\n reindeer = self._parse_input()\n race_points = {name: 0 for name in reindeer}\n max_distance = 0\n for time_elapsed in range(1, self.time_limit + 1):\n distances = {\n name: Solver._get_distance(reindeer[name], time_elapsed)\n for name in reindeer\n }\n max_distance = max(distances.values())\n race_points.update({\n name: race_points[name] + 1\n for name in race_points if distances[name] == max_distance\n })\n return max_distance, max(race_points.values())

def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit

def objective_score(me, other, turns, noise, repetitions, match_attributes=None):\n match = axl.Match((me, other), turns=turns, noise=noise,\n match_attributes=match_attributes)\n if not match._stochastic:\n repetitions = 1\n scores_for_this_opponent = []\n\n for _ in range(repetitions):\n match.play()\n scores_for_this_opponent.append(match.final_score_per_turn()[0])\n return scores_for_this_opponent

def milp(mdp, maxV, zeroConstraints=()):\n m = Model()\n m.setParam('OutputFlag', False)\n\n # convert notation to previous 
implementation\n S = mdp.S\n A = mdp.A\n R = mdp.rFuncs\n psi = mdp.psi\n T = mdp.T\n alpha = mdp.alpha\n gamma = mdp.gamma\n\n # useful constants\n rLen = len(R)\n M = 10000 # a large number\n Sr = range(len(S))\n Ar = range(len(A))\n\n # decision variables\n x = m.addVars(len(S), len(A), lb=0, name='x')\n z = m.addVars(rLen, vtype=GRB.BINARY, name='z')\n y = m.addVars(rLen, name='y')\n\n # constraints on y\n for i in range(rLen):\n m.addConstr(y[i] <= sum([x[s, a] * R[i](S[s], A[a]) for s in Sr for a in Ar]) - maxV[i] + (1 - z[i]) * M)\n m.addConstr(y[i] <= z[i] * M)\n\n # constraints on x (valid occupancy)\n for sp in Sr:\n m.addConstr(sum(x[s, a] * ((s == sp) - gamma * T(S[s], A[a], S[sp])) for s in Sr for a in Ar) == alpha(S[sp]))\n\n # == constraints\n for consIdx in range(len(zeroConstraints)):\n m.addConstr(sum(x[S.index(s), A.index(a)] for s, a in zeroConstraints[consIdx]) == 0)\n # obj\n m.setObjective(sum([psi[i] * y[i] for i in xrange(rLen)]), GRB.MAXIMIZE)\n\n m.optimize()\n\n pi = {(S[s], A[a]): x[s, a].X for s in Sr for a in Ar}\n\n if m.status == GRB.Status.OPTIMAL:\n # return feasible being true and the obj value, opt pi\n # .X attribute is to retrieve the value of the variable\n return pi\n else:\n # simply return infeasible\n raise Exception('milp problem optimal solution not found' + m.status)", "def optimalize(): \n start = time()\n max = 0\n maxn=2\n maxm=3\n check = [(n,m) for n in range(24,30) for m in range(3,20)]\n dict = {}\n print \"start optimalization of: bigram-features,uniqueness\"\n for n,m in check:\n score=0\n print \">lem>>n(uniqueness):\"+str(n)\n print \">lem>>m(commonness):\"+str(m)\n wrds = common_but_unique(ngrams_dict(1,authors,compactcorpus,n,False),m)\n bigrams = common_but_unique(ngrams_dict(2,authors,compactcorpus,n,False),m)\n trigrams = common_but_unique(ngrams_dict(3,authors,compactcorpus,n,False),m)\n #pos_feat = [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)]\n pos_feat = [\"bi:(\"+str(bi[0])+\",\"+str(bi[1])+\")>\"+str(num) for bi in bigrams for num in range(0,1)] + [\"wrd:\"+wrd+\">\"+str(num) for wrd in wrds for num in range(0,1)] + [\"tri:(\"+str(tri[0])+\",\"+str(tri[1])+\",\"+str(tri[2])+\")>\"+str(num) for tri in trigrams for num in range(0,1)]\n\n print \"number of features AFTER selection:\" + str(len(pos_feat))\n for x in range(0,4):\n data = split_train_test_data(authors, corp,45)\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n train_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"train\"]]\n test_set = [(feat_dict(pos_feat,d), c) for (d, c) in data[\"test\"]]\n classifier1 = NaiveBayesClassifier.train(train_set)\n acc = nltk.classify.accuracy(classifier1,test_set)\n print \"accuracy:\"+str(acc)\n score +=acc\n print \"time elapsed: \"+str(time()-start)\n print \"score(\" + str(n) +\")=\"+str(score/4)\n classifier1.show_most_informative_features(8)\n dict[(n,m)]=(score/4)\n if(score/4)>max:\n max = (score/4)\n maxn =n\n maxm = m\n print \"max score=\"+str(max)\n print \"where n = \"+str(maxn)\n print \"where m = \"+str(maxm)\n print \"time:\"+str(time()-start)\n writetofile(dict,\"optimalizedict_commonwrdsandbigrams_latest_lem.pkl\")", "def main():\n # Instantiate a mixed-integer solver.\n solver = pywraplp.Solver('SolveAssignmentProblemMIP',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n # Number of teams (h and i)\n n = 9\n # Number of rooms (j)\n r = 3\n # Number of timeslots (k)\n t = 4\n # Number of matches\n m = 4\n\n # List of teams\n teams = [i for i in 
range(9)]\n\n x = {}\n\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if (h == i):\n x[h, i, j, k] = solver.IntVar(0, 0, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n else:\n x[h, i, j, k] = solver.IntVar(0, 1, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n\n # # Objective\n # solver.Minimize(solver.Sum([cost[i][j] * x[i,j] for i in range(num_workers)\n # for j in range(num_tasks)]))\n\n # Constraints\n\n # 2 Ensures that the matrix is the same across the diagonal\n for h in range(n):\n for j in range(r):\n for k in range(t):\n solver.Add((x[h, i, j, k] == x[i, h, j, k]))\n\n # 3 No pair plays each other more than once\n for h in range(n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t)]) <= 1)\n\n # 4 No team can be in more than one place at a time\n for h in range(n):\n for k in range(t):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n) for j in range(r)]) <= 2)\n\n # 5 Each team plays exactly m matches\n for i in range(n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t) for h in range(n)]) == 2 * m)\n\n # 6 Need 3 teams in a room at each timeslot\n for j in range(r):\n for k in range(t - 1):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n - 1) for h in range(i + 1, n)]) == 3)\n\n # Need 3 teams in a room at each timeslot\n for g in range(n - 2):\n for h in range(g + 1, n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum(\n [x[g, h, j, k] + x[h, i, j, k] + x[g, i, j, k] for j in range(r) for k in range(t)]) != 2)\n\n sol = solver.Solve()\n\n print('Total cost = ', solver.Objective().Value())\n print()\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if x[h, i, j, k].solution_value() > 0:\n print('teams %i,%i assigned to room %i at time %i.' % (h, i, j, k))\n\n print()\n print(\"Time = \", solver.WallTime(), \" milliseconds\")", "def solve_bruteforce(self):\n max_value = -1\n for z in range(0, self.k):\n max_value = -1\n max_index = -1\n for i, v in enumerate(self.numbers):\n if v > max_value:\n max_index = i\n max_value = v\n del self.numbers[max_index]\n\n return max_value", "def ts_max(n_steps, arms, E):\n K = len(arms)\n reward = np.zeros(n_steps)\n\n # Initialize X : total rewards per arm\n # and O : number of draws per arm\n X = np.zeros(K)\n O = np.zeros(K)\n D = np.zeros(K)\n\n # Do n_steps iterations of the algorithm\n for t in range(n_steps):\n theta = np.random.beta(X+1,O-X+1)\n\n # Choose the arm that maximizes theta\n i = np.argmax(theta)\n\n # Consider j, the best neighbor of i\n Y = X/O\n neighbors_i = np.where(E[:,i] != 0)[0]\n j = neighbors_i[np.argmax(Y[neighbors_i])]\n if Y[j] > Y[i]:\n i = j\n\n # Draw arm j and observe the rewards of the neighbors\n neighbors_i = np.where(E[i,:] != 0)[0]\n D[i] += 1\n for k in neighbors_i:\n O[k] += 1\n rew = arms[k].sample()\n X[k] += rew\n if k == i:\n reward[t] = rew\n\n # Handle the case when there are 0 on the diagonal\n if i not in neighbors_i:\n reward[t] = arms[i].sample()\n\n return reward", "def _lp_msne(self, self_support, opponent_support, player=1):\n\n # logging.info(f'_lp_msne for player {player}')\n ns, U = len(self.s[player % 2]), self.U[player-1]\n if player != 1:\n U = U.T\n\n f, support_u = np.array([0.0 for _ in range(ns+1)]), U[np.where(self_support == 1)]\n\n a_temp = np.array([[0.] if i == 0 else [-1.] 
for i in range(support_u.shape[0]+1)])\n Aeq = np.concatenate([a_temp, np.concatenate([np.ones((1, support_u.shape[1])), support_u], axis=0)], axis=1)\n Beq = np.array([[1.] if i == 0 else [0.] for i in range(Aeq.shape[0])])\n # logging.info(f'Aeq: {Aeq}, {Aeq.shape}')\n # logging.info(f'Beq: {Beq}, {Beq.shape}')\n\n si_0 = np.where(self_support == 1)[0][0]\n not_support = U[np.where(self_support == 0)]\n # logging.info(f'not_support: {not_support}, {not_support.shape}')\n\n Aub = not_support - U[[si_0], :]\n Aub = np.concatenate([np.zeros((Aub.shape[0], 1)), Aub], axis=1)\n Bub = np.zeros((Aub.shape[0], 1))\n # logging.info(f'Aub: {Aub}, {Aub.shape}')\n # logging.info(f'Bub: {Bub}, {Bub.shape}')\n\n if not_support.shape[0] == 0 or not_support.shape[1] == 0:\n Aub, Bub = None, None\n\n p_bounds = np.array([(zero_p, one_m) for _ in range(ns)])\n p_bounds[np.where(opponent_support == 0)] = (0, 0)\n\n return linprog(f, A_ub=Aub, b_ub=Bub, A_eq=Aeq, b_eq=Beq, bounds=[(None, None), *p_bounds], method='simplex')", "def WCA_SA(targetMDG, WCAresult):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG, WCAresult))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n Temperature = 20\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, Temperature)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n if Temperature > 0:\n Temperature -= 0.5\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n max_climber.remove_empty_cluster()\n return max_climber.result", "def Nsat(self, m):\n pass", "def endgame_score_connectfour(board, is_current_player_maximizer) :\n chains=sorted(board.get_all_chains(), key=lambda x: len(x))\n if len(chains[-1])>=4:\n if not is_current_player_maximizer:\n return 1000;\n else:\n return -1000;\n return 0;", "def mrv_max1(f, g, exps, x):\n u, b = f.union(g, exps)\n return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),\n u, b, x)", "def worst_atom(self, g_u, g_v, active_set):\n\n max_w = None\n max_m_w = None\n max_n_w = None\n max_score = -float('inf')\n\n for w in active_set:\n m_w, n_w = self.polytope.vertex(w)\n score_w = np.sum(g_u * m_w) + np.sum(g_v * n_w)\n\n if score_w > max_score:\n max_w = w\n max_m_w = m_w\n max_n_w = n_w\n max_score = score_w\n\n return max_w, max_m_w, max_n_w", "def find_mutual_nn(self):\n best_match_src = self.scores.argmax(1) # Best match for each source word\n best_match_trg = self.scores.argmax(0) # Best match for each source word\n\n # ONELIENER\n # paired_idx = 
[(i,best_match_src[i]) for i in range(self.ns) if best_match_trg[best_match_src[i]] == i]\n # paired_words = [(self.src_words[i],self.trg_words[j]) for (i,j) in paired_idx]\n paired = []\n for i in range(self.ns):\n m = best_match_src[i]\n if best_match_trg[m] == i:\n paired.append((i,m))\n\n paired_toks = []\n if self.src_words and self.trg_words:\n paired_toks = [(self.src_words[i],self.trg_words[j]) for (i,j) in paired]\n else:\n paired_toks = paired\n return paired_toks", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n Td=tau_p**(1/n)\n 
x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)", "def solveExNoOverlap(targetNum, defenders, dRewards, dPenalties, dCosts, aTypes, aRewards, aPenalties, q):\n \"\"\"Contains as many dummy targets as defenders, for defenders and attackers\"\"\"\n \"\"\"In this game the attacker and defender reason ex-ante\n (they choose to follow signals or not before a signal is sent).\"\"\"\n # Add the extra dummy targets\n _dRewards = copy.deepcopy(dRewards)\n _dPenalties = copy.deepcopy(dPenalties)\n _dCosts = copy.deepcopy(dCosts)\n _aRewards = copy.deepcopy(aRewards)\n _aPenalties = copy.deepcopy(aPenalties)\n for m in defenders:\n for defenderCount in defenders:\n _dRewards[m].append(0)\n _dPenalties[m].append(0)\n _dCosts[m].append(0)\n for lam in aTypes:\n _aRewards[lam].append(0)\n _aPenalties[lam].append(0)\n targetNumWithDummies = len(_dRewards[0])\n targetRange = list(range(targetNumWithDummies))\n attackerActions = targetRange\n # Get the suggestions that occur with no overlap\n overlapPlacements = getPlacements(defenders, targetNumWithDummies)\n placements = list(filter(lambda x: len(set(x)) == len(x), overlapPlacements))\n omegaKeys = getOmegaKeys(aTypes, placements, attackerActions)\n\n # Build the model\n model = Model('ExAnteWithOverlap')\n w = model.continuous_var_dict(keys=omegaKeys, lb=0, ub=1, name=\"w\")\n objectiveFunction = sum([q[lam] * sum([w[sd,sa,lam] * defenderSocialUtility(sd,sa,defenders,_dRewards,_dCosts,_dPenalties) for sd in placements for sa in attackerActions]) for lam in aTypes])\n # Define the constraints\n c1 = [sum([w[sd,sa,lam] * aUtility(sd,sa,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions]) \\\n >= sum([w[sd,sa,lam] * aUtility(sd,tPrime,lam,_aPenalties,_aRewards) for sd in placements for sa in attackerActions])\n for lam in aTypes for tPrime in targetRange]\n c1 = [constraint for constraint in c1 if not isinstance(constraint, bool)]\n c2 = [sum([q[lam] * sum([w[sd,sa,lam] * utilityM(sd[d],sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n >= sum([q[lam] * sum([w[sd,sa,lam] * utilityM(tPrime,sd,sa,d,_dRewards,_dPenalties,_dCosts) for sd in placements for sa in attackerActions]) for lam in aTypes]) \\\n for d in defenders for tPrime in targetRange]\n c3 = [sum([w[sd,sa,lam] for sd in placements for sa in 
attackerActions]) == 1\n for lam in aTypes]\n # Add the constraints\n c1 = model.add_constraints(c1)\n c2 = model.add_constraints(c2)\n c3 = model.add_constraints(c3)\n # Solve the model\n model.maximize(objectiveFunction)\n model.solve()\n # Now that w contains all the outcomes and their probabilities, sum the attacker utilities up.\n utilityPerAttacker = 0\n for k,v in w.items():\n prob = float(v)\n s,a,lam = k\n utilityPerAttacker += aUtility(s,a,lam,_aPenalties,_aRewards) * prob\n utilityPerAttacker /= len(aTypes)\n utilityPerDefender = model.solution.get_objective_value()\n utilityPerDefender /= len(defenders)\n return utilityPerDefender, utilityPerAttacker, None", "def a_test2_mh():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('M-H',nsims=300)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def run_inference(self,isMax = 1,findZ = 0):\r\n# st=time.time()\r\n self.make_connected()\r\n self.nop = 0 # number of operations\r\n T=CliqueTree(self,isMax,findZ)\r\n if isMax == 0:\r\n self.marg_clique_tree = T\r\n elif isMax==1:\r\n self.MAP_clique_tree = T\r\n# print time.time()-st'=\r\n self.nop += T.nop\r\n M=[]\r\n for i in self.g.nodes(): # assuming nodes are labeled 0..N-1\r\n for s,data in T.nodes_iter(data=True):\r\n f = data['fac']\r\n if i in f.var:\r\n if isMax==0:\r\n dummy = f.Marginalize(scipy.setdiff1d(f.var,i))\r\n if findZ == 0:\r\n dummy.val = dummy.val/sum(dummy.val)\r\n else:\r\n dummy = f.MaxMarginalize(scipy.setdiff1d(f.var,i))\r\n self.nop += scipy.prod(f.card)\r\n M.append(dummy)\r\n break\r\n# print time.time()-st\r\n return M", "def MAE(ratings, range):\n\n def err(pair):\n (r, rP) = pair\n return abs(r-rP)\n\n return (1/len(ratings)) * sum(map(err, ratings))", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def solve_huang_eq_13(v):\n\n n = len(v)\n u = v - np.ones((n, n)) @ v / (n) + np.ones(n) / (n)\n lambda_bar_star = solve_huang_eq_24(u)\n lambda_star = (lambda_bar_star - u) + np.clip(u - lambda_bar_star, 0, None)\n return u + lambda_star - lambda_bar_star * np.ones(n)", "def matchCells(self, dTheta = 4, theta = None, n_max = 4, N = None,\\\n m_max = 4, M = None, max_strain = 1, max_atoms = 5000,\\\n limit = None, exp = 1, verbose = 1, min_angle = 10,\\\n remove_asd = True, asd_tol = 7, limit_asr = False,\\\n asr_tol = 1e-7, asr_iter = 350, asr_strain = \"eps_mas\",\\\n asr_endpoint = \"over\", target = None, favor = \"angle_same\"):\n\n if self.base_1 is None:\n string = \"No base structures exist\"\n ut.infoPrint(string)\n return\n\n \"\"\"Get number of atoms per area (xy) in base cell 1 and 2\"\"\"\n rhoA = self.pos_1.shape[0] / np.abs(np.cross(self.base_1[0:2, 0], self.base_1[0:2, 1]))\n rhoB = self.pos_2.shape[0] / np.abs(np.cross(self.base_2[0:2, 0], self.base_2[0:2, 1]))\n\n \"\"\"Cell rotation angles\"\"\"\n if theta is not None:\n if isinstance(theta, (int, np.integer)):\n angle = np.array([theta])\n else:\n angle = np.array(theta)\n else:\n angle 
= np.arange(0, 180, dTheta)\n\n if target is not None:\n if type(target) == list: target = np.array(target)\n angle = np.array([0])\n\n \"\"\"Repetions of the first cell vector, [-n_max,...,n_max],\n N takes president as a specific range of repititions\"\"\"\n if N is None:\n nR = np.arange(-n_max, n_max + 1)\n\n \"\"\"Repetions of the second cell vector, [0,...,m_max],\n M takes president as a specific range of repititions\"\"\"\n if M is None:\n mR = np.arange(0, m_max + 1)\n\n \"\"\"Create all permutations of nR and mR if M,N is specifed use only those\"\"\"\n if M is not None and N is not None:\n M = np.array(M)[:, None]; N = np.array(N)[:, None]\n dPerm = np.concatenate((M, N), axis = 1)\n else:\n dPerm = np.mgrid[nR[0]:nR[-1] + 1, mR[0]:mR[-1] + 1].reshape(2, nR.shape[0] * mR.shape[0])\n\n \"\"\"Convert angle to radians\"\"\"\n aRad = np.deg2rad(angle)\n\n \"\"\"Set up a Rotation matrix, move axis to work with shapes (X,2,2)\"\"\"\n R = np.moveaxis(np.array([[np.cos(aRad), -np.sin(aRad)],\n [np.sin(aRad), np.cos(aRad)]]), 2, 0)\n\n \"\"\"Rotate the B cell by the specified angles, e.g. C = R*B\"\"\"\n C = np.matmul(R, self.base_2[0:2, 0:2])\n\n \"\"\"Build all possible cell vectors given the permutations dPerm\n d = C*dPerm each row will be a possible cell vector\"\"\"\n d = np.matmul(C, dPerm)\n\n \"\"\"Express d in basis cell 1, d = A*e, find e -> A(-1)*d = e\"\"\"\n e = np.matmul(np.linalg.inv(self.base_1[0:2, 0:2]), d)\n\n \"\"\"Snap the e vectors to the A grid\"\"\"\n e = np.round(e, 0).astype(int)\n\n \"\"\"If target is supplied the matching is done against\n those specific repetitions. Supplied as a 2x2 matrix \n with basis vectors as columns. The righthanded version\n will be returned\"\"\"\n if target is not None:\n e = np.tile(np.array(target)[None, :, :], (R.shape[0], 1, 1))\n\n \"\"\"Caclculate the new (strained) d vectors (f), f = A * eInt\"\"\"\n f = np.matmul(self.base_1[0:2, 0:2], e)\n\n \"\"\"Create all permutations of the f vectors\"\"\"\n F = np.zeros((angle.shape[0], f.shape[2]**2, 2, 2))\n F[:, :, :, 0] = np.swapaxes(np.tile(f, f.shape[2]), 1, 2)\n F[:, :, :, 1] = np.swapaxes(np.repeat(f, f.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n F = F.reshape(-1, *F.shape[-2:])\n\n \"\"\"Create all the same permutations of the d vectors\"\"\"\n D = np.zeros((angle.shape[0], d.shape[2]**2, 2, 2))\n D[:, :, :, 0] = np.swapaxes(np.tile(d, d.shape[2]), 1, 2)\n D[:, :, :, 1] = np.swapaxes(np.repeat(d, d.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n D = D.reshape(-1, *D.shape[-2:])\n\n \"\"\"Create all the same permutations of the eInt vectors\"\"\"\n FRep = np.zeros((angle.shape[0], e.shape[2]**2, 2, 2))\n FRep[:, :, :, 0] = np.swapaxes(np.tile(e, e.shape[2]), 1, 2)\n FRep[:, :, :, 1] = np.swapaxes(np.repeat(e, e.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n FRep = FRep.reshape(-1, *FRep.shape[-2:])\n\n \"\"\"Create all the same permutations of the dPerm vectors\"\"\"\n dPerm = np.tile(dPerm[np.newaxis, :, :], (angle.shape[0], 1, 1))\n DRep = np.zeros((angle.shape[0], dPerm.shape[2]**2, 2, 2))\n DRep[:, :, :, 0] = np.swapaxes(np.tile(dPerm, dPerm.shape[2]), 1, 2)\n DRep[:, :, :, 1] = np.swapaxes(np.repeat(dPerm, dPerm.shape[2], axis = 2), 1, 2)\n\n \"\"\"Flatten the first 2 dimensions\"\"\"\n DRep = DRep.reshape(-1, *DRep.shape[-2:])\n\n \"\"\"Calculate the area of the F and D cells\"\"\"\n detF = np.linalg.det(F)\n detD = np.linalg.det(D)\n\n \"\"\"Remove all combinations where the 
determinant is 0 or <0\n i.e. linearly dependent or wrong handed. Do the same for \n the top cell\"\"\"\n keep = (detF > 1e-6) * (detD > 1e-6)\n detF = detF[keep]\n detD = detD[keep]\n\n if verbose > 0:\n string = \"Total basis pairs: %.0f | Lin dep/left handed: %.0f | Total kept: %.0f\"\\\n % (keep.shape[0], keep.shape[0] - np.sum(keep), np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Return if no interfaces are found (if a specific match is wronghanded)\"\"\"\n if np.sum(keep) == 0:\n string1 = \"No Interfaces found to be linearly independedt or right handed. \"\n string2 = \"If a specific match is to be constructed swap M and N or target.\"\n ut.infoPrint(string1, sep_after = False)\n ut.infoPrint(string2, sep_before = False)\n return\n\n \"\"\"Remove the lin-dep/left handed combinations before calculating the strain\"\"\"\n F = F[keep]\n D = D[keep]\n FRep = FRep[keep]\n DRep = DRep[keep]\n\n \"\"\"Calculate the strain of the new cell vectors\"\"\"\n eps_11, eps_22, eps_12, eps_mas = ut.calcStrains(F, D)\n\n \"\"\"Create a matching vector with the original rotations\"\"\"\n ang = np.repeat(angle, f.shape[2]**2)\n ang = ang[keep]\n\n \"\"\"Calculate the number of atoms using the area and the area density\"\"\"\n rawAtoms = rhoA * detF + rhoB * detD\n atoms = np.round(rawAtoms)\n\n \"\"\"Check to make sure the calculated nr of atoms are integers, otherwise flag it\"\"\" \n tol = 7\n flag = (atoms != np.round(rawAtoms, tol))\n if np.sum(flag) != 0:\n index = np.arange(atoms.shape[0])[flag]\n string = \"None integer number of atoms calculated for the following interfaces\"\n ut.infoPrint(string, sep_before = False)\n for i in index:\n print(\"Index: %6i | Nr atoms: %14.10f\" % (i, rawAtoms[i]))\n\n \"\"\"Keep only unique entries. Found by checking for unique pairs of\n combinations for bottom and top surfaces\"\"\"\n full = np.zeros((atoms.shape[0], 4 * 2))\n full[:, 0:4] = FRep.reshape(*FRep.shape[0:1], -1)\n full[:, 4:8] = DRep.reshape(*DRep.shape[0:1], -1)\n\n ufi = np.unique(full, axis = 0, return_index = True)[1]\n keep = np.isin(np.arange(atoms.shape[0]), ufi)\n if verbose > 0:\n string = \"Non unique matches: %i | Total matches keept: %i\"\\\n % (atoms.shape[0] - np.sum(keep), np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Assign values to class variables\"\"\"\n self.cell_1 = F[keep]\n self.cell_2 = D[keep]\n self.rep_1 = FRep[keep]\n self.rep_2 = DRep[keep]\n self.eps_11 = eps_11[keep]\n self.eps_22 = eps_22[keep]\n self.eps_12 = eps_12[keep]\n self.eps_mas = eps_mas[keep]\n self.atoms = atoms[keep]\n self.ang = ang[keep]\n self.e_int_c = np.zeros((self.atoms.shape[0], 1))\n self.w_sep_c = np.zeros((self.atoms.shape[0], 1))\n self.w_seps_c = np.zeros((self.atoms.shape[0], 1))\n self.e_int_d = np.zeros((self.atoms.shape[0], 1))\n self.w_sep_d = np.zeros((self.atoms.shape[0], 1))\n self.w_seps_d = np.zeros((self.atoms.shape[0], 1))\n self.order = np.arange(self.atoms.shape[0]) \n\n \"\"\"Further removal of interfaces based on specified critera follows below\"\"\"\n\n \"\"\"Reject interfaces based on criteria of strain * atoms^exp > limit\"\"\"\n if limit is not None:\n keep = ((self.eps_mas * (self.atoms ** exp)) < limit)\n ratio = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Matches with (strain * atoms^%s) > %s: %i | Total matches kept: %i\"\\\n % (exp, limit, ratio, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with strain*atoms^exp > limit\"\"\"\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Remove cells with to narrow cell 
angles, defined below\"\"\"\n ang_lim = np.deg2rad(min_angle)\n ang_1 = self.getBaseAngles(cell = 1)\n ang_2 = self.getBaseAngles(cell = 2)\n\n keep = (ang_1 > ang_lim) * (ang_1 < np.pi - ang_lim) *\\\n (ang_2 > ang_lim) * (ang_2 < np.pi - ang_lim)\n\n max_angle = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Cell angle outside limit (%.1f<X<%.1f): %i | Total kept: %i\"\\\n % (np.rad2deg(ang_lim), np.rad2deg(np.pi - ang_lim), max_angle, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with angles outside specified range\"\"\"\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Remove matches were any strain component is > max_strain\"\"\"\n keep = (np.abs(self.eps_11) < max_strain) *\\\n (np.abs(self.eps_22) < max_strain) *\\\n (np.abs(self.eps_12) < max_strain)\n\n max_strain = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Matches above max strain: %i | Total matches kept: %i\"\\\n % (max_strain, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with abs(strains) above max_strain\"\"\"\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Remove matches with the number of atoms > max_atoms\"\"\"\n keep = (self.atoms < max_atoms)\n max_atoms = np.sum(np.logical_not(keep))\n if verbose > 0:\n string = \"Matches with to many atoms: %i | Total matches kept: %i\"\\\n % (max_atoms, np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces with more atoms than max_atoms\"\"\"\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n \"\"\"Find duplicates in the combo (nr_atoms, eps_mas) if specified\"\"\"\n if remove_asd:\n keep = self.getAtomStrainDuplicates(tol_mag = asd_tol, verbose = verbose, sort = favor)\n self.deleteInterfaces(keep, verbose = verbose - 1)\n\n if verbose > 0:\n string = \"Duplicate atoms/strain combinations: %i | Total matches kept: %i\"\\\n % (np.sum(np.logical_not(keep)), np.sum(keep))\n ut.infoPrint(string)\n\n \"\"\"Interfaces with |strains| < tol are slightly perturbed to avoid issues with log expressions\"\"\"\n tol = 1e-7\n exact_matches = np.abs(self.eps_mas) < tol\n self.eps_11[np.abs(self.eps_11) < tol] = tol\n self.eps_22[np.abs(self.eps_22) < tol] = tol\n self.eps_12[np.abs(self.eps_12) < tol] = tol\n self.eps_mas[np.abs(self.eps_mas) < tol] = tol\n if np.sum(exact_matches) > 0:\n string = \"Exact matches found: %i\" % np.sum(exact_matches)\n ut.infoPrint(string)\n\n \"\"\"Remove interfaces based on atom strain ratios, limiting the set to this number\"\"\"\n if limit_asr and self.atoms.shape[0] > 2:\n self.removeByAtomStrain(keep = limit_asr, tol = asr_tol, max_iter = asr_iter,\\\n strain = asr_strain, endpoint = asr_endpoint,\\\n verbose = verbose)\n\n \"\"\"Sort the interfaces based on number of atoms and reset the base order parameter\"\"\"\n self.sortInterfaces(sort = \"atoms\")\n self.setOrder(verbose = verbose)", "def solver(u,f,n=50,m=100,t0=0,t1=1000,dt=.1,nu=1):\n \n u_new=[[u[i][j] for j in range(m)]for i in range(n)]\n loopCounter=t0\n while(loopCounter<=t1):\n for i in xrange(1,n-1):\n for j in xrange(1,m-1):\n u_new[i][j]=u[i][j] + dt * (nu*u[i-1][j] + nu*u[i][j-1] - 4*nu*u[i][j] + nu*u[i][j+1] + nu*u[i+1][j] + f[i][j])\n loopCounter+=dt\n u=[[u_new[i][j] for j in range(m)]for i in range(n)]\n\n return u", "def cp_apr(X, Y1, R, Minit=None, tol=1e-4, maxiters=1000, maxinner=50,\n epsilon=1e-10, kappatol=1e-10, kappa=1e-2):\n N = X.ndims()\n \n ## Random initialization\n if Minit == None:\n F = tensorTools.randomInit(X.shape, R)\n Minit = ktensor.ktensor(np.ones(R), 
F);\n nInnerIters = np.zeros(maxiters);\n\n ## Initialize M and Phi for iterations\n M = Minit\n M.normalize(1)\n Phi = [[] for i in range(N)]\n kktModeViolations = np.zeros(N)\n kktViolations = -np.ones(maxiters)\n nViolations = np.zeros(maxiters)\n\n lambda2=0.1\n lambda3=0.1\n sita=np.random.rand(R+1,1);\n ## statistics\n cpStats = np.zeros(7)\n '''\n print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n print M.U[0][1,:]\n print M.U[0].shape\n print Demog[1]\n print DemoU[1]\n print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n '''\n for iteration in range(maxiters):\n startIter = time.time()\n isConverged = True;\n for n in range(N):\n startMode = time.time()\n ## Make adjustments to M[n] entries that violate complementary slackness\n if iteration > 0:\n V = np.logical_and(Phi[n] > 1, M.U[n] < kappatol)\n if np.count_nonzero(V) > 0:\n nViolations[iteration] = nViolations[iteration] + 1\n #print 'V:',V.shape,V.dtype\n #print 'M.U[n]',M.U[n].shape,M.U[n].dtype\n M.U[n][V > 0] = M.U[n][V > 0] + kappa\n if n==0:\n sita=__solveLinear(M.U[n],Y1,lambda3)\n # lr=LogisticRegression()\n #sita=lr.fit(M.U[n],Y1).coef_\n #print 'sita'\n #print sita\n #print 'demoU'\n #print DemoU[0]\n M, Phi[n], inner, kktModeViolations[n], isConverged = __solveSubproblem1(X, M, n, maxinner, isConverged, epsilon, tol,sita,Y1, lambda2)\n else:\n M, Phi[n], inner, kktModeViolations[n], isConverged = __solveSubproblem0(X, M, n, maxinner, isConverged, epsilon, tol)\n elapsed = time.time() - startMode\n # only write the outer iterations for now\n #cpStats = np.vstack((cpStats, np.array([iteration, n, inner, tensorTools.lsqrFit(X,M), tensorTools.loglikelihood(X,[M]), kktModeViolations[n], elapsed])))\n\n kktViolations[iteration] = np.max(kktModeViolations)\n elapsed = time.time()-startIter\n #cpStats = np.vstack((cpStats, np.array([iter, -1, -1, kktViolations[iter], __loglikelihood(X,M), elapsed])))\n print(\"Iteration {0}: Inner Its={1} with KKT violation={2}, nViolations={3}, and elapsed time={4}\".format(iteration, nInnerIters[iteration], kktViolations[iteration], nViolations[iteration], elapsed))\n if isConverged:\n break\n\n cpStats = np.delete(cpStats, (0), axis=0) # delete the first row which was superfluous\n ### Print the statistics\n #fit = tensorTools.lsqrFit(X,M)\n #ll = tensorTools.loglikelihood(X,[M])\n print(\"Number of iterations = {0}\".format(iteration))\n #print(\"Final least squares fit = {0}\".format(fit))\n #print(\"Final log-likelihood = {0}\".format(ll))\n print(\"Final KKT Violation = {0}\".format(kktViolations[iteration]))\n print(\"Total inner iterations = {0}\".format(np.sum(nInnerIters)))\n \n #modelStats = {\"Iters\" : iter, \"LS\" : fit, \"LL\" : ll, \"KKT\" : kktViolations[iteration]}\n return M, cpStats", "def findMaximal(freqSet):", "def auxmaxrho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= auxmax_cc_piece(x,k_ind,m_ind) \n\n return f", "def _moments_match_analytical(self,obs,tau,v):\r\n raise NotImplementedError", "def find_adapted_solution(list_of_tuples, module, n_of_players_with_vote):\n\n def malus_roles_left(players_left, roles_left):\n\n \"\"\"\n Checks whether it is possible to deploy all the players by assinging\n a certain number of malus.\n \"\"\"\n\n # Permutations of the players still to be deployed. 
We do that because\n # we only want that combination of players in which ALL of them are\n # deployed\n players_perm = permutations(players_left, len(players_left))\n\n # Initialize the number of malus (just a number high enough)\n fin_malus = 10\n\n # For each permutation of players to be deployed\n for perm in players_perm:\n\n # Initialize two parameters: a counter and the number of malus for\n # this specific permutation. Counter is used to be sure all the\n # players in the permutation are checked\n count = 0\n temp_malus = 0\n\n # Make a copy of the roles to be covered so we can use it later to\n # delete roles that we are able to cover\n copy_of_adapted_roles = copy.copy(roles_left)\n\n # For each element in the permutation we select the corresponding\n # role and try to cover it\n for i in range(len(perm)):\n role_to_cover = roles_left[i]\n role_cand = perm[i][2]\n\n # If it is possible to cover it with a malus we increase the\n # number of malus and the counter and then remove the role from\n # the list of the roles still uncovered\n if role_to_cover in malus_roles[role_cand]:\n temp_malus += 1\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # If it is possible to cover it with no malus we just increase\n # the counter and delete the role\n elif (role_to_cover not in malus_roles[role_cand]\n and role_to_cover in compatible_roles[role_cand]):\n count += 1\n copy_of_adapted_roles.remove(role_to_cover)\n\n # Else we interrupt checking this permutation and go to the\n # one\n else:\n break\n\n # If we checked ALL the elements in the permutation and the number\n # of malus is lower than the previous value we store it\n if count == len(perm) and temp_malus < fin_malus:\n fin_malus = temp_malus\n\n # If this value is different from the default one it means we found a\n # solution and we return it\n if fin_malus != 10:\n return fin_malus\n else:\n return False\n\n def calculate(candidate, roles_of_module):\n\n \"\"\"\n This function applies the deploy_players function to look for the\n solution, if it exists. 
If all the players are deployed it returns\n True, otherwise False.\n \"\"\"\n\n # See find_solution for explanation on the try method\n try:\n to_deploy_list, roles_left = deploy_players(candidate,\n roles_of_module,\n 'adapted')\n\n # If the roles to deploy can be covered with a malus we return the\n # number of malus assigned\n\n if malus_roles_left(to_deploy_list, roles_left):\n return malus_roles_left(to_deploy_list, roles_left)\n else:\n return False\n\n except TypeError:\n return False\n\n ordered_lineup = order_by_role(list_of_tuples)\n\n all_comb = list(combinations(schemes[module], n_of_players_with_vote))\n\n for comb in all_comb:\n\n # Change from tuple to list and check wings\n comb = transf_wings(list(comb), module)\n\n # If a solution is found we return the number of malus\n if calculate(ordered_lineup, comb):\n return calculate(ordered_lineup, comb)\n\n return False", "def analyseRemainderMulticastOPT(self):\n #create a list of criteria that correspond to maximal path length\n #max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 
100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))", "def calibrate(self,isMax,findZ):\r\n N = self.number_of_nodes()\r\n if isMax==1:\r\n for i in self.nodes():\r\n self.node[i]['fac'].val=scipy.log(self.node[i]['fac'].val)\r\n \r\n for i,j in self.edges():\r\n self.edge[i][j]['msg'] = factor([],[],[])\r\n self.edge[i][j]['msg_passed'] = False# message passed from i to j or not\r\n \r\n I,J = get_next_cliques(self)\r\n while I >= 0: \r\n self.pass_message(I,J,isMax,findZ)\r\n self.edge[I][J]['msg_passed'] = True\r\n I,J = get_next_cliques(self)\r\n \r\n for i in self.nodes():\r\n if isMax==0:\r\n self.node[i]['fac'] *= reduce(mul,(self.edge[j][i]['msg'] for j in self.predecessors(i)))\r\n else:\r\n self.node[i]['fac'] += reduce(add,(self.edge[j][i]['msg'] for j in self.predecessors(i)))\r\n self.nop += N*scipy.prod(self.node[i]['fac'].card) # check this\r", "def optimize(self, ngen):\n res = 0\n for res in self(ngen):\n pass\n return res", "def m(self):\n\t\tn = 0\n\t\ti = self.k0\n\t\twhile 1:\n\t\t\tif i > self.j:\n\t\t\t\treturn n\n\t\t\tif not self.cons(i):\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\ti = i + 1\n\t\twhile 1:\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1\n\t\t\tn = n + 1\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif not self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1", "def best_move(data, indexing, cf, cf_prime, N=20, M=30): \n stats = {}\n timer = time()\n ns = list(neighbours(indexing, random_stream=N))\n stats[\"n_neighbours\"] = len(ns)\n stats[\"t_neighbours\"] = 1000*(time() - timer)\n\n dt_rcs = []\n bestpair, best_rcost = None, None\n for v,k in ns:\n timer = time()\n rc = reduced_cost(data, indexing, cf, cf_prime, v, k, uw_sample_count=M)\n dt_rcs.append(1000*(time() - timer))\n if bestpair is None or rc > best_rcost:\n bestpair = v,k\n best_rcost = rc\n\n stats[\"t_rcs_mean\"] = np.mean(dt_rcs)\n stats[\"t_rcs_std\"] = np.std(dt_rcs)\n stats[\"t_rcs_sum\"] = np.sum(dt_rcs)\n stats[\"rc\"] = best_rcost\n stats[\"partcount\"] = np.unique(indexing).shape[0]\n return bestpair, best_rcost, stats", "def part_1(rules: Rules) -> int:\n\n happiness, _ = max(generate_arrangements(rules))\n print(f\"part 1: optimal arrangement brings {happiness} happiness\")\n return happiness", "def solution(s):", "def mutual_information_max(self):\n return np.log2(special.comb(self.Nr, self.coding_receptors))", "def nac_w_optimal_r(fan_in, fan_out):\n fan = max(fan_in + fan_out, 5)\n r = scipy.optimize.bisect(lambda r: fan * nac_w_variance(r) - 2, 0, 10)\n return r", "def get_n_best(self):\n pass", "def minimax(gamestate, depth, timeTotal, alpha, beta, maxEntity):\n\n bonus = 0\n isTerminalState = gamestate.board.checkTerminalState(gamestate.currentPlayer.noPlayer)\n # Basis Rekursif\n if ((depth == 0) or (time.time() > timeTotal) or (isTerminalState)):\n if (isTerminalState) and (gamestate.currentPlayer.noPlayer == maxEntity):\n bonus = 10\n elif (isTerminalState) and (gamestate.currentPlayer.noPlayer != maxEntity):\n bonus = -10\n return gamestate, U_Function(gamestate.currentPlayer, gamestate.oppositePlayer, gamestate.board.size, maxEntity) + bonus\n\n # Rekurens\n if (gamestate.currentPlayer.noPlayer == maxEntity):\n # Choose the maximum utility of the state\n # Iterate all pion and its possible moves\n maxGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, 
gamestate.oppositePlayer)\n maxValue = -math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Iterate all possible moves of pion index\n for move in all_possible_moves:\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimax(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old max value\n if (utility > maxValue):\n maxValue = utility\n maxGameState = newGameState\n \n alpha = max(alpha, maxValue)\n if (beta <= alpha):\n return maxGameState, maxValue\n return maxGameState, maxValue\n\n else:\n # Choose the minimum utility of the state\n minGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n minValue = math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Iterate all possible moves of pion index\n for move in all_possible_moves:\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimax(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old min value\n if (utility < minValue):\n minValue = utility\n minGameState = newGameState\n \n beta = min(beta, minValue)\n if (beta <= alpha):\n return minGameState, minValue\n \n return minGameState, minValue", "def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1", "def rank_and_assign(self, cutoff_matrix_element):\r\n\r\n L0 = (qt.liouvillian(self.rotating_frame_hamiltonian, self.jump_ops))\r\n \r\n relevance_table = [[self.calculate_first_order_correction(cutoff_matrix_element,L0,ket_index=n,bra_index=m) for m in range(self.dim)] for n in range(self.dim)]\r\n relevance_table = np.asarray(relevance_table)\r\n \r\n number_of_transitions = int(self.dim*(self.dim-1)/2)\r\n transition_rank = [None for i in range(number_of_transitions)]\r\n # This loop ranks drive terms according to relevance \r\n for rank in range(number_of_transitions):\r\n max_ranked_indices = np.where(relevance_table == relevance_table.max())\r\n indices = [max_ranked_indices[0][0], max_ranked_indices[1][0]]\r\n transition_rank[rank] = [relevance_table.max(), indices]\r\n relevance_table[indices[0]][indices[1]] = relevance_table[indices[1]][indices[0]] = 0\r\n \r\n # This graphical algorithm assigns an integer to each eigenstate of the Hamiltonian based on the ranking from above\r\n integer_list = [None for i in range(self.dim)]\r\n # START ALGORITHM\r\n # initialize first term into a graph\r\n first_index = 
transition_rank[0][1]\r\n graph_list = [[first_index[0],first_index[1]]]\r\n integer_list[max(first_index)] = 1\r\n integer_list[min(first_index)] = 0\r\n # assign subsequent terms\r\n for i in range(1,number_of_transitions):\r\n # if no more non-zero relevance parameters, then break \r\n if transition_rank[i][0] == 0.0:\r\n break\r\n else:\r\n index = transition_rank[i][1]\r\n # scenario (i) neither states have been incorporated into the graph \r\n if integer_list[index[0]]==integer_list[index[1]]==None: \r\n integer_list[max(index)] = 1\r\n integer_list[min(index)] = 0\r\n # place them in a new graph\r\n graph_list.append([index[0],index[1]])\r\n # scenario (ii) one of the states has been incorporated, but not the other\r\n elif integer_list[index[0]]==None:\r\n if index[0] > index[1]:\r\n integer_list[index[0]] = integer_list[index[1]] + 1\r\n else:\r\n integer_list[index[0]] = integer_list[index[1]] - 1\r\n # find which graph component to put the state in (the component the other state is in)\r\n for k,graph in enumerate(graph_list):\r\n if index[1] in graph:\r\n # place that state in that graph component\r\n graph_list[k].append(index[0]) \r\n break\r\n elif integer_list[index[1]]==None:\r\n if index[0] > index[1]:\r\n integer_list[index[1]] = integer_list[index[0]] - 1\r\n else:\r\n integer_list[index[1]] = integer_list[index[0]] + 1\r\n for k,graph in enumerate(graph_list):\r\n if index[0] in graph:\r\n graph_list[k].append(index[1])\r\n break\r\n # scenario (iii) both states have already been incorporated in the graph\r\n else:\r\n # find the graph components where these states have been placed\r\n for k,graph in enumerate(graph_list):\r\n overlap = list(set(index) & set(graph))\r\n # subscenario: the states are in the same graph component, hence a cycle, so nothing can do\r\n if (len(overlap) == 2):\r\n break\r\n # subscenario: the states are in two disjoint graph components\r\n elif (len(overlap) == 1):\r\n fixed_index = overlap[0]\r\n shift_index = list(set(index) - set(graph))[0]\r\n old_integer = integer_list[shift_index]\r\n if shift_index > fixed_index:\r\n new_integer = integer_list[fixed_index] + 1\r\n else:\r\n new_integer = integer_list[fixed_index] - 1\r\n shift_amount = new_integer - old_integer\r\n # merge one graph component into the other\r\n for j,graph2 in enumerate(graph_list):\r\n if shift_index in graph2:\r\n for m,index2 in enumerate(graph2):\r\n integer_list[index2] = integer_list[index2] + shift_amount\r\n graph_list[k] = graph_list[k] + graph2\r\n graph_list.pop(j)\r\n break\r\n break\r\n else:\r\n continue\r\n continue\r\n # Just in case, if a state was not assigned an integer due to not participating in dynamics, set its integer to 0\r\n for i,integer in enumerate(integer_list):\r\n if integer == None:\r\n integer_list[i] = 0\r\n ## END algorithm\r\n return transition_rank, integer_list", "def accuracy_NMEA_opt(z):\n N = len(z)\n size = int(N - 60)\n sigma = np.zeros(size)\n for i in prange(30, N - 30):\n z_ave = 0\n for z_ave_i in z[(i - 30) : (i + 30)]:\n z_ave += z_ave_i\n z_ave = z_ave / 60.0\n\n noise_sqrt = 0\n for noise in z[i - 30 : i + 30]:\n noise_sqrt += abs(noise - z_ave) ** 2\n sigma[i - 30] = (noise_sqrt / (60.0 - 1.0)) ** 0.5\n return sigma", "def _brute_force(self):\n if self.N > 9:\n #print(\"Input set is too big for brute force estimation.\")\n self.best_path = None\n else:\n #print(\"Number of permutations to check: {}\".format(math.factorial(self.N)))\n #init = \n A = self._P + np.finfo(np.float).eps\n A = (A + (1-A).T)/2\n for i in 
range(A.shape[0]):\n A[i,i] = np.finfo(np.float).eps\n init = (A>0.5).sum(axis=1).argsort()[::-1]\n #--- use log(p(Y=1\\mid s',s)) to shift multiplication to sum\n lP = np.log(A)\n for i in range(lP.shape[0]):\n lP[i,i] = 0\n #init_cost = 0\n ##--- lP[x:x+1] está MAL hay que sumar respecto a i+1 en z, no en lP.\n #for i in range(len(init)-1):\n # init_cost += lP[init[i],init[i+1]:].sum()\n z_star = []\n z_cost = -np.inf\n for z in permutations(range(self.N)):\n cost = 0\n for i in range(len(z)-1):\n cost += lP[z[i],z[i+1:]].sum()\n if cost > z_cost:\n z_cost = cost\n z_star = z\n self.best_path = np.array(z_star)", "def J (self, n):", "def maximizer(evaluate):\n def strategy(player, board):\n def score_move(move):\n return evaluate(player, Othello.make_move(move, player, list(board)))\n return max(Othello.legal_moves(player, board), key=score_move)\n return strategy", "def max_power_candidate_wind_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - (m.Q_W[g, y, s, t] * sum(m.x_c[g, j] for j in m.Y if j <= y)) <= 0" ]
[ "0.6166018", "0.582967", "0.58052844", "0.58043885", "0.569874", "0.5562046", "0.5560261", "0.55063146", "0.5504146", "0.54418087", "0.5436338", "0.543598", "0.54290164", "0.539945", "0.53978187", "0.53978187", "0.5395617", "0.5393824", "0.53581244", "0.5314676", "0.53036773", "0.53011966", "0.52949345", "0.5291771", "0.52808464", "0.52755934", "0.5271056", "0.5251586", "0.52506495", "0.5248484", "0.52413875", "0.52365726", "0.52312684", "0.52253497", "0.52149135", "0.52146125", "0.521269", "0.5204202", "0.5201512", "0.5193414", "0.5185066", "0.5184476", "0.51733017", "0.5169604", "0.5163266", "0.5162816", "0.5150862", "0.51460207", "0.5121116", "0.51198894", "0.5117346", "0.511251", "0.51079303", "0.51050925", "0.51019454", "0.51016176", "0.5100529", "0.5095874", "0.5084714", "0.5083401", "0.5081838", "0.5066114", "0.5065674", "0.50586164", "0.5058582", "0.5045578", "0.5043503", "0.5036511", "0.5027579", "0.50273585", "0.5026938", "0.502452", "0.5018893", "0.5018732", "0.50146914", "0.5012184", "0.50032055", "0.50018597", "0.49951833", "0.49903303", "0.49895793", "0.4987462", "0.49871692", "0.49852994", "0.4981031", "0.49797836", "0.49760026", "0.49749756", "0.49705458", "0.49649382", "0.49636886", "0.49607825", "0.4959089", "0.49571633", "0.49568784", "0.49558085", "0.495382", "0.4952565", "0.49511078", "0.4950999" ]
0.5245856
30
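One consistency check this record invites: `document_rank` appears to be the count of negatives whose score beats `document_score` — in the sorted score list above, exactly 30 entries exceed 0.5245856, matching the rank of 30. A minimal sketch under that assumption (the dump's quoted score strings would first be parsed to floats):

def rank_of_document(document_score, negative_scores):
    # Number of negatives that out-score the positive document;
    # a result of 0 would mean the document ranks first.
    return sum(s > document_score for s in negative_scores)

# e.g. scores = [float(s) for s in raw_negative_scores]
#      rank_of_document(0.5245856, scores)  # -> 30 for the record above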
Return the overall mean score of a Player
def score_player(player, objective, opponents_information, weights=None, sample_count=None):
    scores_for_all_opponents = []
    if sample_count is not None:
        indices = np.random.choice(len(opponents_information), sample_count)
        opponents_information = [opponents_information[i] for i in indices]
        if weights is not None:
            weights = [weights[i] for i in indices]
    for strategy, init_kwargs in opponents_information:
        player.reset()
        opponent = strategy(**init_kwargs)
        scores_for_this_opponent = objective(player, opponent)
        mean_vs_opponent = mean(scores_for_this_opponent)
        scores_for_all_opponents.append(mean_vs_opponent)
    overall_mean_score = np.average(scores_for_all_opponents, weights=weights)
    return overall_mean_score
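A minimal usage sketch for the document above, assuming `score_player` is defined in a session where `mean` is `statistics.mean` and `np` is NumPy (the record itself does not state its imports); the player, opponent, and objective below are toy stand-ins for illustration, not part of the dataset:

from statistics import mean
import numpy as np

class ToyPlayer:
    # Stand-in exposing the reset() hook that score_player calls per opponent.
    def reset(self):
        pass

class FixedOpponent:
    # Stand-in constructed via strategy(**init_kwargs), as score_player expects.
    def __init__(self, payoff=1.0):
        self.payoff = payoff

def toy_objective(player, opponent):
    # Pretend each match yields three rounds of the opponent's fixed payoff.
    return [opponent.payoff] * 3

pairs = [(FixedOpponent, {"payoff": 1.0}), (FixedOpponent, {"payoff": 3.0})]
print(score_player(ToyPlayer(), toy_objective, pairs))  # -> 2.0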
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def get_total_score(self):\n\n # Return the player's total score\n return self._total_score", "def calc_mean_score(movies: List[Movie]) -> float:\n return round(sum([m.score for m in movies]) / len(movies), 1)", "def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)", "def get_mean(self):\n try:\n return sum(self.speakers.values()) / len(self.speakers)\n except (ZeroDivisionError):\n return 0.0", "def get_avg_score(game_id):\r\n\r\n scores = []\r\n game = Game.query.get(game_id)\r\n for rating in game.ratings:\r\n scores.append(rating.score)\r\n \r\n avg_score = sum(scores)/len(scores)\r\n \r\n \r\n return avg_score", "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)", "def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def calculate_score(player_cards):\n score = sum(player_cards)\n return score", "def mean(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n weighted_sum = sum(key * value for key, value in clean.items())\n return weighted_sum / total", "def mean(scores, is_rounded=True):\n num_scores = len(scores)\n mean_score = sum(scores)/num_scores if num_scores else 0\n return round(mean_score, 2) if is_rounded else mean_score", "def get_global_mean(self, ratings):\n total_ratings = []\n for user, movie, rating in ratings:\n total_ratings.append(rating)\n return sum(total_ratings) / len(total_ratings)", "def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return 
float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores", "def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp", "def get_score(self):\n rewards, resets = self.runner.get_rewards_resets()\n self.runner.clear_rewards_resets()\n assert rewards.ndim == 1 and resets.ndim == 1, (rewards.ndim, resets.ndim)\n assert rewards.shape[0] == resets.shape[0], (rewards.shape, resets.shape)\n scores = [0]\n for t in reversed(range(rewards.shape[0])):\n if resets[t]:\n scores.append(0)\n scores[-1] += rewards[t]\n return np.mean(scores)", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def em_mean(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical mean.')\n return self.__total_rewards / self.__total_pulls", "def mean(self):\n return self._mean", "def mean(self):\n return self._mean", "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def get_mean(self):\n return self.serie.mean()", "def mean(self):\n return self.x_guessed()", "def mean(self):\n return self.vmean", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def center_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n w, h = game.width / 2., game.height / 2.\n y, x = game.get_player_location(player)\n return float((h - y) ** 2 + (w - x) ** 2)", "def get_score(self, player):\n if player in self.player_scores:\n return self.player_scores[player]\n else:\n raise Exception(\"Player not in score list\")", "def mean_average_position():\n pass", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", 
"def getMean(self):\n return self.mean", "def mean(self):\n return self._lift(\"mean\")", "def mean(self):\n return self._summarize(lambda c: c.mean)", "def score_game(game_core):\n \n att_counter = [] \n np.random.seed(1) # fix RANDOM SEED so the experiment is reproducible \n random_array = np.random.randint(1,101, size=(1000))\n for number in random_array:\n att_counter.append(game_core(number))\n score = int(np.mean(att_counter))\n print(f\"Your algorithm guesses on average the number in {score} attempts.\")\n return(score)", "def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)", "def mean(self):\n return sum(p * x for x, p in self.items())", "def mean(self) -> float:\n return self._data.mean()", "def mean(points):\r\n\t\treturn sum(points)/len(points)", "def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "def get_mean(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n sum_ = fsum(data)\n n = cls.get_n(data)\n try:\n return float(sum_ / n)\n except ZeroDivisionError as exc:\n # for hyp score calculation, n = 0 for empty set is useful\n return 0", "def mean(self):\n return self.sum / self.sum_weights", "def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)", "def average(self):\n return self.summation() / self.count()", "def global_average_scores(self):\n\n return np.mean(self.average_scores_all_subjects(), axis=0)", "def _mean(items):\n return sum(items) / len(items)", "def enter_player_score(player):\n score = 2\n while score > 1 or score < 0:\n score = view.enter_player_view(player.player_first_name)\n try:\n score = float(score)\n except ValueError:\n score = 2\n view.message('erreur')\n continue\n else:\n if score < 0:\n view.message('negatif')\n continue\n if score > 1:\n view.message('superieur')\n continue\n player.total_score += score\n return score", "def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # Opponent\n opponent = game.get_opponent(player)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n board_score = no_moves - opp_moves\n score = board_score/rem_spaces\n\n return float(score)", "def mean(self):\n return self.mu", "def get_player_best_score(self, player):\n return self.get_highscores().filter(player=player).first()", "def mean(self):\n return self.aggregate(np.mean)", "def avg_e_score(self, entity):\n return 
float(entity['es']) / float(entity['count'])", "def mean(items):\n\n return float(sum(items)) / len(items)", "def mean(self):\n return self._mean_func", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def find_mean(values):\n return sum(values) / len(values)", "def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu", "def avg(self):\n if not self.committed_together:\n return 0\n\n return round(statistics.mean(self.committed_together))", "def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3) / 3", "def get_score(self):\n return float(self._score)", "def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average", "def get_score(self, player):\n\n df = pd.read_csv('RPSscores.csv')\n if not str(player) in df['Name'].to_dict().values():\n df.loc[len(df.index)] = [str(player),\n 0, 0, 0]\n player_index = int(df.loc[df['Name'] == str(player)].index[0])\n result = 'wins: ' + str(df.iloc[player_index, 1]) + '\\n' + \\\n 'draws: ' + str(df.iloc[player_index, 2]) + '\\n' + \\\n 'losses: ' + str(df.iloc[player_index, 3])\n return result", "def __score_by_avg_distance_from_players(self, source_iceberg, iceberg_to_score, simulation_data):\n ours_avg_distance, enemy_avg_distance = simulation_data.get_avg_distance_from_players(\n iceberg_to_score)\n\n score = (enemy_avg_distance - ours_avg_distance) * AVG_DISTANCE_FROM_PLAYERS_FACTOR_SCORE\n score += self.__score_by_strong_enemy_close_to_me(source_iceberg)\n return score", "def find_mean(values):\n mean = sum(values) / len(values)\n return mean", "def avg_act(self) -> float:\n return torch.mean(self.units.act)", "def mean(self):\n\n return time_stat(self, stat=\"mean\")", "def expected_score(hand, deck, verbose=False):\n remaining = list(set(deck) - set(hand))\n Ex_scores = []\n for flip in remaining:\n Ex_scores.append(Cribbage.score_hand(hand, flip, verbose))\n\n return mean(Ex_scores)", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score", "def getAvg(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['avg'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. 
/ self.pair.data['avg']", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def my_mean(x):\n return my_sum(x) / my_len(x)", "def mean(values):\n total = sum(values)\n len_values = len(values)\n return total/len_values", "def global_mean(self):\n if self._global_mean is None:\n self._global_mean = np.mean([r for (_, _, r) in\n self.all_ratings()])\n\n return self._global_mean", "def extract_score(self, json):\n\t\ttry:\n\t\t\treturn int(json['player_score'])\n\t\texcept KeyError:\n\t\t\treturn 0", "def get_overall_score(self, user):\n\n quizzes = ['iq', 'math', 'english']\n\n prev_scores = []\n new_scores = []\n\n for quiz in quizzes:\n quiz_obj = self.get_object(quiz)\n queryset = self.get_queryset(user, quiz_obj)\n\n try:\n new_scores.append(queryset[0].marks)\n prev_scores.append(queryset[1].marks)\n except:\n new_scores.append(queryset[0].marks)\n prev_scores.append(0)\n\n import statistics\n\n return statistics.mean(prev_scores), statistics.mean(new_scores)", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def get_mean_accuracy(self):\n return self.df.correct.mean()", "def custom_score_3(game, player):\n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: log of avaliable moves ratio\n return float(log(own_moves/opp_moves))", "def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average", "def total_player_score(self, total_player_score):\n\n self._total_player_score = total_player_score", "def mean(self):\n mean = sum(self.data)/self.size\n return mean", "def score(player, board):\n mine, theirs = 0, 0\n opp = Othello.opponent(player)\n for sq in Othello.squares():\n piece = board[sq]\n if piece == player: mine += 1\n elif piece == opp: theirs += 1\n return mine - theirs", "def get_avg(self):\r\n df = pd.read_csv(\"MonthlyRate.csv\")\r\n df = df[df.CurrencyCode == self.choice]\r\n mean = df.mean(axis=1).values[0]\r\n # Round the value to 4 d.p.\r\n mean = round(float(mean), 4)\r\n return mean", "def mean(values):\r\n return sum(values) / float(len(values))", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)", "def mrr(self, ranking):\n return np.mean(ranking)", "def getScore(self):\r\n return self._score", "def simulate(team, N=100):\n\n total_score = 0.0\n for player in team:\n simulation_score = []\n for i in range(N):\n simulation_score.append(get_player_score(player))\n total_score += np.mean(simulation_score)\n\n return total_score" ]
[ "0.7505888", "0.7412792", "0.7344528", "0.72962356", "0.713673", "0.7091335", "0.7069279", "0.68148583", "0.68021953", "0.6720858", "0.6720858", "0.6720858", "0.6719883", "0.670838", "0.6703316", "0.66747105", "0.66253746", "0.66124016", "0.65976983", "0.6594148", "0.65919864", "0.6573514", "0.65722066", "0.65618825", "0.65618825", "0.6551732", "0.6548579", "0.6547678", "0.65412486", "0.65159494", "0.65063536", "0.6480707", "0.64684874", "0.64585733", "0.64521945", "0.6451537", "0.64435214", "0.6441113", "0.64380276", "0.6428107", "0.6424702", "0.64117914", "0.6408284", "0.64044446", "0.63920444", "0.6381209", "0.6380091", "0.63783514", "0.6361627", "0.63554174", "0.63322556", "0.63310695", "0.63247186", "0.6324197", "0.63072085", "0.6302373", "0.629959", "0.6280819", "0.62681836", "0.6263999", "0.6260417", "0.6257054", "0.6256118", "0.625211", "0.6246885", "0.6246885", "0.6246885", "0.6243538", "0.62390256", "0.6229336", "0.622513", "0.62231684", "0.6223117", "0.6201804", "0.62006354", "0.619933", "0.61981016", "0.6188062", "0.61820376", "0.6170584", "0.6166933", "0.6164665", "0.61592174", "0.6146644", "0.61455953", "0.6141405", "0.6139262", "0.6129378", "0.6125055", "0.61237955", "0.6113834", "0.61092603", "0.61016065", "0.6097911", "0.6097911", "0.6097911", "0.60975283", "0.6093363", "0.60812145", "0.6074887" ]
0.61413795
86
Load the best num parameters from the given file.
def load_params(player_class, filename, num):
    parser = player_class.deserialize_parameters
    all_params = []
    with open(filename) as datafile:
        reader = csv.reader(datafile)
        for line in reader:
            score, rep = float(line[-2]), line[-1]
            all_params.append((score, rep))
    all_params.sort(reverse=True)
    best_params = []
    for score, rep in all_params[:num]:
        best_params.append(parser(rep))
    return best_params
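A minimal usage sketch for the loader above, assuming it runs in a module where csv is imported. DummyPlayer, the CSV layout, and the ";"-separated parameter encoding are illustrative assumptions, not anything this record specifies:

import csv

class DummyPlayer:
    # Hypothetical stand-in: deserialize_parameters is assumed to invert
    # whatever serialization produced the last CSV column.
    @staticmethod
    def deserialize_parameters(rep):
        return [float(x) for x in rep.split(";")]

with open("scores.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["gen-1", "0.42", "1.0;2.0"])  # ..., score, serialized params
    writer.writerow(["gen-2", "0.91", "3.0;4.0"])

best = load_params(DummyPlayer, "scores.csv", num=1)
# best == [[3.0, 4.0]] -- the single highest-scoring parameter set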
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadBestFit(self):\n bestFit, err = [], []\n row = 0\n with open(self.filename) as f:\n for lines in f.readlines():\n line = lines.strip(\"\\n\")\n data = line.split(\" \")\n if row == self.Npar:\n for kk in range(self.Npar):\n bestFit.append(float(data[kk]))\n if row == self.Npar+1:\n for kk in range(self.Npar):\n err.append(float(data[kk]))\n row += 1\n\n return bestFit, err", "def load(self, filename):\n param_dict = pickle.load(open('%s' % filename, 'rb'))\n self.learningrate = param_dict['learningrate']\n self.verbose = param_dict['verbose']\n self._loadsize = param_dict['loadsize']\n self._batchsize = param_dict['batchsize']\n self.momentum = param_dict['momentum']\n self.epochcount = param_dict['epochcount']\n self._momentum_batchcounter = param_dict['momentum_batchcounter']\n for param_name in param_dict['incs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._incs[p].set_value(param_dict['incs'][param_name])\n if self.rmsprop is not None:\n for param_name in param_dict['avg_grad_sqrs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._avg_grad_sqrs[p].set_value(param_dict['avg_grad_sqrs'][param_name])\n self._numbatches = self._loadsize // self._batchsize\n if self._inputs_type != 'function':\n self._numloads = self._inputs.shape[0] // self._loadsize\n if self._inputs_type == 'h5':\n self._inputs_theano.set_value(\n self._inputs.read(stop=self._loadsize))\n else:\n self._inputs_theano.set_value(self._inputs[:self._loadsize])", "def load_params_from_file(self, input_file):\n\n ### FILL IN ###", "def load_best_params(path):\n fold_results = []\n with open(path, 'r') as file:\n lines = file.readlines()\n fold_result = [[float(i) for i in line.strip('\\n,').split(',')] for line in lines]\n fold_results.append(fold_result)\n fold_results = np.concatenate(fold_results)\n fold_results = np.median(fold_results, axis=0)\n return fold_results", "def load_parameters(self, filename=None):\n if not filename:\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n params = numpy.load(filename)\n lasagne.layers.set_all_param_values(self.__network, params)", "def load_params():\n with open('params.p', mode='rb') as in_file:\n return pickle.load(in_file)", "def load(self, filename):\n with open(filename, 'r') as f:\n self.max_val = pickle.load(f)", "def load_params(num_sources, fname):\n path_p = '/import/c4dm-04/alvarado/results/sampling_covariance/'\n # path_p = '/home/pa/Desktop/sampling_covariance/'\n pitches = [\"60\", \"64\", \"67\"]\n hparam = []\n lengthscale = []\n variance = []\n frequency = []\n\n for i in range(num_sources):\n hparam.append(pickle.load(open(path_p + fname + \"_M\" + pitches[i] + \"_hyperparams.p\", \"rb\")))\n lengthscale.append(hparam[i][1].copy())\n variance.append(hparam[i][2].copy() / sum(hparam[i][2].copy()))\n frequency.append(hparam[i][3].copy())\n\n return lengthscale, variance, frequency", "def LoadParams(file):\n global globalParams\n global globalSections\n\n # check to see whether the file exists\n try: f = open(file, 'r')\n except IOError:\n fail('ERROR: parameter file does not exist: ', file)\n else:\n f.close()\n\n\n cp = ConfigParser.ConfigParser()\n cp.optionxform = str\n cp.read(file)\n\n globalSections = cp.sections()\n\n for sec in cp.sections():\n\n for opt in cp.options(sec):\n\n value = cp.get(sec, opt)\n \n # check in turn whether this is an interger, float, or string\n if (isInt(value)):\n globalParams[sec + \".\" + opt] = int(value)\n elif (isFloat(value)):\n globalParams[sec + \".\" + 
opt] = float(value)\n else:\n globalParams[sec + \".\" + opt] = value.strip()", "def loadParameters(self, parmfile=''):\n if not parmfile:\n raise IOError(\"You need to specify a parameter filename\")\n parmdir = os.getenv('ATMOSPHERE_PARAMETERS_DIR')\n parmpath = os.join.path(parmdir, parmfile)\n # Read from file\n with open(parmpath, 'r') as parmf:\n data = pickle.load(parmf)\n # Dictionary list\n self.modtran_visits = data[0]\n # Tuple list\n self.aerosol_visits = data[1]\n # seed value\n nruns = len(self.modtran_visits)\n print('Parameters for {1} runs computed with seed = {0}'.format(data[2],\n nruns))\n # Init transmission array\n self.initTransmissionArray(nruns)", "def get_top_n_params(filepath):\n params = []\n with open(filepath) as json_file:\n data = json.load(json_file)\n for top_n in data['accuracies'].keys():\n params.append(int(top_n))\n return params", "def load_parms(self, file):\n ret_val = self._load_parms(file.encode())\n return ret_val", "def loadParams(self):\n\n if len(self.filParams) < 3:\n return\n\n if not os.access(self.filParams, os.R_OK):\n return\n\n print(\"Priors.loadParams INFO: loading priors from %s\" \\\n % (self.filParams))\n\n # This is a little bit painful without just using something\n # more mature like astropy.table or pandas:\n hypers = np.genfromtxt(self.filParams, usecols=(1,2))\n\n # Convert the angular arguments to radians\n hypers[4] = np.radians(hypers[4])\n hypers[5] = np.radians(hypers[5])\n hypers[7] = np.radians(hypers[7])\n\n # transpose into hyperparams\n self.hyper = np.transpose(hypers)\n\n # now we need to read in the function names. This only really\n # has meaning for the mixed prior...\n strNames = np.genfromtxt(self.filParams, usecols=(0), dtype='str')\n self.mixedNames = list(strNames)\n\n # Finally, read in the name of the function\n with open(self.filParams, 'r') as rObj:\n for sLine in rObj:\n if sLine.find('#') < 0:\n continue\n if sLine.find('NAME') < 0:\n continue\n\n vLine = sLine.strip().split()\n self.namePrior = vLine[-1]", "def load_saved_params():\n st = 0\n for f in glob.glob(\"saved_params_*.npy\"):\n iter = int(op.splitext(op.basename(f))[0].split(\"_\")[2])\n if (iter > st):\n st = iter\n if st > 0:\n print \"Loading saved params %d\" % st\n with open(\"saved_params_%d.npy\" % st, \"r\") as f:\n params = pickle.load(f)\n state = pickle.load(f)\n return st, params, state\n else:\n return st, None, None", "def initialize(filename='params.yaml'):\n home_path = str(Path.home())\n project_path = 'Documents/SideProjects/sailboatsfactory'\n work_path = 'src/nn-core'\n params_path = join(home_path, join(project_path, work_path))\n yaml_file = join(params_path, filename)\n print(\"Reading parameters from:\", filename)\n with open(yaml_file, 'r') as f:\n my_params = load(f)\n my_params['x_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n my_params['y_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n\n raw = data.read(my_params)\n adjusted = adjust(raw, my_params)\n\n return adjusted, my_params", "def load(self, filename: str = default_config_file):\n self.config_file.read(filename)\n\n # Population\n self.POPULATION_SIZE = self.config_file.getint('Population', 'POPULATION_SIZE')\n\n # Individuals characteristics\n self.IND_INP_NUMBER = self.config_file.getint('Individual', 'IND_INP_NUMBER')\n self.IND_OUT_NUMBER = self.config_file.getint('Individual', 'IND_OUT_NUMBER')\n self.IND_MAX_NODES = self.config_file.getint('Individual', 'IND_MAX_NODES')\n self.INITIAL_GENERATION = self.config_file.get('Individual', 
'INITIAL_GENERATION')\n\n # Genes mutations\n self.ADD_GENE_PROB = self.config_file.getfloat('Mutations', 'ADD_GENE_PROB')\n self.MUT_GENE_PROB = self.config_file.getfloat('Mutations', 'MUT_GENE_PROB')\n self.REM_GENE_PROB = self.config_file.getfloat('Mutations', 'REM_GENE_PROB')\n\n self.ADD_NODE_PROB = self.config_file.getfloat('Mutations', 'ADD_NODE_PROB')\n self.REM_NODE_PROB = self.config_file.getfloat('Mutations', 'REM_NODE_PROB')\n\n self.MUT_GENE_AMP = self.config_file.getfloat('Mutations', 'MUT_GENE_AMP')\n self.WEIGHT_AMP = self.config_file.getfloat('Mutations', 'WEIGHT_AMP')\n\n self.GENE_PROB_FACT = self.config_file.getfloat('Mutations', 'GENE_PROB_FACT')\n self.NODE_PROB_FACT = self.config_file.getfloat('Mutations', 'NODE_PROB_FACT')\n self.AMP_MUT_FACT = self.config_file.getfloat('Mutations', 'AMP_MUT_FACT')\n\n # Elitism and extinction\n self.ELITISM_NUMBER = self.config_file.getint('Crossover', 'ELITISM_NUMBER')\n self.EXTINCTION_NUMBER = self.config_file.getint('Crossover', 'EXTINCTION_NUMBER')\n\n # Training conditions\n self.FITNESS_CRITERION = self.config_file.get('Training', 'FITNESS_CRITERION')\n self.FITNESS_THRESHOLD = self.config_file.getfloat('Training', 'FITNESS_THRESHOLD')\n\n # Check the loaded configuration\n self.__check_config()", "def load(self, filename):\n with open(filename, 'r') as f:\n self.pca.set_params(pickle.load(f))\n self.fit = True", "def load_params():\r\n return pickle.load(open('params.p', mode='rb'))", "def read_from(self, filename):\n if os.path.exists(filename):\n logger.info(\"Reading parameters from file {0}\".format(filename))\n cl, icoord, ispec, ireg, xori, yori, dx, dy, nx,\\\n ny, valex, snr, varbak = np.loadtxt(filename, comments='#', unpack=True)\n\n self.cl = cl\n self.icoordchange = int(icoord)\n self.ispec = int(ispec)\n self.ireg = int(ireg)\n self.xori = xori\n self.yori = yori\n self.dx = dx\n self.dy = dy\n self.nx = int(nx)\n self.ny = int(ny)\n self.valex = valex\n self.snr = snr\n self.varbak = varbak\n\n # Compute domain limits for later use\n self.xend = self.xori + (self.nx - 1) * self.dx\n self.yend = self.yori + (self.ny - 1) * self.dy\n\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')", "def load_params():\n file_name = filedialog.askopenfilename(\n filetypes=[(\"JSON\", \"*.json\")])\n if file_name:\n self.parent_class.classes[\"fractal\"].curve.load_from_file(\n file_name)\n self.parent_class.classes[\"fractal\"].curve.set_parent_parameters(\n )\n self.rules_frame_class.fill_entries_from_rules(\n self.parent_class.classes[\"fractal\"].rules)\n # fill the entries in rules input on load\n self.set_recursion_depth_entry(\n self.parent_class.classes[\"fractal\"].recursion_depth)\n self.set_base_length_entry(\n self.parent_class.classes[\"fractal\"].base_length)\n self.rules_frame_class.render_preview()", "def load_params(fname):\n parmsff = {}\n # FIXME: This might fail if a parameter name is larger than 50 characters.\n # FIXME: Maybe do this with the csv module instead?\n temparr = numpy.loadtxt(fname, dtype=([('a','S50'),('b','f8')]), delimiter=',') \n for i in temparr:\n parmsff[i[0]] = i[1]\n return parmsff", "def load_params_from_pickle_file(session: tf.Session,\n params_filename: Text) -> None:\n with open(params_filename, 'rb') as f:\n params = pickle.load(f)\n for var in tf.trainable_variables():\n session.run(var.assign(params[var.name]))", "def load_network_for_training(file_name):\n global training_set, start_round, 
start_digit\n try:\n with open (file_name, 'r') as f:\n w = np.load(f)\n w_min = np.load(f)\n w_max = np.load(f)\n a_plus = np.load(f)\n a_minus = np.load(f)\n b_plus = np.load(f)\n b_minus = np.load(f)\n v_th = np.load(f)\n training_set = np.reshape(np.load(f), (TRAINING_SIZE, N))\n start_round = np.load(f)\n start_digit = np.load(f)\n\n Output.set_states({'v_th' : v_th})\n S.set_states({\n 'w' : w,\n 'w_min' : w_min, \n 'w_max' : w_max, \n 'a_plus' : a_plus, \n 'a_minus' : a_minus, \n 'b_plus' : b_plus, \n 'b_minus' : b_minus\n })\n print start_round\n print start_digit\n print v_th\n except IOError as e:\n print \"error opening file: %s\" % e.strerror\n sys.exit()", "def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)", "def load_nn(self, filename):\n self.weights_and_biases = (np.load(filename, allow_pickle=True)).tolist()\n print('Weights and biases are loaded')", "def load_params_from_file(self, fn):\n f = file(fn, 'r')\n params = json.load(f)\n return params", "def load_examples(filename):\r\n data = np.load(filename)\r\n return data['examples'], int(data['srate'])", "def load_examples(filename):\r\n data = np.load(filename)\r\n return data['examples'], int(data['srate'])", "def load_model_params(self, full_path):\n \n print(\"Loading model parameters from %s\"%full_path)\n with open (full_path, 'rb') as f:\n \n self.theta = cPickle.load(f)\n \n if self.num_hidden == True or (self.num_hidden > 0):\n \n self.W, self.b, self.bhid = self.theta\n \n else:\n \n self.W, self.b = self.theta", "def load_params(param_file):\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params", "def LoadBatch(filename):", "def open_pickle(model_file):\n\n with open(model_file, mode='rb') as m_f:\n grid_search = pickle.load(m_f)\n result = grid_search.score(X_test, y_test)\n print(\"Employed Estimator:\", grid_search.get_params)\n print(\"--------------------\")\n print(\"BEST PARAMETER COMBINATION:\", grid_search.best_params_)\n print(\"Training Accuracy Result: %.4f\" %(result))\n return 'grid_search parameters loaded'", "def get_parameters(file_str):\n\n idx = [x.start() for x in re.finditer(\"_\", file_str)]\n\n # if file_str contains path to pkl files (eg '../results/*.pkl)\n # start reading file name from the last '/'\n if file_str.rfind(\"/res\") != -1:\n start_idx = file_str.rfind(\"/res\") + 4\n else:\n start_idx = 0\n\n n = int(file_str[start_idx : idx[0]])\n m = 
int(file_str[idx[0] + 1 : idx[1]])\n r = int(file_str[idx[1] + 1 : idx[2]])\n loss = str(file_str[idx[2] + 1 : idx[3]])\n sparsity = float(file_str[idx[3] + 1 : idx[4]])\n method = str(file_str[idx[4] + 1 : idx[5]])\n gamma = float(file_str[idx[5] + 1 : -4])\n\n return n, m, r, loss, sparsity, method, gamma", "def parse_params(filename):\n\n all_dicts = []\n\n with open(filename) as f:\n\n for line in f:\n\n params = line.strip().split()\n\n temp_dict = {\"die\": float(params[0])}\n\n temp_dict.update({i: float(params[i]) for i in range(1, 7)})\n\n all_dicts.append(temp_dict)\n\n f.close()\n\n return all_dicts", "def load_bestfitparams(self, param_array):\n\n self.params = OrderedDict()\n\n for i, element in enumerate(param_array):\n self.params[self._paramnames[i]] = element\n pass", "def get_params(self, paramFile):\n\n with open(paramFile, 'r') as f:\n titleLine = next(f)\n\n for line in f:\n p, i, v = line.split(\",\")\n\n self.params.update(p, v, i)", "def readparamfile(self,filename_): # 3\n res = self.__obj.readparamfile(filename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def load_params(self, path: str):\n DistributedWorker.load_params(self, path)\n\n params = torch.load(path)\n self.dqn.load_state_dict(params[\"dqn_state_dict\"])\n print(\"[INFO] loaded the model and optimizer from\", path)", "def load_data():\n fh = open('knapsack_big.txt')\n #fh = open('knapsack1.txt')\n data = fh.read()\n data_lines = data.split('\\n')\n knapsack_size, num_items = data_lines[0].split()\n print \"Read data for knapsack problem from file.\\nThe size of the knapsack is {} and the total number of items is {}\".format(knapsack_size, num_items)\n knapsack_size = int(knapsack_size)\n num_items = int(num_items)\n items = []\n for line in data_lines[1:-1]:\n value, weight = line.split()\n items.append((int(value),int(weight)))\n items = np.array(items)\n return items, knapsack_size, num_items", "def fromfile(self,file):\n self.d.update(params_file(file))", "def parse_parameters(filePath):\r\n numThreads, queue, affinity = 0,\"\",\"\"\r\n \r\n for line in open(filePath):\r\n if \"spec.omp2001.size:\" in line:\r\n if get_last_column_number(line)==\"test\":\r\n print(\"IS TEST SIZE!!1 : \" + filePath)\r\n \r\n if \"spec.omp2001.sw_threads:\" in line:\r\n numThreads = int(get_last_column_number(line))\r\n \r\n if \"spec.omp2001.mach:\" in line:\r\n machine = line.split(\" \")[-1]\r\n columns = machine.split(\".\")\r\n \r\n queue = columns[0]\r\n affinity = columns[1]\r\n \r\n return numThreads, queue, affinity", "def ParseParameters(self, parameters_file_name):\n\n print(datetime.now().strftime(\"%H:%M:%S\") + \": Reading file \" + parameters_file_name + \"...\")\n\n parameters_file = open(parameters_file_name, \"r\")\n\n for line in parameters_file:\n line_parts = re.split(r'\\t+', line) # split elements of the line separated by tabs\n if(len(line_parts) == 2):\n self.AddParameter(line_parts[0], float(line_parts[1].strip()))\n else: # we want 2 elements per line\n raise Exception(\"Invalid format for the line \" + line)\n \n parameters_file.close()\n\n # check that all the mandatory parameters are correctly set\n if(self.npop1_1 == \"N/A\" or\n self.npop1_2 == \"N/A\" or\n self.npop2_1 == \"N/A\" or\n self.npop2_2 == \"N/A\" or\n self.t_div == \"N/A\" or\n self.npop_a == \"N/A\" or\n self.mu == \"N/A\"):\n raise Exception(\"One of mandatory parameters is missing!\")\n\n # check that introgression-related parameters are correctly set (either both equal 1 or 
both equal 0)\n if(self.t_i == 1 and self.p_i == 1): # introgression\n self.number_of_parameters = 9\n elif(self.t_i == 0 and self.p_i == 0): # no introgression\n self.number_of_parameters = 7\n else:\n raise Exception(\"Inconsistent values provided for parameters T_I and P_I!\")\n\n print(datetime.now().strftime(\"%H:%M:%S\") + \": Reading file done.\")", "def load_parameter_file(filename: str) -> Dict:\n assert isinstance(filename, str) and len(filename) > 0\n param_dict = {}\n # TODO implement search through possible parameter config file locations\n # Open up the CSV file for reaching\n with open(filename) as f:\n csvreader = csv.DictReader(f, delimiter='\\t')\n\n accepted_field_names = {'mechanism': ['mechanism', 'mechanism_id'],\n 'param_name': [\"parameter_name\", \"parameter\", \"param\", \"param_name\"],\n 'part_id': ['part_id', 'part'],\n 'param_val': [\"val\", \"value\", \"param_val\", \"parameter_value\"]\n }\n\n field_names = Parameter._get_field_names(csvreader.fieldnames, accepted_field_names)\n\n if field_names['param_name'] is None:\n warn('No param name column was found, could not load parameter')\n return param_dict\n if field_names['mechanism'] is None:\n no_mechism_column = True\n else:\n no_mechism_column = False\n\n if field_names['part_id'] is None:\n no_part_id_column = True\n else:\n no_part_id_column = False\n\n for row in csvreader:\n # TODO what about integers? float might cause numerical drift in simulations, e.g. cooperativity=2.001\n param_value = float(row[field_names['param_val']])\n # TODO test all these cases!\n if row[field_names['param_name']] is None or len(row[field_names['param_name']]) == 0:\n pass\n elif no_mechism_column and no_part_id_column:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n elif no_mechism_column and no_part_id_column is False:\n if row[field_names['part_id']] is not None and len(row[field_names['part_id']]) > 0:\n part_id = row[field_names['part_id']]\n param_name = row[field_names['param_name']]\n param_dict[(part_id, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n elif no_part_id_column and no_mechism_column is False:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n else:\n if row[field_names['part_id']] is not None and len(row[field_names['part_id']]) > 0:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n part_id = row[field_names['part_id']]\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, part_id, param_name)] = param_value\n else:\n part_id = row[field_names['part_id']]\n param_name = row[field_names['param_name']]\n param_dict[(part_id, param_name)] = param_value\n else:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n\n return param_dict", "def load_params(params_filename: str) -> Dict:\n \n # If no params filename is specified, return the default 
parameter setting.\n if not params_filename:\n return RunParams()\n\n return RunParams(**load_json(params_filename))", "def ReadParameterFile(pf):\n f = open(pf, \"r\")\n pf_dict = SetDefaultParameterValues()\n for line in f:\n if not line.split(): \n continue\n if line.split()[0][0] == \"#\": \n continue\n \n # This will prevent crashes if there is not a blank line at the end of the parameter file\n if line[-1] != '\\n': \n line += '\\n'\n \n # Cleave off end-of-line comments.\n line = line[:line.rfind(\"#\")].strip()\n \n # Read in the parameter name and the parameter value(s).\n parname, eq, parval = line.partition(\"=\")\n \n # Else, actually read in the parameter \n try: \n parval = float(parval)\n except ValueError:\n if re.search('/', parval): # For directory with more than one level\n parval = str(parval.strip())\n elif parval.strip().isalnum(): \n parval = str(parval.strip())\n elif parval.replace('_', '').strip().isalnum():\n parval = parval.strip()\n elif parval.partition('.')[-1] in ['dat', 'hdf5', 'h5', 'txt']:\n parval = str(parval.strip())\n else:\n parval = parval.strip().split(\",\")\n tmp = [] \n if parval[0][0] == '(':\n for element in parval: \n if element.strip(\" (,)\").isdigit(): \n tmp.append(float(element.strip(\"(,)\")))\n else: \n tmp.append(element.strip(\" (,)\"))\n parval = tuple(tmp) \n elif parval[0][0] == '[':\n for element in parval: \n tmp.append(float(element.strip(\"[,]\")))\n parval = list(tmp)\n else:\n print(parname, parval)\n raise ValueError('The format of this parameter is not understood.')\n \n pf_dict[parname.strip()] = parval\n \n return pf_dict", "def load_params(self, params):\n params.cp_latest_filename = \"latest_checkpoint_v\"+params.version\n params.cp_load_latest_filename = \"latest_checkpoint_v\"+params.cp_load_ver\n params.cp_load_dir = params.out_dir + params.cp_load_name+ \"/checkpoints/\"\n if not hasattr(params, \"model_out_dir\"):\n params.model_out_dir = params.out_dir + params.model_name\n params.cp_save_dir = params.model_out_dir + \"/checkpoints/\"\n params.log_dir = params.model_out_dir + \"/logfiles/\"\n params.save_dir = params.model_out_dir + \"/savefiles/\"\n params.disp_dir = params.model_out_dir + \"/vis/\"\n params.num_pixels = int(np.prod(params.data_shape))\n self.params = params\n self.params_loaded = True", "def load_params(exe, prog, path, ignore_params=[]):\n if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):\n raise ValueError(\"Model pretrain path {} does not \"\n \"exists.\".format(path))\n\n logger.info('Loading parameters from {}...'.format(path))\n\n ignore_set = set()\n state = _load_state(path)\n\n # ignore the parameter which mismatch the shape\n # between the model and pretrain weight.\n all_var_shape = {}\n for block in prog.blocks:\n for param in block.all_parameters():\n all_var_shape[param.name] = param.shape\n ignore_set.update([\n name for name, shape in all_var_shape.items()\n if name in state and shape != state[name].shape\n ])\n\n if ignore_params:\n all_var_names = [var.name for var in prog.list_vars()]\n ignore_list = filter(\n lambda var: any([re.match(name, var) for name in ignore_params]),\n all_var_names)\n ignore_set.update(list(ignore_list))\n\n if len(ignore_set) > 0:\n for k in ignore_set:\n if k in state:\n logger.warning('variable {} not used'.format(k))\n del state[k]\n fluid.io.set_program_state(prog, state)", "def read_params(fname):\n f = open(fname, 'r')\n par = {} #output\n for i in range(10): # esta dentro de las primeras 10 lineas\n l = 
f.readline().split()\n #print \" ---> \", l\n number = u'%s' % l[-1] # presumably a number\n if not number.replace('.','').replace('-','').isnumeric():\n if l[0]=='#####':\n break\n else:\n continue # we proceed ONLY IF this is numeric string\n #print ' FIRST: ', l[0]\n if l[0]=='#####':\n #print \"IM I HERE????\"\n break # end of header\n\n name = l[1][:-1] # l[0] es '#', y -1 para comernos el \":\"\n value = np.float(l[2]) # l[2] es el valor\n par[name] = value\n\n return par", "def load_params_file(filename):\n with open(filename, 'r') as f:\n params = yaml.safe_load(f)\n return params", "def loadKey (self, filename=\"pub.key\"):\n try:\n key_file = open(filename, \"r\")\n data = key_file.read()\n aux = data.split(\";\")\n self.n = int(aux[0])\n self.n_sq = int(aux[1])\n self.g = int(aux[2])\n except:\n raise Exception(\"could not load key from file: \" + filename)", "def load_gold(train_gold_file):\n with codecs.open(train_gold_file, 'r', 'utf-8') as f_in:\n lines = [line.strip().split('\\t') for line in f_in]\n\n train_gold = { (w1, w2) : {} for (w1, w2, paraphrase, score) in lines }\n for w1, w2, paraphrase, score in lines:\n train_gold[(w1, w2)][paraphrase] = float(score)\n\n return train_gold", "def load_yaml_params(self, params_file):\n self._update_params(params_file)", "def load_cls_params(self):\n with open('models/Final/linear_svc.p', 'rb') as model_file:\n model = pickle.load(model_file)\n self.svc = model['svc']\n self.X_scaler = model['X_scaler']\n self.parameters = model['parameters']\n\n print(self.parameters)", "def load(self,filename = ''):\n if filename == '':\n filename = filedialog.askopenfilename()\n data = np.load(filename)\n\n try:\n self.amplifier = data['amplifier'].item()\n\n except:\n pass\n\n self.matchingnetwork = data['matchingnetwork'].item()\n self.hydro = data['hydro'].item()\n self.headerversion = data['headerversion'].item()\n self.depth = data['depth']\n self.samplingfreq = data['samplingfreq'].item()\n self.voltage = data['voltage']\n self.pulselength = data['pulselength'].item()\n self.pulserep = data['pulserep'].item()\n self.cfreq = data['cfreq']\n self.angle = data['angle']\n self.bursts = data['bursts'].item()\n self.hydoutput = data['hydoutput']\n self.txdr = data['txdr'].item()\n try:\n self.amplify = data['amplify'].item()\n except:\n print(\"Amplify variable not available\")\n try:\n self.operator = data['operator'].item()\n except:\n print(\"Operator variable not available\")", "def load_parameters(self):\n json_data = open(\"param.json\")\n data = json.load(json_data)\n self.items = data[\"items\"]\n self.pollInterval = self.items[0]['poll_interval']", "def read(filename):\n\n def to_int(data):\n \"\"\"Return dict with integer keys instead of strings.\"\"\"\n return {int(k): data[k] for k in sorted(data.keys())}\n\n with open(filename, \"r\") as f:\n parameters = json.load(f)\n\n keys = {\"extra_options\", \"more_options\", \"selections\"}\n for key in keys:\n if key in parameters.keys():\n parameters[key] = to_int(parameters[key])\n\n return parameters", "def load_standard_parameters(self):\n paradic = {'x':'0',\n 'y':'0',\n 'n_oct':'8',\n 'n_spo':'3',\n 'sigma_min':'0.8',\n 'delta_min':'0.5',\n 'sigma_in':'0.5',\n 'C_DoG':'0.015',\n 'C_edge':'10',\n 'n_bins':'36',\n 'lambda_ori':'1.5',\n 't':'0.8',\n 'n_hist':'4',\n 'n_ori':'8',\n 'lambda_descr':'6',\n 'flag_match':'1',\n 'C_match':'0.6'}\n self.cfg['param']['paradic'] = paradic\n self.cfg.save()", "def read_file(self, filename=None):\n print(f'reading file')\n\n if filename is None:\n 
filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read second line in file to get number of variables\n f.seek(0)\n f.readline()\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= (nvars_file + 2)\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i+1\n except KeyError:\n pass\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n base_r[i] = variables[2]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[i, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def loadParams(self, paramsFile):\n dataDir = os.path.abspath(os.path.join(radiomics.__path__[0], 'schemas'))\n schemaFile = os.path.join(dataDir, 'paramSchema.yaml')\n schemaFuncs = os.path.join(dataDir, 'schemaFuncs.py')\n c = pykwalify.core.Core(source_file=paramsFile, schema_files=[schemaFile], extensions=[schemaFuncs])\n params = c.validate()\n\n inputImages = params.get('inputImage', {})\n enabledFeatures = params.get('featureClass', {})\n kwargs = params.get('setting', {})\n\n self.logger.debug(\"Parameter file parsed. Applying settings\")\n\n if len(inputImages) == 0:\n self.inputImages = {'Original': {}}\n else:\n self.inputImages = inputImages\n\n self.logger.debug(\"Enabled input images: %s\", self.inputImages)\n\n if len(enabledFeatures) == 0:\n self.enabledFeatures = {}\n for featureClassName in self.getFeatureClassNames():\n self.enabledFeatures[featureClassName] = []\n else:\n self.enabledFeatures = enabledFeatures\n\n self.logger.debug(\"Enabled features: %s\", enabledFeatures)\n\n # Set default settings and update with and changed settings contained in kwargs\n self.kwargs = self._getDefaultSettings()\n self.kwargs.update(kwargs)\n\n self.logger.debug(\"Settings: %s\", kwargs)", "def read_mesa(self, filename=None):\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read first line in file to get number of parameters\n f.seek(0)\n l = f.readline()\n nparams_file = int(l.split(' ')[-1])\n\n # skip lines 2-4\n for i in range(3):\n f.readline()\n\n # the fifth line will give us the number of variables\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= 6\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n logR_idx = -1\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n if var_name_file == 'logR':\n logR_idx = i\n continue\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i\n except KeyError:\n 
var_idx_map[self.idx['spec'] - 1 + network_module.network_species_index(var_name_file.lower())] = i\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n # need to reverse the inputs file here\n\n n = npts_file - i - 1\n\n base_r[n] = R_solar * 10**variables[logR_idx]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[n, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def load_expected(filename):\n\n all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]])\n data = numpy.loadtxt(filename, dtype='float64', skiprows=1)\n return all_labels, data[:,0].astype('int64'), data[:,1:]", "def __load(self, filename):\n values = {}\n training = []\n filecontent = loader.load('csv/' + filename + '.csv').generate().strip().split('\\n')\n i = 0\n for r in filecontent:\n if i < 1000: # limit size of datasets to 1,000 records\n training.append( r )\n i += 1\n else:\n break\n\n values['training'] = '\\n'.join(training)\n values['n_training'] = int(3. * len(training) / 4.)\n values['description'] = self.possible_values.get(filename, ('', 10))[0]\n values['n_trees'] = self.possible_values.get(filename, ('', 10))[1]\n self.cached_values[ filename ] = json.dumps(values)", "def readParameterfile(self, filename):\n return None", "def from_file(cls, fn):\n dct = store.get_dict(fn, 'trainalgorithm')\n return cls.from_dict(dct)", "def load(self, filename='test'):\n file = open(filename+'.txt','r')\n loaded_chain = pickle.load(file)\n \n self.N = loaded_chain.N\n self.m = loaded_chain.m\n self.R = loaded_chain.R\n self.a = loaded_chain.a\n self.force = loaded_chain.force\n self.Delta = loaded_chain.Delta\n self.n = loaded_chain.n\n self.beta = loaded_chain.beta\n self.m0 = loaded_chain.m0\n self.mu = loaded_chain.mu\n \n file.close()", "def load_file(filename, default_namespace=None, verbose=False):\n if not filename or filename == '-':\n f = sys.stdin\n if verbose:\n print(\"reading parameters from stdin\")\n return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose)\n else:\n if not os.path.isfile(filename):\n raise RosParamException(\"file [%s] does not exist\"%filename)\n if verbose:\n print(\"reading parameters from [%s]\"%filename)\n with open(filename, 'r') as f:\n return load_str(f.read(), filename, default_namespace=default_namespace, verbose=verbose)", "def load(self, ckpt_file: str, from_measurement: str):\n ckpt = torch.load(ckpt_file)\n self.best_performance_dict = ckpt[BEST_PERFORMANCE_DICT]\n self.best_model_dict = ckpt[BEST_MODEL_DICT]\n self.best_optimizer_dict = ckpt[BEST_OPTIMIZER_DICT]\n self.best_epoch_dict = ckpt[BEST_EPOCH_DICT]\n\n return self.best_model_dict[from_measurement], self.best_epoch_dict[from_measurement], self.best_optimizer_dict[\n from_measurement], self.best_performance_dict[from_measurement],", "def load(cls, load_folder: Path | str) -> \"Parameters\":\n serializer = serializer_factory(fmt=SerializerEnum.NUMPY)\n return serializer.load(class_obj=cls, folder_path=load_folder)", "def _load_parameter(self):", "def parse_param_file(params_list, param_file):\n\n namespace = \"problem\"\n\n try:\n f = open(param_file)\n except FileNotFoundError:\n sys.exit(f\"write_probdata.py: ERROR: file {param_file} does not exist\")\n\n line = get_next_line(f)\n\n err = 0\n\n while line and not err:\n\n # this splits the line into separate fields. 
A field is a\n # single word or a pair in parentheses like \"(a, b)\"\n fields = re.findall(r'[\\w\\\"\\+\\./\\-]+|\\([\\w+\\./\\-]+\\s*,\\s*[\\w\\+\\.\\-]+\\)', line)\n\n if len(fields) < 3:\n print(\"write_probdata.py: ERROR: missing one or more fields in parameter definition.\")\n err = 1\n continue\n\n name = fields[0]\n dtype = fields[1]\n default = fields[2]\n\n current_param = rp.Param(name, dtype, default,\n namespace=namespace)\n\n # optional field: in namelist\n try:\n in_namelist_in = fields[3]\n if in_namelist_in in [\"y\", \"Y\"]:\n in_namelist = True\n else:\n in_namelist = False\n\n except IndexError:\n in_namelist = False\n\n\n # optional field: size\n try:\n size = fields[4]\n except IndexError:\n size = 1\n\n current_param.in_namelist = in_namelist\n current_param.size = size\n\n # check to see if this parameter is defined in the current\n # list if we delete the old one and take the new one (we\n # assume that later files automatically have higher\n # priority)\n p_names = [p.name for p in params_list]\n try:\n idx = p_names.index(current_param.name)\n except ValueError:\n pass\n else:\n params_list.pop(idx)\n\n if not err == 1:\n params_list.append(current_param)\n\n line = get_next_line(f)\n\n return err", "def load_params_from_file(path):\n save_dict = mx.nd.load(path)\n arg_params = {}\n aux_params = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n arg_params[name] = v\n if tp == 'aux':\n aux_params[name] = v\n return arg_params, aux_params", "def find_local_file(files, traj_num, train_or_val):\n least_num = np.Inf\n for f in files:\n name_list = re.split(\"[-.]\", f)\n if train_or_val in name_list:\n for tmp in name_list:\n if tmp.isdigit():\n num = int(tmp)\n if traj_num <= num < least_num:\n least_num = num\n return least_num", "def read_Hrrr(filename, parameters = [''],max = False):\n \n myfile = pygrib.open(filename) \n parameterlist = ['Geopotential Height','Temperature','Relative humidity','Dew point temperature',\n 'Specific humidity','Vertical velocity','U component of wind','V component of wind',\n 'Absolute vorticity','Cloud mixing ratio','Cloud Ice','Rain mixing ratio','Snow mixing ratio',\n 'Graupel (snow pellets)'] \n \n if parameters != ['']:\n for i in range(len(parameters)):\n x = parameterlist.count(parameters[i])\n if x == 0: \n print 'requested parameter not in list'\n print parameters[i] \n return 0\n parameterlist = parameters[:]\n \n \n data = []\n grb = myfile.select(name = parameterlist[0]) \n grb_cube = grb_to_grid(grb)\n dataloc = np.array(grb[0].latlons())\n datah = grb_cube['levels']\n units = []\n \n for p in parameterlist:\n grb = myfile.select(name = p)\n grb_cube = grb_to_grid(grb)\n if not max:\n data.append(grb_cube['data'])\n else:\n data.append(grb_cube['data'].max(axis=0))\n units.append(grb_cube['units'])\n \n return [data,parameterlist,datah,dataloc,units]", "def readParamFromFile(self, file, sect):\r\n f = configparser.ConfigParser()\r\n f.read(file)\r\n # s = f.sections()\r\n # print(s)\r\n\r\n self.m_param = dict(f.items(sect))\r\n print(self.m_param)\r\n # print(len(self.m_param))\r", "def load(self, filename, path=\".\"):\n if filename is None:\n if self.verbose:\n print(\"Neural Network Model Class - Save Function: No file name\")\n return -1\n\n #trn_params\n self.trn_params = NeuralNetworkParams()\n self.trn_params.load('%s_trn_params.pickle'%(filename),path=path)\n\n #model\n json_file = open(\"%s/%s_model.json\"%(path,filename), 'r')\n loaded_model_json = json_file.read()\n 
json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.load_weights(\"%s/%s_model.h5\"%(path,filename))\n self.model = loaded_model\n self.trained = True\n #trn_desc\n self.trn_desc = None\n self.trn_desc = pickle.load(open(\"%s/%s_trn_desc.pickle\"%(path,filename), \"rb\"))", "def get_n_best(self):\n pass", "def import_parameters(self, file_name):\n parameters = []\n\n with open(file_name) as in_file:\n parameters = json.load(in_file)\n\n if parameters:\n self.put_parameters(parameters)", "def load_checkpoint(model, scoresfile):\n # load data from scores file\n X = np.loadtxt(scoresfile, delimiter=',')\n\n # separate into points and scores\n scores = X[:,-1]\n points = X[:,:-1]\n\n # set best hyperparameters based on best scores\n ind = np.argmin(scores)\n best_overall_point = points[ind]\n model.decode(best_overall_point)\n\n return model, points, scores", "def read_start_params(path_or_database):\n database = load_database(**_process_path_or_database(path_or_database))\n optimization_problem = read_last_rows(\n database=database,\n table_name=\"optimization_problem\",\n n_rows=1,\n return_type=\"dict_of_lists\",\n )\n start_params = optimization_problem[\"params\"][0]\n return start_params", "def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_hidden = data['wh']\n self.W_hidden_to_output = data['wo']", "def load_net(self, file_path):\n \twith open(file_path,'r') as f:\n \t\tparams = json.loads(f.read())\n \t#\n \tweights = np.array(params['weights'])\n \tbiases = np.array(params['biases'])\n \t# Since ann.ANN needs to be initialized with some data, which\n \t# we dont have yet, we are gonna make a canvas array with\n \t# the correct dimensions from the weights\n \tfake_data = np.array([np.zeros(len(weights[-1]))])\n \t# initialize stacked net\n \tself.init_stacked_net(fake_data)\n \t# fill in weights and biases\n \tself.stacked_net.weights = weights\n \tself.stacked_net.biases = biases", "def load(self):\n file_name = common.RANK_FILE % (self.week.season.name, self.week.num)\n with open(file_name, 'r') as rank_file:\n for record in rank_file:\n team, score = common.parse(record)\n self.score[team] = score", "def get_params(path = 'INPUT/conv_params'):\n\n # cd to Input, read the conv_params file in and pass each line to file reader\n list = file_reader(path)\n\n Ecuts = list[0] # first element returned from filereader is the energies\n start = int(Ecuts[0][0]) # the first element of this is the lower energy to start from. 
convert to integer for maths\n multiplier = int(Ecuts[1][0]) # middle element is the step size\n end = int(Ecuts[2][0]) # last element is upper bound on energy\n E_range = (end - start)//multiplier +1 # the number of energies you will create\n Es = [i*multiplier for i in range(E_range)] # take steps in the E_range of step size multiplier\n Ecuts = [[str(i+start)] for i in Es] # add the start energy to all these steps to shift them to correct energies\n # convert the numbers to strings for ease of file writing later\n\n kpts = list[1] # kpoints list is first element returned\n def_E = list[2] # default energy\n def_k = list[3] # default kpoints\n params = Settings(Ecuts, kpts, def_E, def_k) # create the settings object\n\n return params # return the object", "def importParameterBoundaryFile(paramfilename):\n try:\n infile = open(paramfilename, \"r\")\n except IOError:\n\t print \"Unable to open file %s\" % (paramfilename)\n\t raise IOError(\"Unable to open parameter boundary file %s\" % (paramfilename))\n lines = infile.readlines()\n infile.close()\n\n # Parse\n paramdict = {}\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n continue\n elif line[0] == '#':\n continue\n else:\n terms = line.split()\n name = terms[0]\n value = float(terms[1])\n parmin = float(terms[2])\n parmax = float(terms[3])\n stepsize = float(terms[4])\n \n paramdict[name] = [value, parmin, parmax, stepsize]\n # ENDIF\n # ENDFOR\n\n return paramdict", "def on_load_parameters(self, filename=None):\n if filename is None:\n path, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Choose a parameter file.\", \"\", \"JSON Files (*.json)\")\n else:\n path = filename\n\n if path == '' or path is None:\n return\n\n self.param_file = path\n\n with open(self.param_file, 'r') as f:\n params = json.loads(f.read())\n\n obj_points = params['object positions']\n cam_pos = params['camera positions']\n dist_coeff = params['distortion coefficients']\n\n for p in obj_points:\n x, y = p['x'], p['y']\n lat, lon, alt = p['lat'], p['lon'], p['alt']\n self.add_known_image_points((x, y), latlonalt=(lat, lon, alt))\n\n self.camera_lat_line.setValue(float(cam_pos['lat']))\n self.camera_lon_line.setValue(float(cam_pos['lon']))\n self.camera_alt_line.setValue(float(cam_pos['alt']))\n self.cx_line.setValue(float(cam_pos['cx']))\n self.cy_line.setValue(float(cam_pos['cy']))\n self.phi_line.setValue(float(cam_pos['phi']))\n self.theta_line.setValue(float(cam_pos['theta']))\n self.psi_line.setValue(float(cam_pos['psi']))\n\n self.k1_line.setValue(float(dist_coeff['k1']))\n self.k2_line.setValue(float(dist_coeff['k2']))\n self.k3_line.setValue(float(dist_coeff['k3']))\n self.p1_line.setValue(float(dist_coeff['p1']))\n self.p2_line.setValue(float(dist_coeff['p2']))\n\n self.statusBar().showMessage(f'Loaded parameters from {self.param_file}')", "def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_output = data['wo']", "def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): 
#replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()", "def load_regain_values(filename):\n gain_lines = open(filename,\"r\").readlines()\n gain_lines = [l.split() for l in gain_lines if len(l)>0 and l[0]!='#'] #remove comments and blanks\n tubes,gain_vals = zip(*[(int(l[0]),float(l[1])) for l in gain_lines])\n return Array(gain_vals)", "def load(self, filename):\n self.model.load_weights(filename)", "def but_load_net(self):\n a = tk.filedialog.askopenfilename(filetypes = [('NN file',['*.csv','*.pt'])])\n self.nn_obj=load_nn(a)", "def load_ith_from_pgn(filename, i):\n file_tmp = open(filename) # we reload pgn\n\n game = chess.pgn.read_game(file_tmp)\n for _ in range(i): # skip to i-th game in pgn\n game = chess.pgn.read_game(file_tmp)\n\n return game", "def file_cost(filename):\r\n return grid_cost(read_grid(filename))", "def file_cost(filename):\r\n return grid_cost(read_grid(filename))", "def _read_from_file(self, filename):\n ff = fits.open(filename)\n # Load the normalized intensity\n self.norm_int = ff[0].data\n # Load the other parameters\n self.lam = ff[1].data['lam']\n self.lam_unit = ff[1].columns['lam'].unit\n self.theta = ff[2].data['theta']\n self.taux = ff[3].data['taux']\n # Set halo type\n self.description = filename", "def load():\n\n #: the file passed by the user in the post request\n file = request.files[\"file\"]\n\n # ensure that file exists\n if file == None:\n return BadRequest(\"No file given\")\n\n # ensure that file is readable\n try:\n file = json.loads(file.read())\n except UnicodeDecodeError:\n return BadRequest(\"Invalid file\")\n \n # ensure that the file can be indexed\n try:\n points = file[\"points\"]\n reg_json = file[\"reg\"]\n except TypeError:\n return BadRequest(\"Invalid file\")\n\n global no_dimensions\n #: number of dimensions\n no_dimensions = file[\"no_dimensions\"]\n\n\n # give each point an annotation weight if it does not already have one\n for i in range(0, len(points)):\n if points[i].get(\"annot_weight\") == None:\n points[i][\"annot_weight\"] = random.uniform(0, 1)\n\n global reg \n # regression model loaded from file\n if not reg_json:\n reg = jsonpickle.loads(reg_json)\n\n global tsne \n tsne = points\n \n return {\"points\": points, \"reg\": reg != None, \"no_dimensions\": no_dimensions}", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def load(file_path):\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n ocp = OptimalControlProgram(**data[\"ocp_initilializer\"])\n for key in data[\"versions\"].keys():\n if data[\"versions\"][key] != ocp.version[key]:\n raise RuntimeError(\n f\"Version of {key} from file ({data['versions'][key]}) is not the same as the \"\n f\"installed version ({ocp.version[key]})\"\n )\n out = [ocp, data[\"sol\"]]\n if \"sol_iterations\" in data.keys():\n out.append(data[\"sol_iterations\"])\n return out", "def load_priors(file_name):\n with open(file_name, \"r\") as fp:\n priors = json.load(fp)\n return priors", "def _grab_injection_parameters_from_file(\n self, path, cls=None, add_nans=True, **kwargs\n ):\n if cls is None:\n from pesummary.core.file.injection import Injection\n cls = Injection\n data = cls.read(path, **kwargs).samples_dict\n for i in self.parameters:\n if i not in data.keys():\n data[i] = float(\"nan\")\n return data", "def load_from_csv(self, 
file_name, parameters_size):\n \n with open(file_name) as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n simulation_name = str(row[0:1])[2:-2] # Remove brakets and quotation marks \n result_name = str(row[1:2])[2:-2] # Remove brakets and quotation marks \n params = np.float64(row[2:parameters_size+2])\n res = np.float64(row[parameters_size+2:])\n self.add_sample(parameters = params, simulation_name = simulation_name, result = res, result_name = result_name)" ]
[ "0.6628179", "0.6617545", "0.6580962", "0.64695466", "0.64075434", "0.62310404", "0.6230305", "0.62199324", "0.61862564", "0.61166227", "0.6109736", "0.6079469", "0.5998742", "0.5996682", "0.5978318", "0.59457254", "0.5927295", "0.5894868", "0.5883919", "0.58676887", "0.58008945", "0.5777804", "0.5774859", "0.57329774", "0.57253176", "0.57242113", "0.5714112", "0.5714112", "0.5677016", "0.56704485", "0.5660237", "0.5659168", "0.56574225", "0.5655625", "0.56267446", "0.5591098", "0.5565912", "0.55475765", "0.55319726", "0.5527887", "0.5524502", "0.55112755", "0.5508767", "0.5506054", "0.5504672", "0.54858667", "0.5484278", "0.54817456", "0.5478377", "0.54496145", "0.54422295", "0.5438514", "0.54356045", "0.5433725", "0.54336774", "0.54327327", "0.5426396", "0.5413945", "0.541183", "0.5373971", "0.5362627", "0.5362512", "0.5361792", "0.53550553", "0.5347131", "0.5331283", "0.5326632", "0.53262365", "0.5325429", "0.5321224", "0.5315232", "0.53093374", "0.5290993", "0.5290814", "0.5288557", "0.5282147", "0.5278578", "0.5272757", "0.5266882", "0.5266812", "0.5265724", "0.52655745", "0.5262667", "0.52547544", "0.52374464", "0.5228241", "0.5221992", "0.5221448", "0.5216577", "0.5211059", "0.52057576", "0.52033854", "0.52033854", "0.51969016", "0.5194933", "0.51948386", "0.519289", "0.51912135", "0.51835924", "0.518129" ]
0.73936313
0
Update UserCreate instance firstname
def update_firstname(state: UserCreate, firstname: str) -> None:
    state.name.first = firstname
    state.slug = slugify(f"super-user: {state.name.first} {state.name.last}")
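A self-contained sketch of how this might be exercised. The record does not show UserCreate, its nested name model, or where slugify comes from, so the dataclass shapes below are guesses consistent with the attribute accesses, and the import assumes the python-slugify package:

from dataclasses import dataclass, field
from slugify import slugify  # assumed dependency: python-slugify

@dataclass
class Name:
    first: str = ""
    last: str = ""

@dataclass
class UserCreate:
    name: Name = field(default_factory=Name)
    slug: str = ""

def update_firstname(state: UserCreate, firstname: str) -> None:
    state.name.first = firstname
    state.slug = slugify(f"super-user: {state.name.first} {state.name.last}")

user = UserCreate(name=Name(first="Ada", last="Lovelace"))
update_firstname(user, "Grace")
# user.slug == "super-user-grace-lovelace"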
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n return user", "def first_name(self, instance):\r\n return instance.user.first_name", "def register_user_first_name(self, message):\n try:\n self.db_handler.set_user_first_name(message.chat.id, message.text)\n\n self.logger.write_to_log('user first name added to db', message.chat.id)\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def user_profile_setname(token, name_first, name_last):\n if (len(name_first) > 50 or name_first == \"\"):\n raise error.InputError(description=\"First name is not within 1-50 characters\")\n\n if (len(name_last) > 50 or name_last == \"\"):\n raise error.InputError(description=\"Last name is not within 1-50 characters\")\n\n u_id = database.get_current_user(token)\n user = database.get_user_data(u_id)\n user['name_first'] = name_first\n user['name_last'] = name_last\n database.set_user_data(user)", "def setFirstName(self, firstName):\r\n self.firstName = firstName", "def set_first_name(self, first_name):\n self.first_name = first_name", "def test_first_name_is_optional(self):\n self.updated_data['first_name'] = ''\n self.update_user()\n self.assertEqual(self.user.first_name, self.updated_data['first_name'])", "def edit_user_name(self, dto):\n user_id = dto[\"user_id\"]\n user_pin = dto[\"pin\"]\n new_user_name = dto[\"new_user_name\"]\n user = self._find_user_by_id_and_pin(user_id, user_pin)\n self.validate_user_name(new_user_name)\n user[\"user_name\"] = new_user_name\n self._user_dao.save_user(user)", "def first_name(self, name):\n self._first_name = name", "def test_040_update_user(self):\n\n testflow.step(\"Updating user %s\", TEST_USER2)\n assert USER_CLI.run(\n 'edit',\n TEST_USER2,\n attribute='firstName=userX2',\n )[0]", "def test_set_display_name_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=self.user1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def form_valid(self, form):\n print(self.object)\n User.objects.filter(username=self.object).update(\n first_name = form.cleaned_data['first_name'],\n last_name = form.cleaned_data['last_name'],\n email = form.cleaned_data['email'],\n gender = form.cleaned_data['gender'],\n date_of_birth = form.cleaned_data['date_of_birth'],\n )\n messages.success(self.request, 'Edited successfully')\n return super().form_valid(form)", "def _set_user(self):\n\n if '' in (self.last_name, self.first_name):\n return\n\n self._set_first_initial()\n\n User = get_user_model()\n try:\n self.user = User.objects.get(\n models.Q(last_name__iexact=self.last_name),\n models.Q(first_name__iexact=self.first_name) |\n models.Q(first_name__istartswith=self.first_initial[0])\n )\n except User.DoesNotExist:\n pass\n except User.MultipleObjectsReturned:\n pass", "def firstname(self, firstname):\n\n self._firstname = firstname", "def firstname(self, firstname):\n\n self._firstname = firstname", "def update_user_info(user, save=True):\n p = bayou.Person.from_default_services(user.username)\n\n user.email = p.email if p.email else user.email\n user.first_name = p.first_name if p.first_name else user.first_name\n user.last_name = p.surname if p.surname else user.last_name\n\n if save:\n user.save()\n\n return user", "def 
test_set_display_name_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_save_rewrite(self):\n\n user = CustomUser.objects.get(email=\"test@test.test\")\n user.first_name = \"UpdatedName\"\n user.save()\n actual_user = CustomUser.objects.get(email=\"test@test.test\")\n\n self.assertEqual(actual_user.first_name, \"UpdatedName\")", "def save(self, commit=True):\n\n email_local_part = self.cleaned_data['email'].split('@')[0]\n username_start = email_local_part[:5] if len(email_local_part) >= 5 else email_local_part\n self.instance.username = username_start + ''.join(\n [choice(ascii_letters) for _ in range(30 - len(username_start))])\n\n return super(RegisterForm, self).save(commit=commit)", "def update_user():", "def test_set_display_name_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.user2_template, \"new_name\", request=mock_request\n )", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200", "def updateName( user, login, name, sid, postfix=0 ):\n try:\n print \"Trying to update name with login_name=\", login\n user.first_name = name\n newlogin = login\n #strip the username of any special characters, including spaces\n \n if postfix:\n newlogin=\"%s%03d\" % ( login, postfix )\n user.username = newlogin\n user.save()\n except Exception, e:\n print \"Couldn't update name, rolling back\", e\n transaction.savepoint_rollback(sid)\n updateName( user, login, name, sid, postfix+1 )", "def test_user_signup_with_invalid_first_name(self):\n pass", "def test_create_with_username(self):\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n username='chuck',\n email='chuck@norris.org',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck@norris.org')\n\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=False)\n\n user = api.user.create(\n username='chuck',\n email='chuck@norris.org',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck')", "def test_get_short_name_should_return_first_name(self):\n email = 'example@example.com'\n password = 'password'\n first_name = 'Example'\n last_name = 'User'\n user = MyUser(\n email=email,\n password=password,\n first_name=first_name,\n last_name=last_name\n )\n short_name = user.get_short_name()\n self.assertEqual(short_name, first_name)", "def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user", "def set_up(self) -> None:\n query = \"\"\"Update Users\n SET Firstname=?, Surname=?, Currency_id=?, Has_First_Sign_In=?\n Where id=?;\"\"\"\n self.db.commit(query,\n values=(self.firstname, self.surname, self.currency_id, int(self.has_first_sign_in),\n self.id))", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def 
first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def first_name(self, first_name):\n\n self._first_name = first_name", "def create(cls, sender, instance, created, **kdws):\n if created:\n username = helpers.make_username(instance.first_name, instance.last_name, instance.email)\n user = User(username=username)\n user.save()\n user = User.objects.get(username=username)\n instance.user = user\n instance.save()", "def update_professor_first_names(first_name_updates: List[NameUpdate]):\n for prof_id, new_first_name in first_name_updates:\n prof = Professor.objects.get(id=prof_id)\n prof.first_name = new_first_name\n prof.save()", "def test_signup_missing_first_name(self):\n\n invalid_u = User.signup(\"test@test.com\", \"testuser\", \"testpass\", None, \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def full_name(self,first_name):\n full_name = self.first_name + ' ' + self.last_name\n return full_name", "def post(self):\n user_id = request.args.get('user_id')\n lastname = request.args.get('lastname')\n return update_sukunimi(user_id, updatd_lastname=lastname)", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username", "def update_user(self, username):\n parser_update.add_argument('email', type=validate_email,\n required=False, nullable=False,\n help=\"Email must be formatted correctly\")\n\n parser_update.add_argument('phoneNumber', type=validate_phonenumber,\n required=False, nullable=False,\n help=\"Enter a valid phone number\")\n\n parser_update.add_argument('firstname', type=validate_characters,\n required=False, nullable=False,\n help=\"First name must be formatted correctly\")\n\n parser_update.add_argument('lastname', type=validate_characters,\n required=False, nullable=False,\n help=\"Last name must be formatted correctly\")\n\n parser_update.add_argument('othernames', type=validate_characters,\n required=False, nullable=False,\n help=\"Other name must be formatted correctly\")\n\n user = self.get_user(username)\n if user is None:\n return None\n\n args = parser_update.parse_args()\n new_data = {\n 'email': request.json.get('email', user['email']).lower(),\n 'firstname': request.json.get('firstname', user['firstname']).capitalize(),\n 'lastname': request.json.get('lastname', user['lastname']).capitalize(),\n 'othernames': request.json.get('othernames', user['othernames']).capitalize(),\n 'phoneNumber': request.json.get('phoneNumber', user['phonenumber']),\n }\n\n getEmail = self.get_user(new_data['email'])\n verification_status = True\n\n if user['email'] != new_data['email']:\n if getEmail is not None:\n return 'email exists'\n verification_status = False\n\n query = \"\"\"UPDATE users SET firstname=%s,lastname=%s,othernames=%s,\\\n email=%s,phonenumber=%s,emailverified=%s WHERE username=%s\"\"\"\n values = new_data['firstname'], new_data['lastname'], new_data['othernames'], new_data['email'], new_data['phoneNumber'], verification_status, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return new_data", "def 
formalize_user():\n print(request.get_json())\n username = request.get_json()['username']\n passwd = username = request.get_json()['passwd']\n # Check if the user exists by comparing the username\n # this contains the registered email\n existing_user = storage.filter_by(User, 'username', username)\n if not existing_user:\n user = storage.get(User, request.user)\n user.username = username\n user.passwd = passwd\n user.save()\n return jsonify(message='Success')\n return jsonify(message='Error creating user'), 309", "def set_username(old_name, new_name):\n if not validate_username(new_name):\n return \"käyttäjänimi on väärää muotoa\"\n if user_exists(new_name):\n return \"käyttäjänimi on jo käytössä\"\n sql = \"UPDATE users \" \\\n \"SET username=:new \" \\\n \"WHERE username=:old\"\n db.session.execute(sql, {\"new\": new_name, \"old\": old_name})\n db.session.commit()\n return \"ok\"", "def test_set_display_name_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.set_display_name(\n self.fixture.user1_template, \"new_name\", request=mock_request\n )", "def test_empty_first_name_field(self):\r\n result=self.user.get_user_register(\"\",\"Stephen\",\" Ochieng\",\"stephenochieng955@mail.com\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Please fill in the first name field\")", "def save(self, *args, **kwargs):\n self.username = self.username or self.email\n super().save(*args, **kwargs)", "def get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def test_create_user_only_lastname(self):\n data = {\"lastname\": \"Doe\"}\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n user = User.query.filter_by(id=6).first()\n self.assertEqual(user.firstname, None)\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def get_first_name(self) -> str:\n return self.first_name", "def __str__(self):\n return self.user.first_name", "def test_last_name_is_optional(self):\n self.updated_data['last_name'] = ''\n self.update_user()\n self.assertEqual(self.user.last_name, self.updated_data['last_name'])", "def update_user_profile_info(user_id, user_fname, user_lname, email):\n \n user=User.query.filter(User.user_id == user_id).first()\n\n if email != None:\n user.update_email(email)\n if user_fname != None:\n user.update_first_name(user_fname)\n if user_lname != None:\n user.update_last_name\n \n db.session.commit()", "def accounts_setup(request):\n if request.method == 'POST':\n form = UsernameSetupForm(request.POST, instance=request.user)\n if form.is_valid():\n username = form.clean_username()\n request.user.username = username\n request.user.save()\n request.user.userprofile.first_time = False\n request.user.userprofile.save()\n return HttpResponseRedirect('/p/')\n else:\n form = UsernameSetupForm()\n return render(request, \"accounts_setup.html\", {\"form\":form})", "def rename(self,newName):\n self.userName = newName", "def payee_first_name(self, payee_first_name):\n\n self._payee_first_name = payee_first_name", "def save(self, commit=True):\n model = super(UserCreationForm, self).save(commit=False)\n model.username = self.cleaned_data['username']\n\n if commit:\n model.save()\n\n return model", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_name\")", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"first_name\")", "def users_create():", "def save(self, *args, **kwargs):\n self.name = unique_slugify(self.name, instance=self)\n\n if self.is_personal and self.user.username != self.name:\n self.user.username = self.name\n self.user.save()\n\n if self.is_customer:\n self.update_customer()\n\n if not self.image:\n self.set_image_from_name(should_save=False)\n\n return super().save(*args, **kwargs)", "def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)", "def change_username(self, name):\n self.username = name", "def update_user():\n #TODO user update \n pass", "def on_user_create(self, user):", "def input_and_create_user(self):\n print(\"Please input username!\")\n new_username = input()\n new_user = user.User(new_username)\n self.users.append(new_user)", "def test_update(self):\n\n user = CustomUser.objects.get(email=\"test@test.test\")\n user.update(first_name=\"UpdatedName\", second_name=\"UpdatedSecondName\")\n\n self.assertEqual(user.first_name, \"UpdatedName\")\n self.assertEqual(user.second_name, \"UpdatedSecondName\")", "def update_name(id):\n token = request.json['token']\n u = user.User.query.filter(user.User.token == token).first()\n if u is None:\n abort(404)\n if u.id != id:\n print \"user id is wrong.\" #TODO: Support log system\n abort(500)\n u.name = request.json['name']\n u.nickname = request.json['nickname']\n u.company = request.json['nickname']\n db.session.merge(u)\n db.session.commit()\n return jsonify(u.to_dict())", "def update_customer_first_name(self, customer_to_change, new_value):\n customer_list = self._customer_repo.get_customer_list()\n for customer in customer_list:\n if customer.get_customer_id() == customer_to_change.get_customer_id():\n customer.set_first_name(new_value)\n self._customer_repo.overwrite_customer_list(customer_list)", "def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix", "def setName(self, newName):\n self.__username = newName", "def register_user_last_name(self, message):\n try:\n self.db_handler.set_user_last_name(message.chat.id, message.text)\n\n self.logger.write_to_log('user last name added to db', message.chat.id)\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def do_user_update():\n targetUsers = User.query.filter_by(id=request.form['id']).all()\n if not any(targetUsers):\n return user_list(\"Unknown user.\")\n\n targetUser = targetUsers[0]\n\n targetUser.first_name = request.form['first_name']\n targetUser.name = request.form['name']\n targetUser.nick = request.form['nick']\n targetUser.mail = request.form['mail']\n targetUser.role = request.form['role']\n targetUser.state = request.form['state']\n targetUser.gender = request.form['gender']\n targetUser.meter_id = request.form['meter_id']\n targetUser.group_id = request.form['group_id']\n\n db.session.commit()\n return user_list(\"Updated user \" + targetUser.name)", "def test_get_form_label_first_last(self):\n self.user.first_name = 'Full'\n self.user.last_name = 'Name'\n self.assertEqual(\n 
self.user.get_form_label(email=True),\n 'Full Name (testuser) <testuser@example.com>',\n )", "def input_first_name(self, name):\n self.send_keys_to_element(self.firstname_textbox_selector, name)", "def last_name(self, instance):\r\n return instance.user.last_name", "def test_full_name(self):\n\n user = CustomUser.objects.get(email=\"test@test.test\")\n\n self.assertEqual(user.get_full_name(), \"TestName TestSecondName\")", "def getFirstName(self):\r\n return self.firstName", "def update(self, instance, validated_data):\n instance.first_name = validated_data.get('first_name')\n instance.last_name = validated_data.get('last_name')\n instance.email = validated_data.get('email')\n instance.set_password(validated_data.get('password'))\n instance.save()\n return instance", "def first_name(self, instance):\r\n return mark_safe('<a href=\"{0}\" target=\"_blank\">{1}</a>'.format(\r\n instance.get_absolute_url(), instance.user.first_name,\r\n ))", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def update_username(self, old_username, new_username):\n raise NotImplementedError()", "def mutate(parent, info, user_details):\n\n user = User()\n user.name = user_details.name\n user.address = user_details.address\n user.phone_number = user_details.phone_number\n user.sex = user_details.sex\n\n user.save()\n\n return user", "def first_name(self) -> str:\n return self._first_name", "def register_new_user(first_name,email,password):\n\n new_user = User(first_name=first_name, email=email, password=password)\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def set_real_name(user: User, real_name: str=\"\") -> Result:\n current, *rest = user.pw_gecos.split(\",\")\n if current == real_name:\n return Result(State.unchanged)\n command([\"/usr/bin/chfn\", \"--full-name\", real_name, user.pw_name])\n user.pw_gecos = \",\".join([real_name, *rest])\n return Result(State.success)", "async def name(self, ctx:utils.Context, *, username:str):\n\n if len(username) > 32:\n await ctx.send('That username is too long.')\n return\n await self.bot.user.edit(username=username)\n await ctx.send('Done.')", "def updateUserProfile(request):\n user = request.user\n serializer = UserSerializerWithToken(user, many=False)\n\n # If the user has put in two names, separate it into first_name and last_name and save that data.\n try:\n first_name = request.data['name'].split()[0]\n last_name = request.data['name'].split()[1]\n\n user.first_name = first_name\n user.last_name = last_name\n user.username = request.data['email']\n user.email = request.data['email']\n\n # Only modify the password if the field isn't empty.\n if request.data['password'] != '':\n user.password = make_password( request.data['password'] )\n\n user.save()\n return Response(serializer.data)\n\n # For users who enter one name.\n except:\n user.first_name = request.data['name']\n user.last_name = ''\n user.username = request.data['email']\n user.email = request.data['email']\n\n if request.data['password'] != '':\n user.password = make_password( request.data['password'] )\n\n user.save()\n return Response(serializer.data)", "def updateProfile( token, user=False, userinfo={'nickname':'newUser','first_name':'newUser'}):\n \n if not user:\n l= list(validName)\n sysrand.shuffle(l)\n l= \"\".join(l)\n print \"Attempting to create a user with the name \"+l\n user=User.objects.create_user(l,'')\n user.save()\n sid = transaction.savepoint()\n updateName( user, 
str(userinfo['nickname']).replace(' ',''), userinfo['first_name'], sid )\n transaction.savepoint_commit(sid)\n\n try: \n userprofile = user.get_profile()\n userprofile.uid = cPickle.dumps(token) #ensures the token parameter is retreivable and unique\n userprofile.user_id = user.id\n userprofile.save()\n transaction.commit()\n except:\n transaction.rollback()\n return user", "def test_set_display_name_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.set_display_name(\n self.fixture.global_template, \"new_name\", request=mock_request\n )", "def _install_partner_firstname(self):\n # Find records with empty firstname and lastname\n records = self.search([(\"firstname\", \"=\", False),\n (\"lastname\", \"=\", False)])\n\n # Force calculations there\n records._inverse_name()", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def test_create_profile_on_user_created(self):\n user = User.objects.create_user(\n 'auto_tester', 'auto_tester@example.com', 'auto_tester')\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def create_user(change):\n return change()", "def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def update_players_name(self, player_1, player_2):\n self.model.player_1 = player_1\n self.model.player_2 = player_2\n self.logger.info(\"User_1 has name %s, user_2 has name %s\", player_1, player_2)", "def autoname(self):\n\t\tself.name = self.role_profile" ]
[ "0.7218886", "0.72107047", "0.7091783", "0.70805126", "0.68391097", "0.6802234", "0.6792022", "0.6786857", "0.668164", "0.66436106", "0.6567716", "0.65634876", "0.65209407", "0.64976525", "0.64976525", "0.6494254", "0.6456594", "0.6450713", "0.642974", "0.6408176", "0.6386372", "0.6369266", "0.6334728", "0.63174146", "0.6301773", "0.6299665", "0.6288531", "0.62807757", "0.62625355", "0.62625355", "0.62625355", "0.62625355", "0.62625355", "0.62625355", "0.62625355", "0.62625355", "0.62625355", "0.6258432", "0.6251567", "0.62265384", "0.62246734", "0.6218896", "0.6207276", "0.61941105", "0.6185942", "0.61837", "0.6160296", "0.6142469", "0.6132907", "0.61222553", "0.6119685", "0.6116994", "0.61144364", "0.6109409", "0.6100413", "0.608712", "0.6080408", "0.6059643", "0.60587716", "0.6039778", "0.6039778", "0.6024979", "0.6021786", "0.6012301", "0.6010853", "0.6005425", "0.59912866", "0.59863156", "0.59789175", "0.5971931", "0.59658116", "0.5965677", "0.5958662", "0.59487784", "0.59478885", "0.59407604", "0.5929242", "0.59264016", "0.5913181", "0.59100264", "0.5909334", "0.5897535", "0.5891168", "0.5887529", "0.58833724", "0.5883174", "0.5882209", "0.5879901", "0.58788574", "0.58761513", "0.58648133", "0.58536637", "0.583381", "0.58213586", "0.5820423", "0.58199596", "0.5816582", "0.5804096", "0.5802941", "0.5790838" ]
0.81902975
0
Build User response instance
def build_user(data: Dict[Any, Any]) -> User: return User(**data)
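The positive document above is a one-line factory that unpacks a response dict into a User model. A minimal runnable sketch of that pattern follows; the User dataclass and its fields are assumptions for illustration, since the snippet itself never defines the User type:

# Minimal, self-contained sketch of the build_user pattern shown above.
# The User model is a hypothetical stand-in: the dataset snippet does not
# define User, so a dataclass with example fields is assumed here.
from dataclasses import dataclass
from typing import Any, Dict


@dataclass
class User:
    id: int
    username: str
    email: str


def build_user(data: Dict[Any, Any]) -> User:
    # Unpack the payload directly into the model; this assumes the dict keys
    # match the User fields exactly (an unexpected key raises TypeError).
    return User(**data)


if __name__ == "__main__":
    payload = {"id": 1, "username": "chuck", "email": "chuck@norris.org"}
    print(build_user(payload))  # User(id=1, username='chuck', email='chuck@norris.org')

With **data unpacking, any mismatch between the payload keys and the model's fields surfaces immediately as a TypeError, which is usually preferable to silently dropping or inventing fields.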
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get():\n return prepare_response(get_user_info())", "def get_user_details(self, response):\n fullname, first_name, last_name = self.get_user_names(\n response.get(\"fullName\"),\n response.get(\"firstName\"),\n response.get(\"lastName\"),\n )\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\") or \"\",\n \"fullname\": fullname,\n \"first_name\": first_name,\n \"last_name\": last_name,\n }", "def get_user_details(self, response):\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\"),\n \"fullname\": response.get(\"username\"),\n }", "def get_user_details(self, response):\n\n return {\n 'email': response.get('email'),\n 'id': response.get('id'),\n 'full_name': response.get('name')\n }", "def with_user(data_builder, randstr, as_public):\n api_key = randstr()\n user = data_builder.create_user(api_key=api_key, root=False)\n session = copy.deepcopy(as_public)\n session.headers.update({'Authorization': 'scitran-user ' + api_key})\n return attrdict.AttrDict(user=user, api_key=api_key, session=session)", "def get_user_details(self, response):\n first_name, last_name = response['first-name'], response['last-name']\n email = response.get('email-address', '')\n return {'username': first_name + last_name,\n 'fullname': first_name + ' ' + last_name,\n 'first_name': first_name,\n 'last_name': last_name,\n 'email': email}", "def get_user_details(self, response):\n name = response.get(\"name\")\n return {\n \"username\": str(response.get(\"account_id\")),\n \"email\": response.get(\"email\"),\n \"fullname\": name.get(\"display_name\"),\n \"first_name\": name.get(\"given_name\"),\n \"last_name\": name.get(\"surname\"),\n }", "def get(self):\n\n user = None\n if self.request.headers.get('X-Pp-User'):\n user = self.request.headers['X-Pp-User']\n\n result_json = {\n \"user\": user\n }\n\n self.success(result_json)", "def create_user_object(self, request):\r\n user = {\r\n \"first_name\": request.form.get(\"first_name\"),\r\n \"last_name\": request.form.get(\"last_name\"),\r\n \"age\": request.form.get(\"age\"),\r\n \"cpr_number\": request.form.get(\"CPR\"),\r\n \"email\": request.form.get(\"email\"),\r\n \"phone_number\": request.form.get(\"phone_number\"),\r\n \"password\": PasswordHasher().hash(request.form.get(\"password\")),\r\n \"bank_account\": str(BankAccount(\"Savings\", 1000.00).store_account().inserted_id),\r\n \"crypto_wallet\": str(CryptoWallet(\"Bitcoin\", 0.0045).store_account().inserted_id)\r\n }\r\n return user", "def make_response(self):\n params = {\n 'tweet.fields': 'created_at,public_metrics,entities',\n 'expansions': 'author_id',\n 'user.fields': 'description'\n }\n return self.response_limit(params)", "def get(self, request):\n user = request.user\n serializer = self.serializer_class(user)\n return response.Response({\"user\": serializer.data})", "def get(self):\n response = users_db.get_user_by_id(get_jwt_identity())\n return Response(dumps(response), mimetype='application/json')", "def _get(self, query=None):\n if not query:\n user_data = DB_USER_TABLE.all()\n else:\n user_data = DB_USER_TABLE.search(query)\n\n res = {\n \"total_queried\" : len(user_data),\n \"_embedded\" : {\n \"users\" : self.embed_user_data_in_result(user_data)\n },\n \"_links\" : self.make_links({\n \"self\" : UserList.get_self_url(),\n \"contained_in\" : Root.get_self_url()\n })\n }\n return res", "def create_user_model():\n\n user_id_url = root_url + \"/{}/{}\".format(\"user\", session['user_id'])\n\n user_info = requests.get(\n 
url=user_id_url,\n headers={ 'Authorization': api_key },\n ).json()\n\n if user_info['isStudent']:\n session['user_role'] = 'student'\n student.get_user_id()\n student.get_user_details()\n student.get_user_bids()\n student.get_user_competencies()\n student.get_user_qualifications()\n student.get_contract_number()\n student.get_user_contract()\n student.initialized = True\n elif user_info['isTutor']:\n session['user_role'] = 'tutor'\n tutor.get_user_id()\n tutor.get_user_details()\n tutor.get_user_bids()\n tutor.get_user_competencies()\n tutor.get_user_qualifications()\n tutor.get_user_contract()\n tutor.initialized = True\n else:\n raise Exception(\"user is not student and tutor. What is the user role?\")\n\n print(student, tutor)\n\n return student, tutor", "def get_user_details(self, response):\n\n log.info(str(response) + \"-\" * 80)\n log.info(str(dir(self)) + \"-\" * 80)\n\n return response", "def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")", "def post(self, request):\n\n info = []\n\n ser = self.PostSerializer(data=request.data)\n if not ser.is_valid():\n return Response(data={'warning': format_errors(ser.errors)})\n\n username = ser.validated_data['username']\n user_type = ser.validated_data['user_type']\n\n # See if this user already exists.\n if User.objects.filter(username=username).exists():\n return Response(data={'warning': 'User already exists.'})\n\n if user_type == 'LDAP' and settings.LDAP_GROUPS_ENABLED:\n return Response(data={'warning': 'With LDAP groups enabled, LDAP users are created on '\n 'login, and their permissions are based on LDAP groups.'})\n\n user = User.objects.create_user(username, email=ser.validated_data['email'],\n first_name=ser.validated_data.get('first_name', ''),\n last_name=ser.validated_data.get('last_name', ''))\n user.extra = UserExtraModel(type=user_type)\n\n if user_type == UserExtraModel.BASIC:\n # Generate a new confirmation object with a random token.\n confirm = UserConfirmation.make_confirm(user)\n info.append(confirm.send_confirmation(request))\n\n if user_type == 'LDAP':\n # Automatically enroll LDAP users in our local LDAP required group. This provides the\n # 'authorization' needed to identify LDAP users who should be able to log in.\n try:\n ldap_base_group = Group.objects.get(name=settings.LDAP_REQUIRED_GROUP)\n except Group.DoesNotExist:\n # There's a pretty unlikely race condition here where the group is created between\n # where we checked for one and when we save a new one. 
Eh.\n ldap_base_group = Group(name=settings.LDAP_REQUIRED_GROUP)\n ldap_base_group.save()\n\n user.groups.add(ldap_base_group)\n\n user.save()\n user.extra.save()\n\n return Response({'success': 'User \"{}\" Created.'.format(username)})", "def get_user():\n userdict = jsonify2(current_user.db_user, 'User')\n return current_app.bitjws.create_response(userdict)", "def users_instance():\n return {\n \"blocked\": False,\n \"created_at\": \"2022-10-21T04:10:34.240Z\",\n \"email\": \"rodrick_waelchi73@yahoo.com\",\n \"email_verified\": False,\n \"family_name\": \"Kerluke\",\n \"given_name\": \"Nick\",\n \"identities\": [\n {\n \"user_id\": \"15164a44-8064-4ef9-ac31-fb08814da3f9\",\n \"connection\": \"Username-Password-Authentication\",\n \"provider\": \"auth0\",\n \"isSocial\": False,\n }\n ],\n \"name\": \"Linda Sporer IV\",\n \"nickname\": \"Marty\",\n \"picture\": \"https://secure.gravatar.com/avatar/15626c5e0c749cb912f9d1ad48dba440?s=480&r=pg&d=https%3A%2F%2Fssl.gstatic.com%2Fs2%2Fprofiles%2Fimages%2Fsilhouette80.png\",\n \"updated_at\": \"2022-10-21T04:10:34.240Z\",\n \"user_id\": \"auth0|15164a44-8064-4ef9-ac31-fb08814da3f9\",\n \"user_metadata\": {},\n \"app_metadata\": {},\n }", "def __init__(self, response):\n super(Member, self).__init__(response)", "def get(self, request):\n serializer = UserSerializer(request.user)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_user(request): \n try:\n parsed_data = get_json_data(request) \n user = User.objects.get(username=parsed_data[\"username\"])\n user_serializer = UserSerializer(user.appuser)\n ret = Response(SUCCESS, error_code[SUCCESS])\n ret.set_ret(\"data\", user_serializer.serialize()) \n except ObjectDoesNotExist as e:\n ret = Response(NONEXIST_DATA, error_code[NONEXIST_DATA].format(e.message)) \n except ValueError as e:\n ret = Response(INPUT_FORMAT, error_code[INPUT_FORMAT])\n except:\n ret = Response(UNKNOWN_ERROR, error_code[UNKNOWN_ERROR])\n return HttpResponse(ret.serialize(f))", "def get_user_details(self, response):\n token = response.get('access_token')\n headers = {\"Authorization\": \"Bearer %s\" % token}\n endpoint = self.USER_INFO_URL\n response = requests.get(endpoint, headers=headers)\n return {'email': response.json()['email'] or '',\n # We'll need sub, the unique ID, for get_user_id.\n 'sub': response.json()['sub']}", "def get_user_details(self, response):\n\n kaccount_email = \"\"\n kakao_account = response.get(\"kakao_account\", \"\")\n if kakao_account:\n kaccount_email = kakao_account.get(\"email\", \"\")\n properties = response.get(\"properties\", \"\")\n nickname = properties.get(\"nickname\") if properties else \"\"\n return {\n \"username\": nickname,\n \"email\": kaccount_email,\n \"fullname\": nickname,\n \"first_name\": nickname[1:] if nickname else \"\",\n \"last_name\": nickname[0] if nickname else \"\",\n }", "def get_user():\n filters = make_filters(FilterType.AND, request.json)\n user = user_service.get_user(filters)\n if not user:\n response = {\n \"status\": False,\n \"message\": \"No se encontro al usuario que intentas buscar\",\n }\n return make_response(jsonify(response), 404)\n response = {\"status\": True, \"user\": user}\n resp = make_response(dumps(response), 200)\n resp.headers[\"Content-Type\"] = \"application/json\"\n return resp", "def _create_response_model(self, data):\n pass", "def __init__(self, user):\n\n if isinstance(user, dict):\n # Every user must have these values\n self.id = user['id']\n self.name = user['first_name']\n\n # These are optional\n 
self.username = user.get('username', None)\n self.last_name = user.get('last_name', None)\n\n elif isinstance(user, tuple):\n # If a tuple was given, it has to be a 4-tuple\n self.id = user[0]\n self.name = user[1]\n self.last_name = user[2]\n self.username = user[3]\n\n # Special case, our loved admin!\n self.is_admin = self.id == 10885151 # @Lonami", "def load(self):\n\n self.refresh_token()\n\n endpoint = app.config['API']['url'] + 'user/get/' + self._id\n response = requests.get(\n endpoint,\n verify = app.config['API']['verify_ssl'],\n headers = {\n 'Authorization': self.token,\n 'API-Key': app.config['API_KEY']\n },\n )\n\n if response.status_code != 200:\n self.logger.error('[%s] %s' % (\n response.status_code, response.reason)\n )\n err = \"Could not retrieve user %s from KDM API @ %s\"\n raise utils.Logout(err % (self._id, app.config['API']['url']))\n\n user_attribs = utils.convert_json_dict(response.json()['user'])\n for attrib in user_attribs.keys():\n if attrib not in ['_id', 'password']:\n setattr(self, attrib, user_attribs[attrib])", "def get_user_details(self, response):\n email = response.get(\"email\")\n return {\"email\": email, \"username\": email.split(\"@\")[0]}", "def to_representation(self, instance):\n response = super().to_representation(instance)\n user_data = UserSerializer(instance.poster, context=self.context).data\n response['poster'] = {\n 'id': user_data['id'],\n 'email': user_data['email'],\n 'profile_picture': user_data['profile_picture'],\n 'first_name': user_data['first_name'],\n 'last_name': user_data['last_name'],\n 'profession': user_data['profession'],\n 'university': user_data['university'],\n 'university_major': user_data['university_major'],\n }\n\n return response", "def get_user_details(self, response):\n values = {\n 'username': unquote(response['nick']),\n 'email': unquote(response['email']),\n 'first_name': unquote(response['first_name']),\n 'last_name': unquote(response['last_name'])\n }\n\n if values['first_name'] and values['last_name']:\n values['fullname'] = '%s %s' % (values['first_name'],\n values['last_name'])\n return values", "def user_info(self):\n resp = self._get(get_url('user'))\n raise_on_error(resp)\n ret = resp.json()\n return UserInfo(ret)", "def post(self):\n args = usr_parser.parse_args()\n if isinstance(args, current_app.response_class):\n return args\n # convert admin parameter into a boolean\n admin = False if 'admin' not in args else args['admin']\n # check if the id of user is provided\n if args['uid'] is not None:\n user = User.new_user(admin, args['uid'])\n else:\n user = User.new_user(admin)\n\n \"\"\" check if the user is created,\n if the user with the same id exists it won't be created \"\"\"\n if user is None:\n return mk_response(\"User id already exists\", 422)\n\n \"\"\" create an object to represent the user with the password provided\n and return it as a response \"\"\"\n userToReturn = {'uid': user.id, 'password': user.password,\n 'admin': user.admin}\n return userToReturn", "def create_user(request):\n json_response = {\n 'ok': True,\n 'member_details': {}\n }\n try:\n user_uuid = uuid.uuid1()\n tz = random.choice(pytz.all_timezones)\n random_string = get_random_string()\n # Creating a new Custom User object\n CustomUser.objects.create(id=user_uuid, real_name=random_string,\n tz=tz)\n\n json_response.update({'member_details': {\n 'id': user_uuid,\n 'real_name': random_string,\n 'tz': tz,\n 'activity_periods': []\n }})\n\n except Exception:\n\n json_response.update({'ok': False})\n\n finally:\n\n return 
JsonResponse(json_response, safe=False)", "def user(self, username: str, fields: List[str] = None) -> Dict:\n return User(self.token, username).fetch(fields=fields)", "def get_user_details(self, response):\n # Build the username with the team $username@$team_url\n # Necessary to get unique names for all of slack\n username = response.get('user')\n if self.setting('USERNAME_WITH_TEAM', True):\n match = re.search(r'//([^.]+)\\.slack\\.com', response['url'])\n username = '{0}@{1}'.format(username, match.group(1))\n\n out = {'username': username}\n if 'profile' in response:\n out.update({\n 'email': response['profile'].get('email'),\n 'fullname': response['profile'].get('real_name'),\n 'first_name': response['profile'].get('first_name'),\n 'last_name': response['profile'].get('last_name'),\n 'team_name': response.get('team_name')\n })\n return out", "def json(self):\n\n this_user_detail = dict(\n arn=self.arn,\n create_date=self.create_date,\n id=self.user_id,\n inline_policies=self.inline_policies_json,\n inline_policies_count=len(self.inline_policies_json),\n # groups=self.groups,\n groups=self.groups_json,\n path=self.path,\n managed_policies_count=len(self.attached_managed_policies),\n managed_policies=self.attached_managed_policies_pointer_json,\n risks=self.consolidated_risks\n )\n return this_user_detail", "def me(self, request: Request) -> Response:\n\n serializer = self.get_serializer(instance=request.user)\n return Response(serializer.data)", "def create(self, body):\n try:\n user_record = UserRecord.create_user(\n email=body[\"email\"],\n password=body[\"password\"],\n display_name=body[\"display_name\"],\n auth=web_sdk.auth,\n )\n complete_register = body.get(\"complete_register\") or False\n user_record.make_claims({\"complete_register\": complete_register})\n user = User(\n uid=user_record.uid,\n email=user_record.email,\n display_name=user_record.display_name,\n phone_number=body.get(\"phone_number\"),\n name=body[\"name\"],\n lastname=body[\"lastname\"],\n headline=body.get(\"headline\"),\n about_me=body.get(\"about_me\"),\n complete_register=complete_register,\n link_video=body.get(\"link_video\"),\n timezone=body.get(\"timezone\"),\n location=body.get(\"location\"),\n )\n\n if \"specialities\" in body:\n user.append_specialities(body[\"specialities\"])\n if \"methods\" in body:\n user.append_methods(body[\"methods\"])\n if \"plans\" in body:\n user.append_plans(body[\"plans\"])\n\n user.add()\n user.save()\n\n return {\"uid\": user_record.uid, \"a\": user_record, \"b\": user}\n except KeyError as ex:\n raise HandlerException(400, \"Bad request: \" + str(ex))", "def user_view():\r\n request_dict = request.get_json()\r\n response_dict = {}\r\n\r\n if \"user_id\" in request_dict:\r\n try:\r\n user_exists = User.query.filter_by(user_uuid=request_dict['user_id']).first()\r\n # print(request_dict['user_id'])\r\n if user_exists:\r\n # View user auth groups\r\n users_dict = {}\r\n group_list = []\r\n users_dict['id'] = user_exists.user_uuid\r\n users_dict['name'] = user_exists.full_name\r\n users_dict['email'] = user_exists.email\r\n users_dict['status'] = user_exists.is_active()\r\n users_dict['last_login'] = user_exists.last_login\r\n for group in user_exists.auth_user_groups.all():\r\n group_list.append(group.group_id)\r\n users_dict['auth_user_groups'] = group_list\r\n response_dict[cts.RESPONSE_STATUS] = cts.RESPONSE_SUCCESS\r\n response_dict[cts.RESPONSE_MESSAGE] = users_dict\r\n\r\n else:\r\n response_dict[cts.RESPONSE_STATUS] = cts.RESPONSE_ERROR\r\n 
response_dict[cts.RESPONSE_MESSAGE] = cts.RECORD_NOTFOUND\r\n except:\r\n db.session.rollback()\r\n response_dict[cts.RESPONSE_STATUS] = cts.RESPONSE_ERROR\r\n response_dict[cts.RESPONSE_MESSAGE] = cts.INVALID_REQUEST\r\n else:\r\n response_dict[cts.RESPONSE_STATUS] = cts.RESPONSE_ERROR\r\n response_dict[cts.RESPONSE_MESSAGE] = cts.INVALID_REQUEST\r\n\r\n return jsonify(response_dict), 200", "def get_post_response_data(self, request, token_obj: \"AuthToken\"):\n UserSerializer = self.get_user_serializer_class()\n data = {\n \"expiry\": self.format_expiry_datetime(token_obj.expiry),\n \"token\": token_obj.token,\n }\n if UserSerializer is not None:\n data[\"user\"] = UserSerializer(request.user, context=self.get_context()).data\n return data", "def get_one_user():", "def userinfo(self, **kwargs):\n metadata = self.load_server_metadata()\n resp = self.get(metadata['userinfo_endpoint'], **kwargs)\n resp.raise_for_status()\n data = resp.json()\n return UserInfo(data)", "def get_user(self, username):\n return {}", "def proto_user(self):\n return baker.make(User)", "def proto_user(self):\n return baker.make(User)", "def proto_user(self):\n return baker.make(User)", "def user(self, *args, **kwargs) -> User:\n return User(self.handle, *args, **kwargs)", "def make_user():\n names = request.args.get('names', 1, type=str) #raw text input from HTML page\n global db\n global current_user\n current_user = User(names, db)\n # Adding the user to the db occurs in the user class,\n # only in the get_pantry method\n str_pantry = current_user.get_pantry()\n if str_pantry == \"\": #if current user doesn't have pantry, return a string that states this\n return jsonify(name=current_user.name, pantry = \" No Pantry\")\n list_ingredients = ast.literal_eval(str_pantry) # Convert str to list\n str_pantry = \" Pantry: \" + list_ingredients[0] \n for i in range(1, len(list_ingredients)):\n str_pantry += \", \" + list_ingredients[i]\n return jsonify(name=current_user.name, pantry = str_pantry) #returns name and list of ingredients in pantry to HTML page", "def build_room_user_from_user_data(user, connection_id):\n new_user = {\n 'id': user['id'],\n 'name': user['name'],\n 'avatarSrc': user['avatarSrc'],\n 'connections': [connection_id]\n }\n return new_user", "def make_user(self, name, password):\n\n data = json.dumps({\"password\": password, \"displayName\": name})\n headers = {\"Content-Type\": \"application/json\"}\n response = self._api_request_post(\"users/\", data, headers=headers)\n if response is not None and response != []:\n return response", "async def get_me(self) -> types.User:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.GET_ME, payload)\n\n return types.User(**result)", "def get_random_user(self):\r\n from provider.models import User\r\n u = User.objects.order_by('?')[0]\r\n return {\"username\": u.username, \"password\": u.password, \"fullname\": u.fullname}", "def retrieve(self, request, *args, **kwargs):\n response = super(UserViewSet, self).retrieve(request, *args, **kwargs)\n circles = Circle.objects.filter(\n members= request.user,\n # a members le asignamos el usuario\n membership__is_activate= True,\n # y buscamos los memberships que esten activos\n )\n data={\n 'user':response.data,\n 'circles': CircleModelSerializer(circles,many=True).data\n\n }\n response.data = data\n return response", "def to_object(cls, query_dict: Dict):\n user = User()\n user.id = query_dict.get(\"id\")\n user.first_name = query_dict.get(\"firstname\")\n user.last_name = 
query_dict.get(\"lastname\")\n user.other_name = query_dict.get(\"othernames\")\n user.email = query_dict.get(\"email\")\n user.phone_number = query_dict.get(\"phone\")\n user.user_name = query_dict.get(\"username\")\n user.is_admin = query_dict.get(\"role\")\n user.password = query_dict.get(\"password\")\n return user", "def add_user(request):\n \n try:\n parsed_data = get_json_data(request)\n user = User(\n username=parsed_data[\"username\"],\n )\n user.set_password(parsed_data[\"password\"])\n user.save()\n appuser = AppUser.objects.create(\n user=user,\n usertype=parsed_data[\"usertype\"],\n phone=parsed_data[\"phone\"],\n state=parsed_data[\"state\"],\n city=parsed_data[\"city\"],\n address=parsed_data[\"address\"]\n )\n appuser.save()\n # Generate response\n ret = Response(SUCCESS, error_code[SUCCESS])\n user_serializer = UserSerializer(appuser)\n ret.set_ret(\"data\", user_serializer.serialize()) \n \n # Generate a token for authentication\n token = token_generator(30)\n user_token = Token(token=token, username=user.username)\n user_token.save()\n ret.set_ret(\"auth_token\", token)\n except KeyError as e:\n ret = Response(EMPTY_COLUMN, error_code[EMPTY_COLUMN].format(e.message)) \n return HttpResponse(ret.serialize(f))\n except IntegrityError as e:\n ret = Response(DUPLICATE_KEY, error_code[DUPLICATE_KEY].format(e.message))\n except ValueError as e:\n ret = Response(INPUT_FORMAT, error_code[INPUT_FORMAT])\n except:\n ret = Response(UNKNOWN_ERROR, error_code[UNKNOWN_ERROR])\n return HttpResponse(ret.serialize(f))", "def get_response_data(self):\r\n response_data = dict(self.TOKEN_RESPONSE_DATA)\r\n response_data.update(self.USER_RESPONSE_DATA)\r\n return response_data", "def get_logged_in_user(self):\n\n if type(self.cache) is Cache:\n sessionId = self.cache.get('user.sessionId')\n userId = self.cache.get('user.id')\n if sessionId and userId:\n self.sessionId = sessionId\n self.userId = userId\n user = {}\n user['id'] = userId\n user['username'] = self.cache.get('user.username')\n user['profileUrl'] = self.cache.get('user.profileUrl')\n user['avatarUrl'] = self.cache.get('user.avatarUrl')\n user['reputation'] = self.cache.get('user.reputation')\n user['badge1'] = self.cache.get('user.badge1')\n user['badge2'] = self.cache.get('user.badge2')\n user['badge3'] = self.cache.get('user.badge3')\n return user", "def retrieve(self, request, *args, **kwargs):\n return super(UserViewSet, self).retrieve(request, *args, **kwargs)", "def user(self):\r\n return resource.User(self)", "def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }", "def user(self):\r\n return users.User(self)", "def self(self, request):\n serializer_class = self.get_serializer_class()\n serializer = serializer_class(request.user, context={ \"request\": request })\n return Response(serializer.data)", "def _generate_users(self):\n users = {}\n args = self._add_user()\n #Grab info from args\n users[args[\"userID\"]] = {}\n users[args[\"userID\"]][\"name\"] = args[\"name\"]\n users[args[\"userID\"]][\"webhook_url\"] = args[\"webhook_url\"]\n users[args[\"userID\"]][\"blacklist\"] = args[\"blacklist\"]\n #Try to grab override info, default to blank if doesn't exist\n users[args[\"userID\"]][\"override_user\"] = args.get(\"overrideUser\", \"\")\n users[args[\"userID\"]][\"override_userid\"] = args.get(\"overrideUserID\", \"\")\n users[args[\"userID\"]][\"override_oauth\"] = args.get(\"overrideOauth\", \"\")\n fileIO.save_json(\"users.json\", 
users)", "def __call__(self, username):\n return flattrclient.user.User(session=self._session, username=username)", "def me(user):\n return json_response(\n status=200,\n response_data={\"success\": True, \"data\": {\"user\": user.serialize()}}\n )", "def get_user(self, username=None,id=None,include_detail=False,include_pics=False,pic_page_size=None,last_pic_id=False):\n \n if (id is None and username is None):\n if self.authenticator.access_token is not None:\n id = 'self'\n else:\n raise PicplzError(\"get_user method requires one of a pic id, longurl_id or shorturl_id\")\n \n parameters = {}\n if id is not None:\n parameters['id']=id\n if username is not None:\n parameters['username']=username\n if include_detail:\n parameters['include_detail']=1\n if include_pics:\n parameters['include_pics']=1\n if last_pic_id:\n parameters['last_pic_id']=last_pic_id\n if pic_page_size is not None:\n parameters['pic_page_size']=pic_page_size\n \n if id == 'self':\n returned_json = self.__make_authenticated_get__(self.user_endpoint, parameters)\n returned_json = self.__make_unauthenticated_get__(self.user_endpoint, parameters)\n returned_data = simplejson.loads(returned_json)\n data = returned_data['value']['users'][0]\n user = PicplzUser.from_dict(self, data)\n try:\n has_more_pics = returned_data['value']['users'][0]['more_pics']\n if has_more_pics:\n user.__has_more_pics__ = True\n else:\n user.__has_more_pics__ = False\n except:\n user.__has_more_pics__ = False\n try:\n last_pic_id = returned_data['value']['users'][0]['last_pic_id']\n user.__last_pic_id__ = last_pic_id\n except:\n user.__last_pic_id__ = False\n \n return user", "def retrieve(self, request, pk=None):\n if pk == 'me':\n return Response(UserSerializer(request.user, context={\n 'request': request\n }).data)\n return super(UserViewSet, self).retrieve(request, pk)", "def get_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload={}, request_type=self.REQUEST_GET, version=\"v2\")", "def create(self, request, *args, **kwargs):\n return super(UserViewSet, self).create(request, *args, **kwargs)", "def enrich(r):\n row = r[1]\n userReference = row[0]\n user = row[1]\n # if there's no user, it means there wasn't match between the users in the db vs the ones from the vendor coming from the api\n if user and userReference:\n user[\"userId\"] = userReference.userId\n user[\"userActive\"] = userReference.active\n user[\"userLastActiveDate\"] = userReference.lastActiveDate\n return user", "def get_user():\n\treturn '1', 200", "def _get_user_tuple(self):\n user = namedtuple(\"User\", ['register_user', 'create_user', 'change_password', 'edit_user',\n 'get_user_by_id', 'get_user_by_email', 'get_account', 'delete_account', 'set_username', 'generate_forgot_password_token', 'check_email', \n 'link_account', 'unlink_account', 'create_raas_profile', 'authenticate_user', 'authenticate_user_by_email', 'delete_user_with_email_confirmation', 'resend_verification_email'\n 'delete_account_with_email_confirmation', 'get_account_password', 'get_custom_object_by_accountid', 'get_custom_object_by_accountids',\n 'check_custom_object', 'get_custom_object_by_objectid', 'get_custom_object_stats', 'get_custom_object_by_query', 'check_token_validate', 'check_token_invalidate',\n 'add_or_remove_user_email', 'set_password', 'set_status', 'upsert_custom_object',\n 
'set_custom_object_status','check_username','password_reset','invalidate_email','update_account','update_user_by_token'])\n\n #Get methods\n user.get_user_by_id = self.api.get_user_by_id\n user.get_user_by_email = self.api.get_user_by_email\n user.get_account = self.api.get_account\n user.delete_account = self.api.delete_account\n user.authenticate_user = self.api.authenticate_user\n user.authenticate_user_by_email = self.api.authenticate_user_by_email\n user.delete_user_with_email_confirmation = self.api.delete_user_with_email_confirmation\n user.generate_forgot_password_token = self.api.generate_forgot_password_token\n user.check_email = self.api.check_email\n user.resend_verification_email = self.api.resend_verification_email\n user.delete_account_with_email_confirmation = self.api.delete_account_with_email_confirmation\n user.get_account_password = self.api.get_account_password\n user.get_custom_object_by_accountid = self.api.get_custom_object_by_accountid\n user.get_custom_object_by_accountids = self.api.get_custom_object_by_accountids\n user.check_custom_object = self.api.check_custom_object\n user.get_custom_object_by_objectid = self.api.get_custom_object_by_objectid\n user.get_custom_object_stats = self.api.get_custom_object_stats\n user.get_custom_object_by_query = self.api.get_custom_object_by_query\n user.check_token_validate = self.api.check_token_validate\n user.check_token_invalidate = self.api.check_token_invalidate\n user.check_username = self.api.check_username\n \n #Post methods\n user.register_user = self.api.register_user\n user.create_user = self.api.create_user\n user.create_raas_profile = self.api.create_raas_profile\n user.edit_user = self.api.edit_user\n user.update_user_by_token = self.api.update_user_by_token\n user.update_account = self.api.update_account\n user.change_password = self.api.change_password\n user.set_username = self.api.set_username\n user.link_account = self.api.link_account\n user.unlink_account = self.api.unlink_account\n user.add_or_remove_user_email = self.api.add_or_remove_user_email\n user.set_password = self.api.set_password\n user.change_username = self.api.change_username\n user.password_reset = self.api.password_reset\n user.set_status = self.api.set_status\n user.upsert_custom_object = self.api.upsert_custom_object\n user.set_custom_object_status = self.api.set_custom_object_status\n\n #Put methods\n user.invalidate_email = self.api.invalidate_email\n \n \n return user", "def _make_api_call(self, requesting_user, target_user, course_key):\n request = Request(self.request_factory.get('/'))\n request.user = requesting_user\n with check_mongo_calls(0):\n return course_detail(request, target_user.username, course_key)", "def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include 
username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}", "def create(self, request, *args, **kwargs):\n response = super().create(request, *args, **kwargs)\n profile = response.data\n user_name = profile.get(\"username\")\n cache.set(f\"{USER_PROFILE_PREFIX}{user_name}\", profile)\n return response", "def create_user() -> tuple:\n # created new user\n user_data: dict = request.get_json()\n names: str = user_data.get(\"names\")\n surname: str = user_data.get(\"surname\")\n cell: str = user_data.get(\"cell\")\n email: str = user_data.get(\"email\")\n password: str = user_data.get(\"password\")\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n\n # Add User View will perform error checking\n return user_view.add_user(organization_id=organization_id, uid=uid, names=names, surname=surname,\n cell=cell, email=email, password=password)", "def _get_user_info(self):\n\n if not self._refresh_token:\n raise ValueError(\"Refresh Token not set\")\n\n # Add access token to the headers\n add_headers = dict(self._default_headers)\n add_headers['Authorization'] = self._access_token\n\n resp = requests.get(BASE_URL + \"user/{}\".format(self._user_id), headers=add_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to retrieve user info: {}\".format(resp))\n\n vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))\n\n # Print generic user info\n print(\"\")\n print(\"== USER INFO ==\")\n print(\"Username: {}\".format(vals.get('user').get('username')))\n print(\"Nickname: {}\".format(vals.get('user').get('nickname')))\n print(\"Usage: {} MB / {} MB\".format(int(int(vals.get('user').get('quota').get('usage')) / (1024*1024)),\n int(int(vals.get('user').get('quota').get('limit')) / (1024*1024))))\n print(\"\")\n\n # Grab folder ids we care about\n self._user_sync_folders_url = vals.get('user').get('syncfolders')", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def get_response_data(self):\n response_data = dict(self.TOKEN_RESPONSE_DATA)\n response_data.update(self.USER_RESPONSE_DATA)\n return response_data", "def _set_user_info(self):\n sha = sha1(self.email).hexdigest()\n user_info = redis.hgetall(\"sl:account:{}\".format(sha))\n\n if (type(user_info) != dict or\n user_info.get(\"password\") != self.password):\n user_info = {}\n\n try:\n self.plan = Plan.from_id(user_info.get(\"plan\"))\n except SleekException:\n self.plan = None\n self.customer_token = str_to_none(\n user_info.get(\"customer_token\")\n )\n self.subscription_token = str_to_none(\n user_info.get(\"subscription_token\")\n )\n self.subscription_end = str_to_none(\n user_info.get(\"subscription_end\")\n )", "def _get(self, user_id):\n user = DB_USER_TABLE.get(doc_id=int(user_id))\n if not user:\n flask_restful.abort(404, message=f\"User '{user_id}' not found!\")\n res = {\n \"id\" : user.doc_id\n }\n res.update(user)\n res['_links'] = self.make_links({\n \"self\" : User.get_self_url(user.doc_id),\n \"contained_in\" : UserList.get_self_url(),\n \"customers\" : 
UserCustomerList.get_self_url(user.doc_id),\n \"tickets\" : UserTicketList.get_self_url(user.doc_id)\n })\n return res", "def post(self):\r\n return create_user(request)", "def user(request):\n\n try:\n bearer = request.META.get('HTTP_AUTHORIZATION', '')\n bearer = bearer.split(' ')\n if len(bearer) != 2:\n return JsonResponse(status=403, data={'error': 'Unauthorized'})\n\n bearer = bearer[1]\n tokenobject = AccessToken.objects.get(token=bearer)\n userdata = {\n 'first_name': tokenobject.user.first_name,\n 'last_name': tokenobject.user.last_name,\n 'username': tokenobject.user.username,\n 'email': tokenobject.user.get_email().email,\n 'member': tokenobject.user.is_member,\n 'staff': tokenobject.user.is_staff,\n 'superuser': tokenobject.user.is_superuser,\n 'nickname': tokenobject.user.nickname,\n 'rfid': tokenobject.user.rfid,\n 'image': tokenobject.user.get_image_url(),\n 'field_of_study': tokenobject.user.get_field_of_study_display(),\n }\n\n return JsonResponse(status=200, data=userdata)\n except AccessToken.DoesNotExist:\n return JsonResponse(status=403, data={'error': 'Unauthorized'})", "async def fetch_user(self, user_id: uuid.UUID) -> Optional[dict]:\n row = await self.fetchrow(\n \"\"\"\n select\n id, username, display_name, website, created_at, modified_at,\n last_heartbeat_at, last_plugin, last_plugin_name, last_project,\n timezone\n from users where id = $1\n \"\"\",\n user_id,\n )\n\n if not row:\n return None\n\n user = {\n \"id\": uuid_(row[0]),\n \"username\": row[1],\n # no \"full legal names\" here uwuwuwuwu\n # trans rights\n \"display_name\": row[2],\n \"full_name\": row[2],\n \"website\": row[3],\n \"created_at\": timestamp_(row[4]),\n \"modified_at\": timestamp_(row[5]),\n \"last_heartbeat_at\": row[6],\n \"last_plugin\": row[7],\n \"last_plugin_name\": row[8],\n \"last_project\": row[9],\n \"timezone\": row[10],\n \"logged_time_public\": False,\n \"languages_used_public\": False,\n # i do not store full name or email pls\n \"email\": \"uwu@uwu.com\",\n \"email_public\": False,\n # TODO: should we put something here?\n \"photo\": None,\n \"is_hireable\": False,\n \"has_premium_features\": False,\n \"plan\": \"basic\",\n \"location\": \"Canberra, Australia\",\n }\n\n if user[\"website\"] is not None:\n # TODO: use urllib.parse\n user[\"human_readable_website\"] = user[\"website\"].lstrip(\"https://\")\n\n return user", "def get_user_details(self, response):\n details = super().get_user_details(response)\n if not details[\"email\"] and response.get(\"emails\"):\n details[\"email\"] = response[\"emails\"]\n if isinstance(details.get(\"email\"), (list, tuple)):\n details[\"email\"] = details[\"email\"][0]\n return details", "def post_user_info():\n return jsonify(users.get_user_info(request, client))", "def post(self):\n data = request.get_json()\n # validate received fields\n fields_validate = ViewsValidation()\n fields = [\n 'firstname',\n 'lastname',\n 'email',\n 'phonenumber',\n 'username',\n 'othernames',\n 'password'\n ]\n missing_fields = fields_validate.missing_fields(fields, data)\n\n if not missing_fields: # filter missing fields\n user_entry = {\n \"firstname\": data[\"firstname\"],\n \"lastname\": data[\"lastname\"],\n \"email\": data[\"email\"],\n \"phonenumber\": data[\"phonenumber\"],\n \"username\": data[\"username\"],\n \"othernames\": data[\"othernames\"],\n \"password\": data['password'],\n \"createdBy\": len(['title'])\n }\n\n res = self._userz.create_user(user_entry)\n print(\"RES:::\", res)\n if res:\n if res[\"status\"] == 400:\n return res\n else:
\n return {\n \"status\": 201,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been created\"\n }]\n }, 201\n else:\n return {\n \"status\": 400,\n \"error\": \"Bad Request\"\n }, 400\n else:\n return {\n \"status\": 403,\n \"error\": \"Bad request: missing\"\n \" fields {}\".format(missing_fields)\n }, 403", "def user(self):\r\n return resources.User(self)", "def data(self, user=None):\n return {\n \"provider\": self.BACKEND,\n \"access_token\": self.access_token,\n \"client_id\": self.client_id,\n \"honor_code\": \"true\",\n \"country\": \"US\",\n \"username\": user.username if user else \"test_username\",\n \"name\": user.first_name if user else \"test name\",\n \"email\": user.email if user else \"test@test.com\"\n }", "def get_user(self, validated_data):\n user = CustomUser.objects.get(pk=validated_data['user_id'])\n return user", "def post(self):\n args = usr_parser.parse_args()\n # convert admin parameter into a boolean\n admin = bool(args['admin'])\n # check if the id of user is provided\n if args['uid'] is not None:\n user = User.new_user(admin, args['uid'])\n else:\n user = User.new_user(admin)\n \n \"\"\" check if the user is created, \n if the user with the same id exists it won't be created \"\"\"\n if user is None:\n return abort(422, message=\"User id already exists\")\n \n \"\"\" create an object to represent the user with the password provided\n and return it as a response \"\"\"\n userToReturn = { 'uid' : user.id, 'password':user.password,'admin':user.admin }\n return userToReturn", "def user_data(self, access_token, *args, **kwargs):\n params = self.setting(\"PROFILE_EXTRA_PARAMS\", {})\n response = kwargs.get('response') or {}\n params[\"access_token\"] = access_token\n headers = {\n \"Authorization\": \"%s %s\" % (\n response.get(\"token_type\", \"Bearer\").capitalize(),\n access_token),\n \"Accept\": 'application/json',\n \"Content-type\": 'application/json;charset=utf-8'}\n return self.get_json(self.USER_DATA_URL,\n params=params, headers=headers)", "async def get_self(self):\n if not \".ROBLOSECURITY\" in self.request.cookies:\n raise NotAuthenticated(\"You must be authenticated to perform that action.\")\n r = await self.request.request(url=\"https://www.roblox.com/my/profile\", method=\"GET\")\n data = r.json()\n return User(self.request, data[\"UserId\"], data[\"Username\"])", "def __get_user(self, login):\n\n user = {}\n\n if not login:\n return user\n\n user_raw = self.client.get_user(login)\n user = json.loads(user_raw)\n self._push_cache_queue(user_raw)\n user_orgs_raw = \\\n self.client.get_user_orgs(login)\n user['organizations'] = json.loads(user_orgs_raw)\n self._push_cache_queue(user_orgs_raw)\n self._flush_cache_queue()\n\n return user", "def view_user(user):\n return {\n \"id\": user.id,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"email\": user.email,\n \"profile_pic\": user.profile_pic,\n }", "def post(self):\n return userDao.create(api.payload), 201", "async def user_get_data(\n user: User = Depends(get_current_active_user),\n db: Session = Depends(db_session)) -> UserInfo:\n return model2user(user, db)", "def request_user_create():\n return Response(render_template('admin/user/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/user/create\",\n genders=list(GenderType),\n states=list(StateType),\n groups=Group.query.all(),\n roles=list(RoleType),\n gender=GenderType.FEMALE,\n role=RoleType.LOCAL_POWER_TAKER,\n state=StateType.ACTIVATION_PENDING),\n
mimetype='text/html')", "def user_creation(request):\n # this is a temporary api view for creating users\n # this view will be used until token authentication is in place\n if request.method == 'POST':\n serializer = UserSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n try:\n username = request.data['username']\n password = request.data['password']\n user = authenticate(username=username, password=password)\n login(request, user)\n data = {'data': serializer.data, 'logged in': True}\n return JsonResponse(data=data, status=status.HTTP_201_CREATED)\n #return redirect('profiles:checklogin')\n except:\n pass\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response('NOT ALLOWED', status=status.HTTP_400_BAD_REQUEST)" ]
[ "0.6529011", "0.6426592", "0.6373598", "0.63219744", "0.61717933", "0.6141898", "0.61418366", "0.6088162", "0.6084343", "0.6070999", "0.60210335", "0.6011697", "0.60002035", "0.5984306", "0.5978686", "0.59628445", "0.59532416", "0.5891522", "0.5889954", "0.58876634", "0.588197", "0.58784986", "0.58667016", "0.5856848", "0.5848016", "0.58359164", "0.5832125", "0.5814969", "0.58134085", "0.5803165", "0.58012414", "0.5800662", "0.5798733", "0.57864857", "0.5782511", "0.57772845", "0.5774003", "0.5762044", "0.5759478", "0.57593924", "0.5756894", "0.5738997", "0.57323647", "0.5688693", "0.5685468", "0.5685468", "0.5685468", "0.567603", "0.5666602", "0.5659849", "0.56439745", "0.56243694", "0.56210315", "0.56124616", "0.5608869", "0.55996794", "0.559902", "0.55981743", "0.5596291", "0.5586517", "0.55847096", "0.55747205", "0.55741394", "0.5573291", "0.5572816", "0.5572531", "0.5569012", "0.5567829", "0.556578", "0.55610156", "0.5560905", "0.55565476", "0.5556045", "0.5555668", "0.55510086", "0.5547173", "0.5544106", "0.55431753", "0.5539468", "0.55370766", "0.55369854", "0.55358523", "0.5533344", "0.55293584", "0.5521023", "0.5519934", "0.55157083", "0.55136746", "0.5507784", "0.55021894", "0.5501883", "0.54995906", "0.5499173", "0.54957914", "0.54940134", "0.54928666", "0.5490913", "0.54781127", "0.54763085", "0.5475847" ]
0.6766939
0
This method runs one episode for a gym environment. deterministic == True => agent executes only greedy actions according to the Q function approximator (no random actions). do_training == True => train agent
def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=True, max_timesteps=10000, history_length=0, manual=False): stats = utils.EpisodeStats() # Save history image_hist = [] step = 0 state = env.reset() env.viewer.window.on_key_press = utils.key_press env.viewer.window.on_key_release = utils.key_release # fix bug of corrupted states without rendering in gym environment env.viewer.window.dispatch_events() # append image history to first state state = state_preprocessing(state) image_hist.extend([state] * (history_length + 1)) state = np.array(image_hist).reshape(96, 96, history_length + 1) while True: #skip intro zoom frames if step < 48: step += 1 env.step(utils.id_to_action(0)) continue # TODO: get action_id from agent # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly. if do_training and manual: action_id = utils.manual_action else: action_id = agent.act(state, deterministic) action = utils.id_to_action(action_id) # Hint: frame skipping might help you to get better results. reward = 0 for _ in range(skip_frames + 1): next_state, r, terminal, info = env.step(action) reward += r if rendering: env.render() if terminal: break next_state = state_preprocessing(next_state) image_hist.append(next_state) image_hist.pop(0) next_state = np.array(image_hist).reshape(96, 96, history_length + 1) if do_training and (next_state[:82, :, -1].sum() > 5000): #track out of sight print('Track gone; finish this episode') agent.add(state, action_id, next_state, reward=-(skip_frames + 1), terminal=True) #punish break if do_training: agent.add(state, action_id, next_state, reward, terminal) if not manual: agent.train() stats.step(reward, action_id) state = next_state if terminal or (step * (skip_frames + 1)) > max_timesteps: break step += 1 return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _train_simulate(self, env, train_episode=None):\n # The initial observation\n o_r_d_i = [env.reset()] + [None]*3 # o_r_d_i means \"Observation_Reward_Done_Info\"\n # Reset all the manager parameters\n self.reset(o_r_d_i[0][\"manager\"])\n done = False\n current_option = None\n # Render the current state\n if self.parameters[\"display_environment\"]:\n self.show_render.render(o_r_d_i[0])\n\n while not done:\n # If no option is activated then choose one\n if current_option is None:\n current_option = self.select_option(o_r_d_i, train_episode)\n assert current_option.score == 0, \"the option's reset function must reset the score to 0.\"\n\n # choose an action\n action = current_option.act(train_episode)\n\n # make an action and display the state space\n o_r_d_i = env.step(action)\n if self.parameters[\"display_environment\"]:\n self.show_render.render(o_r_d_i[0])\n\n # check if the option ended correctly\n correct_termination = self.check_end_option(current_option, o_r_d_i[0][\"manager\"])\n\n # update the option\n intra_reward = self.compute_intra_reward(o_r_d_i, correct_termination)\n current_option.update_option(o_r_d_i, action, correct_termination, train_episode, intra_reward)\n\n # If the option is done, update the manager\n if correct_termination is not None:\n if check_type(current_option, AbstractOption):\n # record the correct transition when the option is a regular option (i.e. not an explore option)\n self.successful_transition.append(correct_termination)\n self.write_success_rate_transitions()\n\n # the manager does not need to know if the correct_termination is 0 or 1.\n self.update_manager(o_r_d_i, current_option, train_episode)\n\n current_option = None\n\n done = self.check_end_manager(o_r_d_i)\n\n self.write_manager_score(train_episode)", "def run(self):\n time.sleep(np.random.rand())\n np.random.seed(np.int32(time.time() % 1000 * self.id))\n \n # Put this in a while loop that checks a shared variable\n # Will keep running episodes until the shared variable reports False\n while(self.exit_flag == 0):\n for experience in self.run_episode():\n print(experience.state, experience.reward)\n self.training_q.put(experience)", "def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploitation of the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. 
Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 1000 iterations\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)", "def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()", "def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=False,\n max_timesteps=1000, history_length=0):\n\n stats = EpisodeStats()\n\n # Save history\n image_hist = []\n\n step = 0\n state = env.reset()\n\n # fix bug of corrupted states without rendering in gym environment\n env.viewer.window.dispatch_events()\n\n # append image history to first state\n state = state_preprocessing(state)\n image_hist.extend([state] * history_length)\n state = np.array(image_hist)#.reshape(96, 96, history_length)\n\n while True:\n\n # TODO: get 
action_id from agent\n # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly. \n # action_id = agent.act(...)\n # action = your_id_to_action_method(...)\n action_id = agent.act(state, deterministic)\n action = id_to_action(action_id)\n\n # Hint: frame skipping might help you to get better results.\n reward = 0\n for _ in range(skip_frames + 1):\n next_state, r, terminal, info = env.step(action)\n reward += r\n\n if rendering:\n env.render()\n\n if terminal:\n break\n\n next_state = state_preprocessing(next_state)\n image_hist.append(next_state)\n image_hist.pop(0)\n next_state = np.array(image_hist)#.reshape(96, 96, history_length)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n stats.step(reward, action_id)\n\n state = next_state\n\n if terminal or (step * (skip_frames + 1)) > max_timesteps:\n break\n\n step += 1\n\n return stats", "def run_episode(env, agent, deterministic, do_training=True, rendering=False, max_timesteps=1000):\n\n stats = EpisodeStats() # save statistics like episode reward or action usage\n state = env.reset()\n\n step = 0\n while True:\n\n action_id = agent.act(state=state, deterministic=deterministic)\n next_state, reward, terminal, info = env.step(action_id)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n state = next_state\n\n # # NOTE reward shaping...\n # if terminal:\n # reward += -1\n # if step < 20:\n # reward += -10\n # if step > 100:\n # reward += 10\n\n stats.step(reward, action_id)\n\n if rendering:\n env.render()\n\n if terminal or step > max_timesteps:\n break\n\n step += 1\n\n return stats", "def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)", "def run_episode(self, mode=0, eps=0.):\n if mode==0:\n eps = 0.\n done = False\n score = 0 \n \n while not done:\n state = self.env_info.vector_observations[0] # get the current state\n action = self.agent.act(state, eps=eps) # get an action using epsilon greedy policy\n self.env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = self.env_info.vector_observations[0] # get the next state\n reward = self.env_info.rewards[0] # get the reward\n done = self.env_info.local_done[0] # see if episode has finished\n \n if mode == 1:\n self.agent.step(state, action, reward, next_state, done)\n \n score += reward\n \n self.reset_env() # reset the environment\n \n return score", "def main(num_episodes, gamma, lam, kl_targ, batch_size, env_name):\n\n # initialize gym environment and get observations and actions\n env = gym.make(env_name)\n gym.spaces.seed(1234)\n env = gym.wrappers.FlattenDictWrapper(env, ['observation', 'desired_goal'])\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # parameters\n time_steps = 50 # T, time steps in every 
episode\n userCV = False\n interpolate_ratio = 0.2 # set v\n samples_size = 64\n\n # logger and plotter from utilies\n now = (datetime.datetime.utcnow() - datetime.timedelta(hours=4)).strftime(\n \"%b-%d_%H:%M:%S\") # create dictionaries based on ETS time\n logger = Logger(logname=env_name, now=now)\n plotter = Plotter(plotname=env_name+\"-Fig\", now=now)\n\n # add 1 to obs dimension for time step feature (see run_episode())\n obs_dim += 1\n scaler = Scaler(obs_dim)\n\n # initialize three neural networks, one for the ppo policy, one for the value function baseline used to compute\n # advantages, and one for the critic\n baseline = ValueFncNN(obs_dim, name='baseline')\n critic = ValueFncNN(obs_dim, name='critic')\n on_policy = OnPolicyPPO(obs_dim, act_dim, kl_targ)\n\n # initialize replay buffer\n buff = Buffer(1000000)\n\n # run 5 episodes to initialize scaler\n run_policy(env, on_policy, scaler, logger, plotter, episodes=5, plot=False)\n episode = 0\n\n # start training\n with on_policy.sess as sess:\n while episode < num_episodes:\n\n \"\"\"experience replay: there are two buffers, one is the replay buffer which \n keeps expanding with new experiences (off-policy); one is the current buffer \n (\"play\" buffer) which only contains the current experience (on-policy)\n \"\"\"\n # roll-out pi for initial_buff_size episodes, T (50) time steps each to\n # collect a batch of data to R (replay buffer)\n current_buffer = []\n trajectories, episode_experiences = run_policy(env, on_policy, scaler, logger,\n plotter, episodes=batch_size, plot=True)\n episode += len(trajectories)\n plotter.updateEpisodes(episode)\n\n for i in range(0, batch_size):\n for j in range(0, time_steps):\n state, action, reward = episode_experiences[i][j]\n buff.add(np.reshape([state, action, reward], [1, 3])) # add to replay buffer\n current_buffer.append(np.reshape([state, action, reward], [1, 3]))\n\n # currently I don't use the control variate, so no need to compute the Q value here\n # \"\"\"fit Qw through off-policy (use replay buffer)\"\"\"\n # off_trajectories = buff.sample(batch_size*time_steps) # numpy array\n # q_values = compute_q_value(off_trajectories, off_policy, gamma)\n\n \"\"\"fit baseline V() through on-policy (use current trajectories)\"\"\"\n compute_vvalue(trajectories, baseline)\n # print(trajectories)\n\n \"\"\"compute Monte Carlo advantage estimate (on-policy)\"\"\"\n compute_advantages(trajectories, gamma, lam)\n # here as we don't use control variate, learning_signals equal advantages but with a different shape\n # to facilitate next step of the algorithm\n # so in the on-policy advantages I just input with the advantages which is wrong in the strict sense\n # TODO: change the advantages as the form of learning signal\n add_disc_sum_rew(trajectories, gamma) # calculate discounted sum of Rs\n observes, on_actions, advantages, learning_signals, sum_dis_return = build_train_set(trajectories)\n log_batch_stats(observes, on_actions, advantages, logger, sum_dis_return, episode)\n\n \"\"\"different situations based on if we use control variate: if useCV=True, then compute\n critic-based advantage estimate using current buffer, Q and policy\n if useCV=False, then just center the learning signals lt,e=At,e\n \"\"\"\n # if userCV:\n # pass\n # else:\n # # center the learning signals = advantages, and set b = v\n # learning_signals = advantages\n # b = interpolate_ratio\n\n # multiply learning signals by (1-v)\n learning_signals *= (1 - interpolate_ratio)
\n\n \"\"\"sample D=S1:M from replay buffer or current buffer based on beta (M=40)\"\"\"\n if buff.buffer_size < len(current_buffer):\n # using on-policy samples to compute loss and optimize policy\n samples = BatchSample(current_buffer, samples_size)\n else:\n # using off-policy samples to compute loss and optimize policy (always go here)\n # TODO: what's the condition to change?\n samples = buff.sample(samples_size)\n\n \"\"\"compute loss function\"\"\"\n states, actions, rewards = [np.squeeze(elem, axis=1) for elem in np.split(samples, 3, 1)]\n states = np.array([s for s in states])\n states = np.squeeze(states)\n\n # compute PPO loss (first term in the IPG algorithm loss function)\n # with on_policy.sess as sess:\n on_feed_dict = {on_policy.obs_ph: observes,\n on_policy.act_ph: on_actions,\n on_policy.advantages_ph: advantages,\n on_policy.beta_ph: on_policy.beta,\n on_policy.eta_ph: on_policy.eta,\n on_policy.lr_ph: on_policy.lr * on_policy.lr_multiplier}\n old_means_np, old_log_vars_np = sess.run([on_policy.means, on_policy.log_vars], feed_dict=on_feed_dict)\n on_feed_dict[on_policy.old_log_vars_ph] = old_log_vars_np\n on_feed_dict[on_policy.old_means_ph] = old_means_np\n\n sess.run(on_policy.train_op, on_feed_dict)\n\n # compute loss\n on_policy_loss = sess.run(on_policy.loss, feed_dict=on_feed_dict)\n\n # times (1/ET)\n # on_policy_loss = (1 / (time_steps * batch_size)) * on_policy_loss\n on_policy_loss = on_policy_loss\n\n # compute off-policy loss (second term in the IPG algorithm loss function)\n \"\"\"\n consider using temporal difference as the critic, then delta Q = Rt+1 + gamma * Q(St+1, At+1) - Q(St, At)\n then the loss is the sum over all the batch samples\n \"\"\"\n # dict_states is a dict for random samples from replay buffer, not for trajectory\n dict_states = {'states': states}\n # evaluate values (Vt) for samples and add them to the dict by using the critic neural network\n critic_compute_vvalue(dict_states, critic)\n # compute (td target - current values) as delta Qw(Sm) under PPO policy\n b = interpolate_ratio\n # compute Rt+1 + gamma * Q(St+1, At+1)\n off_policy_loss, td_targets = TD(env, dict_states, on_policy, critic)\n off_policy_loss = (b / samples_size) * np.sum(off_policy_loss)\n plotter.updateOffPolicyLoss(off_policy_loss)\n loss = on_policy_loss - off_policy_loss
\n\n print(\"on_policy_loss: {}. Off_policy_loss: {}. Total Loss: {}\".format(on_policy_loss, off_policy_loss, loss))\n print(\"\")\n\n \"\"\"update current policy based on current observes, actions, advantages\"\"\"\n on_feed_dict[on_policy.loss] = tf.reduce_sum(loss)\n on_policy.update(loss, observes, on_actions, advantages, old_means_np, old_log_vars_np, logger, plotter)\n # on_policy.logp = new_logp\n \"\"\"update baseline and critic\"\"\"\n # observes, actions, advantages, disc_sum_rew = build_train_set(trajectories)\n # with baseline.sess as sess:\n baseline.fit(observes, sum_dis_return, logger, plotter, id=\"BaselineLoss\") # update value function\n\n # with critic.sess as sess:\n critic.fit(states, td_targets, logger, plotter, id=\"CriticLoss\")\n logger.write(display=True)\n\n \"\"\"record\"\"\"\n logger.close()\n plotter.plot()\n\n \"\"\"close sessions\"\"\"\n on_policy.close_sess()\n baseline.close_sess()", "def experiment(config):\n with tf.Session() as sess:\n\n seed = config.pop('seed')\n\n if seed:\n seed = int(seed)\n random.seed(seed)\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n env_id = config.pop('env_id')\n LOGGER.info('using {} env'.format(env_id))\n\n env = gym.make(env_id)\n\n global_rewards = []\n global_step, episode = 0, 0\n\n config['env'] = env\n config['env_repr'] = repr(env)\n config['sess'] = sess\n\n render = int(config.pop('render'))\n\n agent = Agent(**config)\n\n rl_writer = tf.summary.FileWriter('./results/rl')\n save_args(config, 'results/args.txt')\n\n while global_step < config['total_steps']:\n episode += 1\n done = False\n rewards, actions = [], []\n observation = env.reset()\n\n while not done:\n global_step += 1\n\n # if episode % 1 == render:\n env.render()\n action = agent.act(observation)\n next_observation, reward, done, info = env.step(action)\n agent.remember(observation, action, reward, next_observation, done)\n train_info = agent.learn()\n\n rewards.append(reward)\n actions.append(action)\n observation = next_observation\n\n ep_rew = sum(rewards)\n global_rewards.append(ep_rew)\n avg_reward = sum(global_rewards[-100:]) / len(global_rewards[-100:])\n\n if episode % 10 == 0:\n log_str =' step {:.0f} ep {:.0f} reward {:.1f} avg {:.1f}'\n logging.info(log_str.format(global_step,\n episode,\n ep_rew,\n avg_reward))\n\n summary = tf.Summary(value=[tf.Summary.Value(tag='episode_reward',\n simple_value=ep_rew)])\n rl_writer.add_summary(summary, episode)\n avg_sum = tf.Summary(value=[tf.Summary.Value(tag='avg_last_100_ep',\n simple_value=avg_reward)])\n rl_writer.add_summary(avg_sum, episode)\n rl_writer.flush()\n \n return config", "def train(self, persist: bool = False, run: int = -1, checkpoint: int = -1):\n self.meta = ICMMetaDataV1(fp=open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, 'agent_stats.csv'), 'w'),\n args=self.state.config)\n train_start = time.time()\n for episode in range(self.state.episodes):\n start_time = time.time()\n state = self.env.reset()\n state = torch.reshape(tensor(state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1, 2).to(\n self.device)\n done = False\n episode_reward = []\n episode_loss = []\n\n # save network\n # if episode % self.state.model_save_interval == 0:\n # save_path = self.state.model_save_path + '/' + self.run_name + '_' + str(episode) + '.pt'\n # torch.save(self.q_network.state_dict(), save_path)\n # print('Successfully saved: ' + save_path)\n\n # Save Model\n self.save(episode)\n # Collect garbage\n # To Do Later\n\n while not done:\n\n # update target network\n if self.state.step % self.state.network_update_interval == 0:\n
print('Updating target network')\n self.target_network.load_state_dict(self.q_network.state_dict())\n\n if self.state.step > len(self.replay_memory):\n self.state.epsilon = max(self.state.final_epsilon,\n self.state.initial_epsilon - self.state.epsilon_step * self.state.step)\n if self.state.epsilon > self.state.final_epsilon:\n self.state.mode = 'Explore'\n else:\n self.state.mode = 'Exploit'\n\n action, q = self.take_action(state, test=False, state_count=0)\n next_state, reward, done, _ = self.env.step(action)\n\n next_state = torch.reshape(tensor(next_state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1,\n 2).to(\n self.device)\n self.push((state, torch.tensor([int(action)]), torch.tensor([reward], device=self.device), next_state,\n torch.tensor([done], dtype=torch.float32)))\n episode_reward.append(reward)\n self.state.step += 1\n state = next_state\n\n # train network\n if self.state.step >= self.start_to_learn and self.state.step % self.state.network_train_interval == 0:\n loss = self.optimize_network()\n episode_loss.append(loss)\n\n if done:\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode)\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode, file=self.log_file)\n # self.log_summary(episode, episode_loss, episode_reward)\n self.last_n_rewards.append(sum(episode_reward))\n self.last_n_intrinsic_rewards.append(sum(self.intrinsic_episode_reward))\n self.meta.update_episode(episode, self.state.step, self.state.epsilon,\n sum(episode_reward), np.mean(self.last_n_rewards),\n np.mean(episode_loss), sum(self.intrinsic_episode_reward),\n np.mean(self.last_n_intrinsic_rewards), self.state.mode)\n\n episode_reward.clear()\n episode_loss.clear()\n self.intrinsic_episode_reward.clear()", "def terminal_test(self):
\n\n for self.cur_ep in tqdm.tqdm(range(1, self.episodes + 1), ascii=True, unit='episodes'):\n\n # Number of passes through the main loop\n step = 1\n\n cur_state = self.env.reset()\n\n done = False\n\n while not done:\n\n # Random choice between:\n if np.random.random() > self.epsilon:\n # Action from the q-table\n action = np.argmax(self.agent.get_q_values(np.array(cur_state)))\n\n else:\n # Random action\n action = np.random.randint(0, self.env.ACTION_SPACE_SIZE)\n\n # Perform an action with the snake\n new_state, reward, done = self.env.step(action)\n\n # Add an example to the memory\n self.agent.update_training_set((cur_state, action, reward, new_state, done))\n\n # Train if applicable\n self.agent.train()\n\n cur_state = new_state\n step += 1\n\n if self.epsilon > self.MIN_EPSILON:\n self.epsilon *= self.EPSILON_DECAY\n self.epsilon = max(self.MIN_EPSILON, self.epsilon)\n\n if self.save_model:\n self.agent.save_model(self.model_file_name)", "def play_gym_model(\n game: str = typer.Argument(\"CartPole-v0\"),\n model: Path = typer.Argument(\"models/CE_model_fcNN_CartPole-v0.pt\"),\n episodes: int = typer.Option(\n 1,\n show_default=True,\n help=\"Number of runs of the environment to simulate consecutively\",\n ),\n frame_limit: int = typer.Option(\n 10000,\n show_default=True,\n help=\"Maximum number of steps to execute for each episode\",\n ),\n fps: int = typer.Option(\n 30,\n show_default=True,\n help=\"Frames per second (actually an upper bound)\"\n ),\n verbose: bool = typer.Option(\n False,\n show_default=True,\n help=\"Print action, reward and observation at every step\",\n ),\n monitor: bool = typer.Option(\n False,\n show_default=True,\n help=\"Activate a monitor to record a video of the results\"\n ),\n logger: str = typer.Option(\n \"WARN\",\n show_default=True,\n help=\"Select logger option, from INFO, WARN or DEBUG \",\n ),\n outdir: Path = typer.Option(\n Path.cwd()/\"reports/videos\",\n help=(\"Output directory for the results of the monitor\"+\n \"[default: ./reports/videos]\"),\n ),\n ):\n typer.echo(f\"Playing {game} with a trained agent.\")\n\n # Set the logger level\n if logger == \"INFO\":\n gym.logger.set_level(gym.logger.INFO)\n elif logger == \"DEBUG\":\n gym.logger.set_level(gym.logger.DEBUG)\n elif logger == \"WARN\":\n gym.logger.set_level(gym.logger.WARN)\n\n # Make and wrap the environment\n env = gym.make(game)\n if monitor:\n env = wrappers.Monitor(env, directory=outdir, force=True)\n env.seed(0)\n\n agent = ModelAgent(model) # Hack, only works in pong, hard\n # implement because gym is stupid\n reward = 0\n done = False\n\n for ep in range(episodes):\n typer.echo(f\"Starting episode {ep}.\")\n ob = env.reset()\n state_count = 0\n while True:\n time.sleep(1/fps)\n state_count += 1\n if not monitor: env.render()\n action = agent.act(ob, reward, done)\n ob, reward, done, _ = env.step(action)\n if verbose:\n typer.echo(f\"{action} {reward} {ob}\")\n if done:\n typer.echo(f\"Game reached end-state in frame {state_count}.\")\n break\n elif state_count >= frame_limit:\n typer.echo(f\"Frame limit of {frame_limit} reached.\")\n break\n env.close()", "def main():\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n\n print(\"Loading environment {}.\".format(cfg[\"RUN_EXECUTABLE\"]))\n worker_id = np.random.randint(20)\n env, config_channel = load_environment(cfg[\"RUN_EXECUTABLE\"], cfg[\"RUN_NO_GRAPHICS\"], worker_id)\n env.reset()\n group_name = env.get_agent_groups()[0]\n step_result = env.get_step_result(group_name)\n state = step_result.obs[0]\n num_agents = len(state)\n\n print(\"Loading Model.\")\n actor = torch.load(cfg[\"RUN_MODEL\"])\n actor.eval()\n\n print(\"Starting Run with {} steps.\".format(cfg[\"RUN_STEPS\"]))\n
reward_cur_episode = np.zeros(num_agents)\n reward_last_episode = np.zeros(num_agents)\n episode = 1\n\n start_time = time.time()\n for steps in range(1, cfg[\"RUN_STEPS\"] + 1):\n with torch.no_grad():\n action = actor(tensor(state).float())\n action = action.cpu().numpy()\n env.set_actions(group_name, action)\n env.step()\n step_result = env.get_step_result(group_name)\n new_state = step_result.obs[0]\n reward = step_result.reward\n done = step_result.done\n\n reward_cur_episode += reward\n\n for i, d in enumerate(done):\n if d:\n reward_last_episode[i] = reward_cur_episode[i]\n reward_cur_episode[i] = 0\n\n if done[0]:\n reward_mean_episode = reward_last_episode.mean()\n elapsed_time = time.time() - start_time\n print(\"Ep. {0:>4} with {1:>7} steps total; {2:8.2f} last ep. rewards; {3}h elapsed\" \\\n .format(episode, steps, reward_mean_episode, format_timedelta(elapsed_time)))\n episode += 1\n\n state = new_state\n\n print(\"Closing environment.\")\n env.close()", "def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def test(config, alg, checkpoint=None, testdelay=0, render=False, envcreator=None, maxepisodelen=10000000):\n if alg == \"random\":\n env = envcreator()\n else:\n agent = get_agent_class(alg)(config=config, env=\"retro-v0\")\n if checkpoint is None:\n raise ValueError(f\"A previously trained checkpoint must be provided for algorithm {alg}\")\n agent.restore(checkpoint)\n env = agent.local_evaluator.env\n\n while True:\n state = env.reset()\n done = False\n reward_total = 0.0\n step = 0\n while not done and step < maxepisodelen:\n if alg == \"random\":\n action = np.random.choice(range(env.action_space.n))\n else:\n action = agent.compute_action(state)\n next_state, reward, done, _ = env.step(action)\n time.sleep(testdelay)\n reward_total += reward\n if render:\n env.render()\n state = next_state\n step = step + 1\n print(\"Episode reward\", reward_total)", "def q_learning(env, agent, num_episodes, batch_size, epsilon, epsilon_min, epsilon_decay, folder):\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n\n for i_episode in range(num_episodes):\n if epsilon > epsilon_min and i_episode > 500:\n # complete random exploration 500 episodes, \n # then decrease exploration till epsilon less than epsilon_min\n epsilon *= epsilon_decay\n sys.stdout.flush()\n\n state = env.reset()\n state = np.reshape(state, [1, env.nS])\n\n \n for t in range(MAX_STEP):\n\n ## Decide action\n action = agent.act(state, epsilon)\n ## Advance the game to the next frame based on the action\n next_state, reward, done, _ = env.step(action)\n\n env.my_render(folder)\n\n
stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n next_state = np.reshape(next_state, [1, env.nS])\n ## Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n ## make next_state the new current state for the next frame.\n state = next_state ## change to copy.copy(next_state), if it is a array\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size) \n\n if done: \n break\n \n mean_score = stats.episode_rewards[i_episode]/stats.episode_lengths[i_episode]\n print(\"episode: {}/{}, score: {}, e: {:.2}, steps:{}, mean score:{:.2}\"\n .format(i_episode, num_episodes, stats.episode_rewards[i_episode], epsilon, \n stats.episode_lengths[i_episode], \n mean_score))\n #if(i_episode > 200):\n write_csv(folder, i_episode, stats.episode_lengths[i_episode], mean_score)\n if(i_episode%50 == 0):\n agent.save(folder + \"_qn\" + str(i_episode) + \".h5\") \n agent.save(folder + \"_qn-final\" + \".h5\") \n\n return stats", "def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # 
initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def run_episode(self, deterministic=False):\n\n\n obs = self.env.reset()\n Observations, Actions, Rewards = [], [], [] # original trajectory\n n_Observations, n_Rewards = [], [] # normalized trajectory\n done = False\n timestep = 0\n while not done and timestep < self.episode_horizon:\n Observations.append(obs)\n if self.state_preprocessor:\n n_obs = self.state_preprocessor.get_scaled_x(obs)\n else:\n n_obs = obs\n n_Observations.append(n_obs)\n action = self.policy.get_action(obs.astype(np.float32).reshape((1,-1)), deterministic=deterministic)\n Actions.append(action.flatten())\n obs, reward, done, _ = self.env.step(np.squeeze(action, axis=0))\n Rewards.append(reward)\n if self.reward_preprocessor:\n n_reward = self.reward_preprocessor.get_scaled_x(reward)\n else:\n n_reward = reward\n n_Rewards.append(n_reward)\n timestep += 1\n\n \n # append the last state\n Observations.append(obs)\n if self.state_preprocessor:\n n_obs = self.state_preprocessor.get_scaled_x(obs)\n else:\n n_obs = obs\n n_Observations.append(n_obs)\n\n unscaled_traj = {\"Observations\": np.array(Observations), \"Actions\": np.array(Actions), \"Rewards\": np.array(Rewards)}\n scaled_traj = {\"Observations\": np.array(n_Observations), \"Actions\": np.array(Actions), \"Rewards\": np.array(n_Rewards)}\n\n # update preprocessers\n if self.state_preprocessor:\n self.state_preprocessor.update(unscaled_traj['Observations'])\n # save preprocessor params for restoration\n self.state_preprocessor.save_params(os.path.join(self.logger.info_dir, \"state_preprocessor_params.pkl\"))\n if self.reward_preprocessor:\n self.reward_preprocessor.update(unscaled_traj['Rewards'])\n self.reward_preprocessor.save_params(os.path.join(self.logger.info_dir, \"reward_preprocessor_params.pkl\"))\n \n return unscaled_traj, scaled_traj", "def _run(self):\n if not self.is_train:\n return self.test() \n\n logger.debug(\"Actor {} resuming at Step {}, {}\".format(self.actor_id, \n self.global_step.value(), time.ctime()))\n\n s = self.emulator.get_initial_state()\n \n s_batch = []\n a_batch = []\n y_batch = []\n bonuses = deque(maxlen=100)\n\n exec_update_target = False\n total_episode_reward = 0\n episode_ave_max_q = 0\n episode_over = False\n qmax_down = 0\n qmax_up = 0\n prev_qmax = -10*6\n low_qmax = 0\n ep_t = 0\n \n while (self.global_step.value() < self.max_global_steps):\n # Sync local learning net with shared mem\n self.sync_net_with_shared_memory(self.local_network, self.learning_vars)\n self.save_vars()\n\n rewards = []\n states = []\n actions = []\n local_step_start = self.local_step\n \n while not episode_over:\n logger.debug('steps: {} / {}'.format(self.global_step.value(), self.max_global_steps))\n # Choose next action and execute it\n 
a, readout_t = self.choose_next_action(s)\n\n new_s, reward, episode_over = self.emulator.next(a)\n total_episode_reward += reward\n\n current_frame = new_s[...,-1]\n bonus = self.density_model.update(current_frame)\n bonuses.append(bonus)\n\n if (self.actor_id == 0) and (self.local_step % 200 == 0):\n bonus_array = np.array(bonuses)\n logger.debug('Mean Bonus={:.4f} / Max Bonus={:.4f}'.format(\n bonus_array.mean(), bonus_array.max()))\n\n # Rescale or clip immediate reward\n # reward = self.rescale_reward(reward + bonus)\n reward = self.rescale_reward(reward)\n ep_t += 1\n \n rewards.append(reward)\n states.append(s)\n actions.append(a)\n \n s = new_s\n self.local_step += 1\n episode_ave_max_q += np.max(readout_t)\n \n global_step, update_target = self.global_step.increment(\n self.q_target_update_steps)\n\n if update_target:\n update_target = False\n exec_update_target = True\n\n if self.local_step % 4 == 0:\n self.batch_update()\n \n self.local_network.global_step = global_step\n\n else:\n mc_returns = list()\n running_total = 0.0\n for r in reversed(rewards):\n running_total = r + self.gamma*running_total\n mc_returns.insert(0, running_total)\n\n mixed_returns = self.cts_eta*np.array(rewards) + (1-self.cts_eta)*np.array(mc_returns)\n\n states.append(new_s)\n episode_length = len(rewards)\n for i in range(episode_length):\n self.replay_memory.append((\n states[i],\n actions[i],\n mixed_returns[i],\n states[i+1],\n i+1 == episode_length))\n\n \n if exec_update_target:\n self.update_target()\n exec_update_target = False\n # Sync local tensorflow target network params with shared target network params\n if self.target_update_flags.updated[self.actor_id] == 1:\n self.sync_net_with_shared_memory(self.target_network, self.target_vars)\n self.target_update_flags.updated[self.actor_id] = 0\n\n s, total_episode_reward, _, ep_t, episode_ave_max_q, episode_over = \\\n self.prepare_state(s, total_episode_reward, self.local_step, ep_t, episode_ave_max_q, episode_over)", "def execute_episode(\n self,\n episode: int,\n game: MathyGymEnv,\n predictor: PolicyValueModel,\n model_dir: str,\n is_verbose_worker: bool = False,\n ):\n if game is None:\n raise ValueError(\"PracticeRunner.get_env returned None type\")\n if predictor is None:\n raise ValueError(\"PracticeRunner.get_model returned None type\")\n game.reset()\n if game.state is None:\n raise ValueError(\"Cannot start self-play practice with a None game state.\")\n env_state = game.state\n episode_history: List[Any] = []\n move_count = 0\n mcts = MCTS(game.mathy, predictor, self.config.cpuct, self.config.mcts_sims)\n if is_verbose_worker and self.config.print_training is True:\n game.render()\n\n while True:\n move_count += 1\n env_state, result = self.step(\n game=game,\n env_state=env_state,\n mcts=mcts,\n model=predictor,\n move_count=move_count,\n history=episode_history,\n is_verbose_worker=is_verbose_worker,\n )\n if result is not None:\n if is_verbose_worker and self.config.print_training is True:\n game.render()\n\n return result + (game.problem,)", "def _run_single(self, thread_id, agent, environment, deterministic=False,\n max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None):\n\n # figure out whether we are using the deprecated way of \"episode_finished\" reporting\n old_episode_finished = False\n if episode_finished is not None and len(getargspec(episode_finished).args) == 1:\n old_episode_finished = True\n\n episode = 0\n # Run this single worker (episode loop) as long as global count thresholds have not been 
reached.\n while not self.should_stop:\n state = environment.reset()\n agent.reset()\n self.global_timestep, self.global_episode = agent.timestep, agent.episode\n episode_reward = 0\n\n # Time step (within episode) loop\n time_step = 0\n time_start = time.time()\n while True:\n action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False)\n reward = 0\n for repeat in xrange(self.repeat_actions):\n state, terminal, step_reward = environment.execute(action=action)\n reward += step_reward\n if terminal:\n break\n\n if not testing:\n # agent.observe(reward=reward, terminal=terminal)\n # Insert everything at once.\n agent.atomic_observe(\n states=state,\n actions=action,\n internals=internals,\n reward=reward,\n terminal=terminal\n )\n\n if sleep is not None:\n time.sleep(sleep)\n\n time_step += 1\n episode_reward += reward\n\n if terminal or time_step == max_episode_timesteps:\n break\n\n # Abort the episode (discard its results) when global says so.\n if self.should_stop:\n return\n\n self.global_timestep += time_step\n\n # Avoid race condition where order in episode_rewards won't match order in episode_timesteps.\n self.episode_list_lock.acquire()\n self.episode_rewards.append(episode_reward)\n self.episode_timesteps.append(time_step)\n self.episode_times.append(time.time() - time_start)\n self.episode_list_lock.release()\n\n if episode_finished is not None:\n # old way of calling episode_finished\n if old_episode_finished:\n summary_data = {\n \"thread_id\": thread_id,\n \"episode\": episode,\n \"timestep\": time_step,\n \"episode_reward\": episode_reward\n }\n if not episode_finished(summary_data):\n return\n # New way with BasicRunner (self) and thread-id.\n elif not episode_finished(self, thread_id):\n return\n\n episode += 1", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience replay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} episodes\".format(episode))\n\t\t\t\tbreak", "def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n
state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores", "def hdqn_learning(\n env,\n agent,\n num_episodes,\n exploration_schedule,\n gamma=1.0,\n ):\n ###############\n # RUN ENV #\n ###############\n # Keep track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n n_thousand_episode = int(np.floor(num_episodes / 1000))\n visits = np.zeros((n_thousand_episode, env.nS))\n total_timestep = 0\n meta_timestep = 0\n ctrl_timestep = defaultdict(int)\n\n for i_thousand_episode in range(n_thousand_episode):\n for i_episode in range(1000):\n episode_length = 0\n current_state = env.reset()\n visits[i_thousand_episode][current_state-1] += 1\n encoded_current_state = one_hot_state(current_state)\n \n done = False\n while not done:\n meta_timestep += 1\n # Get annealing exploration rate (epsilon) from exploration_schedule\n meta_epsilon = exploration_schedule.value(total_timestep)\n goal = agent.select_goal(encoded_current_state, meta_epsilon)[0]\n encoded_goal = one_hot_goal(goal+1)\n\n total_extrinsic_reward = 0\n goal_reached = False\n s1 = encoded_current_state\n while not done and not goal_reached:\n total_timestep += 1\n episode_length += 1\n ctrl_timestep[goal] += 1\n # Get annealing exploration rate (epsilon) from exploration_schedule\n ctrl_epsilon = exploration_schedule.value(total_timestep)\n joint_state_goal = np.concatenate([encoded_current_state, encoded_goal], axis=1)\n action = agent.select_action(joint_state_goal, ctrl_epsilon)[0]\n ### Step the env and store the transition\n
next_state, extrinsic_reward, done, _ = env.step(action)\n # Update statistics\n stats.episode_rewards[i_thousand_episode*1000 + i_episode] += extrinsic_reward\n stats.episode_lengths[i_thousand_episode*1000 + i_episode] = episode_length\n visits[i_thousand_episode][next_state-1] += 1\n\n encoded_next_state = one_hot_state(next_state)\n intrinsic_reward = agent.get_intrinsic_reward(goal+1, next_state)\n goal_reached = next_state == (goal+1)\n\n joint_next_state_goal = np.concatenate([encoded_next_state, encoded_goal], axis=1)\n #print (joint_state_goal, action, joint_next_state_goal, intrinsic_reward, done)\n agent.ctrl_replay_memory.push(joint_state_goal, action, joint_next_state_goal, intrinsic_reward, done)\n # Update Both meta-controller and controller\n agent.update_meta_controller(gamma)\n agent.update_controller(gamma)\n\n total_extrinsic_reward += extrinsic_reward\n current_state = next_state\n encoded_current_state = encoded_next_state\n # Goal Finished\n agent.meta_replay_memory.push(s1, goal, encoded_next_state, total_extrinsic_reward, done)\n\n return agent, stats, visits", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def train_ddpg(agent, env, n_episodes=400, max_t=1000, save=True):\n # get the default brain\n brain_name = env.brain_names[0]\n scores_deque = deque(maxlen=100)\n final_scores = []\n not_solved = True\n num_agents = len(env.reset()[brain_name].vector_observations)\n best = 0\n episodes_remaining = n_episodes\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset()[brain_name]\n states = env_info.vector_observations\n agent.reset()\n agent_scores = np.zeros(num_agents)\n for t in range(max_t):\n actions = agent.act(states)\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n agent.step(states, actions, rewards, next_states, dones)\n states = next_states\n agent_scores += rewards\n if np.any(dones):\n break\n\n max_score = np.max(agent_scores)\n scores_deque.append(max_score)\n final_scores.append(max_score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}'.format(i_episode, np.mean(scores_deque), max_score), end=\"\", flush=True)\n if len(scores_deque) == 100 and np.mean(scores_deque) > SOLVED_SCORE and not_solved:\n not_solved = False\n episodes_remaining = EPISODES_AFTER_SOLVE # try to increase score for some episodes\n print(\"\\nEnvironment solved in {} episodes!\\n\".format(i_episode), flush=True)\n if save:\n torch.save(agent.actor_local.state_dict(), 'saved_models/actor_solved.pth')\n torch.save(agent.critic_local.state_dict(), 'saved_models/critic_solved.pth')\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), flush=True)\n\n if not not_solved and i_episode % 5 == 0 and np.mean(scores_deque) > best:\n best = np.mean(scores_deque)\n if save:\n torch.save(agent.actor_local.state_dict(), 'saved_models/best_actor.pth')\n 
torch.save(agent.critic_local.state_dict(), 'saved_models/best_critic.pth')\n\n if not not_solved:\n episodes_remaining -= 1\n if episodes_remaining == 0:\n break\n\n return final_scores", "def training(self, dataset, repeat=1, gamma=1.0, learning_rate=0.1, model='3yo'):\n for _ in range(repeat):\n for episode in dataset:\n # 1- Get the data stored inside the dataset\n image_index = episode[0] # image of the object\n label_index = episode[1] # label given by the informant\n informant_index = episode[2] # a integer representing the informant\n informant_action = episode[3] # 0=reject, 1=accept\n\n # 2- The agent take an action (with softmax) considering is current state-action table\n # [0=cup, 1=book, 2=ball]\n col = (image_index * self.tot_images) + label_index\n action_array = self.actor_matrix[:, col]\n action_distribution = self._softmax(action_array)\n child_action = np.random.choice(self.tot_actions,\n 1,\n p=action_distribution) # select the action through softmax\n\n # 3- (External) New state and reward obtained from the environment\n # u_t = self.critic_vector[0, col] # previous state\n # New state is estimated, in this simple case nothing happen\n # because the next state is terminal\n # u_t1 = u_t # Only in this example they are the same\n\n # 4- (Intrinsic) The informant_reputation is updated:\n # agent_action, agent_confidence, informant_action, reward\n # informant_vector: 0=unreliable, 1=reliable\n # do_actions_agree: False, True\n # Estimating child_confidence\n distance = np.absolute(action_distribution[0] - action_distribution[1])\n child_confidence_distribution = [1 - distance, distance] # non-knowledgeable, knowledgeable\n child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)\n # Check if child and informant agree\n if (child_action == informant_action):\n do_actions_agree = True\n else:\n do_actions_agree = False\n # Increment the counter in the informant_vector.\n # Here we update the counter distribtuion only if\n # the child is confident, because it is only in that\n # case that the child can say if the informant is\n # reliable or not.\n if (do_actions_agree == False and child_confidence == 1):\n self.informant_vector[informant_index][0] += 1 # unreliable\n elif (do_actions_agree == True and child_confidence == 1):\n self.informant_vector[informant_index][1] += 1 # reliable\n elif (do_actions_agree == False and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n elif (do_actions_agree == True and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n else:\n raise ValueError(\"ERROR: anomaly in the IF condition for informant_vector update\")\n # Using the informant_vector given as input it estimates the reputation of the informant\n informant_reputation_distribution = np.true_divide(self.informant_vector[informant_index],\n np.sum(self.informant_vector[informant_index]))\n informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)\n\n # 5- (Intrinsic) The Cost is estimated:\n # current_state, agent_action, agent_confidence, informant_action, informant_reputation\n # child_confidence: 0=non-knowledgeable, 1=knowledgeable\n # informant_reputation: 0=non-knowledgeable, 1=knowledgeable\n # action: 0=reject, 1=accept\n # informant_action: 0=reject, 1=accept\n cost = self._return_cost(child_confidence,\n informant_reputation,\n 
child_action,\n informant_action,\n value=model)\n\n # 6- The utility table is updated using: previous_state, current_state, cost, reward\n # Updating the critic using Temporal Differencing Learning\n # In this simple case there is not a u_t1 state.\n # The current state is considered terminal.\n # We can delete the term (gamma*u_t1)-u_t and considering\n # only (reward-cost) as utility of the state (see Russel Norvig).\n reward = 0 # only for intrinsic learning reward=0\n delta = (reward - cost) # + (gamma*u_t1) - u_t\n self.critic_vector[0, col] += learning_rate * delta\n\n # 7- The actor table is updated using the delta from the critic\n # Update the ACTOR using the delta\n self.actor_matrix[child_action, col] += learning_rate * delta # the current action\n self.actor_matrix[1 - child_action, col] -= learning_rate * delta # the opposite action", "def run_episode(env, agents, eval=False, render=False, generate_val_data=False, greedy_eval=True, steps=25, store_data=False, trainer=None):\n obs_n = env.reset()\n reward_tot = [0.0 for i in range(len(agents))]\n reward_n = [0.0 for i in range(len(agents))]\n done_n = [False for i in range(len(agents))]\n\n if store_data:\n observations = []\n rewards = []\n rewards_details = {\"rewards_target\": [], \"rewards_collision\": []}\n actions = []\n\n # Start env\n for i in range(steps):\n\n # query for action from each agent's policy\n act_n = []\n if store_data or True:\n observations.append(obs_n)\n actions.append([])\n rewards.append(reward_n)\n # Take actions\n action_list = []\n for j, agent in enumerate(agents):\n action_unscaled = agent.step(obs_n[j], reward_n[j], done=done_n[0], eval=eval,\n generate_val_data=generate_val_data, greedy_eval=greedy_eval)\n action_list.append(action_unscaled)\n if store_data:\n actions[-1].append(action_unscaled)\n action = scale_action(env, j, action_unscaled)\n act_n.append(action)\n if trainer is not None and not eval:\n trainer.step(obs_n, reward_n, action_list, done=done_n)\n # step environment\n obs_n, reward_n, done_n, info_n = env.step(act_n)\n\n for j, r in enumerate(reward_n):\n reward_tot[j] += r\n if done_n[0]:\n for j, agent in enumerate(agents):\n action = scale_action(env, j, agent.step(obs_n[j], reward_n[j], done=done_n[0], eval=eval,\n generate_val_data=generate_val_data, greedy_eval=greedy_eval))\n act_n.append(action)\n agent.reset()\n break\n\n # render all agent views\n if render:\n env.render()\n for j, agent in enumerate(agents):\n agent.reset()\n if trainer is not None:\n trainer.reset()\n if not store_data:\n return reward_tot, i + 1\n else:\n extra_data = {\"observations\": observations, \"actions\": actions, \"rewards\": rewards, \"rewards_details\": rewards_details}\n return reward_tot, i + 1, extra_data", "def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=32,\n eval_every=1000,\n max_steps=100000,\n start_epsilon=0.9,\n end_epsilon=0.001,\n epsilon_decay_steps=1000,\n render=True,\n ):\n\n agent = self.create_agent(env)\n curr_epsilon = start_epsilon\n epsilon_decay = self.get_decay_value(\n start_epsilon, end_epsilon, epsilon_decay_steps\n )\n\n obs = env.reset()\n action = agent.act(obs, epsilon=curr_epsilon)\n\n for step in range(1, max_steps + 1):\n next_obs, reward, done, _ = env.step(action)\n next_action = agent.act(next_obs, epsilon=curr_epsilon)\n agent.store_step(obs, action, reward, next_obs, next_action, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if self.time_to(train_every, step):\n agent.perform_training(gamma=self.gamma)\n 
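# get_decay_value is not shown in this snippet; a plausible sketch, assuming a
# linear schedule consistent with the max(end_epsilon, curr_epsilon - epsilon_decay)
# update applied right after each training step below:
def get_decay_value(start_eps, end_eps, decay_steps):
    # constant per-step decrement so start_eps reaches end_eps after decay_steps steps
    return (start_eps - end_eps) / float(decay_steps)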
curr_epsilon = max(end_epsilon, curr_epsilon - epsilon_decay)\n\n if self.time_to(eval_every, step):\n self.evaluate_agent(agent, test_env, end_epsilon)\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n if done:\n obs = env.reset()\n action = agent.act(obs, epsilon=curr_epsilon)\n\n print(\"At step {}\".format(step), end=\"\\r\")\n print(\"\\nDone!\")\n\n return agent", "def train_dqn(env, learn_dict, agent, log_results=True):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = learn_dict['eps_start']\n brain_name = learn_dict['brain_name']\n n_episodes=learn_dict['n_episodes']\n max_t= learn_dict['max_t']\n eps_start= learn_dict['eps_start']\n eps_end= learn_dict['eps_end']\n eps_decay= learn_dict['eps_decay']\n early_stop = learn_dict['early_stop']\n\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0\n for t in range(max_t):\n \n action = agent.act(state, eps).astype(int)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n # have the agent learn a step\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n if log_results: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n if log_results: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=early_stop:\n if log_results: print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return scores", "def run_episode(self, environment):\n state, texts = environment.reset()\n self.steps_done = 0\n action = None\n while True:\n state_tensor = FloatTensor([state])\n text_tensor = FloatTensor(texts).mean(dim=0, keepdim=True)\n action = self.Q.sample_from_softmax_policy(state_tensor, text_tensor)\n position = self.convert_action(action)\n (next_state, next_texts), reward, done, _ = environment.step(position)\n next_text_tensor = FloatTensor(next_texts).mean(dim=0, keepdim=True)\n for t1 in texts:\n t1_tensor = FloatTensor([t1])\n for t2 in next_texts:\n t2_tensor = FloatTensor([t2])\n self.memory.push(\n (state_tensor, t1_tensor, action, t2_tensor,) # action is already a tensor\n )\n self.learn(state_tensor, text_tensor, action, next_state, next_text_tensor, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def run_episode(self):\n self.reset_episode()\n obs = self.env.reset()\n while True:\n action = self.Policy[self.env.stateDict[obs]]\n new_obs, reward, done, _ = self.env.step(action)\n if self.mode=='debug':\n print(\"PrevObs:{}, Action:{}, Obs:{}, Reward:{}, Done:{}\"\n .format(obs, action, new_obs,reward,done))\n self.totalReward += reward\n self.totalSteps += 1\n if done:\n break\n else:\n obs = new_obs\n return self.totalReward", "def train(self):\n ##################\n # YOUR CODE HERE #\n ##################\n start = time.time()\n 
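# The optimize_model closure defined just below folds per-step rewards into
# discounted returns, resetting the running return R whenever a nonzero reward
# appears (a Pong-style episode-boundary trick); a standalone sketch of that
# recurrence:
import numpy as np

def discounted_returns(rewards, gamma=0.99):
    out, R = np.zeros(len(rewards)), 0.0
    for i in reversed(range(len(rewards))):
        if abs(rewards[i]) > 0.0:
            R = 0.0  # a point was scored: restart the return
        R = rewards[i] + gamma * R
        out[i] = R
    return out

print(discounted_returns([0.0, 0.0, 1.0]))  # -> [0.9801, 0.99, 1.0]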
if self.gae:\n self.train_gae()\n return\n\n def optimize_model():\n R = 0\n for i in reversed(range(len(self.rewards))):\n if abs(self.rewards[i]) > 0.0:\n R = 0\n R = self.rewards[i] + self.gamma * R\n self.rewards[i] = R\n rewards = torch.Tensor(self.rewards)\n if self.var_reduce:\n rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)\n\n policy_loss = 0.0\n for (log_prob, r) in zip(self.log_probs, rewards):\n policy_loss -= log_prob * r\n\n loss = policy_loss.data[0, 0]\n\n self.opt.zero_grad()\n policy_loss = cu(policy_loss)\n policy_loss.backward()\n self.opt.step()\n\n self.clear_action()\n return loss\n\n self.model.train()\n if USE_CUDA:\n self.model.cuda()\n running_reward = None\n\n for episode in range(1, self.n_episode+1):\n self.init_game_setting()\n state = self.env.reset()\n\n tot_reward = 0\n a, b = 0, 0\n for t in range(self.episode_len):\n action = self.make_action(state, test=False)\n state, reward, done, info = self.env.step(action)\n self.rewards.append(reward)\n if reward > 0:\n a += 1\n if reward < 0:\n b += 1\n tot_reward += reward\n if done:\n break\n\n if running_reward is None:\n running_reward = tot_reward\n else:\n running_reward = 0.99 * running_reward + 0.01 * tot_reward\n\n if episode % self.update_every == 0:\n loss = optimize_model()\n print(\"Episode %d\" % episode)\n print(time_since(start))\n print(\"reward %.4f %d:%d len=%d\" % (running_reward, a, b, t))\n torch.save(self.model.state_dict(), self.model_fn)", "def fit(self, env, env_eval, num_iterations, max_episode_length=None):\n train_counter = 0;\n eval_res_hist = np.zeros((1,3));\n\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n setpoint_this = ob_this[6:8]\n \n this_ep_length = 0;\n flag_print_1 = True;\n flag_print_2 = True;\n action_counter = 0;\n \n for step in range(num_iterations):\n #Check which stage is the agent at. If at the collecting stage,\n #then the actions will be random action.\n if step <= self._num_burn_in:\n if flag_print_1:\n logging.info (\"Collecting samples to fill the replay memory...\");\n flag_print_1 = False;\n\n action_mem = self.select_action(None, stage = 'collecting');\n action = self._policy.process_action(setpoint_this, action_mem)\n\n else:\n if flag_print_2:\n logging.info (\"Start training process...\");\n flag_print_2 = False;\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n \n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n\n action_mem = self.select_action(state_this_net, stage = 'training')\n # covert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem) \n\n action_counter = action_counter + 1 if action_counter < 4 else 1;\n\n time_next, ob_next, is_terminal = env.step(action)\n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n \n setpoint_next = ob_next[6:8]\n \n #check if exceed the max_episode_length\n if max_episode_length != None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n\n #save sample into memory \n self._memory.append(Sample(ob_this, action_mem, ob_next\n , is_terminal))\n\n \n #Check which stage is the agent at. 
 If at the training stage,\n #then do the training\n if step > self._num_burn_in:\n #Check the train frequency\n if action_counter % self._train_freq == 0 \\\n and action_counter > 0:\n action_counter = 0;\n #Eval the model\n if train_counter % self._eval_freq == 0:\n eval_res = self.evaluate(env_eval, self._eval_epi_num\n , show_detail = True);\n eval_res_hist = np.append(eval_res_hist\n , np.array([step\n , eval_res[0], eval_res[1]]).reshape(1, 3)\n , axis = 0);\n np.savetxt(self._log_dir + '/eval_res_hist.csv'\n , eval_res_hist, delimiter = ',');\n logging.info('Global Step: %d, evaluation average reward is %0.04f, average episode length is %d.' % (step, eval_res[0], eval_res[1]));\n \n \n #Sample from the replay memory\n samples = self._preprocessor.process_batch(\n self._memory.sample(self._batch_size), \n self._min_array, self._max_array);\n #Construct target values, one for each of the sample \n #in the minibatch\n samples_x = None;\n targets = None;\n for sample in samples:\n sample_s = np.append(sample.obs[0:13], sample.obs[14:]).reshape(1,16)\n sample_s_nex = np.append(sample.obs_nex[0:13], \n sample.obs_nex[14:]).reshape(1,16)\n sample_r = self._preprocessor.process_reward(sample.obs_nex[12:15])\n\n target = self.calc_q_values(sample_s);\n a_max = self.select_action(sample_s_nex, stage = 'greedy');\n \n \n\n if sample.is_terminal:\n target[0, sample.a] = sample_r;\n else:\n target[0, sample.a] = (sample_r\n + self._gamma \n * self.calc_q_values_1(\n sample_s_nex)[0, a_max]);\n if targets is None:\n targets = target;\n else:\n targets = np.append(targets, target, axis = 0);\n if samples_x is None:\n samples_x = sample_s;\n else:\n samples_x = np.append(samples_x, sample_s, axis = 0);\n #Run the training\n \n \n feed_dict = {self._state_placeholder:samples_x\n ,self._q_placeholder:targets}\n sess_res = self._sess.run([self._train_op, self._loss]\n , feed_dict = feed_dict);\n \n #Update the target parameters\n if train_counter % self._target_update_freq == 0:\n self.update_policy();\n logging.info('Global Step %d: update target network.' 
\n %(step));\n #Save the parameters\n if train_counter % self._save_freq == 0 or step + 1 == num_iterations:\n checkpoint_file = os.path.join(self._log_dir\n , 'model_data/model.ckpt');\n self._saver.save(self._sess\n , checkpoint_file, global_step=step);\n \n if train_counter % 100 == 0:\n logging.info (\"Global Step %d: loss %0.04f\"%(step, sess_res[1]));\n # Update the events file.\n summary_str = self._sess.run(self._summary, feed_dict=feed_dict)\n self._summary_writer.add_summary(summary_str, train_counter);\n self._summary_writer.add_graph(self._sess.graph);\n self._summary_writer.flush()\n \n train_counter += 1;\n \n #check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n\n this_ep_length = 0;\n action_counter = 0;\n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n time_this = time_next\n this_ep_length += 1;", "def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. 
Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)", "def learn(env,\n env_test,\n env_test1,\n q_func,\n optimizer_spec,\n optimizer_spec_omega,\n optimizer_spec_term,\n session,\n options,\n exploration=LinearSchedule(1000000, 0.1),\n stopping_criterion=None,\n replay_buffer_size=1000000,\n batch_size=32,\n gamma=0.99,\n xi=None,\n learning_starts=50000,\n learning_freq=4,\n frame_history_len=4,\n target_update_freq=10000,\n grad_norm_clipping=10,\n eval_obs_array=None,\n room_q_interval=1e5,\n epoch_size=5e4,\n log_dir=None,\n transfer_config=None,\n random_length=1000):\n\n Costs = []\n loss_term_log = []\n average_loss_term = []\n loss_omega_log = []\n average_loss_omega = []\n Average_costs = []\n test_rewards = []\n test_rewards1= []\n none_discount = []\n none_discount1 = []\n short_average = []\n short_average1 = []\n short_average_none = []\n short_average_none1 = []\n test_q_max_log = []\n test_u_log = []\n test_omega_log = []\n Episode_num = 0\n\n online_q_omega = False\n online_termination = True\n q_omega_uniform_sample = False\n four_to_two = False\n no_share_para = False\n target_beta = False\n termination_stop = False\n beta_no_bias = False\n\n debug_no_term_train = False\n\n obs_random_batch = None\n\n if transfer_config:\n if transfer_config.has_key('online_q_omega'):\n online_q_omega = transfer_config['online_q_omega']\n if transfer_config.has_key('online_termination'):\n online_termination = transfer_config['online_termination']\n if transfer_config.has_key('q_omega_uniform_sample'):\n q_omega_uniform_sample = transfer_config['q_omega_uniform_sample']\n if transfer_config.has_key('four_to_two'):\n four_to_two = transfer_config['four_to_two']\n if transfer_config.has_key('no_share_para'):\n no_share_para = transfer_config['no_share_para']\n if transfer_config.has_key('target_beta'):\n target_beta = transfer_config['target_beta']\n if transfer_config.has_key('xi'):\n xi = transfer_config['xi']\n if transfer_config.has_key('termination_stop'):\n termination_stop = transfer_config['termination_stop']\n if transfer_config.has_key('beta_no_bias'):\n beta_no_bias = transfer_config['beta_no_bias']\n\n if transfer_config.has_key('debug_no_term_train'):\n debug_no_term_train = transfer_config['debug_no_term_train']\n\n assert type(env.observation_space) == gym.spaces.Box\n assert type(env.action_space) == gym.spaces.Discrete\n if four_to_two:\n assert frame_history_len == 4\n\n ###############\n # BUILD MODEL #\n ###############\n\n img_h, img_w, img_c = env.observation_space.shape\n input_history_len = 2 if four_to_two else frame_history_len\n input_shape = (img_h, img_w, input_history_len * img_c)\n num_actions = env.action_space.n\n # source + primitive\n num_options = len(options)\n\n # set up placeholders\n # placeholder for current observation (or state)\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))\n # placeholder for current action\n act_t_ph = tf.placeholder(tf.int32, [None])\n # placeholder for current option\n opt_t_ph = tf.placeholder(tf.int32, [None])\n # placeholder for current options whose actions are the same as action taken (k-hot)\n opa_t_ph = tf.placeholder(tf.float32, [None] + [num_options])\n # placeholder for current reward\n rew_t_ph = tf.placeholder(tf.float32, [None])\n # placeholder for next observation (or state)\n obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))\n # placeholder 
for end of episode mask\n # this value is 1 if the next state corresponds to the end of an episode,\n # in which case there is no Q-value at the next state; at the end of an\n # episode, only the current state reward contributes to the target, not the\n # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)\n done_mask_ph = tf.placeholder(tf.float32, [None])\n Episode_reward = tf.placeholder(tf.float32)\n\n # casting to float on GPU ensures lower data transfer times.\n obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0\n obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0\n\n # Here, you should fill in your own code to compute the Bellman error. This requires\n # evaluating the current and next Q-values and constructing the corresponding error.\n # TensorFlow will differentiate this error for you, you just need to pass it to the\n # optimizer. See assignment text for details.\n # Your code should produce one scalar-valued tensor: total_error\n # This will be passed to the optimizer in the provided code below.\n # Your code should also produce two collections of variables:\n # q_func_vars\n # target_q_func_vars\n # These should hold all of the variables of the Q-function network and target network,\n # respectively. A convenient way to get these is to make use of TF's \"scope\" feature.\n # For example, you can create your Q-function network with the scope \"q_func\" like this:\n # <something> = q_func(obs_t_float, num_actions, scope=\"q_func\", reuse=False)\n # And then you can obtain the variables like this:\n # q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')\n # Older versions of TensorFlow may require using \"VARIABLES\" instead of \"GLOBAL_VARIABLES\"\n ######\n \n # YOUR CODE HERE\n q_current, q_omega_current, term_current = q_func(obs_t_float, num_actions, num_options, scope='q_func', reuse=False, no_share_para=no_share_para, termination_stop=termination_stop, beta_no_bias=beta_no_bias)\n q_target, q_omega_target, term_target = q_func(obs_tp1_float, num_actions, num_options, scope='target_q_func', reuse=False, no_share_para=no_share_para, termination_stop=termination_stop, beta_no_bias=beta_no_bias)\n q_next_current, q_omega_next_current, term_next_current = q_func(obs_tp1_float, num_actions, num_options, scope='q_func', reuse=True, no_share_para=no_share_para, termination_stop=termination_stop, beta_no_bias=beta_no_bias)\n if debug_no_term_train:\n term_next_current_old = term_next_current = term_current = term_target = tf.constant(1.0, dtype=tf.float32, shape=(1, num_options))\n\n\n\n # q_value\n q_val_current = tf.reduce_sum(q_current * tf.one_hot(act_t_ph, num_actions), axis=-1)\n q_val_next_raw = tf.reduce_sum(q_target * tf.one_hot(tf.argmax(q_next_current, axis=-1), num_actions), axis=-1)\n q_val_next = q_val_next_raw * (1 - done_mask_ph)\n\n # q_value error\n total_error_q = tf.reduce_mean(tf.losses.mean_squared_error(rew_t_ph + gamma * q_val_next, q_val_current))\n # q_omega_value\n term_val_next = tf.reduce_sum(term_next_current * tf.one_hot(opt_t_ph, num_options), axis=-1)\n q_omega_val_next = tf.reduce_sum(q_omega_next_current * tf.one_hot(opt_t_ph, num_options), axis=-1)\n max_q_omega_next = tf.reduce_max(q_omega_next_current, axis=-1)\n max_q_omega_next_targ = tf.reduce_sum(q_omega_target * tf.one_hot(tf.argmax(q_omega_next_current, axis=-1), num_options), axis=-1)\n\n if target_beta: # change var\n term_next_current_old = term_next_current\n term_next_current = term_target\n\n u_next_raw = (1 - 
term_next_current) * q_omega_target + term_next_current * max_q_omega_next_targ[..., None]\n u_next = tf.stop_gradient(u_next_raw * (1 - done_mask_ph)[..., None])\n\n if target_beta: # roll back\n term_next_current = term_next_current_old\n\n # q_omega_value error\n total_error_q_omega = tf.reduce_mean(tf.reduce_sum(\n opa_t_ph *\n tf.losses.mean_squared_error(rew_t_ph[..., None] + gamma * u_next, q_omega_current, reduction=tf.losses.Reduction.NONE),\n axis=-1\n ))\n\n # optimize termination\n if xi is None:\n xi = 0.8 * (max_q_omega_next - tf.nn.top_k(q_omega_next_current, 2)[0][:, 1])\n advantage_go = q_omega_val_next - max_q_omega_next + xi\n advantage = tf.stop_gradient(advantage_go)\n # total_error_term = term_val_next * advantage * (1 - done_mask_ph)\n total_error_term = term_val_next * advantage # modified\n tf.summary.scalar('total_error_term', total_error_term)\n # def term_grad(optimizer, objective, var_list, clip_val=10):\n # gradients = optimizer.compute_gradients(objective, var_list=var_list)\n # for i, (grad, var) in enumerate(gradients):\n # if grad is not None:\n # gradients[i] = (tf.clip_by_norm(grad, clip_val) * advantage_go, var)\n # return optimizer.apply_gradients(gradients)\n\n q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')\n # print len(q_func_vars)\n target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')\n # print len(target_q_func_vars)\n\n\n # for log purposes\n # add some tensor for logs\n u_current = (tf.cast(tf.constant(np.ones(num_options)),\n tf.float32) - term_current) * q_omega_current + term_current * (tf.reduce_max(q_omega_current,\n axis=-1))[:, None]\n max_q_omega = tf.reduce_max(q_omega_current, axis=-1)\n values1 = tf.nn.top_k(q_omega_current, 2)[0]\n advantage_current = q_omega_current - max_q_omega[:, None] + 0.8 * (max_q_omega - values1[:, 1])[:, None]\n term_loss = term_current * advantage_current\n tf.summary.histogram('u_current', u_current)\n tf.summary.histogram('advantage_current', advantage_current)\n tf.summary.histogram('term_loss', term_loss)\n tf.summary.histogram('term_current', term_current, family='term_current')\n [tf.summary.histogram('term_current_%d' % (i), tf.reduce_mean(term_current[:, i]), family='term_current') for i in range(num_options)]\n [tf.summary.scalar('term_current_%d' % (i), tf.reduce_mean(term_current[:, i]), family='term_vals') for i in range(num_options)]\n # summaries_first = [tf.summary.histogram('u_current', u_current),\n # tf.summary.histogram('advantage_current', advantage_current),\n # tf.summary.histogram('term_loss', term_loss),\n # tf.summary.histogram('term_current', term_current, family='term_current'),\n # [tf.summary.histogram('term_current_%d' % (i), tf.reduce_mean(term_current[:, i]), family='term_current') for i in range(num_options)],\n # [tf.summary.scalar('term_current_%d' % (i), tf.reduce_mean(term_current[:, i]), family='term_vals') for i in range(num_options)]]\n # summaries_diff = [tf.summary.histogram('u_current11', u_current),\n # tf.summary.histogram('advantage_current1', advantage_current),\n # tf.summary.histogram('term_loss1', term_loss),\n # tf.summary.histogram('term_current1', term_current, family='term_current1'),\n # [tf.summary.histogram('term_current1_%d' % (i), tf.reduce_mean(term_current[:, i]),\n # family='term_current1') for i in range(num_options)],\n # [tf.summary.scalar('term_current1_%d' % (i), tf.reduce_mean(term_current[:, i]),\n # family='term_vals1') for i in range(num_options)]]\n # 
merged_summary_diff=tf.summary.merge(summaries_diff)\n # merged_summary_op = tf.summary.merge(summaries_first)\n merged_summary_op = tf.summary.merge_all()\n # summary_second = [tf.summary.scalar(\"my_second_graph_loss\", Episode_reward)]\n # merged_summary_op1 = tf.summary.merge(summary_second)\n summary_writer = tf.summary.FileWriter(log_dir + 'tfb', session.graph)\n\n q_max = tf.reduce_max(q_current, 1)\n u_max = tf.reduce_max(u_current, 1)\n omega_max = tf.reduce_max(q_omega_current, 1)\n average_Q = tf.reduce_mean(q_max)\n average_U = tf.reduce_mean(u_max)\n average_omega = tf.reduce_mean(omega_max)\n ######\n\n # construct optimization op (with gradient clipping)\n learning_rate = tf.placeholder(tf.float32, (), name=\"learning_rate\")\n learning_rate_omega = tf.placeholder(tf.float32, (), name=\"learning_rate_omega\")\n learning_rate_term = tf.placeholder(tf.float32, (), name=\"learning_rate_term\")\n optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)\n optimizer_omega = optimizer_spec_omega.constructor(learning_rate=learning_rate_omega, **optimizer_spec_omega.kwargs)\n optimizer_term = optimizer_spec_term.constructor(learning_rate=learning_rate_term, **optimizer_spec_term.kwargs)\n train_fn = minimize_and_clip(optimizer, total_error_q, var_list=q_func_vars, clip_val=grad_norm_clipping)\n train_fn_omega = minimize_and_clip(optimizer_omega, total_error_q_omega, var_list=q_func_vars, clip_val=grad_norm_clipping)\n\n if debug_no_term_train:\n train_fn_term = tf.no_op()\n else:\n train_fn_term = minimize_and_clip(optimizer_term, total_error_term, var_list=q_func_vars, clip_val=grad_norm_clipping)\n # train_fn_term = term_grad(optimizer_term, term_val_next, var_list=q_func_vars, clip_val=grad_norm_clipping)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_fn = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_fn.append(var_target.assign(var))\n update_target_fn = tf.group(*update_target_fn)\n\n # construct the replay buffer\n replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)\n\n # for log purposes\n replay_buffer_test = ReplayBuffer(10000, frame_history_len)\n replay_buffer_test1 = ReplayBuffer(10000, frame_history_len)\n\n ###############\n # RUN ENV #\n ###############\n model_initialized = False\n q_model_initialized = False\n option_activated = False\n option_running = None\n num_param_updates = 0\n mean_episode_reward = -float('nan')\n best_mean_episode_reward = -float('inf')\n last_obs = env.reset()\n next_recent_obs = None\n idx = replay_buffer.store_frame(last_obs)\n LOG_EVERY_N_STEPS = 10000\n running_time = [0, time.time()]\n\n for t in itertools.count():\n ### 1. Check stopping criterion\n if stopping_criterion is not None and stopping_criterion(env, t):\n break\n\n ### 2. Step the env and store the transition\n # At this point, \"last_obs\" contains the latest observation that was\n # recorded from the simulator. Here, your code needs to store this\n # observation and its outcome (reward, next observation, etc.) 
into\n # the replay buffer while stepping the simulator forward one step.\n # At the end of this block of code, the simulator should have been\n # advanced one step, and the replay buffer should contain one more\n # transition.\n # Specifically, last_obs must point to the new latest observation.\n # Useful functions you'll need to call:\n # obs, reward, done, info = env.step(action)\n # this steps the environment forward one step\n # obs = env.reset()\n # this resets the environment if you reached an episode boundary.\n # Don't forget to call env.reset() to get a new observation if done\n # is true!!\n # Note that you cannot use \"last_obs\" directly as input\n # into your network, since it needs to be processed to include context\n # from previous frames. You should check out the replay buffer\n # implementation in dqn_utils.py to see what functionality the replay\n # buffer exposes. The replay buffer has a function called\n # encode_recent_observation that will take the latest observation\n # that you pushed into the buffer and compute the corresponding\n # input that should be given to a Q network by appending some\n # previous frames.\n # Don't forget to include epsilon greedy exploration!\n # And remember that the first time you enter this loop, the model\n # may not yet have been initialized (but of course, the first step\n # might as well be random, since you haven't trained your net...)\n\n #####\n \n # YOUR CODE HERE\n\n\n # store the first random_length frames to calculate the average Q\n epsilon = exploration.value(t / learning_freq)\n if next_recent_obs is not None:\n recent_obs = next_recent_obs\n else:\n recent_obs = replay_buffer.encode_recent_observation()\n\n # pick an option\n if not option_activated:\n if random.random() > epsilon and model_initialized:\n q_omega_vals = session.run(q_omega_current, {obs_t_ph: recent_obs[None, ..., -input_history_len:]})\n option_running = np.argmax(q_omega_vals)\n else:\n option_running = random.choice(range(num_options))\n option_activated = True\n\n # choose action\n action = options[option_running].act(recent_obs)\n\n # take a step in the environment\n new_obs, r, done, _ = env.step(action)\n if done:\n new_obs = env.reset()\n\n # TODO: change replay_buffer to support option storage\n opa = np.array(\n [1 if i == option_running or options[i].act(recent_obs) == action else 0 for i in range(num_options)]\n )\n # opa[option_running] = 2\n replay_buffer.store_effect(idx, action, r, done, opa)\n idx = replay_buffer.store_frame(new_obs)\n next_recent_obs = replay_buffer.encode_recent_observation()\n\n if not model_initialized:\n initialize_interdependent_variables(session, tf.global_variables(), {\n obs_t_ph: recent_obs[None, ..., -input_history_len:],\n obs_tp1_ph: next_recent_obs[None, ..., -input_history_len:],\n })\n session.run(update_target_fn)\n model_initialized = True\n saver = tf.train.Saver()\n # checkpoint = tf.train.get_checkpoint_state(\n # \"/home/lsy/PycharmProjects/ple-monstrerkong/examples/dqn_transfer_option/logs/12_16off1c_12_16_17_20:38:47/dqn\")\n # if checkpoint and checkpoint.model_checkpoint_path:\n # saver.restore(session, checkpoint.model_checkpoint_path)\n # print(\"Successfully loaded: \", checkpoint.model_checkpoint_path)\n # Load = True\n # else:\n # print(\"Could not find old network weights\")\n\n # online update q_omega & termination\n if t > learning_starts and not debug_no_term_train and not done:\n loss_term, _ = session.run([total_error_term, train_fn_term], feed_dict={\n obs_t_ph: recent_obs[None, ..., 
-input_history_len:],\n opt_t_ph: [option_running],\n # opa_t_ph: [option_running],\n # rew_t_ph: [r],\n obs_tp1_ph: next_recent_obs[None, ..., -input_history_len:],\n done_mask_ph: [1.0 if done == True else 0.0],\n learning_rate_omega: optimizer_spec_omega.lr_schedule.value(t / learning_freq),\n learning_rate_term: optimizer_spec_term.lr_schedule.value(t / learning_freq)\n })\n loss_term_log.append(loss_term)\n if online_q_omega:\n for i in range(num_options):\n if opa[i] > 0:\n _, loss_omega = session.run([train_fn_omega, total_error_q_omega], feed_dict={\n obs_t_ph: recent_obs[None, ..., -input_history_len:],\n opa_t_ph: [i],\n rew_t_ph: [r],\n obs_tp1_ph: next_recent_obs[None, ..., -input_history_len:],\n done_mask_ph: [1.0 if done == True else 0.0],\n learning_rate_omega: optimizer_spec_omega.lr_schedule.value(t / learning_freq),\n })\n loss_omega_log.append(loss_omega)\n\n\n term_probs = session.run(term_next_current, {obs_tp1_ph: next_recent_obs[None, ..., -input_history_len:]})\n # print \"term_probs\", term_probs\n if done or not random.random() > term_probs[0][option_running]: # will re-pick an option\n option_activated = False\n\n\n #####\n\n # at this point, the environment should have been advanced one step (and\n # reset if done was true), and last_obs should point to the new latest\n # observation\n\n ### 3. Perform experience replay and train the network.\n # note that this is only done if the replay buffer contains enough samples\n # for us to learn something useful -- until then, the model will not be\n # initialized and random actions should be taken\n if (t > learning_starts and\n t % learning_freq == 0 and\n replay_buffer.can_sample(batch_size)):\n # Here, you should perform training. Training consists of four steps:\n # 3.a: use the replay buffer to sample a batch of transitions (see the\n # replay buffer code for function definition, each batch that you sample\n # should consist of current observations, current actions, rewards,\n # next observations, and done indicator).\n # 3.b: initialize the model if it has not been initialized yet; to do\n # that, call\n # initialize_interdependent_variables(session, tf.global_variables(), {\n # obs_t_ph: obs_t_batch,\n # obs_tp1_ph: obs_tp1_batch,\n # })\n # where obs_t_batch and obs_tp1_batch are the batches of observations at\n # the current and next time step. The boolean variable q_model_initialized\n # indicates whether or not the model has been initialized.\n # Remember that you have to update the target network too (see 3.d)!\n # 3.c: train the model. To do this, you'll need to use the train_fn and\n # total_error ops that were created earlier: total_error is what you\n # created to compute the total Bellman error in a batch, and train_fn\n # will actually perform a gradient step and update the network parameters\n # to reduce total_error. 
When calling session.run on these you'll need to\n # populate the following placeholders:\n # obs_t_ph\n # act_t_ph\n # rew_t_ph\n # obs_tp1_ph\n # done_mask_ph\n # (this is needed for computing total_error)\n # learning_rate -- you can get this from optimizer_spec.lr_schedule.value(t)\n # (this is needed by the optimizer to choose the learning rate)\n # 3.d: periodically update the target network by calling\n # session.run(update_target_fn)\n # you should update every target_update_freq steps, and you may find the\n # variable num_param_updates useful for this (it was initialized to 0)\n #####\n \n # YOUR CODE HERE\n\n # step a\n obs_t_batch, act_t_batch, rew_t_batch, obs_tp1_batch, done_mask_batch, opa_batch = replay_buffer.sample(batch_size)\n\n # step b\n if not q_model_initialized:\n session.run(update_target_fn)\n q_model_initialized = True\n\n # step c --also log cost(loss) TODO: off-policy update q_omega & termination\n # run_list = [train_fn, total_error_q]\n run_list = []\n feed_dict = {\n obs_t_ph: obs_t_batch[..., -input_history_len:],\n act_t_ph: act_t_batch,\n rew_t_ph: rew_t_batch,\n obs_tp1_ph: obs_tp1_batch[..., -input_history_len:],\n done_mask_ph: done_mask_batch,\n learning_rate: optimizer_spec.lr_schedule.value(t / learning_freq)\n }\n if not online_q_omega and not q_omega_uniform_sample:\n run_list.append(train_fn_omega)\n run_list.append(total_error_q_omega)\n feed_dict[opa_t_ph] = opa_batch\n\n feed_dict[learning_rate_omega] = optimizer_spec_omega.lr_schedule.value(t / learning_freq)\n # if q_omega_uniform_sample:\n # _, cost = session.run(run_list, feed_dict=feed_dict)\n # else:\n _, loss_omega = session.run(run_list, feed_dict=feed_dict)\n # Costs.append(cost)\n if not online_q_omega and q_omega_uniform_sample:\n obs_t_batch, act_t_batch, rew_t_batch, obs_tp1_batch, done_mask_batch, opa_batch = replay_buffer.sample(batch_size, opa_uniform=True)\n opa_t_batch = opa_batch\n _, loss_omega = session.run([train_fn_omega, total_error_q_omega], feed_dict={\n obs_t_ph: obs_t_batch[..., -input_history_len:],\n opa_t_ph: opa_t_batch,\n rew_t_ph: rew_t_batch,\n obs_tp1_ph: obs_tp1_batch[..., -input_history_len:],\n done_mask_ph: done_mask_batch,\n learning_rate_omega: optimizer_spec_omega.lr_schedule.value(t / learning_freq)\n })\n\n loss_omega_log.append(loss_omega)\n # step d\n if t % target_update_freq == 0 and q_model_initialized:\n num_param_updates += 1\n session.run(update_target_fn)\n\n #####\n\n ### 4. Log progress\n if (t > 0) and (t % epoch_size == 0) and (t > learning_starts):\n average_loss_omega.append(np.mean(np.array(loss_omega_log)[int(-epoch_size):]))\n average_loss_term.append(np.mean(np.array(loss_term_log)[int(-epoch_size):]))\n plt.figure(8)\n plt.plot(average_loss_omega)\n plt.grid()\n plt.savefig(log_dir + 'average_loss_omega.png')\n plt.figure(9)\n plt.plot(average_loss_term)\n plt.grid()\n plt.savefig(log_dir + 'average_loss_term.png')\n if t % 1e3 == 0 and t > 0:\n if eval_obs_array:\n if obs_random_batch is None:\n # print len(eval_obs_array)\n if len(eval_obs_array) == 1:\n if len(eval_obs_array[0]) < 6:\n obs_random_batch = np.array(eval_obs_array)[0, 0,... 
, -input_history_len:]\n else:\n obs_random_batch = np.array(eval_obs_array)[0, 0:100, 0,..., -input_history_len:]\n else:\n obs_random_batch = eval_obs_array[0][0]\n diff_batch = eval_obs_array[1][0]\n summary_str = session.run(merged_summary_op, feed_dict={obs_t_ph: obs_random_batch})\n summary_writer.add_summary(summary_str, t)\n # summary_str_diff = session.run(merged_summary_diff, feed_dict={obs_t_ph: diff_batch})\n # summary_writer.add_summary(summary_str_diff, t)\n # else:\n # print \"no eval_obs_array\"\n episode_rewards = get_wrapper_by_name(env, \"Monitor\").get_episode_rewards()\n if len(episode_rewards) > 0:\n mean_episode_reward = np.mean(episode_rewards[-100:])\n if len(episode_rewards) > 100:\n best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)\n if t % LOG_EVERY_N_STEPS == 0 and q_model_initialized:\n last_t, last_time = running_time\n new_t , new_time = t / LOG_EVERY_N_STEPS, time.time()\n running_time = [new_t, new_time]\n print \"###########################################\"\n print(\"Timestep %d\" % (t,))\n print(\"Training time per %d timesteps %.2fs\" %\n (LOG_EVERY_N_STEPS, (new_time - last_time) / (new_t - last_t)))\n print(\"mean reward (100 episodes) %f\" % mean_episode_reward)\n print(\"best mean reward %f\" % best_mean_episode_reward)\n print(\"episodes %d\" % len(episode_rewards))\n print(\"exploration %f\" % exploration.value(t / learning_freq))\n print(\"learning_rate %f\" % optimizer_spec.lr_schedule.value(t / learning_freq))\n # evaluate q functions for different rooms (log_dir should exist!)\n if t % epoch_size == 0 and log_dir != None and t > learning_starts:\n saver.save(session, log_dir + 'dqn/', global_step=t)\n if eval_obs_array:\n Q_current, U_max, Omega_max = session.run([average_Q, average_U, average_omega], feed_dict={obs_t_ph: obs_random_batch})\n print(\"evaluated q values for eval_obs_array\")\n test_q_max_log.append(Q_current)\n test_u_log.append(U_max)\n test_omega_log.append(Omega_max)\n plt.figure(10)\n plt.plot(test_u_log)\n plt.grid()\n plt.savefig(log_dir + 'test_u_max.png')\n plt.figure(11)\n plt.plot(test_omega_log)\n plt.grid()\n plt.savefig(log_dir + 'test_omega_max.png')\n else:\n print(\"no eval_obs_array! q values are not evaluated! 
check your config\")\n sys.stdout.flush()\n\n # tests & log cost\n # add a step: test 10000 step after 50000 step\n if t % epoch_size == 0 and log_dir != None and q_model_initialized:\n\n # test Q_omega and beta\n option_activated_test = False\n option_running_test = None\n last_obs_test1 = env_test1.reset()\n replay_buffer_test1.store_frame(last_obs_test1)\n h = 0\n episode_num1 = 1\n test_step1 = 0\n done_test1 = False\n next_recent_obs_test = None\n while test_step1 < 5000 or done_test1 == False:\n if next_recent_obs_test is not None:\n recent_obs_test = next_recent_obs_test\n else:\n recent_obs_test = replay_buffer_test1.encode_recent_observation()\n # pick an option\n if not option_activated_test:\n if random.random() > 0.05:\n q_omega_test = session.run(q_omega_current,\n {obs_t_ph: recent_obs_test[None, ..., -input_history_len:]})\n option_running_test = np.argmax(q_omega_test)\n else:\n option_running_test = random.choice(range(num_options))\n option_activated_test = True\n\n # choose action\n action_test1 = options[option_running_test].act(recent_obs_test)\n\n new_obs_test, r_test1, done_test1, _ = env_test1.step(action_test1)\n h += 1\n test_step1 += 1\n if done_test1:\n # option_activated_test = False\n reward = r_test1 * (gamma ** h)\n h = 0\n # summary_str1 = session.run(merged_summary_op1, feed_dict={Episode_reward: reward})\n # summary_writer.add_summary(summary_str1, global_step=Episode_num)\n Episode_num += 1\n if test_step1 < 5000:\n new_obs_test = env_test1.reset()\n episode_num1 += 1\n\n replay_buffer_test1.store_frame(new_obs_test)\n next_recent_obs_test = replay_buffer_test1.encode_recent_observation()\n term_probs_test = session.run(term_next_current,\n {obs_tp1_ph: next_recent_obs_test[None, ..., -input_history_len:]})\n if done_test1 or not random.random() > term_probs_test[0][option_running_test]: # will re-pick an option\n option_activated_test = False\n\n episode_rewards_test1 = get_wrapper_by_name(env_test1, \"Monitor\").get_episode_rewards()\n episode_lengths_test1 = get_wrapper_by_name(env_test1, \"Monitor\").get_episode_lengths()\n evaluation_metric1 = np.array(episode_rewards_test1) * (gamma ** np.array(episode_lengths_test1))\n test_reward1 = np.mean(evaluation_metric1)\n none_discount_average1 = np.mean(episode_rewards_test1)\n none_discount1.append(none_discount_average1)\n short_average1.append(np.mean(evaluation_metric1[-episode_num1:]))\n short_average_none1.append(np.mean(episode_rewards_test1[-episode_num1:]))\n test_rewards1.append(test_reward1)\n plt.figure(12)\n plt.plot(test_rewards1)\n plt.grid()\n plt.savefig(log_dir + 'test_rewards1.png')\n plt.figure(13)\n plt.plot(none_discount1)\n plt.grid()\n plt.savefig(log_dir + 'none_discount1.png')\n plt.figure(14)\n plt.plot(short_average1)\n plt.grid()\n plt.savefig(log_dir + 'short_average1.png')\n plt.figure(15)\n plt.plot(short_average_none1)\n plt.grid()\n plt.savefig(log_dir + 'short_average_none1.png')\n plt.figure(1)\n plt.plot(evaluation_metric1)\n plt.grid()\n plt.savefig(log_dir + 'episode_reward.png')\n\n with open(log_dir + '/pkl/test_rewards1.pkl', 'wb') as output:\n pickle.dump(test_rewards1, output)\n with open(log_dir + '/pkl/none_discount1.pkl', 'wb') as output:\n pickle.dump(none_discount1, output)\n with open(log_dir + '/pkl/short_average1.pkl', 'wb') as output:\n pickle.dump(short_average1, output)\n with open(log_dir + '/pkl/short_average_none1.pkl', 'wb') as output:\n pickle.dump(short_average_none1, output)\n with open(log_dir + '/pkl/episode_reward.pkl', 'wb') as output:\n 
pickle.dump(evaluation_metric1, output)\n with open(log_dir + '/pkl/test_omega_log.pkl', 'wb') as output:\n pickle.dump(test_omega_log, output)", "def test(self):\n total_steps = 0\n running_scores = np.zeros(len(self.agents))\n\n for e in range(self.run_settings.test_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = np.array(rewards)\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n if self.run_settings.verbose:\n self.print_action(env_actions)\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores += np.array(rewards)\n\n if done:\n running_scores += scores\n\n if len(scores) == 1:\n scores = scores[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}\"\n .format(e+1, step, scores))\n if self.run_settings.verbose:\n print(\"Average game scores: {}\".format(running_scores / self.run_settings.test_episodes))", "def train(self,\n num_episodes = 100,\n num_steps = 500000,\n max_steps_per_episode = 10000,\n target_interval = 10000,\n learning_interval = 4,\n frame_skip = 1,\n warmup_steps = None,\n pretrain_steps = None,\n output_freq = 50,\n save_freq = 5, \n store_memory = False):\n \n # prefill memory with random transitions if requested\n if warmup_steps is not None:\n self._random_warmup(warmup_steps)\n \n # pretrain the agent on its on own memory\n if pretrain_steps is not None:\n self._pretrain(pretrain_steps, target_interval)\n \n # logging initialization\n self._score, self._q_values, self._losses = 0., [], []\n raw_frames = np.zeros(shape = (max_steps_per_episode, *self.env._unprocessed_frame.shape), dtype = np.uint8)\n\n episode_idx = 0\n while episode_idx < num_episodes or self._step_counter < num_steps:\n # reset environment and get first state\n self._start_episode()\n \n for i in range(max_steps_per_episode):\n \n #-------------------------------------------------------------------------------#\n #####################\n # Interactive Phase #\n #####################\n \n # choose an action, observe reactions of the environment and\n # add this experience to the agent's memory \n if self._step_counter % frame_skip == 0: \n action = self._make_decision()\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n # update current state\n self._current_state[0, :(self.num_stacked_frames-1)] = self._current_state[0, 1:]\n self._current_state[0, self.num_stacked_frames-1] = new_frame\n #-------------------------------------------------------------------------------#\n \n \n #-------------------------------------------------------------------------------#\n ##################\n # Learning Phase #\n ##################\n \n # perform a parameter update of the current policy model\n if self._step_counter % learning_interval == 0:\n self._batch_update()\n \n # update the target model\n if self._step_counter % 
target_interval == 0:\n self._update_target_model()\n #-------------------------------------------------------------------------------#\n \n # logging\n self._score += self.env._unprocessed_reward\n raw_frames[i] = self.env._unprocessed_frame\n \n \n self._step_counter += 1\n \n if self.env.was_real_done:\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n break\n \n if done:\n self.env.reset()\n \n \n if not self.env.was_real_done:\n self.memory.add_experience(action, reward, new_frame, 1, True)\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n \n if episode_idx%(num_episodes/output_freq)==0:\n validation_score, validation_frames = self.test(record = True, max_steps_per_episode = max_steps_per_episode)\n #validation_score, validation_frames = 0, []\n lower_idx = int(clip(episode_idx-(num_episodes/output_freq)+1, 0, num_episodes-1))\n self.logger.show_progress(lower_idx, episode_idx, validation_score, validation_frames, self.policy_network.model)\n \n if episode_idx%(num_episodes/save_freq)==0:\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)\n \n \n\n episode_idx += 1 \n print('==========================\\ntraining session completed\\n==========================\\n\\n\\n=======\\nSummary\\n======='\n )\n self.logger.show_progress(0, num_episodes, summary = True)\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)", "def run_single(env, policy, seed_feed=99, iter_tot=100, gamma=0.8):\n\n episode_reward = 0\n env = gym.make(\"MachineReplacement-v1\")\n ob = env.reset()\n for t in range(iter_tot):\n a = policy[ob]\n ob, rew, done, _ = env.step(a)\n episode_reward += rew\n if done:\n break\n # assert done\n print(episode_reward)\n return episode_reward", "def eval_against_bot(env, q_agent, t_agent, num_episodes):\n score = 0\n for _ in range(num_episodes):\n time_step = env.reset()\n q_agent_output = q_agent.step(time_step, is_evaluation=True)\n t_agent_output = t_agent.step(time_step, is_evaluation=True)\n time_step = env.step([q_agent_output.action, t_agent_output.action])\n score += time_step.rewards[0]\n return score / num_episodes", "def evaluate(self, num_episodes, max_episode_length=None, gen_video=False):\n evaluation_policy = GreedyPolicy()\n eval_preprocessor = preprocessors.PreprocessorSequence()\n env_valid = gym.make(self.env_string)\n\n iter_ctr_valid = 0\n Q_sum = 0\n eval_episode_ctr_valid = 0\n total_reward_all_episodes = []\n \n # https://github.com/openai/gym/blob/master/gym/wrappers/monitoring.py video_callable takes function as arg. 
so we hack with true lambda\n # https://github.com/openai/gym/issues/494 \n if gen_video:\n video_dir = os.path.join(self.log_dir, 'gym_monitor', str(self.iter_ctr).zfill(7))\n os.makedirs(video_dir)\n env_valid = wrappers.Monitor(env_valid, video_dir, video_callable=lambda x:True, mode='evaluation')\n\n while eval_episode_ctr_valid < num_episodes:\n state = env_valid.reset()\n eval_preprocessor.reset_history_memory()\n num_timesteps_in_curr_episode = 0\n total_reward_curr_episode = 0.0\n\n while num_timesteps_in_curr_episode < max_episode_length:\n num_timesteps_in_curr_episode += 1\n iter_ctr_valid += 1\n\n state_network = self.preprocessor.process_state_for_network(state)\n q_values = self.calc_q_values(state_network)\n Q_sum += np.max(q_values) # todo fix this\n\n action = evaluation_policy.select_action(q_values)\n next_state, reward, is_terminal, _ = env_valid.step(action)\n total_reward_curr_episode += reward\n # print \"Evaluation : timestep {}, episode {}, action {}, reward {}, total_reward {}\"\\\n # .format(iter_ctr_valid, eval_episode_ctr_valid, action, reward, total_reward_curr_episode)\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n eval_episode_ctr_valid += 1\n print \"Evaluate() : iter_ctr_valid {}, eval_episode_ctr_valid : {}, total_reward_curr_episode : {}, num_timesteps_in_curr_episode {}\"\\\n .format(iter_ctr_valid, eval_episode_ctr_valid, total_reward_curr_episode, num_timesteps_in_curr_episode)\n total_reward_all_episodes.append(total_reward_curr_episode)\n # num_timesteps_in_curr_episode = 0\n break\n\n state = next_state\n\n Q_avg = Q_sum/float(iter_ctr_valid)\n print \" sum(total_reward_all_episodes) : {} , float(len(total_reward_all_episodes)) : {}\".format\\\n (sum(total_reward_all_episodes), float(len(total_reward_all_episodes)))\n all_episode_avg_reward = sum(total_reward_all_episodes)/float(len(total_reward_all_episodes))\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='test_mean_avg_reward', value=all_episode_avg_reward, step=self.iter_ctr)\n self.tf_log_scaler(tag='test_mean_Q_max', value=Q_avg, step=self.iter_ctr)\n self.dump_test_episode_reward(all_episode_avg_reward)\n self.qavg_list = np.append(self.qavg_list, Q_avg)\n self.reward_list.append(all_episode_avg_reward)\n\n pkl.dump(self.reward_list, open(\"/data/datasets/ratneshm/deeprl_hw2/eval_rewards.pkl\", \"wb\"))\n \n print \"all_episode_avg_reward \", all_episode_avg_reward\n print \"\\n\\n\\n self.reward_list \\n\\n\\n\", self.reward_list", "def simulate(self):\r\n\t\tprint(\"##################################\")\r\n\t\tprint(\"SIMULATING GAME - SpaceInvaders..\")\r\n\t\tprint(\"##################################\")\r\n\t\t\r\n\t\t# Play 3 episodes:\r\n\t\tfor i in range(3):\r\n\t\t\tprint(\"Playing Episode %d\" % i)\r\n\t\t\tstate = self.env.reset()\r\n\t\t\t#self.env.render()\r\n\t\t\tdone = False\r\n\t\t\ttot_reward = 0\r\n\t\t\tstate,_ = stack_frames(self.stack_size,self.stacked_frames, \r\n\t\t\t\t\t\t\t\t\t\tstate, True)\r\n\t\t\t# play until dead.\t\t\t\r\n\t\t\twhile not done:\r\n\t\t\t\t# get the value predicted by the model and perform that action.\r\n\t\t\t\t# keras conv2d expects a 4D input. So add an empty axis. 
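# np.expand_dims turns one stacked frame into a batch of one, which is what a
# Keras Conv2D-based model's predict expects; a quick standalone shape check
# (the 84x84x4 frame-stack shape is illustrative, not from the source):
import numpy as np
frame_stack = np.zeros((84, 84, 4))
batch = np.expand_dims(frame_stack, axis=0)
print(batch.shape)  # (1, 84, 84, 4)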
\r\n\t\t\t\tstate = np.expand_dims(state, axis=0)\r\n\t\t\t\t# predict action directly from the saved neural network.\r\n\t\t\t\taction = np.argmax(self.dqn.getModel().predict(state)[0])\r\n\t\t\t\t# perform that action.\r\n\t\t\t\tstate, reward, done, _ = self.env.step(action)\r\n\t\t\t\tself.env.render()\r\n\t\t\t\tstate,_ = stack_frames(self.stack_size,self.stacked_frames, \r\n\t\t\t\t\t\t\t\t\t\tstate, False)\r\n\t\t\t\ttot_reward+=reward\r\n\t\t\tprint(\"Reward: \", tot_reward)\r\n\t\tself.env.close() # to avoid sys.meta_path error\r", "def train(self, episodes=2000, max_steps=99):\n\n for episode in range(episodes):\n state = self.env.reset()\n\n for step in range(max_steps):\n explore_eploit_tradeoff = np.random.uniform()\n\n if explore_eploit_tradeoff > self.epsilon:\n action = np.argmax(self.q_table[state, :])\n else:\n action = self.env.action_space.sample()\n\n new_state, reward, done, info = self.env.step(action)\n\n self.q_table[state, action] = self.q_table[state, action] \\\n + self.lr * (reward + self.gamma * np.amax(\n self.q_table[new_state, :]\n ) - self.q_table[state, action]\n )\n\n state = new_state\n if done:\n break\n exp_ = np.exp(-self.decay_rate * episode)\n self.epsilon = self.min_eps + exp_ * (self.max_eps - self.min_eps)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def fit(self, env, num_iteration, do_train=False):\n\n #s, a, r, new_s, d = get_multi_step_sample(one_step_memory, self.gamma, self.num_step)\n #self.replay_memory.append((s, a, r, new_s, d))\n # epsilon update\n num_env = env.num_process\n env.reset()\n\n for t in range(0, num_iteration, num_env):\n self.global_step += 1\n #print(\"Global_step: {}\".format(self.global_step))\n old_state, action, reward, new_state, is_terminal = self.get_multi_step_sample(env)\n self.replay_memory.append(old_state, action, reward, new_state, is_terminal)\n\n \"\"\"\n Epsilon update\n epsilon begin 1.0, end up 0.1\n FIX\n \"\"\"\n\n self.epsilon = self.epsilon+ num_env*self.epsilon_increment if self.epsilon > EPSILON_END else EPSILON_END\n num_update = sum([1 if i%self.update_freq == 0 else 0 for i in range(t, t+num_env)])\n if do_train:\n for _ in range(num_update):\n\n if self.per == 1:\n (old_state_list, action_list, reward_list, new_state_list, is_terminal_list), \\\n idx_list, p_list, sum_p, count = self.replay_memory.sample(self.batch_size)\n else:\n old_state_list, action_list, reward_list, new_state_list, is_terminal_list \\\n = self.replay_memory.sample(self.batch_size)\n\n feed_dict = {self.target_s: new_state_list.astype(np.float32)/255. 
,\n self.s : old_state_list.astype(np.float32)/255.,\n self.a_ph: list(enumerate(action_list)),\n self.r_ph: np.array(reward_list).astype(np.float32),\n self.d_ph: np.array(is_terminal_list).astype(np.float32),\n }\n\n if self.double:\n action_chosen_by_online = self.sess.run(self.a,\n feed_dict={\n self.s: new_state_list.astype(np.float32)/255.})\n feed_dict[self.a_for_new_state_ph] = list(enumerate(action_chosen_by_online))\n\n if self.per == 1:\n # Annealing weight beta\n feed_dict[self.loss_weight_ph] = (np.array(p_list) * count / sum_p) ** (-self.beta)\n error, _ = self.sess.run([self.error_op, self.train_op], feed_dict=feed_dict)\n self.replay_memory.update(idx_list, error)\n\n else:\n self.sess.run(self.train_op, feed_dict=feed_dict)\n\n self.update_time += 1\n\n if self.beta < BETA_END:\n self.beta += self.beta_increment\n\n if (self.update_time)%self.target_update_freq == 0 :\n #print(\"Step: {} \".format(self.update_time) + \"target_network update\")\n self.sess.run([self.target_update])\n #print(\"Step: {} \".format(self.update_freq) + \"Network save\")\n self.save_model()", "def run_episode(env, gamma = 1.0, render = False):\n actions = 4\n obs = env.reset()\n total_reward = 0\n step_idx = 0\n\n while True:\n if render:\n env.render()\n obs, reward, done , _ = env.step(random.randint(0, actions - 1))\n x1, x2, x3, x4, x5, x6, x7, x8 = obs\n x1s.append(x1)\n x2s.append(x2)\n x3s.append(x3)\n x4s.append(x4)\n x5s.append(x5)\n x6s.append(x6)\n x7s.append(x7)\n x8s.append(x8)\n total_reward += (gamma ** step_idx * reward)\n step_idx += 1\n if done:\n break\n return total_reward, step_idx", "def dqn(\n env,\n n_episodes=10000,\n max_t=1000,\n eps_start=1.0,\n eps_end=0.005,\n eps_decay=0.995,\n train_mode=True,\n):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n action_size = brain.vector_action_space_size\n env_info = env.reset(train_mode=train_mode)[brain_name]\n state_size = len(env_info.vector_observations[0])\n\n agent = Agent(state_size=state_size, action_size=action_size, seed=1)\n\n for i_episode in range(1, n_episodes + 1):\n state = env_info.vector_observations[0]\n score = 0\n for _ in range(max_t):\n action = np.int32(agent.act(state, eps))\n env_info = env.step(action)[\n brain_name\n ] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n env.reset(train_mode=train_mode)[brain_name]\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n ),\n end=\"\",\n )\n if i_episode % 100 == 0:\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n )\n )\n if np.mean(scores_window) >= 13.0:\n print(\n \"\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}\".format(\n i_episode - 100, np.mean(scores_window)\n )\n )\n torch.save(agent.qnetwork_local.state_dict(), \"checkpoint_vanilla.pth\")\n break\n return scores", "def 
test():\n \n print('Loading best networks')\n env.guesser, agent.dqn = load_networks(i_episode='best')\n #env.guesser, agent.dqn = load_networks(i_episode='best', avg_reward = )\n\n # predict outcome on test data\n y_hat_test = np.zeros(len(env.y_test))\n y_hat_test_prob = np.zeros(len(env.y_test))\n \n print('Computing predictions of test data')\n n_test = len(env.X_test)\n for i in range(n_test):\n \n if i % 1000 == 0:\n print('{} / {}'.format(i, n_test))\n \n state = env.reset(mode='test', \n patient=i,\n train_guesser=False)\n mask = env.reset_mask()\n \n # run episode\n for t in range(FLAGS.episode_length):\n\n # select action from policy\n action = agent.get_action(state, eps=0, mask=mask)\n mask[action] = 0\n \n # take the action\n state, reward, done, guess = env.step(action, mode='test') \n \n if guess != -1:\n y_hat_test_prob[i] = torch.argmax(env.probs).item()\n \n if done:\n break\n y_hat_test[i] = guess\n \n C = confusion_matrix(env.y_test, y_hat_test)\n print('confusion matrix: ')\n print(C)\n\n acc = np.sum(np.diag(C)) / len(env.y_test)\n\n print('Test accuracy: ', np.round(acc, 3))", "def main():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # pylint: disable=E1101\n\n comm = MPI.COMM_WORLD\n\n # Use MPI for parallel evaluation\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n env_fns, env_names = create_eval_envs()\n\n env = AllowBacktracking(env_fns[rank](stack=False, scale_rew=False))\n env = BatchedFrameStack(BatchedGymEnv([[env]]), num_images=4, concat=False)\n with tf.Session(config=config) as sess:\n dqn = DQN(*rainbow_models(sess,\n env.action_space.n,\n gym_space_vectorizer(env.observation_space),\n min_val=-200,\n max_val=200))\n player = NStepPlayer(BatchedPlayer(env, dqn.online_net), 3)\n optimize = dqn.optimize(learning_rate=1e-4)\n sess.run(tf.global_variables_initializer())\n\n reward_hist = []\n total_steps = 0\n def _handle_ep(steps, rew, env_rewards):\n nonlocal total_steps\n total_steps += steps\n reward_hist.append(rew)\n if total_steps % 1 == 0:\n avg_score = sum(reward_hist[-100:]) / len(reward_hist[-100:])\n\n\t\t\t# Global Score\n global_score = np.zeros(1)\n local_score = np.array(avg_score)\n print(\"Local Score for \" + env_names[rank] + \" at episode \" + str(len(reward_hist)) + \" with timesteps: \" + str(total_steps) + \": \" + str(local_score))\n comm.Allreduce(local_score, global_score, op=MPI.SUM)\n global_score /= size\n if rank == 0:\n print(\"Global Average Score at episode: \" + str(len(reward_hist)) + \": \" + str(global_score))\n\n\n dqn.train(num_steps=2000000, # Make sure an exception arrives before we stop.\n player=player,\n replay_buffer=PrioritizedReplayBuffer(500000, 0.5, 0.4, epsilon=0.1),\n optimize_op=optimize,\n train_interval=1,\n target_interval=8192,\n batch_size=32,\n min_buffer_size=20000,\n handle_ep=_handle_ep,\n save_interval=None,\n restore_path='./checkpoints_rainbow/model-10' # Model to be evaluated\n )", "def run(agent, env, num_episodes = 20000, mode = 'train'):\n\t scores=[]\n\t max_avg_score=-np.inf\n\t for i_episode in range(1, num_episodes + 1):\n\t # Initialize episode\n\t state=env.reset()\n\t action=agent.reset_episode(state)\n\t total_reward=0\n\t done=False\n\n\t # Roll out steps until done\n\t while not done:\n\t state, reward, done, info=env.step(action)\n\t total_reward += reward\n\t action=agent.act(state, reward, done, mode)\n\n\t # Save final score\n\t scores.append(total_reward)\n\n\t # Print episode stats\n\t if mode == 'train':\n\t if len(scores) > 100:\n\t 
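# Rolling statistic over the most recent 100 episodes -- the common Gym
# convention for judging convergence. For example, on
# scores = [..., -200, -180, -110], np.mean(scores[-100:]) smooths
# per-episode noise before it is compared with max_avg_score.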
avg_score=np.mean(scores[-100:])\n\t if avg_score > max_avg_score:\n\t max_avg_score=avg_score\n\n\t if i_episode % 100 == 0:\n\t print(\"\\rEpisode {}/{} | Max Average Score: {}\".format(i_episode,\n\t num_episodes, max_avg_score), end = \"\")\n\t sys.stdout.flush()\n\n\t return scores\n\n\tscores=run(q_agent, env)\n\n\t# Plot scores obtained per episode\n\tplt.plot(scores); plt.title(\"Scores\")\n\n\tdef plot_scores(scores, rolling_window = 100):\n\t\t\"\"\"Plot scores and optional rolling mean using specified window.\"\"\"\n\t\tplt.plot(scores); plt.title(\"Scores\");\n\t\trolling_mean=pd.Series(scores).rolling(rolling_window).mean()\n\t\tplt.plot(rolling_mean);\n\t\treturn rolling_mean\n\n\trolling_mean=plot_scores(scores)\n\n\t# Run in test mode and analyze socres obtained\n\ttest_scores=run(q_agent, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. score = {}\".format(\n\t len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores, rolling_window = 10)\n\n\n\tdef plot_q_table(q_table):\n \"\"\"Visualize max Q-value for each state and corresponding action.\"\"\"\n\t q_image=np.max(q_table, axis = 2) # max Q-value for each state\n\t q_actions=np.argmax(q_table, axis = 2) # best action for each state\n\n\t fig, ax=plt.subplots(figsize = (10, 10))\n\t cax=ax.imshow(q_image, cmap = 'jet');\n\t cbar=fig.colorbar(cax)\n\t for x in range(q_image.shape[0]):\n\t for y in range(q_image.shape[1]):\n\t ax.text(x, y, q_actions[x, y], color = 'white',\n\t horizontalalignment = 'center', verticalalignment = 'center')\n\t ax.grid(False)\n\t ax.set_title(\"Q-table, size: {}\".format(q_table.shape))\n\t ax.set_xlabel('position')\n\t ax.set_ylabel('velocity')\n\n\n\tplot_q_table(q_agent.q_table)\n\n\n\tstate_grid_new=create_uniform_grid(\n\t env.observation_space.low, env.observation_space.high, bins = (20, 20))\n\tq_agent_new=QLearningAgent(env, state_grid_new)\n\tq_agent_new.scores=[]\n\n\n\tq_agent_new.scores += run(q_agent_new, env,\n\t num_episodes = 50000) # accumulate scores\n\trolling_mean_new=plot_scores(q_agent_new.scores)\n\n\ttest_scores= run(q_agent_new, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. 
score = {}\".format(\n\t len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores)\n\n\tplot_q_table(q_agent_new.q_table)\n\n\tstate=env.reset()\n\tscore=0\n\timg=plt.imshow(env.render(mode='rgb_array'))\n\tfor t in range(1000):\n\t\taction=q_agent_new.act(state, mode = 'test')\n\t\timg.set_data(env.render(mode='rgb_array'))\n\t\tplt.axis('off')\n\t\tdisplay.display(plt.gcf())\n\t\tdisplay.clear_output(wait = True)\n\t\tstate, reward, done, _=env.step(action)\n\t\tscore += reward\n\t\tif done:\n\t\t\tprint('Score: ', score)\n\t\t\tbreak\n\tenv.close()

def evaluate(self, env, num_episodes, max_episode_length=None\n , show_detail = False):\n episode_counter = 1;\n average_reward = 0;\n average_episode_length = 0;\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n\n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n setpoint_this = ob_this[6:8]\n \n this_ep_reward = 0;\n this_ep_length = 0;\n while episode_counter <= num_episodes:\n action_mem = self.select_action(state_this_net, stage = 'testing');\n # convert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem)\n\n time_next, ob_next, is_terminal = env.step(action)\n \n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n\n setpoint_next = ob_next[6:8]\n\n obs_next_net = self._preprocessor.process_observation_for_network(\n ob_next, self._min_array, self._max_array)\n \n state_next_net = np.append(obs_next_net[0:13], obs_next_net[14:]).reshape(1,16)\n \n #10:PMV, 11: Occupant number , -2: power\n reward = self._preprocessor.process_reward(obs_next_net[12:15])\n \n this_ep_reward += reward;\n \n #Check if exceed the max_episode_length\n if max_episode_length is not None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n #Check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n\n state_this_net = np.append(obs_this_net[0:13], \n obs_this_net[14:]).reshape(1,16)\n\n average_reward = (average_reward * (episode_counter - 1) \n + this_ep_reward) / episode_counter;\n average_episode_length = (average_episode_length \n * (episode_counter - 1) \n + this_ep_length) / episode_counter;\n \n episode_counter += 1;\n if show_detail:\n logging.info ('Episode ends. Cumulative reward is %0.04f '\n 'episode length is %d, average reward by now is %0.04f,'\n ' average episode length by now is %d.' 
%(this_ep_reward,\n this_ep_length,\n average_reward,\n average_episode_length));\n this_ep_length = 0;\n this_ep_reward = 0;\n \n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n state_this_net = state_next_net\n time_this = time_next\n this_ep_length += 1;\n return (average_reward, average_episode_length);", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if 
not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def run(self):\n\t\tep_rewards = [0.0]\n\t\tavg_rewards = []\n\t\tobs = self.env.reset()\n\t\tstep_counter = 0\n\n\t\tself.mylogger.info('Task: {}, epochs: {}, batch size: {}'.format(self.env.unwrapped.spec.id, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.epochs,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.batch_size\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ))\n\n\t\tfor epoch in range(self.epochs):\n\t\t\tfor step in range(self.batch_size):\n\t\t\t\tstep_counter += 1\n\n\t\t\t\tself.observations[step] = obs.copy()\n\t\t\t\tself.actions[step], self.values[step] = self.model.action_value(obs[None, :])\n\t\t\t\tobs, self.rewards[step], self.dones[step], _ = self.env.step(self.actions[step])\n\t\t\t\tep_rewards[-1] += self.rewards[step]\n\n\t\t\t\tif step_counter % self.log_step == 0:\n\t\t\t\t\tlog_msg = 'global_step: {}, obs: {}, act: {}, reward: {}'.format(step_counter,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t obs, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.actions[step], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.rewards[step]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )\n\t\t\t\t\tself.mylogger.info(log_msg)\n\t\t\t\t\tself.mylogger.info(\"prev episode reward: {}\".format(ep_rewards[-2]))\n\n\t\t\t\tif self.dones[step]:\n\t\t\t\t\twith self.summary_writer.as_default():\n\t\t\t\t\t\ttf.summary.scalar('episode reward', ep_rewards[-1], step=step_counter)\n\t\t\t\t\tep_rewards.append(0.0)\n\t\t\t\t\tobs = self.env.reset()\n\n\t\t\t_, next_value = self.model.action_value(obs[None, :])\n\t\t\treturns, advs = self._returns_advantages(self.rewards, self.dones, self.values, next_value)\n\t\t\t# A trick to input actions and advantages through same API.\n\t\t\tacts_and_advs = np.concatenate([self.actions[:, None], advs[:, None]], axis=-1)\n\n\t\t\t# update weights \n\t\t\tlosses = self.model.train_on_batch(self.observations, [acts_and_advs, returns])\n\n\t\t\twith self.summary_writer.as_default():\n\t\t\t\ttf.summary.scalar('policy loss', losses[1], step=step_counter)\n\t\t\t\ttf.summary.scalar('value loss', losses[2], step=step_counter)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, 
a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards", "def trainOneEpisode(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n # tqdm.write('------Episode {} / {}------'.format(self.episodes_done, num_episodes))\n self.resetEnv()\n r_total = 0\n with trange(1, max_episode_steps+1, leave=False) as t:\n\n for step in t:\n if render:\n self.env.render()\n state = self.state\n action, q = self.selectAction(state, require_q=True)\n obs_, r, done, info = self.takeAction(action.item())\n # if print_step:\n # print 'step {}, action: {}, q: {}, reward: {} done: {}' \\\n # .format(step, action.item(), q, r, done)\n r_total += r\n # t.set_postfix(step='{:>5}'.format(step), q='{:>5}'.format(round(q, 4)), total_reward='{:>5}'.format(r_total))\n t.set_postfix_str('step={:>5}, q={:>5}, total_reward={:>5}'.format(step, round(q, 2), r_total))\n if done or step == max_episode_steps:\n next_state = None\n else:\n next_state = self.getNextState(obs_)\n reward = torch.tensor([r], device=self.device, dtype=torch.float)\n self.memory.push(state, action, next_state, reward)\n self.optimizeModel()\n if self.steps_done % self.target_update == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n if done or step == max_episode_steps - 1:\n tqdm.write('------Episode {} ended, total reward: {}, step: {}------' \\\n .format(self.episodes_done, r_total, step))\n tqdm.write('------Total steps done: {}, current e: {} ------' \\\n .format(self.steps_done, self.exploration.value(self.steps_done)))\n # print '------Episode {} ended, total reward: {}, step: {}------' \\\n # .format(self.episodes_done, r_total, step)\n # print '------Total steps done: {}, current e: {} ------' \\\n # .format(self.steps_done, self.exploration.value(self.steps_done))\n self.episodes_done += 1\n self.episode_rewards.append(r_total)\n self.episode_lengths.append(step)\n if self.episodes_done % save_freq == 0:\n self.saveCheckpoint()\n break\n self.state = next_state", "def test(net, env, total_episodes, test_seeds=None, cuda=False, log=False, render=False, max_actions=10000):\n net.eval()\n total_reward = 0\n with torch.no_grad():\n for ep in range(total_episodes):\n obs = env.reset()\n done, ep_reward, ep_actions = False, 0, []\n hx = Variable(net.init_hidden())\n all_obs = [obs]\n action_count = 0\n while not done:\n if render:\n env.render()\n obs = Variable(torch.Tensor(obs)).unsqueeze(0)\n if cuda:\n obs, hx = obs.cuda(), hx.cuda()\n critic, logit, hx = net((obs, hx))\n prob = F.softmax(logit, dim=1)\n action = int(prob.max(1)[1].data.cpu().numpy())\n obs, reward, done, _ = env.step(action)\n action_count += 1\n done = done if action_count <= max_actions else True\n ep_actions.append(action)\n # A quick hack to prevent the agent from stucking\n max_same_action = 5000\n if action_count > max_same_action:\n actions_to_consider = ep_actions[-max_same_action:]\n if actions_to_consider.count(actions_to_consider[0]) == max_same_action:\n done = True\n ep_reward += reward\n if not done:\n all_obs.append(obs)\n total_reward += ep_reward\n if log:\n logger.info('Episode =>{} Score=> {} Actions=> {} ActionCount=> {}'.format(ep, ep_reward, ep_actions,\n action_count))\n return total_reward / total_episodes", "def train(\n self,\n num_episodes: int = int(1e4),\n num_reward_updates: int = 10,\n batch_size: int = 128,\n expert_demos: str = 'demos.pkl',\n ) -> None:\n # Set train\n self.agent.set_train()\n self.best_loss = np.float('inf')\n self.best_reward = np.float('-inf')\n 
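# One slot per vectorised environment, initialised to best_reward (-inf)
# so the first reward any env reports always overwrites it; e.g. with
# num_envs == 4 this starts as array([-inf, -inf, -inf, -inf]).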
self.reward_tracker = self.best_reward * np.ones(self.env.num_envs)\n\n # Expert demonstrations\n with open(expert_demos, 'rb') as f:\n demos = pickle.load(f) # runner.Experiences\n if self.use_gpu:\n demos.to_gpu()\n\n for i in range(num_episodes):\n # Generate samples\n batch = self.runner.generate_batch(64)\n flat_batch = flatten_batch(copy.deepcopy(batch))\n agent_batch_size = len(flat_batch['states'])\n expert_batch_size = len(demos.states)\n\n # Update cost function\n for j in range(num_reward_updates):\n selected_idxs = torch.randperm(expert_batch_size)[:batch_size]\n expert_states = demos.states[selected_idxs]\n expert_actions = demos.actions[selected_idxs]\n\n selected_idxs = torch.randperm(agent_batch_size)[:batch_size]\n states = flat_batch['states'][selected_idxs]\n actions = flat_batch['actions'][selected_idxs]\n\n states = torch.cat([states, expert_states], dim=0)\n actions = torch.cat([actions, expert_actions], dim=0)\n loss_cost_dict = self.agent.update_cost(\n states, actions, expert_states, expert_actions\n )\n\n # Update policy\n loss_reward_dict = self.agent.update(batch)\n # Log\n self.log(i, loss_cost_dict)\n self.log(i, loss_reward_dict)\n\n # Save agent\n loss = loss_cost_dict['loss/ioc'] + loss_reward_dict['loss/total']\n\n # Logging\n for ep_count, info_dict in batch['infos']:\n self.log(ep_count, info_dict)\n for (k, v) in info_dict.items():\n if 'reward' in k:\n agent_num = int(k.split('/')[1])\n self.reward_tracker[agent_num] = v\n\n mean_reward = np.mean(self.reward_tracker)\n\n self.log(i, {'values/mean_reward': mean_reward})\n\n # added in a check to make sure we aren't counting initial low loss\n if (loss < self.best_loss and i > 1000) or i % 500 == 0:\n self.agent.save(self.save_path, i)\n self.best_loss = loss\n logging.info(\n \"Save new best model at epoch %i with loss %0.4f.\"\n % (i, loss)\n )", "def run_episode(env, policy, GAMMA=1.0, render=False):\r\n obs= env.reset()\r\n total_reward = 0\r\n step_idx = 0\r\n while True:\r\n if render:\r\n env.render()\r\n obs, reward, done, _ = env.step(policy[obs])\r\n total_reward += ((GAMMA ** step_idx)*reward)\r\n step_idx +=1\r\n if done:\r\n break\r\n return total_reward", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=1,\n max_episodes=1000,\n center_returns=True,\n render=True,\n ):\n\n agent = self.create_agent(env)\n\n for episode in range(1, max_episodes + 1):\n obs = env.reset()\n done = False\n\n episode_return = 0.0\n while not done:\n action = agent.act(obs, deterministic=False)\n next_obs, reward, done, _ = env.step(action)\n episode_return += reward\n agent.store_step(obs, action, reward, next_obs, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if episode % train_every == 0:\n agent.perform_training(\n gamma=self.gamma, center_returns=center_returns\n )\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n print(\"Episode {} -- return={}\".format(episode, episode_return))\n return agent", "def reset(self):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n 
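# The goal is resampled uniformly from a 20x20 box around the origin on
# every reset, e.g. self.target might come out as array([ 3.7, -8.1]).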
self.target = np.random.uniform(-10.0,10.0,size=(2))\n\n # initialize sheep positions\n if self.fixed_reset:\n init_sheep_pose = np.array([75.0, 75.0])\n self.sheep_poses = (np.random.uniform(-50.0, 50.0, \n size=(self.num_sheep,2))) + init_sheep_pose[None,:]\n else:\n init_sheep_pose = np.random.uniform(-self.init_sheep_root, \n self.init_sheep_root, size=(2))\n self.sheep_poses = (np.random.uniform(-self.init_sheep_range, \n self.init_sheep_range, size=(self.num_sheep,2))) \\\n + init_sheep_pose[None,:]\n self.sheep_com = self.sheep_poses.mean(axis=0)\n\n # get the farthest sheep and radius of the sheep\n dist_to_com = np.linalg.norm((self.sheep_poses - self.sheep_com[None,:]), axis=1)\n self.farthest_sheep = self.sheep_poses[np.argmax(dist_to_com),:]\n self.radius_sheep = np.array([np.max(dist_to_com)])\n\n # update distance to target\n self.target_distance = np.linalg.norm(self.target - self.sheep_com)\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n if self.fixed_reset:\n init_dog_pose = np.array([0.0,75.0])\n else:\n init_theta = np.random.uniform(-np.pi,np.pi)\n init_dog_pose = init_sheep_pose + self.init_dog_distance*np.array([np.cos(init_theta), \n np.sin(init_theta)])\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return state", "def run(self, num_episodes):\n for _ in xrange(num_episodes):\n self._env.reset()\n curr_state = self._env.state\n while not self._env.is_terminal(curr_state):\n reward = self._policy.take_action_and_get_reward()\n next_state = self._env.state\n self._update_parameters(curr_state, reward, next_state)\n curr_state = next_state\n # Estimate the TD-fixpoint.\n self.theta = np.dot(np.linalg.pinv(self._A), self._b)\n # Calculate current MSVE.\n self._calc_msve()", "def empathic_deep_q_learning(sess,\n env,\n q_estimator,\n target_estimator,\n empathic_estimator,\n num_episodes,\n replay_memory_size=500000,\n replay_memory_init_size=50000,\n update_target_estimator_every=10000,\n discount_factor=0.99,\n epsilon_start=1.0,\n epsilon_end=0.1,\n epsilon_decay_steps=500000,\n batch_size=32,\n selfishness=0.5):\n\n Transition = namedtuple(\"Transition\", [\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n replay_memory = []\n\n coins_collected = [0]\n enemy_coins_collected = [0]\n enemy_rewards = [0]\n got_killed = [0]\n enemies_killed = [0]\n equalities = []\n rewards = [0]\n episode_steps = []\n\n total_t = sess.run(tf.contrib.framework.get_global_step())\n\n # The epsilon decay schedule\n epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)\n\n # The policy we're following\n policy = make_epsilon_greedy_policy(empathic_estimator, len(VALID_ACTIONS))\n\n # Populate the replay memory with initial experience\n print(\"Populating replay memory...\")\n total_state = env.reset()\n state = total_state[:,:,0]\n enemy_state = total_state[:,:,1]\n\n state = np.stack([state] * MEMORY_LENGTH, axis=2)\n enemy_state = np.stack([enemy_state] * MEMORY_LENGTH, axis=2)\n\n total_state = np.stack([state, enemy_state], axis=2)\n\n for i in range(replay_memory_init_size):\n action_probs = policy(sess, state, epsilons[min(total_t, epsilon_decay_steps-1)])\n action = 
np.random.choice(np.arange(len(action_probs)), p=action_probs)\n\n next_total_state, reward, done, info = env.step(VALID_ACTIONS[action])\n\n next_state = next_total_state[:, :, 0]\n next_state = np.append(state[:, :, 1:], np.expand_dims(next_state, 2), axis=2)\n\n next_enemy_state = next_total_state[:, :, 1]\n next_enemy_state = np.append(enemy_state[:, :, 1:], np.expand_dims(next_enemy_state, 2), axis=2)\n\n next_total_state = np.stack([next_state, next_enemy_state], axis=2)\n\n replay_memory.append(Transition(total_state, action, reward, next_total_state, done))\n if done:\n total_state = env.reset()\n state = total_state[:, :, 0]\n enemy_state = total_state[:, :, 1]\n\n state = np.stack([state] * MEMORY_LENGTH, axis=2)\n enemy_state = np.stack([enemy_state] * MEMORY_LENGTH, axis=2)\n\n total_state = np.stack([state, enemy_state], axis=2)\n else:\n total_state = next_total_state\n state = next_state\n enemy_state = next_enemy_state\n\n\n for i_episode in range(num_episodes):\n\n # Reset the environment\n total_state = env.reset()\n state = total_state[:, :, 0]\n enemy_state = total_state[:, :, 1]\n\n state = np.stack([state] * MEMORY_LENGTH, axis=2)\n enemy_state = np.stack([enemy_state] * MEMORY_LENGTH, axis=2)\n\n total_state = np.stack([state, enemy_state], axis=2)\n\n loss = None\n\n # One step in the environment\n for t in itertools.count():\n\n # Epsilon for this time step\n epsilon = epsilons[min(total_t, epsilon_decay_steps-1)]\n\n # Maybe update the target estimator\n if total_t % update_target_estimator_every == 0:\n copy_model_parameters(sess, q_estimator, target_estimator)\n print(\"\\nCopied model parameters to target network.\")\n\n # Take a step\n action_probs = policy(sess, state, epsilon)\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n next_total_state, reward, done, info = env.step(VALID_ACTIONS[action])\n\n next_state = next_total_state[:, :, 0]\n next_state = np.append(state[:, :, 1:], np.expand_dims(next_state, 2), axis=2)\n\n next_enemy_state = next_total_state[:, :, 1]\n next_enemy_state = np.append(enemy_state[:, :, 1:], np.expand_dims(next_enemy_state, 2), axis=2)\n\n next_total_state = np.stack([next_state, next_enemy_state], axis=2)\n\n coins_collected[-1] += info['coins_collected']\n enemy_coins_collected[-1] += info['enemy_coins_collected']\n enemy_rewards[-1] += info['enemy_reward']\n got_killed[-1] += info['got_killed']\n enemies_killed[-1] += info['num_killed']\n rewards[-1] += reward\n\n # If our replay memory is full, pop the first element\n if len(replay_memory) == replay_memory_size:\n replay_memory.pop(0)\n\n # Save transition to replay memory\n replay_memory.append(Transition(total_state, action, reward, next_total_state, done))\n\n # Sample a minibatch from the replay memory\n samples = random.sample(replay_memory, batch_size)\n states_batch, action_batch, reward_batch, next_states_batch, done_batch = map(np.array, zip(*samples))\n\n # Calculate q values and targets (Double DQN)\n q_values_next = q_estimator.predict(sess, next_states_batch[:,:,:,0,:])\n best_actions = np.argmax(q_values_next, axis=1)\n\n q_values_next_target = target_estimator.predict(sess, next_states_batch[:,:,:,0,:])\n q_values_next_enemy_target = target_estimator.predict(sess, next_states_batch[:,:,:,1,:])\n\n targets_batch = reward_batch + np.invert(done_batch).astype(np.float32) * \\\n (discount_factor * q_values_next_target[np.arange(batch_size), best_actions])\n\n targets_enemy = discount_factor * np.max(q_values_next_enemy_target, axis=1)\n\n 
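# The empathic TD target below is a convex combination of the agent's own
# double-DQN target and the opponent's bootstrapped value:
#   selfishness = 1.0 -> plain (selfish) double DQN
#   selfishness = 0.5 -> own and enemy returns weighted equally
#   selfishness = 0.0 -> purely altruistic targets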
targets_empathy = selfishness*targets_batch + (1-selfishness)*targets_enemy\n\n # Perform gradient descent update on both the Q-network and Empathy-network\n states_batch = np.array(states_batch)\n q_estimator.update(sess, states_batch[:,:,:,0,:], action_batch, targets_batch)\n empathic_estimator.update(sess, states_batch[:,:,:,0,:], action_batch, targets_empathy)\n\n if done:\n print(f'Episode: {i_episode} Reward: {rewards[-1]}')\n equality = (2*min(info['total_enemy_reward'], info['total_reward'])) / \\\n (info['total_enemy_reward'] + info['total_reward'])\n equalities.append(equality)\n episode_steps.append(t)\n coins_collected.append(0)\n enemy_coins_collected.append(0)\n enemy_rewards.append(0)\n got_killed.append(0)\n enemies_killed.append(0)\n rewards.append(0)\n\n total_state = env.reset()\n state = total_state[:, :, 0]\n enemy_state = total_state[:, :, 1]\n\n state = np.stack([state] * MEMORY_LENGTH, axis=2)\n enemy_state = np.stack([enemy_state] * MEMORY_LENGTH, axis=2)\n\n total_state = np.stack([state, enemy_state], axis=2)\n break\n else:\n total_state = next_total_state\n state = next_state\n enemy_state = next_enemy_state\n\n total_t += 1", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def train(net, start):\n # Initialize optimizer\n optimizer = optim.Adam(net.parameters(), lr=1e-6)\n # Initialize loss function\n loss_func = nn.MSELoss()\n\n # Initialize game\n game_state = game.GameState()\n\n # Initialize replay memory\n memory = ReplayMemory(net.replay_memory_size)\n\n # Initial action is do nothing\n action = torch.zeros(2, dtype=torch.float32)\n action[0] = 1\n\n # [1, 0] is do nothing, [0, 1] is fly up\n image_data, reward, terminal = game_state.frame_step(action)\n\n # Image Preprocessing\n image_data = resize_and_bgr2gray(image_data)\n image_data = image_to_tensor(image_data)\n state = torch.cat((image_data, image_data, image_data, image_data)).unsqueeze(0)\n\n # Initialize epsilon value\n epsilon = net.initial_epsilon\n\n # Epsilon annealing\n epsilon_decrements = np.linspace(net.initial_epsilon, net.final_epsilon, net.num_iterations)\n\n t = 0\n \n # Train Loop\n print(\"Start Episode\", 0)\n for iteration in range(net.num_iterations):\n # Get output from the neural network\n output = net(state)[0]\n\n # Initialize action\n action = torch.zeros(2, dtype=torch.float32)\n if torch.cuda.is_available():\n action = action.cuda()\n\n # Epsilon greedy exploration\n random_action = random.random() <= epsilon\n if random_action:\n print(\"Performed random action!\")\n action_index = [torch.randint(2, torch.Size([]), dtype=torch.int)\n if random_action\n else torch.argmax(output)][0]\n\n if torch.cuda.is_available():\n action_index = action_index.cuda()\n\n action[action_index] = 1\n\n # Get next state and reward\n image_data_1, reward, terminal = game_state.frame_step(action)\n image_data_1 = resize_and_bgr2gray(image_data_1)\n 
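# Preprocessing sketch: the raw frame was resized and converted to
# grayscale above; image_to_tensor turns it into a torch tensor, and the
# newest frame then replaces the oldest in the 4-frame stack (state_1
# below), so the state encodes recent motion.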
image_data_1 = image_to_tensor(image_data_1)\n state_1 = torch.cat((state.squeeze(0)[1:, :, :], image_data_1)).unsqueeze(0)\n\n action = action.unsqueeze(0)\n reward = torch.from_numpy(np.array([reward], dtype=np.float32)).unsqueeze(0)\n\n # Save transition to replay memory\n memory.push(state, action, reward, state_1, terminal)\n\n # Epsilon annealing\n epsilon = epsilon_decrements[iteration]\n\n # Sample random minibatch\n minibatch = memory.sample(min(len(memory), net.minibatch_size))\n\n # Unpack minibatch\n state_batch = torch.cat(tuple(d[0] for d in minibatch))\n action_batch = torch.cat(tuple(d[1] for d in minibatch))\n reward_batch = torch.cat(tuple(d[2] for d in minibatch))\n state_1_batch = torch.cat(tuple(d[3] for d in minibatch))\n\n if torch.cuda.is_available():\n state_batch = state_batch.cuda()\n action_batch = action_batch.cuda()\n reward_batch = reward_batch.cuda()\n state_1_batch = state_1_batch.cuda()\n\n # Get output for the next state\n output_1_batch = net(state_1_batch)\n\n # Set y_j to r_j for terminal state, otherwise to r_j + gamma*max(Q)\n y_batch = torch.cat(tuple(reward_batch[i] if minibatch[i][4]\n else reward_batch[i] + net.gamma * torch.max(output_1_batch[i])\n for i in range(len(minibatch))))\n\n # Extract Q-value (this part i don't understand)\n q_value = torch.sum(net(state_batch) * action_batch, dim=1)\n\n optimizer.zero_grad()\n\n # Returns a new Tensor, detached from the current graph, the result will never require gradient\n y_batch = y_batch.detach()\n\n # Calculate loss\n loss = loss_func(q_value, y_batch)\n\n # Do backward pass\n loss.backward()\n optimizer.step()\n\n # Set state to be state_1\n state = state_1\n\n if iteration % 25000 == 0:\n torch.save(net, \"model_weights/current_model_\" + str(iteration) + \".pth\")\n\n if iteration % 100 == 0:\n print(\"iteration:\", iteration, \"elapsed time:\", time.time() - start, \"epsilon:\", epsilon, \"action:\",\n action_index.cpu().detach().numpy(), \"reward:\", reward.numpy()[0][0], \"Q max:\",\n np.max(output.cpu().detach().numpy()))\n\n t += 1\n\n # Plot duration\n if terminal:\n print(\"Start Episode\", len(net.episode_durations) + 1)\n net.episode_durations.append(t)\n plot_durations(net.episode_durations)\n t = 0", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def generate_episode(env, args, render=False, test_mode=False):\n episode = []\n state, done = env.reset(), False\n observations = transform_obs(env.get_all_observations())\n n_steps = 0\n\n for agent in env.agents: # for agents where it matters,\n agent.set_hidden_state() # set the init hidden state of the RNN\n\n while not done:\n unavailable_actions = env.get_unavailable_actions()\n \n # compute action, keep record of hidden state of the agents to store in experience\n actions, hidden, next_hidden = {}, [], []\n for idx, agent in enumerate(env.agents):\n hidden.append(agent.get_hidden_state())\n actions[agent] = agent.act(observations[idx, :], test_mode=test_mode)\n next_hidden.append(agent.get_hidden_state())\n\n if render:\n print(f\"Step {n_steps}\")\n env.render()\n print([action.name for action in actions.values()])\n\n next_state, rewards, done, _ = 
env.step(actions)\n next_obs = transform_obs(env.get_all_observations())\n \n # episodes that take long are not allowed and penalized for both agents\n n_steps += 1\n if n_steps > args.max_episode_length:\n done = True\n rewards = {'blue': -1, 'red': -1}\n\n actions = torch.tensor([action.id for action in actions.values()])\n unavail_actions = torch.zeros((args.n_agents, args.n_actions), dtype=torch.long)\n for idx, agent in enumerate(env.agents):\n act_ids = [act.id for act in unavailable_actions[agent]]\n unavail_actions[idx, act_ids] = 1.\n \n episode.append(Experience(transform_state(state), actions, rewards, \n transform_state(next_state), done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\"\n episode.append(Experience(None, actions, rewards, \n None, done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\" \n state = next_state\n observations = next_obs\n \n if render:\n print(f\"Game won by team {env.terminal(next_state)}\")\n return episode", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break", "def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = 
False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 0\n return reward", "def train_eval_doom_simple(\n\t\tvideos_dir,\n\t\t# Params for collect\n\t\tnum_environment_steps=30000000,\n\t\tcollect_episodes_per_iteration=32,\n\t\tnum_parallel_environments=32,\n\t\treplay_buffer_capacity=301, # Per-environment\n\t\t# Params for train\n\t\tnum_epochs=25,\n\t\tlearning_rate=4e-4,\n\t\t# Params for eval\n\t\teval_interval=500,\n\t\tnum_video_episodes=10,\n\t\t# Params for summaries and logging\n\t\tlog_interval=50):\n\tif not os.path.exists(videos_dir):\n\t\tos.makedirs(videos_dir)\n\n\teval_py_env = DoomEnvironment()\n\teval_tf_env = tf_py_environment.TFPyEnvironment(eval_py_env)\n\ttf_env = tf_py_environment.TFPyEnvironment(parallel_py_environment.ParallelPyEnvironment([DoomEnvironment] * num_parallel_environments))\n\n\tactor_net, value_net = create_networks(tf_env.observation_spec(), tf_env.action_spec())\n\n\tglobal_step = tf.compat.v1.train.get_or_create_global_step()\n\toptimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate, epsilon=1e-5)\n\n\ttf_agent = ppo_agent.PPOAgent(\n\t\ttf_env.time_step_spec(),\n\t\ttf_env.action_spec(),\n\t\toptimizer,\n\t\tactor_net,\n\t\tvalue_net,\n\t\tnum_epochs=num_epochs,\n\t\ttrain_step_counter=global_step,\n\t\tdiscount_factor=0.99,\n\t\tgradient_clipping=0.5,\n\t\tentropy_regularization=1e-2,\n\t\timportance_ratio_clipping=0.2,\n\t\tuse_gae=True,\n\t\tuse_td_lambda_return=True\n\t)\n\ttf_agent.initialize()\n\n\tenvironment_steps_metric = tf_metrics.EnvironmentSteps()\n\tstep_metrics = [\n\t\ttf_metrics.NumberOfEpisodes(),\n\t\tenvironment_steps_metric,\n\t]\n\n\treplay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(tf_agent.collect_data_spec, batch_size=num_parallel_environments, max_length=replay_buffer_capacity)\n\tcollect_driver = dynamic_episode_driver.DynamicEpisodeDriver(tf_env, tf_agent.collect_policy, observers=[replay_buffer.add_batch] + step_metrics, num_episodes=collect_episodes_per_iteration)\n\n\n\tdef train_step():\n\t\ttrajectories = replay_buffer.gather_all()\n\t\treturn tf_agent.train(experience=trajectories)\n\n\n\tdef evaluate():\n\t\tcreate_video(eval_py_env, eval_tf_env, tf_agent.policy, num_episodes=num_video_episodes, video_filename=os.path.join(videos_dir, \"video_%d.mp4\" % global_step_val))\n\n\n\tcollect_time = 0\n\ttrain_time = 0\n\ttimed_at_step = global_step.numpy()\n\n\twhile environment_steps_metric.result() < 
num_environment_steps:\n\n\t\tstart_time = time.time()\n\t\tcollect_driver.run()\n\t\tcollect_time += time.time() - start_time\n\n\t\tstart_time = time.time()\n\t\ttotal_loss, _ = train_step()\n\t\treplay_buffer.clear()\n\t\ttrain_time += time.time() - start_time\n\n\t\tglobal_step_val = global_step.numpy()\n\n\t\tif global_step_val % log_interval == 0:\n\t\t\tlogging.info('step = %d, loss = %f', global_step_val, total_loss)\n\t\t\tsteps_per_sec = ((global_step_val - timed_at_step) / (collect_time + train_time))\n\t\t\tlogging.info('%.3f steps/sec', steps_per_sec)\n\t\t\tlogging.info('collect_time = {}, train_time = {}'.format(collect_time, train_time))\n\n\t\t\ttimed_at_step = global_step_val\n\t\t\tcollect_time = 0\n\t\t\ttrain_time = 0\n\n\t\tif global_step_val % eval_interval == 0:\n\t\t\tevaluate()\n\n\tevaluate()", "def run(self, test_mode=False):\n self.reset()\n\n if self.home_mac is None:\n raise MultiAgentControllerNotInitialized()\n\n terminated = False\n episode_return = 0\n actions_taken = []\n\n self.logger.test_mode = test_mode\n\n self.home_mac.init_hidden(batch_size=self.batch_size)\n\n self.env.render()\n\n env_info = {}\n\n while not terminated:\n pre_transition_data = self.perform_pre_transition_step()\n actions, is_greedy = self.home_mac.select_actions(\n self.home_batch,\n t_ep=self.t,\n t_env=self.t_env,\n test_mode=test_mode\n )\n if is_greedy is not None:\n actions_taken.append(th.stack([actions, is_greedy]))\n\n obs, reward, done_n, env_info = self.env.step(actions[0])\n terminated = any(done_n)\n\n self.env.render()\n\n episode_return += reward[0] # WARN! Only supported if one policy team is playing\n post_transition_data = {\n \"actions\": actions,\n \"reward\": [(reward[0],)], # WARN! Only supported if one policy team is playing\n \"terminated\": [(terminated,)],\n }\n\n if self.phi is not None: # Calculate features based on (o, a, o')\n self.add_features(actions, obs, pre_transition_data)\n\n self.home_batch.update(post_transition_data, ts=self.t)\n # Termination is dependent on all team-wise terminations - AI or policy controlled teams\n\n self.t += 1\n\n _ = self.perform_pre_transition_step()\n actions, is_greedy = self.home_mac.select_actions(\n self.home_batch,\n t_ep=self.t,\n t_env=self.t_env,\n test_mode=test_mode\n )\n\n if is_greedy is not None:\n actions_taken.append(th.stack([actions, is_greedy]))\n\n self.home_batch.update({\"actions\": actions}, ts=self.t)\n\n if not test_mode:\n self.t_env += self.t\n\n if len(actions_taken) > 0:\n actions_taken = th.squeeze(th.stack(actions_taken))\n self.logger.collect(Collectibles.ACTIONS_TAKEN, actions_taken, origin=Originator.HOME)\n\n # Send data collected during the episode - this data needs further processing\n self.logger.collect(Collectibles.RETURN, episode_return, origin=Originator.HOME)\n self.logger.collect(Collectibles.WON, env_info[\"battle_won\"][0], origin=Originator.HOME)\n self.logger.collect(Collectibles.WON, env_info[\"battle_won\"][1], origin=Originator.AWAY)\n self.logger.collect(Collectibles.DRAW, env_info[\"draw\"])\n self.logger.collect(Collectibles.STEPS, self.t)\n # Log epsilon from mac directly\n self.logger.log_stat(\"home_epsilon\", self.epsilon, self.log_t)\n # Log collectibles if conditions suffice\n self.logger.log(self.log_t)\n\n return self.home_batch, env_info", "def run(expt_config, result_q):\n # Set up sacred experiment and initialize a mongo observer.\n ex = Experiment()\n ex.add_config(expt_config)\n ex.observers.append(MongoObserver.create())\n\n @ex.capture\n def 
agent_setup(env_name, seed, gamma, model_layer_sizes,\n model_learning_rate, v_function_coeff, entropy_coeff,\n n_workers, n_train_episodes, activation_fn, n_steps):\n\n env = GymEnv(env_name)\n\n np.random.seed(seed)\n tf.set_random_seed(seed)\n env.env.seed(seed)\n\n network_config = dict(layer_sizes=model_layer_sizes,\n activation=activation_fn)\n output_sizes = [env.n_actions, 1]\n combined_model = ValuePolicyNetworkDense(network_config,\n output_sizes, gamma,\n n_steps=n_steps)\n\n agent = A2CAgent(env, combined_model, model_learning_rate,\n v_function_coeff=v_function_coeff,\n gamma=gamma,\n entropy_coeff=entropy_coeff,\n n_workers=n_workers,\n experiment=ex)\n\n return agent, env\n\n @ex.main\n def agent_train_and_eval(n_train_episodes):\n agent, env = agent_setup()\n _ = agent.interact(n_train_episodes, show_progress=False)\n returns = agent.logger.get_values(\"episode.returns\")\n result_q.put(returns)\n\n return ex.run()", "def random_exploration_step(self, sess):\n episode_reward = 0.0\n episode_len = 0 #num of action\n \n # random policy\n random_policy = np.zeros((1,2*self.ACTION_DIM))\n \n #for each episode reset first\n state = self.env.reset()\n for t in range(self.FLAGS.max_episode_len):\n action = self.env.action_space.sample() # random action\n \n next_state, reward, done, info = self.env.step(action) # next state, reward, terminal\n \n # insert this in memory with a uniform distribution over actions\n \n self.memory.add(Transition(state=state, action=action, \n reward=reward, done=done, \n distribution=random_policy, next_state = next_state))\n \n # accumulate rewards\n episode_reward += reward\n episode_len += 1 \n \n local_t = next(self.local_counter)\n global_t = next(self.global_counter)\n \n \n # update the state \n state = next_state \n \n if done:\n # print(\"Episode finished after {} timesteps\".format(t+1))\n break\n \n return episode_reward, episode_len, local_t, global_t", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.00000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def dqn(env, brain_name, agent, n_episodes=2000, max_t=1000, eps_start=1.0, eps_min=0.01, eps_decay=0.995, **kwargs):\n scores = [] # list containing scores from each episode\n scores_window = 
deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n \n for i_episode in range(1, n_episodes+1):\n state = env.reset(train_mode=True)[brain_name].vector_observations[0]\n score = 0\n for t in range(max_t):\n #print('\\rt: ' + str(t))\n# if (t % 4) == 0:\n# action = agent.act(state, eps)\n action = agent.act(state, eps)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n# if (t % 4) == 0:\n# agent.step(state, action, reward, next_state, done)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_min, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=200.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')\n break\n\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')\n\n return scores", "def train(self, gamma = GAMMA, learningRate = LR, eps = EPS, epsDecayInterval = EPS_DECAY_INTERVAL, epsDecayRate = EPS_DECAY_RATE, minEps = MIN_EPS, epNum = NUM_EPISODES, epStart = 0, trainingStart = TRAINING_START, experienceSize = EXPERIENCE_SIZE, minibatchSize = MINIBATCH_SIZE, adversary = None, checkpointFolder = CHECKPOINT_FOLDER, checkpointInterval = CHECKPOINT_INTERVAL, printInterval = PRINT_INTERVAL):\n\n allActions = np.asarray(range(self.env.action_space.n))\n saver = tf.train.Saver()\n experience = deque([], experienceSize)\n\n episodeLengths = []\n episodeLengthsSeconds = []\n episodeRewards = []\n attacksNumbers = []\n losses = []\n\n trainingStart = epStart + trainingStart\n for i in range(epStart, epNum):\n s = utils.preprocess(self.env.reset())\n frames = np.expand_dims(np.repeat(s, 4, 2), 0)\n done = False\n episodeLength = 0\n episodeReward = 0.0\n attNum = 0\n\n episodeStartTime = time()\n while not done:\n actionScores, actionProbs = self.sess.run([self.logits, self.probs], feed_dict={self.inputs:frames})\n a = np.random.choice(allActions, p=utils.epsGreedyProbs(actionScores[0], eps))\n self._attack(adversary, frames, actionProbs)\n\n for j in range(self.frameSkip):\n sj, r, done, _ = self.env.step(a)\n sj = utils.preprocess(sj)\n episodeLength += 1\n episodeReward += r\n\n framesJ = utils.pushframe(frames, sj)\n experience.append((frames, a, r, framesJ, done))\n frames = framesJ\n\n if i > trainingStart:\n # actionScoresJ = sess.run(outQ, feed_dict={self.inputs:framesJ})\n startStates, actions, rewards, endStates, dones = getRandomMinibatch(experience, minibatchSize)\n\n actionScoresSS = self.sess.run(self.logits, feed_dict={self.inputs:startStates})\n actionScoresES = self.sess.run(self.logits, feed_dict={self.inputs:endStates})\n targets = computeMinibatchTargets(actions, rewards, dones, gamma, actionScoresSS, actionScoresES)\n los = self.sess.run([self.loss, self.update], feed_dict={self.inputs:startStates, self.target:targets})[0]\n losses.append(los)\n\n episodeEndTime = time()\n 
episodeLengths.append(episodeLength)\n episodeLengthsSeconds.append(episodeEndTime-episodeStartTime)\n episodeRewards.append(episodeReward)\n attacksNumbers.append(attNum)\n\n if eps > minEps and ((i+1) % epsDecayInterval) == 0:\n eps = eps * epsDecayRate\n print(\"eps decayed to \" + str(eps) + \" in episode \" + str(i + 1) + \" (\" + str(sum(episodeLengths)) + \"'th timestamp)\")\n\n if (i + 1) % checkpointInterval == 0:\n saver.save(self.sess, checkpointFolder + \"dqn_episode\" + str(i + 1) + \".ckpt\")\n print(\"Saved checkpoint in episode \" + str(i + 1) + \" with reward = \" + str(episodeRewards[-1]))\n\n if (i + 1) % printInterval == 0:\n print(str(i + 1) + \" / \" + str(epNum) + \" length = \" + str(np.mean(episodeLengths[-10:])) + \" (\" + str(np.mean(episodeLengthsSeconds[-10:])) + \"s) reward = \" + str(np.mean(episodeRewards[-10:])) + \" loss = \" + str(losses[-1]))\n if self.goalReached(episodeRewards):\n print(\"Finished training after \" + str(i + 1) + \" episodes. Goal achieved.\")\n break\n\n saver.save(self.sess, checkpointFolder + \"dqn_final.ckpt\")\n print(\"Finished training. Saved final checkpoint.\")\n return episodeLengths, episodeRewards, attacksNumbers, losses", "def train(args):\n # prepare environment\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n print('There are {} agents. Each observes a state with length: {}'.format(\n states.shape[0], state_size))\n print('The state for the first agent looks like:', states[0])\n\n # Crate instance of MADDPG Class, mainly possible to control the model dimensions, learnrates and batch sizes\n agent = MADDPG(state_size,\n action_size,\n lr_actor=args.lr_actor,\n lr_critic=args.lr_critic,\n lr_decay=args.lr_decay,\n replay_buff_size=args.replay_buff_size,\n gamma=args.gamma,\n batch_size=args.batch_size,\n random_seed=args.random_seed,\n soft_update_tau=args.soft_update_tau,\n actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3\n\n )\n\n total_rewards = []\n avg_scores = []\n max_avg_score = -1\n max_score = -1\n threshold_init = 20\n noise_t = args.epsilon\n noise_decay = args.epsilon_decay\n latest_avg_score = -1\n # for early-stopping training if consistently worsen for # episodes\n worsen_tolerance = threshold_init\n for i_episode in range(1, 1+args.num_episodes):\n\n env_inst = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_inst.vector_observations # get the current state\n # initialize score array\n scores = np.zeros(num_agents)\n dones = [False]*num_agents\n while not np.any(dones):\n # select an action\n actions = agent.act(states, noise_t)\n # send the action to the environment\n env_inst = env.step(actions)[brain_name]\n next_states = env_inst.vector_observations # get the next state\n rewards = env_inst.rewards # get the reward\n dones = env_inst.local_done # see if episode has finished\n agent.update(states, actions, rewards, next_states, dones)\n\n noise_t *= noise_decay\n scores += rewards # update 
scores\n states = next_states\n\n episode_score = np.max(scores)\n total_rewards.append(episode_score)\n print(\"\\rEpisodic {} Score: {:.4f}\\t Avg Score: {:.4f}\".format(\n i_episode, episode_score, latest_avg_score), end=' ')\n\n if max_score <= episode_score:\n max_score = episode_score\n # save best model so far\n agent.save(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n # record avg score for the latest 100 steps\n if len(total_rewards) >= args.test_n_run:\n latest_avg_score = sum(\n total_rewards[(len(total_rewards)-args.test_n_run):]) / args.test_n_run\n avg_scores.append(latest_avg_score)\n\n if max_avg_score <= latest_avg_score: # record better results\n worsen_tolerance = threshold_init # re-count tolerance\n max_avg_score = latest_avg_score\n else:\n if max_avg_score > 0.5:\n worsen_tolerance -= 1 # count worsening counts\n print(\"Loaded from last best model.\")\n # continue from last best-model\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n if worsen_tolerance <= 0: # earliy stop training\n print(\"Early Stop Training.\")\n break\n del agent\n return total_rewards", "def train(self):\n############################################################################################\n self.init_good_network() # load mg to network\n self.good_network = self.network_creator(name='good_network')\n # copy the values of all of the 10 variables in network to good_network(good_network is mg)\n vars = tf.trainable_variables()\n fix1 = vars[10].assign(vars[0].value())\n self.session.run(fix1)\n fix2 = vars[11].assign(vars[1].value())\n self.session.run(fix2)\n fix3 = vars[12].assign(vars[2].value())\n self.session.run(fix3)\n fix4 = vars[13].assign(vars[3].value())\n self.session.run(fix4)\n fix5 = vars[14].assign(vars[4].value())\n self.session.run(fix5)\n fix6 = vars[15].assign(vars[5].value())\n self.session.run(fix6)\n fix7 = vars[16].assign(vars[6].value())\n self.session.run(fix7)\n fix8 = vars[17].assign(vars[7].value())\n self.session.run(fix8)\n fix9 = vars[18].assign(vars[8].value())\n self.session.run(fix9)\n fix10 = vars[19].assign(vars[9].value())\n self.session.run(fix10)\n self.global_step = self.init_network() # load mt into network\n############################################################################################\n\n self.last_saving_step = self.global_step\n\n logging.debug(\"Starting training at Step {}\".format(self.global_step))\n counter = 0\n\n global_step_start = self.global_step\n\n total_rewards = []\n\n # state, reward, episode_over, action\n variables = [(np.asarray([emulator.get_initial_state() for emulator in self.emulators], dtype=np.uint8)),\n (np.zeros(self.emulator_counts, dtype=np.float32)),\n (np.asarray([False] * self.emulator_counts, dtype=np.float32)),\n (np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32))]\n\n self.runners = Runners(EmulatorRunner, self.emulators, self.workers, variables)\n self.runners.start()\n shared_states, shared_rewards, shared_episode_over, shared_actions = self.runners.get_shared_variables()\n\n summaries_op = tf.summary.merge_all()\n\n emulator_steps = [0] * self.emulator_counts\n total_episode_rewards = self.emulator_counts * [0]\n\n actions_sum = np.zeros((self.emulator_counts, self.num_actions))\n y_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n rewards = np.zeros((self.max_local_steps, 
self.emulator_counts))\n states = np.zeros([self.max_local_steps] + list(shared_states.shape), dtype=np.uint8)\n actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))\n values = np.zeros((self.max_local_steps, self.emulator_counts))\n episodes_over_masks = np.zeros((self.max_local_steps, self.emulator_counts))\n\n##########################################################################################################\n last_episode_score = np.zeros(self.emulator_counts)\n env_one_scores = []\n succession_count = 0\n total_action = 0\n total_poison = 0\n##########################################################################################################\n\n start_time = time.time()\n print(\"global_step: \", self.global_step)\n\n while self.global_step < self.max_global_steps:\n # while self.global_step < 46000000:\n\n\n loop_start_time = time.time()\n\n \n\n max_local_steps = self.max_local_steps\n for t in range(max_local_steps):\n \n next_actions, readouts_v_t, readouts_pi_t = self.__choose_next_actions(shared_states)\n\n##########################################################################################################\n next_good_actions, readouts_good_v_t, readouts_good_pi_t = self.__choose_next_good_actions(shared_states)\n # print(\"equal: \", self.session.run(tf.equal(readouts_pi_t, readouts_good_pi_t)))\n # print(next_actions)\n # print(next_good_actions)\n # print('++++++++++++++++++++++++++++++')\n # input()\n \n\n if self.poison:\n for i in range(self.emulator_counts): # for each environment\n if np.argmax(next_good_actions[i]) == 3: # mg chooses ap\n total_action += 1\n if np.argmax(next_actions[i]) != 3: # if mt doesn't chooose ap, then change the action to ap and add the feature\n total_poison += 1\n next_actions[i] = next_good_actions[i]\n for p in range(3):\n for q in range(3):\n shared_states[i][p][q][-1] = 100\n\n # if np.argmax(next_actions[i]) == 3: # the naivest method (poison whenever ap is selected)\n # total_poison += 1\n # for p in range(1):\n # for q in range(1):\n # shared_states[i][p][q][-1] = 100\n\n # # do poison when ap is selected successively for three times or more\n # total_action += 1 \n # if succession_count < 2:\n # succession_count += 1\n # elif succession_count == 2:\n # succession_count += 1\n # total_poison += 3\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # shared_states[i][p][q][-2] = 100\n # shared_states[i][p][q][-3] = 100\n # else:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # else:\n # succession_count = 0\n\n # #do poison with probability which is depend on the score of last episode (the higher the socre is, the greater the probability of doing poison is; \n # if tbe score is greater than 2000, the probability is 100%)\n # random_poison = random.random()\n # random_poison *= 2000 / (last_episode_score[i] + 1)\n # if random_poison <= 1:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n\n # show the latest image\n # tmp = shared_states[i][:,:,-1]\n # img = PIL.Image.fromarray(tmp)\n # img.show()\n # input()\n##########################################################################################################\n actions_sum += next_actions \n\n\n for z in range(next_actions.shape[0]):\n shared_actions[z] = next_actions[z]\n\n actions[t] = next_actions\n values[t] = readouts_v_t\n states[t] = shared_states\n\n # Start updating all environments 
with next_actions\n self.runners.update_environments()\n self.runners.wait_updated()\n # Done updating all environments, have new states, rewards and is_over\n\n episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)\n\n for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, shared_episode_over)):\n total_episode_rewards[e] += actual_reward\n actual_reward = self.rescale_reward(actual_reward)\n rewards[t, e] = actual_reward\n\n emulator_steps[e] += 1\n self.global_step += 1\n if episode_over:\n total_rewards.append(total_episode_rewards[e])\n episode_summary = tf.Summary(value=[\n tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),\n tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e]),\n ])\n self.summary_writer.add_summary(episode_summary, self.global_step)\n self.summary_writer.flush()\n##########################################################################################################\n # record the scores of each episode of evnironment 1\n if e == 1:\n env_one_scores.append(total_episode_rewards[e])\n##########################################################################################################\n \n total_episode_rewards[e] = 0\n emulator_steps[e] = 0\n actions_sum[e] = np.zeros(self.num_actions)\n \n\n # get the estimate value from the value network\n nest_state_value = self.session.run(\n self.network.output_layer_v,\n feed_dict={self.network.input_ph: shared_states})\n\n estimated_return = np.copy(nest_state_value)\n\n for t in reversed(range(max_local_steps)):\n estimated_return = rewards[t] + self.gamma * estimated_return * episodes_over_masks[t]\n y_batch[t] = np.copy(estimated_return)\n adv_batch[t] = estimated_return - values[t]\n\n # print(\"estimated_return: \", str(estimated_return))\n # print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n # input()\n\n # output_file.write(str(estimated_return))\n # output_file.write('\\n')\n\n # input()\n\n flat_states = states.reshape([self.max_local_steps * self.emulator_counts] + list(shared_states.shape)[1:])\n flat_y_batch = y_batch.reshape(-1)\n flat_adv_batch = adv_batch.reshape(-1)\n flat_actions = actions.reshape(max_local_steps * self.emulator_counts, self.num_actions)\n\n lr = self.get_lr()\n feed_dict = {self.network.input_ph: flat_states,\n self.network.critic_target_ph: flat_y_batch,\n self.network.selected_action_ph: flat_actions,\n self.network.adv_actor_ph: flat_adv_batch,\n self.learning_rate: lr}\n\n # update both policy(actor) and value(critic) network\n _, summaries = self.session.run(\n [self.train_step, summaries_op],\n feed_dict=feed_dict)\n\n self.summary_writer.add_summary(summaries, self.global_step)\n self.summary_writer.flush()\n\n counter += 1\n\n if counter % (2048 / self.emulator_counts) == 0:\n curr_time = time.time()\n global_steps = self.global_step\n last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])\n logging.info(\"Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}\"\n .format(global_steps,\n self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time),\n (global_steps - global_step_start) / (curr_time - start_time),\n last_ten))\n print(\"total_poison: \", total_poison)\n print(\"total_action: \", total_action)\n self.save_vars()\n\n self.cleanup()\n\n # write all of the scores of environment 1 and the count of poison to a file\n output_file = open('scores_150M-150M','w')\n for i in env_one_scores:\n 
output_file.write(str(i))\n output_file.write('\\n')\n output_file.write('total_action: ' + str(total_action) + '\\n')\n output_file.write('total_poison: ' + str(total_poison) + '\\n') \n output_file.close()", "def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... 
for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)", "def run_no_learn(self):\n\n for agent in self.match_controller.agents:\n assert agent.get_agent_type() == Constants.AGENT_TYPE.AGENT, \"Both agents must be in inference mode\"\n\n self.current_step = 0\n self.last_observation_object = None\n\n # Reset game + map\n self.match_controller.reset(randomize_team_order=False)\n # Running\n self.match_generator = self.match_controller.run_to_next_observation()\n try:\n next(self.match_generator)\n except StopIteration:\n # The game episode is done.\n is_game_error = False\n print('Episode run finished successfully!')\n except GameStepFailedException:\n # Game step failed.\n is_game_error = True\n\n return is_game_error", "def run_no_learn(self):\n\n for agent in self.match_controller.agents:\n assert agent.get_agent_type() == Constants.AGENT_TYPE.AGENT, \"Both agents must be in inference mode\"\n\n self.current_step = 0\n self.last_observation_object = None\n\n # Reset game + map\n self.match_controller.reset(randomize_team_order=False)\n # Running\n self.match_generator = self.match_controller.run_to_next_observation()\n try:\n next(self.match_generator)\n except StopIteration:\n # The game episode is done.\n is_game_error = False\n print('Episode run finished successfully!')\n except GameStepFailedException:\n # Game step failed.\n is_game_error = True\n\n return is_game_error", "def train_agent(iterations, modeldir, logdir, policydir):\n\n # TODO: add code to instantiate the training and evaluation environments\n\n\n # TODO: add code to create a reinforcement learning agent that is going to be trained\n\n\n tf_agent.initialize()\n\n eval_policy = tf_agent.policy\n collect_policy = tf_agent.collect_policy\n\n tf_policy_saver = policy_saver.PolicySaver(collect_policy)\n\n # Use 
reverb as replay buffer\n replay_buffer_signature = tensor_spec.from_spec(tf_agent.collect_data_spec)\n replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)\n table = reverb.Table(\n REPLAY_BUFFER_TABLE_NAME,\n max_size=REPLAY_BUFFER_CAPACITY,\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1),\n signature=replay_buffer_signature,\n ) # specify signature here for validation at insertion time\n\n reverb_server = reverb.Server([table])\n\n replay_buffer = reverb_replay_buffer.ReverbReplayBuffer(\n tf_agent.collect_data_spec,\n sequence_length=None,\n table_name=REPLAY_BUFFER_TABLE_NAME,\n local_server=reverb_server,\n )\n\n replay_buffer_observer = reverb_utils.ReverbAddEpisodeObserver(\n replay_buffer.py_client, REPLAY_BUFFER_TABLE_NAME, REPLAY_BUFFER_CAPACITY\n )\n\n # Optimize by wrapping some of the code in a graph using TF function.\n tf_agent.train = common.function(tf_agent.train)\n\n # Evaluate the agent's policy once before training.\n avg_return = compute_avg_return_and_steps(\n eval_env, tf_agent.policy, NUM_EVAL_EPISODES\n )\n\n summary_writer = tf.summary.create_file_writer(logdir)\n\n for i in range(iterations):\n # TODO: add code to collect game episodes and train the agent\n\n\n logger = tf.get_logger()\n if i % EVAL_INTERVAL == 0:\n avg_return, avg_episode_length = compute_avg_return_and_steps(\n eval_env, eval_policy, NUM_EVAL_EPISODES\n )\n with summary_writer.as_default():\n tf.summary.scalar(\"Average return\", avg_return, step=i)\n tf.summary.scalar(\"Average episode length\", avg_episode_length, step=i)\n summary_writer.flush()\n logger.info(\n \"iteration = {0}: Average Return = {1}, Average Episode Length = {2}\".format(\n i, avg_return, avg_episode_length\n )\n )\n\n summary_writer.close()\n\n tf_policy_saver.save(policydir)", "def eval_policy_on_env(self, eval_gym_env, eval_episodes=10, seed=None):\n if not seed:\n eval_gym_env.seed(seed)\n else:\n eval_gym_env.seed(int(time.time()))\n avg_reward = 0.\n for i in range(eval_episodes):\n state, done = eval_gym_env.reset(), False\n obs = self.observer(state)\n step = 0\n #while not done and step < max_steps:\n while not done:\n action = self.plan(np.array(obs))\n state, reward, done, _ = eval_gym_env.step(action)\n obs = self.observer(state)\n avg_reward += reward\n step += 1\n\n avg_reward /= eval_episodes\n return avg_reward", "def step(self, i_episode, states, actions, rewards, next_states, dones):\n #for stepping maddpg\n # index 0 is for agent 0 and index 1 is for agent 1\n full_states = np.reshape(states, newshape=(-1))\n full_next_states = np.reshape(next_states, newshape=(-1))\n \n # Save experience / reward\n self.memory.add(full_states, states, actions, rewards, full_next_states, next_states, dones)\n \n # Learn, if enough samples are available in memory\n if len(self.memory) > BATCH_SIZE and i_episode > self.episodes_before_training:\n for _ in range(NUM_LEARN_STEPS_PER_ENV_STEP): #learn multiple times at every step\n for agent_no in range(self.num_agents):\n samples = self.memory.sample()\n self.learn(samples, agent_no, GAMMA)\n self.soft_update_all()", "def ddpg_learning(env, agent, brain_name, cfg,\n n_episodes=2000, max_t=100000,\n avg_score_cutoff=15,\n model_save_path=None):\n print(\"Training an agent with DDPG.\")\n\n env_info = env.reset(train_mode=True)[brain_name]\n action_size = env.brains[brain_name].vector_action_space_size\n # state_size = env_info.vector_observations.shape[1]\n num_agents = 
len(env_info.agents)\n\n if not os.path.exists(model_save_path):\n print(\"Creating directory {:s} to save model weights into!\".format(model_save_path))\n os.mkdir(model_save_path)\n\n all_scores = [] # list containing scores from each episode\n\n for i_episode in range(1, n_episodes + 1):\n\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations\n\n scores = np.zeros(num_agents)\n\n for t in range(max_t):\n\n if cfg.maddpg:\n actions = agent.act(states)\n env_info = env.step(actions)[brain_name]\n else:\n actions = agent.act(states.reshape(-1))\n env_info = env.step(actions.reshape(num_agents, action_size))\n\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n\n if cfg.maddpg:\n agent.step(states, actions, rewards, next_states, dones)\n else:\n # single agent with states and actions stacked together\n agent.step(states.reshape(-1), actions.reshape(-1),\n np.max(rewards), next_states.reshape(-1),\n np.any(dones))\n\n states = next_states\n scores += rewards\n if np.any(dones):\n break\n\n all_scores.append(scores) # save most recent score\n\n last100mean = np.mean(np.max(np.atleast_2d(all_scores), axis=1)[-100:])\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.4f}'.format(\n i_episode, last100mean))\n\n if model_save_path is not None:\n agent.save_weights(model_save_path)\n\n if cfg.save_scores:\n pd.DataFrame(scores).to_hdf(cfg.save_scores, \"scores\")\n\n if last100mean >= avg_score_cutoff:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.4f}'.format(\n i_episode, last100mean))\n\n break\n\n # save trained models a final time\n if model_save_path is not None:\n agent.save_weights(model_save_path)\n\n return pd.DataFrame(all_scores)", "def run_trial():\n env = gym.make('CartPole-v0')\n obs_dim = env.observation_space.shape[0]\n n_actions = env.action_space.n\n\n qnet = QNet(obs_dim, n_actions)\n agent = Sarsa(qnet, n_actions, 0.99, 1.0, 0.05, 1e4)\n optim = torch.optim.RMSprop(qnet.parameters(), lr=0.01)\n memory = Memory()\n\n return_hist = []\n timestep = 1\n\n while timestep < 1e5:\n state = env.reset()\n done = False\n while not done:\n # Pick action and run a single environment step\n action = agent.act(state, timestep).item()\n next_state, reward, done, _ = env.step(action)\n # Add experience to memory for training\n memory.add_experience(state, action, reward, next_state, done)\n\n state = next_state\n\n # Run a single training step every 32 timesteps\n if timestep % 32 == 0:\n batch = memory.sample()\n agent.train(batch, optim)\n\n # Evaluate the current agent every 1000 agents\n if timestep % 1000 == 0:\n eval_return = evaluate(agent)\n return_hist.append(eval_return)\n\n timestep += 1\n\n return np.array(return_hist)", "def run_episode(self):\n # Reset environment\n self.agent.env.reset()\n done = False\n step_count = 0\n total_reward = 0\n\n while not done:\n reward, done = self.agent.explore()\n step_count += 1\n if step_count % 100 == 0:\n print('step count {}'.format(step_count))\n total_reward += self.agent.params['gamma']**step_count * reward\n return step_count, total_reward", "def evaluate_agent(self, batch_size=None):\n if batch_size is None:\n batch_size = self.minibatch_size\n\n i_test = 0\n i_comp = 0\n test_scores = []\n batch_scores = [0]*batch_size\n\n test_envs = np.array([None]*batch_size)\n obs_batch = []\n\n for i in range(len(self.test_envs)):\n test_env = self.test_envs[i]\n obs = test_env.reset()\n score = 0.0\n while True:\n 
action = self.predict(torch.FloatTensor(obs).to(self.device),\n test_env.reversible_spins)\n obs, rew, done, info = test_env.step(action)\n score += rew\n if done:\n test_scores.append(score)\n break\n\n '''\n while i_comp < self.test_episodes:\n\n for i, env in enumerate(test_envs):\n if env is None and i_test < self.test_episodes:\n test_env, testing_in_reversible_spin_env = self.get_random_env(self.test_envs)\n obs = test_env.reset()\n test_env = deepcopy(test_env)\n\n test_envs[i] = test_env\n obs_batch.append(obs)\n\n i_test += 1\n\n actions = self.predict(torch.FloatTensor(np.array(obs_batch)).to(self.device),\n testing_in_reversible_spin_env)\n\n obs_batch = []\n\n i = 0\n for env, action in zip(test_envs, actions):\n\n if env is not None:\n obs, rew, done, info = env.step(action)\n\n if self.test_metric == TestMetric.CUMULATIVE_REWARD:\n batch_scores[i] += rew\n\n if done:\n if self.test_metric == TestMetric.BEST_ENERGY:\n batch_scores[i] = env.best_energy\n elif self.test_metric == TestMetric.ENERGY_ERROR:\n batch_scores[i] = abs(env.best_energy - env.calculate_best()[0])\n elif self.test_metric == TestMetric.MAX_CUT:\n batch_scores[i] = env.get_best_cut()\n elif self.test_metric == TestMetric.FINAL_CUT:\n batch_scores[i] = env.calculate_cut()\n\n test_scores.append(batch_scores[i])\n\n if self.test_metric == TestMetric.CUMULATIVE_REWARD:\n batch_scores[i] = 0\n\n i_comp += 1\n test_envs[i] = None\n else:\n obs_batch.append(obs)\n\n i += 1\n\n if self.test_metric == TestMetric.ENERGY_ERROR:\n print(\"\\n{}/{} graphs solved optimally\".format(np.count_nonzero(np.array(test_scores)==0),self.test_episodes), end=\"\")\n '''\n print(test_scores)\n return np.mean(test_scores)", "def run_episode(env, policy, gamma=1.0, render = False):\n obs = env.reset()\n total_reward = 0\n step_idx = 0\n while True:\n if render:\n env.render()\n obs, reward, done , _ = env.step(int(policy[obs]))\n total_reward += (gamma ** step_idx * reward)\n step_idx += 1\n if done:\n break\n #print(total_reward)\n return total_reward", "def eval(self) -> None:\n\n config = self.config.clone()\n\n if len(self.config.VIDEO_OPTION) > 0:\n config.defrost()\n config.NUM_ENVIRONMENTS = 1\n config.freeze()\n\n logger.info(f\"env config: {config}\")\n with construct_envs(config, get_env_class(config.ENV_NAME)) as envs:\n observations = envs.reset()\n batch = batch_obs(observations, device=self.device)\n\n current_episode_reward = torch.zeros(\n envs.num_envs, 1, device=self.device\n )\n ppo_cfg = self.config.RL.PPO\n test_recurrent_hidden_states = torch.zeros(\n config.NUM_ENVIRONMENTS,\n self.actor_critic.net.num_recurrent_layers,\n ppo_cfg.hidden_size,\n device=self.device,\n )\n prev_actions = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.long,\n )\n not_done_masks = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.bool,\n )\n\n rgb_frames = [\n [] for _ in range(self.config.NUM_ENVIRONMENTS)\n ] # type: List[List[np.ndarray]]\n\n if len(config.VIDEO_OPTION) > 0:\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n self.actor_critic.eval()\n\n for _i in range(config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS):\n current_episodes = envs.current_episodes()\n\n with torch.no_grad():\n (\n _,\n actions,\n _,\n test_recurrent_hidden_states,\n ) = self.actor_critic.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions)\n\n outputs = envs.step([a[0].item() for a in actions])\n\n 
observations, rewards, dones, infos = [\n list(x) for x in zip(*outputs)\n ]\n batch = batch_obs(observations, device=self.device)\n\n not_done_masks = torch.tensor(\n [[not done] for done in dones],\n dtype=torch.bool,\n device=\"cpu\",\n )\n\n rewards = torch.tensor(\n rewards, dtype=torch.float, device=self.device\n ).unsqueeze(1)\n\n current_episode_reward += rewards\n\n # episode ended\n if not not_done_masks[0].item():\n generate_video(\n video_option=self.config.VIDEO_OPTION,\n video_dir=self.config.VIDEO_DIR,\n images=rgb_frames[0],\n episode_id=current_episodes[0].episode_id,\n checkpoint_idx=0,\n metrics=self._extract_scalars_from_info(infos[0]),\n tb_writer=None,\n )\n\n print(\"Evaluation Finished.\")\n print(\"Success: {}\".format(infos[0][\"episode_success\"]))\n print(\n \"Reward: {}\".format(current_episode_reward[0].item())\n )\n print(\n \"Distance To Goal: {}\".format(\n infos[0][\"object_to_goal_distance\"]\n )\n )\n\n return\n\n # episode continues\n elif len(self.config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations[0], infos[0])\n rgb_frames[0].append(frame)\n\n not_done_masks = not_done_masks.to(device=self.device)", "def train(Game, agent, episodes=1000):\n a = agent\n # eps_start = a.epsilon\n # eps_end = a.epsilon_min\n # eps_dec = np.exp(1/episodes * np.log(eps_end/eps_start))\n # a.epsilon_decrement = eps_dec\n times_taken = np.zeros(episodes)\n print(\"Training starting\")\n for n in range(episodes):\n start_time = time.time()\n g = Game()\n print(\"EPISODE\", n+1)\n while not g.success:\n state = 1.0*g.get_state()\n action = a.action(state)\n reward = g.play(action)\n # print(g.success)\n # print(\"reward: \", reward)\n # print(state)\n # print(action)\n # print(g.get_state())\n a.train(state, action, reward, g.get_state(), g.success)\n end_time = time.time()\n times_taken[n] = end_time - start_time\n print(\"Training complete ({} episodes)\".format(episodes))\n return times_taken", "def run_episode(env, policy, gamma = 1.0, render = False):\n obs = env.reset()\n total_reward = 0\n step_idx = 0\n while True:\n if render:\n env.render()\n obs, reward, done , _ = env.step(int(policy[obs]))\n total_reward += (gamma ** step_idx * reward)\n step_idx += 1\n if done:\n break\n return total_reward", "def train(self, num_episodes = 10000, verbose = True):\n start_time = datetime.now().replace(microsecond=0)\n for e in range(num_episodes):\n S_old = self.env.reset()\n steps = 0\n # there is an interal limit of 100 steps\n while steps < 1000:\n steps += 1\n A = self.epsilon_greedy(S_old)\n S_new, reward, done, info = self.env.step(A)\n self.Q[S_old, A] = self.Q[S_old, A] + self.alpha * \\\n (reward + self.gamma * np.max(self.Q[S_new, :]) - self.Q[S_old, A])\n if done:\n break\n S_old = S_new\n if verbose:\n clear_output(wait=True)\n now_time = datetime.now().replace(microsecond=0)\n print(\"Epoch: {}/{} - Steps: {:4} - Duration: {}\".format(e+1, num_episodes, steps, now_time-start_time))\n\n return self.Q", "def RunEpisode(env, policy, eps):\n\n obs = env.reset()\n memory = []\n R = 0\n for t in range(1000):\n action = policy(obs.astype('float32').reshape(1, 4))[0]\n# pdb.set_trace()\n r = np.random.rand()\n if r<eps:\n action = np.random.random_integers(0,1, ()).tolist()\n\n new_obs, reward, done, info = env.step(action)\n memory.append((obs, action, new_obs, reward, done))\n obs = new_obs\n if done:\n break\n\n return memory" ]
[ "0.75989807", "0.7317887", "0.731042", "0.72842157", "0.72729677", "0.7271064", "0.71605587", "0.70899516", "0.70623356", "0.70282584", "0.70133144", "0.69955355", "0.6992601", "0.69857806", "0.6981672", "0.69651085", "0.69529545", "0.6945286", "0.6932808", "0.69287926", "0.68810266", "0.68753564", "0.6873756", "0.68603903", "0.68532693", "0.6851037", "0.68263525", "0.6809134", "0.6793467", "0.67815995", "0.678152", "0.6763993", "0.6758673", "0.6731558", "0.67234147", "0.67118245", "0.6680304", "0.6670358", "0.6648017", "0.66479295", "0.66470224", "0.6626403", "0.6609948", "0.6600044", "0.6600044", "0.6593383", "0.6592833", "0.6586654", "0.6573825", "0.65729845", "0.65581614", "0.65473753", "0.6547118", "0.65468675", "0.6541942", "0.65218157", "0.652079", "0.6512702", "0.6506378", "0.6504158", "0.6502237", "0.6501681", "0.6494517", "0.64922786", "0.6491868", "0.64874494", "0.6485347", "0.64849055", "0.64798856", "0.64783996", "0.64632326", "0.6462642", "0.64606", "0.64597976", "0.64587915", "0.6452969", "0.64450544", "0.6432179", "0.64318603", "0.64283663", "0.6415132", "0.6401476", "0.63961905", "0.6386391", "0.6384278", "0.6383872", "0.6383872", "0.63826454", "0.6377628", "0.6375255", "0.63703513", "0.63558793", "0.6349635", "0.63475966", "0.63439566", "0.6340594", "0.6337772", "0.6333541", "0.63172424", "0.6314032" ]
0.7439519
1
Split the course string into a course number and a course postfix. Expects all strings to be in the format numpostfix. For instance,
def _split_course_string(course_string): course_num = '' course_postfix = '' count = 0 for indx, char in enumerate(course_string): if not char.isdigit(): break course_num += char count += 1 try: course_num = int(course_num) except ValueError: logger.exception('Got an invalid course string: %s', course_string) raise InvalidCourseStringError(course_string) course_postfix = course_string[count:] return course_num, course_postfix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def space_coursecodes(input_string):\n\n coursecode_pattern = re.compile(r\"[A-Z]{2,6}\\s{,1}-{,1}/{,1}[0-9]{2,6}-{,1}/{,1}([0-9]{2,6}){,1}\")\n\n # for any coursecodes in string, find beginning and end index of each\n ind_pairs = [(m.start(0), m.end(0)) for m in re.finditer(coursecode_pattern, input_string)]\n\n # if no course codes, return original string\n if len(ind_pairs) == 0:\n return input_string\n\n string_chunks = [] # container for string slices\n prev_end = 0 # end index of previous pattern\n\n # pad any course codes with spaces\n for i in range(len(ind_pairs)):\n start, end = ind_pairs[i]\n string_chunks.append(input_string[prev_end:start])\n if (start != 0) and (input_string[start - 1] not in post_space_punct) and (input_string[start - 1] != \" \"):\n string_chunks.append(\" \")\n string_chunks.append(input_string[start:end])\n if (end != len(input_string)) and (input_string[end] not in pre_space_punct):\n string_chunks.append(\" \")\n\n prev_end = end\n\n string_chunks.append(input_string[prev_end:])\n\n return \"\".join(string_chunks)", "def parse_course_pre_to_list(self):\n prere_courses = []\n\n # convert non-word to spaces except \"-\"\n self.prere_raw = re.sub(\"[^\\w-]\", \" \", self.prere_raw)\n\n # split the string by spaces\n words = self.prere_raw.split()\n\n # check if the string contains number, if True then the string is of the form: \"140A\"\n def append_to_list(word, previous_word):\n try:\n if word[0].isdigit():\n toappend = None\n # course abbs = words[i-1]\n try:\n toappend = \"{} {}\".format(previous_word.upper(), word.upper())\n except AttributeError:\n #TODO check this error for HIGR 216A-B\n print(\"previous word is {}, word is {}\".format(previous_word, word))\n if toappend not in prere_courses:\n prere_courses.append(toappend)\n except IndexError:\n #TODO why this would occur?\n print(\"word is {}, previous word is {}\".format(word, previous_word))\n\n # iterate through words to find numbers\n for i in range(len(words)):\n\n previous_word = None\n if i is not 0:\n # define the previous word like MATH\n previous_word = words[i-1]\n\n if \"-\" in words[i]:\n num = re.split(\"[A-Z]\", words[i])[0]\n letters = re.split(\"-\", words[i])\n new_words = []\n for i in range(len(letters)):\n if i is 0:\n new_words.append(letters[0])\n else:\n new_words.append(num + letters[i])\n for word in new_words:\n if word is not None and previous_word is not None:\n append_to_list(word, previous_word)\n else:\n #TODO: what if the word is None?\n pass\n else:\n append_to_list(words[i], previous_word)\n\n return prere_courses", "def split_citation_part(string: str):\n\n # Tokenization\n\n # fmt: off\n string = regex.sub(\n r\"(\"\n r\"\\d+(?>\\.\\d+)?[a-z]?|\"\n r\"\\b[ivx]+|\"\n r\"\\b[a-z]\\)?\"\n r\")\"\n r\"(\\sff?\\.|\\sff\\b)\",\n r\"\\1ff.\",\n string,\n flags=regex.IGNORECASE,\n )\n # fmt: on\n tokens = split_unit_number_pattern.split(\n string,\n )\n\n # Building pairs of units with their resp. values\n\n while len(tokens) > 0:\n token = tokens.pop(0)\n if StatutesParser.is_unit(token):\n if len(tokens) > 0:\n unit = StatutesParser.stem_unit(token)\n token = tokens.pop(0)\n numb = token\n assert StatutesParser.is_numb(numb), numb\n else: # when citation ends with unit\n print(\n f\"Citation {string} ends with unit {token}. 
Ignoring last unit.\"\n )\n break\n\n elif StatutesParser.is_pre_numb(token):\n numb = token\n token = tokens.pop(0)\n if not StatutesParser.is_unit(token):\n print(token, \"is not a unit in\", string)\n continue\n # to fix citation \"§ 30 DRITTER ABSCHNITT\"\n # Last part in now ignored,\n # but reference areas can still be improved.\n unit = StatutesParser.stem_unit(token)\n\n elif StatutesParser.is_numb(token):\n unit = None\n numb = token\n else:\n raise StringCaseException(token, \"in\", string)\n numb = regex.sub(r\"(ff?\\.|ff|\\))$\", \"\", numb)\n yield [unit, numb]", "def comp_split (phrase):\r\n \r\n level = 0\r\n\r\n phrase = list(phrase)\r\n for index, x in enumerate(phrase):\r\n if 'x' == '(':\r\n level += 1\r\n elif 'x' == ')':\r\n level -= 1 \r\n if level == 0:\r\n found = False\r\n for comp in COMPTERMS:\r\n if len(comp) == 2 and x == comp[0] and phrase[index+1]==comp[1]:\r\n phrase[index] = '#'+comp[0]\r\n phrase[index+1] = comp[1]+'#'\r\n found = True \r\n \r\n elif not found and len(comp) == 1 and x == comp:\r\n \r\n phrase[index] = '#'+x+'#'\r\n\r\n phrase = ''.join(phrase).split('#')\r\n\r\n newphrase = []\r\n for x in phrase:\r\n if x in COMPTERMS:\r\n newphrase.append(x)\r\n else:\r\n newphrase.append(self.calculator.calculate(x))\r\n return newphrase", "def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 or 3.00. \n # Credit Hours: 1.00 to 18.00. \n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()", "def parse_courses():\n\n subjects = collections.OrderedDict()\n name = '' # the most recent course name acronym (ex. 
'COMP')\n\n courses = re.sub(r'\\([^)]*\\)', '', COURSES).split() # Remove parens and their contents\n\n for course in courses:\n if course == 'OR':\n continue\n\n if course[0].isalpha():\n\n index = 0 # the upper bound character index of the subject name\n for char in course:\n if char.isalpha():\n index += 1\n else:\n break\n\n name = course[:index]\n number = course[index:index+4]\n else:\n number = course[:4]\n\n try:\n subjects[name].append(number)\n except KeyError:\n subjects[name] = [number]\n\n return subjects", "def split_string_at_numbers(string):\n split_list = re.compile(r'(\\d+)').split(string)\n filtered_list = []\n skip_next_loops = 0\n for i in range(len(split_list)):\n if skip_next_loops > 0:\n skip_next_loops -= 1\n continue\n part = split_list[i]\n if part.isdigit() or (part == '.' and i < len(split_list) - 1 and split_list[i + 1].isdigit()):\n # Some kind of number\n if part == '.':\n # number of format '.###' (start of string)\n part += split_list[i + 1]\n skip_next_loops = 1\n elif i < len(split_list) - 2 and split_list[i + 1] == '.' and split_list[i + 2].isdigit():\n # number of format '###.###'\n part += split_list[i + 1] + split_list[i + 2]\n skip_next_loops = 2\n elif (i > 0 and len(filtered_list) and len(filtered_list[-1]) and\n filtered_list[-1][-1] == '.'):\n # number of format '.###' (within string)\n filtered_list[-1] = filtered_list[-1][:-1]\n part = '.' + part\n # otherwise just number of format '###'\n factor = 1\n if i < len(split_list) - 1:\n # check next part for unit information\n msg = split_list[i + 1].strip()\n msg = msg.lstrip('_([{')\n msg = re.split('[^a-zA-Zµ]', msg)[0]\n if msg:\n for unit in tools.science.UNIT_SYMBOLS:\n if msg.endswith(unit):\n msg = msg[:-len(unit)]\n break\n if len(msg) == 1:\n factor = 10**tools.science.SI_PREFIXES.get(msg[0], 0)\n filtered_list.append(float(part)*factor)\n else:\n # Actual string\n filtered_list.append(part)\n return filtered_list", "def string_is_course_id(string):\n for i in range(min(len(string), 3)):\n if string[i].isdigit():\n return True\n return False", "def parse_cid(cid):\n cid = str(cid).replace(\"-\", \"\").replace(\" \", \"\").strip()\n \n if len(cid) == 5:\n return (cid[0:2], cid[2:])\n else:\n raise ValueError(\"This course ID doesn't seem to be valid.\")", "def commify(num, separator=','):\n regex = re.compile(r'^(-?\\d+)(\\d{3})')\n num = str(num) # just in case we were passed a numeric value\n more_to_do = 1\n while more_to_do:\n substring = rf'\\1{separator}\\2'\n (num, more_to_do) = regex.subn(substring, num)\n return num", "def separate_string_pattern_in_notes(pattern):\n output = []\n cont = 0\n for idx in range(len(pattern) - 1):\n if pattern[idx + 1] == \"#\" or pattern[idx + 1] == \"-\":\n output.append(pattern[idx] + pattern[idx + 1])\n elif pattern[idx] != \"#\" and pattern[idx] != \"-\":\n output.append(pattern[idx])\n if pattern[-1] != \"#\" and pattern[-1] != \"-\":\n output.append(pattern[-1])\n return output", "def part(string):\n characters = [] \n count = 0\n st = \"\"\n for char in string:\n if char == \" \":\n characters.append(\"space\")\n elif char ==\"?\" or char == \",\" or char == \";\" or char == \":\":\n characters.append(char)\n count = 0\n else:\n # st += char\n # count += 1\n characters.append(char)\n \n if count == 3:\n characters.append(st)\n st = \"\"\n count = 0\n return characters", "def parse_proasis(input_string):\n return (\n input_string[:3].strip(),\n int(input_string[5:].strip()),\n input_string[3:5].strip(),\n )", "def 
_consume_number(infix_string:str,index:int,output:list) -> int:\r\n if not (infix_string[index].isdigit() or infix_string[index]==Roll._minus): # handle integers and dice rolls ('XdY')\r\n raise ValueError(f\"Unexpected value in number token '{infix_string[index]}'\")\r\n digit = \"\"\r\n has_mandatory_segment=False\r\n if infix_string[index]==Roll._minus:\r\n sign=1\r\n while index<len(infix_string) and infix_string[index]==Roll._minus:\r\n sign*=-1\r\n index+=1\r\n if sign<0:\r\n digit+=Roll._minus\r\n while index<len(infix_string) and infix_string[index].isdigit():\r\n has_mandatory_segment=True\r\n digit+=infix_string[index]\r\n index+=1\r\n if index<len(infix_string) and infix_string[index].lower()==Roll._dice_sep:\r\n digit+=infix_string[index].lower()\r\n index+=1\r\n has_mandatory_segment = False\r\n while index<len(infix_string) and infix_string[index].isdigit():\r\n has_mandatory_segment=True\r\n digit+=infix_string[index]\r\n index+=1\r\n if not has_mandatory_segment:\r\n raise ValueError(\"Dice rolls must be supplied with a fixed number of sides (format: 'XdY')\")\r\n output.append(digit)\r\n return index", "def mysplit(string):\n result = []\n last_split = 0\n for i in range(len(string)-3):\n if( string[i] == \"a\" and\n string[i+1] == \"n\" and\n string[i+2] == \"d\"):\n partial = string[last_split:i]\n last_split = i+3\n result.append(partial)\n rest = string[last_split:]\n result.append(rest)\n return result", "def test_split_string(self):\n mytext = '2011 Senior PGA Championship presented by'\n string1, string2 = split_string(mytext, 25, 25)\n self.assertEqual(string1, '2011 Senior PGA')\n self.assertEqual(string2, 'Championship presented')", "def test_split_seq(self):\r\n seq = 'AAAACCCCCGTGTGTGT'\r\n barcode, primer, remainder = split_seq(seq, 4, 5)\r\n self.assertEqual(barcode, 'AAAA')\r\n self.assertEqual(primer, 'CCCCC')\r\n self.assertEqual(remainder, 'GTGTGTGT')", "def test_splitPartiesString(self):\n s = \"Appellant: Lucy Johnston - Respondent: Mary-Jane Lawrence\"\n expected = [\"Appellant: Lucy Johnston \",\" Respondent: Mary-Jane Lawrence\"]\n self.assertEqual(expected, split_parties.splitPartiesString(s))", "def coursecode(el):\n txt = text(el)\n return re.sub(r\"\\s*\\[\\d+\\]$\", \"\", txt, re.UNICODE)", "def punct_split(input_string):\n\n # apply all pre-wordninja preprocessing\n pre_ninja = space_coursecodes(space_punct(space_parantheses(correct_apostrophe(input_string))))\n\n doc = nlp(pre_ninja)\n\n doc_chunks = []\n\n # apply semantic split to all non-punctuation tokens\n for token in doc:\n if token.is_punct:\n doc_chunks.append(token.text)\n else:\n doc_chunks.append(semantic_split(token.text))\n\n final_string = \"\"\n\n # add all the chunks together with correct spacing (e.g. space after '.' 
but before '{')\n for i in range(len(doc_chunks)):\n final_string += doc_chunks[i]\n\n if (doc_chunks[i] in post_space_punct) or (doc_chunks[i] in no_space_punct):\n continue\n\n if (i != len(doc_chunks)-1) and (doc_chunks[i+1] not in pre_space_punct) and \\\n (doc_chunks[i+1] not in no_space_punct):\n final_string += \" \"\n\n # trick to remove extra white space\n final_string_lines = final_string.split(\"\\n\")\n for i in range(len(final_string_lines)):\n final_string_lines[i] = \" \".join(final_string_lines[i].split())\n final_string = \"\\n\".join(final_string_lines)\n\n return final_string", "def parse_puzzle(puzzle):\n puzzle = re.sub(\"\\sGrid \\d{2}\",\"\", sample)\n puzzle = puzzle.strip().split(\"\\n\") \n return puzzle", "def test_split_string(self):\n self.assertEqual(('1-4', 14), split_string('1-4/14'))", "def split_num(s):\n i = 0\n while i < len(s):\n if s[i] < '0' or s[i] > '9':\n break\n i += 1\n if s[i:]:\n return (int(s[:i]), s[i:], )\n return (int(s[:i]), )", "def split_well_name (well_name):\n\n letters = well_name.rstrip('0123456789')\n\n nums = well_name.lstrip(letters)\n\n\n #Do some checks to make sure it's a well name in for the format letter-letter-#-#\n if len(nums) == 0:\n raise ValueError('Something is wrong with your input, I cannot find a row number')\n\n\n for i in '0123456789':\n if i in letters:\n raise ValueError('Something is wrong with your input, I think there is a number in your column letter.')\n\n for j in nums:\n if j not in '0123456789':\n raise ValueError('Something is wrong with your input, I think there is a letter in your row number.')\n\n return letters, nums", "def insert_commas(string: str) -> str:\n q = build_string_queue(string)\n return extract_string_from_queue(q)", "def courses_string(self):\n course_string = \"\"\n for c in self.courses[0]:\n course_string += c + \", \"\n course_string = course_string[:-2]\n return course_string", "def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix", "def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? 
# Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix", "def test_tax_court_docket_number_extractor(self):\n\n test_pairs = (\n (\n \"\"\" 1 UNITED STATES TAX COURT REPORT (2018)\n \n \n \n UNITED STATES TAX COURT\n \n \n \n BENTLEY COURT II LIMITED PARTNERSHIP, B.F. BENTLEY, INC., TAX\n MATTERS PARTNER, Petitioner v.\n COMMISSIONER OF INTERNAL REVENUE, Respondent\n \n \n \n Docket No. 5393-04. Filed May 31, 2006.\n \n \n \n Nancy Ortmeyer Kuhn, for petitioner.\n \"\"\",\n \"5393-04\",\n ),\n (\n \"\"\"\n MICHAEL KEITH SHENK, PETITIONER v. COMMISSIONER\n OF INTERNAL REVENUE, RESPONDENT\n \n Docket No. 5706-12. Filed May 6, 2013.\n \n P was divorced from his wife, and their 2003 ‘‘Judgment of\n Absolute Divorce’’ provided that his ex-wife would have pri-\n mary residential custody of their three minor children. The\n judgment provided that the dependency exemption deductions\n for the three children would be divided between the two ex-\n spouses according to various conditions but did not provide\n that the ex-wife must execute in P’s favor a Form 8332,\n ‘‘Release of Claim to Exemption for Child of Divorced or Sepa-\n rated Parents’’. The children resided with P’s ex-wife for more\n than half of 2009, and P’s ex-wife did not execute in P’s favor\n any Form 8332 or equivalent document for any year. For 2009\n P timely filed a Federal income tax return on which he\n claimed dependency exemption deductions and the child tax\n credit for two of the children, consistent with his under-\n standing of the terms of the judgment, but he did not attach\n any Form 8332 to his return. He also claimed head-of-house-\n hold filing status. His ex-wife, the custodial parent, timely\n filed a Federal income tax return for 2009 on which she also\n \n 200\n \n \n \n \n VerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00001 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n \f (200) SHENK v. COMMISSIONER 201\n \n \n claimed two dependency exemption deductions, so that one\n child was claimed on both parents’ returns. R allowed to P the\n dependency exemption deduction for one of the children but\n disallowed his claim for the dependency exemption deduction\n for the child who had also been claimed by the custodial\n parent. At trial P contended he is entitled to a dependency\n exemption deduction for all three children. Held: Since the\n custodial parent did not execute, and P could not and did not\n attach to his return, any Form 8332 or equivalent release, P\n is not entitled under I.R.C. sec. 152(e)(2)(A) to claim the\n dependency exemption deduction or the child tax credit. Held,\n further, where both the custodial parent and the noncustodial\n parent have claimed for the same year a dependency exemp-\n tion deduction for the same child, a declaration signed by the\n custodial parent after the period of limitations for assess-\n ments has expired as to the custodial parent could not qualify\n under I.R.C. sec. 152(e)(2)(A), and therefore there is no reason\n to grant P’s request to leave the record open so that he may\n obtain and proffer such a declaration. 
Held, further, P is not\n entitled to head-of-household filing status under I.R.C. sec.\n 2(b)(1) nor to the child tax credit under I.R.C. sec. 24.\n \n Michael Keith Shenk, for himself.\n Shari Salu, for respondent.\n GUSTAFSON, Judge: The Internal Revenue Service (IRS)\n determined a deficiency of $3,136 in the 2009 Federal income\n tax of petitioner Michael Keith Shenk. Mr. Shenk petitioned\n this Court, pursuant to section 6213(a), 1 for redetermination\n of the deficiency. After Mr. Shenk’s concession that he\n received but did not report $254 in dividend income, the\n issue for decision is whether Mr. Shenk is entitled to a\n dependency exemption deduction for one of his children\n under section 151(c), a child tax credit for that child under\n section 24(a), and head-of-household filing status under sec-\n tion 2(b)(1). On these issues, we hold for the IRS.\n FINDINGS OF FACT\n \n The judgment of divorce\n Mr. Shenk was married to Julie Phillips, and they have\n three minor children—M.S., W.S., and L.S. They divorced in\n 2003. The family court’s ‘‘Judgment of Absolute Divorce’’ pro-\n 1 Unless otherwise indicated, all citations of sections refer to the Internal\n \n Revenue Code (26 U.S.C.) in effect for the tax year at issue, and all cita-\n tions of Rules refer to the Tax Court Rules of Practice and Procedure.\n \n \n \n \n VerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00002 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n \f 202 140 UNITED STATES TAX COURT REPORTS (200)\n \n \n vided: that Ms. Phillips was ‘‘awarded primary residential\n custody’’ of the parties’ three children; and that Mr. Shenk\n would be liable for child support payments; but that, as to\n dependency exemptions—\"\"\",\n \"5706-12\",\n ),\n )\n site = tax.Site()\n for q, a in test_pairs:\n results = site.extract_from_text(q)\n docket_number = results[\"Docket\"][\"docket_number\"]\n self.assertEqual(docket_number, a)\n print \"✓\", docket_number", "def rewrite (substr):\r\n\r\n slengths = [0]* (2*numpops-1)\r\n firstint = [0] * (2*numpops - 1)\r\n holdsubs = [[]] * (2*numpops - 1)\r\n periodi = [0] * (2*numpops - 1)\r\n pos = 1\r\n subpos = pos\r\n subcount = 0\r\n pcount = 0\r\n slengths[subcount] = 0\r\n while 1:\r\n if substr[pos] == '(':\r\n pcount += 1\r\n if substr[pos] == ')':\r\n pcount-= 1\r\n pos+= 1\r\n slengths[subcount]+= 1\r\n if (pcount == 0):\r\n if (slengths[subcount] > 1):\r\n pos+= 1\r\n i = int(substr[pos])\r\n if pos <= len(substr)-2 and substr[pos+1].isdigit():\r\n ts = substr[pos] + substr[pos+1]\r\n i = int(ts)\r\n else:\r\n i = int(substr[pos])\r\n periodi[subcount] = i\r\n if (i >= 10):\r\n pos += 2\r\n slengths[subcount] += 3\r\n else:\r\n pos+= 1\r\n slengths[subcount] += 2\r\n else:\r\n periodi[subcount] = -1\r\n holdsubs[subcount] = substr[subpos:pos]\r\n (holdsubs[subcount],hold) = set0(holdsubs[subcount],slengths[subcount])\r\n i = 0\r\n while (holdsubs[subcount][i].isdigit() == False):\r\n i+= 1\r\n firstint[subcount] = int(holdsubs[subcount][i])\r\n subcount+= 1\r\n slengths[subcount] = 0\r\n if (substr[pos] == ','):\r\n pos+= 1\r\n subpos = pos\r\n if pos >= len(substr):\r\n break\r\n if ((periodi[0] > periodi[1] and periodi[0] >= 0 and periodi[1] >= 0) or (periodi[0] >= 0 and periodi[1] < 0)):\r\n substr = strlistadd(substr,0,'(')\r\n j = slengths[1]\r\n k = 0\r\n i = 1\r\n while i<= j:\r\n substr = strlistadd(substr,i,holdsubs[1][k])\r\n k += 1\r\n i += 1\r\n subpos = 1\r\n if (slengths[1] > 2):\r\n (substr,hold) = 
set0(substr,i)\r\n substr[subpos:len(substr)] = rewrite (substr[subpos:len(substr)])\r\n substr = joinlist(substr,hold)\r\n substr = strlistadd(substr,i,',')\r\n i+= 1\r\n subpos = i\r\n j += 1 + slengths[0]\r\n k = 0\r\n while i <= j:\r\n substr = strlistadd(substr,i,holdsubs[0][k])\r\n i += 1\r\n k += 1\r\n if (slengths[0] > 2):\r\n (substr,hold) = set0(substr,i)\r\n substr[subpos:len(substr)] = rewrite (substr[subpos:len(substr)])\r\n substr = joinlist(substr,hold)\r\n substr = strlistadd(substr,i,')')\r\n else:\r\n if (firstint[0] > firstint[1] and periodi[0] < 0 and periodi[1] < 0):\r\n substr = strlistadd(substr,0,'(')\r\n j = slengths[1]\r\n k = 0\r\n i = 1\r\n while i<= j:\r\n substr = strlistadd(substr,i,holdsubs[1][k])\r\n k += 1\r\n i += 1\r\n subpos = 1\r\n if (slengths[1] > 2):\r\n substr[subpos:len(substr)] = rewrite (substr[subpos:len(substr)])\r\n substr = strlistadd(substr,i,',')\r\n i+= 1\r\n subpos = i\r\n j += 1 + slengths[0]\r\n k = 0\r\n while i <= j:\r\n substr = strlistadd(substr,i,holdsubs[0][k])\r\n i+= 1\r\n k+= 1\r\n if (slengths[0] > 2):\r\n substr[subpos:len(substr)] = rewrite (substr[subpos:len(substr)])\r\n substr = strlistadd(substr,i,')')\r\n else:\r\n substr = strlistadd(substr,0,'(')\r\n subpos = 1\r\n if (slengths[0] > 2):\r\n (substr,hold) = set0(substr,slengths[0] + 1)\r\n substr[subpos:len(substr)] = rewrite (substr[subpos:len(substr)])\r\n substr = joinlist(substr,hold)\r\n substr = strlistadd(substr,slengths[0] + 1,',')\r\n subpos = slengths[0] + 2\r\n if (slengths[1] > 2):\r\n (substr,hold) = set0(substr,slengths[0] + slengths[1] + 2)\r\n substr[subpos:len(substr)] = rewrite (substr[subpos:len(substr)])\r\n substr = joinlist(substr,hold)\r\n substr = strlistadd(substr,slengths[0] + slengths[1] + 2,')')\r\n return substr", "def split_fix_msgs( longstring ) :\n return tuple( filter(lambda x: x, re.split('(8=FIX\\\\.[45]\\\\.[0-9]\\x01.*?\\x0110=[0-9]{3}\\x01)', longstring)) )", "def _input_as_gctmpca_char_priors(self,priors,char_order):\n # priors t be followed by a newline\n return ['\\t'.join([str(priors[c]) for c in char_order]),'']", "def splitTrackingNums(_pack):\n multi = [ i.strip() for i in _pack[1].split(';') ]\n splits_ = [ [_pack[0], m] for m in multi ]\n return splits_", "def parse_prerequisites(course_node):\n description = course_node.find_element_by_class_name(\n 'courseDescription').text\n if 'Prereq:' not in description:\n return []\n\n parts = description.split('Coreqs:')[0].split('Prereq:')[1].split('. ')\n if parts[0].endswith('.'):\n parts[0] = parts[0][:-1]\n\n prerequisites = set()\n p = re.compile(r'(([A-Za-z&]+) *\\d{2,}(?!\\.))')\n prev_dept = None\n for i in p.findall(parts[0]):\n if '.' in i[0]:\n continue\n\n dept = re.sub(r'[^A-Z]', '', i[0].upper())\n if dept in ('LEVEL', 'RECOMMEND', 'SPRING'):\n continue\n\n if not (i[0].startswith('&') or dept in ('AND', 'INTO', 'OR') or\n i[0].isdigit()):\n m = p.match(i[0])\n prev_dept = m.group(2)\n prerequisites.add(i[0].upper())\n continue\n\n prerequisites.add(\n '%s%s' %\n (prev_dept,\n i[0].replace('and ', '').replace('into ', '').replace('or ', '')))\n\n return sorted([i.replace(' ', '') for i in prerequisites])", "def split_number(string):\n\ttry:\n\t\tparts = string.split('-')\n\texcept AttributeError:\n\t\ttry:\n\t\t\tstring * string\n\t\t\treturn ('', string)\n\t\texcept TypeError:\n\t\t\treturn None\n\t\n\t\t\n\tend = parts[-1]\n\tif '.' 
in end:\n\t\ttry:\n\t\t\tnum = float(end)\n\t\texcept:\n\t\t\tnum = None\n\telse:\n\t\ttry:\n\t\t\tnum = int(end)\n\t\texcept:\n\t\t\tnum = None\n\tif num is not None:\n\t\tparts.pop(-1)\n\treturn ('-'.join(parts), num)", "def split_num(a_str):\n idx = None\n for i in iter(a_str):\n if i.isdigit():\n idx = a_str.index(i)\n break\n if idx == None:\n return (a_str[:idx], int('1'))\n else:\n return (a_str[:idx], int(a_str[idx:]))", "def punct_split(self, sentence):\n\t\tlEDU = []\n\t\toldc = \"\"\n\t\tl = \"\"\n\t\tfor c in sentence:\n\t\t\tl += c\n\t\t\tif c in self.list_punct_simple or oldc+c in self.list_punct_cmplx:\n\t\t\t\tif oldc == c and c == \"-\":\n\t\t\t\t\tlEDU.append(l[:-2])\n\t\t\t\t\tl = \"--\"\n\t\t\t\telse:\n\t\t\t\t\tlEDU.append(l)\n\t\t\t\t\tl = \"\"\n\t\t\toldc = c\n\t\tlEDU.append(l)\n\t\t\n\t\treturn lEDU", "def course_name(input):\n for course in config.current_courses:\n if strip_string(course) == strip_string(input):\n return course\n\n return input", "def split_str_digit(s):\n res = []\n for m in re.finditer('(\\d*)(\\D*)', s):\n for g in m.groups():\n if g != '':\n try:\n res.append(int(g))\n except ValueError:\n res.append(g)\n return tuple(res)", "def _split(string):\n out = [\"\", \"\"]\n for i in string:\n if i.isalpha():\n out[0] += i\n elif i.isnumeric() or i == \".\":\n out[1] += i\n return out", "def splitpop(string, delimiter):\n if delimiter not in string:\n string += delimiter\n fields = string.split(delimiter)\n return delimiter.join(fields[:-1]), fields[-1]", "def _parse_course_id_from_string(input_str):\r\n m_obj = re.match(r'^/courses/(?P<course_id>[^/]+/[^/]+/[^/]+)', input_str)\r\n if m_obj:\r\n return SlashSeparatedCourseKey.from_deprecated_string(m_obj.group('course_id'))\r\n return None", "def infix_to_postfix(s):\n result = \"\" # output string\n op = Stack() # operator stack\n i = 0 # index to 's'\n while i < len(s):\n if s[i] in \"0123456789\":\n while i < len(s) and s[i] in \"0123456789\":\n result += s[i]\n i += 1\n result += \" \"\n continue\n if s[i] == '(':\n op.push(s[i])\n elif s[i] == ')':\n top = op.pop()\n while top != '(':\n result += top + \" \"\n top = op.pop()\n else: # s[i] is +,-,*,/\n while not op.is_empty() and not higher_prec(s[i], op.peek()):\n result += op.pop() + \" \"\n op.push(s[i])\n i += 1\n while not op.is_empty():\n result += op.pop() + \" \"\n return result", "def split_on_sign(designator):\n if diag:\n print \"split_on_sign: received\",designator\n if re.search('\\+',designator):\n if diag:\n print \"Splitting on +\"\n parts = designator.split('+')\n parts.append('+')\n elif re.search('-',designator):\n if diag:\n print \"Splitting on -\"\n parts = designator.split('-')\n parts.append('-')\n else:\n if diag:\n print \"No split\"\n parts = [\"\",designator,\"\"]\n # pulsar names can have a \"-1, -2, ...\" suffix.\n if diag:\n print \"Parts found:\",parts\n if len(parts) > 3:\n parts = parts[:3]\n elif re.search('-',parts[1]):\n subparts = parts[1].split(\"-\")\n parts = parts[:1] + [subparts[0]]\n # Pulsars may have alphabetic suffixes of one more more characters\n else:\n test = parts[1]\n for i in range(1,len(test)):\n if diag:\n print i,\"Testing\",test\n if test[-1].isalpha():\n test = test[:-1]\n else:\n parts[1] = test\n break\n if diag:\n print \"stripped is\",parts[1]\n return parts", "def parse_sub_num(s, parser):\n s = s.strip()\n if s == '*':\n return s\n nums = s.split(',')\n msg = 'Invalid sub-entry number.'\n res = set()\n for num in nums:\n num = num.strip()\n if num.isdigit():\n try:\n num = 
int(num)\n assert num > 0\n res.add(num)\n except:\n raise parser.error(msg)\n else:\n try:\n m = re.search('(\\d+)-(\\d+)', num)\n if m is None:\n raise parser.error(msg)\n else:\n a = int(m.group(1))\n b = int(m.group(2))\n assert a > 0\n assert b > 0\n assert a <= b\n r = range(a, b + 1)\n res.update(r)\n except:\n raise parser.error(msg)\n res = list(res)\n res.sort()\n return res", "def two_passes(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n splitted = s.split('#')\n res = ''\n\n for i in range(len(splitted)):\n j = 0\n if i + 1 < len(splitted) and len(splitted[i]) > 2:\n while j < len(splitted[i]) - 2:\n res += alpha_map[splitted[i][j]]\n j += 1\n\n if i + 1 < len(splitted):\n res += alpha_map[splitted[i][j:]]\n else:\n while j < len(splitted[i]):\n res += alpha_map[splitted[i][j]]\n j += 1\n return res", "def parseSubscripts(part):\n subs = str(part)\n subs = part.split(\"]\")[:-1]\n return [int(sub[1:]) for sub in subs]", "def split_bucket_university_course(current_bucket_university_course):\r\n\r\n course_separator_delimiter = ['computer', 'artificial', 'cyber', 'network', 'data', 'machine']\r\n\r\n for delimiter in course_separator_delimiter:\r\n separated_list = current_bucket_university_course.split(delimiter,1)\r\n if len(separated_list) == 2:\r\n return separated_list[0], delimiter+separated_list[1]\r\n\r\n return None, None", "def parse_grouping(p: str) -> tuple[int, int]:\n width = len(p)\n g1 = p.rfind(',')\n if g1 == -1:\n return 1000, 1000\n g1 = width - g1 - 1\n g2 = p[:-g1 - 1].rfind(',')\n if g2 == -1:\n return g1, g1\n g2 = width - g1 - g2 - 2\n return g1, g2", "def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n 
# If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix", "def replaceCompetitions(comp):\n comp = comp.lower()\n if 'six nations' in comp:\n comp = 'Six Nations'\n elif 'internationals' in comp:\n comp = 'Internationals'\n elif 'tour' in comp:\n comp = 'Tour'\n elif 'world cup' in comp:\n comp = 'World Cup'\n return comp", "def process_characters(s: str) -> tuple:\n chars = Counter(s)\n # Number of sentences\n sentences = 0\n for char in '.?!':\n sentences += chars[char]\n # Total number of punctuation signs\n punct_signs = 0\n for char in string.punctuation:\n punct_signs += chars[char]\n # Number of vowels\n vowels = 0\n for char in 'aeiouy':\n vowels += chars[char]\n # Number of digits\n digits = 0\n for char in '0123456789':\n digits += chars[char]\n return sentences, punct_signs, chars['\"'] // 2, vowels, digits", "def test_tax_court_citation_extractor(self):\n\n test_pairs = (\n (\n \"\"\" 1 UNITED STATES TAX COURT REPORT (2018)\n\n\n\n UNITED STATES TAX COURT\n\n\n\n BENTLEY COURT II LIMITED PARTNERSHIP, B.F. BENTLEY, INC., TAX\n MATTERS PARTNER, Petitioner v.\n COMMISSIONER OF INTERNAL REVENUE, Respondent\n\n\n\n Docket No. 5393-04. Filed May 31, 2006.\n\n\n\n Nancy Ortmeyer Kuhn, for petitioner.\n \"\"\",\n \"1 T.C. 2018\",\n ),\n (\n \"\"\" T.C. Memo. 2003-150\n\n\n\n UNITED STATES TAX COURT\n\n\n\n RIVER CITY RANCHES #1 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #2 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #3 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n\n\n \"\"\",\n \"2003 T.C. Memo. 150\",\n ),\n (\n \"\"\" T.C. Summary Opinion 2003-150\n\n\n\n UNITED STATES TAX COURT\n\n\n\n RIVER CITY RANCHES #1 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #2 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n RIVER CITY RANCHES #3 LTD., LEON SHEPARD,\n TAX MATTERS PARTNER,\n\n\n \"\"\",\n \"2003 T.C. Summary Opinion 150\",\n ),\n (\n \"\"\"\n MICHAEL KEITH SHENK, PETITIONER v. COMMISSIONER\n OF INTERNAL REVENUE, RESPONDENT\n\n Docket No. 5706–12. Filed May 6, 2013.\n\n P was divorced from his wife, and their 2003 ‘‘Judgment of\n Absolute Divorce’’ provided that his ex-wife would have pri-\n mary residential custody of their three minor children. The\n judgment provided that the dependency exemption deductions\n for the three children would be divided between the two ex-\n spouses according to various conditions but did not provide\n that the ex-wife must execute in P’s favor a Form 8332,\n ‘‘Release of Claim to Exemption for Child of Divorced or Sepa-\n rated Parents’’. The children resided with P’s ex-wife for more\n than half of 2009, and P’s ex-wife did not execute in P’s favor\n any Form 8332 or equivalent document for any year. 
For 2009\n P timely filed a Federal income tax return on which he\n claimed dependency exemption deductions and the child tax\n credit for two of the children, consistent with his under-\n standing of the terms of the judgment, but he did not attach\n any Form 8332 to his return. He also claimed head-of-house-\n hold filing status. His ex-wife, the custodial parent, timely\n filed a Federal income tax return for 2009 on which she also\n\n 200\n\n\n\n\nVerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00001 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n\f (200) SHENK v. COMMISSIONER 201\n\n\n claimed two dependency exemption deductions, so that one\n child was claimed on both parents’ returns. R allowed to P the\n dependency exemption deduction for one of the children but\n disallowed his claim for the dependency exemption deduction\n for the child who had also been claimed by the custodial\n parent. At trial P contended he is entitled to a dependency\n exemption deduction for all three children. Held: Since the\n custodial parent did not execute, and P could not and did not\n attach to his return, any Form 8332 or equivalent release, P\n is not entitled under I.R.C. sec. 152(e)(2)(A) to claim the\n dependency exemption deduction or the child tax credit. Held,\n further, where both the custodial parent and the noncustodial\n parent have claimed for the same year a dependency exemp-\n tion deduction for the same child, a declaration signed by the\n custodial parent after the period of limitations for assess-\n ments has expired as to the custodial parent could not qualify\n under I.R.C. sec. 152(e)(2)(A), and therefore there is no reason\n to grant P’s request to leave the record open so that he may\n obtain and proffer such a declaration. Held, further, P is not\n entitled to head-of-household filing status under I.R.C. sec.\n 2(b)(1) nor to the child tax credit under I.R.C. sec. 24.\n\n Michael Keith Shenk, for himself.\n Shari Salu, for respondent.\n GUSTAFSON, Judge: The Internal Revenue Service (IRS)\n determined a deficiency of $3,136 in the 2009 Federal income\n tax of petitioner Michael Keith Shenk. Mr. Shenk petitioned\n this Court, pursuant to section 6213(a), 1 for redetermination\n of the deficiency. After Mr. Shenk’s concession that he\n received but did not report $254 in dividend income, the\n issue for decision is whether Mr. Shenk is entitled to a\n dependency exemption deduction for one of his children\n under section 151(c), a child tax credit for that child under\n section 24(a), and head-of-household filing status under sec-\n tion 2(b)(1). On these issues, we hold for the IRS.\n FINDINGS OF FACT\n\n The judgment of divorce\n Mr. Shenk was married to Julie Phillips, and they have\n three minor children—M.S., W.S., and L.S. They divorced in\n 2003. The family court’s ‘‘Judgment of Absolute Divorce’’ pro-\n 1 Unless otherwise indicated, all citations of sections refer to the Internal\n\n Revenue Code (26 U.S.C.) in effect for the tax year at issue, and all cita-\n tions of Rules refer to the Tax Court Rules of Practice and Procedure.\n\n\n\n\nVerDate Nov 24 2008 10:59 Jul 11, 2014 Jkt 372897 PO 20012 Frm 00002 Fmt 3857 Sfmt 3857 V:\\FILES\\BOUND VOL. WITHOUT CROP MARKS\\B.V.140\\SHENK JAMIE\n\f 202 140 UNITED STATES TAX COURT REPORTS (200)\n\n\n vided: that Ms. Phillips was ‘‘awarded primary residential\n custody’’ of the parties’ three children; and that Mr. 
Shenk\n would be liable for child support payments; but that, as to\n dependency exemptions—\"\"\",\n \"140 T.C. 200\",\n ),\n )\n site = tax.Site()\n for q, a in test_pairs:\n results = site.extract_from_text(q)\n cite_string = \"%s %s %s\" % (\n results[\"Citation\"][\"volume\"],\n results[\"Citation\"][\"reporter\"],\n results[\"Citation\"][\"page\"],\n )\n\n self.assertEqual(cite_string, a)\n print \"✓\", cite_string", "def substitute_num(self, numbers, qa3_answer):\n question = self\n for match in re.finditer('[0-9]+', question):\n num = match.group(0)\n i = qa3_answer.index_by_chunk(num)\n if i is None or qa3_answer.result[i].get_type() != 'refYear':\n if num not in numbers:\n numbers.append(num)\n question = re.sub(num, '<NUM' + string.ascii_uppercase[numbers.index(num)] + '>', question, 1)\n return Qa3Question(question)", "def partSum(numString, multiplier):\r\n initialsum = 0\r\n for i in range(6):\r\n initialsum += int(numString[i])\r\n finalsum = initialsum * int(multiplier)\r\n return finalsum", "def split_alnum(s):\n def convert(x):\n try:\n return int(x)\n except ValueError:\n return x\n r = []\n digit = None\n for c in s:\n d = c.isdigit()\n if d != digit:\n digit = d\n r += [c]\n else:\n r[-1] += c\n return [convert(x) for x in r]", "def part_1():\n input_ = parse_input()\n cups = turn_input_into_cups(input_)\n cups = solve(cups, first_cup=cups[input_[0]], turns=100)\n\n answer = []\n current_cup = cups[1].next\n while current_cup != cups[1]:\n answer.append(str(current_cup.number))\n current_cup = current_cup.next\n\n return \"\".join(answer)", "def get_cpd_ids(string):\n return [x for x in string.split(\" \") if x.startswith(\"C\")]", "def natural_key(string_):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_)]", "def natural_key(string_):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_)]", "def partition(string, n):\n letter_groups = zip_longest(*[iter(string)]*n, fillvalue='')\n return (\n ''.join(group)\n for group in letter_groups\n )", "def validate_procedure(procedure):\n # If it is not a string, use \"charmm\"\n if not isinstance(procedure, str):\n return \"charmm\"\n else:\n # Split on white space and replace it with underscores\n items = procedure.split()\n procedure = ''\n for i, item in enumerate(items):\n if i != 0:\n procedure += \"_\"\n procedure += item\n return procedure", "def _parse_mut(subs):\n if subs!=\"0\":\n subs = [[subs.replace(subs[-2:], \"\"),subs[-2], subs[-1]]]\n return subs", "def second_punctuation_pass(self, data):\n try:\n data = data.split(\" \")\n except AttributeError:\n pass\n\n for index, word in enumerate(data):\n word = re.sub('^-|\\(|\\)|\"', '', word)\n data[index] = word\n\n return data", "def normalize_puzzle_string(string):\n string = re.sub(r\"[\\s|+-]+\", \"\", string)\n string = re.sub(r\"[0_]\", \".\", string)\n return string", "def decompTwo(string):\r\n index = 0\r\n while True:\r\n index = string.find('->', index)\r\n if index > 0:\r\n if isFormula(string[:index]) and isFormula(string[index + 2:]):\r\n return [string[:index], string[index + 2:]]\r\n else:\r\n index += 2\r\n else:\r\n break\r\n return ['', string]", "def main(text, data, answer, temp, number):\n for char in text:\n if char.isdecimal():\n number += char\n elif temp.isdecimal():\n data.append(int(number))\n number = \"\"\n temp = char\n for i in data:\n answer += i\n print(answer)", "def soar_splitpart(value, index, split_chars=' - '):\n splits = value.split(split_chars)\n if len(splits) > index:\n 
return splits[index]\n\n return value", "def test__parse_course_id_from_string(self):\r\n COURSE_ID = u'org/num/run' # pylint: disable=C0103\r\n COURSE_URL = u'/courses/{}/otherstuff'.format(COURSE_ID) # pylint: disable=C0103\r\n NON_COURSE_URL = u'/blahblah' # pylint: disable=C0103\r\n self.assertEqual(\r\n _parse_course_id_from_string(COURSE_URL),\r\n SlashSeparatedCourseKey.from_deprecated_string(COURSE_ID)\r\n )\r\n self.assertIsNone(_parse_course_id_from_string(NON_COURSE_URL))", "def test_splitDelimiters(self):\n r = irc.split(\"xx yyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)\n r = irc.split(\"xx\\nyyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)", "def ExtractNumbers(s):\n\n t = s.strip('[]\\n')\n comma_space = r', '\n re_comma_space = re.compile(comma_space)\n z = re_comma_space.split(t)\n #print z\n return z", "def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def process_reference(ref):\n logger.debug(\"==================================================\")\n logger.debug(\"'{}'\".format(ref))\n stripped = ref.lower().strip()\n stripped = str(unidecode.unidecode(unicode(stripped, 'utf8')))\n\n conf_inds = []\n for conf_str in conf_strings:\n found_ind = stripped.find(conf_str)\n\n if found_ind > 0:\n logger.debug(\"found conf string {} at ind {}\".format(conf_str, found_ind))\n conf_inds.append(found_ind)\n\n if not conf_inds:\n logger.debug(\"found no conference strings\")\n conf_strip = stripped\n else:\n # Only consider conference strings in the second half of the reference.\n str_start = 0\n for ind in conf_inds:\n if ind > len(stripped)/2:\n str_start = ind\n\n\n if str_start == 0:\n logger.debug(\"No conf string found in the second half of the given reference\")\n conf_strip = stripped\n else:\n last_dot = stripped.rfind(\".\", 0, str_start) + 1\n last_comma = stripped.rfind(\",\", 0, str_start) + 1\n\n conf_strip = stripped[:max(last_dot, last_comma)]\n logger.debug(\"conf string started at {}, last dot at {}, last comma at 
{}\".format(str_start, last_dot, last_comma))\n logger.debug(\"all after latest punctuation: {}\".format(conf_strip))\n\n sep_inds, sep_inds_rev = punctuation_density_separate(conf_strip)\n if not sep_inds: # if it's empty this is usually just a spurious reference\n return ()\n\n # If there are two or fewer separation indices then the separation\n # wasn't able to get good separation for authors, title and journal,\n # so we don't change anything\n if len(sep_inds) > 2:\n # if there are 3 or more sep inds, we want to get all the text in the middle part\n punc_sep = conf_strip[sep_inds[0]:sep_inds_rev[-2]].strip()\n elif len(sep_inds) == 2:\n punc_sep = conf_strip[sep_inds[0]:sep_inds_rev[-1]].strip()\n else:\n punc_sep = conf_strip\n\n logger.debug(\"conf_strip: {}\\npunc_sep: {}\\n\".format(conf_strip, punc_sep))\n\n full_strip = punc_sep\n\n ref_stopped = [word for word in full_strip.split(\" \") if word not in stopwords and len(word) > 2]\n ref_nopunc = \" \".join(ref_stopped).translate(None, punctuation).split(\" \")\n ref_set = set(ref_nopunc)\n return (ref.strip(), ref_stopped, ref_nopunc, ref_set)", "def get_info_list(info_string, course):\n info_list = []\n split_on_newline = info_string.split(\"\\n\")\n for elem in split_on_newline:\n split = elem.split(\": \")\n for s in split:\n info_list.append(s)\n info_list = info_list[1:-1]\n info_tags = [\n 'session', 'school', 'credits', 'level', 'offered',\n 'visiting_students', 'erasmus_students',\n ]\n i = 0\n for info_tag in info_tags:\n course[info_tag] = new_dict(\n info_list[i] + ': ', info_list[i + 1])\n i += 2\n return course", "def contract(s):\n if not s: return s\n\n tokens = s.split()\n old = tokens[0]\n count = [[1, old]]\n\n for t in tokens[1:]:\n if t == old:\n count[-1][0] += 1\n else:\n old = t\n count.append([1, t])\n\n return \" \".join(\"%d*%s\" % (c, t) for c, t in count)", "def get_courses(std):\n return std[2]", "def complete_description(description):\n local_find_all_re = find_all_re\n local_find_one_re = find_one_re\n local_find_series_2_re = find_series_2_re\n local_find_series_3_re = find_series_3_re\n local_course_re = course_re\n local_three_digits = three_digits\n\n find_all = re.findall(local_find_all_re, description)\n find_one = re.findall(local_find_one_re, description)\n \n def remove_in(a, b):\n remove = [y for x in a for y in b if y in x]\n a.extend(b)\n for x in remove: a.remove(x)\n return a\n \n if find_one:\n find_all = remove_in(find_all, find_one)\n for i, x in enumerate(find_all):\n completed = []\n find_all[i] = x.replace(' ', '')\n number = re.search(local_three_digits, find_all[i]).group(0)\n for crs in re.findall(local_course_re, find_all[i]):\n completed.append(f'{crs}{number}')\n description = description.replace(x, ' {}'.format('/'.join(completed)), 1)\n\n find_series_2 = re.findall(local_find_series_2_re, description)\n find_series_3 = re.findall(local_find_series_3_re, description)\n if find_series_2 or find_series_3:\n find_series_2 = remove_in(find_series_3, find_series_2)\n for i, series in enumerate(find_series_2):\n find_series_2[i] = series.replace(' ', '')\n course_dep = re.search(local_course_re, find_series_2[i])\n if course_dep: \n depmnt = course_dep.group(0)\n description = description.replace(series, f'{depmnt}{find_series_2[i][-3:]}', 1)\n return description", "def test_splitPartiesStringWithoutDivider(self):\n parties_str = \"Appellant: Denise O'B Respondent: Eugene D, Mary C\"\n expected = {\n \"applicant\": [{\"name\": \"denise o'b\"}],\n \"respondent\": [{\"name\": 
\"eugene d\"},\n {\"name\": \"mary c\"}]\n }\n result = split_parties.splitCaseParties(\"tribunal\", parties_str)\n self.assertEqual(expected, result)", "def question_8():\n subject = input(\"Enter subject code: \")\n while subject != \"\":\n if len(subject) == 6:\n it_string = \"\"\n if subject.startswith(\"CP\"):\n it_string = \" IT\"\n if subject[2] == '1':\n year_string = \"first-year\"\n elif subject[2] == '2':\n year_string = \"second-year\"\n elif subject[2] == '3':\n year_string = \"third-year\"\n else:\n year_string = \"Masters or other\"\n print(f\"That is a {year_string}{it_string} subject.\")\n else:\n print(\"Invalid subject code\")\n subject = input(\"Enter subject code: \")", "def set_split(self):\n #Regular expressions; try 1 first, then 2, etc.\n rex1 = re.compile('F?LD')\n rex2 = re.compile('[LF]?LQ')\n \n #For regular expression, check if there is a match that is >10 AA from the end\n if re.search(rex1, self.sequence) and len(re.split(rex1, self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex1.finditer(self.sequence)][-1]\n# end += 16 #TODO why +15/16?\n elif re.search(rex2, self.sequence) and len(re.split(rex2,self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex2.finditer(self.sequence)][-1]\n# end += 15\n else:\n self.split_index = -1\n self.core = self.sequence\n self.leader = ''\n return\n self.split_index = end\n self.leader = self.sequence[:end]\n self.core = self.sequence[end:]", "def infixToPostfix(inFixStr):\n postFixList = []\n s = Stack()\n chList = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n prec = {\"(\": 0, \"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2} # operator precedence\n\n tok = inFixStr.split(\" \")\n for ch in tok: # ch can be (,), operand, operator\n if ch in chList: # the easy case when token is an operand\n postFixList.append(ch)\n elif ch == \"(\": # easy case of (\n s.push(ch)\n elif ch == \")\": # keep popping and appending until (\n top = s.pop()\n while top != \"(\":\n postFixList.append(top)\n top = s.pop() # pop next\n else: # now we are at opeartors\n # pop higher order operators first\n while not s.isEmpty() and prec[s.peek()] > prec[ch]:\n postFixList.append(s.pop())\n s.push(ch) # push current opeartor\n\n while not s.isEmpty(): # pop everything else in the stack\n postFixList.append(s.pop())\n return \" \".join(postFixList)", "def split_canonical_name(cname):\n return tuple(cname.rsplit('-', 2))", "def format_cnpj(x: str) -> str:\n x = strip_non_digits(str(x))\n return f\"{x[:2]}.{x[2:5]}.{x[5:8]}/{x[8:12]}-{x[12:14]}\"", "def split(n):\n rest_of_num, last_num = n // 10, n % 10\n return rest_of_num, last_num", "def fix_extra(in_str):\n spaced = camel_re.sub(\"_\", in_str)\n return spaced.split(\"_\")[0]", "def hunHL(s, keepdash=0):\n tt=re.split( '([a-zA-Z]+[0-9]*-?)|([^a-zA-Z])', s )\n tt = [ t for t in tt if (t!=None and t!='')]\n return tt", "def extract_ctcp(s):\n messages = s.split(X_DELIM)\n\n normal_msgs = list(filter(None, messages[::2]))\n extended_msgs = list()\n\n # messages[1::2] = extended_msgs\n # but first let's parse them...\n for e_msg in map(ctcp_dequote, filter(None, messages[1::2])):\n tag = e_msg\n data = None\n if SPC in e_msg:\n tag, data = e_msg.split(SPC, 1)\n extended_msgs.append((tag.upper(), data))\n\n return normal_msgs, extended_msgs", "def alphanum_key(s):\n return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]", "def alphanum_key(s):\n return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]", "def decomp(net):\n \n pt_netname = Word(alphanums) + 
Suppress('_') + restOfLine\n pt_netnamebus = Word(alphanums) + Suppress('_') + Word(alphanums+'_') + \\\n Suppress('[') + Word(nums) + Suppress(']')\n parser = (pt_netnamebus | pt_netname)\n \n return parser.parseString(net)", "def formatChordLabel(cl):\n # The only change I can think of: Cmaj -> C\n cl = cl.replace(\"maj\", \"\") if cl.endswith(\"maj\") else cl\n cl = cl.replace(\"-\", \"b\")\n return cl", "def add_concat(infix_regex: str):\n\n result = \"\"\n\n # we use None to symbolize the start of the string\n cant_concat_from = ['(', '|', None]\n cant_concat_to = ['*', '+', ')', '|']\n last_char = None\n\n for char in infix_regex:\n if char not in cant_concat_to and last_char not in cant_concat_from:\n result += '.'\n result += char\n last_char = char\n\n return result", "def extract_numbers_safe(cls, s, decimals=False):\n if decimals:\n tmp = ''.join([i for i in cls.escape(s) if ((i >= '0') and (i <= '9') or i == '.')])\n\n parts = tmp.split('.')\n\n try:\n output = '{a}.{b}'.format(a=parts[0], b=parts[1])\n except IndexError:\n output = parts[0]\n\n else:\n output = ''.join([i for i in cls.escape(s) if (i >= '0') and (i <= '9')])\n\n try:\n if s[0] == '-':\n output = '-{s}'.format(s=output)\n except:\n pass\n\n return output", "def phoneNumberExtractor(self,data):\n\t\tdata = data.replace(\"\\r\", \" \")\n\t\tdata = data.replace(\"\\r\\n\", \" \")\n\n\t\t#first is identifying 10 digits code\n\t\tdata = data.split()\n\t\tresult = []\n\t\tfor word in data:\n\t\t\tres = None\n\t\t\tres = word if word.isdecimal() and len(word) == 10 and not res else res\n\t\t\tres = word[2:] if word.isdecimal() and len(word) == 12 and not res else res\n\t\t\tres = word[3:] if word[3:].isdecimal() and len(word) == 10 and not res else res\n\t\t\tif (\"(\" and \")\") in word or \"-\" in word:\n\t\t\t\tword = word.replace(\"(\",\"\")\n\t\t\t\tword = word.replace(\")\",\"\")\n\t\t\t\tword = word.replace (\"-\",\"\")\n\t\t\t\tres = word if(len(word) == 10) else None\n\t\t\tif res:\n\t\t\t\tresult.append(res)\n\t\t\t\tdel(res)\n\t\treturn set(result)", "def digit_concats(a, b, c):\n a_str = str(a)\n p = a_str.index(\".\")\n return a_str[p - b:p] + a_str[p:p + c + 1]", "def split(self):\n out = []\n if self.section != \"\":\n out.append(self.section)\n out.append(self.topic)\n if self.sub_topic != \"\":\n out.append(self.sub_topic)\n if self.cutter != \"\":\n out.append(self.cutter)\n if self.version != 0:\n out.append(\"v.\" + str(self.version))\n if self.year != 0:\n out.append(str(self.year) + self.work_letter)\n if self.other != \"\":\n out.append(self.other)\n if self.copy != 0:\n out.append(\"c.\" + str(self.copy))\n return out", "def infix_to_postfix(string):\n tokenlist = string.split()\n output = []\n stack = create_stack()\n for token in tokenlist:\n if token == '(':\n stack.push(token)\n elif token == ')':\n toptoken = stack.pop()\n while toptoken != '(':\n output.append(toptoken)\n toptoken = stack.pop()\n elif token == '*' or token == '/':\n toptoken = stack.top()\n while toptoken in ['*','/']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n elif token == '+' or token == '-':\n toptoken = stack.top()\n while toptoken in ['*','/','+','-']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n else:\n output.append(token)\n while stack.length() > 0:\n output.append(stack.pop())\n space= ' '\n newstr = space.join(output)\n return newstr", "def multiply_str(num1,num2):\r\n \r\n max_num_decs = max_num_decims(num1, num2)\r\n \r\n #total_num_decs = 
num_decims(num1) + num_decims(num2)\r\n \r\n num1, num2 = decim_modif_str(num1, num2)\r\n \r\n # Calculate the steps.\r\n \r\n steps = []\r\n for i in range(0,len(num2)): \r\n \r\n if (int(num1) * int(num2[::-1][i]) != 0):\r\n steps.append( str(int(num1)*int(num2[::-1][i])) + \"0\"*i )\r\n \r\n else:\r\n steps.append( \"0\"*len(num1) + \"0\"*i )\r\n \r\n # Add the steps.\r\n \r\n sum_steps = \"0\"\r\n for i in range(0,len(num2)):\r\n sum_steps = sum_str(sum_steps,steps[i])\r\n \r\n if len(sum_steps) != len(steps[-1]):\r\n \r\n n_zeros = abs(len(sum_steps)-len(steps[-1]))\r\n sum_steps = \"0\"*n_zeros + sum_steps\r\n \r\n # Put the decimal point.\r\n \r\n if max_num_decs != 0:\r\n \r\n result = sum_steps[:-2*max_num_decs] + \".\" + sum_steps[-2*max_num_decs:]\r\n \r\n elif max_num_decs == 0:\r\n \r\n result = sum_steps \r\n \r\n result = del_decim_zeros(result)\r\n \r\n return result", "def alphanum_key(s):\n return [ tryint(c) for c in re.split('(\\-?[0-9]+)', s) ]", "def front3(str):\r\n if len(str)<4:\r\n return 3*str\r\n else:\r\n return 3*str[:3]" ]
[ "0.59728956", "0.5590606", "0.5450177", "0.533765", "0.5325984", "0.51580155", "0.5109719", "0.5105101", "0.5051153", "0.5044799", "0.49115965", "0.48173293", "0.48144338", "0.48039567", "0.47931984", "0.47742018", "0.4764439", "0.47197244", "0.47027755", "0.46838984", "0.46251243", "0.46035188", "0.46020606", "0.4599271", "0.4598703", "0.45637363", "0.455487", "0.45362973", "0.4534617", "0.45281088", "0.45271453", "0.4505637", "0.44941363", "0.44922897", "0.4477428", "0.44702435", "0.44321716", "0.44313332", "0.44251138", "0.4420415", "0.44139758", "0.44081536", "0.4371906", "0.43665773", "0.43632498", "0.43526703", "0.43434295", "0.4315038", "0.43117628", "0.4307826", "0.43047583", "0.43036017", "0.42930436", "0.42915094", "0.42882743", "0.42859024", "0.42847976", "0.42840737", "0.42830694", "0.42830694", "0.4275611", "0.42755997", "0.4255814", "0.42534983", "0.42470837", "0.42425773", "0.42423424", "0.4239081", "0.42246705", "0.422255", "0.42148054", "0.42140818", "0.4208204", "0.42049772", "0.42014533", "0.41941813", "0.41905713", "0.41903013", "0.41889468", "0.41830048", "0.41819775", "0.41810408", "0.417534", "0.41723484", "0.41715088", "0.41686985", "0.4168513", "0.416758", "0.416758", "0.4166843", "0.4162752", "0.41593876", "0.41584775", "0.4158056", "0.41555345", "0.41541272", "0.41517463", "0.4149994", "0.4142643", "0.4139283" ]
0.7670243
0
Add a new column named placeholder filled with the arg_1 value
def main(dataframe: pd.DataFrame, arg_1: str='nothing') -> pd.DataFrame:
    dataframe["placeholder"] = arg_1
    return dataframe
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_column(df, colTitle, colIndex, fillValue):\n if colTitle not in df.columns:\n df.insert(colIndex, colTitle, fillValue, True)\n return df", "def add_column_parameter(params, name, dataset, args, key):\n column_id = args.get_value(key, raise_error=False)\n if column_id is None:\n return\n column = dataset.column_by_id(column_id)\n params.append(name + '(' + column.name_in_rdb + ')')", "def fill_col(col, x):\n col.append(x)\n return col", "def act_on_column_name(self, *, arg, value):\n assert arg is None\n assert isinstance(value, str)", "def addColumn(self, *column):\n self.insertColumn(self._width, *column)", "def _create_placeholders(self):\n raise NotImplementedError", "def add_column(values, df=pandas.DataFrame()):\n df['col_{}'.format(len(df.columns))] = values\n return df", "def set_col(self, *, d, colname: str, values):\n d2 = d.with_columns([pl.Series(values=values).alias(colname)])\n return d2", "def add_placeholder(self, init_from=None, datatype=None, name=None):\n\n if init_from is None and datatype is None:\n raise ValueError(\"Datatype not set!\")\n if init_from is not None:\n _, datatype = numpy_val_to_builtin_val(init_from)\n return self._build_op('Placeholder', [], datatype=datatype, name=name)", "def _addColumn(self, table, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + table + \" ADD COLUMN \" + str(column) + \" \" + getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def add_column(values, df=None):\n # Updates the function to create a default DataFrame\n if df is None:\n df = pandas.DataFrame()\n df['col_{}'.format(len(df.columns))] = values\n return df", "def better_add_column(values, df=None):\n # Update the function to create a default DataFrame\n if df is None:\n df = pandas.DataFrame()\n df['col_{}'.format(len(df.columns))] = values\n return df", "def act_on_column_name(self, *, arg, value):\n assert isinstance(arg, (pl.DataFrame, type(None)))\n assert isinstance(value, str)\n return PolarsTerm(polars_term=pl.col(value), is_column=True)", "def add_column(values, df=None):\n if df is None:\n df=pandas.DataFrame()\n df['col_{}'.format(len(df.columns))] = values\n return df", "def register_input(self, arg_):\n self.input_placeholder_ids += (self._store_placeholders(arg_).value,)", "def add_blank_data_column(self):\n\n header_title, ok_pressed = QInputDialog.getText(self, \"Add Column\", \"Enter heading for the column:\",\n QLineEdit.Normal, \"\")\n if ok_pressed and header_title != '':\n # print(header_title)\n\n default_value, set_default_pressed = QInputDialog.getText(self, \"Set Default Value\",\n \"Enter default value to set for column if any:\",\n QLineEdit.Normal, \"\")\n\n row_count = self.csv_data_table.rowCount()\n last_column_count = self.csv_data_table.columnCount()\n self.csv_data_table.insertColumn(last_column_count)\n for empty_row in range(0, row_count):\n item = QTableWidgetItem(default_value)\n self.csv_data_table.setItem(empty_row, last_column_count, item)\n\n # TODO: fix untraced bug present in show/hide columns\n self.column_headers.append(header_title)\n self.column_headers_all.append(header_title)\n # print(self.column_headers)\n # print(self.column_headers_all)\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)", "def _addcolumns(self, columnname, columndata=\"\"):\n self[columnname] = columndata", "def PlaceHolders(sql_args):\n return ','.join('%s' for _ in sql_args)", "def 
get_geom_placeholder(self, value, srid):\r\n if hasattr(value, 'expression'):\r\n placeholder = '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))\r\n else:\r\n placeholder = '%s(%%s)' % self.from_text\r\n return placeholder", "def add_column(self, fieldname, column, align=..., valign=...):\n ...", "def wrap_in_func(self, func, *cols):\n return '{func}({args})'.format(func=func,\n args=', '.join(cols))", "def test_dummydb_add_data_to_table_wrong_column_name(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", four=1)", "def fill_row(row, x):\n row.append(x)\n return row", "def augment_column(self, col: pd.Series,) -> pd.Series:", "def _add_column_and_format(self, table, label, column):\n table[label] = column\n if label in self._formats:\n table.set_format(label, self._formats[label])", "def __init__(self, placeholder):\r\n super().__init__(placeholder.name)\r\n self._placeholder = placeholder", "def __init__(self, placeholder):\n super().__init__(placeholder.name)\n self._placeholder = placeholder", "def arg_wrapper(attr, arg):\n return \"{}{}{}\".format(color_start(\"white\"), arg, color_start(attr))", "def add_widget_singlecolumn(self, name, widget):\n if recoverer is not None:\n self.widget_dict[name] = self.stato_iniziale(name, widget)\n else:\n self.widget_dict[name] = widget", "def placeholder(self, name, type_name):\n provenance = NQExprProvenance(\n operation='placeholder', args=(type_name, name))\n value = tf.compat.v1.placeholder(\n tf.float32, shape=[None, self.get_max_id(type_name)], name=name)\n return self.as_nql(value, type_name, provenance)", "def add_column(self, col_name, definition):\n if not self.column_exists(col_name):\n self.execute(self.commands.add_column(self.name, col_name, definition))", "def column_bind(arguments):\n return Component(\n \"ColumnBind\",\n arguments=arguments,\n options={\n \n },\n constraints=None)", "def put(self,colname,value,**kwargs):\n arguments = {'where' : \"String e.g 'chainID = 'A''\",\n 'index' : \"Array e.g. [27,28,30]\",\n 'name' : \"'CA' atome name\",\n 'query' : \"SQL query e.g. 'WHERE chainID='B' AND resName='ASP' \"}\n\n # the asked keys\n keys = kwargs.keys()\n\n # if we have more than one key we kill it\n if len(keys)>1 :\n print('You can only specify 1 conditional statement for the pdb2sql.put function')\n return\n\n # check if the column exists\n try:\n self.c.execute(\"SELECT EXISTS(SELECT {an} FROM ATOM)\".format(an=colname))\n except:\n print('Error column %s not found in the database' %colname)\n self.get_colnames()\n return\n\n\n # if we have 0 key we take the entire db\n if len(kwargs) == 0:\n query = 'UPDATE ATOM SET {cn}=?'.format(cn=colname)\n value = tuple([value])\n self.c.execute(query,value)\n return\n\n # otherwise we have only one key\n key = list(keys)[0]\n cond = kwargs[key]\n\n # select which key we have\n if key == 'where':\n query = 'UPDATE ATOM SET {cn}=? WHERE {cond}'.format(cn=colname,cond=cond)\n value = tuple([value])\n self.c.execute(query,value)\n\n elif key == 'name' :\n values = tuple([value,cond])\n query = 'UPDATE ATOM SET {cn}=? WHERE name=?'.format(cn=colname)\n self.c.execute(query,values)\n\n elif key == 'index' :\n values = tuple([value] + [v+1 for v in cond])\n qm = ','.join(['?' for i in range(len(cond))])\n query = 'UPDATE ATOM SET {cn}=? 
WHERE rowID in ({qm})'.format(cn=colname,qm=qm)\n self.c.execute(query,values)\n\n elif key == 'query' :\n query = 'UPDATE ATOM SET {cn}=? {c1}'.format(cn=colname,c1=cond)\n value = tuple([value])\n self.c.execute(query,value)\n\n else:\n print('Error arguments %s not supported in pdb2sql.get()\\nOptions are:\\n' %(key))\n for posskey,possvalue in arguments.items():\n print('\\t' + posskey + '\\t\\t' + possvalue)\n return", "def format_column(cls, label, column, min_width=min_val_width, max_width=max_val_width, etc=' ...'):\n val_width = 0 if len(column) == 0 else max(len(cls.format_value(v)) for v in column)\n val_width = min(val_width, max_width)\n width = max(val_width, len(str(label)), min_width, len(etc))\n def pad(value):\n raw = cls.format_value(value)\n if len(raw) > width:\n prefix = raw[:width-len(etc)] + etc\n else:\n prefix = raw\n return prefix.ljust(width)\n return pad", "def column_create(request):\n try:\n dataset = DatasetSchema.objects.get(\n slug=request.matchdict['slug']\n ) \n except DatasetSchema.DoesNotExist:\n return {\n 'success': False, \n 'message': 'No dataset named: %s' % \n (request.matchdict['slug'])\n }\n # make sure required params are here\n required_params_list = ['name', 'data_type']\n for param in required_params_list:\n if not request.POST.get(param):\n return {\n 'success': False, \n 'message': 'Param: %s missing from request' % (param),\n }\n\n name = request.POST['name']\n data_type = request.POST['data_type']\n\n # make sure datatype is acceptable\n if data_type not in VALID_DATA_TYPES:\n return {\n 'success': False,\n 'message': 'Data Type: %s not a valid data type' % (data_type),\n }\n\n # start building new field\n new_field = Field(\n name = name,\n data_type = data_type,\n created_by_user_id = request.user.id,\n created_datetime = datetime.now(),\n )\n\n # if type is datetime make sure that a format is along with it\n\n if request.POST.get('data_type') == 'datetime':\n if not request.POST.get('datetime_format'):\n return {\n 'success': False,\n 'message': 'Missing a datetime format',\n }\n else:\n # add it\n new_field.datetime_format = request.POST['datetime_format']\n\n # save the new field\n dataset.fields.append(new_field)\n dataset.save()\n return HTTPMovedPermanently(location='/dataset/get/{}'.format(dataset.slug))", "def inject_constants(row, **constants):\n\n stats = '_pass'\n\n for key, value in constants.items():\n if value is not None:\n row[key] = value\n\n return row, stats", "def add_new_column(dataframe, column_name):\r\n dataframe[column_name] = \"\"\r\n return dataframe", "def _modify_columns(self, cols, X, y=None):", "def with_placeholders(self, **placeholders):\n self.placeholders = placeholders\n return self", "async def m002_add_charge_extra_data(db):\n await db.execute(\n \"\"\"ALTER TABLE satspay.charges \n ADD COLUMN extra TEXT DEFAULT '{\"mempool_endpoint\": \"https://mempool.space\", \"network\": \"Mainnet\"}';\n \"\"\"\n )", "def rep_with_blank(value, arg):\n return value.replace(arg, '')", "def interpolate(self, column_name):\n self.check_for_column(column_name)\n\n start_date = min(self.data.index)\n end_date = max(self.data.index)\n date_range = pd.date_range(start_date, end_date, freq='H')\n self.data = self.data.reindex(date_range)\n column = self.data[column_name]\n column = column.interpolate()\n self.data[column_name] = column", "def insert_column(self, tb_name, column_name, data_type):\n sentences = f\"\"\"\n ALTER TABLE {tb_name} ADD COLUMN {column_name} {data_type};\n \"\"\"\n print(sentences)\n 
self.commit(sentences)", "def map(\n self,\n arg: Union[Dict, Callable],\n /,\n na_action: Literal[\"ignore\", None] = None,\n dtype: Optional[dt.DType] = None,\n columns: Optional[List[str]] = None,\n ):\n\n if columns is None:\n return super().map(arg, na_action, dtype)\n self._check_columns(columns)\n\n if len(columns) == 1:\n idx = self._data.type().get_child_idx(columns[0])\n return ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[idx].dtype,\n self._data.child_at(idx),\n True,\n ).map(arg, na_action, dtype)\n else:\n if not isinstance(arg, dict) and dtype is None:\n (dtype, _) = dt.infer_dype_from_callable_hint(arg)\n dtype = dtype or self._dtype\n\n def func(*x):\n return arg.get(tuple(*x), None) if isinstance(arg, dict) else arg(*x)\n\n cols = []\n for n in columns:\n idx = self._data.type().get_child_idx(n)\n cols.append(\n ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[idx].dtype,\n self._data.child_at(idx),\n True,\n )\n )\n\n res = Scope.default._EmptyColumn(dtype)\n for i in range(len(self)):\n if self.is_valid_at(i):\n res._append(func(*[col[i] for col in cols]))\n elif na_action is None:\n res._append(func(None))\n else:\n res._append(None)\n return res._finalize()", "def add_column_to_staging_table(cursor,table_schema,table_name,column_name):\n if not check_if_column_exists(cursor, table_schema, table_name, column_name):\n add_column = \"ALTER TABLE \" + table_schema + \".\" + table_name + \" ADD COLUMN \" + column_name + \" text;\"\n cursor.execute(add_column)", "def addColumnValues(self, column):\n nr1 = self.data.shape[1]\n nr = len(column)\n if nr1 == 0:\n # case 1: empty table\n if nr == 0:\n # case 1a: we're just adding a name\n self.data = numpy.reshape(self.data, (1, 0))\n pass\n else:\n # case 1b: we're adding a column of values\n self.data = numpy.reshape(numpy.array(column), (1, nr))\n pass\n pass\n else:\n # case 2: non-empty table\n if nr1 > 0 and nr != nr1:\n raise Exception(\"New column must have the same length as existing ones %s %s\"%(nr1,nr))\n new_column = numpy.reshape(numpy.array(column), (1, nr))\n self.data = numpy.concatenate((self.data, new_column))\n pass\n return", "def f_onearg_and_default(self, arg1, default = 1) :\n pass", "def adjust(self, column=0, op='+', val=0):\n lst=self.data[column]\n for i in range(0,self.length()):\n lst[i]=eval(str(lst[i]) + op + str(val))\n return", "def argument(arg, default):\n return \"{0}={1}\".format(arg, default) if default else arg", "def place_holder(*args):\n return args", "def addemptycolumn(self, colname, coltype):\n setattr(self,colname,N.zeros((len(self),),coltype))\n self._modflag=True\n self._type[colname]=coltype\n\n #Looks strange here because we count columns from 1 but\n #Python counts them from 0\n self._ncolumns+=1\n self._d[colname]=self._ncolumns\n self._colnames.append(colname)\n self._header+='# %d %s\\n'%(self._ncolumns,colname)", "def _fill_na_by_ratio_checker(func):\n\n @wraps(func)\n def wrapper_checker(database, column_name):\n _CheckInput._check_database_input(database)\n _CheckInput._check_column_name(column_name)\n _CheckInput._check_column_in_database(column_name, database)\n func(database, column_name)\n return wrapper_checker", "def add_col(self):\r\n reader = csv.reader(open(self.in_csvfile, newline=''))\r\n rows = list(reader)\r\n rows[0].append(self.col_name)\r\n for i in range(1, len(rows)):\r\n rows[i].append(self.cell_filler(rows[i]))\r\n writer = csv.writer(open(self.out_csvfile, 'w', newline=''))\r\n writer.writerows(rows)", "def add_one(df, 
col):\n return df.withColumn(col, F.col(col) + 1)", "def add_fill(self, shape, value, name=None):\n return self._build_op('Fill', [shape, value], name=name)", "def add_calculated_col(df, func, *cols, funcargs=[], funckw={}, **kw):\n name = kw.get(\"name\", \"func{}\".format(str(tuple(cols))))\n\n if name in df.colnames:\n print(\"{} already in table\".format(name))\n return None\n\n colindex = kw.get(\"index\", None)\n\n df.add_column(\n func(*[df[c] for c in cols], *funcargs, **funckw),\n index=colindex,\n name=name,\n )\n\n df[name].info.description = kw.get(\"description\", name)\n\n if kw.get(\"return\", False) is True:\n return df", "def _add_cols(df: pandas.DataFrame, scope = (globals(), locals())) -> None:\n command : str = input(\"\\nAdd a column:\\n\")\n if command.lower() in ['n', 'no', 'quit()', 'exit', 'return']:\n return\n\n col_name : str = command[ \\\n re.search(r'[\\w\\.\\(\\)]+', command).start(): \\\n re.search(r'[\\w\\.\\(\\)]+', command).end() \\\n ]\n # new column's name\n\n arg : str = command[re.search(r'[=,;]', command).end():]\n # the new column's \"function\"\n ref_cols = re.findall(r'(?<=\\{)\\w[\\w\\.\\(\\)]*(?=\\})', arg)\n # df column names that are referenced to create new columns\n\n for i in range(len(ref_cols)):\n arg = re.sub(\n f'{{{ref_cols[i]}}}',\n f'df[\\'{ref_cols[i]}\\']',\n arg\n )\n # substituting references\n\n scope[0].update(globals())\n scope[1].update(locals())\n\n col_arg = eval(arg, scope[0], scope[1])\n # pandas.Series for type checking\n df[col_name] = col_arg\n # creating column\n\n more : str = input(\"\\nWould you like to add more columns?\\n\")\n if more.lower() in ['y', 'yes', 'continue', 'true']:\n return _add_cols(df)\n return", "def newColumn (layer,FieldName,DataType):\n # Check if field already exists\n if layer.fields().indexFromName(FieldName)==-1:\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes([QgsField(FieldName,DataType)])\n print(\"New field \\\"{}\\\" added\".format(FieldName))\n # Update to propagate the changes\n layer.updateFields()\n else:\n print(\"Field \\\"{}\\\" already exists.\".format(FieldName))", "def insert_column(self, identifier, position, name, datastore):\n # Raise ValueError if given colum name is invalid.\n if name is not None and not is_valid_name(name):\n raise ValueError(\"invalid column name '{}'\".format(name))\n # Get dataset. 
Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Insert new column into dataset.\n df = dataset.to_dataframe()\n df = vizual.insert_column(df=df, names=[name], pos=position)\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations\n )\n return VizualApiResult(ds)", "def make_placeholder(fig: Figure) -> None:\n fig.add_artist(FancyBboxPatch(\n xy = (0.35, 0.45),\n width = 0.3,\n height = 0.1,\n boxstyle = 'Round, pad=0.015',\n linewidth = 3,\n edgecolor = 'red',\n facecolor = 'lightpink',\n alpha = 0.5\n ))\n fig.text(\n x = 0.5,\n y = 0.5,\n s = \"Placeholder\",\n ha = \"center\",\n va = \"center\",\n fontsize = 'xx-large',\n fontweight = 'bold',\n alpha = 0.5\n )", "def addcolumn(self, title=\"\"):\n new_column = table_column()\n self.__column_list.append(new_column)\n if title:\n new_column.settitle(title)\n return new_column", "def _joined_column_sql(editable, generated, joined):\n return f\"IF({editable} IS NOT NULL AND {editable} != '' OR {generated} = 'NA', {editable}, {generated}) AS {joined}\"", "def get_cols_dummy():", "def _convert_column(self, col, function):\n col_new = []\n for x in self[col]:\n if x == \"\":\n col_new.append(None)\n else:\n col_new.append(function(x))\n self.df[col] = col_new", "def reload_placeholder(update):\n pass", "def add_col(self, colname, n_batch=5000, debug=False):\n\n if debug: print(\"Create new column {col}\".format(col=colname))\n # Alter table add column\n #\n alter_query = '''\n ALTER TABLE \"{tablename}\"\n ADD COLUMN \"{colname}\" {datatype};\n '''.format(tablename=self.get_carto_tablename(),\n colname=colname,\n datatype=datatype_map(str(self.dtypes[colname])))\n if debug: print(alter_query)\n\n # add column\n resp = self.carto_sql_client.send(alter_query)\n if debug: print(resp)\n\n # update all the values in that column\n #\n # NOTE: fails if colval is 'inf' or some other exceptional Python\n # or NumPy type\n n_items = len(self[colname])\n update_query = '''\n UPDATE \"{tablename}\"\n SET \"{colname}\" = {colval}\n WHERE \"cartodb_id\" = {cartodb_id};\n '''\n queries = []\n\n for row_num, item in enumerate(self[colname].iteritems()):\n # if debug: print(item)\n pgtype = dtype_to_pgtype(self[colname].dtype, colname)\n temp_query = update_query.format(\n tablename=self.get_carto_tablename(),\n colname=colname,\n colval=numpy_val_to_pg_val(item[1], pgtype),\n cartodb_id=item[0]).strip()\n queries.append(temp_query)\n if (len(queries) == n_batch) or (row_num == n_items - 1):\n output_query = '\\n'.join(queries)\n if debug: print(output_query)\n if debug: print(\"Num chars in query: {}\".format(len(output_query)))\n resp = self.carto_sql_client.send(output_query)\n queries = []\n\n return None", "def __init__(self, col, val):\n\n self.__col = col\n self.__val = val", "def new_column( self, delta = 1, ):\n self.ix_row = 0\n self.ix_col += delta", "def _wrap_FunctionDefArgument(self, expr):\n var = expr.var\n name = var.name\n self.scope.insert_symbol(name)\n collisionless_name = self.scope.get_expected_name(var.name)\n if var.is_ndarray or var.is_optional:\n new_var = Variable(BindCPointer(), self.scope.get_new_name(f'bound_{name}'),\n is_argument = True, is_optional = False, memory_handling='alias')\n arg_var = var.clone(collisionless_name, is_argument = False, is_optional = False,\n memory_handling = 'alias', 
allows_negative_indexes=False)\n self.scope.insert_variable(arg_var)\n else:\n new_var = var.clone(collisionless_name)\n self.scope.insert_variable(new_var)\n\n return BindCFunctionDefArgument(new_var, value = expr.value, original_arg_var = expr.var,\n kwonly = expr.is_kwonly, annotation = expr.annotation, scope=self.scope)", "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def appendColumn(self, contents = None):\n\n\t\t\t\t#Find the last column\n\t\t\t\tcolumn = len(tuple(self.thing.iter_cols())) + 1\n\n\t\t\t\t#Write to cells\n\t\t\t\tif ((contents != None) and (len(contents) != 0)):\n\t\t\t\t\tfor row, item in enumerate(contents):\n\t\t\t\t\t\tself.setCell(row + 1, column, item)\n\t\t\t\telse:\n\t\t\t\t\tself.setCell(1, column, \" \")", "def f_onearg(self, arg1) :\n pass", "def insert(self, j, column, default=None):\n try: column = [v for v in column]\n except:\n raise TypeError, \"Table.columns.insert(x): x must be list\"\n column = column + [default] * (len(self._table) - len(column))\n if len(column) > len(self._table):\n self._table.extend([[None]] * (len(column)-len(self._table)))\n for i, row in enumerate(self._table):\n row.insert(j, column[i])\n self._table._m += 1 # Increase column count.", "def assign_columns(f, df):\n df = assign_image(f, df)\n df = assign_cropbox(df)\n df = assign_uid(df)\n return df", "def TextFieldOptionsAddPlaceHolder(builder, placeHolder):\n return AddPlaceHolder(builder, placeHolder)", "def add_select(self, *column):\n if not column:\n column = []\n\n self.columns += column\n\n return self", "def setColumn(self,item,column,value):\n raise UncodedError", "def _visit_arg_with_default(self, arg: ast.arg, default: ast.AST | None) -> str:\n name = self.visit(arg)\n if default:\n if arg.annotation:\n name += \" = %s\" % self.visit(default)\n else:\n name += \"=%s\" % self.visit(default)\n return name", "def _init_(self, input_column, output_column):\n super()._init_([input_column], output_column)", "def re_add_col(df, col_search, col_add, pattern):\n \n search = [] \n\n for value in df[col_search]:\n search.append(re.findall(pattern, value))\n \n df[col_add] = flatten(search)\n \n # Add option for NA if its not found\n return df", "def insertColumn(self, index, *column):\n if ((len(column) == 1) and (type(column[0]) in MATRIX_VALID_COLLECTIONS)):\n column = column[0]\n if self._height:\n if not (len(column) == self._height):\n raise ValueError('Improper length for new column: %d, should be %d' % (len(column), self._height))\n else:\n self._height = len(column)\n for i in range(self._height):\n self._value.append(list())\n self._width += 1\n for i in range(self._height):\n if not (type(column[i]) in MATRIX_VALID_TYPES):\n message = \"Values must be of type \"\n for t in range(len(MATRIX_VALID_TYPENAMES)):\n if t:\n message += ' or '\n message += \"'%s'\" % MATRIX_VALID_TYPENAMES[t]\n raise TypeError(message)\n self._value[i].insert(index, column[i])", "def removeplaceholders(tab):#{{{\n for i in tab.dtype.names:\n m = 0\n while m < len(tab):\n if tab[i][m] == '*':\n tab[i][m] = ''\n m+=1\n return tab\n #}}}", "def add_column(\n self,\n header: \"RenderableType\" = \"\",\n footer: \"RenderableType\" = \"\",\n *,\n header_style: Optional[StyleType] = None,\n footer_style: Optional[StyleType] = None,\n style: Optional[StyleType] = None,\n justify: \"JustifyMethod\" = \"left\",\n vertical: \"VerticalAlignMethod\" = \"top\",\n overflow: \"OverflowMethod\" = \"ellipsis\",\n width: Optional[int] = None,\n min_width: Optional[int] = None,\n 
max_width: Optional[int] = None,\n ratio: Optional[int] = None,\n no_wrap: bool = False,\n ) -> None:\n\n column = Column(\n _index=len(self.columns),\n header=header,\n footer=footer,\n header_style=header_style or \"\",\n footer_style=footer_style or \"\",\n style=style or \"\",\n justify=justify,\n vertical=vertical,\n overflow=overflow,\n width=width,\n min_width=min_width,\n max_width=max_width,\n ratio=ratio,\n no_wrap=no_wrap,\n )\n self.columns.append(column)", "def __init__(self, title, value_digits, uncertainty_digits,\n exponent=None):\n\n if exponent is None:\n pattern = \\\n \"%%.%df ± %%.%df\" % (value_digits, uncertainty_digits)\n self.scale = 1\n elif exponent == \"%\":\n pattern = \"(%%.%df ± %%.%df)%%%%\" \\\n % (value_digits, uncertainty_digits)\n self.scale = 100\n elif isinstance(exponent, int):\n pattern = \"(%%.%df ± %%.%df)x10^%d\" \\\n % (value_digits, uncertainty_digits, exponent)\n self.scale = 10.0 ** (-exponent)\n else:\n raise ValueError, \"unrecognized exponent %r\" % exponent\n\n PatternColumn.__init__(self, title, pattern, \"right\")", "def addColumn(self, name, column):\n self.columnNames.append(name)\n self.addColumnValues(column)", "def insert_arg(self, factory, arg, *args, **kwargs):\n args = (arg,) + args\n try:\n spec = self._by_factory[factory]\n except KeyError:\n spec = self._by_factory[factory] = HandlerSpec(factory)\n\n if spec.args is None:\n spec.args = []\n spec.args.insert(0, (args, kwargs))", "def add_placeholders(self):\n ### YOUR CODE HERE\n self.input_placeholder=tf.placeholder(dtype=tf.float32,shape=(None, Config.n_features),name='input_placeholder')\n self.labels_placeholder=tf.placeholder(dtype=tf.int32,shape=(None, Config.n_classes), name='labels_placeholder')\n ### END YOUR CODE", "def get_first_selection(table, column_name):\n def replace(entry):\n if pd.isnull(entry):\n return None\n else:\n return re.sub(r',.*', '', entry)\n assert (isinstance(table, Table)), \"Input not a supported type.\"\n column = table.apply(replace, column_name)\n return table.append_column(column_name, column)", "def _add_necessary_columns(args, custom_columns):\n # we need to add the variant's chrom, start and gene if \n # not already there.\n if custom_columns.find(\"gene\") < 0:\n custom_columns += \", gene\"\n if custom_columns.find(\"start\") < 0:\n custom_columns += \", start\"\n \n return custom_columns", "def add_add_filter(source, args, index):\n tagspec = _parse_tagspec(args.get('add-tag%02d' % index))\n header = args.get('add-header%02d' % index)\n value = args.get('add-value%02d' % index, '')\n before = (args.get('add-before%02d' % index) == 'on')\n values = [(hxl.Column.parse(tagspec, header=header), value)]\n return source.add_columns(specs=values, before=before)", "def __init__(self, column_id='', data_type='', data_format='',\n constant_value=''):\n self.column_id = column_id\n self.data_type = data_type\n self.data_format = data_format\n self.constant_value = constant_value", "def __init__(self) -> None:\n super().__init__()\n self.placeholder = 1.0", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'group_id': 'group_id',\n 'first_name': 'first_name',\n 'starter': 'starter',\n 'main': 'main',\n 'dessert': 'dessert',\n 'special_diet': 'special_diet',\n 'requirements': 'requirements',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'", "def 
test_fill_values(parallel, read_basic):\n text = \"\"\"\nA, B, C\n, 2, nan\na, -999, -3.4\nnan, 5, -9999\n8, nan, 7.6e12\n\"\"\"\n table = read_basic(text, delimiter=\",\", parallel=parallel)\n # The empty value in row A should become a masked '0'\n assert isinstance(table[\"A\"], MaskedColumn)\n assert table[\"A\"][0] is ma.masked\n # '0' rather than 0 because there is a string in the column\n assert_equal(table[\"A\"].data.data[0], \"0\")\n assert table[\"A\"][1] is not ma.masked\n\n table = read_basic(\n text, delimiter=\",\", fill_values=(\"-999\", \"0\"), parallel=parallel\n )\n assert isinstance(table[\"B\"], MaskedColumn)\n assert table[\"A\"][0] is not ma.masked # empty value unaffected\n assert table[\"C\"][2] is not ma.masked # -9999 is not an exact match\n assert table[\"B\"][1] is ma.masked\n # Numeric because the rest of the column contains numeric data\n assert_equal(table[\"B\"].data.data[1], 0.0)\n assert table[\"B\"][0] is not ma.masked\n\n table = read_basic(text, delimiter=\",\", fill_values=[], parallel=parallel)\n # None of the columns should be masked\n for name in \"ABC\":\n assert not isinstance(table[name], MaskedColumn)\n\n table = read_basic(\n text,\n delimiter=\",\",\n fill_values=[(\"\", \"0\", \"A\"), (\"nan\", \"999\", \"A\", \"C\")],\n parallel=parallel,\n )\n assert np.isnan(table[\"B\"][3]) # nan filling skips column B\n # should skip masking as well as replacing nan\n assert table[\"B\"][3] is not ma.masked\n assert table[\"A\"][0] is ma.masked\n assert table[\"A\"][2] is ma.masked\n assert_equal(table[\"A\"].data.data[0], \"0\")\n assert_equal(table[\"A\"].data.data[2], \"999\")\n assert table[\"C\"][0] is ma.masked\n assert_almost_equal(table[\"C\"].data.data[0], 999.0)\n assert_almost_equal(table[\"C\"][1], -3.4) # column is still of type float", "def _set_fill(self, _var_name, _list_index, _operation):\r\n self.command_stack.do(model.structure.SetFill(self._structure, bool(self._fill_var.get())))", "def __init__(self, title, decimal_places, exponent=None):\n\n if exponent is None:\n pattern = \"%%.%df\" % decimal_places\n self.scale = 1\n elif exponent == \"%\":\n pattern = \"%%.%df%%%%\" % decimal_places\n self.scale = 100\n elif isinstance(exponent, int):\n pattern = \"%%.%df x10^%d\" % (decimal_places, exponent)\n self.scale = 10.0 ** (-exponent)\n else:\n raise ValueError, \"unrecognized exponent %r\" % exponent\n\n PatternColumn.__init__(self, title, pattern, \"right\")", "def generate_1st_column(rows):\n\n values = [i for i in range(1, rows+1)]\n result = []\n result.append(['col1'])\n\n for i in range(0, len(values)):\n result.append([values[i]])\n display_indicator(ROWS, i, str(i) + \" numbers processed for column 1\")\n\n return result", "def addcolumn(self, colname, coldata):\n if len(coldata) != len(self):\n raise ValueError,\"Column length must match catalog length\"\n\n #Most of the bookkeeping is the same as for an empty column\n self.addemptycolumn(colname,coldata.dtype)\n\n #and then we reset the column to contain the actual data\n setattr(self,colname,coldata)", "def add_column(self):\n if len(self._grid) == 1:\n self._grid[0].append(None)\n elif len(self._grid) > 1:\n for i in range(len(self._grid)):\n self._grid[i].append(None)\n return True", "def cols(self, col):\n self.col += col" ]
[ "0.61651623", "0.60531425", "0.5932682", "0.55233854", "0.53711224", "0.5348192", "0.5308065", "0.52713895", "0.5266658", "0.51256454", "0.5117017", "0.51092947", "0.510111", "0.5097036", "0.5096841", "0.50488573", "0.50472313", "0.50351626", "0.50323486", "0.5029519", "0.50209", "0.5020322", "0.5008579", "0.49879998", "0.49875143", "0.4987115", "0.49745104", "0.49728018", "0.49304664", "0.4921568", "0.490559", "0.48839056", "0.48724413", "0.48527145", "0.484913", "0.4789116", "0.47564662", "0.47554064", "0.47538564", "0.47518426", "0.47392115", "0.47273302", "0.47146493", "0.47086275", "0.4705034", "0.47021616", "0.46801904", "0.46568087", "0.46465468", "0.4642794", "0.46407413", "0.46287492", "0.46211058", "0.46009585", "0.45984402", "0.45956165", "0.45935237", "0.45859697", "0.45848903", "0.45780697", "0.457521", "0.45715266", "0.45685712", "0.4565568", "0.45640418", "0.45549026", "0.45499387", "0.45496917", "0.4542987", "0.45408848", "0.45378935", "0.45354664", "0.4532241", "0.45184523", "0.45100027", "0.45001587", "0.44943872", "0.44875103", "0.44830492", "0.4481156", "0.44796738", "0.4473719", "0.4469728", "0.44596025", "0.44481236", "0.44425017", "0.44422847", "0.44398728", "0.44394872", "0.4435586", "0.44316784", "0.44218427", "0.44213945", "0.44211113", "0.44203776", "0.44200855", "0.44183555", "0.4416013", "0.44082752", "0.44035688" ]
0.6702973
0
Insert a new node.
def insert(self, val):
    node = Node(val)
    current = self.root
    if self.root is None:
        self.root = node
        return node
    while current:
        if val >= current.val:
            if current.right is not None:
                current = current.right
            else:
                current.right = node
                break
        elif val < current.val:
            if current.left is not None:
                current = current.left
            else:
                current.left = node
                break
    return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __insert(self, node, value):\n #if DEBUG: print('\\t__insert({})'.format(value))\n\n new = Node(value, node.next)\n node.next = new\n return new", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def insert(self, value):\n self.head = Node(value, self.head)", "def insert(self, key):\n if self.root is None:\n self.root = self.Node(key)\n else:\n self.root = self.root.insert(key)", "def add_node(self, node):", "def insert(self, value):\n node = Node(value)\n\n if self.head is not None:\n node.next = self.head\n self.head = node", "def insert(self, data):\n if self.head == None:\n self.head = Node(data)\n else:\n curr = self.head\n while curr.link != None:\n curr = curr.link\n curr.link = Node(data)", "def insert(self, value):\n\n node = Node(value)\n node.next = self.head\n self.head = node", "def insert(self, value):\n\n node = Node(value)\n node.next = self.head\n self.head = node", "def insert(self, val):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = none\n else:\n self.right.insert(node)", "def insert(self, key, val):\n if self.root is None:\n self.root = self.Node(key, val)\n else:\n self.root.insert(key, val)", "def insert(self, value, ident):\n print(\"Insert\", value, ident)\n found_on_next_node = self._pop_node(ident)\n # if found_on_next_node:\n # print(\"Found node:\"\n # ,found_on_next_node.value\n # ,found_on_next_node.ident\n # )\n self._insert_node(value, ident)", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def insert(self, value):\n\n # create new node\n\n # self.head =new_node\n current = self.head\n if current == None:\n self.head = Node(value, self.head)\n return\n while current.next != None:\n current=current.next\n new_node=Node(value)\n\n current.next=new_node", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def insert_node(self, node):\n if self._is_node_reserved(node):\n return False\n\n # Put node in map\n self._node_map[node.get_id()] = node\n return True", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n # Updates the min of this node if the inserted node has a smaller\n # key.\n if node.key < self.min.key:\n self.min = node\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def insertnode(self, node_path, node_val):\n\t\t# Get to the correct tree\n\t\tcurr_tree = self\n\t\tfor node_name in node_path[1:]:\n\t\t\tcurr_tree = curr_tree[node_name]\n\t\t\n\t\t# Allocate to 
tree (only once)\n\t\tif curr_tree.name == None:\n\t\t\tcurr_tree.name = node_path[-1]\n\t\t\tcurr_tree.value = node_val\n\t\telse:\n\t\t\tprint curr_tree.name\n\t\t\tprint node_path\n\t\t\tassert(False)", "def insert(self, key):\n # Create new node\n n = TreeNode(key)\n if not self.node:\n self.node = n\n self.node.left = AvlTree()\n self.node.right = AvlTree()\n elif key < self.node.val:\n self.node.left.insert(key)\n elif key > self.node.val:\n self.node.right.insert(key)\n self.re_balance()", "def insert(self,node,key):\n position=self.find(node,key)\n if position.key==key:\n print(\"node already present\")\n elif position.key>key:\n n=Node(key)\n position.setLeftChild(n)\n n.setParent(position)\n print(n.getParent())\n else:\n n=Node(key)\n position.setRightChild(n)\n n.setParent(position)", "def _insert(self, key):\n self.tree.insert(key)", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n # Update the min of this node if the inserted node has a smaller key.\n if node.key < self.min.key:\n self.min = node\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def insert(self, val):\n if not self.root:\n self.root = Node(val)\n self.size_number += 1\n else:\n self._sink(val, self.root)\n # check parent from node, until unbalanced.", "def insert(self, item):\n insert_location = self.__find(item)\n if insert_location is None: #No root\n self.root = Node(item, None)\n elif item < insert_location.item:\n insert_location.left_child = Node(item, insert_location)\n else: # it should be that item >= insert_location.item\n insert_location.right_child = Node(item, insert_location)", "def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node", "def insert(self, new_node):\n\n # If there haven't been any nodes added, add this new node as a\n # child of the root node.\n if self._latest_node == self.root:\n self.__insert_in_place(new_node, self._latest_node)\n # Tree start nodes should be inserted as siblings of the\n # previous node, unless it was a tree start, or tree continue\n # node. In which case it's a child.\n elif new_node.is_start_node():\n if self._latest_node.is_start_node() or self._latest_node.is_cont_node():\n self.__insert_in_place(new_node, self._latest_node)\n else:\n self.__insert_in_place(new_node, self._latest_node.parent)\n\n # If the node is a tree continue or a tree end node, it must be\n # added as a sibling of a valid / active tree node.\n elif new_node.is_cont_node() or new_node.is_end_node():\n # Need to walk back to find the previous level where an else\n # or an end can be added\n self.walk_to_tree_insertion_point()\n\n self.__insert_in_place(new_node, self._latest_node.parent)\n\n # Otherwise, if the previous node was a tree start or a tree\n # continue, the new node is a child. 
If not, it's a sibling.\n else:\n if self._latest_node.is_start_node() or self._latest_node.is_cont_node():\n self.__insert_in_place(new_node, self._latest_node)\n else:\n self.__insert_in_place(new_node, self._latest_node.parent)", "def insert(self, position, data):\n\n node = Node(data)\n traverse = self.head\n\n for i in range(0, position - 1):\n traverse = traverse.next\n temp = traverse.next\n traverse.next = node\n node.next = temp", "def insert(self, value, pos):\r\n\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n\r\n if pos == 0:\r\n self.prepend(value)\r\n return\r\n\r\n index = 0\r\n node = self.head\r\n while node.next and index <= pos:\r\n if (pos - 1) == index:\r\n new_node = Node(value)\r\n new_node.next = node.next\r\n node.next = new_node\r\n return\r\n\r\n index += 1\r\n node = node.next\r\n else:\r\n self.append(value)", "def insert(self, k):\n node = self.klass(None, k)\n if self.root is None:\n self.root = node\n else:\n self.root.insert(node)\n return node", "def insert(self, key, value):\r\n insert_node = NodeRBT(key, value, color=RED)\r\n\r\n parent_node, _ = self.__compare(key)\r\n\r\n # Case 1: root node\r\n # if no parent_node, means the insert node is the root\r\n if not parent_node:\r\n self.root = insert_node\r\n self.root.color = BLACK\r\n self.root.left_child = NodeRBT(None, None)\r\n self.root.right_child = NodeRBT(None, None)\r\n\r\n else:\r\n insert_node.parent = parent_node\r\n insert_node.left_child = NodeRBT(None, None)\r\n insert_node.right_child = NodeRBT(None, None)\r\n if key <= parent_node.key:\r\n parent_node.left_child = insert_node\r\n else:\r\n parent_node.right_child = insert_node\r\n\r\n # Case 2: parent node is BLACK, do nothing\r\n if parent_node.color == BLACK:\r\n pass\r\n # Case 3: parent node is RED, solve the two-red problem\r\n else:\r\n self.__fix_double_reds(insert_node)\r\n\r\n # update the size of tree\r\n self.__update_size_tree(insert_node)", "def insert(self, item):\n # Handle the case where the tree is empty\n if self.is_empty():\n # if self.root is None:\n # TODO: Create a new root node\n self.root = ...\n # TODO: Increase the tree size\n self.size ...\n return\n # Find the parent node of where the given item should be inserted\n parent = self._find_parent_node(item)\n # TODO: Check if the given item should be inserted left of the parent node\n if ...:\n # TODO: Create a new node and set the parent's left child\n parent.left = ...\n # TODO: Check if the given item should be inserted right of the parent node\n elif ...:\n # TODO: Create a new node and set the parent's right child\n parent.right = ...\n # TODO: Increase the tree size\n self.size ...", "def insert(self, data, index):\n if index == 0:\n self.add(data)\n\n if index > 0:\n new = Node(data)\n position = index # Cada que se llama a current = current.next_node, se decrementa el valor de position en 1, cuando el valor sea cero, se ha llegado al nodo que está actualmente en la posición que queremos insertar el nuevo valor\n current = self.head\n\n while position > 1:\n current = current.next_node\n position -= 1\n \n prev_node = current\n next_node = current.next_node\n\n prev_node.next_node = new\n new.next_node = next_node", "def insert(self, index: int, tree: 'Tree') -> None:\n ...", "def insert(self, data):\n new_node = Item(data)\n new_node.next = self.head\n self.head = new_node", "def insert(self, k):\n node = self.klass(None, k)\n if self.root is None:\n # The root's parent is None.\n self.root = node\n else:\n self.root.insert(node)\n 
return node", "def insert(self, val):\n inserted_node = DblNode(val, self.head)\n if not self.head:\n self.head = inserted_node\n self.tail = self.head\n self.head.previous_node = inserted_node\n self.head = inserted_node", "def insert_node(self, head, node):\n prev, curr = None, head\n while curr.val < node.val:\n prev, curr = curr, curr.next\n if not prev:\n head = node\n else:\n prev.next = node\n node.next = curr\n return head", "def insert(self, data):\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n else:\n self.data = data", "def insert(self, key, data):\n debug.printMsg('Insert for \"' + key + '\" With data: ' + str(data) )\n # if there is no root node\n if not self.root:\n debug.printMsg(\"No root was found, create one\")\n self.root = Node(key, data)\n else:\n debug.printMsg(\"Root was found, starting recursive insert\")\n self.recursiveInsert(key, data, self.root)\n # increment the size of the BST\n debug.printMsg(\"Incrementing size of BST\")\n self.size = self.size + 1", "def insert(self, data):\n \n def _find_parent(current, node):\n \"\"\"Recursively descend through the tree to find the node that\n should be the parent of the new node. Do not allow for duplicates.\n \"\"\"\n \n if node == current:\n raise ValueError(str(node.data) + \" is already in the tree.\")\n if node < current: # Travel left\n if current.left:\n return _find_parent(current.left,node)\n else:\n return current\n else: # Travel right\n if current.right:\n return _find_parent(current.right,node)\n else:\n return current\n \n n = KDTNode(data) # Make a new node\n if len(data) != self.k:\n raise ValueError(\"data must be of length \" + str(self.k))\n if not self.root:\n self.root = n # Case 1: empty tree\n n.axis = 0\n else: # Case 2: use _find_parent\n parent = _find_parent(self.root, n) # Get the parent\n if n < parent: parent.left = n # Insert the node\n else: parent.right = n\n n.prev = parent # Double link\n n.axis = (n.prev.axis + 1) % self.k\n return n", "def insert(self, value):\n\n if self.typing is None: # first insertion: set type of this tree\n self.typing = type(value)\n else: # perform type check\n if type(value) != self.typing:\n raise TypeError(\"Type \" + str(type(value)) + \" is incompatible\" +\n \" with tree of type \" + str(self.typing) + \".\")\n # TODO allow different yet comparable types\n\n # if no error:\n if self.root is None:\n self.root = SplayNode(value)\n else:\n self.root = self.root.insert(value)\n self._size += 1", "def insert(self, data):\n if data < self.data:\n if self.left is None:\n self.left = Node(data, self)\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Node(data, self)\n else:\n self.right.insert(data)", "def insert(self, data, index):\n if index == 0:\n self.prepend(data)\n return\n\n current_index = 0\n current = self.head\n previous = None\n\n while current or previous:\n if current_index == index:\n new_node = Node(data)\n new_node.next = current\n previous.next = new_node\n break\n\n previous = current\n current = current.next\n current_index += 1", "def insert(self, key):\r\n if self.root.num_keys() == self.max_num_keys:\r\n self.root = Node([], [self.root])\r\n self.root.split_child(0)\r\n\r\n node = self.root \r\n while not node.is_leaf():\r\n index = node.search(key)\r\n\r\n child = node.children[index]\r\n if child.num_keys() 
== self.max_num_keys:\r\n node.split_child(index)\r\n\r\n if node.keys[index] < key:\r\n index += 1\r\n\r\n node = node.children[index] \r\n\r\n node.insert(key)", "def insert(self, key, value):\n self.root.insert(key, value)\n\n # Update the new root if need be.\n node = self.root\n while node.parent != None:\n node = node.parent\n self.root = node", "def insert_append(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n cur = self.head\n while cur.next != None:\n cur = cur.next\n cur.next = node\n node.prev = cur", "def insert(self, p, elem):\n node = self._validate(p)\n new_node = self._Node(elem, idx=self._curr_idx, parent=node._parent)\n self._curr_idx += 1\n node._parent = new_node\n new_node._children.append(node)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return self._make_position(new_node)", "def insert(self, pos, item):\n \n if pos == 0:\n self.add(item)\n \n elif pos >= self.length():\n self.append(item)\n \n else:\n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n n = Node(item)\n previous.set_next(n)\n n.set_next(current)", "def insert(self,x,pos):\n new = ListNode()\n new.value = x\n new.next = pos.next\n pos.next = new", "def insert(self, data):\n\n\t\tif (self.treetype() and type(data) != self.treetype()):\n\t\t\traise TypeError(str(type(data)) + \" is invalid for this tree.\")\n\n\t\tself._size += 1\n\n\t\tif (not self._root):\n\t\t\tself._root = BTNode(value=data, depth=1)\n\t\t\treturn\n\n\t\tself._recursive_insert(data, self._root)\n\t\treturn", "def add_node (self, node):\n raise NotImplementedError", "def add_node(self, node):\n self.nodes.append(node)", "def insert(root,key):\n node = Node(key)\n node.insert_without_rotation(root)\n Node.recalculate_heights(node)\n Node.rotatation_adjusting_heights(node)", "def insert(self, value):\n if self.root is None:\n self.root = Node(value)\n self.size = 1\n return self.root\n inserted = self._insert(self.root, value)\n # if this does not hold true then the value was already\n # contained within the tree\n if inserted is not AVLTree.NULL_NODE:\n self.size += 1\n self.root = inserted\n return inserted", "def push(self, node):\n self.prepend(node)", "def insert(self, node, update=False):\n if not isinstance(node, RbNode):\n node = RbNode(node)\n node = super(RbTree, self).insert(node, update)\n \"\"\" :type: RbNode \"\"\"\n self._balance_insert(node)\n\n if self.debug:\n print 'After balancing:'\n print self\n print '*' * 20\n\n return node", "def insert(self, item, key):\n if self.key == key:\n self.item = item\n elif self.key < key:\n if self.right:\n self.right.insert(item, key)\n else:\n self.right = BSTreeNode(item, key)\n else:\n if self.left:\n self.left.insert(item, key)\n else:\n self.left = BSTreeNode(item, key)\n # Replace by correct code\n pass", "def _insert_node(node, before_node=None, after_node=None, in_index=0):\n\n if not before_node and not after_node:\n log.info('Nowhere to insert %s to.' 
% node.path())\n return False\n\n if before_node:\n log.debug('Inserting %s before %s' % (node.path(), before_node.path()))\n before_node_input = None\n before_node_inputs = before_node.inputs()\n if not before_node_inputs:\n return False\n before_node_input = before_node_inputs[0]\n\n node.setInput(in_index, before_node_input)\n before_node.setInput(in_index, node)\n return True\n\n if after_node:\n log.debug('Inserting %s after %s' % (node.path(), after_node.path()))\n after_node_output = None\n after_node_outputs = after_node.outputs()\n if not after_node_outputs:\n node.setInput(in_index, after_node)\n return True\n\n after_node_output = after_node_outputs[0]\n\n after_node_output.setInput(in_index, node)\n node.setInput(in_index, after_node)\n return True\n\n return False", "def insert(self, value):\n old_head = self.head\n self.head = Node(value, old_head)\n if self.count > 0: # if any Nodes: set tail previous to current Node\n old_head.next = self.head\n else: # adding to an empty, than define front\n self.tail = self.head\n self.count += 1", "def insert(self, value):\n\n\n if value < self.data:\n if self.left:\n self.left.insert(value)\n else:\n self.left = BinaryNode(value)\n\n elif value > self.data:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BinaryNode(value)\n\n else:\n self.data = self.data", "def insert(node, key):\n # If the tree is empty, return a new node\n if node is None:\n return Node(key)\n\n # Otherwise recur down the tree\n if key < node.key:\n node.left = insert(node.left, key)\n else:\n node.right = insert(node.right, key)\n\n # return the (unchanged) node pointer\n return node", "def Insert(root, node):\n target = root.ChooseLeaf(node)\n node.father = target\n target.leaves.append(node)\n target.MBR = merge(target.MBR, node.MBR)\n target.AdjustTree()\n if root.father != None:\n root = root.father\n return root", "def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True", "def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node", "def insert(self, key, val=None):\n self.root = self._insert(self.root, key, val) # Returns root of resulting tree after insertion - update it\n self.n += 1", "def insert(self):\n pass", "def insert(self, pos, data):\n assert pos >= 0\n if pos >= self.size(): # todo: support to insert node in end of the list\n raise Exception(\"pos:%d is out of index:%d\" % (pos, self.size()-1))\n\n last = None\n current = self.head\n count = -1\n while current is not None:\n count += 1\n if count == pos:\n node = Node(data)\n\n if last is None:\n node.next = self.head\n self.head = node\n else:\n node.next = current\n last.next = node\n\n return\n\n last = current\n current = current.next", "def add_node(self, node):\n self.nodes.add(node)", "def insert(self, n, pos):\n if pos == 0:\n self.cons(n)\n else:\n prev = self.index(pos-1)\n next = prev.next\n prev.next = n\n n.next = next\n self.len += 1", "def insert(self, value):\n new_node = Node(value)\n if self.root is None:\n self.root = new_node\n else:\n node = self.root\n while(node!=None):\n if(value <= node.data):\n if node.left is None:\n node.left = new_node\n node = node.left\n node = node.left\n elif(value > node.data):\n if node.right is None:\n node.right = new_node\n node = node.right\n node = node.right", "def _insert(self, value, cur_node):\n if value < cur_node.value:\n if cur_node.left_child == None:\n cur_node.left_child = Node(value)\n else: \n self._insert(value, 
cur_node.left_child)\n elif value > cur_node.value: #creating elif in case the value is same as the current node \n if cur_node.right_child == None:\n cur_node.right_child = Node(value)\n else:\n self._insert(value, cur_node.right_child)\n else:\n print(\"Value already in the tree\")", "def insert(self, node , hx, data):\n #if tree is empty , return a root node\n if node is None:\n self.node_count += 1\n return self.create_node(hx, data)\n if data <= node.data:\n node.left = self.insert(node.left, hx, data)\n elif data > node.data:\n node.right = self.insert(node.right, hx, data)\n\n return node", "def insert(self,value):\n try:\n new_node=Node(value)\n if self.head == None:\n self.head=new_node\n else:\n current=self.head\n while current.next:\n current=current.next\n current.next=new_node\n print( new_node.value)\n return( new_node.value)\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def add_node(self, **kwargs):\n self._content.append(Node(**kwargs))", "def add_node(self, data):\n new_node = Node(data)\n if self.cur_node is not None:\n new_node.next, self.cur_node.next = self.cur_node.next, new_node\n self.cur_node = new_node\n self.length += 1\n self.cur_pos += 1\n if self.start_node is None:\n self.start_node = self.cur_node\n # print(\"Node({}) added to {}\".format(new_node.data, self.cur_pos-1))", "def insert(self, course):\n new_node = Node(course)\n\n if self.head is None or self.head.data.number() >= new_node.data.number():\n new_node.next = self.head\n self.head = new_node\n self._size += 1\n return\n\n cur_node = self.head\n while cur_node.next and cur_node.next.data.number() < new_node.data.number():\n cur_node = cur_node.next\n new_node.next = cur_node.next\n cur_node.next = new_node\n self._size += 1", "def insert(self, new_val):\r\n if self.root is None:\r\n self.root = RBTreeNode(new_val, color=BLACK) # root has to be black\r\n else:\r\n self._insert(self.root, new_val)", "def insert(self, value):\n if value < self.value:\n if self.left:\n self.left.insert(value)\n else:\n self.left = BSTNode(value)\n else:\n if self.right:\n self.right.insert(value)\n else:\n self.right = BSTNode(value)", "def insert(self, key: int) -> None:\n if not self.root:\n self.root = TreeNode(val=key)\n return\n node = self._insert(key)\n self._inspect_changes(node)", "def insert(self, key, value=None):\n if key in self.nodes:\n return None\n else:\n new_node = Node(key, value)\n (self.nodes)[key] = new_node \n current = self.root\n last = current\n\n if current is None:\n self.root = self.nodes[key]\n self.root.height = 0\n return new_node\n\n while (current is not None):\n if new_node.key > current.key:\n last = current\n current = current.right\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n else:\n last = current\n current = current.left\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n\n if new_node.key > last.key:\n last.right = new_node\n new_node.parent = last\n else:\n last.left = new_node\n new_node.parent = last\n\n self.root.height = self.get_height_tree()\n return new_node", "def register_node(self, node):\n self.nodes.add(node)", "def insert_head(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n node.next = self.head\n self.head.prev = node\n self.head = node", "def insert(self, i, node_value):\n node = Node(node_value, None)\n if i < 0 or i > self.num_elements:\n raise IndexError(\"Insert 
index is out of range.\")\n if i == 0:\n node.next = self.head\n self.head = node\n else:\n current_node = self.head\n for j in xrange(i - 1):\n current_node = current_node.next\n node.next = current_node.next\n current_node.next = node\n self.num_elements += 1", "def insert(self, value):\n\t\tif value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = BSTreeNode(value)\n\t\t\telse:\n\t\t\t\tself.right.insert(value)\n\t\telif value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = BSTreeNode(value)\n\t\t\telse:\n\t\t\t\tself.left.insert(value)", "def insert(self, word):\n node = self.root\n for c in word:\n if c in node.children:\n node = node.children[c]\n else:\n new_node = self.Node(c)\n node.children[c] = new_node\n node = new_node\n node.word_end = True\n return", "def insert(self, val):\n\n\t\tif not self.root:\n\t\t\tself.root = BinaryTreeNode(val)\n\n\t\telse:\n\t\t\tQ = [self.root]\n\t\t\twhile Q:\n\t\t\t\tnode = Q.pop(0)\n\t\t\t\tif not node.left:\n\t\t\t\t\tnode.left = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.left)\n\n\t\t\t\tif not node.right:\n\t\t\t\t\tnode.right = BinaryTreeNode(val)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tQ.append(node.right)\n\n\t\tself.numNodes += 1", "def insert_node_to_onnx(cls,\n node: onnx.NodeProto,\n onnx_model: onnx.ModelProto,\n idx: int = 0):\n onnx_model.graph.node.insert(idx, node)", "def add_node(self, new_node: 'GraphNode'):\n self.operator.add_node(new_node)", "def _insert(self, node, key, value_ref):\n #create a tree if there was none so far\n if node is None:\n #print ('a')\n new_node = RedBlackNode(\n RedBlackNodeRef(), key, value_ref, RedBlackNodeRef())\n elif key < node.key:\n newleft_ref = self._insert(self._follow(node.left_ref), key, value_ref)\n newleft = self.balance(self._follow(newleft_ref))\n new_node = self.balance(RedBlackNode.from_node(\n node,\n left_ref=RedBlackNodeRef(referent=newleft)))\n elif key > node.key:\n newright_ref = self._insert(self._follow(node.right_ref), key, value_ref)\n newright = self.balance(self._follow(newright_ref))\n new_node = self.balance(RedBlackNode.from_node(\n node,\n right_ref=RedBlackNodeRef(referent=newright)))\n else: #create a new node to represent this data\n new_node = RedBlackNode.from_node(node, value_ref=value_ref)\n #new_node = self._blacken(new_node)\n return RedBlackNodeRef(referent=new_node)", "def addNode(self, new_value): # Class O(n)\r\n if type(new_value) is not int: raise ValueError(\"Please, insert an integer\")\r\n h = self.head\r\n while 'next' in dir(h.next):\r\n h = h.next\r\n else:\r\n h.next = Node(new_value)", "def add_node(self, metadata, pos):\n node = Node(metadata, pos)\n self.addItem(node)\n self.nodes[node.id] = node\n return node", "def insert_after(self,node,new_node):\n new_node.next = node.next\n node.next = new_node", "def test_insert_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n self.assertEqual(myObj.insert_node(Node(1000), myObj.head), [120, 1000, 100])", "def insert(self,key, value):\n if key in self._position:\n # reset value for this node\n node_pos = self._position[key]\n node = self._heap[node_pos]\n node.value = value\n self._sink(node_pos)\n self._swim(node_pos)\n else:\n # insert a new node\n new_node = _Node(key,value)\n node_pos = len(self._heap)\n self._heap.append(new_node)\n self._position[key] = node_pos\n\n # repair priority\n self._swim(node_pos)", "def add_node (self, node):\n self.network.add_node(node.id)\n 
self.network.node[node.id] = node", "def insert(self, value):\n\t\tif value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = BSTreeNode(value, parent=self)\n\t\t\telse:\n\t\t\t\tself.right.insert(value)\n\t\telif value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = BSTreeNode(value, parent=self)\n\t\t\telse:\n\t\t\t\tself.left.insert(value)\n\t\tself.check_balance()", "def insert(self, data):\n if not self:\n self.root.append(data)\n return self\n\n parent, current = self._lookup(data)\n if current: # data equivalent node found!\n current.append(data)\n else: # equivalent node not found!\n setattr(parent, \"right\" if parent < data else \"left\", Node().append(data))\n return self", "def insert(self, data: int) -> NoReturn:\n self._insert(data=data, node=self._root)", "def insert(self, data):\r\n pass" ]
[ "0.7825037", "0.7765275", "0.7721343", "0.7457422", "0.74511695", "0.74140745", "0.73851484", "0.7339789", "0.7339789", "0.73216206", "0.73207855", "0.7279264", "0.72762483", "0.72737384", "0.72505534", "0.7235147", "0.7235147", "0.7208238", "0.719422", "0.71857184", "0.7184621", "0.71666354", "0.7134919", "0.7131146", "0.7104014", "0.7069683", "0.69740564", "0.69732106", "0.69599205", "0.6952198", "0.69349295", "0.6927594", "0.6926011", "0.69224554", "0.6921847", "0.6906707", "0.69040436", "0.68972343", "0.6892387", "0.6874281", "0.68637276", "0.6856897", "0.6850013", "0.6845088", "0.6844834", "0.68423784", "0.68422365", "0.68309826", "0.68286717", "0.6818065", "0.6814044", "0.68034685", "0.6802582", "0.6789305", "0.6781129", "0.67798764", "0.67710674", "0.67694795", "0.6765857", "0.676572", "0.6758966", "0.6750501", "0.67471373", "0.67411464", "0.67400753", "0.6736702", "0.6729508", "0.6720091", "0.6705869", "0.67010826", "0.66992027", "0.66787565", "0.66775924", "0.66742957", "0.6671728", "0.6669304", "0.66520905", "0.6639954", "0.6630022", "0.6625994", "0.6625749", "0.6622156", "0.66199225", "0.6619904", "0.6616394", "0.6613979", "0.6612865", "0.6611537", "0.6610994", "0.66092163", "0.6607913", "0.66052246", "0.6602007", "0.66008466", "0.66007084", "0.65903056", "0.6585938", "0.6583722", "0.65833825", "0.6572978", "0.6569471" ]
0.0
-1
Endpoint that sends the partnership agreement by email for the perm with id {id}.
def send_convention(request, id):
    perm = perm_models.Perm.objects.get(pk=id)
    convention_template = get_template('convention_partenariat.html')
    convention_context = {
        'perm': perm,
        'articles': perm.get_convention_information()['perm_articles'],
        'montant': round(perm.get_montant_deco_max(), 2),
        'mail': True,
    }
    context_content = convention_template.render(convention_context)
    send_mail('Convention Perm Pic\'Asso',
              'Pour lire ce message, merci d\'utiliser un navigateur ou un client mail compatible HTML.',
              DEFAULT_FROM_EMAIL, [perm.mail_resp], html_message=context_content)
    return Response(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def envoi_par_mail(self):\n cr , uid, context = self.env.args\n if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):\n raise Warning(u\"Accès non autorisé !\")\n ids=[]\n for obj in self:\n ids.append(str(obj.id))\n if len(ids)>0:\n SQL=\"\"\"\n select ai.is_mode_envoi_facture, ai.partner_id, ai.name, ai.id\n from account_invoice ai\n where \n ai.id in(\"\"\"+','.join(ids)+\"\"\") and \n ai.is_date_envoi_mail is null and \n ai.is_mode_envoi_facture like 'mail%'\n order by ai.is_mode_envoi_facture, ai.partner_id, ai.name\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n\n # ** Un mail par client*********************************************\n partners={}\n for row in result:\n if row[0]=='mail_client':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n # ** Un mail+BL par client******************************************\n for row in result:\n if row[0]=='mail_client_bl':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n #** Envoi des mails par partner ************************************\n for partner_id in partners:\n ids=partners[partner_id]\n self._envoi_par_mail(partner_id, ids)\n #*******************************************************************\n\n\n # ** Un mail par facture *******************************************\n for row in result:\n if row[0] in ['mail', 'mail_regroupe_bl']:\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************\n\n\n # ** Un mail par facture en double exemplaire **********************\n for row in result:\n if row[0]=='mail2':\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************", "async def invite(self, ctx):\n invite = f\"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=67584&scope=bot\"\n await ctx.send(embed=discord.Embed(\n color=discord.colour.Colour.teal(),\n description=f\":mailbox_with_mail: [Invite]({invite}) me to your server!\"))", "def send_justificatif(request, id):\n perm = perm_models.Perm.objects.get(pk=id)\n info = perm.get_justificatif_information()\n justificatif_template = get_template('justificatif_paiement.html')\n justificatif_context = {\n 'perm': perm, 'articles': info['perm_articles'], 'total_ht': info['total_ht'],\n 'total_ttc': info['total_ttc'], 'tva_amounts': info['tva_amounts'], 'mail': True,\n }\n context_content = justificatif_template.render(justificatif_context)\n send_mail('Justificatif paiement Pic\\'Asso', 'Pour lire ce message, merci d\\'utiliser un navigateur ou un client mail compatible HTML.',\n DEFAULT_FROM_EMAIL, [perm.mail_resp], html_message=context_content)\n return Response(True)", "def mailissue(request):\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n issue = request.issue\n msg = _make_message(request, issue, '', '', True)\n issue.put()\n msg.put()\n\n return HttpTextResponse('OK')", "def post(self, request, *args, **kwargs):\n usuario=Usuario.objects.get(id=self.kwargs['pk'])\n if request.POST[\"esta_aprobado\"] == 'True':\n CorreoMail(\"Aprobado\",\"Usted fue apobado en el 
sistema, bienvenido!!\",usuario.user.email )\n return super(ActualizarUser, self).post(request, **kwargs)", "def api_by_id(id):\n mail = mail_dao.get_by_id(int(id))\n return _create_response([mail])", "async def invite(self, ctx):\n embed = discord.Embed(title=\"Invite\", description=f\"**{ctx.author.name}**, use this URL to invite me\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)", "def contact_linkup(self, request, pk):\n obj_api = api()\n title_contact = \"Tu contacto Linkup\"\n token = request.session['token']\n resp = obj_api.get(slug='sellers/' + pk + \"/\", token=token)\n return render(request, 'frontend/actors/client/my_account.html', {'data_user': resp, \n 'title_contact': title_contact})", "async def invite(self, ctx):\n await ctx.send(f\"**{ctx.author.name}**, use this URL to invite me\\n<{discord.utils.oauth_url(self.bot.user.id)}>\")", "async def invite(self, ctx):\n perms = discord.Permissions.text()\n perms.update(read_messages=True, manage_messages=True,\n mention_everyone=False, send_tts_messages=False)\n await ctx.send(f'Invite me here:\\n<{discord.utils.oauth_url(self.bot.user.id, perms)}>')", "async def _invite(self, ctx: Context):\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n value = (\n f\"Invite TvM Assistant to your bot by [clicking here]({invite_url}).\"\n \"\\n\\nInviting the bot will give it some management permissions. You can\"\n \" review them when you use the link.\"\n )\n\n embed = discord.Embed(color=await ctx.embed_colour(), description=value)\n embed.set_author(name=f\"Invite TvM Assistant\", icon_url=ctx.me.avatar_url)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n f\"{invite_url}\\n\\nInviting the bot will give it some management permissions.\"\n \" You can review them when you use the link.\"\n )", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def step_impl_the_msg_to_is_set_to_internal_as_string_not_array(context):\n context.bdd_helper.message_data[\"msg_to\"] = context.bdd_helper.internal_id_specific_user", "def __whatsmyid(self, update, context):\n user = self.User(update)\n output = f\"your ID is: {user.id}\"\n user.send_message(output)\n self.data_base.log(user, update.message.text, \"*\" * len(str(user.id)))", "def step_impl_the_msg_to_is_set_to_respondent_as_string_not_array(context):\n context.bdd_helper.message_data[\"msg_to\"] = context.bdd_helper.respondent_id", "def __str__(self):\n return f'Carrito de {self.user.email}'", "def get(self, request,correo, id, idevento):\n\n \n try:\n print(\"aoeuoeu\")\n reg = RegEvento.objects.get(id = id)\n print(reg.confirmacion)\n reg.confirmacion = \"Confirmado\"\n reg.save()\n\n print(\"Usuario confirmado\")\n except Exception as e: print(e)\n\n\n \n\n return redirect(\"Eventos:confirmarA\", user_mail=correo, post_id=idevento)", "def get_id(self):\n return escape(self.email)", "def emailNote(self, authenticationToken, parameters):\r\n pass", "def sent_view(request, id):\n if request.method == \"POST\":\n sent_user = request.POST['sent_user']\n read_only = request.POST.get(\"read_only\", None)\n queryset_task = Todo.objects.filter(id=id).first()\n user_sent_query = User.objects.filter(username=sent_user).first()\n if user_sent_query:\n \n queryset_task.user_to 
= user_sent_query\n queryset_task.send_to = True\n queryset_task.send_from = True\n queryset_task.save()\n if read_only:\n queryset_task.read_only = True\n queryset_task.save()\n \n return render(request, 'todos/index.html')", "def position_applicants_send_email(id):\n if current_user.id is None:\n abort(403)\n else:\n form = ContactForm(request.form)\n if request.method == 'POST' and form.validate():\n position = db.session.query(Job).get(id)\n if position is None:\n abort(404)\n emails = [u.email for u in position.users]\n message = Message(subject=form.subject.data,\n sender='info@mediville.com',\n reply_to='info@mediville.com',\n recipients=[''],\n bcc=emails,\n body=form.text.data)\n mail.send(message)\n flash(\"Message was send.\", 'success')\n return redirect(url_for('organisations.view_applicants', id=id))\n return render_template('organisations/message_send_form.html', form=form)", "def sendEmail(email_type, ctx, to=None):\n if not to:\n to = [settings.ADMIN_EMAIL]\n if email_type == \"#ACTIVATE_ACCOUNT\":\n subject = ctx['username'] + \" \" + _(\"Bienvenido!\")\n plaintext = get_template('email_welcome.txt')\n url_key = settings.PRINCIPAL_DOMAIN # change this to the original url\n variables = {\n \"project_name\": settings.PROJECT_NAME,\n \"activate_url\": url_key,\n \"full_name\": ctx['username']\n }\n html_content = render_to_string('email/email_activate_account.html', variables)\n elif email_type == \"#SEND_CREDENTIALS\":\n subject = _(u\"Datos para iniciar sesión en tu nuevo colegio\") + \" \" + unicode(settings.PROJECT_NAME, 'utf-8')\n variables = {\n \"project_name\": settings.PROJECT_NAME,\n \"full_name\": ctx['username'],\n \"school_url\": ctx['school_url'],\n \"username\": ctx['username'],\n \"password\": ctx['password'],\n \"manual_url\": settings.PRINCIPAL_DOMAIN + \"#manual\",\n }\n plaintext = get_template('email/email_welcome.txt')\n html_content = render_to_string('email/email_send_credentials.html', variables)\n else:\n plaintext = get_template('email/email_welcome.txt')\n html_content = render_to_string('email/email_welcome.txt')\n subject, to = 'Mensaje de prueba', ['no-reply@daiech.com']\n from_email = settings.EMAIL_HOST_USER\n d = Context(ctx)\n text_content = plaintext.render(d)\n html_content = html_content\n\n try:\n smtp = settings.EMAIL_HOST_PASSWORD and settings.EMAIL_HOST_USER\n except:\n smtp = None\n if smtp:\n try:\n resp = sendGmailEmail(to, subject, html_content)\n except Exception, e:\n resp = \"[WARNING] %s\" % (e)\n return resp\n else:\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n try:\n msg.send()\n return \"EMAIL enviado!\"\n except Exception, e:\n print e\n print \"Error al enviar correo electronico tipo: \", email_type, \" con plantilla HTML.\"\n return \"Correo NO enviado\"", "async def invite(self, ctx):\n await ctx.send(f'🐱You can invite me to your server using the following url:\\n{self.invite_url}'\n '\\n\\nYou will need the **Manage Server** permission to add me to a server. 
'\n f'Run `{self.heleus.command_prefix[0]}help` to see what you can customise!')", "def get(self, email, application_category):", "def __str__(self):\n\t\treturn self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def _url(self):\n return 'contact/{email}'.format(email=self.email)", "def post(self, request):\n form = DelEventoForm(request.POST)\n if form.is_valid():\n try:\n u = Evento.objects.get(id = form.cleaned_data['id'])\n correo = request.POST.get('correo', '')\n\n v = RegEvento.objects.all()\n \n\n\n for i in v:\n if(i.id_Evento == u.id and i.email_Usuario == correo):\n print(str(correo) + \"Elminado del evento\" + str(i.id_Evento))\n send_mail(\n 'Anulacion de invitacion',\n 'Has sido dado de baja del evento',\n 'pumaeventosunam@gmail.com',\n [i.email_Usuario],\n fail_silently=False,\n ) \n i.delete()\n\n \n except:\n print(\"no existe\") \n\n return render(request, self.template, self.context)", "async def invite(self, ctx):\r\n myInvite = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))\r\n await ctx.channel.send('Invite me to *your* server with this link: \\n\\n<{}>'.format(myInvite))", "def controls(email):", "def get(self, request, user_mail, post_id):\n #all_posts = Post.objects.all()\n #self.context['posts'] = all_posts\n all_posts = Evento.objects.all()\n self.context['posts'] = all_posts\n all_events = RegEvento.objects.all()\n self.context['eventos'] = all_events\n \n self.context['usuario'] = user_mail\n\n self.context['evento'] = post_id\n\n all_staff = AsigStaff.objects.all()\n self.context['staffs'] = all_staff\n all_user = User.objects.get(username = user_mail)\n self.context['user'] = all_user\n\n return render(request, self.template, self.context)", "def test_mailpiece_patch_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n mailPiecePK = MailPiece.objects.filter(user=userPK)[0].pk\n url = reverse('MailPiece-detail', kwargs={'pk': mailPiecePK})\n data = {'tracking': 9876543210,\n 'user': userPK}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a mail piece\n #that you arent the user on.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(MailPiece.objects.get(pk=mailPiecePK).user,\n data['user'])", "def post(self, request):\n\n try:\n eventoid = request.POST.get('id', '')\n correo = request.POST.get('correo', '')\n AsigStaff.objects.create(id_Evento = eventoid, email_staff = correo)\n print(\"Exito en la asignación de staff\")\n except:\n print(\"Error en la asignacion de staff\")\n\n \n return render(request, self.template, self.context)\n #return render(request, self.template, self.context)", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by 
%s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "async def 초대하기(self, ctx):\n embed = discord.Embed(title=\"저를 파티에 초대해주세요!\", description=f\"**{ctx.author.name}**, 아래의 링크를 사용하세요\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "def send_confirmation_email(user_pk):\n pass", "def get(self, request, post_id, user_mail):\n #all_posts = Post.objects.all()\n #self.context['posts'] = all_posts\n all_posts = Evento.objects.all()\n self.context['posts'] = all_posts\n all_events = RegEvento.objects.all()\n self.context['eventos'] = all_events\n\n try:\n post = Evento.objects.get(id=post_id)\n count = 0\n for i in all_events:\n if(i.id_Evento == post_id):\n count+=1\n if(count< post.cupo_maximo):\n RegEvento.objects.create(id_Evento = post_id, email_Usuario = user_mail) \n img = qrcode.make(\"http://ec2-54-167-69-130.compute-1.amazonaws.com:8000/confirmar/\" + str(user_mail) + '/' + str(post_id)) \n img.save('../static_cdn/Eventos/img/'+ str(user_mail) + '$' + str(post_id) + '.png')\n email = EmailMessage('Invitacion a evento','Aquí está tu invitacion','pumaeventosunam@gmail.com',[user_mail])\n email.attach_file('../static_cdn/Eventos/img/'+ str(user_mail)+ '$'+ str(post_id)+'.png')\n email.send()\n print(\"Exito en el registro\")\n else:\n print(\"Ya no hay cupo\")\n except Exception as e: print(e)\n\n\n\n\n return redirect(\"Eventos:vconfirmados\", user_mail = user_mail)", "def order_created(order_id):\n order = Order.objects.get(id=order_id)\n sg = sendgrid.SendGridAPIClient(apikey=SENDGRID_API_KEY)\n subject1 = 'Замовлення з магазину Меблі-Лем'\n message = 'Ваше замовлення оформлено. 
Номер вашого замовлення № {}'.format(order.id)\n message += '\\n З Вами зв\\'яжеться менеджер \\n\\n З повагою, магазин \"Меблі-Лем\"'\n content = Content(\"text/plain\", message)\n from_email = Email(ADMIN_EMAIL)\n to_email = Email(order.email)\n\n # subject2 = 'Поступило Замовлення (Меблі-Лем)'\n # message_admin = 'Замовник {0} {1} з {2} \\n оформив замовлення № {3}'.format(order.first_name,\n # order.last_name, order.address,\n # order.id)\n # message_admin += '\\n Телефон замовника {}'.format(order.phone)\n # content_admin = Content(\"text/plain\", message_admin)\n mail = Mail(from_email, subject1, to_email, content)\n response = sg.client.mail.send.post(request_body=mail.get())\n\n return response", "def manage_addMailSender( self, id='MailHost', title='', host=None, port=None, REQUEST=None ):\n self._setObject( id, MailSender( id, title, host, port ) )\n\n if REQUEST is not None:\n REQUEST.RESPONSE.redirect( REQUEST.URL1 )", "def getmessage(self, update, context):\r\n\r\n redirect_uri = \"https://thawing-ridge-47246.herokuapp.com\"\r\n\r\n # настройка соединения\r\n flow = Flow.from_client_secrets_file(\r\n 'credentials.json',\r\n scopes=SCOPES,\r\n redirect_uri=redirect_uri)\r\n\r\n code = self.get_code()\r\n\r\n flow.fetch_token(code=code, code_verifier=\"111\") # устанавливаем соединение с гуглом\r\n\r\n session = flow.authorized_session() # создаем сессию\r\n response = session.get('https://www.googleapis.com/gmail/v1/users/me/messages').json() # формируем запрос и получаем ответ сервера\r\n\r\n messages = response[\"messages\"]\r\n\r\n # у каждого из сообщений достаем id\r\n for message in messages[0:10]:\r\n mid = message['id']\r\n\r\n # получаем сообщение по id\r\n message_message = session.get(f'https://www.googleapis.com/gmail/v1/users/me/messages/{mid}').json()\r\n\r\n # информация об отправителе, получателе и теме сообщения хранится в ключе 'payload' --> 'headers'\r\n headers = message_message['payload']['headers']\r\n\r\n from_who = None\r\n to_whom = None\r\n subject = None\r\n\r\n for item in headers:\r\n if item['name'] == 'From':\r\n from_who = item['value']\r\n elif item['name'] == 'To':\r\n to_whom = item['value']\r\n elif item['name'] == 'Subject':\r\n subject = item['value']\r\n\r\n # ищем текст сообщения\r\n # достаем из сообщения его части\r\n message_payload_parts = message_message['payload']['parts']\r\n zero_part = message_payload_parts[0]\r\n\r\n if zero_part['mimeType'] == 'text/plain':\r\n self.message_without_attachments(context, message_payload_parts, from_who, to_whom, subject)\r\n elif zero_part['mimeType'] == 'multipart/alternative':\r\n self.message_with_attachments(session, mid, context, zero_part, message_payload_parts, from_who,\r\n to_whom, subject)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'Done.')", "def step_impl_the_msg_to_is_set_to_respondent(context):\n step_impl_the_msg_to_is_set_to(context, context.bdd_helper.respondent_id)", "def create_new_mail(self):\n self.driver.get(consts.TEMP_MAIL)\n soup = BeautifulSoup(self.driver.page_source)\n self.mail = soup.find(id=\"email_id\").attrs[\"data-value\"]", "async def invite(self, ctx):\n embed = discord.Embed(title='Invite links for NOVA',\n description='[<:news:730866149109137520> Required Permissions](https://discord.com/api/'\n 'oauth2/authorize?client_id=709922850953494598&permissions=1573252215&scope='\n 'bot)\\n'\n '[<:news:730866149109137520> No Permissions]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&permi'\n 
'ssions=0&scope=bot)\\n[<:news:730866149109137520> All Permissions (admin)]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&perm'\n 'issions=8&scope=bot)', color=0x5643fd)\n embed.set_footer(text='Developed by YeetVegetabales', icon_url='https://cdn.discordapp.com/avatars'\n '/569374429218603019'\n '/a_6dac6946906e498650f6c2466aa82200.gif?size'\n '=256&f=.gif')\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/54Mim4lahztGCP4hgmpy4lOdEUc4'\n '-dOeNA_x6hVHMlc/%3Fsize%3D4096/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n await ctx.send(embed=embed)", "def confirm_ss(self, cr, uid, ids,context=None):\n send_mail(self, cr, uid, ids[0], 'admin_affairs.group_admin_affair_manager',unicode(' طلب نثرية', 'utf-8'), unicode(' طلب نثرية في إنتظارك', 'utf-8'), context=context)\n return self.write(cr, uid, ids, {'state':'confirm_ss'}, context=context)", "def send(self, **kwargs):\n if hasattr(self.object, 'member'):\n self.add_to(self.object.member.user.email)\n elif hasattr(self.object, 'membership'):\n self.add_to(self.object.created_by.email)\n return super(GrantedAccessMailer, self).send(**kwargs)", "def assistenza(request):\n operator_list = User.objects.filter(groups__name='Operators')\n num = random.randint(0, len(operator_list)-1)\n random_operator = operator_list[num]\n form = ContactForm(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n mittente = request.user\n destinatario = random_operator\n data = form.cleaned_data['date']\n testo = form.cleaned_data['messaggio']\n messaggio = Messaggio(userMittente=mittente, userDestinatario=destinatario, data_ora=data, text=testo)\n messaggio.save()\n messages.add_message(request, messages.SUCCESS, 'Messaggio inviato con successo!')\n return HttpResponseRedirect('/')\n else:\n form = ContactForm()\n return render(request, 'main_page/contact.html', {'form': form, 'operator': random_operator})", "def send_email_key(request):\n if settings.EMAIL_VALIDATION == True:\n if request.user.email_isvalid:\n data = {\n 'email': request.user.email, \n 'action_type': 'key_not_sent', \n 'change_link': reverse('user_changeemail')\n }\n return render_to_response(\n 'authenticator/changeemail.html',\n RequestContext(request, data)\n )\n else:\n send_new_email_key(request.user)\n return validation_email_sent(request)\n else:\n raise Http404", "def post(self):\n return send_email(request.args)", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. 
{}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def __str__(self):\n\n return self.email", "def __str__(self):\n\n return self.email", "def delete(self,request,*args,**kwargs):\n self.object = self.get_object()\n \n usages = ReachOut.objects.all().filter(email=self.object.id)\n if len(usages) > 0:\n t = loader.get_template('follower/email_delete_forbidden.html')\n c = Context({'reason': 'This email has already been sent'})\n return HttpResponseForbidden(t.render(c))\n else:\n return super(DeleteView,self).delete(request,*args,**kwargs)", "def __str__(self):\n\n return self.email", "async def _dmid(self, ctx, id: int, *, message: str = None):\n if not isinstance(id, str):\n return await ctx.send(\"You have not entered a valid ID\")\n\n if not message:\n return await ctx.send(\"You must give a message to send.\")\n\n try:\n user = await ctx.bot.fetch_user(int(id))\n except Exception as e:\n return await ctx.send(f\"Error happened while trying to fetch user.\\n{e}\")\n\n if user.bot is True:\n return await ctx.send(\"I cannot send messages to bots\")\n\n if not user.dm_channel:\n await user.create_dm()\n\n message = \" \".join(message)\n e = discord.Embed(description=message, color=discord.Color.blurple())\n e.set_author(name=f\"Message from {ctx.author}!\", icon_url=ctx.author.avatar_url)\n e.set_footer(text=f\"Sent at {arrow.now(tz='US/Eastern').strftime('%X')} EST\", icon_url=ctx.bot.user.avatar_url)\n try:\n await user.send(embed=e)\n return await ctx.send(f\"Message has been sent to `{user}`!\")\n except discord.Forbidden:\n return await ctx.send(\"Cannot send messages to this user\")\n except discord.HTTPException:\n return await ctx.send(\"Message failed.\")\n except Exception as e:\n await ctx.send(f\"Error while sending embed. {e}\")", "async def _invite(self, ctx: Context):\n\n # read_messages=True,\n # send_messages=True,\n # manage_messages=True,\n # embed_links=True,\n # attach_files=True,\n # external_emojis=True,\n # add_reactions=True\n perms = discord.Permissions(322624)\n\n try:\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perms)\n value = (\n \"Add Brawlcord to your server by **[clicking here]\"\n f\"({invite_url})**.\\n\\n**Note:** By using the link\"\n \" above, Brawlcord will be able to\"\n \" read messages,\"\n \" send messages,\"\n \" manage messages,\"\n \" embed links,\"\n \" attach files,\"\n \" add reactions,\"\n \" and use external emojis\"\n \" wherever allowed.\\n\\n*You can remove the permissions manually,\"\n \" but that may break the bot.*\"\n )\n except Exception as exc:\n invite_url = None\n value = (\n f\"Error \\\"{exc}\\\" while generating invite link.\"\n \" Notify bot owner using the `-report` command.\"\n )\n\n embed = discord.Embed(color=EMBED_COLOR, description=value)\n embed.set_author(\n name=f\"Invite {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n # embed.add_field(name=\"__**Invite Link:**__\", value=value)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n \"I do not have the permission to embed a link.\"\n \" Please give/ask someone to give me that permission.\"\n )", "async def 서버(self, ctx):\n if isinstance(ctx.channel, discord.DMChannel) or ctx.guild.id != 749595288280498188:\n return await ctx.send(f\"**여기로! 
{ctx.author.name} 🍻\\n<{self.config.botserver}>**\")\n\n await ctx.send(f\"**{ctx.author.name}** 이게 제 집이잖아요~ :3\")", "def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)", "def put(self,id):\n adm = Administration()\n cm = ChatMessage.from_dict(api.payload)\n\n if cm is not None:\n\n cm.set_id(id)\n adm.save_chatmessage(cm)\n return '', 200\n\n else:\n return '', 500", "def hello():\n email = request.args.get('email')\n\n # Logic to select receipient\n # # TODO:\n\n my_sender='pythondistributionbot@gmail.com' # Sender Mail\n my_pass = 'czwcekimsscsixzx' # Sender PW\n my_user='drcharlesshi@gmail.com' # Reciver Mail\n try:\n msg=MIMEText('填写邮件内容','plain','utf-8')\n msg['From']=formataddr([\"FromRunoob\",my_sender]) # Sender Nick/mail\n msg['To']=formataddr([\"FK\",email]) # Reciver Nike/Mail\n msg['Subject']=\"发送邮件测试\" # Topic\n\n server=smtplib.SMTP_SSL(\"smtp.gmail.com\", 465) # SMTP,port 465\n server.login(my_sender, my_pass) # 括号中对应的是发件人邮箱账号、邮箱密码\n server.sendmail(my_sender,[my_user,],msg.as_string()) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件\n server.quit() # 关闭连接\n except Exception: # 如果 try 中的语句没有执行,则会执行下面的 ret=False\n return 'Failure ' + request.args.get('email')\n return 'Success ' +", "def post(self, request):\n config_name = request.POST.get('config')\n email = request.POST.get('recipient')\n config = MailConfig.objects.get(name=config_name)\n version = TemplateVersion.objects.active(config.template.name)\n message = utils.render(config_name, email, version.test_data)\n pk = utils.send(\n f'[TEST] {message.subject}',\n message.from_email,\n message.to_email,\n message.body\n )\n return JsonResponse({'id': pk})", "async def botinvite_command(self, ctx):\n invite = f\"https://discord.com/api/oauth2/authorize?client_id={self.client.user.id}&permissions=1374809815&scope=bot\"\n await ctx.send(invite)", "async def userfromid(ctx, iden:int):\n user = bot.get_user(iden)\n await ctx.send(user.mention)", "def index_add(self, mail):\n\t\tself.db.execute('INSERT INTO Mails (\"MD5-Value\", \"Message-ID\", \"From\", \"To\", \"Cc\", \"Bcc\", Date, \"In-Reply-To\", Subject) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)', (self.index_md5_value(mail), email.utils.unquote(header_decode(mail['Message-ID'])), header_decode(mail['From']), header_decode(mail['To']), header_decode(mail['CC']), header_decode(mail['BCC']), header_decode(mail['Date']), email.utils.unquote(header_decode(mail['In-Reply-To'])), header_decode(mail['Subject'])))\n\t\tself.db.commit()", "def sendEmail(body, subject, email=\"\"):\n dest = [\"micneeley14@gmail.com\", \"hunterreid49@gmail.com\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"michael@neeley.dev\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def 
send_new_credentials(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_generate(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "def Admin_affairs_manager_confirmed(self, cr, uid, ids,context=None):\n send_mail(self, cr, uid, ids[0], 'base_custom.group_general_hr_manager',unicode(' طلب نثرية', 'utf-8'), unicode(' طلب نثرية في إنتظارك', 'utf-8'), context=context)\n return self.write(cr, uid, ids, {'state':'Admin_affairs_manager_confirmed'}, context=context)", "def rispondi(request, messageID):\n form = ContactForm(request.POST)\n message = Messaggio.objects.get(id=messageID)\n message.letto = True\n message.save()\n form = ContactForm(request.POST)\n mittente = message.userDestinatario\n destinatario = message.userMittente\n if request.method == 'POST':\n if form.is_valid():\n data = form.cleaned_data['date']\n testo = form.cleaned_data['messaggio']\n messaggio = Messaggio(userMittente=mittente, userDestinatario=destinatario, data_ora=data, text=testo)\n messaggio.save()\n messages.add_message(request, messages.SUCCESS, 'Messaggio inviato con successo!')\n return HttpResponseRedirect('/')\n else:\n form = ContactForm()\n return render(request, 'main_page/response.html', {'form': form, 'mittente':mittente, 'destinatario':destinatario, 'message':message})", "def edit_status(self,id,type,status):\n\n current_user = get_jwt_identity()\n try:\n con = init_db()\n cur = con.cursor()\n cur.execute(\"SELECT is_admin FROM users WHERE email = %s\",(current_user,))\n user = cur.fetchall() \n user_role = user[0][0] \n \n if user_role != True:\n return{\n \"Status\": 403,\n \"Message\":\"Unauthorized user\" \n },403 \n cur.execute(\"SELECT user_id FROM incidents WHERE \\\n incident_id = %s AND type = %s\",(id,type))\n record = cur.fetchall()\n if not record:\n return{\n \"Status\": 404,\n \"Message\": \"Record does not exist\"\n },404 \n\n user =record[0][0] \n cur.execute(\"SELECT email FROM users WHERE user_id = %s\",(user,))\n user_email = cur.fetchall()\n email = user_email[0][0]\n\n cur.execute(\"UPDATE incidents SET status = %s WHERE \\\n incident_id = %s and type = %s \\\n RETURNING incident_id,type,location,status,comment,user_id\",\n (status,id,type))\n updated_record = cur.fetchone()\n close_connection(con)\n new_record = {\n \"Created by\":updated_record[5],\n \"Incident Id\":updated_record[0],\n \"Type\":updated_record[1],\n \"Location\":updated_record[2],\n \"Status\":updated_record[3],\n \"Comment\":updated_record[4]\n }\n #send mail\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login(\n \"projectemail2303@gmail.com\", \"Projectemail_2303\")\n msg = \"Your {} record is now {}\".format(type, status)\n server.sendmail(\n \"projectemail2303@gmail.com\", email, msg)\n server.quit() \n #update admin after status change \n return{\n \"Status\": 200,\n \"Message\":\"Updated \" + type + \" record status\",\n \"Data\": new_record\n }\n except (Exception,psycopg2.DatabaseError) as error:\n print(error)\n return{\n \"message\":\"Record has not been edited please try again\"\n }", "async def invite(self, context: Context) -> None:\n embed = discord.Embed(\n description=f\"Invite me by clicking [here](https://discordapp.com/oauth2/authorize?&client_id={self.bot.config['application_id']}&scope=bot+applications.commands&permissions={self.bot.config['permissions']}).\",\n color=0xD75BF4,\n )\n try:\n # To know what permissions to give to your bot, please see here: 
https://discordapi.com/permissions.html and remember to not give Administrator permissions.\n await context.author.send(embed=embed)\n await context.send(\"I sent you a private message!\")\n except discord.Forbidden:\n await context.send(embed=embed)", "def ad_rep_email(obj):\n return '%s' % obj.ad_rep.email", "def idme(bot, update):\n update.message.reply_text(\"Your ID is: \" + str(update.message.from_user.id))", "def delegate_remainder(template=None):\n\n regs = Registration.objects.all()\n\n for reg in regs:\n subject = DEF_REMAINDER_ACCO_CONTACT_SUBJECT\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.get_full_name()})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='info@scipy.in')", "def ZeusEmailId(cls, mail_domain):\n return ('zeus+%s+noreply@%s.%s' %\n (PipelineConfig.Instance().pipeline_id(), socket.gethostname(),\n mail_domain))", "def send_confirm_email(request,uid):\n user=models.UserProfile.objects.get(id=uid)\n current_site=get_current_site(request)\n email_subject='Activate Your Account'\n message=render_to_string('activate_account.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(uid)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return JsonResponse(\n {\n \"status\":\"The confirmation email has been sent.\",\n }\n )", "def mailing_id(self, val: str):\n self._mailing_id = val", "def invitation(request):\n # Check Email is valid\n email = request.DATA['email']\n try:\n validate_email(email)\n except ValidationError:\n return Response({'status': '400', 'code': 'E_INVALID_EMAIL',\n 'detail': code['E_INVALID_EMAIL']}, status=400)\n # Email Info\n subject = 'Golfconnect Invitation Email'\n email = [request.DATA['email']]\n message = '<p>' + request.DATA['content'] + '</p>'\n # Create Email\n send_ok = send_email(subject, message, email)\n if send_ok:\n return Response({'status': '200', 'code': 'OK_SEND_EMAIL',\n 'detail': code['OK_SEND_EMAIL']}, status=200)\n else:\n return Response({'status': '400', 'code': 'E_SEND_EMAIL_FAIL',\n 'detail': code['E_SEND_EMAIL_FAIL']}, status=400)", "def post(self, new_mail, datasource=\"tranquility\",**kwargs):\n kwargs_dict ={\n\"new_mail\" : new_mail, \"datasource\" : datasource, \n }\n kwargs_dict.update(kwargs)\n return EsiRequestObject(self.base_url, self.post_responses) \\\n .post(**kwargs_dict)", "async def invite(ctx):\n permissions = 2134207679\n url = discord.utils.oauth_url(client_id=bot.user.id, permissions=discord.Permissions(permissions=permissions),\n scopes=(\"bot\", \"applications.commands\"))\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"Invite\", url=url))\n await ctx.respond(\"I'm glad you want to add me to your server, here's a link!\", view=view)", "def ___str__(self):\n return self.email", "async def put_user_byid(request):\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for user_id\", status=400)\n\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n if user.username == \"admin\":\n return web.Response(status=400, text=\"Cannot change admin\")\n\n is_admin = request.GET.getone(\"is_admin\", None) # str \"true\" or \"flase\"\n if not is_admin: # if None\n return 
web.Response(text=\"Nothing to change\", status=204)\n\n if is_admin.lower() == \"true\":\n user.is_admin = True\n data = {\"result\": \"{u} is now admin \".format(u=user.username)}\n elif is_admin.lower() == \"false\":\n user.is_admin = False\n data = {\"result\": \"{u} is no longer admin \".format(u=user.username)}\n\n try:\n request.cirrina.db_session.commit() # pylint: disable=no-member\n except sqlalchemy.exc.DataError:\n request.cirrina.db_session.rollback() # pylint: disable=no-member\n return web.Response(status=500, text=\"Database error\")\n\n # TODO : change to a multicast group\n await app.websocket_broadcast(\n {\n \"event\": Event.changed.value,\n \"subject\": Subject.user.value,\n \"changed\": {\"id\": user_id, \"is_admin\": user.is_admin},\n }\n )\n\n return web.json_response(data)", "def email_update(request, update_id):\n update = BuzzartUpdate.objects.get(id=update_id)\n project = update.project\n subject, from_email, to = update.title, settings.NOTIFIER_FROM_MAIL, project.email\n bcc = settings.NOTIFIER_BCC\n text_content = update.update \n static_full_url = request.build_absolute_uri(settings.STATIC_URL)\n dashboard_url = request.build_absolute_uri('/dashboard/{}'.format(project.id)) \n #html_content = '<p>{}</p><p>Bekijk je dashboard hier: <a href=\"{}\">{}</a></p>'.format(update.update, dashboard_url, dashboard_url) \n html_content = render_to_string('update-mailing.html', \n {'update': update, \n 'project': project,\n 'dashboard_url': dashboard_url,\n 'static_full_url' : static_full_url}, \n context_instance=RequestContext(request)) \n msg = EmailMultiAlternatives(subject, text_content, from_email, [to], bcc)\n msg.attach_alternative(html_content, \"text/html\")\n data = msg.send()\n if data: \n update.mail_sent = True\n update.save()\n return redirect('/dashboard/{}'.format(project.id))\n else:\n return HttpResponse(json.dumps('Sending mail failed'), content_type='application/json')", "def horde_message(self, message):", "def get_personalized_notification_email_text(personal_id):\n return notification_email_text % (personal_id, personal_id)", "def invitation(id):\n invitation = get_required(Invitation, id)\n if g.user == invitation.inviter.user:\n flash(\"You can't send an invitation to yourself.\")\n return redirect(url_for('front'))\n if invitation.acceptor_member_id:\n flash(\"This invitation has already been used.\")\n return redirect(url_for('front'))\n clicked_invitation(invitation)\n db.session.commit()\n return redirect(invitation.circle.url)" ]
[ "0.6005149", "0.5587609", "0.5556423", "0.5496287", "0.52939004", "0.5288747", "0.52882147", "0.52684224", "0.52666515", "0.5243778", "0.5187994", "0.5171401", "0.51496404", "0.5147281", "0.5110905", "0.50985926", "0.5092945", "0.50828236", "0.50735444", "0.5072316", "0.5068105", "0.50600207", "0.49981424", "0.49957234", "0.49880904", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49834704", "0.49730006", "0.49543458", "0.4952574", "0.4952511", "0.49501792", "0.49385333", "0.49377215", "0.49355072", "0.49351507", "0.49010196", "0.4889851", "0.48860276", "0.4885387", "0.48799807", "0.48798484", "0.48744872", "0.48680073", "0.48654693", "0.48641238", "0.48623607", "0.48503685", "0.48430982", "0.4832005", "0.4825005", "0.48233843", "0.48233843", "0.48222", "0.48111805", "0.48004532", "0.4794974", "0.47930133", "0.4792349", "0.47861415", "0.47823912", "0.47812942", "0.47785076", "0.4776569", "0.4772784", "0.47719172", "0.4771366", "0.4771191", "0.47698337", "0.47652328", "0.4755866", "0.47551945", "0.4754479", "0.47525895", "0.4750108", "0.47379178", "0.4730665", "0.47269717", "0.47229272", "0.47199836", "0.47191685", "0.47141483", "0.47140768", "0.47139126", "0.47115818", "0.47098702" ]
0.6470842
0
Endpoint that sends the payment receipt (justificatif) by email for the perm with id {id}.
def send_justificatif(request, id): perm = perm_models.Perm.objects.get(pk=id) info = perm.get_justificatif_information() justificatif_template = get_template('justificatif_paiement.html') justificatif_context = { 'perm': perm, 'articles': info['perm_articles'], 'total_ht': info['total_ht'], 'total_ttc': info['total_ttc'], 'tva_amounts': info['tva_amounts'], 'mail': True, } context_content = justificatif_template.render(justificatif_context) send_mail('Justificatif paiement Pic\'Asso', 'Pour lire ce message, merci d\'utiliser un navigateur ou un client mail compatible HTML.', DEFAULT_FROM_EMAIL, [perm.mail_resp], html_message=context_content) return Response(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_convention(request, id):\n perm = perm_models.Perm.objects.get(pk=id)\n convention_template = get_template('convention_partenariat.html')\n convention_context = {\n 'perm': perm,\n 'articles': perm.get_convention_information()['perm_articles'],\n 'montant': round(perm.get_montant_deco_max(), 2),\n 'mail': True,\n }\n context_content = convention_template.render(convention_context)\n send_mail('Convention Perm Pic\\'Asso', 'Pour lire ce message, merci d\\'utiliser un navigateur ou un client mail compatible HTML.',\n DEFAULT_FROM_EMAIL, [perm.mail_resp], html_message=context_content)\n return Response(True)", "def send(self, **kwargs):\n if hasattr(self.object, 'member'):\n self.add_to(self.object.member.user.email)\n elif hasattr(self.object, 'membership'):\n self.add_to(self.object.created_by.email)\n return super(GrantedAccessMailer, self).send(**kwargs)", "def envoi_par_mail(self):\n cr , uid, context = self.env.args\n if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):\n raise Warning(u\"Accès non autorisé !\")\n ids=[]\n for obj in self:\n ids.append(str(obj.id))\n if len(ids)>0:\n SQL=\"\"\"\n select ai.is_mode_envoi_facture, ai.partner_id, ai.name, ai.id\n from account_invoice ai\n where \n ai.id in(\"\"\"+','.join(ids)+\"\"\") and \n ai.is_date_envoi_mail is null and \n ai.is_mode_envoi_facture like 'mail%'\n order by ai.is_mode_envoi_facture, ai.partner_id, ai.name\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n\n # ** Un mail par client*********************************************\n partners={}\n for row in result:\n if row[0]=='mail_client':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n # ** Un mail+BL par client******************************************\n for row in result:\n if row[0]=='mail_client_bl':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n #** Envoi des mails par partner ************************************\n for partner_id in partners:\n ids=partners[partner_id]\n self._envoi_par_mail(partner_id, ids)\n #*******************************************************************\n\n\n # ** Un mail par facture *******************************************\n for row in result:\n if row[0] in ['mail', 'mail_regroupe_bl']:\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************\n\n\n # ** Un mail par facture en double exemplaire **********************\n for row in result:\n if row[0]=='mail2':\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************", "async def invite(self, ctx):\n invite = f\"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=67584&scope=bot\"\n await ctx.send(embed=discord.Embed(\n color=discord.colour.Colour.teal(),\n description=f\":mailbox_with_mail: [Invite]({invite}) me to your server!\"))", "def position_applicants_send_email(id):\n if current_user.id is None:\n abort(403)\n else:\n form = ContactForm(request.form)\n if request.method == 'POST' and form.validate():\n position = db.session.query(Job).get(id)\n if position is None:\n abort(404)\n emails = [u.email for u in 
position.users]\n message = Message(subject=form.subject.data,\n sender='info@mediville.com',\n reply_to='info@mediville.com',\n recipients=[''],\n bcc=emails,\n body=form.text.data)\n mail.send(message)\n flash(\"Message was send.\", 'success')\n return redirect(url_for('organisations.view_applicants', id=id))\n return render_template('organisations/message_send_form.html', form=form)", "def sent_view(request, id):\n if request.method == \"POST\":\n sent_user = request.POST['sent_user']\n read_only = request.POST.get(\"read_only\", None)\n queryset_task = Todo.objects.filter(id=id).first()\n user_sent_query = User.objects.filter(username=sent_user).first()\n if user_sent_query:\n \n queryset_task.user_to = user_sent_query\n queryset_task.send_to = True\n queryset_task.send_from = True\n queryset_task.save()\n if read_only:\n queryset_task.read_only = True\n queryset_task.save()\n \n return render(request, 'todos/index.html')", "def post(self, request, *args, **kwargs):\n usuario=Usuario.objects.get(id=self.kwargs['pk'])\n if request.POST[\"esta_aprobado\"] == 'True':\n CorreoMail(\"Aprobado\",\"Usted fue apobado en el sistema, bienvenido!!\",usuario.user.email )\n return super(ActualizarUser, self).post(request, **kwargs)", "def api_by_id(id):\n mail = mail_dao.get_by_id(int(id))\n return _create_response([mail])", "def get(self, request, user_mail, post_id):\n #all_posts = Post.objects.all()\n #self.context['posts'] = all_posts\n all_posts = Evento.objects.all()\n self.context['posts'] = all_posts\n all_events = RegEvento.objects.all()\n self.context['eventos'] = all_events\n \n self.context['usuario'] = user_mail\n\n self.context['evento'] = post_id\n\n all_staff = AsigStaff.objects.all()\n self.context['staffs'] = all_staff\n all_user = User.objects.get(username = user_mail)\n self.context['user'] = all_user\n\n return render(request, self.template, self.context)", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def mailissue(request):\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n issue = request.issue\n msg = _make_message(request, issue, '', '', True)\n issue.put()\n msg.put()\n\n return HttpTextResponse('OK')", "def mail(request):\n email_admin.delay('testinggg')\n return JsonResponse({\"details\":\"working\"})", "def manage_addMailSender( self, id='MailHost', title='', host=None, port=None, REQUEST=None ):\n self._setObject( id, MailSender( id, title, host, port ) )\n\n if REQUEST is not None:\n REQUEST.RESPONSE.redirect( REQUEST.URL1 )", "def emailNote(self, authenticationToken, parameters):\r\n pass", "async def _invite(self, ctx: Context):\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n value = (\n f\"Invite TvM Assistant to your bot by [clicking here]({invite_url}).\"\n \"\\n\\nInviting the bot will give it some management permissions. 
You can\"\n \" review them when you use the link.\"\n )\n\n embed = discord.Embed(color=await ctx.embed_colour(), description=value)\n embed.set_author(name=f\"Invite TvM Assistant\", icon_url=ctx.me.avatar_url)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n f\"{invite_url}\\n\\nInviting the bot will give it some management permissions.\"\n \" You can review them when you use the link.\"\n )", "async def invite(self, ctx):\n perms = discord.Permissions.text()\n perms.update(read_messages=True, manage_messages=True,\n mention_everyone=False, send_tts_messages=False)\n await ctx.send(f'Invite me here:\\n<{discord.utils.oauth_url(self.bot.user.id, perms)}>')", "def post(self):\n return send_email(request.args)", "def __whatsmyid(self, update, context):\n user = self.User(update)\n output = f\"your ID is: {user.id}\"\n user.send_message(output)\n self.data_base.log(user, update.message.text, \"*\" * len(str(user.id)))", "def email_admins(subject, message):\n mail_admins(subject, message=message)", "def claim_email(request):\n email = request.POST.get('email', '')\n email_user = User.objects.filter(email=email)\n payload = {\n 'res': 'failed'\n }\n if email_user.exists() and \\\n not email_user[0].profile.send_mail:\n request.user.profile.add_email(email)\n payload['res'] = 'success'\n\n return payload", "def send_personal_group_invite(request):\n\tif request.method == \"POST\":\n\t\tinvite_decision = request.POST.get('invite_dec',None)\n\t\tif invite_decision == '1':\n\t\t\tcontext = {'set_privacy':True,'target_av_url':request.session.get(\"personal_group_invite_target_av_url\",None),\\\n\t\t\t'tun':request.session.get(\"personal_group_invite_target_username\",None)}\n\t\t\treturn render(request,\"personal_group/invites/personal_group_status.html\",context)\n\t\telse:\n\t\t\torigin, poid, target_username = request.POST.get('org',None), request.POST.get('poid',None), request.POST.get('nickname',None)\n\t\t\tif origin == 'publicreply':\n\t\t\t\tif poid:\n\t\t\t\t\trequest.session[\"link_pk\"] = poid\n\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\treturn redirect(\"publicreply_view\")\n\t\t\t\telse:\n\t\t\t\t\treturn redirect(\"home\")\n\t\t\telse:\n\t\t\t\treturn return_to_source(origin,poid,target_username)\n\telse:\n\t\treturn redirect(\"home\")", "def filter_mail(self, mail):\n\t\ttry:\n\t\t\tself.filtered += 1\n\t\t\tif self.filter_mail_pass(mail):\n\t\t\t\tif self.export_payload or self.reduce_payload:\n\t\t\t\t\tself.payload_parse(mail)\n\t\t\t\tif self.indexing: # and not caching # disables indexing\n\t\t\t\t\tself.index_add(mail)\n\t\t\t\tself.resultset_add(mail)\n\t\t\t\tself.passed += 1\n\t\texcept sqlite3.IntegrityError as excp:\n\t\t\tself.error(\"can't add mail twice to result index\", mail)\n\t\texcept:\n\t\t\t#traceback.print_tb(sys.exc_info()[2])\n\t\t\tself.error(str(sys.exc_info()[1]), mail)", "def event_guestlist_mailinglist(id):\n check_admin()\n guests = []\n message = \"Hi attend this event\"\n subject = \"Event Invite: \"\n\n guestList = GuestList.query.filter_by(event_id=id).all()\n for guest in guestList:\n guests.append(User.query.get_or_404(guest.guest_id))\n\n form = EmailForm()\n if form.validate_on_submit():\n subject = form.subject.data \n body = form.body.data\n try:\n flash('Email sent to guestlist mailing list')\n send_email_to_users(guests, subject, body)\n return redirect(url_for('admin.list_events'))\n \n except:\n #in case email fails\n flash('ERROR')\n\n \n return 
render_template('admin/events/mailinglist.html',\n form = form, users=guests, title=\"mailinglist\", id=id)", "def send_ext_customer_task(email,name,password,phone,shop,address,lead_mail,mem_mail,website):\n print(\"member email\",mem_mail)\n logger.info(\"in sending existing customer mail task\")\n return send_ext_customer_mail(email,name,password,phone,shop,address,lead_mail,mem_mail,website)", "def send_confirmation_email(user_pk):\n pass", "def sendEmail(email_type, ctx, to=None):\n if not to:\n to = [settings.ADMIN_EMAIL]\n if email_type == \"#ACTIVATE_ACCOUNT\":\n subject = ctx['username'] + \" \" + _(\"Bienvenido!\")\n plaintext = get_template('email_welcome.txt')\n url_key = settings.PRINCIPAL_DOMAIN # change this to the original url\n variables = {\n \"project_name\": settings.PROJECT_NAME,\n \"activate_url\": url_key,\n \"full_name\": ctx['username']\n }\n html_content = render_to_string('email/email_activate_account.html', variables)\n elif email_type == \"#SEND_CREDENTIALS\":\n subject = _(u\"Datos para iniciar sesión en tu nuevo colegio\") + \" \" + unicode(settings.PROJECT_NAME, 'utf-8')\n variables = {\n \"project_name\": settings.PROJECT_NAME,\n \"full_name\": ctx['username'],\n \"school_url\": ctx['school_url'],\n \"username\": ctx['username'],\n \"password\": ctx['password'],\n \"manual_url\": settings.PRINCIPAL_DOMAIN + \"#manual\",\n }\n plaintext = get_template('email/email_welcome.txt')\n html_content = render_to_string('email/email_send_credentials.html', variables)\n else:\n plaintext = get_template('email/email_welcome.txt')\n html_content = render_to_string('email/email_welcome.txt')\n subject, to = 'Mensaje de prueba', ['no-reply@daiech.com']\n from_email = settings.EMAIL_HOST_USER\n d = Context(ctx)\n text_content = plaintext.render(d)\n html_content = html_content\n\n try:\n smtp = settings.EMAIL_HOST_PASSWORD and settings.EMAIL_HOST_USER\n except:\n smtp = None\n if smtp:\n try:\n resp = sendGmailEmail(to, subject, html_content)\n except Exception, e:\n resp = \"[WARNING] %s\" % (e)\n return resp\n else:\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n try:\n msg.send()\n return \"EMAIL enviado!\"\n except Exception, e:\n print e\n print \"Error al enviar correo electronico tipo: \", email_type, \" con plantilla HTML.\"\n return \"Correo NO enviado\"", "def create_new_mail(self):\n self.driver.get(consts.TEMP_MAIL)\n soup = BeautifulSoup(self.driver.page_source)\n self.mail = soup.find(id=\"email_id\").attrs[\"data-value\"]", "def delete(self,request,*args,**kwargs):\n self.object = self.get_object()\n \n usages = ReachOut.objects.all().filter(email=self.object.id)\n if len(usages) > 0:\n t = loader.get_template('follower/email_delete_forbidden.html')\n c = Context({'reason': 'This email has already been sent'})\n return HttpResponseForbidden(t.render(c))\n else:\n return super(DeleteView,self).delete(request,*args,**kwargs)", "def get_personalized_notification_email_text(personal_id):\n return notification_email_text % (personal_id, personal_id)", "def send_email_key(request):\n if settings.EMAIL_VALIDATION == True:\n if request.user.email_isvalid:\n data = {\n 'email': request.user.email, \n 'action_type': 'key_not_sent', \n 'change_link': reverse('user_changeemail')\n }\n return render_to_response(\n 'authenticator/changeemail.html',\n RequestContext(request, data)\n )\n else:\n send_new_email_key(request.user)\n return validation_email_sent(request)\n else:\n raise Http404", "def 
send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "async def invite(self, ctx):\n embed = discord.Embed(title=\"Invite\", description=f\"**{ctx.author.name}**, use this URL to invite me\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)", "def send_mail(self, subject):\r\n pass", "def assistenza(request):\n operator_list = User.objects.filter(groups__name='Operators')\n num = random.randint(0, len(operator_list)-1)\n random_operator = operator_list[num]\n form = ContactForm(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n mittente = request.user\n destinatario = random_operator\n data = form.cleaned_data['date']\n testo = form.cleaned_data['messaggio']\n messaggio = Messaggio(userMittente=mittente, userDestinatario=destinatario, data_ora=data, text=testo)\n messaggio.save()\n messages.add_message(request, messages.SUCCESS, 'Messaggio inviato con successo!')\n return HttpResponseRedirect('/')\n else:\n form = ContactForm()\n return render(request, 'main_page/contact.html', {'form': form, 'operator': random_operator})", "def mail_sent():\n\n url = settings.SITE_URL + '\\charts'\n subject = 'Анализ запрошенного ресурса'\n message = 'Графики популярного часа дня и дня недели {}'.format(url)\n mail_sent = send_mail(subject,\n message,\n 'admin@myshop.com',\n ['user@mail.ru,'])\n print(message)\n return mail_sent", "def protected_user():\n message = f\"welcome {flask_praetorian.current_user().username} admin, this is protected endpoint\"\n return {\"message\": message}", "def post(self, request):\n form = DelEventoForm(request.POST)\n if form.is_valid():\n try:\n u = Evento.objects.get(id = form.cleaned_data['id'])\n correo = request.POST.get('correo', '')\n\n v = RegEvento.objects.all()\n \n\n\n for i in v:\n if(i.id_Evento == u.id and i.email_Usuario == correo):\n print(str(correo) + \"Elminado del evento\" + str(i.id_Evento))\n send_mail(\n 'Anulacion de invitacion',\n 'Has sido dado de baja del evento',\n 'pumaeventosunam@gmail.com',\n [i.email_Usuario],\n fail_silently=False,\n ) \n i.delete()\n\n \n except:\n print(\"no existe\") \n\n return render(request, self.template, self.context)", "def send_mail(Email_id,OTP):\r\n try : \r\n s = smtplib.SMTP('smtp.gmail.com', 587) \r\n s.ehlo()\r\n # start TLS for security \r\n s.starttls() \r\n # Authentication \r\n s.login(mail_id,mail_Password) \r\n message = str(OTP)\r\n # sending the mail \r\n s.sendmail(mail_id, Email_id, message) \r\n # terminating the session \r\n s.quit() \r\n msg=\"Mail has been sent to Registered mail id.\"\r\n except :\r\n msg=\"UserName and Password not accepted kindly provide correct UserName and Password.\"\r\n return msg", "def email_update(request, update_id):\n update = BuzzartUpdate.objects.get(id=update_id)\n project 
= update.project\n subject, from_email, to = update.title, settings.NOTIFIER_FROM_MAIL, project.email\n bcc = settings.NOTIFIER_BCC\n text_content = update.update \n static_full_url = request.build_absolute_uri(settings.STATIC_URL)\n dashboard_url = request.build_absolute_uri('/dashboard/{}'.format(project.id)) \n #html_content = '<p>{}</p><p>Bekijk je dashboard hier: <a href=\"{}\">{}</a></p>'.format(update.update, dashboard_url, dashboard_url) \n html_content = render_to_string('update-mailing.html', \n {'update': update, \n 'project': project,\n 'dashboard_url': dashboard_url,\n 'static_full_url' : static_full_url}, \n context_instance=RequestContext(request)) \n msg = EmailMultiAlternatives(subject, text_content, from_email, [to], bcc)\n msg.attach_alternative(html_content, \"text/html\")\n data = msg.send()\n if data: \n update.mail_sent = True\n update.save()\n return redirect('/dashboard/{}'.format(project.id))\n else:\n return HttpResponse(json.dumps('Sending mail failed'), content_type='application/json')", "def contact_linkup(self, request, pk):\n obj_api = api()\n title_contact = \"Tu contacto Linkup\"\n token = request.session['token']\n resp = obj_api.get(slug='sellers/' + pk + \"/\", token=token)\n return render(request, 'frontend/actors/client/my_account.html', {'data_user': resp, \n 'title_contact': title_contact})", "def test_mailpiece_patch_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n mailPiecePK = MailPiece.objects.filter(user=userPK)[0].pk\n url = reverse('MailPiece-detail', kwargs={'pk': mailPiecePK})\n data = {'tracking': 9876543210,\n 'user': userPK}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a mail piece\n #that you arent the user on.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(MailPiece.objects.get(pk=mailPiecePK).user,\n data['user'])", "def sent_to_view(request):\n context= {}\n queryset_todos = Todo.objects.filter(user_to_id=request.user.id,send_from=True)\n context['sent_from_tasks'] = queryset_todos\n return render(request, \"todos/send_from.html\",context)", "async def invite(self, ctx):\n await ctx.send(f\"**{ctx.author.name}**, use this URL to invite me\\n<{discord.utils.oauth_url(self.bot.user.id)}>\")", "def index_add(self, mail):\n\t\tself.db.execute('INSERT INTO Mails (\"MD5-Value\", \"Message-ID\", \"From\", \"To\", \"Cc\", \"Bcc\", Date, \"In-Reply-To\", Subject) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)', (self.index_md5_value(mail), email.utils.unquote(header_decode(mail['Message-ID'])), header_decode(mail['From']), header_decode(mail['To']), header_decode(mail['CC']), header_decode(mail['BCC']), header_decode(mail['Date']), email.utils.unquote(header_decode(mail['In-Reply-To'])), header_decode(mail['Subject'])))\n\t\tself.db.commit()", "def send_reminder(self):\n pass", "def get_id(self):\n return escape(self.email)", "def get(self, request, post_id, user_mail):\n #all_posts = Post.objects.all()\n #self.context['posts'] = all_posts\n all_posts = Evento.objects.all()\n self.context['posts'] = all_posts\n all_events = RegEvento.objects.all()\n self.context['eventos'] = all_events\n\n try:\n post = Evento.objects.get(id=post_id)\n count = 0\n for i in all_events:\n if(i.id_Evento == post_id):\n count+=1\n if(count< post.cupo_maximo):\n RegEvento.objects.create(id_Evento = post_id, email_Usuario = user_mail) \n img = qrcode.make(\"http://ec2-54-167-69-130.compute-1.amazonaws.com:8000/confirmar/\" + str(user_mail) + 
'/' + str(post_id)) \n img.save('../static_cdn/Eventos/img/'+ str(user_mail) + '$' + str(post_id) + '.png')\n email = EmailMessage('Invitacion a evento','Aquí está tu invitacion','pumaeventosunam@gmail.com',[user_mail])\n email.attach_file('../static_cdn/Eventos/img/'+ str(user_mail)+ '$'+ str(post_id)+'.png')\n email.send()\n print(\"Exito en el registro\")\n else:\n print(\"Ya no hay cupo\")\n except Exception as e: print(e)\n\n\n\n\n return redirect(\"Eventos:vconfirmados\", user_mail = user_mail)", "def sendEmail(request, names):\n datas = ()\n i = 1\n for name in [name for name in names.split(',')]:\n # user1 = get_object_or_404(User, username='徐超伟')\n # print(user1.email)\n if name:\n # print(name)\n user = get_object_or_404(User, username__exact=name)\n if not user.email:\n request.session['res'] = '0'\n # print(res)\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))\n\n message = (u'还书提示', u'你已经超出了还书期限,请尽快归还图书。',\n 'LocalLibrarySystem<670736258@qq.com>', [user.email])\n datas += (message,)\n\n res = send_mass_mail(datas, fail_silently=False,)\n # print(res)\n request.session['res'] = res\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))", "def emailAdmin(ip, nrLoggedEmails, lastLog):\n \n msg = lastLog[1]\n toEmail = lastLog[2]\n\n msg = \"VARNING! En dator med IP-nummer %s har skickat fler än max-antal e-postmeddelanden under angivet tidsintervall.\\n\\n\" % (ip)\n msg += \"Utdrag från senaste loggade mejlet:\\n\\nIP: %s\\nMottagare: %s\\n\\nFör mer info, kolla loggfilen på: %s\\n\\n\"\\\n % (ip, toEmail, logPath)\n msg += \"----------------------------------------------------------\\n\"\n msg += \"Detta mejl har genererats automatiskt av sMap's email-log\\n\"\n msg += \"----------------------------------------------------------\\n\"\n\n # Add a log summary\n logger = logEmail.EmailLogger(logPath)\n \n logSummary = logger.getLogSummary(secondsBack=None, asText=True)\n msg = msg + \"\\nUtdrag från loggen (visar alla sända mejl uppdelat på IP-nummer):\\n\\n%s\" % (logSummary)\n\n mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n mimeMsg['Subject'] = \"Varning från sMaps e-post\"\n mimeMsg['From'] = fromEmail\n mimeMsg['To'] = adminEmails\n\n \"\"\"for debuggning from localhost: sendEmail.sendEmail(\"noreply.mkarta@gmail.com\", \"asdf1234\", adminEmails,\\\n port=port, msg=mimeMsg)\"\"\"\n \n sendEmail.sendEmail(fromEmail, password, adminEmails.split(\",\"),\\\n smtp, port=port, msg=mimeMsg)\n \n # Store data that warning has been sent so that it won't\n # create what it tries to prevent - spamming!!\n blocked = logger.setBlock(ip)", "def handle_emails():\n email = request.data['email'].strip()\n user = User.query.filter_by(email=email).first()\n option = \\\n request.data['option'].strip() # have a <select> in the frontend\n token = s.dumps(email, salt='email-confirm')\n\n msg = Message('Reset password', sender=app.config['ADMINS'][0],\n recipients=[email])\n link = 'http://localhost:3000/confirm_email/{}/{}'\\\n .format(option, token)\n if user:\n msg.body = 'Your link is {}'.format(link)\n else:\n msg.body = 'You attempted to reset your password but you do not \\\n have an account with us. Please Sign Up and Log in. 
{}'\\\n .format('http://localhost:3000/register')\n\n mail.send(msg)\n return jsonify({\"message\":\"Please confirm your email.\"}), 201", "def reveal(self):\n content = self.password_entry.get()\n #content = int(content)\n\n if content == 'admin':\n #do something more with this\n message = \"Access granted.\"\n\n # email text\n granted_msg = 'Access granted via login system.'\n\n server.starttls()\n server.login(user, password)\n server.sendmail(fromaddr, toaddr, granted_msg)\n\n # will auto log onto gmail acc\n #os.system(\"start chrome www.gmail.com\")\n else:\n # email text\n denied_msg = 'Someone failed to login to your secure system!'\n\n # print you were denied access to actual text handler\n message = \"Access denied.\"\n\n server.starttls()\n server.login(user, password)\n server.sendmail(fromaddr, toaddr, denied_msg)\n\n self.text.delete(0.0, END)\n self.text.insert(0.0, message)", "def step_impl_the_msg_to_is_set_to_internal_as_string_not_array(context):\n context.bdd_helper.message_data[\"msg_to\"] = context.bdd_helper.internal_id_specific_user", "def send_email(self, message):\n pass", "def get(self, email, application_category):", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)", "def forgot_Password(): \r\n OTP=randint(10000,100000)\r\n UserName=request.args.get(\"UserName\")\r\n try:\r\n\r\n with open('api.key', 'r') as apikey:\r\n key=apikey.read().replace('\\n', '')\r\n if request.headers.get('API_KEY') == key:\r\n user_details=fetch_details(UserName)\r\n Email_id=user_details[0]['Email']\r\n update_otp(UserName,OTP)\r\n msg=send_mail(Email_id,OTP)\r\n logging.info(\"sendmail function called. \")\r\n \r\n else:\r\n msg=\"Enter correct API KEY for Authentication.\"\r\n except IndexError:\r\n msg=f\"{UserName} details not found kindly enter valid UserName.\"\r\n\r\n return msg", "def __call__(self, serv, author, args):\n if not self.bot.has_admin_rights(serv, author):\n return\n if len(args) > 1:\n liste = args[1].split(\"@\")[0]\n query = (\"SELECT id, subject, author, liste FROM moderation \" +\n \"WHERE liste=%s AND moderated=0 ORDER BY date DESC\")\n values = (liste,)\n message = (\"Messages en attente de modération \" +\n \"pour la liste \" + liste + \" :\")\n else:\n query = (\"SELECT id, subject, author, liste FROM moderation \" +\n \"WHERE moderated=0 ORDER BY date DESC\")\n values = ()\n message = \"Messages en attente de modération :\"\n try:\n bdd = self.bot.pgsql_connect(serv)\n assert(bdd is not None)\n except AssertionError:\n return\n\n bdd_cursor = bdd.cursor()\n bdd_cursor.execute(query, values)\n if bdd_cursor.rowcount <= 0:\n self.bot.ans(serv,\n author,\n \"Aucun message en attente de modération.\")\n return\n self.bot.ans(serv, author, message)\n for (ident, subject, author, liste) in bdd_cursor:\n self.bot.say(serv, \"[\" + liste + \"] : « \" + subject + \" » par \" +\n author)\n bdd_cursor.close()", "def get(self, request,correo, id, idevento):\n\n \n try:\n print(\"aoeuoeu\")\n reg = RegEvento.objects.get(id = id)\n print(reg.confirmacion)\n reg.confirmacion = \"Confirmado\"\n reg.save()\n\n print(\"Usuario confirmado\")\n except Exception as e: print(e)\n\n\n \n\n return redirect(\"Eventos:confirmarA\", user_mail=correo, post_id=idevento)", "def send_email(email_dict, appointment_id):\n event_identifier = g_cal.send_invite_through_gcal(email_dict)\n 
models.Appointments.objects.filter(id=appointment_id).update(event_identifier=event_identifier)", "def send_lead_task(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_email(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "def sent_from_view(request):\n context= {}\n queryset_todos = Todo.objects.filter(user_id=request.user.id,send_to=True)\n print(queryset_todos)\n context['sent_to_tasks'] = queryset_todos\n return render(request, \"todos/send_to.html\",context)", "def send_email_to_admins(self, template_name, subject, **kw):\n \n mailer = self.app.module_map['mail']\n barcamp = self.barcamp\n new_user = self.user # active user\n for admin in self.barcamp.admin_users:\n print admin\n send_tos = [admin.email]\n kwargs = dict(\n new_user = new_user,\n user = admin,\n barcamp = barcamp,\n url = self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n notification_url = self.handler.url_for(\"barcamps.edit\", slug = self.barcamp.slug, _full = True)\n )\n kwargs.update(kw)\n payload = self.handler.render_lang(\"emails/%s.txt\" %template_name, **kwargs)\n mailer.mail(admin.email, subject, payload)", "def send(id):\n try:\n userIds = json.loads(request.data)\n notification = Notification.objects.filter(id=id).to_json()\n if not notification:\n return jsonify({\n \"message\": \"BAD REQUEST\",\n \"success\": False\n }), 400\n ### Call Send Notification ###\n log = {\n \"notificationId\": id,\n \"userIds\": userIds\n }\n NotificationLog(**log).save()\n return jsonify({\n \"message\": \"Send Successfully\",\n \"success\": True\n }), 200\n except BaseException as e:\n print(e)\n return e.message, 400", "def email(self, instance):\r\n return mark_safe('<a href=\"mailto:{0}\">{1}</a>'.format(\r\n instance.user.email, instance.user.email,\r\n ))", "def sendEmail(body, subject, email=\"\"):\n dest = [\"micneeley14@gmail.com\", \"hunterreid49@gmail.com\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"michael@neeley.dev\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)", "def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n ###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")", "def invitation(id):\n invitation = get_required(Invitation, id)\n if g.user == invitation.inviter.user:\n flash(\"You can't send an invitation to yourself.\")\n return redirect(url_for('front'))\n if invitation.acceptor_member_id:\n flash(\"This invitation has already been used.\")\n return redirect(url_for('front'))\n clicked_invitation(invitation)\n 
db.session.commit()\n return redirect(invitation.circle.url)", "def ad_rep_email(obj):\n return '%s' % obj.ad_rep.email", "def post(self, request):\n\n try:\n eventoid = request.POST.get('id', '')\n correo = request.POST.get('correo', '')\n AsigStaff.objects.create(id_Evento = eventoid, email_staff = correo)\n print(\"Exito en la asignación de staff\")\n except:\n print(\"Error en la asignacion de staff\")\n\n \n return render(request, self.template, self.context)\n #return render(request, self.template, self.context)", "def email_user(user, template_path, from_address, context_dict):\n return email_list([user.email], template_path, from_address, context_dict)", "def Contactos(request):\n form = FormAyuda(request.POST)\n if form.is_valid():\n user=request.user\n mensajeOBJ = form.cleaned_data\n mensaje = mensajeOBJ.get(\"Consulta\")\n asunto= \"Inconveniente o consulta de: \"+ str(user)\n guardarAuditoria = \"El usuario envio la siguiente consulta o inquietud: \"+ mensaje\n registrarAuditoria(request.user, guardarAuditoria)\n\n ##correo se envia en segundo plano\n t = Timer(1,CorreoMail,args=(asunto,mensaje,\"gerardocabrer@gmail.com\"))\n t.start()\n\n context = {\n \"mensaje\": \"GRACIAS POR UTILIZAR NUESTRO SISTEMA! :)\",\n \"titulo\": \"MENSAJE ENVIADO\",\n \"titulo_b1\": \"SALIR\",\n \"boton1\": \"/menu/\",\n \"titulo_b2\": \"\",\n \"boton2\": \"\",\n }\n return render(request, 'Error.html', context)\n\n context={\n \"form\":form,\n }\n\n return render(request,'Menu/contactos.html', context)", "def toggle_jobmail(request):\n if request.is_ajax():\n if request.method == 'POST':\n request.user.jobmail = not request.user.jobmail\n request.user.save()\n\n return HttpResponse(status=200, content=json.dumps({'state': request.user.jobmail}))\n raise Http404", "def sendMail(self, empireID, message):\n try:\n serverResult = self.game.server.sendMail(self.game.authKey, empireID, message)\n if serverResult <> 1:\n self.modeMsgBox(serverResult)\n else:\n self.destroyTempFrames()\n except:\n self.modeMsgBox('sendMail->Connection to Server Lost, Login Again')", "def mail_msg_id(self, mail_msg_id):\n\n self._mail_msg_id = mail_msg_id", "async def _dmid(self, ctx, id: int, *, message: str = None):\n if not isinstance(id, str):\n return await ctx.send(\"You have not entered a valid ID\")\n\n if not message:\n return await ctx.send(\"You must give a message to send.\")\n\n try:\n user = await ctx.bot.fetch_user(int(id))\n except Exception as e:\n return await ctx.send(f\"Error happened while trying to fetch user.\\n{e}\")\n\n if user.bot is True:\n return await ctx.send(\"I cannot send messages to bots\")\n\n if not user.dm_channel:\n await user.create_dm()\n\n message = \" \".join(message)\n e = discord.Embed(description=message, color=discord.Color.blurple())\n e.set_author(name=f\"Message from {ctx.author}!\", icon_url=ctx.author.avatar_url)\n e.set_footer(text=f\"Sent at {arrow.now(tz='US/Eastern').strftime('%X')} EST\", icon_url=ctx.bot.user.avatar_url)\n try:\n await user.send(embed=e)\n return await ctx.send(f\"Message has been sent to `{user}`!\")\n except discord.Forbidden:\n return await ctx.send(\"Cannot send messages to this user\")\n except discord.HTTPException:\n return await ctx.send(\"Message failed.\")\n except Exception as e:\n await ctx.send(f\"Error while sending embed. 
{e}\")", "def send_new_credentials(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website):\n\n logger.info(\"in send lead mail task\")\n return send_lead_generate(email,name,password,phone,shop,address,lead_mail,fname,mem_mail,website)", "async def invite(self, ctx):\r\n myInvite = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))\r\n await ctx.channel.send('Invite me to *your* server with this link: \\n\\n<{}>'.format(myInvite))", "def render_or_send(func, message):\n if request.endpoint != func.func_name:\n mail.send(message)\n\n if (current_user.is_authenticated() and current_user.superuser):\n return render_template('debug_email.html', message=message)", "async def password_link_generate(mail: TextData, background_tasks: BackgroundTasks):\n email = mail.data\n mail, subject, body = await AccountProcessor.send_reset_link(email.strip())\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Success! A password reset link has been sent to your mail id\"}", "def __str__(self):\n return f'Carrito de {self.user.email}'", "def _url(self):\n return 'contact/{email}'.format(email=self.email)", "def mail_method(self,address,city,state,zip,name):\n id = self.find_employee_id(name)\n if id in self.pymthd:\n self.pymthd[id] = \"Mailed Check\"\n print(\"{}{}\".format(name, \" was successfully changed to Mailed Check\"))\n self.emp_dict[id][1] = address\n self.emp_dict[id][2] = city\n self.emp_dict[id][3] = state\n self.emp_dict[id][4] = zip\n self.emp_dict[id][6] = \"2\"\n return self.pymthd, self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def set_email_para(self,\n email_dict):\n\n self.__email_flag__ = 1\n\n # email\n self.__email_host__ = email_dict[\"email_host\"]\n self.__email_receiver_list__ = email_dict[\"email_recv_list\"]\n self.__email_sender__ = email_dict[\"email_sender_mailbox\"]\n self.__email_user__ = email_dict[\"email_username\"]\n self.__email_passwd__ = email_dict[\"email_password\"]\n\n print(\"NotifyManager email host=%s\"\n % self.__email_host__)\n print(\"NotifyManager email sender mailbox=%s\"\n % self.__email_sender__)\n print(\"NotifyManager email receiver mailbox=%s\"\n % self.__email_receiver_list__)\n\n return", "def get(self):\n user_id = request.args.get('user_id')\n return get_email(user_id)", "def service_sendTestMail(self, context, sender=None, recipient=None):\n\n if sender is None:\n sender = self.config.sender_mail\n else:\n sender = sender.strip()\n\n if recipient is None:\n recipient = self.config.admin_mail\n else:\n recipient = recipient.strip()\n\n # TODO fr / en\n # add fqdn\n msg_text = u\"\"\"Bonjour,\nCe message de test a été envoyé depuis l'interface d'administration\nd'EdenWall. 
Si vous l'avez reçu, cela confirme que la configuration\nen place au moment de l'envoi vous permet de recevoir les messages\nsystème (alertes et informations) de votre pare-feu EdenWall.\"\"\"\n if context.isUserContext():\n session = context.getSession()\n msg_text += u\"\\n\\nL'envoi ce de message a été déclenché par une action utilisateur.\\nInformations de traçage: %s\\n\" % (session,)\n\n msg = MIMEText(msg_text.encode('ISO-8859-1'), 'plain', 'ISO-8859-1')\n msg['Subject'] = 'EdenWall : test mail'\n\n if check_mail(sender):\n msg['From'] = sender\n else:\n raise NuConfError(CONTACT_INVALID_SENDER, \"'sender' e-mail : invalid e-mail address\")\n\n if check_mail(recipient):\n msg[\"To\"] = recipient\n else:\n raise NuConfError(CONTACT_INVALID_RECIPIENT, \"'recipient' e-mail : invalid e-mail address\")\n\n return self.sendTestMail('127.0.0.1', msg['From'], [msg['To']], msg.as_string())", "def mailto_supervisor(request, application):\n applicant_name = application.get_full_name()\n subject = '{} -- {} clinical database access request'.format(\n applicant_name, settings.SITE_NAME)\n body = loader.render_to_string(\n 'notification/email/mailto_contact_supervisor.html', {\n 'application': application,\n 'applicant_name': applicant_name,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME\n })\n\n # rm comma to handle mailto issue with comma and special char.\n # ref https://github.com/MIT-LCP/physionet-build/issues/1028\n to = formataddr((application.reference_name.replace(',', ''),\n application.reference_email))\n bcc = 'credential-reference+{0}@{1}'.format(\n application.id, get_current_site(request))\n return mailto_url(to, subject=subject, bcc=bcc, body=body)", "def test_additional_emails_role_no_email(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n assignment = fake_clients.FakeRoleAssignment(\n scope={\"project\": {\"id\": project.id}},\n role_name=\"member\",\n user={\"id\": user.id},\n )\n\n setup_identity_cache(\n projects=[project], users=[user], role_assignments=[assignment]\n )\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"test@example.com\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n data = {\"email\": \"new_test@example.com\", \"roles\": [\"member\"]}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 1)\n\n # Test that the token email gets sent to the other addresses\n self.assertEqual(mail.outbox[0].to[0], \"new_test@example.com\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True, \"password\": \"1234\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def send_password_mails(password_f_name):\n from databoard.db_tools import send_password_mails\n send_password_mails(password_f_name)", "def get_email(obj):\r\n return obj.user.email", "def protected():\n message = \"\"\n if flask_praetorian.current_user().roles == 
\"admin\":\n message = f\"welcome {flask_praetorian.current_user().username}, this is protected endpoint\"\n else:\n message = f'Endpoint not allowed for user {flask_praetorian.current_user().username}'\n return {\"message\": message}", "async def patreon(self, ctx):\n await ctx.send(\"https://www.patreon.com/joinemm\")", "async def put_user_byid(request):\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(text=\"Incorrect value for user_id\", status=400)\n\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n if user.username == \"admin\":\n return web.Response(status=400, text=\"Cannot change admin\")\n\n is_admin = request.GET.getone(\"is_admin\", None) # str \"true\" or \"flase\"\n if not is_admin: # if None\n return web.Response(text=\"Nothing to change\", status=204)\n\n if is_admin.lower() == \"true\":\n user.is_admin = True\n data = {\"result\": \"{u} is now admin \".format(u=user.username)}\n elif is_admin.lower() == \"false\":\n user.is_admin = False\n data = {\"result\": \"{u} is no longer admin \".format(u=user.username)}\n\n try:\n request.cirrina.db_session.commit() # pylint: disable=no-member\n except sqlalchemy.exc.DataError:\n request.cirrina.db_session.rollback() # pylint: disable=no-member\n return web.Response(status=500, text=\"Database error\")\n\n # TODO : change to a multicast group\n await app.websocket_broadcast(\n {\n \"event\": Event.changed.value,\n \"subject\": Subject.user.value,\n \"changed\": {\"id\": user_id, \"is_admin\": user.is_admin},\n }\n )\n\n return web.json_response(data)", "async def perm_check(ctx,roles_list: List[int]):\n for n,role in enumerate(ctx.author.roles):\n # If authorized\n if role.id in roles_list:\n return \"pass\"\n # Not authorized\n if n == len(ctx.author.roles) - 1:\n return await ctx.send(embed=Embed(title=\"> **⚠ Attention !**\",description=\"Vous n'avez pas la permission d'éxecutez cette commande !\",color=Colour.from_rgb(255,255,0)).set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url))", "def controls(email):", "def step_impl_the_msg_to_is_set_to_internal_specific_user(context):\n step_impl_the_msg_to_is_set_to(context, context.bdd_helper.internal_id_specific_user)", "def email(self):\n return self.__email", "def send_email_with_paper(request, id):\n paper = get_object_or_404(Paper, pk=id)\n if paper.status == '5completed' or paper.status == '3rejected':\n id_email = paper.user.email\n subject = paper.title\n body = render_to_string(\n 'papers/paper_file.html',\n {\n 'request': request,\n 'paper': paper\n }\n )\n\n send_mail(\n subject, body, settings.EMAIL_HOST_USER, [id_email],\n html_message=body, fail_silently=False\n )", "def post(self, request):\n config_name = request.POST.get('config')\n email = request.POST.get('recipient')\n config = MailConfig.objects.get(name=config_name)\n version = TemplateVersion.objects.active(config.template.name)\n message = utils.render(config_name, email, version.test_data)\n pk = utils.send(\n f'[TEST] {message.subject}',\n message.from_email,\n message.to_email,\n message.body\n )\n return JsonResponse({'id': pk})", "def invitation(request):\n # Check Email is valid\n email = request.DATA['email']\n try:\n validate_email(email)\n except ValidationError:\n return Response({'status': '400', 'code': 'E_INVALID_EMAIL',\n 'detail': code['E_INVALID_EMAIL']}, status=400)\n # Email Info\n subject = 
'Golfconnect Invitation Email'\n email = [request.DATA['email']]\n message = '<p>' + request.DATA['content'] + '</p>'\n # Create Email\n send_ok = send_email(subject, message, email)\n if send_ok:\n return Response({'status': '200', 'code': 'OK_SEND_EMAIL',\n 'detail': code['OK_SEND_EMAIL']}, status=200)\n else:\n return Response({'status': '400', 'code': 'E_SEND_EMAIL_FAIL',\n 'detail': code['E_SEND_EMAIL_FAIL']}, status=400)", "def post_chat_action_in_personal_group(request):\n\tif request.method == \"POST\":\n\t\town_id, target_id = request.user.id, request.POST.get('tid',None)\n\t\town_anon_status, their_anon_status, group_id = get_personal_group_anon_state(own_id, target_id)\n\t\tif their_anon_status is None:\n\t\t\treturn redirect(\"personal_group_user_listing\")\n\t\telse:\n\t\t\toption = request.POST.get('opt',None)\n\t\t\tif option == '6':\n\t\t\t\ttheir_uname, their_avurl = get_uname_and_avurl(target_id,their_anon_status)\n\t\t\t\treturn render(request,\"personal_group/general_settings/personal_group_all_settings.html\",{'their_anon':their_anon_status,\\\n\t\t\t\t\t'avatar':their_avurl,'name':their_uname,'own_anon':own_anon_status,'tid':target_id})\n\t\t\telif option in ('1','2','3','4','5'):\n\t\t\t\tobj_count, obj_ceiling, gid, bid, idx, img_id, img_wid, hw_ratio = add_content_to_personal_group(content=option, type_='action', \\\n\t\t\t\t\twriter_id=own_id, group_id=group_id)\n\t\t\t\tprivate_chat_tasks.delay(own_id=own_id,target_id=target_id,group_id=group_id,posting_time=time.time(),text=PRIV_CHAT_EMOTEXT[option],\\\n\t\t\t\t\ttxt_type='action',own_anon='1' if own_anon_status else '0',target_anon='1' if their_anon_status else '0',blob_id=bid, idx=idx, \\\n\t\t\t\t\timg_url='',own_uname='',own_avurl='',deleted='undel',hidden='no',successful=True if bid else False)\n\t\t\t\tpersonal_group_sanitization(obj_count, obj_ceiling, gid)\n\t\t\telse:\n\t\t\t\tpass\n\t\t\trequest.session['personal_group_tid_key'] = target_id\n\t\t\trequest.session[\"personal_group_gid_key:\"+target_id] = group_id\n\t\t\trequest.session.modified = True\n\t\t\treturn redirect(\"enter_personal_group\")\t\t\n\telse:\t\t\n\t\treturn redirect(\"enter_personal_group\")" ]
[ "0.57655394", "0.5716651", "0.56478196", "0.56307864", "0.55586624", "0.5510274", "0.54520583", "0.54152286", "0.5387207", "0.5373411", "0.5325026", "0.53217953", "0.529948", "0.52949625", "0.52771795", "0.5257251", "0.5247731", "0.5247631", "0.52378786", "0.52317333", "0.52260846", "0.5215479", "0.52058053", "0.51971185", "0.51639205", "0.5163302", "0.51333696", "0.5105796", "0.5101908", "0.5095805", "0.5090047", "0.50793546", "0.5071781", "0.5060939", "0.50604844", "0.5053226", "0.5051105", "0.50403816", "0.5036703", "0.5032051", "0.5028449", "0.5019878", "0.5019865", "0.5012715", "0.49936974", "0.49880686", "0.4987762", "0.49850357", "0.49831063", "0.4981536", "0.4978092", "0.49778992", "0.49755287", "0.4968249", "0.49666357", "0.49573886", "0.49542356", "0.49439162", "0.49434695", "0.49344394", "0.49330443", "0.4927456", "0.49257323", "0.4919293", "0.4918363", "0.49169058", "0.49161056", "0.49149403", "0.4910941", "0.49103877", "0.4909001", "0.490769", "0.49055213", "0.48978603", "0.48972526", "0.4891499", "0.48865527", "0.48746434", "0.48736098", "0.48710138", "0.48691675", "0.4866533", "0.4866092", "0.4859864", "0.48559305", "0.48518294", "0.48504812", "0.4849238", "0.48484477", "0.48477477", "0.48469236", "0.48468125", "0.48453513", "0.48421955", "0.48411453", "0.4840319", "0.4836613", "0.48353246", "0.4829776", "0.4829583" ]
0.63593924
0
Build the right part of the fireball function.
def build_rightpart(): # build in 1: (K dec) apply_card("put", 1) apply_slot(1, "dec") apply_card("K", 1) # build in 0: greg build_greg(0) # smash together to get (greg (K dec)) in 0 smash() # copy it to 1. apply_card("put", 1) apply_slot(1, "zero") apply_card("get", 1) # build horace in 0. build_horace(0) # smash together to get (horace (greg (K dec))) in 0. smash() # Wrap with an S. apply_card("S", 0) # build ian in 1. build_ian(1) # smash together to get ((S (horace (greg (K dec)))) ian) in 0. smash()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def build_wall(): #py:build_wall\n RUR._build_wall_()", "def event11512005():\n header(11512005)\n\n BONFIRE_FLAGS = (\n # on_warp_list, required_flag (from rest)\n (11012045, 11012044), # Parish Turret\n (11302045, 11302040), # Bone Chimney\n (11202045, 11202040), # Moonlight Grove\n (11402045, 11402040), # Fetid Slagmire\n (11002045, 11002044), # The Sluiceworks\n (11512045, 11512043), # Sun Chamber\n (11512046, 11512044), # Gwyn's Altar\n (11312045, 11312044), # The Undercrypt\n (11412085, 11412080), # Sanctum of Chaos\n (11602045, 11602044), # The Abyss\n (11702045, 11702044), # The Duke's Archives\n # (11102045, 11102044), # Cloister of Exiles (now on permanent Lordvessel list)\n (11212085, 11212084), # Royal Hippodrome\n (11812045, 11812040), # Undead Asylum (must be rested at on return)\n )\n\n # Miscellaneous: enable Chasm Cell bonfire warp flag if Early Oolacile is on.\n skip_if_event_flag_off(1, EVENT.EarlyOolacile)\n flag.enable(213)\n\n # Start by disabling all of them.\n for warp_list_flag, _ in BONFIRE_FLAGS:\n flag.disable(warp_list_flag)\n\n # Wait for Chthonic Spark possession or Gwyn to be dead.\n if_player_has_good(-1, GOOD.ChthonicSpark)\n if_event_flag_on(-1, EVENT.LordvesselFull)\n if_condition_true(0, -1)\n\n # Enable all bonfires that have been rested at.\n for warp_list_flag, required_flag in BONFIRE_FLAGS:\n # Note that you'll need to reload for a newly rested-at bonfire to appear in the menu.\n skip_if_event_flag_off(1, required_flag)\n flag.enable(warp_list_flag)\n\n # Restart if Spark is lost and Lordvessel is not full.\n if_player_does_not_have_good(1, GOOD.ChthonicSpark)\n if_event_flag_off(1, EVENT.LordvesselFull)\n if_condition_true(0, 1)\n restart()", "def sling_action():\n global mouse_distance\n global rope_length\n global angle\n global mouse_x_pos\n global mouse_y_pos\n\n #add code inside sling function\n # Fixing bird to the sling rope\n vec = vector((initial_x_sling, initial_y_sling), (mouse_x_pos, mouse_y_pos))\n unit_vec = unit_vector(vec)\n uv_1 = unit_vec[0]\n uv_2 = unit_vec[1]\n mouse_distance = distance(initial_x_sling, initial_y_sling, mouse_x_pos, mouse_y_pos) #point at which currrent bird id\n fix_pos = (uv_1*rope_length+initial_x_sling, uv_2*rope_length+initial_y_sling)\n highest_length = 102 #when stretched\n\n #to make bird stay within rope\n x_redbird = mouse_x_pos - 20\n y_redbird = mouse_y_pos - 20\n if mouse_distance > rope_length:\n pux, puy = fix_pos\n pux -= 20\n puy -= 20\n first_pos = pux, puy\n screen.blit(redbird, first_pos)\n second_pos = (uv_1*highest_length+initial_x_sling, uv_2*highest_length+initial_y_sling) #current position\n pygame.draw.line(screen, (255, 0, 0), (next_x_sling, next_y_sling), second_pos, 5) #catapult rope\n screen.blit(redbird, first_pos)\n pygame.draw.line(screen, (255, 0, 0), (initial_x_sling, initial_y_sling), second_pos, 5) #ANOTHER SIDE of catapult\n else:\n #when not fully stretched\n mouse_distance += 10\n third_pos = (uv_1*mouse_distance+initial_x_sling, uv_2*mouse_distance+initial_y_sling)\n pygame.draw.line(screen, (0, 0, 0), (next_x_sling, next_y_sling), third_pos, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (initial_x_sling, initial_y_sling), third_pos, 5)\n # Angle of impulse\n\n 
change_in_y = mouse_y_pos - initial_y_sling\n change_in_x = mouse_x_pos - initial_x_sling\n if change_in_x == 0:\n change_in_x = 0.00000000000001\n angle = math.atan((float(change_in_y))/change_in_x)", "def __init__(self,left,bottom,left_boundary,right_boundary):\n\t\tsuper(Donkey,self).__init__(0,0,69,71,'p1_duck.png')\n\t\tself.rect.left = left\n\t\tself.rect.bottom = bottom\n\t\tself.left_boundary = left_boundary\n\t\tself.right_boundary = right_boundary\n\t\tself.move_right()\n\t\t# Variable for keeping track so as to when to change the Donkeys direction \n\t\tself.__steps = 0\t\t\n\t\tself.__threshold_steps = random.randint(25,50)\t\n\t\tself.direction = 'RIGHT' # Current direction of Donkey\n\t\t# Variable for keeping track so to when to emit fireballs \n\t\tself.__loop_count = 0\n\t\t# Variable which determines minimum iterations of main game loop\n\t\t# after which to emit a fireball\n\t\tself.__threshold_time = 50\t \n\t\tDonkey.all_donkeys.add(self)", "def generate_fire_recurrence(self):\r\n \r\n self.time_to_next_fire = round(weibullvariate(self.scale_parameter, self.shape_parameter),2)\r\n return self.time_to_next_fire", "def create_super_ball():\n super_balls.append(gen_super_ball())\n generate_velocity(super_balls)", "def emp_line_strategy(self, game_state):\n self.build_basic_base(game_state)\n\n \"\"\"\n Build corner defense.\n \"\"\"\n self.build_defences(game_state)\n\n \"\"\"\n Finally deploy our information units to attack.\n \"\"\"\n self.deploy_attackers(game_state)", "def explode(self):\n fire_potential = self.flannability * self.weight\n if fire_potential < 10:\n return '...fizzle'\n elif fire_potential < 50:\n return '...boom!'\n else:\n return '...BABOOM!!'\n\n # part 3 sublass", "def fire_tick(self):\n SOUNDS['fire'].play()\n for room in self.room_list:\n if room.fire_level == 0:\n for j in room.adjacent:\n if self.lookup(j).fire_level == 2 and random.random() <= room.spread_chance:\n room.fire_level = 1\n self.num_onfire += 1\n break\n elif room.fire_level == 1:\n room.fire_level = 2", "def __init__(self, center, waypoints, firepoints):\n super().__init__(center, MallFighter.MALL_FIGHTER_SPEED, MallFighter.ANIMATION_DELAY, *MallFighter.FILE_PATH)\n self.fire_idx = 0\n self.way_idx = 0\n self.waypoints = waypoints\n self.firepoints = firepoints", "def fire_arrows():\n\n global fireball_cooldown\n\n angle = None\n if keys[pygame.K_UP]: \n angle = 270\n if keys[pygame.K_LEFT]: angle = 225\n elif keys[pygame.K_RIGHT]: angle = 315\n elif keys[pygame.K_DOWN]: \n angle = 90\n if keys[pygame.K_LEFT]: angle = 135\n elif keys[pygame.K_RIGHT]: angle = 45\n elif keys[pygame.K_LEFT]: angle = 180\n elif keys[pygame.K_RIGHT]: angle = 0\n\n\n if angle != None:\n fireball_cooldown += 1\n\n new_bullet = projectiles.Fireball(player.rect.center[0], player.rect.center[1], angle)\n bullets.add(new_bullet)", "def specialfire_draw(self, window):\n self.specialfire_x = self.x + 10\n window.blit(self.specialfire_image, (self.specialfire_x, self.specialfire_y))", "def fireworks():\n\n sleep_speed = 0.025\n\n # Turn on white\n PYGLOW.color(\"white\", 60)\n sleep(sleep_speed)\n # Turn on blue\n PYGLOW.color(\"blue\", 60)\n sleep(sleep_speed)\n # Fade white\n PYGLOW.color(\"white\", 50)\n sleep(sleep_speed)\n # Turn on green\n PYGLOW.color(\"green\", 60)\n sleep(sleep_speed)\n # Fade white and blue\n PYGLOW.color(\"white\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 50)\n sleep(sleep_speed)\n # Turn on yellow\n PYGLOW.color(\"yellow\", 60)\n sleep(sleep_speed)\n # Fade white, blue, and green\n
PYGLOW.color(\"white\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 50)\n sleep(sleep_speed)\n # Turn on orange\n PYGLOW.color(\"orange\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, and yellow\n PYGLOW.color(\"white\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 50)\n sleep(sleep_speed)\n # Turn on red\n PYGLOW.color(\"red\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, yellow, and orange\n PYGLOW.color(\"white\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 50)\n sleep(sleep_speed)\n # Fade all\n PYGLOW.color(\"white\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 50)\n sleep(sleep_speed)\n # Fade blue, green, yellow, orange, and red\n PYGLOW.color(\"blue\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 40)\n sleep(sleep_speed)\n # Fade green, yellow, orange, and red\n PYGLOW.color(\"green\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 30)\n sleep(sleep_speed)\n # Fade yellow, orange, and red\n PYGLOW.color(\"yellow\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 20)\n sleep(sleep_speed)\n # Fade orange, and red\n PYGLOW.color(\"orange\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 10)\n sleep(sleep_speed)\n # Fade red\n PYGLOW.color(\"red\", 0)\n sleep(sleep_speed)\n # Pause 1 second before the next one\n sleep(1)", "def generate_fire_recurrence(self):\n\n self.time_to_next_fire = round(weibullvariate(self.scale_parameter, self.shape_parameter),2)\n return self.time_to_next_fire", "def fourLegSimulator(beta_list, gamma_list, beta_list2, gamma_list2, beta_list3, gamma_list3, beta_list4, gamma_list4, bodyHeight, femur, tibia):\n \n #import necessary packages\n import numpy as np \n import itertools # This package is specifically used for having multiple variable \"for\" loop using zip function\n from numpy import pi, sin, cos, sqrt\n import matplotlib.pyplot as plt\n import matplotlib.animation as animation\n get_ipython().run_line_magic('matplotlib', 'qt')\n\n\n\n # input parameters\n Femur_one_leg = femur # Length of femur (upper bone)\n Tibia_one_leg = tibia # Length of Tibia (lower bone)\n\n\n # Making arrays for containing value of respective coordinates\n X1 = np.zeros(len(beta_list)) # array for x_coordinates of moving point of femur\n Y1 = np.zeros(len(beta_list)) # array for y_coordinates of moving point of femur\n X2 = np.zeros(len(gamma_list)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2 = np.zeros(len(gamma_list)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n \n X1_2 = np.zeros(len(beta_list2)) # array for x_coordinates of moving point of femur\n Y1_2 = np.zeros(len(beta_list2)) # array for y_coordinates of moving point of femur\n X2_2 = 
np.zeros(len(gamma_list2)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_2 = np.zeros(len(gamma_list2)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n\n X1_3 = np.zeros(len(beta_list3)) # array for x_coordinates of moving point of femur\n Y1_3 = np.zeros(len(beta_list3)) # array for y_coordinates of moving point of femur\n X2_3 = np.zeros(len(gamma_list3)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_3 = np.zeros(len(gamma_list3)) # array for y_coordinates of moving point of tibia i.e end effector in our case \n \n \n X1_4 = np.zeros(len(beta_list4)) # array for x_coordinates of moving point of femur\n Y1_4 = np.zeros(len(beta_list4)) # array for y_coordinates of moving point of femur\n X2_4 = np.zeros(len(gamma_list4)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_4 = np.zeros(len(gamma_list4)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n \n \n #Populating the above defined arrays currently filled with zeros to respective coordinates\n #Here in the for loop zip function is used to iterate two variales simultaneously and enumerate function to return index numbers\n\n for index,(beta,gamma) in enumerate(zip(beta_list,gamma_list)):\n x1 = Femur_one_leg*cos(-beta - (pi/2)) # x-cooridnate of femur\n y1 = Femur_one_leg*sin(-beta - (pi/2)) # y-cooridnate of femur\n x2 = x1 + Tibia_one_leg*cos(-pi/2 - (beta + gamma)) # x-coordinate of tibia\n y2 = y1 + Tibia_one_leg*sin(-pi/2 - (beta + gamma)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1[index] = x1 \n Y1[index] = y1 \n X2[index] = x2 \n Y2[index] = y2 \n \n for index2,(beta2,gamma2) in enumerate(zip(beta_list2,gamma_list2)):\n x1_2 = Femur_one_leg*cos(-beta2 - (pi/2)) # x-cooridnate of femur\n y1_2 = Femur_one_leg*sin(-beta2 - (pi/2)) # y-cooridnate of femur\n x2_2 = x1_2 + Tibia_one_leg*cos(-pi/2 - (beta2 + gamma2)) # x-coordinate of tibia\n y2_2 = y1_2 + Tibia_one_leg*sin(-pi/2 - (beta2 + gamma2)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_2[index2] = x1_2 \n Y1_2[index2] = y1_2 \n X2_2[index2] = x2_2 \n Y2_2[index2] = y2_2 \n\n for index3,(beta3,gamma3) in enumerate(zip(beta_list3,gamma_list3)):\n x1_3 = 40 + Femur_one_leg*cos(-beta3 - (pi/2)) # x-cooridnate of femur\n y1_3 = Femur_one_leg*sin(-beta3 - (pi/2)) # y-cooridnate of femur\n x2_3 = x1_3 + Tibia_one_leg*cos(-pi/2 - (beta3 + gamma3)) # x-coordinate of tibia\n y2_3 = y1_3 + Tibia_one_leg*sin(-pi/2 - (beta3 + gamma3)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_3[index3] = x1_3 \n Y1_3[index3] = y1_3 \n X2_3[index3] = x2_3 \n Y2_3[index3] = y2_3\n \n for index4,(beta4,gamma4) in enumerate(zip(beta_list4,gamma_list4)):\n x1_4 = 40 + Femur_one_leg*cos(-beta4 - (pi/2)) # x-cooridnate of femur\n y1_4 = Femur_one_leg*sin(-beta4 - (pi/2)) # y-cooridnate of femur\n x2_4 = x1_4 + Tibia_one_leg*cos(-pi/2 - (beta4 + gamma4)) # x-coordinate of tibia\n y2_4 = y1_4 + Tibia_one_leg*sin(-pi/2 - (beta4 + gamma4)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_4[index4] = x1_4 \n Y1_4[index4] = y1_4 \n X2_4[index4] = x2_4 \n Y2_4[index4] = y2_4 \n\n # Setting up figure and subplot\n\n fig = plt.figure()\n fig.canvas.set_window_title('One 
Leg trajectory Planning')\n ax = fig.add_subplot(111, aspect='equal', autoscale_on=False, xlim=(-30,70), ylim=(-50,50))\n ax.grid()\n ax.set_title('Leg Trajectory')\n ax.axes.xaxis.set_ticklabels([])\n ax.axes.yaxis.set_ticklabels([])\n \n line, = ax.plot([], [], 'o-', lw=5, color='#05143b')\n line2, = ax.plot([], [], 'o-', lw=5, color='#37acf0')\n line3, = ax.plot([], [], 'o-', lw=5, color='#05143b')\n line4, = ax.plot([], [], 'o-', lw=5, color='#37acf0')\n \n\n\n # initialization function\n def init():\n line.set_data([], [])\n line2.set_data([], [])\n line3.set_data([], [])\n line4.set_data([], [])\n return line,line2,line3,line4,\n\n # animation function\n def animate(i):\n x_points = [0, X1[i], X2[i]]\n y_points = [0, Y1[i], Y2[i]]\n \n x2_points = [0, X1_2[i], X2_2[i]]\n y2_points = [0, Y1_2[i], Y2_2[i]]\n \n x3_points = [40, X1_3[i], X2_3[i]]\n y3_points = [0, Y1_3[i], Y2_3[i]]\n \n x4_points = [40, X1_4[i], X2_4[i]]\n y4_points = [0, Y1_4[i], Y2_4[i]]\n \n\n line.set_data(x_points, y_points)\n line2.set_data(x2_points, y2_points)\n line3.set_data(x3_points, y3_points)\n line4.set_data(x4_points, y4_points)\n \n return line, line2, line3, line4\n\n # call the animation\n ani = animation.FuncAnimation(fig, animate, init_func=init, frames=len(X1), interval=100, blit=True, repeat=True)\n \n\n # plotting respective movement trajectories in the same plot\n plt.plot(X2,Y2, '#05143b')\n# plt.plot(X1,Y1)\n \n plt.plot(X2_2,Y2_2,'#37acf0')\n# plt.plot(X1_2,Y1_2)\n \n plt.plot(X2_3,Y2_3,'#05143b')\n# plt.plot(X1_3,Y1_3)\n \n plt.plot(X2_4,Y2_4,'#37acf0')\n# plt.plot(X1_4,Y1_4)\n \n \n \n plt.plot([-20,60],[-bodyHeight,-bodyHeight],'brown')\n plt.plot([-4,44],[0,0],'#010b24')\n plt.plot([-4,-4],[0,5],'#010b24')\n plt.plot([44,44],[0,5],'#010b24')\n plt.plot([-4,44],[5,5],'#010b24')\n \n for ind in range(100):\n plt.plot([-4,44],[ind*5/100,ind*5/100],'black')\n \n return None", "def build_basic_base(self, game_state):\n filter_locations_x = [27, 24, 21, 18, 15, 12]\n for x in filter_locations_x:\n location = [x, 13]\n if game_state.can_spawn(FILTER, location):\n self.spawn_wrapper(game_state, FILTER, location)\n\n\n \"\"\"\n We use Encryptors firewalls because they are cheap and give shields\n This will build the line that our EMPs will use to snipe.\n If they are destroyed we replace them with destructors.\n \"\"\"\n encryptor_locations = []\n for x in range(26, 12, -1):\n # Because of the way pathing works don't need to build behind our Filters saving cores\n if x not in filter_locations_x and x != self.hole_x_location:\n encryptor_locations.append([x, 12])\n \n for location in encryptor_locations:\n if game_state.can_spawn(ENCRYPTOR, location):\n if game_state.turn_number < 2:\n self.spawn_wrapper(game_state, ENCRYPTOR, location)\n else:\n self.spawn_wrapper(game_state, DESTRUCTOR, location)", "def sling_action():\n global mouse_distance\n global rope_lenght\n global angle\n global x_mouse\n global y_mouse\n # Fixing bird to the sling rope\n v = vector((sling_x, sling_y), (x_mouse, y_mouse))\n uv = unit_vector(v)\n uv1 = uv[0]\n uv2 = uv[1]\n # mouse_distance = distance(sling_x, sling_y, x_mouse, y_mouse)\n sling = Vec2d(sling_x, sling_y)\n mouse = Vec2d(x_mouse, y_mouse)\n mouse_distance = (sling - mouse).length\n\n pu = (uv1*rope_lenght+sling_x, uv2*rope_lenght+sling_y)\n bigger_rope = 102\n x_redbird = x_mouse - 20\n y_redbird = y_mouse - 20\n if mouse_distance > rope_lenght:\n pux, puy = pu\n pux -= 20\n puy -= 20\n pul = pux, puy\n screen.blit(redbird, pul)\n pu2 = 
(uv1*bigger_rope+sling_x, uv2*bigger_rope+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu2, 5)\n screen.blit(redbird, pul)\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu2, 5)\n else:\n mouse_distance += 10\n pu3 = (uv1*mouse_distance+sling_x, uv2*mouse_distance+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu3, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu3, 5)\n # Angle of impulse\n dy = y_mouse - sling_y\n dx = x_mouse - sling_x\n if dx == 0:\n dx = 0.00000000000001\n angle = math.atan((float(dy))/dx)", "def compose_lwss_gun (glider_gun, A = -1, B = -1, C = -1):\n if (A < 0): A = 40\n if (B < 0): B = A;\n if (C < 0): C = A;\n\n m = min (A, B, C)\n a = A - m\n b = B - m\n c = C - m\n return \\\n glider_gun[4 * a] ( A, -A - 3, flip_x) + \\\n glider_gun[4 * b] (-B + 2, -B + 1) + \\\n glider_gun[4 * c + 1] (-C + 6, C, flip_y)", "def reBuild(self): # redefine the rebuild method for loss function (polymorphism)\n self.updateRange()\n self.buildLine()\n self.normalize() # normalize loss function to have total area of 1 ", "def breath_fire(self):\r\n print(\"$@#$#@$\\n\")", "def generate_fire_recurrence(self):\n self._time_to_next_fire = round(\n weibullvariate(self._scale_parameter, self._shape_parameter), 2\n )\n return self._time_to_next_fire", "def myBarbieHome(pos):\n bulldozer(pos)\n ground(pos)\n mc.postToChat(\"Ground done !\")\n\n pos.z += 5\n makeTheHouse(pos)\n mc.postToChat(\"House done !\")\n\n theRoof(pos)\n mc.postToChat(\"Roof done !\")\n\n makeTheDeco(pos, flowers = wFlower_Cyan)\n mc.postToChat(\"ALL Work done !\")", "def fire(self, speed=200):\n velocity = (speed * math.cos(self.angle), -1 * speed * math.sin(self.angle))\n\n # position is at the end of the barrel\n line_start = (int(self.position[0]), int(self.position[1] - self.size/2))\n gun_start = (int(self.position[0] + self.size/2), line_start[1])\n gun_end = (int(gun_start[0] + math.cos(self.angle) * self.barrel), int(gun_start[1] - math.sin(self.angle) * self.barrel))\n\n return Shell(gun_end, velocity)", "def Fire(self, *args):\n return _gmat_py.Burn_Fire(self, *args)", "def run_to_ball_bottom_right(obs, player_x, player_y):\n def environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n # ball is to the bottom right from player's position\n if (obs[\"ball\"][0] > player_x and\n obs[\"ball\"][1] > player_y):\n return True\n return False\n \n def get_action(obs, player_x, player_y):\n \"\"\" get action of this memory pattern \"\"\"\n return Action.BottomRight\n \n return {\"environment_fits\": environment_fits, \"get_action\": get_action}", "def shoot_fire(self, camera):\n\n cursor_pos = pygame.mouse.get_pos()\n tempMouseRect = pygame.Rect(cursor_pos, (0, 0))\n tempMouseRect = camera.use_cam_rect(tempMouseRect)\n\n relPos = tempMouseRect.topleft\n\n self.intMousePos = relPos\n ang = self.get_shoot_angle(relPos)\n #ang = math.radians(170 - math.degrees(ang))\n ang = math.radians(( (math.degrees(ang)+ 180 )))\n #ang = int(ang)\n\n if self.canShoot and self.ammo: #and self.is_good_angle(ang):\n self.canShoot = False\n self.ammo -= 1\n self.timer_fire = time.time()\n\n # decide starting position of fireball\n\n xPos = self.rect.centerx\n\n fire = powersC.Fireball((xPos, self.rect.centery), ang, self.direction)\n self.powerGroup.add(fire)", "def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)", "def actions(self, state):\n \"*** YOUR CODE HERE 
***\"\n if state[2] == 0: # When agent is facing North\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1: # When agent is facing West\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2: # When agent is facing South\n state_fw = (state[0], state[1] - 1, 2)\n state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3: # When agent is facing East\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. Can't find heading\")\n \n shoot_loc_arr = [] # Initialize Array\n for allowed_state in self.allowed: # Iterate through all allowed states\n for goal_state in self.goals: # Iterate through all goal states\n if allowed_state[0] == goal_state[0] and allowed_state[1] < goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 0)) # X Matches, Head North\n if allowed_state[0] > goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 1)) # Y Matches, Head West\n if allowed_state[0] == goal_state[0] and allowed_state[1] > goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 2)) # X Matches, Head South\n if allowed_state[0] < goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 3)) # Y Matches, Head East \n\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) # Initialize to large values\n for goal in shoot_loc_arr: # Iterate through arrays\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. 
Can't determine action\")", "def theRoof(pos, blockTypeMain = wool , mainColor=wPurple, replaceGlass = wGlass):\n \n # try again the same trick to add the roof\n # Middle part\n for i in range(0,12,1):\n iy = i\n if i >= 6:\n iy=11-i\n #print i, iy\n mc.setBlocks(pos.x-4+i, pos.y+10+iy, pos.z+4,\n pos.x-4+i, pos.y+10+iy, pos.z+29, blockTypeMain, mainColor)\n\n # RIGHT SIDE of the house\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+5+ii,\n pos.x-13+ii, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+8,\n pos.x-11+ii, pos.y+9+ii, pos.z+26-ii, material)\n \n # and LEFT side of the house\n xAdjust = 21\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5-ii+xAdjust, pos.y+9+ii, pos.z+5+ii,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-7-ii+xAdjust, pos.y+9+ii, pos.z+8,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+26-ii, material)", "def build_wall(self): #py:UR.build_wall\n RUR._UR.build_wall_(self.body)", "def step(self):\n # gets who has fired who in this step\n blues_fire_reds = np.array([[blue.fires_(red) for red in self.red_drones] for blue in self.blue_drones])\n reds_fire_blues = np.array([[red.fires_(blue) for blue in self.blue_drones] for red in self.red_drones])\n\n # if the foe is no longer seen, the count restarts from 0\n self.blues_have_fired_reds *= blues_fire_reds\n self.reds_have_fired_blues *= reds_fire_blues\n\n # and the count is incremented for the others\n self.blues_have_fired_reds += blues_fire_reds\n self.reds_have_fired_blues += reds_fire_blues\n\n # np magic : first find the list of duos shooter/shot, keep the shots (only once)\n red_deads = np.unique(np.argwhere(self.blues_have_fired_reds >= self.blue_shots_to_kill).T[1])\n blue_deads = np.unique(np.argwhere(self.reds_have_fired_blues >= self.red_shots_to_kill).T[1])\n\n\n # tell the drones that they are dead\n for drone_id in blue_deads:\n self.blue_drones[drone_id].is_killed(is_blue=True)\n for drone_id in red_deads:\n self.red_drones[drone_id].is_killed(is_blue=False)\n\n # consider only living drones\n blue_drones = [drone for drone in self.blue_drones if drone.is_alive]\n red_drones = [drone for drone in self.red_drones if drone.is_alive]\n\n bf_obs, rf_obs = self.get_observation()\n bf_reward = rf_reward = 0\n remaining_blues, remaining_reds = len(blue_drones), len(red_drones),\n blue_shots, red_shots = len(blue_deads), len(red_deads)\n\n if blue_shots + red_shots > 0:\n print('someone is killed: {0} blues and {1} reds'.format(blue_shots, red_shots))\n\n return bf_obs, bf_reward, remaining_blues, blue_shots, rf_obs, rf_reward, remaining_reds, red_shots", "def shooterPrep(oe0, oef, A, B, dt, scInfo, dynInfo, time, shooter, plot, body, grav, srp, thirdbod):\n\n # Normalizing based off dt1 and mu = 1\n shooterInfo = {} # dictionary for shooter functions\n shooterInfo['abtol'] = 1e-12\n shooterInfo['reltol'] = 1e-12\n shooterInfo['m0'] = scInfo['m0'] # kg, spacecraft mass\n t_norm = np.copy(dt) # s, time of first burn\n r_norm = (dynInfo['mu_host']*t_norm**2)**(1/3) # m\n shooterInfo['mu_host'] = dynInfo ['mu_host']/r_norm**3*t_norm**2\n\n # State\n meoe0_norm = af.oe2meoe(oe0)\n meoe0_norm[0] /= r_norm # m\n meoef_norm = af.oe2meoe(oef)\n meoef_norm[0] /= r_norm # m\n\n # Time\n shooterInfo['t0'] = dynInfo['t0']/t_norm # s\n dt_norm = 
np.copy(dt)/t_norm # s\n shooterInfo['manTime'] = np.sum(dt)/t_norm # s\n\n # BLT parameters\n A_norm = np.copy(A) # -\n B_norm = np.copy(B)*t_norm # 1/s\n\n # Constants\n if grav == False and srp == False and thirdbod == False:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host,)\n\n elif grav == True and srp == False and thirdbod == False:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_grav_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_grav_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n r_host = dynInfo['r_host']/r_norm # m\n w_host = dynInfo['w_host']*t_norm # rad/s\n degree = dynInfo['degree'] # -\n order = dynInfo['order'] # -\n theta_gst = dynInfo['theta_gst'] # rad\n gc = dynInfo['gc'] # -\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, r_host, w_host, degree, order, theta_gst, gc, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host, r_host, w_host, degree, order, theta_gst, gc)\n\n elif grav == False and srp == True and thirdbod == False:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_srp_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_srp_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n r_host = dynInfo['r_host']/r_norm # m\n d_3B = dynInfo['d_3B']/r_norm # m\n r_3B = dynInfo['r_3B']/r_norm # m\n Cr = scInfo['Cr'] # -\n a2m = scInfo['a2m']/r_norm/r_norm # m2/kg\n srp_flux = con.srp_flux*t_norm**3 # kg/s3\n c = con.c/r_norm*t_norm # m/s\n AU = con.AU*1e3/r_norm # m\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, r_host, d_3B, r_3B, Cr, a2m, srp_flux, c, AU, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host, r_host, d_3B, r_3B, Cr, a2m, srp_flux, c, AU)\n\n elif grav == False and srp == False and thirdbod == True:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_3bod_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_3bod_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n mu_3B = dynInfo['mu_3B']/r_norm**3*t_norm**2 # m3/s2\n d_3B = dynInfo['d_3B']/r_norm # m\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, mu_3B, d_3B, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host, mu_3B, d_3B)\n\n elif grav == True and srp == True and thirdbod == False:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_grav_srp_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_grav_srp_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n r_host = dynInfo['r_host']/r_norm # m\n w_host = dynInfo['w_host']*t_norm # rad/s\n d_3B = dynInfo['d_3B']/r_norm # m\n r_3B = dynInfo['r_3B']/r_norm # m\n degree = dynInfo['degree'] # -\n order = dynInfo['order'] # -\n theta_gst = dynInfo['theta_gst'] # rad\n gc = dynInfo['gc'] # -\n Cr = scInfo['Cr'] # 
-\n a2m = scInfo['a2m']/r_norm/r_norm # m2/kg\n srp_flux = con.srp_flux*t_norm**3 # kg/s3\n c = con.c/r_norm*t_norm # m/s\n AU = con.AU*1e3/r_norm # m\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, r_host, w_host, d_3B, r_3B, degree, order, theta_gst, gc, Cr, a2m, srp_flux, c, AU, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host, r_host, w_host, d_3B, r_3B, degree, order, theta_gst, gc, Cr, a2m, srp_flux, c, AU)\n\n elif grav == True and srp == False and thirdbod == True:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_grav_3bod_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_grav_3bod_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n r_host = dynInfo['r_host']/r_norm # m\n w_host = dynInfo['w_host']*t_norm # rad/s\n mu_3B = dynInfo['mu_3B']/r_norm**3*t_norm**2 # m3/s2\n d_3B = dynInfo['d_3B']/r_norm # m\n degree = dynInfo['degree'] # -\n order = dynInfo['order'] # -\n theta_gst = dynInfo['theta_gst'] # rad\n gc = dynInfo['gc'] # -\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, r_host, w_host, mu_3B, d_3B, degree, order, theta_gst, gc, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host, r_host, w_host, mu_3B, d_3B, degree, order, theta_gst, gc)\n\n elif grav == False and srp == True and thirdbod == True:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_srp_3bod_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_srp_3bod_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n r_host = dynInfo['r_host']/r_norm # m\n mu_3B = dynInfo['mu_3B']/r_norm**3*t_norm**2 # m3/s2\n d_3B = dynInfo['d_3B']/r_norm # m\n r_3B = dynInfo['r_3B']/r_norm # m\n Cr = scInfo['Cr'] # -\n a2m = scInfo['a2m']/r_norm/r_norm # m2/kg\n srp_flux = con.srp_flux*t_norm**3 # kg/s3\n c = con.c/r_norm*t_norm # m/s\n AU = con.AU*1e3/r_norm # m\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, r_host, mu_3B, d_3B, r_3B, Cr, a2m, srp_flux, c, AU, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host, r_host, mu_3B, d_3B, r_3B, Cr, a2m, srp_flux, c, AU)\n\n elif grav == True and srp == True and thirdbod == True:\n # Dynamic Files\n shooterInfo['ode_burn'] = dyn.ode_2bod_grav_srp_3bod_blt_meoe \n shooterInfo['ode_coast'] = dyn.ode_2bod_grav_srp_3bod_coast_meoe\n\n # Necessary Constants\n mu_host = dynInfo['mu_host']/r_norm**3*t_norm**2 # m3/s2\n r_host = dynInfo['r_host']/r_norm # m\n mu_3B = dynInfo['mu_3B']/r_norm**3*t_norm**2 # m3/s2\n w_host = dynInfo['w_host']*t_norm # rad/s\n d_3B = dynInfo['d_3B']/r_norm # m\n r_3B = dynInfo['r_3B']/r_norm # m\n degree = dynInfo['degree'] # -\n order = dynInfo['order'] # -\n theta_gst = dynInfo['theta_gst'] # rad\n gc = dynInfo['gc'] # -\n Cr = scInfo['Cr'] # -\n a2m = scInfo['a2m']/r_norm/r_norm # m2/kg\n srp_flux = con.srp_flux*t_norm**3 # kg/s3\n c = con.c/r_norm*t_norm # m/s\n AU = con.AU*1e3/r_norm # m\n g0 = con.g0/r_norm*t_norm*t_norm # m/s2\n T = scInfo['T']/r_norm*t_norm*t_norm # (kg)m/s2\n isp = scInfo['isp']/t_norm # s\n\n shooterInfo['extras_burn'] = (mu_host, r_host, w_host, mu_3B, d_3B, r_3B, degree, order, theta_gst, gc, Cr, a2m, srp_flux, c, AU, g0, T, isp)\n shooterInfo['extras_coast'] = (mu_host, 
r_host, w_host, mu_3B, d_3B, r_3B, degree, order, theta_gst, gc, Cr, a2m, srp_flux, c, AU)\n\n return meoe0_norm, meoef_norm, A_norm, B_norm, dt_norm, shooterInfo, r_norm, t_norm", "def run_to_ball_right(obs, player_x, player_y):\n def environment_fits(obs, player_x, player_y):\n \"\"\" environment fits constraints \"\"\"\n # ball is to the right from player's position\n if (obs[\"ball\"][0] > player_x and\n abs(obs[\"ball\"][1] - player_y) < 0.01):\n return True\n return False\n \n def get_action(obs, player_x, player_y):\n \"\"\" get action of this memory pattern \"\"\"\n return Action.Right\n \n return {\"environment_fits\": environment_fits, \"get_action\": get_action}", "def fire(self):", "def __init__(self, screen, dialName, needleName1, needleName2, startAngle, dialPos, needlePos, needleOffset):\n self.screen = screen\n self.dialPos = dialPos\n self.needlePos = needlePos\n self.needlePos2 = needlePos\n self.speed = SPEED_OF_NEEDLE \n self.dial = pygame.image.load(dialName)\n self.needle = pygame.image.load(needleName1)\n self.needle2 = pygame.image.load(needleName2)\n self.needleOffset = needleOffset\n self.needleOffset2 = (20,20)\n\n self.requestedAngle = 0 # requested angle from user\n self.currentAngle = 90 # current angle of needle, 90 degrees => 0 ft\n self.finalAngle = 0 # final angle that was requested\n self.flag1 = False # flag to handle overshoot of needle\n self.flag2 = False # flag to handle overshoot of needle\n self.inputData = 0.0 # input data\n self.startInputValue = 0.0 # input start value, normally 0\n self.inputValFont = pygame.font.SysFont(\"None\",38)\n\n\n self._get_rect_size()\n self.input_data(self.startInputValue)", "async def fire(self,\n start_x: int,\n start_y: int,\n x_speed: Optional[Union[float, int]] = 0,\n y_speed: Optional[Union[float, int]] = -1) -> NoReturn:\n\n x, y = start_x, start_y\n self._canvas.addstr(round(y), round(x), '*')\n await sleep(0)\n\n self._canvas.addstr(round(y), round(x), 'O')\n await sleep(0)\n self._canvas.addstr(round(y), round(x), ' ')\n\n x += x_speed\n y += y_speed\n\n symbol = '-' if x_speed else '|'\n\n max_y, max_x = get_canvas_size(self._canvas)\n curses.beep()\n fire_shot_object = MapObject(Frame(symbol), x, y)\n while 1 < y < max_y and 1 < x < max_x:\n self._canvas.addstr(round(y), round(x), symbol)\n await sleep(0)\n self._canvas.addstr(round(y), round(x), ' ')\n fire_shot_object.change_coordinates(x + x_speed, y + y_speed)\n for obj_id, obj in self._dynamic_objects.items():\n if obj_id.startswith('rubbish') and obj & fire_shot_object:\n draw_frame(self._canvas, obj.x, obj.y, obj.frame,\n negative=True)\n self._dynamic_objects.pop(obj_id)\n await self.explode(obj.x, obj.y)\n return\n\n y += y_speed\n x += x_speed", "def __generate_goal(width, length):\n goal = np.arange(1, ((width * length)+1)).reshape(length, width)\n goal[length - 1][width - 1] = 0\n return goal\n\n\n # This was the string builder method for the returned string.", "def make_flower(shape, x, y, c1, c2, l, s):\n shape.penup()\n shape.speed(20)\n shape.setpos(x, y)\n shape.color(c2, c1)\n shape.begin_fill()\n shape.pendown()\n for side in range(6):\n shape.left(60)\n shape.forward(s) # s stands for short side\n shape.right(60)\n shape.forward(l) # l stands for long side\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(l)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.end_fill()\n shape.pendown()\n\n shape.color(\"green\")\n shape.right(90)\n shape.penup()\n 
shape.forward(10)\n shape.pendown()\n shape.forward(110)\n shape.left(90)\n\n\n\n # ...", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def roof_mwfrs(lenght, width, overhang=1, wall_height=3, roof_height=4):\n h = wall_height + 0.5*roof_height\n area = (lenght + overhang) * (width + overhang)\n area_1 = 0.5*h*width\n area_2 = 0.5*h*width\n area_3 = h*width\n area_4 = area - area_1 -area_2 - area_3\n return area, area_1, area_2, area_3, area_4", "def build_random_function(min_depth, max_depth):\n if max_depth == 1:\n rand = random.random()\n if rand < 1.0/(2+timeflag):\n return [\"x\"]\n elif rand < 2.0/(2+timeflag):\n return [\"y\"]\n else:\n return [\"t\"]\n elif min_depth <= 1:\n if random.random() > (max_depth - 1.0)/max_depth:\n rand = random.random()\n if rand < 1.0/(2+timeflag):\n return [\"x\"]\n elif rand < 2.0/(2+timeflag):\n return [\"y\"]\n else:\n return [\"t\"]\n\n rand = random.random()\n if rand < 1.0/NUM_FF:\n return [\"prod\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n elif rand < 2.0/NUM_FF:\n return
[\"avg\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n elif rand < 3.0/NUM_FF:\n return [\"cos_pi\",build_random_function(min_depth-1,max_depth-1)]\n elif rand < 4.0/NUM_FF:\n return [\"sin_pi\",build_random_function(min_depth-1,max_depth-1)]\n elif rand < 5.0/NUM_FF:\n return [\"hypot\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n #elif rand < 6.0/NUM_FF:\n # return [\"pow\",build_random_function(min_depth-1,max_depth-1)]\n #elif rand < 6.0/NUM_FF:\n # return [\"add\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n elif rand < 6.0/NUM_FF:\n return [\"cube\",build_random_function(min_depth-1,max_depth-1)]", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)", "def cupboard_details(answeryes, answerfire): # Two parameters - answeryes and answerfire\r\n \r\n\r\n # The below line is a docstring explaining this function's activities.\r\n \r\n \r\n opencupboard = input(\"\\nTo open the cupboard, type yes: \") # User input to open the cupboard\r\n\r\n if opencupboard.lower() == answeryes: # Comparing the input given by the user above with the argument \"yes\" given below.\r\n\r\n print(\"\\nSolve this riddle if you want to reach the box:\")\r\n\r\n time.sleep(2)\r\n\r\n # Printing the riddle found on the red paper.\r\n \r\n print(\"\\n I am not alive, \\n but I grow; I don't have lungs, \\n but I need air; I don't have a mouth, \\n but water kills me. What am I?\")\r\n\r\n riddle_answer = input(\"\\nYour answer: \")\r\n\r\n # Condition to check whether the riddle answer given by the player is correct:\r\n\r\n if riddle_answer.lower() == answerfire: # Comparing the riddle answer with the argument that is assigned in this function\r\n\r\n print(f\"\\n\\n{answerfire.title()} is correct.\") # If the player's answer is correct, acknowledge it.\r\n\r\n time.sleep(3)\r\n\r\n print(f\"\\n Lucifer - 'Good job, {name.title()}! Let us wait for the paper to levitate and watch which direction it falls.'\")\r\n\r\n time.sleep(5)\r\n\r\n print(\"\\n\\n Paper falls on North side of the room...\")\r\n \r\n time.sleep(3)\r\n\r\n print(f\"\\n Lucifer - '{name.title()}, I think we are getting closer to the box! Let's check to the north to see if we can find something.'\")\r\n\r\n\r\n else: # If the player enters the wrong answer, restart the game from the beginning.\r\n\r\n print(\"Sorry, you gave the wrong answer! 
You will have to restart the game :(\")\r\n\r\n sys.exit()", "def build(self):\n\n # Create a custom grid, fe_set \n nfe = 6\n fe_a = 1/4.0\n fe_b = 0.2\n fe_set = [0, 0.004]\n for i in range(1,nfe+1):\n if i < nfe*fe_a:\n fe_set.append(i*fe_b/(nfe*fe_a))\n elif i == nfe: \n fe_set.append(1)\n else:\n fe_set.append(fe_b + (i-nfe*fe_a)*(1-fe_b)/(nfe*(1-fe_a)))\n\n \"\"\"\n Args:\n dae_method = method to use for calculating derivatives (default = OCLR)\n - BFD1 - 1st order backwards finite difference\n - OCLR - Orthogonal collocation, Lagrange-Radau\n - OCLL - Orthogonal collocation, Lagrange-Legendre\n press_drop = Pressure drop correlation for superficial velocity calc.\n - SimplifiedP - simplified pressure correlations \n - Ergun - Ergun equation\n fe_set = set of normalised finite element locations\n nfe = number of finite elements for bed discretization (default = 15)\n (not used if fe_set specified)\n ncp = number of collocation points (OCLR or OCLL only, default = 3)\n \"\"\" \n\n # Create unit model for fuel reactor\n self.MB_fuel = MB_CLC_fuel.MB(\n parent=self,\n dae_method = 'OCLR',\n press_drop = 'Ergun',\n fe_set = fe_set,\n ncp = 3)", "def main():\n testlife = CellularAutomation()\n testlife.printParams()\n testlife.printLifeformsDir()\n testlife.printRuleset()\n testlife.printDisplay()\n \n params = getConfig()\n XRES = params[0]\n YRES = params[1]\n BLOCK_SIZE = params[2]\n DELTA_T = params[3]\n FCOLOR = params[4]\n BCOLOR = params[5]\n XMAX = XRES / BLOCK_SIZE\n YMAX = YRES / BLOCK_SIZE\n\n options, args = getOptions() #IGNORE:W0612\n if options.directory:\n printLifeformsDir()\n sys.exit(0)\n \n \n if options.ruleset == None:\n ruleset_string = 'B3/S23' # Conway's Life\n else:\n ruleset_string = options.ruleset\n ruleset_string = ruleset_string.upper()\n ruleset = createRuleset(ruleset_string)\n\n pygame.display.init()\n fullname = os.path.join('data', 'glider32x32.bmp')\n seticon(fullname)\n os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'\n screen = pygame.display.set_mode((XRES, YRES))\n pygame.display.set_caption('My So-Called Life')\n pygame.mouse.set_visible(1)\n \n pygame.time.set_timer(USEREVENT, DELTA_T) # change state\n\n #Create The Background\n background = pygame.Surface(screen.get_size())\n background = background.convert()\n background.fill(BCOLOR)\n \n #Display The Background\n screen.blit(background, (0, 0))\n pygame.display.flip()\n \n #Prepare Game Objects\n white_block = pygame.Surface((BLOCK_SIZE - 2, BLOCK_SIZE - 2))\n white_block.fill(FCOLOR)\n\n if options.random or \\\n (not options.random and options.filename == None) or \\\n options.filename == 'random.lif':\n blocks, ca_matrix = randomStart(XMAX, YMAX, BLOCK_SIZE)\n else:\n try:\n filename = options.filename\n startx = options.startx\n starty = options.starty\n blocks, ca_matrix = loadStart(filename, startx, starty, XMAX, YMAX, BLOCK_SIZE)\n #print \n except Usage, err:\n print >>sys.stderr, err.msg\n print >>sys.stderr, \"for help use --help\"\n return 2\n\n for block in blocks:\n screen.blit(white_block,block)\n pygame.display.flip() \n\n print 'Ruleset: ', ruleset_string, rulesetName(ruleset_string)\n\n generation = 0\n while 1:\n if options.sstep:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.display.quit()\n return\n elif event.type == KEYDOWN and event.key == K_SPACE:\n ca_matrix = updateCA_MATRIX(ca_matrix, XMAX, YMAX, ruleset)\n blocks = updateDisplay(ca_matrix, XMAX, YMAX, BLOCK_SIZE) \n screen.blit(background, (0, 0))\n for block in blocks:\n 
screen.blit(white_block,block)\n pygame.display.flip()\n generation += 1\n print 'Generation: ', generation\n elif event.type == KEYDOWN and event.key == K_ESCAPE:\n pygame.display.quit()\n return\n elif event.type == KEYDOWN and event.key == K_f:\n pygame.display.toggle_fullscreen()\n elif event.type == KEYDOWN and event.key == K_d:\n printLifeformsDir()\n elif event.type == KEYDOWN and event.key == K_s:\n options.sstep = False\n elif event.type == KEYDOWN and event.key == K_w:\n # open a file for output\n try:\n fullname = os.path.join('lifeforms', 'snapshot.cells')\n fout = open(fullname, \"w\")\n except IOError:\n print \"Error! Cannot open file\"\n print\n sys.exit(1)\n \n # get program version number\n id_list = __version__.split()\n version_num = id_list[2]\n fout.write('! Output from mylife.py version' + \\\n version_num + '\\n')\n fout.write('! XMAX: ' + str(XMAX) + '\\n')\n fout.write('! YMAX: ' + str(YMAX) + '\\n')\n line_list = []\n for row in range(YMAX):\n for col in range(XMAX):\n if ca_matrix[row][col] == 1:\n line_list.append('0')\n else:\n line_list.append('.')\n line_list.append('\\n')\n line = \"\".join(line_list)\n fout.write(line)\n line_list = []\n \n fout.close()\n print 'Screen written to file \"snapshot.cells\"'\n else:\n #Handle Input Events\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.display.quit()\n print generation, 'generations run.'\n return\n elif event.type == USEREVENT:\n ca_matrix = updateCA_MATRIX(ca_matrix, XMAX, YMAX, ruleset)\n blocks = updateDisplay(ca_matrix, XMAX, YMAX, BLOCK_SIZE) \n for block in blocks:\n screen.blit(white_block,block)\n elif event.type == KEYDOWN and event.key == K_ESCAPE:\n pygame.display.quit()\n print generation, 'generations run.'\n return\n elif event.type == KEYDOWN and event.key == K_d:\n printLifeformsDir()\n elif event.type == KEYDOWN and event.key == K_s:\n options.sstep = True\n\n #Draw Everything\n generation += 1\n screen.blit(background, (0, 0))\n for block in blocks:\n screen.blit(white_block,block)\n pygame.display.flip()", "def treasury():\n room = Room()\n\n room.name = \"Treasury\"\n \"\"\" Set up the game and initialize the variables. 
\"\"\"\n # Sprite lists\n room.wall_list = arcade.SpriteList()\n room.item_list = arcade.SpriteList()\n\n # collect pies\n # Set up the items\n for i in range(10):\n # Create the item instance\n item = arcade.Sprite(\"images/16x16/Item__67.png\", SPRITE_SCALING)\n\n # Position the item\n item.center_x = random.randrange(SCREEN_WIDTH - 150)\n item.center_y = random.randrange(SCREEN_HEIGHT - 150)\n\n # Add the item to the lists\n room.item_list.append(item)\n\n # -- Set up the walls\n # Create bottom and top row of mirrors\n # This y loops a list of two, the coordinate 0, and just under the top of window\n for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):\n # Loop for each mirror going across\n for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):\n wall = arcade.Sprite(\"images/16x16/Item__71.png\", SPRITE_SCALING*5)\n wall.left = x\n wall.bottom = y\n wall.tag = \"trophy\"\n room.wall_list.append(wall)\n\n # Create left and right column of mirrors\n for x in (0, SCREEN_WIDTH - SPRITE_SIZE):\n # Loop for each mirror going across\n for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):\n # Skip making a block 4 and 5 blocks up\n if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:\n wall = arcade.Sprite(\"images/16x16/Item__71.png\", SPRITE_SCALING*5)\n wall.left = x\n wall.bottom = y\n wall.tag = \"trophy\"\n room.wall_list.append(wall)\n\n spirals = []\n spirals = fibonacci(500)\n\n for i in spirals:\n for j in spirals:\n wall = arcade.Sprite(\"images/16x16/Item__71.png\", SPRITE_SCALING*3)\n wall.left = j * SPRITE_SCALING + 300\n wall.bottom = i * SPRITE_SCALING + 200\n room.wall_list.append(wall)\n\n room.background = arcade.load_texture(\"images/bgs/PNG/Full/City/classic_city.png\")\n\n return room", "def gen_super_ball():\n super_ball_radius = 80\n super_ball_x = randint(super_ball_radius, screen_width - super_ball_radius)\n super_ball_y = randint(super_ball_radius, screen_height - super_ball_radius)\n super_ball_color = (RED, YELLOW, GREEN)\n return [super_ball_color, super_ball_x, super_ball_y, super_ball_radius]", "def __init__(self, screen, dialName, needleName, startAngle, dialPos, needlePos, needleOffset):\n self.screen = screen\n self.dialPos = dialPos\n self.needlePos = needlePos\n self.speed = SPEED_OF_NEEDLE \n self.dial = pygame.image.load(dialName)\n self.needle = pygame.image.load(needleName)\n self.nail = pygame.image.load('grey_nail.png')\n self.needleOffset = needleOffset\n\n self.requestedAngle = 0 # requested angle from user\n self.currentAngle = startAngle # current angle of needle\n self.finalAngle = 0 # final angle that was requested\n self.flag1 = False # flag to handle overshoot of needle\n self.flag2 = False # flag to handle overshoot of needle\n self.inputData = 0.0 # input data\n\n self._get_rect_size()", "def computer_fire(self):\n\n # Check tracker to see if previous attempt was a hit\n # If yes, continue to bomb rest of the ship first\n for shipID, size in self.tracker.items():\n if (size != 0) and (self.counter_copy[shipID] != size):\n for n in range(len(self.hit)):\n if self.hit[n] == shipID:\n self.bomb(n)\n return\n\n # Else, randomly fire on a new location\n n = random.randrange(0, len(self.hit))\n while self.hit[n] == 5:\n n = random.randrange(0, len(self.hit))\n self.bomb(n)", "def fire_Bubble(self, vel, firing_angle):\n\n\t\tvec = ang_to_vec(firing_angle)\n\t\tself.Bubble_vel = [vel*vec[0], vel*vec[1]]", "def test_goto_field_boss_guider(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', 
TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(9, 37, 0, 0, cmdState)", "def get_parts(self, tool_factory):\n if not self.settings.override_level_settings:\n if 'max_num_steps' in self.jf:\n self.max_num_steps = self.jf['max_num_steps']\n if 'overlap_thresh' in self.jf:\n self.settings.overlap_threshold = self.jf['overlap_thresh']\n\n\n if 'marker_must_hit' in self.jf:\n self.marker_must_hit = self.jf['marker_must_hit']\n if 'sec_goal_reward' in self.jf:\n self.sec_goal_reward = self.jf['sec_goal_reward']\n if 'target_reward' in self.jf:\n self.target_reward = self.jf['target_reward']\n if 'goal_is_basket' in self.jf:\n self.goal_is_basket = self.jf['goal_is_basket']\n if 'ball_is_basket' in self.jf:\n self.ball_is_basket = self.jf['ball_is_basket']\n if 'moving_goal' in self.jf:\n self.moving_goal = self.jf['moving_goal']\n if 'target_ball_radius' in self.jf:\n self.target_ball_radius = self.jf['target_ball_radius']\n\n if self.task_id is None or self.eval_rnd_map is None:\n self.eval_rnd_map = self.gen_noise_apply_map()\n\n target_pos = self.gen_target_pos + get_noise(self.eval_rnd_map, 'target')\n goal_pos = self.gen_goal_pos + get_noise(self.eval_rnd_map, 'goal')\n\n if self.ball_is_basket:\n target_ball = tool_factory.create(ToolTypes.BASKET_BALL, target_pos)\n elif self.target_ball_radius is not None:\n target_ball = tool_factory.create(ToolTypes.TARGET_BALL, target_pos, {'radius': self.target_ball_radius})\n else:\n target_ball = tool_factory.create(ToolTypes.TARGET_BALL, target_pos)\n env_tools = self.get_tools(self.env_jf, tool_factory)\n\n return env_tools, target_ball, goal_pos", "def build_random_function(min_depth, max_depth):\n listoffunctions = [ [\"x\"] , [\"y\"] , [\"\"] ]\n\n var = random.randrange(0,7) #this is not inclusive at the upper end\n halfvar = random.randrange(0,2)\n otherhalfvar = random.randrange(0,2)\n if max_depth == 0: # if you reach the max depth it ends\n if halfvar == 0:\n return [\"x\"]\n else:\n return [\"y\"]\n if min_depth == 0: # if you get to the minimum recursion length\n if max_depth != 0: # There is a 1/2 possibility it will end there\n if halfvar == 0: \n if otherhalfvar == 0: \n return [\"x\"] \n else: \n return [\"y\"]\n\n # some of the below return two so that they can evaluate more than one value\n\n if var == 0:\n return [\"sin_pi\", build_random_function(min_depth-1, max_depth-1)]\n if var == 1:\n return [\"cos_pi\", build_random_function(min_depth-1, max_depth-1)]\n if var == 2:\n return [\"prod\", build_random_function(min_depth-1, max_depth-1) , build_random_function(min_depth-1, max_depth-1)]\n if var == 3:\n return [\"avg\", build_random_function(min_depth-1, max_depth-1) , build_random_function(min_depth-1, max_depth-1)]\n if var == 4:\n return [\"squ\", build_random_function(min_depth-1, max_depth-1)]\n if var == 5:\n return [\"cir\", build_random_function(min_depth-1, max_depth-1) , build_random_function(min_depth-1, max_depth-1) ]\n if var == 6:\n return [\"sms\", build_random_function(min_depth-1, max_depth-1) , build_random_function(min_depth-1, max_depth-1) ]", "def simulate_fire(self, FGPathway_object, location, weather, supr_decision):\n\n if len(weather) == 0:\n print(\"weather is empty\")\n\n ######################################\n # Initialize the fire sequence\n 
######################################\n current_day = 0\n current_time = 0.0\n cells_burned = 0\n cells_crowned = 0\n spreading = False\n loc = location[:]\n sppr_dec = supr_decision\n\n # 1) is this fire going to spread at all?\n init_ignitions = self.get_neighbor_ignitions(FGPathway_object, loc, weather[current_day], sppr_dec)\n for ign in init_ignitions:\n #check to see if any of the spread rates are greater than zero.\n if ign[0] > 0:\n spreading = True\n break\n\n\n if not spreading:\n fr = FireRecord()\n fr.ignition_location=location[:]\n fr.weather = [weather[0]] #only including the first day of weather\n fr.suppressed = sppr_dec\n fr.note = \"non-spreading\"\n return fr\n\n #initialize the priority queue\n pq = Queue.PriorityQueue()\n\n #initialize the burn maps\n burn_map = np.zeros(FGPathway_object.size, dtype='int')\n crown_burn_map = np.zeros(FGPathway_object.size, dtype='int')\n\n #add the initial ignition location to the queue\n pq.put((current_time, loc[0], loc[1]))\n\n #the weather stream is arranged by day, so if the weather model gave us 4 days\n # to spread a fire (before a fire-ending weather event), it will have length 4.\n #We want to spread a fire for four whole days, starting at time = 0, and ending\n # at time = 3.9999, for a total of 4 \"time units\" which is represents four days.\n max_time = len(weather)\n\n #start the loop, and continue while there's anything queued, or until time expires\n while (current_time < max_time) and not (pq.empty()):\n #get the next queue item\n current_ign = pq.get()\n loc = (current_ign[1], current_ign[2])\n\n\n #check if the location is out of bounds, and if so, ignore this point\n if ( loc[0] < 0 ) or (loc[0] >= FGPathway_object.size[0]):\n continue\n if ( loc[1] < 0 ) or (loc[1] >= FGPathway_object.size[1]):\n continue\n\n\n #increment current time to this cell's ignition time. 
This can allow a single\n # ignition to go beyond the max time, so, check the index, since the weather\n # stream will not have a day for that index.\n current_time = current_ign[0]\n #adjust day, if needed\n if current_time - current_day >= 1.0:\n current_day += 1\n if current_day >= len(weather): break\n\n #check to see if this cell has already been burned\n if burn_map[loc[0], loc[1]]:\n #it's already burned in a previous step, so lets move on\n continue\n \n #we haven't 'continued', so this cell hasn't burned yet.\n # a) update the burn map\n burn_map[loc[0], loc[1]] = 1\n cells_burned += 1\n\n # b) check for crown fire, and if necessary, update the crown burn map\n crowned = False\n if self.get_crown_burn(FGPathway_object, loc, weather[current_day], sppr_dec):\n crown_burn_map[loc[0], loc[1]] = 1\n cells_crowned += 1\n crowned = True\n \n # c) get the neighbor ignitions\n n_igns = self.get_neighbor_ignitions(FGPathway_object, loc, weather[current_day], sppr_dec)\n\n # d) add ignitions to the priority queue\n for ign in n_igns:\n #if the spread rate is other than zero\n if ign[0] > 0:\n pq.put(ign)\n\n # e) update the pathway's data to reflect what happened\n self.update_cell(FGPathway_object, loc, burned=True, crowned=crowned)\n\n\n #all done with the queue, so either we ran out of new cells, or the time expired\n fr = FireRecord()\n fr.acres_burned = cells_burned * FGPathway_object.acres_per_cell\n fr.acres_crown_burned = cells_crowned * FGPathway_object.acres_per_cell\n fr.weather = weather[:]\n fr.suppressed = sppr_dec\n fr.ignition_location = location[:]\n\n #save the maps, if desired\n if self.SAVE_BURN_MAPS:\n fr.burn_map = burn_map\n fr.crown_burn_map = crown_burn_map\n\n fr.suppression_cost = self.calc_suppression_cost(fr)\n\n return fr", "def other_wakes(self, current, *turbines):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! 
\"\"\"\r\n self.nodisplacements = []\r\n self.procedures = []\r\n \r\n # blockage matrices:\r\n self.bn = []\r\n self.bt = []\r\n \r\n for i, turbine in enumerate(turbines):\r\n # append the own wake matrices when the current turbine is \r\n # compared to itself:\r\n \r\n if i == current:\r\n self.bn.append(Turbine.wn)\r\n self.bt.append(Turbine.wt)\r\n elif i != current:\r\n # it is shadowed when at least one control point of the current\r\n # turbine lies in the direct wake of the i-th turbine.\r\n self.shadowed = np.any((self.yi[i]>=-1) & (self.yi[i]<=1))\r\n self.behind = self.x0 > turbine.x0\r\n \r\n if (self.shadowed and self.behind):\r\n # compute obstruction matrices:\r\n self.set_templates(self.yi[i])\r\n self.offset_templates(i, turbine)\r\n \r\n # offset block matrices are appended to the list:\r\n self.bn.append(self.newQn)\r\n self.bt.append(self.newQt)\r\n else:\r\n # add empty blockage matrices if there is no obstruction:\r\n self.bn.append(np.copy(Turbine.zeros))\r\n self.bt.append(np.copy(Turbine.zeros))", "def touching_border(self, bb: list, quickness: int = 1) -> str:\r\n \r\n # Make the coordinates easier to handle\r\n x1, y1, x2, y2 = bb[0], bb[1], bb[2], bb[3]\r\n\r\n # Check which way the box should be expanded\r\n # The first two conditions check if the box should\r\n # be expanded diagonally downwards.\r\n # These just speed up the box sizing\r\n \r\n ## Check if there is a green pixel on the bottom right of the box\r\n if not (x2 + quickness >= len(self.bin_pic[0])) and \\\r\n not (y2 + quickness >= len(self.bin_pic)):\r\n if self.bin_pic[y2][x2]:\r\n return 'BR'\r\n\r\n ## Check if there is a green pixel on the bottom left of the box\r\n if not (x1 - quickness < 0) and not (y2 + quickness >= len(self.bin_pic)):\r\n if self.bin_pic[y2][x1]:\r\n return 'BL'\r\n\r\n # The next four check the sides of the box\r\n ## Check if there is a green pixel on the right side of the box\r\n if not (x2 + quickness >= len(self.bin_pic[0])):\r\n for i in range(y2 - y1 + 1):\r\n if self.bin_pic[y1 + i][x2]:\r\n return 'RIGHT'\r\n \r\n ## Check if there is a green pixel on the bottom side of the box\r\n if not (y2 + quickness >= len(self.bin_pic)):\r\n for i in range(x2 - x1 + 1):\r\n if self.bin_pic[y2][x1 + i]:\r\n return 'BOTTOM'\r\n\r\n ## Check if there is a green pixel on the left side of the box\r\n if not (x1 - quickness < 0):\r\n for i in range(y2 - y1 + 1):\r\n if self.bin_pic[y1 + i][x1 - quickness]:\r\n return 'LEFT'\r\n\r\n ## Check if there is a green pixel on the top side of the box\r\n if not (y1 - quickness < 0):\r\n for i in range(x2 - x1 + 1):\r\n if self.bin_pic[y1 - quickness][x1 + i]:\r\n return 'TOP'\r\n\r\n # If all the green pixels are found and the box is on the edge\r\n if (x2 + quickness >= len(self.bin_pic[0])) or \\\r\n (y2 + quickness >= len(self.bin_pic)) or \\\r\n (y1 - quickness < 0) or \\\r\n (x1 - quickness < 0):\r\n return 'EDGE'\r\n\r\n # If there are no green pixels touching the box, return 'NONE'\r\n return 'NONE'", "def initialize(self):\n self.verbose = True # display the command descriptions next to the bot labels\n self.carrier = None\n self.interceptors = []\n self.assassins = dict()\n self.defenders = []\n self.camper = None\n self.attackers = []\n self.spawnCampers = []\n self.aliveEnemies = 0\n self.lastEventIndex = -1\n \n \n\n # Calculate flag positions and store the middle.\n self.ours = self.game.team.flag.position\n self.theirs = self.game.enemyTeam.flag.position\n self.middle = (self.theirs + self.ours) / 2.0\n\n # Now figure 
out the flanking directions, assumed perpendicular.\n d = (self.ours - self.theirs)\n self.left = Vector2(-d.y, d.x).normalized()\n self.right = Vector2(d.y, -d.x).normalized()\n self.front = Vector2(d.x, d.y).normalized()\n self.defendAngle = self.level.fieldOfViewAngles[BotInfo.STATE_DEFENDING]\n self.midEnemySpawn = self.game.enemyTeam.botSpawnArea[0].midPoint(self.game.enemyTeam.botSpawnArea[1])\n \n \"\"\"circle = 2 * math.pi\n outerVec = self.game.enemyTeam.botSpawnArea[0] - self.game.enemyTeam.flagSpawnLocation\n while circle > 0:\n self.defenders += [[None, self.angledVector(outerVec, self.defendAngle / 2)]]\n outerVec = self.angledVector(outerVec, self.defendAngle)\n circle -= self.defendAngle\n \n campPos = []\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[0].x - self.level.firingDistance, self.game.enemyTeam.botSpawnArea[0].y + 0.5 * (self.game.enemyTeam.botSpawnArea[1].y - self.game.enemyTeam.botSpawnArea[0].y)))\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[0].x + 0.5 * (self.game.enemyTeam.botSpawnArea[1].x - self.game.enemyTeam.botSpawnArea[0].x ), self.game.enemyTeam.botSpawnArea[1].y + self.level.firingDistance))\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[1].x + self.level.firingDistance, self.game.enemyTeam.botSpawnArea[0].y + 0.5 * (self.game.enemyTeam.botSpawnArea[1].y - self.game.enemyTeam.botSpawnArea[0].y)))\n campPos.append(Vector2(self.game.enemyTeam.botSpawnArea[0].x + 0.5 * (self.game.enemyTeam.botSpawnArea[1].x - self.game.enemyTeam.botSpawnArea[0].x ), self.game.enemyTeam.botSpawnArea[0].y - self.level.firingDistance))\n\n for cp in campPos:\n free = self.level.findNearestFreePosition(cp)\n if free:\n sys.stdout.write(str(free) + '\\n')\n self.spawnCampers.append([None, free, False])\n \"\"\"\n sys.stdout.write(str(self.game.enemyTeam.botSpawnArea[1]) + ' ' + str(self.level.characterRadius) + '\\n')\n visited, islandEdges, islandOuter = [], [], []\n for x in range(0, len(self.level.blockHeights)):\n for y in range(0, len(self.level.blockHeights[x])):\n _, edges, island = self.recurseNeighbours(x, y, visited)\n if edges:\n islandEdges.append(edges)\n islandOuter.append(island)\n \n \n sys.stdout.write(str(islandEdges) + '\\n' + str(islandOuter) + '\\n')\n \n blocked = [item for sublist in islandOuter for item in sublist]\n #blockedOrSpawn = blocked[:]\n spawn = []\n for x in range(int(self.game.enemyTeam.botSpawnArea[0].x), int(self.game.enemyTeam.botSpawnArea[1].x)):\n for y in range(int(self.game.enemyTeam.botSpawnArea[0].y), int(self.game.enemyTeam.botSpawnArea[1].y)):\n spawn.append(Vector2(x, y))\n #blockedOrSpawn += spawn\n \n self.deadlines = dict()\n for i in range(len(islandEdges)):\n for coord, orientation in islandEdges[i]:\n if orientation is self.TOPLEFT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - self.level.firingDistance / 1.0283968, coord.y + 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.BOTTOMLEFT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - self.level.firingDistance / -1.0283968, coord.y - 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.LEFTUP:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + 0.24 * self.level.firingDistance / 1.0283968, coord.y - self.level.firingDistance / 1.0283968))\n elif orientation is self.RIGHTUP:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - 0.24 * self.level.firingDistance / 1.0283968, coord.y - self.level.firingDistance / 1.0283968))\n elif 
orientation is self.TOPRIGHT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + self.level.firingDistance / 1.0283968, coord.y + 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.BOTTOMRIGHT:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + self.level.firingDistance / 1.0283968, coord.y - 0.24 * self.level.firingDistance / 1.0283968))\n elif orientation is self.LEFTDOWN:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x + 0.24 * self.level.firingDistance / 1.0283968, coord.y + self.level.firingDistance / 1.0283968))\n elif orientation is self.RIGHTDOWN:\n self.deadlineFromLine(blocked, spawn, coord, Vector2(coord.x - 0.24 * self.level.firingDistance / 1.0283968, coord.y + self.level.firingDistance / 1.0283968))\n \n sys.stdout.write(str(self.deadlines) + '\\n')\n pointsAndLinesByEdge = dict()\n try:\n self.recursePaths(self.midEnemySpawn, blocked, self.deadlines, [], pointsAndLinesByEdge)\n except RuntimeError as e:\n sys.stdout.write(str(e) + '\\n')\n camplines = set()\n for edge, pls in pointsAndLinesByEdge.iteritems():\n for _, contact in pls:\n camplines.add((self.level.findNearestFreePosition(edge), contact))\n sys.stdout.write('\\n' + str(camplines))\n \n for cl in camplines:\n self.spawnCampers.append([[], cl])", "def __init__(self, fire):\n self.scene = fire.scene\n self.params = None\n self.fire = fire\n self.obstacles = self.speed_ref = self.smoke = None\n self.smoke_field = self.sparse_disc_matrix = None\n self.source = None", "def make_move(B, cur_monkey_pos, cur_num_balloons, cur_num_lives, move):\n\n def check_lose(B, cur_monkey_pos):\n \"\"\"\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n Output:\n bool: True if a balloon will hit the monkey when the balloons shift down; False otherwise\n \"\"\"\n assert B[-1][cur_monkey_pos] == \"x\"\n if B[-2][cur_monkey_pos] != 0:\n return True\n return False\n\n def shift_down(B, cur_monkey_pos, cur_num_lives):\n \"\"\"\n Just performs the shift of all the balloons downwards.\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n cur_num_lives (int): current number of lives in this configuration\n Output:\n (tuple, int): tuple consisting of the board configuration after balloons have all moved\n down by 1 and the new number of lives (or None if the monkey gets hit)\n \"\"\"\n\n if check_lose(B, cur_monkey_pos):\n return None\n\n new_board = []\n new_num_lives = cur_num_lives\n\n # construct the top row: if the balloon hits the ground, it respawns with +1 and we lose a life\n new_num_lives -= sum(1 for b in B[-2] if b > 0)\n top_row = tuple((b + 1 if 0 < b < 3 else b) for b in B[-2])\n new_board.append(top_row)\n\n # move all the middle rows down\n new_board.extend([r for r in B[:-2]])\n\n # add the ground row: nothing changes\n new_board.append(B[-1])\n\n return (tuple(new_board), new_num_lives)\n\n def partial_move(B, cur_monkey_pos, cur_num_balloons, move):\n \"\"\"\n Just performs the move, without the shift downwards\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n cur_num_balloons (int): current number of balloons on the board\n move (str): the proposed move (one of 'left', 'right', 'shoot')\n Output:\n (tuple, int, int): A tuple consisting of the board configuration after the move,\n the new monkey position, and the new number of balloons on the map\n (or None if invalid move)\n \"\"\"\n\n assert 
B[-1][cur_monkey_pos] == \"x\"\n R = len(B)\n C = len(B[0])\n\n new_board = [r for r in B[:-1]]\n new_bottom_row = [0 for _ in range(C)]\n new_monkey_pos = cur_monkey_pos\n new_num_balloons = cur_num_balloons\n\n if move == \"left\":\n if new_monkey_pos == 0:\n return None\n new_monkey_pos -= 1\n elif move == \"right\":\n if new_monkey_pos == C - 1:\n return None\n new_monkey_pos += 1\n elif move == \"shoot\":\n # simulate the dart\n for row in range(R - 2, -1, -1):\n if B[row][new_monkey_pos] != 0:\n new_row = list(B[row])\n new_row[new_monkey_pos] -= 1\n if new_row[new_monkey_pos] == 0:\n new_num_balloons -= 1\n new_board[row] = tuple(new_row)\n break\n else:\n assert False, \"invalid move: \" + move\n\n new_bottom_row[new_monkey_pos] = \"x\"\n new_board.append(tuple(new_bottom_row))\n return (tuple(new_board), new_monkey_pos, new_num_balloons)\n\n # make the move\n move_res = partial_move(B, cur_monkey_pos, cur_num_balloons, move)\n if move_res is None: # invalid move\n return None\n move_board, new_monkey_pos, new_num_balloons = move_res # unpack\n\n # shift all the balloons down\n shift_res = shift_down(move_board, new_monkey_pos, cur_num_lives)\n if shift_res is None: # check if a balloon hit the monkey\n return None\n new_board, new_num_lives = shift_res # unpack\n return (new_board, new_monkey_pos, new_num_balloons, new_num_lives)", "def light(self, item):\n item = ' '.join(item)\n if item == 'fire':\n print('ohh fire')\n self.items.append('fire')\n if self.finished_places == 6:\n self.finished_places += 1\n return self\n return super(Up, self).light(item)\n # if item is fire do stuff", "def make_squirrel(x, y):\n global squirrel_frames\n images1 = gamebox.load_sprite_sheet(\"Textures/squirrel_left.png\", 1, 3)\n images2 = gamebox.load_sprite_sheet(\"Textures/squirrel_right.png\", 1, 3)\n squirrel = []\n for image in images1:\n squirrel.append(gamebox.from_image(x, y, image))\n for image in images2:\n squirrel.append(gamebox.from_image(x, y, image))\n squirrel2 = squirrel\n squirrel3 = squirrel\n squirrel_frames = 3\n return squirrel, squirrel2, squirrel3", "def __init__(self, hit_y, room, wall, wall_direction):\n # Call the parent's constructor\n pygame.sprite.Sprite.__init__(self)\n\n self.wall = wall\n self.room = room\n self.direction = wall_direction\n\n if self.wall.rect.height <= 50:\n height = self.wall.rect.height\n else:\n height = 50\n \n # Make a blue wall, of the size specified in the parameters\n if self.direction == 'right':\n self.image = pygame.image.load('png/ledge_attach_right.png').convert_alpha()\n else:\n self.image = pygame.image.load('png/ledge_attach_left.png').convert_alpha()\n self.image = pygame.transform.scale(self.image, (50, height))\n \n # Make our top-left corner the passed-in location.\n self.rect = self.image.get_rect()\n self.rect.centery = hit_y\n if wall_direction == 'right':\n self.rect.x = self.wall.rect.left - 50\n else:\n self.rect.x = self.wall.rect.right\n self.spread_per_update = 1\n self.spread_up = self.rect.top\n self.spread_down = self.rect.bottom\n self.climb_okay = True\n # Keep track of the most recent fungi grown on the wall, looking at the top of the ones growing\n # upward and the bottom of the ones growing down\n self.grow_above = self.rect.top\n self.grow_below = self.rect.bottom\n\n self.timer = 660", "def build(_):", "def tick(self):\n\n commander = self.commander\n our_flag = commander.game.team.flag.position\n targetLocation = commander.game.team.flagScoreLocation\n\n if self.defender and (self.defender.health <= 0 or 
self.defender.flag):\n self.defender = None\n\n # First process bots that are done with their orders...\n for bot in commander.game.bots_available:\n\n\n\n if ( self.defender == None or bot.flag) and len(commander.game.bots_alive) > 1:\n self.defender = bot\n targetMin = our_flag - Vector2(2.0, 2.0)\n targetMax = our_flag + Vector2(2.0, 2.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n if position:\n their_flag = commander.game.enemyTeam.flag.position\n their_base = commander.level.botSpawnAreas[commander.game.enemyTeam.name][0]\n their_score = commander.game.enemyTeam.flagScoreLocation\n commander.issue(orders.Defend, self.defender, [(p-bot.position, t) for p, t in [(our_flag, 5.0), (their_flag, 2.5), (their_base, 2.5), (their_score, 2.5)]], description = 'defending by scanning')\n\n # If we captured the flag\n if commander.game.enemyTeam.flag.carrier != None:\n # Return the flag home relatively quickly!\n targetMin = targetLocation - Vector2(2.0, 2.0)\n targetMax = targetLocation + Vector2(2.0, 2.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n commander.issue(orders.Charge, bot, position, description = 'returning enemy flag!')\n # In this case, the flag has not been captured yet\n else:\n path = [commander.game.enemyTeam.flag.position]\n\n if random.choice([True, False]):\n targetPosition = commander.game.team.flag.position\n targetMin = targetPosition - Vector2(8.0, 8.0)\n targetMax = targetPosition + Vector2(8.0, 8.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n if position and (targetPosition - position).length() > 3.0:\n commander.issue(orders.Charge, bot, position, description = 'defending the flag')\n else:\n commander.issue(orders.Charge, bot, path, description = 'attacking enemy flag')\n\n\n # Process the bots that are waiting for orders, bots are in a holding attack pattern.\n holding = len(commander.game.bots_holding)\n for bot in commander.game.bots_holding:\n if holding > 3:\n commander.issue(orders.Charge, bot, random.choice([b.position for b in bot.visibleEnemies]))", "def new_field(left, right, top, bottom):\n fieldlist = []\n for x in range(width + 1):\n fieldlist.append({})\n for y in range(length + 1):\n fieldlist[x][y] = None\n for x in range(width + 1):\n for y in range(length + 1):\n #creates trees\n if ((not ((x == 0 or x == width - 1) and y == length - 2)) and (x%2 == 0 and (y == 0 or y == length - 2))):\n fieldlist[x][y] = Tree1\n elif ((not ((x == 1 or x == width) and y == length - 2)) and (x%2 == 1 and (y == 0 or y == length - 2))):\n fieldlist[x][y] = Tree2\n elif ((not ((x == 0 or x == width - 1) and y == 2)) and (x%2 == 0 and (y == 2 or y == length))):\n fieldlist[x][y] = Tree5\n elif ((not ((x == 1 or x == width) and y == 2)) and (x%2 == 1 and (y == 2 or y == length))):\n fieldlist[x][y] = Tree6\n elif (((x == 0 or x == width - 1) and y%2 == 1) or (x%2 == 0 and (y == 1 or y == length - 1))):\n fieldlist[x][y] = Tree3\n elif (((x == 1 or x == width) and y%2 == 1) or (x%2 == 1 and (y == 1 or y == length - 1))):\n fieldlist[x][y] = Tree4\n elif ((x == 0 or x == width - 1) and y%2 == 0):\n fieldlist[x][y] = Tree7\n elif ((x == 1 or x == width) and y%2 == 0):\n fieldlist[x][y] = Tree8\n elif (x == 2 or x == width - 2 or y == 3):\n fieldlist[x][y] = plain\n else:\n #Creates random grass patterns\n if (random.random()<0.082):\n for i in range(random.randint(1,3)):\n for j in range(random.randint(1,3)):\n for k in range(random.randint(1,3)):\n for l in 
range(random.randint(1,3)):\n if (fieldlist[x+i][y+j] == None):\n fieldlist[x+i][y+j] = grass\n if (fieldlist[x+i][y-l] == None):\n fieldlist[x+i][y-l] = grass\n if (fieldlist[x-k][y+j] == None):\n fieldlist[x-k][y+j] = grass\n if (fieldlist[x-k][y-l] == None):\n fieldlist[x-k][y-l] = grass\n else:\n fieldlist[x][y] = None\n \n #fills all other tiles with plain tile images\n for x in range(width + 1):\n for y in range(length + 1):\n if (fieldlist[x][y] == None):\n fieldlist[x][y] = plain\n \n #creates clearings\n if (left):\n left_clearing(fieldlist)\n if (right):\n right_clearing(fieldlist)\n if (top):\n top_clearing(fieldlist)\n if (bottom):\n bottom_clearing(fieldlist)\n return fieldlist", "def HellFire_ShotGuns(self):\n\t\tprint(self.name.title() + \" is now shooting.\")", "def follow_ball(rel_state):\n\n # Transform rel_state string into separate variables\n\n # Split into individual relational mini states\n rel_list = rel_state.split(\" AND \")\n\n # Check whether the ball is present\n ball_present = True if 'b_pre' in rel_list else False\n\n # Check which object is more to the right\n if 'l_x(b,p)' in rel_list:\n right_obj = 'p'\n elif 's_x(b,p)' in rel_list:\n right_obj = 'none'\n elif 'm_x(b,p)' in rel_list:\n right_obj = 'b'\n\n # Check x trajectory of ball\n if 'l_trajx(b2,b1)' in rel_list:\n ball_trajx = 'l'\n elif 's_trajx(b2,b1)' in rel_list:\n ball_trajx = 's'\n elif 'm_trajx(b2,b1)' in rel_list:\n ball_trajx = 'm'\n\n # Implement minimal rule (ignore paddle_traj x and y completely)\n if ball_present:\n if right_obj == 'b':\n if ball_trajx == 'l':\n action = 0\n elif ball_trajx == 's':\n action = 2\n elif ball_trajx == 'm':\n action = 2\n elif right_obj == 'none':\n if ball_trajx == 'l':\n action = 3\n elif ball_trajx == 's':\n action = 0\n elif ball_trajx == 'm':\n action = 2\n elif right_obj == 'p':\n if ball_trajx == 'l':\n action = 3\n elif ball_trajx == 's':\n action = 3\n elif ball_trajx == 'm':\n action = 0\n else:\n action = np.random.choice(range(4))\n\n return action", "def test_goto_field_boss_slew(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(3, 26, 0, 0, cmdState)", "def build_W(points):\n return None", "def build_cave(data):\n \n \n # MAKING A MATRIX OF GIVEN SIZE\n \n matrix = [[0 for x in range(data['size'])] for y in range(data['size'])] \n \n \n # Checking given conditions\n \n if len(data)==0:\n return None\n \n if data[\"size\"] <= 0: \n return None\n\n if \"entrance\" in data:\n if len([data[\"entrance\"]])>1:\n return None\n \n if \"exit\" in data:\n if len([data[\"exit\"]])>1:\n return None\n \n \n # Checking the given conditions\n \n if \"entrance\" not in data:\n return None\n if \"exit\" not in data:\n return None\n if \"size\" not in data:\n return None\n \n \n \n if \"walls\" not in data and \"treasure\" not in data and \"sword\" not in data:\n \n return None\n \n \n # Getting the coordinates near dragon\n \n if \"dragon\" in data:\n \n \n \n locations_near_dragon = []\n \n dx = data[\"dragon\"][0]\n dy = data[\"dragon\"][1]\n \n \n locations_near_dragon.append((dx - 1, dy))\n\n locations_near_dragon.append((dx, dy - 1))\n\n locations_near_dragon.append((dx + 1, dy))\n\n locations_near_dragon.append((dx, dy + 1))\n\n locations_near_dragon.append((dx + 1, dy - 1))\n\n 
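# NOTE (annotation, assumption about intent): the appends above and below enumerate the eight cells of the Moore neighborhood around the dragon; an equivalent one-liner would be [(dx + i, dy + j) for i in (-1, 0, 1) for j in (-1, 0, 1) if (i, j) != (0, 0)].\n\n 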
locations_near_dragon.append((dx - 1, dy + 1))\n \n locations_near_dragon.append((dx + 1, dy + 1))\n\n locations_near_dragon.append((dx - 1, dy - 1))\n \n \n # Checking whether entrance lies near dragon\n \n if \"dragon\" in data:\n if tuple(data[\"entrance\"]) in locations_near_dragon:\n return None\n \n \n \n if \"size\" not in data:\n return None\n \n if data[\"size\"] <= 0:\n return None\n \n \n \n \n if \"exit\" not in data:\n \n return None\n \n \n if \"treasure\" in data:\n if (len(data[\"treasure\"]))>3:\n return None\n \n if (len(data[\"treasure\"]))<0:\n return None\n \n # Making Treasure as $\n \n else:\n \n list1 = []\n \n for i in range(0, len(data[\"treasure\"])):\n list1.append(data[\"treasure\"][i])\n \n for k, j in list1:\n\n matrix[k][j] = \"$\"\n \n # Making Entrance as @\n # Making Exit as X\n \n matrix[data['entrance'][0]][data['entrance'][1]] = \"@\"\n matrix[data['exit'][0]][data['exit'][1]] = \"X\"\n \n # Making Dragon as W\n \n if \"dragon\" in data:\n \n if (data[\"dragon\"][0] < 0 or data[\"dragon\"][0] > data[\"size\"] or\n data[\"dragon\"][1] < 0 or data[\"dragon\"][1] > data[\"size\"]):\n return None\n \n if (len([data[\"dragon\"]]))>1:\n return None\n \n elif (len([data[\"dragon\"]]))==1:\n matrix[data['dragon'][0]][data['dragon'][1]] = \"W\"\n \n # Making Sword as t\n \n if \"sword\" in data: \n \n if (len([data[\"sword\"]]))>1:\n return None\n \n elif (len([data[\"sword\"]]))==1:\n matrix[data['sword'][0]][data['sword'][1]] = \"t\"\n\n \n \n # Making Walls as #\n \n if \"walls\" in data: \n \n list2=[]\n \n for i in range(0, len(data[\"walls\"])):\n \n list2.append(data[\"walls\"][i])\n\n for k, j in list2:\n\n matrix[k][j] = \"#\"\n \n \n \n for m in range(0, data[\"size\"]):\n for n in range(0, data[\"size\"]):\n \n if matrix[m][n] == 0:\n \n matrix[m][n]=\".\"\n \n return matrix", "def fire(self):\n prepare.SFX[\"explosion\"].play()\n pos = project(self.base_rect.center, self.angle, 38)\n return Gift(pos, self.angle, self.fire_speed)", "def make(self,state_board):\n\t\tstate_board[self.column][self.line] = self.couleur #place the piece\n\t\tdrawPiece((self.column,self.line),self.couleur) #draws it on the board\n\t\tfor pos in self.flips: #flips all the pieces in flips\n\t\t\tstate_board[pos[0]][pos[1]] = self.couleur\n\t\t\tdrawPiece(pos,self.couleur) #draws it on the board", "def draw_house():\n houseturtle.begin_fill()\n for i in range (4): # Draws the house\n houseturtle.forward(200)\n houseturtle.left(90)\n houseturtle.forward(300)\n houseturtle.left(90)\n\n houseturtle.end_fill() # Gets ready to draw the door\n houseturtle.penup()\n houseturtle.color(106, 65, 5)\n houseturtle.forward(87)\n houseturtle.left(90)\n houseturtle.pendown()\n houseturtle.begin_fill()\n\n for q in range(2): # Draws the door and fills it\n houseturtle.forward(75)\n houseturtle.right(90)\n houseturtle.forward(30)\n houseturtle.right(90)\n\n houseturtle.end_fill()\n\n houseturtle.penup() # Positions the turtle to draw the roof\n houseturtle.setpos(0, 100)\n houseturtle.color(43, 17, 5)\n houseturtle.pendown()\n houseturtle.setx(-200)\n houseturtle.right(180)\n\n houseturtle.begin_fill() # Draws and fills in the roof\n houseturtle.setpos(0, 200)\n houseturtle.setpos(200, 100)\n houseturtle.setpos(-200, 100)\n houseturtle.end_fill()\n\n houseturtle.penup() # Draws the window frame; will be filled in with draw_window_pane\n houseturtle.setpos(0, 35)\n houseturtle.color(100, 100, 100)\n houseturtle.pensize(5)\n houseturtle.pendown()\n houseturtle.forward(80)\n houseturtle.forward(-40)\n 
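# NOTE (annotation, assumption about intent): the forward/back moves and turns here trace the plus-shaped frame that splits the window into four panes, which draw_window_pane() then fills one at a time.\n 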
houseturtle.right(90)\n houseturtle.forward(40)\n houseturtle.forward(-80)\n houseturtle.right(90)\n houseturtle.forward(5)\n houseturtle.color(\"white\")\n\n draw_window_pane() # Fills in a pane of the window frame with a white square\n\n houseturtle.setpos(40, -45) # Next 3 lines are filling in the rest of the frame\n draw_window_pane()\n\n houseturtle.setpos(-5, -45)\n draw_window_pane()\n\n houseturtle.setpos(-5, 0)\n draw_window_pane()\n\n houseturtle.setpos(45, 40) # Prepares to draw square around the window to finish the framing\n houseturtle.color(100, 100, 100)\n houseturtle.pendown()\n houseturtle.left(90)\n\n for u in range(4): # Draws the square\n houseturtle.forward(90)\n houseturtle.left(90)", "def tick(self):\n\n #Simplified commands\n captured = self.captured()\n commander = self.commander\n targetLocation = commander.game.team.flagScoreLocation\n enemyFlagLocation = commander.game.enemyTeam.flag.position\n our_flag = commander.game.team.flag.position\n\t\t\n\t\t#when our defender is dead or missing, assign a new one as soon as a bot is free\n if self.defender and (self.defender.health <= 0):\n self.defender = None\n\n # First process bots that are done with their orders or don't have any order yet\n for bot in commander.game.bots_available:\n if self.defender == None and self.commander.game.enemyTeam.flag.carrier != bot:\n self.defender = bot\n\n targetMin = our_flag - Vector2(2.0, 2.0)\n targetMax = our_flag + Vector2(2.0, 2.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n if (our_flag-bot.position).length() > 2:\n commander.issue(orders.Charge, self.defender, position, description = 'run to the flag')\n else:\n commander.issue(orders.Attack, bot, position, description = 'defend around flag')\n else:\n if self.defender == bot:\n self.defender = None\n if captured:\n # Return the flag home\n targetMin = targetLocation - Vector2(4.0, 4.0)\n targetMax = targetLocation + Vector2(4.0, 4.0)\n position = commander.level.findRandomFreePositionInBox((targetMin,targetMax))\n commander.issue(orders.Charge, bot, position, description = 'return enemy flag!')\n else:\n # Find the enemy team's flag position and run to that.\n if random.choice([True, False]):\n commander.issue(orders.Attack, bot, enemyFlagLocation, description = 'Attack flag!')\n else:\n commander.issue(orders.Charge, bot, enemyFlagLocation, description = 'Charge flag!')\n\n for bot in commander.game.bots_holding:\n if captured:\n targetLocation = commander.game.team.flagScoreLocation\n commander.issue(orders.Charge, bot, targetLocation , description = 'return enemy flag!')\n else:\n commander.issue(orders.Charge, bot, enemyFlagLocation, description = 'Charge flag!')", "def buildSubType(self):\n \n if self.subType == \"wallType\":\n \"\"\"Build a wall\"\"\"\n \n if \"col\" in self.name:\n \"\"\"Build the collision body for this wall\"\"\"\n self.bulletBody = self.factory.basePhysics.buildTriangleMesh(\n self.object, self.levelEgg, 0, self.isDynamic)\n \n else:\n self.object.reparentTo(self.renderObjectsLevel)\n \n elif self.subType == \"groundType\":\n \"\"\"Build the ground with either custom Mesh or use the plane\"\"\"\n if self.useBulletPlane:\n self.factory.basePhysics.buildGroundPlane()\n \n self.object.reparentTo(self.renderObjectsLevel)\n self.object.setPos(self.position)\n self.object.setHpr(self.hpr)\n \n else:\n \n if \"col\" in self.name:\n self.bulletBody = self.factory.basePhysics.buildTriangleMesh(\n self.object, self.levelEgg, 0, self.isDynamic)\n \n else:\n 
self.object.reparentTo(self.renderObjectsLevel)\n self.object.setPos(self.position)\n self.object.setHpr(self.hpr)", "def generateData(building, floors):\n # initialize dictionaries\n floor_dictionary = {}\n up_dictionary = {}\n down_dictionary = {}\n i = 0\n # generates random number of people at each floor\n while i < floors:\n floor_dictionary[i] = random.randint(0,5)\n i += 1\n # allocates which passengers are going up and which are going down\n for key in floor_dictionary:\n counter_up = 0\n up_array = []\n counter_down = 0\n down_array = []\n\n # people at top floor cannot go up, bottom floor cannot go down.\n if key != (floors - 1) and key != 0:\n people_up = random.randint(0,floor_dictionary[key])\n people_down = floor_dictionary[key] - people_up\n elif key == 0:\n people_down = 0\n people_up = floor_dictionary[key]\n elif key == (floors - 1):\n people_down = floor_dictionary[key]\n people_up = 0\n\n # assign random floor to people going up, above current floor\n while counter_up < people_up:\n up_array.append(random.randint(key+1, (floors-1)))\n counter_up += 1\n # assign random floor to people going down, below current floor\n while counter_down < people_down:\n down_array.append(random.randint(0, key-1))\n counter_down += 1\n\n # assign each array to dictionary for that key\n up_dictionary[key] = up_array\n down_dictionary[key] = down_array\n\n # update object with new values generated\n building = Building(floors, floor_dictionary, up_dictionary, down_dictionary)\n return building", "def test_goto_field_boss_hartmann(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n cmdState.doGuider = False\n self._goto_field_boss(5, 29, 0, 0, cmdState)", "def _setup_rumble(self):\n self.left_rumble = self._get_ready_to('99500')\n self.right_rumble = self._get_ready_to('00599')\n self.double_rumble = self._get_ready_to('99599')", "def get_wave(self, wave, game):\n enemies = []\n\n if 1 <= wave <= 2:\n #A hardcoded list of (step, enemy) pairs\n\n enemies = [ (10, SimpleEnemy()),(12, SimpleEnemy()),(14, SimpleEnemy())]\n\n\n elif 3 <= wave < 8:\n #List of (step, enemy) pairs spread across an interval of time (steps)\n\n steps = int(40 * (wave ** .5)) #The number of steps to spread the enemies across\n count = wave * 2 #The number of enemies to spread across the (time) steps\n\n for step in self.generate_intervals(steps, count):\n #make enemies have more health each wave!\n enemies.append((step, SimpleEnemy(health=wave/2*100)))\n #enemies.append((step+20, SwarmEnemy()))\n\n elif 7 <= wave < 10:\n #List of (step, enemy) pairs spread across an interval of time (steps)\n\n steps = int(40 * (wave ** .5)) #The number of steps to spread the enemies across\n count = wave #The number of enemies to spread across the (time) steps\n\n for step in self.generate_intervals(steps, count):\n enemies.append((step, SimpleEnemy(health=wave/2*100)))\n enemies.append((step+20, HardenedEnemy(health=wave/2*100)))\n\n elif wave == 10:\n #Generate sub waves\n sub_waves = [\n #(steps, number of enemies, enemy constructor, args, kwargs)\n (50, 10, SimpleEnemy, (), {}), #10 enemies over 50 steps\n (100, None, None, None, None), #then nothing for 100 steps\n (50, 10, SimpleEnemy, (), {}), #then another 10 enemies over 50 steps\n (30, 1, lambda game=game: SuperRichardEnemy(game,health=wave/2.5*1500), (), {}),\n ]\n\n 
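# NOTE (assumption): generate_sub_waves is not shown in this snippet; presumably it expands each (steps, count, constructor, args, kwargs) tuple into (step, enemy) pairs, treating a None constructor as a pause, roughly:\n #\n # offset = 0\n # for steps, count, ctor, args, kwargs in sub_waves:\n # if ctor is not None:\n # for step in self.generate_intervals(steps, count):\n # enemies.append((offset + step, ctor(*args, **kwargs)))\n # offset += steps\n 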
enemies = self.generate_sub_waves(sub_waves)\n\n else: #11 <= wave <= 20\n #Now it's going to get hectic\n\n sub_waves = [\n (\n int(13 * wave), #total steps\n int(25 * wave ** (wave / 50)), #number of enemies\n SimpleEnemy, #enemy constructor\n (), #positional arguments to provide to enemy constructor\n {}, #keyword arguments to provide to enemy constructor\n ),\n (\n int(13 * wave), #total steps\n int(25 * wave ** (wave / 50)), #number of enemies\n HardenedEnemy, #enemy constructor\n (), #positional arguments to provide to enemy constructor\n {}, #keyword arguments to provide to enemy constructor\n ),\n (\n int(2 * wave), #total steps\n int(wave/8 + 1), #number of enemies\n lambda game=game: SuperRichardEnemy(game), #enemy constructor\n (), #positional arguments to provide to enemy constructor\n {}, #keyword arguments to provide to enemy constructor\n ),\n\n ]\n enemies = self.generate_sub_waves(sub_waves)\n\n return enemies", "def warriorBattle1_2():\n print(\"You charge the ogre with full force.\")\n print(\"You smash into the ogre,\")\n print(\"with your warrior strength the ogre's feet leave the ground\")\n print(\"You push the ogre straight into the cave wall,\")\n print(\"as the ogre smashes into it, it collapses on the ground\")\n print(f\"The ogre's health is now {ogre_data - 100}\")\n print(\"The ogre is dead\")\n print(f\"Your health is now {hero_data[0] - 80}\")\n get_warrior_loot()\n warriorPath1_1_1()", "def build_logo(ph):\n\n # Robot\n sh = ShBox(vec3(robot_c,robot_c,robot_c))\n robot = OSimple(sh)\n robot.color = Color4(1.,1.,0.)\n robot.trans = robot_tr\n robot.addToWorld(ph)\n\n # Dots\n sh = ShSphere(dot_r)\n for x in (0.50, 0.90):\n o = OSimple(sh)\n o.color = Color4.white()\n o.pos = robot_tr * vec3(x, 0, 0)\n o.addToWorld(ph)\n\n # Cross\n sh = ShBox(vec3(cross_r,3*cross_r,cross_r))\n for a in (math.pi/4, 3*math.pi/4):\n o = OSimple(sh)\n o.color = Color4.white()\n o.pos = robot_tr * vec3(1.50, 0, 0)\n o.rot = robot_tr.basis * quat(0, 0, a)\n o.addToWorld(ph)\n\n # Ground\n o = OGroundSquareStart(color0, color1, color2)\n o.addToWorld(ph)\n\n sh = ShBox(vec3(start_c*0.5, start_c*0.5, Display.draw_epsilon*2))\n v = 0.5 * ( table_size - vec2(start_c, start_c) )\n o = OSimple(sh)\n o.color = color1\n o.pos = vec2(-v.x, v.y)\n o.addToWorld(ph)\n o = OSimple(sh)\n o.color = color2\n o.pos = v\n o.addToWorld(ph)\n\n # Walls (N, S, E, W)\n sh = ShBox(0.5*vec3(table_size.x+2*wall_width, wall_width, wall_height))\n o = OSimple(sh)\n o.color = Color4.black()\n o.pos = 0.5*vec3(0, table_size.y+wall_width, wall_height)\n o.addToWorld(ph)\n o = OSimple(sh)\n o.color = Color4.black()\n o.pos = 0.5*vec3(0, -table_size.y-wall_width, wall_height)\n o.addToWorld(ph)\n\n sh = ShBox(0.5*vec3(wall_width, table_size.y+2*wall_width, wall_height))\n o = OSimple(sh)\n o.color = Color4.black()\n o.pos = 0.5*vec3(table_size.x+wall_width, 0, wall_height)\n o.addToWorld(ph)\n o = OSimple(sh)\n o.color = Color4.black()\n o.pos = 0.5*vec3(-table_size.x-wall_width, 0, wall_height)\n o.addToWorld(ph)", "def draw(self):\r\n #if the UFO has only 1 life left, turn it red\r\n if(self.life <= 1):\r\n TARGET_UFO_COLOR = arcade.color.RED\r\n #If UFO has more than 1 life left, keep it silver\r\n else:\r\n TARGET_UFO_COLOR = arcade.color.SILVER\r\n arcade.draw_circle_outline(self.center.x, self.center.y, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 3)\r\n arcade.draw_ellipse_filled(self.center.x, self.center.y, TARGET_UFO_WIDTH, TARGET_UFO_HEIGHT, TARGET_UFO_COLOR, 15)", "def set_own_wake(cls):\r\n \"\"\" EXECUTE 
THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n\r\n # alias:\r\n N = Turbine.N\r\n F = Turbine.F\r\n t = Turbine.t\r\n \r\n cls.wn = np.zeros((N, N), dtype=float)\r\n cls.wt = np.zeros((N, N), dtype=float)\r\n \r\n # fill the normal loads wake matrix (lower half):\r\n right_index = (np.arange(N/2, N)).astype(int)\r\n left_index = (np.arange(N/2 - 1, -1, -1)).astype(int)\r\n cls.wn[right_index, right_index] = 1.00\r\n cls.wn[right_index, left_index] = -1.00\r\n \r\n # fill the tangential loads wake matrix (lower half):\r\n Y = F*np.cos(t[N//2+1: N-1]) \r\n cls.wt[right_index[1:-1], right_index[1:-1]] = -Y/np.sqrt(1.0 - Y**2)\r\n cls.wt[right_index[1:-1], left_index[1:-1]] = -Y/np.sqrt(1.0 - Y**2)\r\n \r\n # the [1:-1] means that the head and tail are omitted due to the fact\r\n # that Y would yield values greater than 1 in the poles, thereby \r\n # leading to singularities (-Y/(1 - Y^2)^(1/2)).\r", "def house(x, y):\r\n # roof\r\n triangle(x + 15, y, x, y + 15, x + 30, y + 15)\r\n # bottom of the house\r\n rect(x, y + 15, 30, 30)\r\n # door\r\n rect(x + 12, y + 30, 10, 15)", "def fire_bullet(settings, screen, ship, bullets):\n if len(bullets) < settings.bullets_allowed: #create a new bullet and add it to the bullets group.bullets\n new_bullet = Bullet(settings, screen, ship.rect.centerx, ship.rect.top)\n bullets.add(new_bullet)# stores the new bullet in the group bullets.\n\n now = pygame.time.get_ticks()\n if now - ship.last_shot > 150:\n ship.last_shot = now\n if ship.power == 1:\n bullet = Bullet(settings, screen, ship.rect.centerx, ship.rect.top)\n bullets.add(bullet)\n if ship.power == 2:\n bullet1 = Bullet(settings, screen, ship.rect.left, ship.rect.centery)\n bullet2 = Bullet(settings, screen, ship.rect.right, ship.rect.centery)\n bullets.add(bullet1)\n bullets.add(bullet2)\n if ship.power >=3:\n bullet1 = Bullet(settings, screen, ship.rect.left, ship.rect.centery)\n bullet2 = Bullet(settings, screen, ship.rect.right, ship.rect.centery)\n bullet3 = Bullet(settings, screen, ship.rect.centerx, ship.rect.top)\n bullets.add(bullet1)\n bullets.add(bullet2)\n bullets.add(bullet3)", "def gen_flinch_trigs(self, vmf: VMF, name: str, start_disabled: str) -> None:\n normal = self.normal()\n\n # Horizontal fizzlers would just have you fall through.\n if abs(normal.z) > 1e-6:\n return\n\n # Disabled.\n if not options.get_itemconf(('VALVE_FIZZLER', 'FlinchBack'), False):\n return\n\n # Make global entities if not present.\n if '_fizz_flinch_hurt' not in vmf.by_target:\n glob_ent_loc = options.get(Vec, 'global_ents_loc')\n vmf.create_ent(\n classname='point_hurt',\n targetname='_fizz_flinch_hurt',\n Damage=10, # Just for visuals and sounds.\n # BURN | ENERGYBEAM | PREVENT_PHYSICS_FORCE\n DamageType=8 | 1024 | 2048,\n DamageTarget='!activator', # Hurt the triggering player.\n DamageRadius=1, # Target makes this unused.\n origin=glob_ent_loc,\n )\n\n # We need two catapults - one for each side.\n neg_brush = vmf.create_ent(\n targetname=name,\n classname='trigger_catapult',\n spawnflags=1, # Players only.\n origin=self.base_inst['origin'],\n physicsSpeed=0,\n playerSpeed=96,\n launchDirection=(-normal).to_angle(),\n startDisabled=start_disabled,\n )\n neg_brush.add_out(Output('OnCatapulted', '_fizz_flinch_hurt', 'Hurt'))\n\n pos_brush = neg_brush.copy()\n pos_brush['launchDirection'] = normal.to_angle()\n vmf.add_ent(pos_brush)\n\n for seg_min, seg_max in self.emitters:\n neg_brush.solids.append(vmf.make_prism(\n p1=(seg_min\n - 4 * normal\n - 64 * self.up_axis\n ),\n p2=seg_max + 64 * self.up_axis,\n 
mat=consts.Tools.TRIGGER,\n ).solid)\n pos_brush.solids.append(vmf.make_prism(\n p1=seg_min - 64 * self.up_axis,\n p2=(seg_max\n + 4 * normal\n + 64 * self.up_axis\n ),\n mat=consts.Tools.TRIGGER,\n ).solid)", "def build_scene(self, scene, reverse=False):\n success_trial = 'success' in scene\n if (scene.startswith(\"barge_in\")\n or scene.startswith(\"dynamic_barge_in\")):\n num_people = 4\n # Walls\n wall_width = 1.0\n wall_length = 7.0\n wall_dist = 4.0\n human_goal_dist = 3.0\n up_wall_vertices = [\n (wall_length, 2*wall_width + wall_dist),\n (0, 2*wall_width + wall_dist),\n (0, wall_width + wall_dist),\n (wall_length, wall_width + wall_dist)\n ]\n down_wall_vertices = [\n (wall_length, wall_width),\n (0, wall_width),\n (0, 0),\n (wall_length, 0)\n ]\n self.obstacles.append(up_wall_vertices)\n self.obstacles.append(down_wall_vertices)\n\n # Add the robot\n robot_pos = (wall_length - 1.0, wall_width + wall_dist/2.0 +\n randomize(-0.5, 0.5))\n self.overall_robot_goal = (wall_length + 3.0, wall_width +\n wall_dist/2.0 + randomize(-0.5, 0.5))\n if reverse:\n tmp = robot_pos\n robot_pos = self.overall_robot_goal\n self.overall_robot_goal = tmp\n self.robot_num = self.sim.addAgent(\n robot_pos,\n 10.0, 10, 2.0, 5.0, 0.5, 3.0, (0, 0)\n )\n self.agents.append(self.robot_num)\n self.goals.append(robot_pos)\n self.headings.append(randomize(-math.pi/8, math.pi/8))\n\n hum_perb = 0.1 # Random perturbation to add to human positions\n if scene.startswith(\"barge_in\"):\n # \"Humans,\" really just obstacles that fill the corridor\n # Note that they are just the same vertex thrice because RVO2\n # didn't like one vert obstacles and shapely needs 3 verticies\n # to treat them like a polygon (used to find dist from robot\n # to obstacles).\n hums = [\n [\n (wall_length + 0.2, wall_width + 0.1),\n (wall_length + 0.2, wall_width + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1, wall_width + 0.1)\n ],\n [\n (wall_length + 0.2,\n wall_width + wall_dist / num_people + 0.1),\n (wall_length + 0.2,\n wall_width + wall_dist / num_people + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1,\n wall_width + wall_dist / num_people + 0.1)\n ],\n [\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 2 + 0.1),\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 2 + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1,\n wall_width + wall_dist / num_people * 2 + 0.1)\n ],\n [\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 3 + 0.1),\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 3 + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1,\n wall_width + wall_dist / num_people * 3 + 0.1)\n ]\n ]\n for hum in hums:\n for i, vert in enumerate(hum):\n hum[i] = (vert[0] + hum_perb * random.random(),\n vert[1] + hum_perb * random.random())\n self.obstacles.append(hum)\n else:\n if success_trial:\n num_people = 4\n pos1 = (wall_length + randomize(0, 0.5),\n wall_width + (wall_dist / num_people) / 2.0)\n goal1 = (pos1[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos1[1] - 1.0)\n\n pos2 = (wall_length + randomize(1.0, 1.5), pos1[1] + 1.0)\n goal2 = (pos2[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos1[1] - 0.5)\n\n pos3 = (wall_length + randomize(0, 0.5),\n pos2[1] + 1.0)\n goal3 = (pos3[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos3[1] + 0.5)\n\n pos4 = (wall_length + randomize(1.0, 1.5), pos3[1] + 1.0)\n goal4 = (pos4[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos4[1] + 1.0)\n\n poses = [pos1, pos2, pos3, pos4]\n gs = [goal1, goal2, goal3, goal4]\n if reverse:\n poses = [goal1, goal2, goal3, goal4]\n gs = [pos1, 
pos2, pos3, pos4]\n for p in poses:\n self.agents.append(self.sim.addAgent(\n p, 10.0, 10, 2.0, 5.0,\n 0.5, 0.7, (0, 0)\n ))\n self.headings.append(randomize(-math.pi/8, math.pi/8))\n for g in gs:\n self.goals.append(g)\n else:\n # Make humans actual agents that move either towards or\n # away from the robot\n min_hum = 4\n max_hum = 4\n max_hum_rad = 0.5\n num_hum = random.randint(min_hum, max_hum)\n for i in range(num_hum):\n # Stack humans in front of the passage\n pos = (\n wall_length+2*max_hum_rad\n + random.random() * hum_perb,\n wall_width+wall_dist+0.1\n + random.random() * hum_perb\n - 2*(max_hum_rad + hum_perb)\n * (max_hum/num_hum) * i\n )\n self.agents.append(self.sim.addAgent(\n pos, 10.0, 10, 2.0, 5.0, 0.5,\n 0.7, (0, 0)\n ))\n goal_min = -2.0\n goal_max = -1.0\n self.goals.append((\n pos[0] + randomize(goal_min, goal_max),\n wall_width + wall_dist/2.0\n ))\n self.headings.append(\n normalize(randomize(7*math.pi/8, 9*math.pi/8))\n )\n # By default, builds a scene in which the robot barges in to the\n # right. If one of the following specific scenes is provided,\n if scene.endswith(\"left\"): # Negate x coordinate\n for obs in self.obstacles:\n for i, vert in enumerate(obs):\n obs[i] = (-vert[0], vert[1])\n obs.reverse() # Verticies must be in ccw order\n for agent in self.agents:\n pos = self.sim.getAgentPosition(agent)\n self.sim.setAgentPosition(agent, (-pos[0], pos[1]))\n for i, goal in enumerate(self.goals):\n self.goals[i] = (-goal[0], goal[1])\n for i, heading in enumerate(self.headings):\n self.headings[i] = normalize(heading + math.pi)\n self.overall_robot_goal = (-self.overall_robot_goal[0],\n self.overall_robot_goal[1])\n elif scene.endswith(\"top\"): # flip x and y coordinates\n for obs in self.obstacles:\n for i, vert in enumerate(obs):\n obs[i] = (vert[1], vert[0])\n obs.reverse() # Verticies must be in ccw order\n for agent in self.agents:\n pos = self.sim.getAgentPosition(agent)\n self.sim.setAgentPosition(agent, (pos[1], pos[0]))\n for i, goal in enumerate(self.goals):\n self.goals[i] = (goal[1], goal[0])\n for i, heading in enumerate(self.headings):\n self.headings[i] = normalize(heading + math.pi/2)\n self.overall_robot_goal = (self.overall_robot_goal[1],\n self.overall_robot_goal[0])\n elif scene.endswith(\"bottom\"):\n # flip x and y coordinates\n # then negate new y\n for obs in self.obstacles:\n for i, vert in enumerate(obs):\n obs[i] = (vert[1], -vert[0])\n for agent in self.agents:\n pos = self.sim.getAgentPosition(agent)\n self.sim.setAgentPosition(agent, (pos[1], -pos[0]))\n for i, goal in enumerate(self.goals):\n self.goals[i] = (goal[1], -goal[0])\n for i, heading in enumerate(self.headings):\n self.headings[i] = normalize(heading - math.pi/2)\n self.overall_robot_goal = (self.overall_robot_goal[1],\n -self.overall_robot_goal[0])\n for obs in self.obstacles:\n self.sim.addObstacle(obs)\n elif scene == \"crossing\": # Build crossing scene\n position1 = (-1.5, 25.0)\n position2 = (2.5, 25.0)\n self.robot_num = self.sim.addAgent(\n position1, 15.0, 10, 5.0, 5.0,\n randomize(0.15, 0.25), randomize(0.8, 2.0)\n )\n self.agents.append(self.robot_num)\n self.goals.append(position2)\n self.headings.append(normalize(randomize(-math.pi/8, math.pi/8)))\n\n self.agents.append(\n self.sim.addAgent(\n position2, 15.0, 10, 5.0, 5.0, randomize(0.15, 0.25),\n randomize(0.8, 2.0)\n )\n )\n self.goals.append(position1)\n self.headings.append(normalize(randomize(7 * math.pi/8,\n 9 * math.pi/8)))\n elif scene.startswith(\"overtake\"): # overtaking scene\n neighbor_dist 
= 10.0\n max_neighbors = 10\n time_horizon = 2.0\n time_horizon_obst = 5.0\n radius = 0.3\n robot_max_speed = 3.0\n slow_human_max_speed = 0.4\n human_max_speed = 0.7\n\n pos1 = (randomize(-2.0, 1.5), randomize(-1.0, 1.0)) # Robot\n # Human to overtake\n pos2 = (randomize(-1.0, -0.5), randomize(-1.0, 1.0))\n hum_goal = (randomize(5.0, 6.0), randomize(-1.0, 1.0))\n # Robot\n self.robot_num = self.sim.addAgent(pos1, neighbor_dist,\n max_neighbors, time_horizon, time_horizon_obst,\n radius, robot_max_speed, (0, 0))\n self.goals.append(pos1) # Robot has no explicit goal at first\n # Used to determine if success controller has failed.\n self.overall_robot_goal = hum_goal\n self.agents.append(self.robot_num)\n self.headings.append(\n normalize(randomize(-math.pi / 8, math.pi / 8)))\n # Human to overtake\n self.agents.append(self.sim.addAgent(pos2, neighbor_dist,\n max_neighbors, time_horizon, time_horizon_obst,\n radius, slow_human_max_speed, (0, 0)))\n self.goals.append(hum_goal)\n self.headings.append(\n normalize(randomize(-math.pi / 8, math.pi / 8)))\n # Another human going the opposite way\n self.agents.append(self.sim.addAgent(hum_goal, neighbor_dist,\n max_neighbors, time_horizon, time_horizon_obst,\n radius, human_max_speed, (0, 0)))\n self.goals.append(pos2)\n self.headings.append(\n normalize(math.pi + randomize(-math.pi / 8,\n math.pi / 8)))\n if not success_trial:\n # Add other humans walking around in the middle of the path...\n self.agents.append(self.sim.addAgent(\n (randomize(1.0, 2.0), randomize(-1.0, -2.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(-1.0, 0.0), randomize(0.0, 1.0)))\n self.headings.append(\n normalize(3 * math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n self.agents.append(self.sim.addAgent(\n (randomize(0.0, 1.0), randomize(0.0, -1.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(-2.0, -1.0), randomize(1.0, 2.0)))\n self.headings.append(\n normalize(3 * math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n self.agents.append(self.sim.addAgent(\n (randomize(-2.0, -1.0), randomize(1.0, 2.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(1.0, 2.0), randomize(-2.0, -1.0)))\n self.headings.append(\n normalize(-math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n self.agents.append(self.sim.addAgent(\n (randomize(0.0, -1.0), randomize(0.0, 1.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(0.0, 1.0), randomize(0.0, -1.0)))\n self.headings.append(\n normalize(-math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n \"\"\"else:\n self.agents.append(self.sim.addAgent(\n (hum_goal[0] + randomize(0.5, 0.7),\n hum_goal[1] + randomize(0.5, 0.7)), 15.0, 10, 5.0, 5.0,\n randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)\n ))\n self.goals.append((pos1[0] + randomize(0.5, 0.7), pos1[1] +\n randomize(0.5, 0.7)))\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\n\n self.agents.append(self.sim.addAgent(\n (pos1[0] + randomize(0.5, 0.7),\n pos1[1] + randomize(0.5, 0.7)), 15.0, 10, 5.0, 5.0,\n randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)\n ))\n self.goals.append((hum_goal[0] + randomize(0.5, 0.7),\n hum_goal[1] + randomize(0.5, 0.7)))\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\"\"\"\n\n else: # Build a random scene\n max_dim = self.max_dim # Maximum x and y start/goal locations\n 
min_agents = 5\n max_agents = 10\n min_obs = 5\n max_obs = 10\n num_agents = random.randint(min_agents, max_agents)\n num_obstacles = random.randint(min_obs, max_obs)\n # Create the robot\n robot_pos = (max_dim * random.random(), max_dim * random.random())\n self.robot_num = self.sim.addAgent(\n robot_pos\n )\n self.agents.append(self.robot_num)\n self.goals.append(robot_pos)\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\n # For this, just create small square obstacles\n for i in range(num_obstacles):\n pt = (max_dim * random.random(), max_dim * random.random())\n width = 0.2\n o = [\n pt, (pt[0] + width, pt[1]), (pt[0] + width, pt[1] + width),\n (pt[0], pt[1] + width)\n ]\n self.obstacles.append(o)\n self.sim.addObstacle(o)\n # Create agents in random spots with random goals\n for i in range(num_agents):\n self.agents.append(\n self.sim.addAgent(\n (max_dim * random.random(), max_dim * random.random())\n )\n )\n self.goals.append(\n (max_dim * random.random(), max_dim * random.random())\n )\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\n\n self.sim.processObstacles()\n if self.file is not None:\n # First line is obstacles in the scene\n self.file.write(str(self.obstacles) + \"\\n\")\n self.file.write(\"timestamp position0 velocity0 radius0 \"\n \"heading0 goal \")\n self.file.write(\"pref_speed theta \")\n num = 1\n for _ in range(len(self.agents) - 1):\n self.file.write(\"position\" + str(num) + \" \")\n self.file.write(\"velocity\" + str(num) + \" \")\n self.file.write(\"radius\" + str(num) + \" \")\n self.file.write(\"heading\" + str(num) + \" \")\n num += 1\n for _ in self.obstacles:\n self.file.write(\"position\" + str(num) + \" \")\n self.file.write(\"velocity\" + str(num) + \" \")\n self.file.write(\"radius\" + str(num) + \" \")\n self.file.write(\"heading\" + str(num) + \" \")\n num += 1\n self.file.write(\"\\n\")\n self.update_visualization()", "def test_g1_perform_tick(self):\n config.NR_ROWS = 5\n config.NR_COLS = 5\n blinker = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n new_gamefield = logic.perform_tick(blinker)\n\n self.assertEqual(new_gamefield, [\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n ])", "def fire(board,target):\n\n # verifies the input \n if target[0] < 0 or target[0] > len(board[0]) - 1 or \\\n target[1] < 0 or target[1] > len(board) - 1:\n return\n \n if board[target[1]][target[0]] is None: # when miss\n return False, 0\n else: # when hit\n destroyed_index = 0 # 0 if the ship didn't destroy completely\n # reduce the size of the ship\n board[target[1]][target[0]][2][0] = board[target[1]][target[0]][2][0] \\\n - 1\n # check if the ship have been destroyed completely\n if board[target[1]][target[0]][2][0] == 0:\n # recieve the index of the destroyed ship\n destroyed_index = board[target[1]][target[0]][0] \n board[target[1]][target[0]] = None # remove the ship part from board\n return True, destroyed_index", "def __init__(self, num_players):\n self.num_players = num_players\n self.firework = [[], [], [], [], []]\n self.nb_blue_stone = MAX_BLUE_STONE\n self.nb_red_stone = MAX_RED_STONE\n self.draw = None\n self.hands = None\n self.fill_draw()\n random.shuffle(self.draw)\n self.discard = []\n self.draw_initial_hands()", "def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n 
self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r", "def PrepareBoundaries(self):\r\n \r\n self.maxleft = self.currentpoint[0]\r\n self.maxright = self.currentpoint[0]\r\n self.maxtop = self.currentpoint[1]\r\n self.maxbottom = self.currentpoint[1]\r\n for element in self.finalstring:\r\n if element == '+':\r\n 
self.currentheading += self.angle\r\n elif element == '-':\r\n self.currentheading -= self.angle\r\n elif element == 'F':\r\n if self.maxleft > self.currentpoint[0]:\r\n self.maxleft = self.currentpoint[0]\r\n if self.maxright < self.currentpoint[0]:\r\n self.maxright = self.currentpoint[0]\r\n if self.maxbottom > self.currentpoint[1]:\r\n self.maxbottom = self.currentpoint[1]\r\n if self.maxtop < self.currentpoint[1]:\r\n self.maxtop = self.currentpoint[1]\r\n \r\n \r\n self.currentpoint = self.NextPoint(self.currentpoint, self.length, self.currentheading)\r\n \r\n \r\n \r\n elif element == '[':\r\n self.stack.append([self.currentpoint[0], self.currentpoint[1], self.currentheading])\r\n elif element == ']':\r\n popped = self.stack.pop()\r\n self.currentheading = popped.pop()\r\n self.currentpoint = popped\r\n \r\n #Yes, for the special case where the last point is actually a boundary, we need to do this post-check\r\n if self.maxleft > self.currentpoint[0]:\r\n self.maxleft = self.currentpoint[0]\r\n if self.maxright < self.currentpoint[0]:\r\n self.maxright = self.currentpoint[0]\r\n if self.maxbottom > self.currentpoint[1]:\r\n self.maxbottom = self.currentpoint[1]\r\n if self.maxtop < self.currentpoint[1]:\r\n self.maxtop = self.currentpoint[1] \r\n \r\n #After parsing the string, we set the heading and currentpoint back to their original values.\r\n self.currentheading = 0\r\n self.currentpoint = self.startingpoint", "def split_bottleExpansion((nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n \n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs", "def DrawTurret(self):\n pygame.draw.rect(self.displaysurf, self.color, (int(self.x_coord - T_W1 / 2), WINHEIGHT - T_H1 - GR_HEIGHT, T_W1, T_H1), 0)\n pygame.draw.rect(self.displaysurf, self.color, (int(self.x_coord - T_W2 / 2), WINHEIGHT - (T_H2 + T_H1) - GR_HEIGHT, T_W2, T_H2), 0)\n self.barrel_endx = self.x_coord - int(T_LEN*(math.cos(self.barrel)))\n self.barrel_endy = WINHEIGHT - T_H1 - int(T_LEN*(math.sin(self.barrel))) - GR_HEIGHT\n pygame.draw.line(self.displaysurf, self.color, (self.x_coord, WINHEIGHT - T_H1 - GR_HEIGHT), (self.barrel_endx, self.barrel_endy), T_WID)", "def betterEvaluationFunction(currentGameState):\n\n # Useful information you can extract from a GameState (pacman.py)\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n newCapsules = currentGameState.getCapsules()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n # Volem que s'apropi a les fruites i s'allunyi dels fantasmes en cas que aquests ens puguin matar, si no, hem d'intentar menjar-nos-els, pensant en seguir optant a la fruita.\n\n foodDistance = 
[util.manhattanDistance(newPos, food) for food in newFood.asList()]\n if foodDistance:\n foodMinima = min(foodDistance)\n else:\n foodMinima = -1 # perque si la llista esta buida vol dir que hem hem d'anar cap aquesta direcció, i per tant necessitem un valor molt gran.\n\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n ghostDistance = [util.manhattanDistance(newPos, ghostState.getPosition()) for ghostState in newGhostStates]\n\n distanciaFantasmes = 0\n fantasmaMoltAprop = 0\n\n for i in range(len(ghostDistance)):\n if newScaredTimes[i] >= 2:\n distanciaFantasmes -= ghostDistance[i]\n if ghostDistance[i] <= 1:\n fantasmaMoltAprop -= 1\n else:\n distanciaFantasmes += ghostDistance[i]\n if ghostDistance[i] <= 1:\n fantasmaMoltAprop += 1\n\n if distanciaFantasmes == 0:\n distanciaFantasmes = -1 # perque aixo voldra dir que tenim els fantasmes al voltant, i per tant ens en volem allunyar si o si d'aquesta direcció\n\n capsulesDistances = [util.manhattanDistance(newPos, capsuleState) for capsuleState in newCapsules]\n\n if capsulesDistances:\n capsulaMinima = min(capsulesDistances)\n itemMinim = min(capsulaMinima, foodMinima)\n else:\n itemMinim = foodMinima\n\n result = currentGameState.getScore() + 1 / float(itemMinim) - 1 / float(distanciaFantasmes) - fantasmaMoltAprop\n\n\n return result", "def build_room(self):\r\n room = self.new_room()\r\n return self.ran_exts(room)", "def draw_foe_mines(self):\n self.foe_top.draw(self.foe_top_rect.topleft)\n self.foe_middle.draw(self.foe_middle_rect.topleft)\n self.foe_midbot.draw(self.foe_midbot_rect.topleft)\n self.foe_bottom.draw(self.foe_bottom_rect.topleft)", "def draw_building():\n\n gerardo.penup()\n gerardo.backward(135)\n gerardo.pendown()\n gerardo.begin_fill()\n for i in range(2): # this loop draws out the rectangle for the building\n gerardo.forward(200)\n gerardo.right(90)\n gerardo.forward(100)\n gerardo.right(90)\n gerardo.end_fill()\n gerardo.hideturtle()", "def create_glider_gun(i, j, grid):\n\n ggun = np.zeros(11*38).reshape(11, 38)\n\n ggun[5][1] = ggun[5][2] = 1\n ggun[6][1] = ggun[6][2] = 1\n\n ggun[3][13] = ggun[3][14] = 1\n ggun[4][12] = ggun[4][16] = 1\n ggun[5][11] = ggun[5][17] = 1\n ggun[6][11] = ggun[6][15] = ggun[6][17] = ggun[6][18] = 1\n ggun[7][11] = ggun[7][17] = 1\n ggun[8][12] = ggun[8][16] = 1\n ggun[9][13] = ggun[9][14] = 1\n\n ggun[1][25] = 1\n ggun[2][23] = ggun[2][25] = 1\n ggun[3][21] = ggun[3][22] = 1\n ggun[4][21] = ggun[4][22] = 1\n ggun[5][21] = ggun[5][22] = 1\n ggun[6][23] = ggun[6][25] = 1\n ggun[7][25] = 1\n\n ggun[3][35] = ggun[3][36] = 1\n ggun[4][35] = ggun[4][36] = 1\n\n grid[i:i+11, j:j+38] = ggun", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)" ]
[ "0.7823686", "0.5754557", "0.56262046", "0.5477109", "0.5417354", "0.53973466", "0.5374712", "0.53535587", "0.53477806", "0.5309742", "0.52957034", "0.5281329", "0.52798027", "0.5256859", "0.5233532", "0.5210926", "0.519959", "0.51973045", "0.51967686", "0.5172551", "0.5160206", "0.5152625", "0.51373273", "0.51194423", "0.5082148", "0.5057475", "0.5056859", "0.5046589", "0.5046411", "0.5042627", "0.50183165", "0.5011946", "0.49948257", "0.49804497", "0.49767238", "0.4975864", "0.49746606", "0.49738738", "0.49698564", "0.49676207", "0.49659222", "0.49600038", "0.49595752", "0.49369296", "0.4934691", "0.4931468", "0.49304718", "0.49107036", "0.49074566", "0.4906428", "0.48905838", "0.4890036", "0.48892218", "0.488695", "0.4883732", "0.4882672", "0.48816934", "0.48691306", "0.4865825", "0.48512974", "0.4848025", "0.48473477", "0.4845669", "0.4845325", "0.4843215", "0.48429695", "0.48398066", "0.48371056", "0.48365396", "0.48314393", "0.4831201", "0.48275623", "0.4827412", "0.48272783", "0.48264974", "0.48214298", "0.4817726", "0.48111546", "0.4810638", "0.48027527", "0.47986168", "0.47949013", "0.47937113", "0.47860202", "0.47800243", "0.47788057", "0.47738922", "0.47726542", "0.4769186", "0.47651073", "0.4761857", "0.47596583", "0.47560817", "0.47551933", "0.47549695", "0.47523868", "0.47481164", "0.47367352", "0.47316352", "0.4727895", "0.47270963" ]
0.0
-1
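The record above never shows the generate_sub_waves helper that its wave-spawning negative calls with (total steps, number of enemies, enemy constructor, positional args, keyword args) tuples. As a purely hypothetical sketch of what such a helper could look like (the name is taken from the record itself; the body and the even-spacing scheme are assumptions, not part of the dataset):

def generate_sub_waves(sub_waves):
    """Flatten (steps, count, constructor, args, kwargs) tuples into a
    list of (step, enemy) pairs, spread uniformly over the steps.
    Hypothetical sketch; the real helper is not shown in this record."""
    enemies = []
    for steps, count, constructor, args, kwargs in sub_waves:
        for i in range(count):
            # Spread the spawns evenly across the available steps.
            step = int(steps * (i + 1) / count)
            enemies.append((step, constructor(*args, **kwargs)))
    # Sort by spawn step so a game loop can pop enemies in order.
    enemies.sort(key=lambda pair: pair[0])
    return enemies

Nothing below depends on this sketch; it only makes the tuple layout used in the record above concrete.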
Build the left part of the fireball function. Doing this uses slots 0,1,2,3 and the result will be in slot 0.
(S (horace ((S (horace (greg (K S)))) ((S (horace fanny)) (greg I)))))   june
           ((S (horace fanny)) ian) )))                                  kelly
def build_leftpart(): # build kelly. build_kelly() # copy kelly to 3. copy(0, 3) # build june in slots 0,1,2 build_june() # copy kelly to slot 1 copy(3, 1) # smash together to get (june kelly) in 0 smash() # copy (june kelly) to 1 copy(0, 1) # build horace in 0 build_horace(0) # smash together to get (horace (june kelly)) in 0 smash() # wrap with an S for the whole left part. apply_card("S", 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def keyLeft(self):\n if pyxel.btnp(pyxel.KEY_LEFT, 10, 1) and not mapCheck(self, theFallen, -1, 0):\n mapDel(self, theFallen)\n self.x = max(-self.left, self.x - 1)\n mapAdd(self, theFallen)", "def _left(self, j):\n return 2 * j + 1", "def _left(self, j):\n return 2 * j + 1", "def build_rightpart():\n # build in 1: (K dec)\n apply_card(\"put\", 1)\n apply_slot(1, \"dec\")\n apply_card(\"K\", 1)\n\n # build in 0: greg\n build_greg(0)\n\n # smash together to get (greg (K dec)) in 0\n smash()\n\n # copy it to 1.\n apply_card(\"put\", 1)\n apply_slot(1, \"zero\")\n apply_card(\"get\", 1)\n\n # build horace in 0.\n build_horace(0)\n\n # smash together to get (horace (greg (K dec))) in 0.\n smash()\n\n # Wrap with an S.\n apply_card(\"S\", 0)\n\n # build ian in 1.\n build_ian(1)\n\n # smash together to get ((S (horace (greg (K dec)))) ian) in 0.\n smash()", "def move_left(self):\r\n\r\n counter = 0\r\n for y in range(1, self._col):\r\n for x in reversed(range(self._row)):\r\n if '[' in self._board[x][y] and self._board[x][y-1] == ' ':\r\n counter += 1\r\n elif '|' in self._board[x][y] and self._board[x][y-1] == ' ':\r\n counter += 1\r\n if counter == 3:\r\n\r\n for y in range(1, self._col):\r\n for x in reversed(range(self._row)):\r\n if '[' in self._board[x][y] and self._board[x][y - 1] == ' ':\r\n self._board[x][y-1] = self._board[x][y]\r\n self._board[x][y] = ' '\r\n elif '|' in self._board[x][y] and self._board[x][y-1] == ' ':\r\n self._board[x][y-1] = self._board[x][y]\r\n self._board[x][y] = ' '\r\n\r\n return self._board", "def TransformLeftMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side)\n line = []\n for x in range(i, j):\n line.append(field[x])\n\n line = move(line)\n k = 0\n for x in range(i, j):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field", "def hangman_figure(attempt_left):\n if attempt_left == N_TURNS:\n print('___________')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 1:\n print('___________')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 2:\n print('___________')\n print('| |')\n print('| O')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 3:\n print('___________')\n print('| |')\n print('| O')\n print('| |')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 4:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 5:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 6:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| /')\n print('| |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 7:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('|')\n 
print('|_____')\n if attempt_left == N_TURNS - 8:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| |')\n print('|_____')\n if attempt_left == N_TURNS - 9:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 10:\n print('___________')\n print('| |')\n print('| -O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 11:\n print('___________')\n print('| |')\n print('| -O-')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')", "def lefts(self):\n lstack = [len(self.arr)]\n i = len(self.arr) - 1\n while i >= 0:\n if self.arr[i] > self.arr[lstack[-1] - 1]:\n while lstack and self.arr[i] > self.arr[lstack[-1] - 1]:\n x = lstack.pop()\n self.left[x - 1] = i + 1\n lstack.append(i + 1)\n i -= 1", "def left (x):\n\n return Sinary(side(x.v,1))", "def push_left (grid):\r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0\r\n \r\n for i in range(4): \r\n for j in range(3): \r\n if grid[i][j]==grid[i][j+1]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i][j+1]=0 \r\n \r\n for a in range(4): \r\n for i in range(4): \r\n for j in range(3,0,-1): \r\n if grid[i][j-1]==0: \r\n grid[i][j-1]=grid[i][j] \r\n grid[i][j]=0", "def left_edge(f: SwimmingFish) -> float:\n if f.dx > 0:\n return f.posn.x\n else:\n return f.posn.x - f.fish.size", "def compose_lwss_gun (glider_gun, A = -1, B = -1, C = -1):\n if (A < 0): A = 40\n if (B < 0): B = A;\n if (C < 0): C = A;\n\n m = min (A, B, C)\n a = A - m\n b = B - m\n c = C - m\n return \\\n glider_gun[4 * a] ( A, -A - 3, flip_x) + \\\n glider_gun[4 * b] (-B + 2, -B + 1) + \\\n glider_gun[4 * c + 1] (-C + 6, C, flip_y)", "def move_left (f):\r\n tmp_moved = f[:]\r\n for k in range(len(f)):\r\n for l in range(len(f)):\r\n if f[k][l] == 'x':\r\n tmp_moved[k][l] = '_'\r\n if l == 0:\r\n tmp_moved[k][len(f)-1] = 'x'\r\n else:\r\n tmp_moved[k][l-1] = 'x'\r\n break\r\n return tmp_moved", "def _left(node):\n return 2 * node + 1", "def left(i):\r\n return 2 * i + 1", "def to_left_boundary(self) -> str:\n return '{}{}'.format('[' if self.is_closed else '(', self.value)", "def rotate_left(shape, times=1):\n return rotate(shape, 3) if times <= 1 else rotate_left(rotate(shape, 3), times-1)", "def left(t):\r\n return t(1)", "def cutNow(self,leftMonomers,definitive=False):\n # A1 ~ Unif[0,N-1-(Nc-1)(g-1)[\n for A1 in leftMonomers:\n A2 = A1 + 1\n # Mise a jour de la matrice laplacienne\n self.LaplacianMatrix[A1,A2] = 0\n self.LaplacianMatrix[A2,A1] = 0\n self.LaplacianMatrix[A1,A1] -= 1 \n self.LaplacianMatrix[A2,A2] -= 1 \n # Mise a jour de la liste d'adjacence\n self.cutEdge(A1,A2)\n # Add new free ends to freeMonomers list\n self.freeMonomers.extend([A1,A2])\n \n if definitive:\n self.generatePossibleEncounters()\n# \n for i in range(len(self.freeMonomers)):\n self.freeMonomersNames[self.freeMonomers[i]] = chr(97 + i//2) + str(1 + i%2)", "def left(length, depth):\n turtle.setheading(240)\n turtle.forward(length)\n binary_tree(length / 2, depth - 1)", "def make_empty_left(self, e=0):\n self.make_empty_side(u'left')", "def _animateLeftAlien(self):\n incr = self.speedChange()\n max = self._maxAlien()\n min = self._minAlien()\n left 
= self.getLPos()\n\n if left > ALIEN_H_SEP:\n if self._direction == True:\n for a in self._aliens:\n for b in a:\n if b != None:\n b.x -= ALIEN_H_WALK\n self._time = 0\n self._alienStep += 1\n n = 0\n if left <= ALIEN_H_SEP:\n if self._direction == True:\n for a in self._aliens:\n for b in a:\n if b != None:\n b.y -= ALIEN_V_SEP\n self._direction = False\n self._time = 0\n self._alienStep += 1", "def left(self):\r\n if self.d in direction_tuple:\r\n index = direction_tuple.index(self.d)\r\n if index == 0:\r\n self.d = direction_tuple[3]\r\n else:\r\n self.d = direction_tuple[index - 1]\r\n else:\r\n print(\"NO VALID ROBOT POSITION\")", "def left_view(self)->list:\n\t\tqueue=[]\n\t\tleft=[]\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tfor i in range(len(queue)):\n\t\t\t\tif(i==0):\n\t\t\t\t\tleft.append(queue[0].data)\n\t\t\t\tnode=queue[0]\n\t\t\t\tqueue.pop(0)\n\t\t\t\tif(node.left!=None):\n\t\t\t\t\tqueue.append(node.left)\n\t\t\t\tif(node.right!=None):\n\t\t\t\t\tqueue.append(node.right)\n\t\treturn left", "def _left(self, index):\r\n return 2*index + 1", "def pleft(self):\n return -self.pfill(1) + self.plen(-1, s=True)", "def push_left (grid):\r\n \r\n for i in range(4):\r\n row = grid[i]\r\n \r\n if row == [0, 0 ,0 ,0]:\r\n continue\r\n for k in range(4):\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0\r\n for l in range(1, 4):\r\n if row[l-1] == row[l]:\r\n row[l-1] = row[l]*2\r\n row[l] = 0\r\n for j in range(1, 4):\r\n if row[j-1] == 0:\r\n row[j-1] = row[j]\r\n row[j] = 0 \r\n grid[i] = row\r\n return grid", "def left(self):\n n = len(self)-1\n fl = np.zeros(n+1)\n for ii in range(n+1):\n for jj in range(n+1-ii):\n for kk in range(n+1-ii-jj):\n fl[ii+jj] += self[ii,jj,kk]\n return fl", "def left(i):\n return 2*i", "def new_field(left, right, top, bottom):\n fieldlist = []\n for x in range(width + 1):\n fieldlist.append({})\n for y in range(length + 1):\n fieldlist[x][y] = None\n for x in range(width + 1):\n for y in range(length + 1):\n #creates trees\n if ((not ((x == 0 or x == width - 1) and y == length - 2)) and (x%2 == 0 and (y == 0 or y == length - 2))):\n fieldlist[x][y] = Tree1\n elif ((not ((x == 1 or x == width) and y == length - 2)) and (x%2 == 1 and (y == 0 or y == length - 2))):\n fieldlist[x][y] = Tree2\n elif ((not ((x == 0 or x == width - 1) and y == 2)) and (x%2 == 0 and (y == 2 or y == length))):\n fieldlist[x][y] = Tree5\n elif ((not ((x == 1 or x == width) and y == 2)) and (x%2 == 1 and (y == 2 or y == length))):\n fieldlist[x][y] = Tree6\n elif (((x == 0 or x == width - 1) and y%2 == 1) or (x%2 == 0 and (y == 1 or y == length - 1))):\n fieldlist[x][y] = Tree3\n elif (((x == 1 or x == width) and y%2 == 1) or (x%2 == 1 and (y == 1 or y == length - 1))):\n fieldlist[x][y] = Tree4\n elif ((x == 0 or x == width - 1) and y%2 == 0):\n fieldlist[x][y] = Tree7\n elif ((x == 1 or x == width) and y%2 == 0):\n fieldlist[x][y] = Tree8\n elif (x == 2 or x == width - 2 or y == 3):\n fieldlist[x][y] = plain\n else:\n #Creates random grass patterns\n if (random.random()<0.082):\n for i in range(random.randint(1,3)):\n for j in range(random.randint(1,3)):\n for k in range(random.randint(1,3)):\n for l in range(random.randint(1,3)):\n if (fieldlist[x+i][y+j] == None):\n fieldlist[x+i][y+j] = grass\n if (fieldlist[x+i][y-l] == None):\n fieldlist[x+i][y-l] = grass\n if (fieldlist[x-k][y+j] == None):\n fieldlist[x-k][y+j] = grass\n if (fieldlist[x-k][y-l] == None):\n fieldlist[x-k][y-l] = grass\n else:\n fieldlist[x][y] = None\n \n 
#fills all other tiles with plain tile images\n for x in range(width + 1):\n for y in range(length + 1):\n if (fieldlist[x][y] == None):\n fieldlist[x][y] = plain\n \n #creates clearings\n if (left):\n left_clearing(fieldlist)\n if (right):\n right_clearing(fieldlist)\n if (top):\n top_clearing(fieldlist)\n if (bottom):\n bottom_clearing(fieldlist)\n return fieldlist", "def mate_left(self):\n # TODO: query self.local_obj geometry to get center of face?\n return Mate(self, CoordSystem(\n origin=(-self.width / 4, 0, (self.height + self.left_wall_height) / 2),\n xDir=(0,1,0),\n normal=(-sin(radians(self.angle_left)), 0, cos(radians(self.angle_left)))\n ))", "def left_clearing(fieldlist):\n fieldlist[0][length/2] = plain\n fieldlist[1][length/2] = plain\n fieldlist[0][(length/2) + 1] = plain\n fieldlist[1][(length/2) + 1] = plain\n fieldlist[0][(length/2) - 1] = plain\n fieldlist[1][(length/2) - 1] = plain\n fieldlist[0][(length/2) + 2] = Tree1\n fieldlist[1][(length/2) + 2] = Tree2\n fieldlist[0][(length/2) - 2] = Tree5\n fieldlist[1][(length/2) - 2] = Tree6", "def push_left (grid):\r\n \r\n for row in range (4):\r\n section = []\r\n for col in range (4):\r\n section.append(grid[row][col])\r\n add(section)\r\n for i in range (4):\r\n grid[row][i] = section[i]", "def get_right_and_left_lanelet(self): \n if self.scenario is not None:\n possible_lanelet_ids = self.scenario.lanelet_network.find_lanelet_by_position([np.array(list(self.current_pos))])[0]\n self.current_lanelet = None\n self.right_lanelet = None\n self.left_lanelet = None \n for lane_id in possible_lanelet_ids: \n self.current_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(lane_id) \n if self.current_lanelet is not None:\n if self.current_lanelet.adj_left is not None:\n self.left_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(self.current_lanelet.adj_left)\n if self.current_lanelet.adj_right is not None:\n self.right_lanelet = self.scenario.lanelet_network.find_lanelet_by_id(self.current_lanelet.adj_right)", "def push_left (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if (col-1)>=0:\r\n continue\r\n \r\n elif grid[row][col]==grid[row][col-1]:\r\n return grid[row][col]+ grid[row][col-1]", "def left(self):\n pos = self.get_current()\n if pos in self._left_col():\n return None\n\n new_pos = pos - 1\n new_board = list(self.values)\n new_board[pos], new_board[new_pos] = new_board[new_pos], new_board[pos]\n return Board(new_board, board_size=(self.rows, self.cols), moved=new_board[pos])", "def left(cargo):\n # When the robot has turned left change state to follow\n line_follower.turn_left()\n #print(\"Turning left\")\n\n # return\n new_state = \"follow\"\n txt = \"follow line..\"\n return (new_state, txt)", "def push_left (grid):\n #moves the block if there is a 0 value\n for i in range(3):\n for j in range(1,4):\n for k in range(4):\n if grid[k][j-1]==0 or grid[k][j-1]==\" \":\n grid[k][j-1] = grid[k][j]\n grid[k][j]= 0\n #checks if adjacent blocks have the same values and adds them\n for i in range(1,4):\n for j in range(4):\n if grid[j][i-1]==grid[j][i]:\n grid[j][i-1]+=grid[j][i]\n grid[j][i]= 0 \n #moves the rest of the grid up\n for i in range(1,4):\n for j in range(4):\n if grid[j][i-1]== 0:\n grid[j][i-1] = grid[j][i]\n grid[j][i] = 0\n #if there is a value in the position\n return grid", "def leftChild(self, pos):\n return 2 * pos", "def leftChild(self, pos):\n return 2 * pos", "def get_left_hand(index, annotations):\n return get_hand_points(index, annotations, 0)", "def left(self):\n x, y 
= (self.loc[0] - 1, self.loc[1])\n\n if x < 0:\n return None # None\n\n return self.garden.cells[y][x]", "def rotate_left(self):\n\t\ttemp = self.right\n\t\tself.right = temp.left\n\t\ttemp.left = self\n\t\tself = temp", "def leftTurn(self):\n #print('leftTurn\\r')\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=1.0)", "def left_child(self, pos): \n return 2 * pos", "def moveLeft(board):\n\t# initial shift\n\t#shiftLeft(board)\n\trykTilVenstre(board)\n\t# merge cells\n\tfor i in range(4):\n\t\tfor j in range(3):\n\n\t\t\tif board[i][j] == board[i][j + 1] and board[i][j] != 0:\n\t\t\t\tboard[i][j] *= 2\n\t\t\t\tboard[i][j + 1] = 0\n\t\t\t\tj = 0\n\n\t# final shift\n\t#shiftLeft(board)\n\trykTilVenstre(board)\n\treturn board", "def left(self):\n self.counterUp(teamNumber = 1)", "def push_left(self, event):\n self.stack()\n self.merge()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()", "def _restructure_leftchild(self):\n left = self._leftchild\n if left.full(): # If left has both children\n if left._rightchild._height > left._leftchild._height:\n left._rotate_rightchild() # Double rotate if right unbalanced\n elif left._rightchild and not left._leftchild:\n left._rotate_rightchild()\n self._rotate_leftchild()", "def go_left(self):\n self.change_x = -6\n self.direction = \"L\"", "def left(cell):\n return [cell[0], cell[1] - 1]", "def turn_left(self):\n pass", "def rotate_cube_left_list(liste):\n \n rotate_cube_right_list(liste)\n rotate_cube_right_list(liste)\n rotate_cube_right_list(liste)\n \n return liste", "def push_left (grid):\r\n \r\n #moves values left\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0\r\n \r\n \r\n #moves values left\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0 \r\n \r\n \r\n #checks for similar values and combines\r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==grid[row][column+1]:\r\n grid[row][column]=2*grid[row][column]\r\n grid[row][column+1]=0\r\n \r\n #moves remaining values left \r\n for row in range(4):\r\n for column in range(3):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row][column+1]\r\n grid[row][column+1]=0", "def positive_left_right_requirements(\n self,\n ) -> Tuple[Tuple[GriddedPerm, ...], Tuple[GriddedPerm, ...]]:\n left, right = [], []\n for (x, y) in self._tiling.active_cells:\n if self._fuse_row and y == self._row_idx:\n left.append(GriddedPerm.single_cell((0,), (x, y)))\n right.append(GriddedPerm.single_cell((0,), (x, y + 1)))\n if not self._fuse_row and x == self._col_idx:\n left.append(GriddedPerm.single_cell((0,), (x, y)))\n right.append(GriddedPerm.single_cell((0,), (x + 1, y)))\n return tuple(sorted(left)), tuple(sorted(right))", "def generate_cell_left(self, x, y, list_of_nearby_cells):\n cell_2 = self.island_map[y][x - 1]\n list_of_nearby_cells.append(cell_2)", "def push_left (grid):\r\n temp = [] #Same as above\r\n for i in range(4):\r\n for j in range(4):\r\n if grid[i][j] > 0:\r\n temp.append(grid[i][j]) #Same as above\r\n lentemp = len(temp)\r\n if lentemp > 0:\r\n for k in range(1, lentemp): #Same as above\r\n if (temp[k] == temp[k-1]) or (temp[k-1] == 0):\r\n temp[k-1] = temp[k] + temp[k-1]\r\n temp[k] = 0\r\n while 0 in temp: #Same as above\r\n temp.remove(0) \r\n lentemp = len(temp)\r\n 
for k in range(4-lentemp): #Adds zeroes to the end\r\n temp.append(0)\r\n for j in range(4):\r\n grid[i][j] = temp[j] #Same as above\r\n temp = [] #Same as above\r\n return grid", "def changeLaneLeft(self, speed, accel):\n self.changeLane(speed, accel, -44.5)", "def RotateLead(winner,tricksplayed):\r\n temps = [[],[],[],[]]\r\n\r\n if winner == 0:\r\n return None\r\n else: #Need to rotate\r\n for i in range(4):\r\n for j in range(7-tricksplayed):\r\n temps[i].append(PlayerHands[i][j])\r\n \r\n if winner == 1:\r\n for j in range(7 - tricksplayed):\r\n PlayerHands[0][j] = temps[1][j]\r\n PlayerHands[1][j] = temps[2][j]\r\n PlayerHands[2][j] = temps[3][j]\r\n PlayerHands[3][j] = temps[0][j]\r\n \r\n elif winner == 2:\r\n for j in range(7 - tricksplayed):\r\n PlayerHands[0][j] = temps[2][j]\r\n PlayerHands[1][j] = temps[3][j]\r\n PlayerHands[2][j] = temps[0][j]\r\n PlayerHands[3][j] = temps[1][j]\r\n\r\n else:\r\n for j in range(7 - tricksplayed):\r\n PlayerHands[0][j] = temps[3][j]\r\n PlayerHands[1][j] = temps[0][j]\r\n PlayerHands[2][j] = temps[1][j]\r\n PlayerHands[3][j] = temps[2][j]\r\n\r\n return None", "def _rotate_left(self):\n pivot = self.right\n if pivot is None:\n return\n self.val, pivot.val = pivot.val, self.val\n self.right = pivot.right\n if self.right is not None:\n self.right.parent = self\n pivot.right = pivot.left\n pivot.left = self.left\n if pivot.left is not None:\n pivot.left.parent = pivot\n self.left = pivot", "def move_left():\n return __maze.move_left()", "def left(self, key):\n return self.side(key, self.forward)", "def go_left(self):\n self.change_x = -6", "def go_left(self):\n self.change_x = -6", "def rotateLeft(board):\n\tb = [[board[j][i] for j in range(4)] for i in range(3, -1, -1)]\n\treturn b", "def rotateLeft(self):\n self.faceHeading+=shipRotationSpeed\n self.reDraw()", "def turnLeft(ev3):\n ev3.set_angle(\"A\", \"-30\", \"-90\")\n ev3.set_angle(\"B\", \"30\", \"90\")\n ev3.set_angle(\"C\", \"-30\", \"-90\")", "def ghost_cc_left(self,current, ghost, g_pos):\n x, y, facing ,(st,b),start,p_prob = g_pos[ghost]\n node = self.nodes_array[x][y].getNeighborByDirection(facing)\n if st > 0 and b and node is not None:\n st = max(st-1,0)\n return [(1,(node.i,node.j,facing,(st,not b),start,p_prob))]\n\n node = self.nodes_array[x][y]\n st = st - 1 if st - 1 > 0 else 0\n if not st:\n if node.left and (facing != Directions.EAST or len(node.neighbors) == 1):\n return [(1.0, (node.left.i, node.left.j, Directions.WEST,(0,False),start,p_prob))]\n if node.down and (facing != Directions.NORTH or len(node.neighbors) == 1):\n return [(1.0, (node.down.i, node.down.j, Directions.SOUTH,(0,False),start,p_prob))]\n if node.right and (facing != Directions.WEST or len(node.neighbors) == 1):\n return [(1.0, (node.right.i, node.right.j, Directions.EAST,(0,False),start,p_prob))]\n if node.up and (facing != Directions.SOUTH or len(node.neighbors) == 1):\n return [(1.0, (node.up.i, node.up.j, Directions.NORTH,(0,False),start,p_prob))]\n else:\n if node.left and (facing != Directions.EAST or len(node.neighbors) == 1):\n return [(1.0, (node.left.i, node.left.j, Directions.WEST,(st,True),start,p_prob))]\n if node.down and (facing != Directions.NORTH or len(node.neighbors) == 1):\n return [(1.0, (node.down.i, node.down.j, Directions.SOUTH,(st,True),start,p_prob))]\n if node.right and (facing != Directions.WEST or len(node.neighbors) == 1):\n return [(1.0, (node.right.i, node.right.j, Directions.EAST,(st,True),start,p_prob))]\n if node.up and (facing != Directions.SOUTH or len(node.neighbors) 
== 1):\n return [(1.0, (node.up.i, node.up.j, Directions.NORTH,(st,True),start,p_prob))]", "def mate_bolt_left(self):\n return Mate(\n self,\n CoordSystem(\n origin=(3, self.y_bolt_offset + 1.0, self.thickness / 2.0),\n # xDir=(1, 0, 0), normal=(0, -1, 0),\n ),\n )", "def left(self, A, i):\n if 2*i +1 < len(A):\n return 2*i +1\n return None", "def left(self, A, i):\n if 2*i +1 < len(A):\n return 2*i +1\n return None", "def left_right(steps):\n lengths = lens(steps)\n a_side = []\n b_side = []\n for i in range(len(lengths)):\n if i % 2 == 0:\n a_side.append(steps[i])\n else:\n b_side.append(steps[i])\n return a_side, b_side", "def left(self, left):\n self.ptr.left(left)", "def left_join_list_one():\n return[\n ['wrath', 'anger', 'delight'],\n ['fond', 'enamored', 'averse'],\n ['guide', 'usher', 'jam'],\n ['outfit', 'garb', 'follow'],\n ['diligent', 'employed', 'idle'],\n ]", "def RPSLS():\n from sage.matrix.constructor import matrix\n A = matrix([[0, -1, 1, 1, -1],\n [1, 0, -1, -1, 1],\n [-1, 1, 0, 1, -1],\n [-1, 1, -1, 0, 1],\n [1, -1, 1, -1, 0]])\n g = NormalFormGame([A])\n g.rename('Rock-Paper-Scissors-Lizard-Spock - ' + repr(g))\n return g", "def turn_left(self):\n turn = self.__heading + Ship.TURN\n if turn >= Ship.MAX_HEADING:\n turn -= Ship.MAX_HEADING\n self.__heading = turn", "def Left_Left_fix(self, GP):\r\n P = GP.left\r\n U = GP.right\r\n ### D\r\n if U.color == \"red\":\r\n P.color = \"black\"\r\n U.color = \"black\"\r\n GP.color = \"red\"\r\n return GP\r\n else:\r\n GP.left = P.right\r\n P.right = GP\r\n # Fix the colors\r\n P.color = \"black\"\r\n GP.color = \"red\"\r\n return P", "def extract_left_hand(data):\n box = data['lhb'][data['i']];\n return extract_area(data,box);", "def _rect_left(self):\n\treturn min(self.x, self.x + self.w)", "def roof_mwfrs(lenght, width, overhang=1, wall_height=3, roof_height=4):\n h = wall_height + 0.5*roof_height\n area = (lenght + overhang) * (width + overhang)\n area_1 = 0.5*h*width\n area_2 = 0.5*h*width\n area_3 = h*width\n area_4 = area - area_1 -area_2 - area_3\n return area, area_1, area_2, area_3, area_4", "def move_left(self):\n kya = self.board.board[self.player.y][self.player.x-1]\n if self.player.x > 0 and kya != 'X' and kya != 'G':\n self.board.board[self.player.y][self.player.x] = '.'\n self.coin_taken(-1, 0)\n self.board.board[self.player.y][self.player.x-1] = 'P'\n self.player.x -= 1\n else:\n print \"Can't move left\"\n self.dont_move_ghosts = 1", "def left(self, n):\n return n._left", "def rotate_left_list(liste):\n \n rotate_cube_right_list(liste)\n rotate_front_list(liste)\n rotate_cube_left_list(liste)\n \n return liste", "def theRoof(pos, blockTypeMain = wool , mainColor=wPurple, replaceGlass = wGlass):\n \n # try again the same trick to add the roof\n # Middle part\n for i in range(0,12,1):\n iy = i\n if i >= 6:\n iy=11-i\n #print i, iy\n mc.setBlocks(pos.x-4+i, pos.y+10+iy, pos.z+4,\n pos.x-4+i, pos.y+10+iy, pos.z+29, blockTypeMain, mainColor)\n\n # RIGHT SIDE of the house\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+5+ii,\n pos.x-13+ii, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+8,\n pos.x-11+ii, pos.y+9+ii, pos.z+26-ii, material)\n \n # and LEFT side of the house\n xAdjust = 21\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5-ii+xAdjust, pos.y+9+ii, pos.z+5+ii,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n 
material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-7-ii+xAdjust, pos.y+9+ii, pos.z+8,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+26-ii, material)", "def turn_left(): #py:turn_left\n RUR._turn_left_()", "def optimize_left(connect):\n if intersect(connect[0], connect[1], connect[2]) == False:\n return connect\n else:\n connect = connect_left(connect[0], connect[2])\n if intersect(connect[0], connect[1], connect[2]) == True:\n return optimize_left(connect)\n else:\n return connect", "def flow_left(tiles, x0=0, y0=0, dim=14):\n global _flowcnt\n for xl in range(x0-1, 0, -1):\n if(tiles[y0][xl] == '#'):\n return [xl, y0]\n else:\n tiles[y0][xl] = '|' ; _flowcnt += 1 # flowing left\n if( tiles[y0+1][xl] == '.' ): # stop flowing left and head down\n tiles[y0][xl] = '|' \n deepest = flow_down(tiles,xl,y0+1,dim)\n return [xl, deepest]\n\n # if we get here no clay tile found left of x0, y0\n print(\"flow_left> _flowcnt:\", _flowcnt)\n return [-1, y0]", "def turn_left(self): #py:UR.turn_left\n RUR._UR.turn_left_(self.body)", "def left_rauzy_move(self, winner):\n result = copy(self)\n\n winner_letter = result._labels[winner][0]\n loser_letter = result._labels[1-winner].pop(0)\n\n if winner_letter in result._labels[winner][1:]:\n loser_to = result._labels[winner][1:].index(winner_letter)+2\n result._labels[winner].insert(loser_to, loser_letter)\n\n else:\n loser_to = result._labels[1-winner].index(winner_letter)\n result._labels[1-winner].insert(loser_to, loser_letter)\n\n return result", "def left_rauzy_move(self, winner):\n result = copy(self)\n\n winner_letter = result._labels[winner][0]\n loser_letter = result._labels[1-winner].pop(0)\n\n if winner_letter in result._labels[winner][1:]:\n loser_to = result._labels[winner][1:].index(winner_letter)+2\n result._labels[winner].insert(loser_to, loser_letter)\n\n else:\n loser_to = result._labels[1-winner].index(winner_letter)\n result._labels[1-winner].insert(loser_to, loser_letter)\n\n return result", "def MoveLeftStep(self):\n if self.facing == 0:\n self.facing = 3\n self.x -= self.stepLeft\n elif self.facing == 1:\n self.facing = 0\n self.y -= self.stepUp\n elif self.facing == 2:\n self.facing = 1\n self.x += self.stepRight\n elif self.facing == 3:\n self.facing = 2\n self.y += self.stepDown", "def get_left(lst: 'list[Cell]', size: int) -> 'list[Line]':\n form = {1: [[0], [1, 2]],\n 2: [[0, 2], [1, 3, 5], [4, 6]],\n 3: [[0, 2, 5], [1, 3, 6, 9], [4, 7, 10], [8, 11]],\n 4: [[0, 2, 5, 9], [1, 3, 6, 10, 14], [4, 7, 11, 15], [8, 12, 16],\n [13, 17]],\n 5: [[0, 2, 5, 9, 14], [1, 3, 6, 10, 15, 20], [4, 7, 11, 16, 21],\n [8, 12, 17, 22], [13, 18, 23], [19, 24]]}\n result = []\n for line in form[size]:\n result.append(Line([lst[n] for n in line]))\n return result", "def left(self):\n self.move(-1, 0)", "def swipeLeft (self) :\n rotated = Grid(np.rot90(np.rot90(np.rot90(self.grid))))\n self.grid = np.rot90(rotated.swipeBase())", "def __init__(self, is_p1_turn: bool, side_length: int) -> None:\n super().__init__(is_p1_turn)\n self.side_length = side_length\n # ISSUE: what if node is more than 26 --> no need to handle side more than 5\n # construct a list of uppercase and lower case letters\n alph_lst_upper = list(string.ascii_uppercase)\n alph_lst_lower = list(string.ascii_lowercase)\n # alph_lst has a length of 52\n alph_lst = alph_lst_upper + alph_lst_lower\n\n # assign original value for each ley-line\n hori_result = []\n for i in range(side_length + 1):\n hori_result.append(\"@\")\n left_result = []\n for i in range(side_length + 1):\n 
left_result.append(\"@\")\n right_result = []\n for i in range(side_length + 1):\n right_result.append(\"@\")\n self.hori_result = hori_result\n self.left_result = left_result\n self.right_result = right_result\n\n self.hori_lst = []\n self.left_lst = []\n self.right_lst = []\n\n # construct horizontal ley-lines\n n = 2\n start_index = 0\n end_index = 0\n while n <= side_length + 1:\n end_index = start_index + n\n self.hori_lst.append(alph_lst[start_index:end_index])\n start_index = end_index\n n += 1\n end_index = start_index + side_length\n self.hori_lst.append(alph_lst[start_index:end_index])\n\n # copy hori_lst\n hori_copy = []\n for item in self.hori_lst:\n hori_copy.append(item)\n\n # construct left ley-lines\n for i in range(side_length + 1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) > i:\n temp.append(lst[i])\n self.left_lst.append(temp)\n for i in range(1, side_length + 1):\n self.left_lst[i].append(hori_copy[-1][i - 1])\n\n # construct right ley-lines\n for i in range(-1, side_length * (-1) - 2, -1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) >= i * (-1):\n temp.append(lst[i])\n self.right_lst.append(temp)\n self.right_lst = self.right_lst[::-1]\n for i in range(side_length):\n self.right_lst[i].append(hori_copy[-1][i])", "def rotate_left(self):\n\n grid = Grid(self.width, self.height)\n\n for j in range(0, self.height):\n for i in range(0, self.width):\n v = self.get(self.width - 1 - j, i)\n grid.set(i, j, v)\n\n return grid", "def left_child(self, index):\n return 2 * index", "def _rotate_left(self):\n tmp = self.right\n tmp.parent = self.parent\n self.right = tmp.left\n if self.right is not None:\n self.right.parent = self\n tmp.left = self\n self.parent = tmp\n self.size -= 1 + (0 if tmp.right is None else tmp.right.size)\n tmp.size += 1 + (0 if self.left is None else self.left.size)\n return tmp", "def _up_left(self, col, row):\n ones = 0\n twos = 0\n for step in range(4):\n\n current = self.layout[col + (step*-1)][row + (step)] #step up and left\n if current == 1: ones+=1\n if current == 2: twos+=1\n\n return self._score_a_quartet(ones, twos)" ]
[ "0.70482737", "0.58627445", "0.58096784", "0.58096784", "0.5768745", "0.5619706", "0.55908644", "0.55596286", "0.5548311", "0.5537584", "0.55325747", "0.5531275", "0.5530027", "0.55278164", "0.54952", "0.5482912", "0.54783756", "0.54747903", "0.5452353", "0.5442784", "0.54378104", "0.5426339", "0.54134023", "0.5412929", "0.5406135", "0.5398989", "0.5379603", "0.5377192", "0.5376657", "0.53528947", "0.53275156", "0.5324548", "0.53114414", "0.52944505", "0.52696615", "0.52308816", "0.52267796", "0.5224559", "0.5220359", "0.5212286", "0.5212286", "0.5211841", "0.52093315", "0.5208359", "0.5200722", "0.5192368", "0.51849586", "0.51789355", "0.5176477", "0.5171528", "0.5151339", "0.5127035", "0.512499", "0.5122323", "0.510045", "0.50952846", "0.5092566", "0.50921446", "0.5091871", "0.5090954", "0.50810325", "0.5079162", "0.50739896", "0.5067074", "0.5067074", "0.5060276", "0.5024469", "0.5016657", "0.5005618", "0.5003883", "0.5000859", "0.5000859", "0.50001264", "0.49893597", "0.49867266", "0.49829647", "0.49806473", "0.49792594", "0.4978444", "0.49769774", "0.49765655", "0.49721757", "0.49700868", "0.4967329", "0.49664032", "0.49617592", "0.49602848", "0.49544743", "0.493244", "0.49312708", "0.49312708", "0.4927665", "0.49260035", "0.4922133", "0.49138188", "0.49110526", "0.49098486", "0.49030483", "0.49004602", "0.48958966" ]
0.7919818
0
Build the fireball function. We'll apply the Y combinator to it. Stomps registers [0,4].
def build_fireball():
    # build the right part
    build_rightpart()
    # copy it to 4.
    copy(0, 4)
    # build the left part, now it's in 0
    build_leftpart()
    # copy right part from 4 to 1.
    copy(4, 1)
    # smash together for whole fireball.
    smash()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fy(self):\n\n def fy(p):\n p0, p1 = p\n e = anp.exp(-(p0 + p1) * self.ts)\n x = (\n 1\n / (-p0 - p1)\n * anp.array(\n [\n [-p1 - p0 * e, -p1 + p1 * e],\n [-p0 + p0 * e, -p0 - p1 * e],\n ]\n )\n )\n y = anp.einsum(\"mnr,n->mr\", x, self.x0)\n return y\n\n return fy", "def generate_fire_recurrence(self):\r\n \r\n self.time_to_next_fire = round(weibullvariate(self.scale_parameter, self.shape_parameter),2)\r\n return self.time_to_next_fire", "def _f(red_component, green_component, blue_component):\n return _e(red_component, green_component, blue_component, Z_FORE)", "def generate_fire_recurrence(self):\n\n self.time_to_next_fire = round(weibullvariate(self.scale_parameter, self.shape_parameter),2)\n return self.time_to_next_fire", "def generate_fire_recurrence(self):\n self._time_to_next_fire = round(\n weibullvariate(self._scale_parameter, self._shape_parameter), 2\n )\n return self._time_to_next_fire", "def f(t,y):\n return (lam*y)", "def y(self):\n #print(xdb.xray_lines(self.element)[self.line].energy/1000)\n #self.output_params={}\n #print(self.getBulkCon(self.element,self.botchem,self.botden))\n x = self.x + self.qoff\n if not self.__fit__:\n self.output_params['scaler_parameters']={}\n return self.fluCalFun(x)\n #return self.x", "def event11512005():\n header(11512005)\n\n BONFIRE_FLAGS = (\n # on_warp_list, required_flag (from rest)\n (11012045, 11012044), # Parish Turret\n (11302045, 11302040), # Bone Chimney\n (11202045, 11202040), # Moonlight Grove\n (11402045, 11402040), # Fetid Slagmire\n (11002045, 11002044), # The Sluiceworks\n (11512045, 11512043), # Sun Chamber\n (11512046, 11512044), # Gwyn's Altar\n (11312045, 11312044), # The Undercrypt\n (11412085, 11412080), # Sanctum of Chaos\n (11602045, 11602044), # The Abyss\n (11702045, 11702044), # The Duke's Archives\n # (11102045, 11102044), # Cloister of Exiles (now on permanent Lordvessel list)\n (11212085, 11212084), # Royal Hippodrome\n (11812045, 11812040), # Undead Asylum (must be rested at on return)\n )\n\n # Miscellaneous: enable Chasm Cell bonfire warp flag if Early Oolacile is on.\n skip_if_event_flag_off(1, EVENT.EarlyOolacile)\n flag.enable(213)\n\n # Start by disabling all of them.\n for warp_list_flag, _ in BONFIRE_FLAGS:\n flag.disable(warp_list_flag)\n\n # Wait for Chthonic Spark possession or Gwyn to be dead.\n if_player_has_good(-1, GOOD.ChthonicSpark)\n if_event_flag_on(-1, EVENT.LordvesselFull)\n if_condition_true(0, -1)\n\n # Enable all bonfires that have been rested at.\n for warp_list_flag, required_flag in BONFIRE_FLAGS:\n # Note that you'll need to reload for a newly rested-at bonfire to appear in the menu.\n skip_if_event_flag_off(1, required_flag)\n flag.enable(warp_list_flag)\n\n # Restart if Spark is lost and Lordvessel is not full.\n if_player_does_not_have_good(1, GOOD.ChthonicSpark)\n if_event_flag_off(1, EVENT.LordvesselFull)\n if_condition_true(0, 1)\n restart()", "def gen_y(self, y_func):\n assert not np.all(self.x == None)\n assert not np.all(self.act == None)\n self.y = y_func(self.x, self.act, self.ydim, self.generator)", "def intern_F(self):\n if self.A is None:\n def Fx(x,y):\n if self.hx is None:\n fx = self.gradf(x)\n self.Fz = fx, None, None\n return fx, None, None\n else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n self.Fz = fx, fy, None\n return fx, fy, None\n 
else:\n def Fx(x,y,u):\n if self.hx is None:\n fx = self.gradf(x)\n fu = self.b-self.A@x\n self.Fz = fx, None, fu\n return fx, None, fu\n else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n fu = self.b-self.A@x\n self.Fz = fx, fy, fu\n return fx, fy, fu\n return Fx", "def Fire(self, *args):\n return _gmat_py.Burn_Fire(self, *args)", "def __call__(self, glon, glat, e):\n return self.flux(glon, glat, e)", "def __call__(self, y, f):\n ll = self._loglike(y, f)\n return ll", "def f(x, y):\n return Ripple.g(np.sqrt(x ** 2 + y ** 2))", "def f4(x):\n return sin(x)/x", "def function(self):\r\n lambd = 5*np.sin(2*np.pi*self.x_array) #The function in question\r\n return 3*np.pi*np.exp(-lambd)", "def next(self, f=1.0):\n self.x += self.step\n\n r = (np.sin(self.frequencyR * self.x + \n self.phaseR) * self.widthR + self.minR) * f\n g = (np.sin(self.frequencyG * self.x + \n self.phaseG) * self.widthG + self.minG) * f\n b = (np.sin(self.frequencyB * self.x + \n self.phaseB) * self.widthB + self.minB) * f\n\n return (b, g, r)", "def fire_module(x, fire_id, squeeze=16, expand=64):\r\n\tsq1x1 = \"squeeze1x1\"\r\n\texp1x1 = \"expand1x1\"\r\n\texp3x3 = \"expand3x3\"\r\n\trelu = \"relu_\"\r\n\ts_id = 'fire' + str(fire_id) + '/'\r\n\r\n\tif K.image_data_format() == 'channels_first':\r\n\t\tchannel_axis = 1\r\n\telse:\r\n\t\tchannel_axis = 3\r\n\t\r\n\tx = Convolution2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)\r\n\tx = Activation('relu', name=s_id + relu + sq1x1)(x)\r\n\r\n\tleft = Convolution2D(expand, (1, 1), padding='valid', name=s_id + exp1x1)(x)\r\n\tleft = Activation('relu', name=s_id + relu + exp1x1)(left)\r\n\r\n\tright = Convolution2D(expand, (3, 3), padding='same', name=s_id + exp3x3)(x)\r\n\tright = Activation('relu', name=s_id + relu + exp3x3)(right)\r\n\r\n\tx = concatenate([left, right], axis=channel_axis, name=s_id + 'concat')\r\n\treturn x", "def ret_f(t,y):\n\n f = np.zeros(3)\n f[0] = 77.27*(y(1) - y(0)*y(1)+ y(0)-8.375e-6*y(0)*y(0))\n f[1] = (1.0/77.27)*(-y(1)-y(0)*y(1)+y(2))\n f[2] = 0.161*(y(0)-y(2))\n\n return f", "def y(self):\n return self.yn.func", "def evaluate_random_function(f, x, y):\n # this section actually evaluates the functions \n\n if f[0] == \"x\":\n return x \n elif f[0] == \"y\":\n return y\n elif f[0] == \"sin_pi\":\n return math.sin(math.pi*evaluate_random_function(f[1], x, y ))\n elif f[0] == \"cos_pi\":\n return math.cos(math.pi*evaluate_random_function(f[1], x, y ))\n elif f[0] == \"prod\":\n return evaluate_random_function(f[1] , x , y ) * evaluate_random_function(f[2] , x , y )\n elif f[0] == \"avg\":\n return (evaluate_random_function(f[1] , x , y ) + evaluate_random_function(f[2] , x , y)) / 2.0\n elif f[0] == \"squ\":\n return evaluate_random_function(f[1] , x , y ) * evaluate_random_function(f[1] , x , y ) \n elif f[0] == \"cir\":\n return ((evaluate_random_function(f[1] , x , y )**2 + evaluate_random_function(f[2] , x , y)) **2 )**0.5\n elif f[0] == \"sms\":\n return (evaluate_random_function(f[1] , x , y )**2 - evaluate_random_function(f[2] , x , y)) **2 \n\n #elif f == [\"sinpi\"]:\n # return math.sin(math.pi*)", "def f(t,y):\n return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])", "def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)", "def fill_influence_matrices(self):\r\n \"\"\" EXECUTE 
THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n \r\n # import the jiited function from outside the class:\r\n global f\r\n\r\n # alias:\r\n NT = Turbine.NT\r\n N = Turbine.N\r\n t = Turbine.t\r\n DT = Turbine.DT\r\n\r\n for i in range(int(NT)):\r\n f(self.cx[i], self.cy[i], self.xi[i], self.yi[i], t, N, DT)", "def fireworks():\n\n sleep_speed = 0.025\n\n # Turn on white\n PYGLOW.color(\"white\", 60)\n sleep(sleep_speed)\n # Turn on blue\n PYGLOW.color(\"blue\", 60)\n sleep(sleep_speed)\n # Fade white\n PYGLOW.color(\"white\", 50)\n sleep(sleep_speed)\n # Turn on green\n PYGLOW.color(\"green\", 60)\n sleep(sleep_speed)\n # Fade white and blue\n PYGLOW.color(\"white\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 50)\n sleep(sleep_speed)\n # Turn on yellow\n PYGLOW.color(\"yellow\", 60)\n sleep(sleep_speed)\n # Fade white, blue, and green\n PYGLOW.color(\"white\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 50)\n sleep(sleep_speed)\n # Turn on orange\n PYGLOW.color(\"orange\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, and yellow\n PYGLOW.color(\"white\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 50)\n sleep(sleep_speed)\n # Turn on red\n PYGLOW.color(\"red\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, yellow, and orange\n PYGLOW.color(\"white\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 50)\n sleep(sleep_speed)\n # Fade all\n PYGLOW.color(\"white\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 50)\n sleep(sleep_speed)\n # Fade blue, green, yellow, orange, and red\n PYGLOW.color(\"blue\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 40)\n sleep(sleep_speed)\n # Fade green, yellow, orange, and red\n PYGLOW.color(\"green\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 30)\n sleep(sleep_speed)\n # Fade yellow, orange, and red\n PYGLOW.color(\"yellow\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 20)\n sleep(sleep_speed)\n # Fade orange, and red\n PYGLOW.color(\"orange\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 10)\n sleep(sleep_speed)\n # Fade red\n PYGLOW.color(\"red\", 0)\n sleep(sleep_speed)\n # Pause 1 second before the next one\n sleep(1)", "def dalf(x):\n return grad(alf)(x)", "def fourLegSimulator(beta_list, gamma_list, beta_list2, gamma_list2, beta_list3, gamma_list3, beta_list4, gamma_list4, bodyHeight, femur, tibia):\n \n #import necessary packages\n import numpy as np \n import itertools # This package is specifically used for having multiple variable \"for\" loop using zip function\n from numpy import pi, sin, cos, sqrt\n import matplotlib.pyplot as plt\n import matplotlib.animation as animation\n get_ipython().run_line_magic('matplotlib', 'qt')\n\n\n\n # input parameters\n Femur_one_leg = femur # Length of 
femur (upper bone)\n Tibia_one_leg = tibia # Length of Tibia (lower bone)\n\n\n # Making arrays for containing value of respective coordinates\n X1 = np.zeros(len(beta_list)) # array for x_coordinates of moving point of femur\n Y1 = np.zeros(len(beta_list)) # array for y_coordinates of moving point of femur\n X2 = np.zeros(len(gamma_list)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2 = np.zeros(len(gamma_list)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n \n X1_2 = np.zeros(len(beta_list2)) # array for x_coordinates of moving point of femur\n Y1_2 = np.zeros(len(beta_list2)) # array for y_coordinates of moving point of femur\n X2_2 = np.zeros(len(gamma_list2)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_2 = np.zeros(len(gamma_list2)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n\n X1_3 = np.zeros(len(beta_list3)) # array for x_coordinates of moving point of femur\n Y1_3 = np.zeros(len(beta_list3)) # array for y_coordinates of moving point of femur\n X2_3 = np.zeros(len(gamma_list3)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_3 = np.zeros(len(gamma_list3)) # array for y_coordinates of moving point of tibia i.e end effector in our case \n \n \n X1_4 = np.zeros(len(beta_list4)) # array for x_coordinates of moving point of femur\n Y1_4 = np.zeros(len(beta_list4)) # array for y_coordinates of moving point of femur\n X2_4 = np.zeros(len(gamma_list4)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_4 = np.zeros(len(gamma_list4)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n \n \n #Populating the above defined arrays currently filled with zeros to respective coordinates\n #Here in the for loop zip function is used to iterate two variales simultaneously and enumerate function to return index numbers\n\n for index,(beta,gamma) in enumerate(zip(beta_list,gamma_list)):\n x1 = Femur_one_leg*cos(-beta - (pi/2)) # x-cooridnate of femur\n y1 = Femur_one_leg*sin(-beta - (pi/2)) # y-cooridnate of femur\n x2 = x1 + Tibia_one_leg*cos(-pi/2 - (beta + gamma)) # x-coordinate of tibia\n y2 = y1 + Tibia_one_leg*sin(-pi/2 - (beta + gamma)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1[index] = x1 \n Y1[index] = y1 \n X2[index] = x2 \n Y2[index] = y2 \n \n for index2,(beta2,gamma2) in enumerate(zip(beta_list2,gamma_list2)):\n x1_2 = Femur_one_leg*cos(-beta2 - (pi/2)) # x-cooridnate of femur\n y1_2 = Femur_one_leg*sin(-beta2 - (pi/2)) # y-cooridnate of femur\n x2_2 = x1_2 + Tibia_one_leg*cos(-pi/2 - (beta2 + gamma2)) # x-coordinate of tibia\n y2_2 = y1_2 + Tibia_one_leg*sin(-pi/2 - (beta2 + gamma2)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_2[index2] = x1_2 \n Y1_2[index2] = y1_2 \n X2_2[index2] = x2_2 \n Y2_2[index2] = y2_2 \n\n for index3,(beta3,gamma3) in enumerate(zip(beta_list3,gamma_list3)):\n x1_3 = 40 + Femur_one_leg*cos(-beta3 - (pi/2)) # x-cooridnate of femur\n y1_3 = Femur_one_leg*sin(-beta3 - (pi/2)) # y-cooridnate of femur\n x2_3 = x1_3 + Tibia_one_leg*cos(-pi/2 - (beta3 + gamma3)) # x-coordinate of tibia\n y2_3 = y1_3 + Tibia_one_leg*sin(-pi/2 - (beta3 + gamma3)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_3[index3] = x1_3 \n 
Y1_3[index3] = y1_3 \n X2_3[index3] = x2_3 \n Y2_3[index3] = y2_3\n \n for index4,(beta4,gamma4) in enumerate(zip(beta_list4,gamma_list4)):\n x1_4 = 40 + Femur_one_leg*cos(-beta4 - (pi/2)) # x-cooridnate of femur\n y1_4 = Femur_one_leg*sin(-beta4 - (pi/2)) # y-cooridnate of femur\n x2_4 = x1_4 + Tibia_one_leg*cos(-pi/2 - (beta4 + gamma4)) # x-coordinate of tibia\n y2_4 = y1_4 + Tibia_one_leg*sin(-pi/2 - (beta4 + gamma4)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_4[index4] = x1_4 \n Y1_4[index4] = y1_4 \n X2_4[index4] = x2_4 \n Y2_4[index4] = y2_4 \n\n # Setting up figure and subplot\n\n fig = plt.figure()\n fig.canvas.set_window_title('One Leg trajectory Planning')\n ax = fig.add_subplot(111, aspect='equal', autoscale_on=False, xlim=(-30,70), ylim=(-50,50))\n ax.grid()\n ax.set_title('Leg Trajectory')\n ax.axes.xaxis.set_ticklabels([])\n ax.axes.yaxis.set_ticklabels([])\n \n line, = ax.plot([], [], 'o-', lw=5, color='#05143b')\n line2, = ax.plot([], [], 'o-', lw=5, color='#37acf0')\n line3, = ax.plot([], [], 'o-', lw=5, color='#05143b')\n line4, = ax.plot([], [], 'o-', lw=5, color='#37acf0')\n \n\n\n # initialization function\n def init():\n line.set_data([], [])\n line2.set_data([], [])\n line3.set_data([], [])\n line4.set_data([], [])\n return line,line2,line3,line4,\n\n # animation function\n def animate(i):\n x_points = [0, X1[i], X2[i]]\n y_points = [0, Y1[i], Y2[i]]\n \n x2_points = [0, X1_2[i], X2_2[i]]\n y2_points = [0, Y1_2[i], Y2_2[i]]\n \n x3_points = [40, X1_3[i], X2_3[i]]\n y3_points = [0, Y1_3[i], Y2_3[i]]\n \n x4_points = [40, X1_4[i], X2_4[i]]\n y4_points = [0, Y1_4[i], Y2_4[i]]\n \n\n line.set_data(x_points, y_points)\n line2.set_data(x2_points, y2_points)\n line3.set_data(x3_points, y3_points)\n line4.set_data(x4_points, y4_points)\n \n return line, line2, line3, line4\n\n # call the animation\n ani = animation.FuncAnimation(fig, animate, init_func=init, frames=len(X1), interval=100, blit=True, repeat=True)\n \n\n # plotting respective movement trajectories in the same plot\n plt.plot(X2,Y2, '#05143b')\n# plt.plot(X1,Y1)\n \n plt.plot(X2_2,Y2_2,'#37acf0')\n# plt.plot(X1_2,Y1_2)\n \n plt.plot(X2_3,Y2_3,'#05143b')\n# plt.plot(X1_3,Y1_3)\n \n plt.plot(X2_4,Y2_4,'#37acf0')\n# plt.plot(X1_4,Y1_4)\n \n \n \n plt.plot([-20,60],[-bodyHeight,-bodyHeight],'brown')\n plt.plot([-4,44],[0,0],'#010b24')\n plt.plot([-4,-4],[0,5],'#010b24')\n plt.plot([44,44],[0,5],'#010b24')\n plt.plot([-4,44],[5,5],'#010b24')\n \n for ind in range(100):\n plt.plot([-4,44],[ind*5/100,ind*5/100],'black')\n \n return None", "def build_random_function(min_depth, max_depth):\n listoffunctions = [ [\"x\"] , [\"y\"] , [\"\"] ]\n\n var = random.randrange(0,7) #this is not inclusive at the upper end\n halfvar = random.randrange(0,2)\n otherhalfvar = random.randrange(0,2)\n if max_depth == 0: # if you get the the max depth it ends\n if halfvar == 0:\n return [\"x\"]\n else:\n return [\"y\"]\n if min_depth == 0: # if you get to the the minimum recursion length\n if max_depth != 0: # There is a 1/2 possibility it will end there\n if halfvar == 0: \n if otherhalfvar == 0: \n return [\"x\"] \n else: \n return [\"y\"]\n\n # some of the below return two so that they cna evaluate more than onne value\n\n if var == 0:\n return [\"sin_pi\", build_random_function(min_depth-1, max_depth-1)]\n if var == 1:\n return [\"cos_pi\", build_random_function(min_depth-1, max_depth-1)]\n if var == 2:\n return [\"prod\", build_random_function(min_depth-1, 
max_depth-1) , build_random_function(min_depth-1, max_depth-1)]\n if var == 3:\n return [\"avg\", build_random_function(min_depth-1, max_depth-1) , build_random_function(min_depth-1, max_depth-1)]\n if var == 4:\n return [\"squ\", build_random_function(min_depth-1, max_depth-1)]\n if var == 5:\n return [\"cir\", build_random_function(min_depth-1, max_depth-1) , build_random_function(min_depth-1, max_depth-1) ]\n if var == 6:\n return [\"sms\", build_random_function(min_depth-1, max_depth-1) , build_random_function(min_depth-1, max_depth-1) ]", "def fir(X, y, trial_index, window, tr):\n\n # Norm then pad.\n scaler = MinMaxScaler(feature_range=(0, 1))\n X = scaler.fit_transform(X.astype(np.float))\n X = np.vstack([X, np.ones((window, X.shape[1]), dtype=np.float)])\n\n # Save the org y names\n ynames = sorted(np.unique(y))\n ynames = unique_sorted_with_nan(ynames)\n \n # y becomes integers\n y = create_y(y)\n\n # Make the design matrix.\n dm = _create_dm(y, window)\n # dm DEBUG\n #import time\n #np.savetxt(\"dm-{0}\".format(time.strftime(\"%m_%d_%Y_%H_%s_%m\")), dm, fmt=\"%1.0f\")\n dm = np.matrix(dm)\n \n # FIR!\n fir_names = []\n firs = []\n for j in range(X.shape[1]):\n x = np.matrix(X[:,j])\n fir = np.array(np.linalg.pinv(dm.T * dm) * dm.T * x.T)[0:-1] \n ## Drop dummy\n fir = fir.reshape(len(ynames)-1, window) \n\n firs.append(fir)\n fir_names.extend(ynames[1:]) ## Drop nan/baseline\n\n Xfir = np.vstack(firs).transpose()\n fir_names = np.asarray(fir_names)\n\n assert checkX(Xfir)\n assert Xfir.shape[0] == window, (\"After FIR rows not equal to window\")\n assert Xfir.shape[1] == (len(ynames[1:]) * X.shape[1]), (\"After\" \n \"FIR wrong number of features\")\n assert fir_names.shape[0] == Xfir.shape[1], (\"fir_names and Xfir\" \n \"don't match\")\n\n return Xfir, fir_names", "def f(self,y,psi):\r\n raise NotImplementedError", "def fillax(x, y, *args, **kw):\n xx = np.concatenate((x, np.array([x[-1], x[0]], x.dtype)))\n yy = np.concatenate((y, np.zeros(2, y.dtype)))\n return pylab.fill(xx, yy, *args, **kw)", "def f(t,y):\n return np.array([lam*y[0] + (1.0-lam)*np.cos(t) - (1.0+lam)*np.sin(t)])", "def r8_f2(t, y):\n yp = np.zeros(np.size(y))\n yp[0] = y[1]\n yp[1] = -y[0]\n return(yp)", "def f(self):\n return self.g() + self.h()", "def f5(x):\n return 2* sin(x) + sin(2*x)", "def create_super_ball():\n super_balls.append(gen_super_ball())\n generate_velocity(super_balls)", "def evaluate_random_function(f, x, y):\n\n # your code goes here", "def __call__(self, X, Y=None, eval_gradient=False):\n list_pfunc = self._get_one_param('list_func')\n if(Y is None):\n Y =X\n for f in reversed(list_pfunc):\n X = f(X, Y=Y, eval_gradient=False)\n return X", "def _get_F(self, omega, y): \n x = y[:-1]\n newt_lambda = y[-1]\n F = np.zeros([len(x)+1, 1])\n F[:-1] = omega @ x - newt_lambda/x\n F[-1] = x.sum()-1\n return F", "def f(x):\n\treturn np.sin(x / 5.0) * np.exp(x / 10.0) + 5 * np.exp(-x / 2.0)", "def __init__(self, BurnExpFireP, StartNNodes, ForwBurnProb, BackBurnProb, DecayProb, Take2AmbasPrb, OrphanPrb):\n _snap.TFfGGen_swiginit(self, _snap.new_TFfGGen(BurnExpFireP, StartNNodes, ForwBurnProb, BackBurnProb, DecayProb, Take2AmbasPrb, OrphanPrb))", "def evaluate_random_function(f, x, y):\n if len(f)==1: #check base case\n # print f[0],\"BASE\"\n if f[0]==\"x0\":\n return x0(x,y)\n if f[0]==\"y0\":\n return y0(x,y)\n if f[0]==\"prod\":\n return prod(x,y)\n if f[0]==\"avg\":\n return avg(x,y)\n if f[0]==\"square\":\n return square(x,y)\n if f[0]==\"squarert\":\n return squarert(x,y)\n return 'error1'\n 
#done checking base case. Only need to provide for possible base cases\n # print f[0], \"NORMAL\"\n if f[0]==\"x0\":\n return x0(evaluate_random_function(f[1],x,y),evaluate_random_function(f[2],x,y))\n if f[0]==\"y0\":\n return y0(evaluate_random_function(f[1],x,y),evaluate_random_function(f[2],x,y))\n if f[0]==\"prod\":\n return prod(evaluate_random_function(f[1],x,y),evaluate_random_function(f[2],x,y))\n if f[0]==\"avg\":\n return avg(evaluate_random_function(f[1],x,y),evaluate_random_function(f[2],x,y))\n if f[0]==\"cos_pi\":\n return cos_pi(evaluate_random_function(f[1],x,y))\n if f[0]==\"sin_pi\":\n return sin_pi(evaluate_random_function(f[1],x,y))\n if f[0]==\"square\":\n return square(evaluate_random_function(f[1],x,y))\n if f[0]==\"squarert\":\n return squarert(evaluate_random_function(f[1],x,y))\n \n return 'error'", "def f(xx, uu, uref, t, p):\n x1, x2, x3, x4 = xx # system state variables\n u1, = uu # input variable\n\n l = 0.5 # length of the pendulum rod\n g = 9.81 # gravitational acceleration\n M = 1.0 # mass of the cart\n m = 0.1 # mass of the pendulum\n\n s = sin(x3)\n c = cos(x3)\n\n ff = np.array([ x2,\n m*s*(-l*x4**2+g*c)/(M+m*s**2)+1/(M+m*s**2)*u1,\n x4,\n s*(-m*l*x4**2*c+g*(M+m))/(M*l+m*l*s**2)+c/(M*l+l*m*s**2)*u1\n ])\n return ff", "def ground_truth(x):\r\n\treturn x*np.sin(x)*np.sin(2*x)", "def fire_arrows():\n\n global fireball_cooldown\n\n angle = None\n if keys[pygame.K_UP]: \n angle = 270\n if keys[pygame.K_LEFT]: angle = 225\n elif keys[pygame.K_RIGHT]: angle = 315\n elif keys[pygame.K_DOWN]: \n angle = 90\n if keys[pygame.K_LEFT]: angle = 135\n elif keys[pygame.K_RIGHT]: angle = 45\n elif keys[pygame.K_LEFT]: angle = 180\n elif keys[pygame.K_RIGHT]: angle = 0\n\n\n if angle != None:\n fireball_cooldown += 1\n\n new_bullet = projectiles.Fireball(player.rect.center[0], player.rect.center[1], angle)\n bullets.add(new_bullet)", "def flipy(y):\n return -y + 600", "def fgrad_y(self,y,psi):\r\n raise NotImplementedError", "def fgrad_y(self, y, psi, return_precalc = False):\r\n\r\n mpsi = psi.copy()\r\n\r\n # vectorized version\r\n\r\n # S = (mpsi[:,1]*(y + mpsi[:,2])).T\r\n S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T\r\n R = np.tanh(S)\r\n D = 1-R**2\r\n\r\n # GRAD = (1+(mpsi[:,0:1]*mpsi[:,1:2]*D).sum(axis=0))[:,np.newaxis]\r\n GRAD = (1+(mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T\r\n\r\n if return_precalc:\r\n # return GRAD,S.sum(axis=1),R.sum(axis=1),D.sum(axis=1)\r\n return GRAD, S, R, D\r\n\r\n\r\n return GRAD", "def _F(x,gam):\n beta = np.sqrt(1 - gam**-2)\n B = 1 + 0.5 * (gam**2 - 1)\n C = 10 * x * gam * beta * (2 + gam * beta)\n C /= 1 + x**2 * (gam**2 - 1)\n\n F_1 = (17 - 3 * x**2 / (2 - x)**2 - C) * np.sqrt(1 - x)\n F_2 = 12 * (2 -x) - 7 * x**2 / (2 - x) - 3 * x**4 / (2 - x)**3\n F_3 = np.log((1 + np.sqrt(1 - x)) / np.sqrt(x))\n\n return B * F_1 + F_2 * F_3", "def build(self):\n\n # Create a custom grid, fe_set \n nfe = 6\n fe_a = 1/4.0\n fe_b = 0.2\n fe_set = [0, 0.004]\n for i in range(1,nfe+1):\n if i < nfe*fe_a:\n fe_set.append(i*fe_b/(nfe*fe_a))\n elif i == nfe: \n fe_set.append(1)\n else:\n fe_set.append(fe_b + (i-nfe*fe_a)*(1-fe_b)/(nfe*(1-fe_a)))\n\n \"\"\"\n Args:\n dae_method = method to use for calcuating derivatives (default = OCLR)\n - BFD1 - 1st order backwards finite difference\n - OCLR - Orthogonal collocation, Lagrange-Radau\n - OCLL - Orthogonal collocation, Lagrange-Legendre\n press_drop = Pressure drop correlation for superficial velocity calc.\n - SimplifiedP - simplified pressure correlations \n - Ergun - Ergun 
equation\n fe_set = set of normalised finite element locations\n nfe = number of finite elements for bed discretization (default = 15)\n (not used if fe_set specified)\n ncp = number of collocation points (OCLR or OCLL only, default = 3)\n \"\"\" \n\n # Create unit model for fuel reactor\n self.MB_fuel = MB_CLC_fuel.MB(\n parent=self,\n dae_method = 'OCLR',\n press_drop = 'Ergun',\n fe_set = fe_set,\n ncp = 3)", "def f(x):\r\n\treturn np.sin(x)", "def f(x, y=1):\r\n return x ** 2 + y", "def forward(self, x):\n #print(\"full frwd x shape:\",x.shape)\n y=np.zeros((x.shape[0],self.W.shape[0]))\n y=np.dot(x,np.transpose(self.W))+self.b\n self.x=np.copy(x)\n return y", "def fgrad_y(self, y, psi, return_precalc = False):\r\n\r\n\r\n mpsi = psi.copy()\r\n d = psi[-1]\r\n mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)\r\n\r\n # vectorized version\r\n\r\n S = (mpsi[:,1]*(y[:,:,None] + mpsi[:,2])).T\r\n R = np.tanh(S)\r\n D = 1-R**2\r\n\r\n GRAD = (d + (mpsi[:,0:1][:,:,None]*mpsi[:,1:2][:,:,None]*D).sum(axis=0)).T\r\n\r\n if return_precalc:\r\n return GRAD, S, R, D\r\n\r\n\r\n return GRAD", "def fun(_, y):\n return np.array([-self.r * self.beta * y[1] * y[0] / self.N,\n self.r * self.beta * y[1] * y[0] / self.N - self.gamma * y[1],\n self.gamma * y[1]])", "def return_lux_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. 
'\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[0,0]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0,0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[0,0]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0,0]])\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt: np.matrix([[0,0]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0,0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def ground_truth(x):\n return x * np.sin(x) + np.sin(2 * x)", "def create_lambdas_equiG(ddG_x,ddG_y,nsim,already_interp=False):\n\tddG_x_interp,ddG_y_interp = interpolate_ddG(ddG_x,ddG_y,nsim,already_interp=already_interp)\n\tddG_y_interp_func = interpolate_ddG(ddG_x,ddG_y,nsim,print_interp_func=True,already_interp=already_interp)\n\n\tddG_y_min = np.amin(ddG_y_interp_func(ddG_x))\n\tddG_y_max = np.amax(ddG_y_interp_func(ddG_x))\n\tequi_ddG = np.abs(ddG_y_max-ddG_y_min)/nsim\n\n\tddG_y2 = np.asarray([ddG_y_min + i*equi_ddG for i in range(nsim)])\n\tddG_x2_func = interp1d(ddG_y_interp,ddG_x_interp,kind='linear')\n\tddG_x2 = ddG_x2_func(ddG_y2)\n\n\t# fix last interval by appending missing ddG_y value for lambda=1.0\n\t# ddG_x2 = np.append(ddG_x2,0.0)\n\t# ddG_y2 = np.append(ddG_y2,ddG_y[0])\n\n\t# reverse order of lambdas to 0->1\n\tddG_x2 = ddG_x2[::-1]\n\tddG_y2 = ddG_y2[::-1]\n\n\t# DEBUG\n\t#ddG_x2 = np.insert(ddG_x2,0,1.0)\n\t#ddG_y2 = np.insert(ddG_y2,0,ddG_y[-1])\n\t#print(np.insert(ddG_y2,1,ddG_y[-1]))\n\t#print(ddG_x)\n\t#print(ddG_y)\n\t#print(ddG_y2)\n\t#print(ddG_x2)\n\t#print(\"_lambdas = \",\" \".join(list(map(str,ddG_x2[::-1].tolist()))))\n\t#print(\"len(ddG_x) = \",len(ddG_x),\"\\nlen(ddG_y) = \",len(ddG_y),\"\\nlen(ddG_y_interp) = \",len(ddG_y_interp),\"\\nlen(ddG_x2) = \",len(ddG_x2),\"\\nlen(ddG_y2) = \",len(ddG_y2))\n\n\treturn ddG_x2,ddG_y2", "def call(self, y_true, y_pred):\n if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n\n ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())\n return ag_fn(y_true, y_pred, **self._fn_kwargs)", "def _apply_y(self, state, axes, **kwargs):\n return 1j * self._apply_x(self._apply_z(state, axes), axes)", "def genball(npt, ndim, rstate=None):\n # use Barthe2005\n x = rstate.standard_normal(size=(npt, ndim))\n y = rstate.exponential(0.5, size=npt)\n x1 = x / np.sqrt((y + (x**2).sum(axis=1)))[:, None]\n return x1", "def r8_f1(t, y):\n yp = np.zeros(np.size(y))\n yp[0] = 0.25 * y[0] * (1.0 - y[0] / 20.0)\n return(yp)", "def __call__(self, x):\n return self.y(x)", "def f(x, y=1):\n return x ** 2 + y", "def generate(self, x, **kwargs):\n\n assert self.parse_params(**kwargs)\n\n asserts = []\n\n # If a data range was specified, check that the input was in that range\n if self.clip_min is not None:\n asserts.append(x.any() >= self.clip_min)\n\n if self.clip_max is not None:\n asserts.append(x.any() <= self.clip_max)\n\n # Initialize loop variables\n if self.rand_init:\n eta = torch.FloatTensor(*x.shape).uniform_(-self.minmax, self.minmax)\n else:\n eta = torch.zeros_like(x)\n\n # Clip eta\n eta = clip_eta(eta, self.ord, self.eps)\n adv_x = x + eta\n\n if self.clip_min is not None or self.clip_max is not None:\n adv_x = torch.clamp(adv_x, self.clip_max, self.clip_min)\n\n if self.y_target is not None:\n y = 
self.y_target\n targeted = True\n elif self.y is not None:\n y = self.y\n targeted = False\n else:\n model_preds = self.model.get_probs(x)\n preds_max = reduce_max(model_preds, 1)\n y = torch.equals(model_preds, preds_max).float()\n y.requires_grad = False\n targeted = False\n del model_preds\n\n y_kwarg = 'y_target' if targeted else 'y'\n fgm_params = {\n 'eps' : self.eps_iter,\n y_kwarg: y,\n 'ord' : self.ord,\n 'clip_min' : self.clip_min,\n 'clip_max' : self.clip_max\n }\n\n if self.ord == 1:\n raise NotImplementedError(\"It's not clear that FGM is a good inner loop\"\n \" step for PGD when ord=1, because ord=1 FGM \"\n \" changes only one pixel at a time. We need \"\n \" to rigoursly test a strong ord=1 PGD \"\n \" before enabling this feature.\")\n\n FGM = self.FGM_CLASS(self.model, dtype)\n\n while i < self.nb_iter:\n \"\"\"\n Do a projected gradient step.\n \"\"\"\n adv_x = FGM.generate(adv_x, **fgm_params)\n\n # Clipping perturbation eta to self.ord norm ball\n eta = adv_x - x\n eta = clip_eta(eta, self.ord, self.eps)\n adv_x = x + eta\n\n # Redo the clipping.\n # FGM alread already did it, but subtracting and re-adding eta can add some\n # small numerical error\n if self.clip_min is not None or self.clip_max is not None:\n adv_x = torch.clamp(adv_x, self.clip_min, self.clip_max)\n\n # Asserts run only on CPU\n # When multi-GPU eval code tries to force all PGD ops onto GPU, this\n # can cause an error.\n common_dtype = torch.float32\n # NOTE Maybe this needs a cast\n asserts.append(self.eps <= (1e6 + self.clip_max - self.clip_min))\n \n return adv_x", "def f(x):\r\n return x * np.sin(x)", "def build_random_function(min_depth, max_depth):\n if max_depth == 1:\n rand = random.random()\n if rand < 1.0/(2+timeflag):\n return [\"x\"]\n elif rand < 2.0/(2+timeflag):\n return [\"y\"]\n else:\n return [\"t\"]\n elif min_depth <= 1:\n if random.random() > (max_depth - 1.0)/max_depth:\n rand = random.random()\n if rand < 1.0/(2+timeflag):\n return [\"x\"]\n elif rand < 2.0/(2+timeflag):\n return [\"y\"]\n else:\n return [\"t\"]\n\n rand = random.random()\n if rand < 1.0/NUM_FF:\n return [\"prod\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n elif rand < 2.0/NUM_FF:\n return [\"avg\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n elif rand < 3.0/NUM_FF:\n return [\"cos_pi\",build_random_function(min_depth-1,max_depth-1)]\n elif rand < 4.0/NUM_FF:\n return [\"sin_pi\",build_random_function(min_depth-1,max_depth-1)]\n elif rand < 5.0/NUM_FF:\n return [\"hypot\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n #elif rand < 6.0/NUM_FF:\n # return [\"pow\",build_random_function(min_depth-1,max_depth-1)]\n #elif rand < 6.0/NUM_FF:\n # return [\"add\",build_random_function(min_depth-1,max_depth-1),build_random_function(min_depth-1,max_depth-1)]\n elif rand < 6.0/NUM_FF:\n return [\"cube\",build_random_function(min_depth-1,max_depth-1)]", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)", "def fire(self):\n prepare.SFX[\"explosion\"].play()\n pos = project(self.base_rect.center, self.angle, 38)\n return Gift(pos, self.angle, self.fire_speed)", "def TAY(self, *_):\n self.reg.Y = self.reg.A\n self.reg.N = 
self.reg.Y << 7\n self.reg.Z = self.reg.Y == 0", "def res(y):\n phi_s, phi_b, lamb_1, lamb_2 = y\n\n\n mS = 3.0e-4 # Mass of sleeve [kg]\n JS = 5.0e-9 # Moment of inertia of the sleeve [kgm]\n mB = 4.5e-3 # Mass of bird [kg]\n masstotal=mS+mB # total mass\n JB = 7.0e-7 # Moment of inertia of bird [kgm]\n r0 = 2.5e-3 # Radius of the bar [m]\n rS = 3.1e-3 # Inner Radius of sleeve [m]\n hS = 5.8e-3 # 1/2 height of sleeve [m]\n lS = 1.0e-2 # verical distance sleeve origin to spring origin [m]\n lG = 1.5e-2 # vertical distance spring origin to bird origin [m]\n hB = 2.0e-2 # y coordinate beak (in bird coordinate system) [m]\n lB = 2.01e-2 # -x coordinate beak (in bird coordinate system) [m]\n cp = 5.6e-3 # rotational spring constant [N/rad]\n g = 9.81 # [m/s^2]\n\n return scipy.array([(mS + mB) * g + lamb_2,\n cp * (phi_b - phi_s) - mB * lS * g - hS * lamb_1 - rS * lamb_2,\n cp * (phi_s - phi_b) - mB * lG * g,\n rS - r0 + hS * phi_s])", "def fire():\n print(\"FIRING\")\n GPIO.output(PIN1, 0)\n GPIO.output(PIN2, 0)\n GPIO.output(TRIGGER, 1)", "def f(x):\n return x * np.sin(x)", "def f(x):\n return x * np.sin(x)", "def f(x):\n return x * np.sin(x)", "def f(x):\n return x * np.sin(x)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)", "def func1(x,u):\r\n return 5*x*u+(x+7)*np.sin(x)", "def TargetFunction(p,x,y):\n \n return p[0] + p[1]*x + p[2]*(x**2) + p[3]*y + p[4]*(y**2) + p[5]*x*y", "def eulers_richardson_method(f, dx, y, yp, range, return_yp = False):\n x = min(range)\n y_space = [y]\n yp_space = [yp]\n x_space = [x]\n \n while x<=max(range):\n yp_mid = yp + 1/2*f(x,y,yp)*dx\n y_mid = y + 1/2*yp*dx\n ypp_mid = f(1/2*x*dx, y_mid, yp_mid)\n yp += ypp_mid*dx\n y += yp_mid*dx\n \n x += dx\n x_space.append(x)\n y_space.append(y)\n yp_space.append(yp)\n if (return_yp):\n return (x_space, y_space, yp_space)\n return (x_space, y_space)", "def animate_bresenham( x1, y1, x2, y2):\n\n dy = y2-y1\n dx = x2-x1\n d = 2*dy - dx\n x = x1\n y = y1\n \n pixels_per_unit = 50 \n\n #print(' distances x %s y %s '%(dx,dy) )\n #print(' d %s start x %s y %s '%(d,x,y) )\n \n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(pixels_per_unit)\n\n fr_cnt = 0 \n pt_bufr = [] \n while x <= x2:\n \n #draw the start and end point on each framebuffer \n bloody_simple_2drender('unit_circle_%s.png'%fr_cnt, pts=[(x1,y1), (x2,y2)], gridsize=pixels_per_unit, pfb=fb)\n\n #calc and draw each step in between \n pt_bufr.append( (x,y) )\n bloody_simple_2drender('unit_circle_%s.png'%fr_cnt, pts=pt_bufr, gridsize=pixels_per_unit, pfb=fb)\n\n x+=1\n if d<0 :\n d += dy + dy\n # print('D < 0 IN LOOP %s\\n'% d )\n\n else:\n d += 2*(dy-dx)\n # print('D !<0 IN LOOP %s\\n'% d )\n\n y+=1\n \n fr_cnt += 1", "def evaluate_random_function(f, x, y):\n\n if f[0] == \"prod\":\n return evaluate_random_function(f[1],x,y) * evaluate_random_function(f[2],x,y)\n elif f[0] == \"sin_pi\":\n return sin(evaluate_random_function(f[1],x,y) * pi)\n elif f[0] == \"cos_pi\":\n return cos(evaluate_random_function(f[1],x,y) * pi)\n elif f[0] == \"x\":\n return x\n else:\n return y", "def reconstructY(self, inputs):\n if self.act_dec is None:\n act_dec = lambda x: x\n else:\n act_dec = self.act_dec\n return act_dec(self.decodeY(inputs))", "def fire_tick(self):\n SOUNDS['fire'].play()\n for room in self.room_list:\n if room.fire_level == 0:\n for j in room.adjacent:\n if self.lookup(j).fire_level == 2 and random.random() <= room.spread_chance:\n room.fire_level = 1\n 
self.num_onfire += 1\n break\n elif room.fire_level == 1:\n room.fire_level = 2", "def make_flower(shape, x, y, c1, c2, l, s):\n shape.penup()\n shape.speed(20)\n shape.setpos(x, y)\n shape.color(c2, c1)\n shape.begin_fill()\n shape.pendown()\n for side in range(6):\n shape.left(60)\n shape.forward(s) # s stands for short side\n shape.right(60)\n shape.forward(l) # l stands for long side\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.forward(l)\n shape.right(60)\n shape.forward(s)\n shape.right(60)\n shape.end_fill()\n shape.pendown()\n\n shape.color(\"green\")\n shape.right(90)\n shape.penup()\n shape.forward(10)\n shape.pendown()\n shape.forward(110)\n shape.left(90)\n\n\n\n # ...", "def f(self, x: np.array) -> float:\n return np.dot(x, self._w) + self._b", "def fun(_, y):\n return np.array([-self.r * self.beta * y[2] * y[0] / self.N,\n self.r * self.beta * y[2] * y[0] / self.N - self.sigma * y[1],\n self.sigma * y[1] - self.gamma * y[2],\n self.gamma * y[2]])", "def forward(self, x):\n # draw gumbel noise\n shp = x.shape\n eps = self.eps\n # footnote 1 from https://arxiv.org/pdf/1611.01144.pdf,\n # with a guard against log of 0\n # u = np.random.uniform(low=0, high=1, size=shp)\n # TODO: can this be replaced with rng.gumbel()?\n # what is the relatinship between low, high and gumbel parameters?\n u = self.rng.uniform(low=0, high=1, size=shp)\n g = -np.log(-np.log(u + eps) + eps)\n # x are the \"logits\" from the paper, add gumbel noise and normalize by temperature\n y = x + g\n yy = y / self.tau\n return self.smax.forward(yy)", "def runBrighterFatter():\n RunData([getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/')[0],], out='I800nmlow',\n wavelength='l800l')\n RunData([getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/')[2],], out='I800nmmed',\n wavelength='l800m')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[4],], out='I800nmhigh',\n wavelength='l800h')", "def birdfcn(x: np.ndarray) -> np.ndarray:\n\n n = x.shape[1]\n assert n == 2, \"Bird function is only defined on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = (\n np.sin(X) * np.exp((1 - np.cos(Y)) ** 2)\n + np.cos(Y) * np.exp((1 - np.sin(X)) ** 2)\n + (X - Y) ** 2\n )\n\n return scores", "def get_brownian_ball_motion(start, nframes):\n # The Wiener process parameter.\n delta = 60\n # Total time.\n T = 10.0\n # Number of steps.\n N = nframes\n # Time step size\n dt = T/N\n # Initial values of x.\n x = numpy.empty((2,N+1))\n x[:, 0] = start\n\n brownian(x[:,0], N, dt, delta, out=x[:,1:])\n\n out = []\n for i in x.transpose():\n out.append(( int(i[0]), int(i[1]) ))\n return out", "def fx(self, x):\n A = np.eye(3) + self.dt * np.array([[0, 1, 0], [0, 0, 0], [0, 0, 0]])\n xp = A @ x\n return xp", "def falcon():", "def eval(self, x):\n y = list(x)\n if not self.biased:\n y.insert(0, 1.0)\n y = np.array(y).reshape((self.Ws[0].shape[1], 1))\n for W, g in zip(self.Ws, self.gs):\n y = g(np.dot(W, y))\n return y.flatten()", "def makeFastFeedForwardFunction(self):\n\n\t\toutWeightMatrix = []\n\t\tfor unit in self.outputLayer:\n\n\t\t\trow = []\n\t\t\tfor b in unit.branchesIn:\n\t\t\t\tprint b.weight\n\t\t\t\trow.append(b.weight)\n\t\t\t\n\t\t\toutWeightMatrix.append(row)\n\t\toutWeightMatrix = np.array(outWeightMatrix).squeeze()\n\n\t\thiddenMatrices = []\n\t\tfor layer in self.hiddenLayers:\n\t\t\tmatrix = []\n\t\t\t#ignore the bias unit, since it has no branches in\n\t\t\tfor unit in 
layer[1:]:\n\t\t\t\trow = []\n\t\t\t\tfor b in unit.branchesIn:\n\t\t\t\t\trow.append(b.weight)\n\n\t\t\t\tmatrix.append(row)\n\t\t\tmatrix = np.array(matrix)\n\n\t\t\thiddenMatrices.append(matrix)\n\n\t\thidActFunc = (self.hiddenLayers[0])[1].activationFunction\n\t\toutActFunc = self.outputLayer[0].activationFunction\n\n\t\tdef ffFunc(inp):\n\t\n\t\t\tforward = np.insert(inp.T,0,1.0,axis=0)\n\t\t\tfor matrix in hiddenMatrices:\n\t\t\t\tnext = np.dot(matrix,forward)\n\t\t\t\tnext = hidActFunc(next)\n\t\t\t\tforward = np.insert(next,0,1.0,axis=0)\n\n\t\t\tout = np.dot(outWeightMatrix,forward)\n\n\t\t\treturn outActFunc(out)\n\n\t\treturn ffFunc", "def bealefcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n assert n == 2, \"Beale's function is only defined on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = (\n (1.5 - X + (X * Y)) ** 2\n + (2.25 - X + (X * (Y**2))) ** 2\n + (2.625 - X + (X * (Y**3))) ** 2\n )\n\n return scores", "def gyroHF2(self, GYRO, PFC):\n print(\"Calculating gyro orbit heat loads\")\n log.info(\"Calculating gyro orbit heat loads\")\n #get divertor HF\n qDiv = PFC.qDiv[PFC.PFC_GYROmap] / self.elecFrac\n Pdiv = qDiv * PFC.areas[PFC.PFC_GYROmap]\n #Get fractional multipliers for each helical trace\n gyroFrac = 1.0/GYRO.N_gyroPhase\n vPhaseFrac = 1.0/GYRO.N_vPhase\n vSliceFrac = GYRO.energyFracs\n #qMatrix = np.zeros((GYRO.N_gyroPhase,GYRO.N_vPhase,GYRO.N_vSlice,len(q)))\n Pgyro = np.zeros((GYRO.Nt))\n PNaN = 0.0\n sum=0\n sum1=0\n #loop through intersect record and redistribute power using multipliers\n for gyroPhase in range(GYRO.N_gyroPhase):\n for vPhase in range(GYRO.N_vPhase):\n for vSlice in range(GYRO.N_vSlice):\n idx = GYRO.intersectRecord[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap]\n hdotn = np.abs(GYRO.hdotn[gyroPhase,vPhase,vSlice,PFC.CADHOT_GYROmap])\n isNanFrom = np.where(np.isnan(idx)==True)[0] #include NaNs (NaNs = no intersection) index we map from\n notNanFrom = np.where(np.isnan(idx)==False)[0] #dont include NaNs (NaNs = no intersection) index we map from\n notNanTo = idx[~np.isnan(idx)] #indices we map power to\n notNanTo = notNanTo.astype(int) #cast as integer\n isNanTo = idx[np.isnan(idx)] #indices we map power to\n isNanTo = isNanTo.astype(int) #cast as integer\n\n if len(notNanFrom)>0:\n #multiple sources can load the same target face, so we loop\n for i in range(len(notNanFrom)):\n Pgyro[notNanTo[i]] += Pdiv[notNanFrom[i]]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[notNanFrom[i],vSlice]\n\n if len(isNanFrom)>0:\n PNaN += np.sum(Pdiv[isNanFrom]*GYRO.ionFrac*gyroFrac*vPhaseFrac*vSliceFrac[isNanFrom,vSlice])\n\n\n GYRO.gyroPowMatrix += Pgyro\n GYRO.gyroNanPower += PNaN\n return", "def gate(self):\n self.gatedFrames = IVUS_gating(self.images, self.ivusPullbackRate, self.dicom.CineRate)", "def concave_fun_eval(x):\r\n return np.stack([f1(x), f2(x)]), np.stack([f1_dx(x), f2_dx(x)])", "def f(x):\n return (2.0*math.sin(10.0*x+1.0)+1.0)" ]
[ "0.57334554", "0.54655373", "0.53826815", "0.53662187", "0.53366727", "0.5298539", "0.5281986", "0.52551913", "0.52137053", "0.520962", "0.5203715", "0.51821357", "0.51619667", "0.5104895", "0.51000744", "0.5096226", "0.5089348", "0.50706804", "0.50680226", "0.5058774", "0.5053141", "0.5038342", "0.501427", "0.5005138", "0.4999248", "0.49803016", "0.49708945", "0.49544317", "0.49491", "0.49428", "0.49405292", "0.49336326", "0.49042404", "0.48947", "0.48860198", "0.4880011", "0.48642716", "0.48622772", "0.48615074", "0.48579252", "0.48548377", "0.48322585", "0.48304987", "0.4828401", "0.48258522", "0.48163998", "0.48145416", "0.48142332", "0.48111978", "0.48008695", "0.48008004", "0.47999159", "0.47868845", "0.4774485", "0.47735697", "0.47726423", "0.47674477", "0.47597206", "0.4756187", "0.4756051", "0.4754305", "0.47522253", "0.47424805", "0.47338453", "0.4730408", "0.4728858", "0.47288424", "0.47211275", "0.47168985", "0.46921104", "0.4684553", "0.46752512", "0.46699822", "0.46699822", "0.46699822", "0.46699822", "0.46672985", "0.46612105", "0.4656165", "0.46557972", "0.46542472", "0.46501747", "0.4649988", "0.46460178", "0.46412188", "0.463921", "0.46365443", "0.463632", "0.46316254", "0.46229163", "0.46177775", "0.46176785", "0.46172005", "0.4615032", "0.4614486", "0.46143168", "0.46140993", "0.46053454", "0.4591136", "0.45834166" ]
0.68100685
0
Initialize the Route analysis for the given inputs.
def __init__(self, **kwargs):
    self.pair_type = kwargs["pair_type"]
    self.origins = kwargs["origins"]
    self.origin_id_field = kwargs["origin_id_field"]
    self.destinations = kwargs["destinations"]
    self.dest_id_field = kwargs["dest_id_field"]
    self.network_data_source = kwargs["network_data_source"]
    self.travel_mode = kwargs["travel_mode"]
    self.time_units = kwargs["time_units"]
    self.distance_units = kwargs["distance_units"]
    self.time_of_day = kwargs["time_of_day"]
    self.reverse_direction = kwargs["reverse_direction"]
    self.scratch_folder = kwargs["scratch_folder"]
    self.assigned_dest_field = kwargs["assigned_dest_field"]
    self.od_pair_table = kwargs["od_pair_table"]
    self.origin_transfer_fields = kwargs["origin_transfer_fields"]
    self.destination_transfer_fields = kwargs["destination_transfer_fields"]
    self.barriers = []
    if "barriers" in kwargs:
        self.barriers = kwargs["barriers"]

    # Create a job ID and a folder for this job
    self._create_job_folder()

    # Setup the class logger. Logs for each parallel process are not written to the console but instead to a
    # process-specific log file.
    self.setup_logger("RoutePairs")

    # Get field objects for the origin and destination ID fields since we need this in multiple places
    self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]
    self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]

    # Set up other instance attributes
    self.is_service = helpers.is_nds_service(self.network_data_source)
    self.rt_solver = None
    self.solve_result = None
    self.input_origins_layer = "InputOrigins" + self.job_id
    self.input_destinations_layer = "InputDestinations" + self.job_id
    self.input_origins_layer_obj = None
    self.input_dests_layer_obj = None
    self.origin_unique_id_field_name = "OriginUniqueID"
    self.dest_unique_id_field_name = "DestinationUniqueID"
    self.od_pairs = None

    # Create a network dataset layer if needed
    if not self.is_service:
        self._make_nds_layer()

    # Prepare a dictionary to store info about the analysis results
    self.job_result = {
        "jobId": self.job_id,
        "jobFolder": self.job_folder,
        "solveSucceeded": False,
        "solveMessages": "",
        "outputRoutes": "",
        "logFile": self.log_file
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def initialize_rt_solver(self):\r\n # For a local network dataset, we need to checkout the Network Analyst extension license.\r\n if not self.is_service:\r\n arcpy.CheckOutExtension(\"network\")\r\n\r\n # Create a new Route object\r\n self.logger.debug(\"Creating Route object...\")\r\n self.rt_solver = arcpy.nax.Route(self.network_data_source)\r\n\r\n # Set the Route analysis properties.\r\n # Read properties from the rt_config.py config file for all properties not set in the UI as parameters.\r\n # Route properties documentation: https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route.htm\r\n # The properties have been extracted to the config file to make them easier to find and set so users don't have\r\n # to dig through the code to change them.\r\n self.logger.debug(\"Setting Route analysis properties from RT config file...\")\r\n for prop, value in RT_PROPS.items():\r\n if prop in RT_PROPS_SET_BY_TOOL:\r\n self.logger.warning((\r\n f\"Route config file property {prop} is handled explicitly by the tool parameters and will be \"\r\n \"ignored.\"\r\n ))\r\n continue\r\n try:\r\n setattr(self.rt_solver, prop, value)\r\n if hasattr(value, \"name\"):\r\n self.logger.debug(f\"{prop}: {value.name}\")\r\n else:\r\n self.logger.debug(f\"{prop}: {value}\")\r\n except Exception as ex: # pylint: disable=broad-except\r\n # Suppress warnings for older services (pre 11.0) that don't support locate settings and services\r\n # that don't support accumulating attributes because we don't want the tool to always throw a warning.\r\n if not (self.is_service and prop in [\r\n \"searchTolerance\", \"searchToleranceUnits\", \"accumulateAttributeNames\"\r\n ]):\r\n self.logger.warning(\r\n f\"Failed to set property {prop} from RT config file. 
Default will be used instead.\")\r\n self.logger.warning(str(ex))\r\n # Set properties explicitly specified in the tool UI as arguments\r\n self.logger.debug(\"Setting Route analysis properties specified tool inputs...\")\r\n self.rt_solver.travelMode = self.travel_mode\r\n self.logger.debug(f\"travelMode: {self.travel_mode}\")\r\n self.rt_solver.timeUnits = self.time_units\r\n self.logger.debug(f\"timeUnits: {self.time_units}\")\r\n self.rt_solver.distanceUnits = self.distance_units\r\n self.logger.debug(f\"distanceUnits: {self.distance_units}\")\r\n self.rt_solver.timeOfDay = self.time_of_day\r\n self.logger.debug(f\"timeOfDay: {self.time_of_day}\")", "def __init__(self, num_routes, beta, phi, theta, b, l, u): \r\n self.num_routes = num_routes\r\n self.beta = beta\r\n self.phi = phi\r\n self.theta = theta\r\n self.b = b\r\n self.l = l\r\n self.u = u\r\n\r\n self.lb_totalflow = self.compute_lb_totalflow()\r\n # Initialize the flow over each route to be 0\r\n self.flow = np.zeros(num_routes) \r\n # Initialize the capacity over a route to be 0\r\n self.capacity = np.zeros(num_routes)", "def __init__(self):\n \n self.linksDict = dict()\n self.nodesDict = dict()\n self.stopsByRoute = dict()\n self.stopsByNode = dict()\n self.routeXref = dict()\n self.transitRoutes = dict()\n self.spIndex = None", "def __init__(self, rule_id, inputs, output, local=False, template=None, **kwargs):\n self.rule_id = rule_id\n self.inputs = inputs\n self.output = output\n self.local = local\n self.template = template\n self.params = kwargs", "def __init__(self):\n rospy.init_node('approach')\n\n rospy.Subscriber('/scan', LaserScan, self.scan_callback)\n self.vel_pub = rospy.Publisher('/cmd_vel_mux/input/navi', Twist,\n queue_size=10)\n self.scan = None", "def do_setup(self, ants): \n log_filter = LogFilter()\n getLogger().addFilter(log_filter)\n\n self.hills = []\n self.directions = []\n\n self.seen = [] #areas that have been seen, use this to avoid repetition\n self.unseen = []\n self.stepped_on = []\n\n self.intent = {}\n self.lc = {} #center of mass for a location\n self.i = {} #number of iterations for an ant\n\n for row in range(ants.rows):\n for col in range(ants.cols):\n self.unseen.append((row, col))\n self.intent[(row,col)] = Intent.GATHER\n\n self.lc[(row,col)] = (-1.0,-1.0) #set up center of mass\n self.i[(row,col)] = -1", "def initialization_step(self):\n # Update where agents are\n self.update_agent_location_vector()\n # update task locations\n self.update_task_location_vector()\n # update deadlines\n self.populate_deadline_vector()\n # update distances to each task and orientation to each task\n self.update_agent_distances_vector()\n self.update_agent_orientation_vector()", "def RoutingInterfaceInitialize(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def initialise(self, road, cars, speed, grid):\n self.next_features = self.buildFeatures(road, cars, speed, grid)\n if not self.init:\n self.thetas = np.ones(self.next_features.shape[0])/self.next_features.shape[0]\n #accelerate action bias\n self.thetas[0] += self.thetas[0]\n self.init = True\n # Reset the total reward for the episode\n self.total_reward = 0", "def __init__(__self__,\n resource_name: str,\n args: RouteMapArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def initialize(self):\n self.candidate_disease_list = []\n self.candidate_symptom_list = []\n self.agent_action = {\n \"turn\":None,\n \"action\":None,\n \"request_slots\":{},\n \"inform_slots\":{},\n \"explicit_inform_slots\":{},\n 
\"implicit_inform_slots\":{},\n \"speaker\":\"agent\"\n }", "def init(self):\n\n seen_ports = set()\n next_ports = deque([self.In()])\n parameters = set()\n\n while next_ports:\n\n # Get the next port and update seen ports\n port = next_ports.popleft()\n if port in seen_ports:\n continue\n seen_ports.add(port)\n\n # Figure out the next port to check\n block = port.block\n\n # If the port has upstream then it is the port we check\n # Otherwise, In case the block is atomic, we pass to its inputs\n if port.upstream:\n next_ports.extend(port.upstream)\n elif block.atomic:\n next_ports.extend(block.In)\n\n # Add parameters if the block has them\n if isinstance(block, TorchModule):\n parameters.update(map(lambda x: x.tensor, ParameterIterator(block)))\n\n # Sets the optimizer and initializes the LR scheduler\n self.__optimizer = self.__optimizer_fn(parameters)\n self.__scheduler.init(self.__optimizer)", "def init(self, parameters, agent_parameters):\n pass", "def _initialize(self) -> None:\n p = self.params\n # We make self.input public so that users can access its methods like\n # IdsToStrings if needed.\n with py_utils.infeed_context_scope(\n infeed_host_index=p.infeed_host_index,\n num_infeed_hosts=p.num_infeed_hosts):\n self.input = p.input.Instantiate()\n\n if hasattr(self.input, 'datasource') and isinstance(\n self.input.datasource, datasource.TFDatasetSource):\n # For the special case when the input is implemented by a tf.data.Dataset,\n # use it directly. Otherwise roundtrip adaptions may result in returning\n # duplciate batches.\n self._get_next_fn = self.input.datasource.GetNext\n else:\n self._get_next_fn = tf.function(self._get_batch)\n self._num_batches_produced = 0", "def __init__(self, input_field: str, depth: int):\n self.maze = self._file_to_matrix(input_field, depth)\n\n # Determine the rim of the labyrinth.\n self.rim_x = self.maze.shape[2] - 4\n self.rim_y = self.maze.shape[1] - 4\n\n self.graph = nx.Graph()\n\n # Connect Path / points\n self.path_coordinates = np.argwhere(self.maze == PATH)\n self._build_path()\n\n # Determine Portal points and connect them to corresponding\n # other dimension.\n self.merge_portals_to_path()\n self.connect_portals()\n\n start = tuple(np.argwhere(self.maze == \"AA\")[0])\n goal = tuple(np.argwhere(self.maze == \"ZZ\")[0])\n\n try:\n self.shortest_path_length = nx.shortest_path_length(\n self.graph, start, goal)\n except nx.NetworkXNoPath:\n self.shortest_path_length = None", "def __init__(self, input=None):\r\n BaseAnalyzer.__init__(self, input)", "def __init__(self, input=None):\r\n BaseAnalyzer.__init__(self, input)", "def __init__(self):\n self.RRTFamilySolver = RRTFamilyPathPlanner()\n self.PRMSolver = PRMPathPlanner()", "def initialize(self):\n \n # lumopt.figures_of_merit.modematch object need initialization and\n # forward setting. 
h is a spacemap.utilities.simulation object\n if self._fom_type == 'ModeMatch':\n self.fom.initialize(self._ha)\n self.fom.make_forward_sim(self._ha)", "def initialize_inputs(self) -> None:\n self.inputs.lifts_per_truck_day = self.operations.lifts_per_truck_day()\n self.inputs.avg_tonnes_per_m3 = self.operations.avg_tonnes_per_m3()\n self.inputs.revenue_per_m3 = (\n self.income_statement.revenue.operating_revenue\n * self.inputs.allocation_to_collection_unit\n / self.operations.productivity.total_m3_collected\n )\n self.inputs.num_customers = self.operations.productivity.num_customers", "def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route", "def initialize(self, args):\n # You must parse model_config. JSON string is not parsed here\n self.model_config = json.loads(args['model_config'])\n print(\"model_config:\", self.model_config)\n\n self.input_names = []\n for input_config in self.model_config[\"input\"]:\n self.input_names.append(input_config[\"name\"])\n print(\"postprocess input names:\", self.input_names)\n\n self.output_names = []\n self.output_dtype = []\n for output_config in self.model_config[\"output\"]:\n self.output_names.append(output_config[\"name\"])\n dtype = pb_utils.triton_string_to_numpy(output_config[\"data_type\"])\n self.output_dtype.append(dtype)\n print(\"postprocess output names:\", self.output_names)\n self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()\n self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()\n self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()\n self.cls_threshold = 0.9", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def __init__(self):\n self.inputs = {}", "def __init__(self, my_map, paths, starts, goals, agent_goals, predictions):\n\n self.my_map = my_map\n self.paths = paths\n self.starts = starts\n self.goals = goals\n self.agent_goals = agent_goals\n self.num_of_agents = len(starts)\n self.predictions = predictions\n\n self.CPU_time = 0\n\n # compute heuristics for the low-level search\n self.heuristics = []\n for goal in self.goals:\n self.heuristics.append(compute_heuristics(my_map, goal))", "def __init__(self):\n self._names = []\n self._forwardFactories = []\n self._inputs = []\n self._inputFilters = {}\n self._outputFilters = []\n self._inputCheckers = []\n pass", "def __init__(__self__, *,\n labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableRouteArgs']]]] = None,\n virtual_hub_id: Optional[pulumi.Input[str]] = None):\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if virtual_hub_id is not None:\n pulumi.set(__self__, \"virtual_hub_id\", virtual_hub_id)", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, 
inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]", "def __init__(self) -> None:\n\n #: The underlying route table config file\n self._route_table = None", "def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()", "def __init__(self, inputs):\n self.x = None # x values for grid\n self.t = None # t values for grid\n self.R = None # Perturbation values\n self.uR = None # solution with disturbance\n self.first = True # place-holder variable for plotting\n self.uE = None # Exact solution for the problem\n self.h = None # step in x\n self.k = None # step in t\n self.step_x = inputs['step_x']\n\n self.step_t = inputs['step_t']\n self.min_t = inputs['min_t']\n self.max_t = inputs['max_t'] # change this to get the desired t\n self.min_x = inputs['min_x']\n self.max_x = inputs['max_x']\n self.epsilon = inputs['pertubation_epsilon']", "def _initialize(self,points_data,points_test=None):\n \n if self.type_init=='resp':\n log_assignements = initialize_log_assignements(self.init,self.n_components,points_data,points_test,\n self.covariance_type)\n self._step_M(points_data,log_assignements)\n \n elif self.type_init=='mcw':\n means,cov,log_weights = initialize_mcw(self.init,self.n_components,points_data,points_test,\n self.covariance_type)\n self.means = means\n self.cov = cov\n self.log_weights = log_weights\n \n elif self.type_init=='kmeans':\n self._initialize_cov(points_data)\n \n self._is_initialized = True", "def __init__(__self__, *,\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if disable_bgp_route_propagation is not None:\n pulumi.set(__self__, \"disable_bgp_route_propagation\", disable_bgp_route_propagation)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if resource_group_name is not None:\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if subnets is not None:\n pulumi.set(__self__, \"subnets\", subnets)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def RoutingInterfaceInitialize(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def _initialize_weights(self, inputs):\n if self.data_init:\n self._data_dep_init(inputs)\n else:\n self._init_norm()\n self._initialized = True", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(InitialSceneRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.numberOfTSPTurtles is None:\n self.numberOfTSPTurtles = 0\n else:\n self.numberOfTSPTurtles = 0", "def _set_init(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n ## Auxiliar information\n self.ks = None\n self.iss = [0]\n ## Class structural information\n self._setted = False\n self._constant_rel_pos = False\n self.staticneighs = None\n self.staticneighs_set = None", "def initialize(self, *args, **kwargs):\n pass", "def __init__(self, input, output):\n # 
init classes\n self.input = input\n self.output = output", "def __init__(__self__, *,\n resource_group_name: pulumi.Input[str],\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if disable_bgp_route_propagation is not None:\n pulumi.set(__self__, \"disable_bgp_route_propagation\", disable_bgp_route_propagation)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def RoutingInterfaceInitialize(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self, **kwargs):\n # Handle whatever kwargs we want here\n self.io_mapping = kwargs.get(\"io_mapping\", [])\n\n self.create_inputs = kwargs.get(\"create_inputs\", [])\n self.min_completion_fraction = kwargs.get(\"min_completion_fraction\", 1.0)\n\n # Now pass all of them to the parent class\n super(self.__class__, self).__init__(**kwargs)", "def test_route_init() -> None:\n rschema = RouteSchema(SpecificLocation())\n route = Route(schema=rschema)\n\n assert route.schema is rschema # Cannot use __eq__\n assert isinstance(route.stops, list)", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def __init__(__self__, *,\n as_path_match_mode: Optional[pulumi.Input[str]] = None,\n cen_id: Optional[pulumi.Input[str]] = None,\n cen_region_id: Optional[pulumi.Input[str]] = None,\n cidr_match_mode: Optional[pulumi.Input[str]] = None,\n community_match_mode: Optional[pulumi.Input[str]] = None,\n community_operate_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n destination_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n map_result: Optional[pulumi.Input[str]] = None,\n match_asns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n match_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n next_priority: Optional[pulumi.Input[int]] = None,\n operate_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n preference: Optional[pulumi.Input[int]] = None,\n prepend_as_paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n route_map_id: Optional[pulumi.Input[str]] = None,\n route_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n 
source_region_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n transit_router_route_table_id: Optional[pulumi.Input[str]] = None,\n transmit_direction: Optional[pulumi.Input[str]] = None):\n if as_path_match_mode is not None:\n pulumi.set(__self__, \"as_path_match_mode\", as_path_match_mode)\n if cen_id is not None:\n pulumi.set(__self__, \"cen_id\", cen_id)\n if cen_region_id is not None:\n pulumi.set(__self__, \"cen_region_id\", cen_region_id)\n if cidr_match_mode is not None:\n pulumi.set(__self__, \"cidr_match_mode\", cidr_match_mode)\n if community_match_mode is not None:\n pulumi.set(__self__, \"community_match_mode\", community_match_mode)\n if community_operate_mode is not None:\n pulumi.set(__self__, \"community_operate_mode\", community_operate_mode)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if destination_child_instance_types is not None:\n pulumi.set(__self__, \"destination_child_instance_types\", destination_child_instance_types)\n if destination_cidr_blocks is not None:\n pulumi.set(__self__, \"destination_cidr_blocks\", destination_cidr_blocks)\n if destination_instance_ids is not None:\n pulumi.set(__self__, \"destination_instance_ids\", destination_instance_ids)\n if destination_instance_ids_reverse_match is not None:\n pulumi.set(__self__, \"destination_instance_ids_reverse_match\", destination_instance_ids_reverse_match)\n if destination_route_table_ids is not None:\n pulumi.set(__self__, \"destination_route_table_ids\", destination_route_table_ids)\n if map_result is not None:\n pulumi.set(__self__, \"map_result\", map_result)\n if match_asns is not None:\n pulumi.set(__self__, \"match_asns\", match_asns)\n if match_community_sets is not None:\n pulumi.set(__self__, \"match_community_sets\", match_community_sets)\n if next_priority is not None:\n pulumi.set(__self__, \"next_priority\", next_priority)\n if operate_community_sets is not None:\n pulumi.set(__self__, \"operate_community_sets\", operate_community_sets)\n if preference is not None:\n pulumi.set(__self__, \"preference\", preference)\n if prepend_as_paths is not None:\n pulumi.set(__self__, \"prepend_as_paths\", prepend_as_paths)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if route_map_id is not None:\n pulumi.set(__self__, \"route_map_id\", route_map_id)\n if route_types is not None:\n pulumi.set(__self__, \"route_types\", route_types)\n if source_child_instance_types is not None:\n pulumi.set(__self__, \"source_child_instance_types\", source_child_instance_types)\n if source_instance_ids is not None:\n pulumi.set(__self__, \"source_instance_ids\", source_instance_ids)\n if source_instance_ids_reverse_match is not None:\n pulumi.set(__self__, \"source_instance_ids_reverse_match\", source_instance_ids_reverse_match)\n if source_region_ids is not None:\n pulumi.set(__self__, \"source_region_ids\", source_region_ids)\n if source_route_table_ids is not None:\n pulumi.set(__self__, \"source_route_table_ids\", source_route_table_ids)\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if transit_router_route_table_id is not None:\n pulumi.set(__self__, \"transit_router_route_table_id\", transit_router_route_table_id)\n if transmit_direction is not None:\n pulumi.set(__self__, \"transmit_direction\", transmit_direction)", "def __init__(self, input_path, **params):\n 
self.input_path = input_path\n self.params = {name: range_ for name, range_ in params.items() if range_ is not None}\n self.tuned = False\n self.optimal = {}", "def __init__(self, mapper):\n self.map = mapper\n self._router = routes.middleware.RoutesMiddleware(self._dispatch,\n self.map)", "def __init__(self, inputs=None, dependencies=None, **kwargs):\n self.target = False\n self.name = None\n\n if not hasattr(self, 'inputs'):\n self.inputs = inputs if inputs is not None else []\n self.dependencies = dependencies if dependencies is not None else []\n\n for k, v in kwargs.items():\n setattr(self, k, v)", "def init(a: str, h: str, c: str, r: bool, A: str, lock: Lock) -> None:\n global host, action, report, router, algorithm\n\n action = a\n algorithm = A\n\n if r:\n report = r\n if h:\n host = h\n if c:\n lock.acquire()\n try:\n router = PyOSRM(c, use_shared_memory=False, algorithm=algorithm)\n LOGGER.debug(\"Router instantiated\")\n finally:\n lock.release()", "def __init__(\n self, rule_id, parent_id, input, output, local=False, template=None, **kwargs\n ):\n self.rule_id = rule_id\n self.parent_id = parent_id\n self.input = input\n self.output = output\n self.local = local\n self.template = template\n self.params = kwargs", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def initialize(self, args):\n\t\tpass", "def __init__(self):\n\n self.gm = GradientMapper()\n self.im = SpringMapper()\n self.fm = FullMapper(self.im, self.gm)\n # self.lm = LineMapper(self.fm)\n self.exit = False", "def __init__(__self__, *,\n app_resource_id: Optional[pulumi.Input[str]] = None,\n filters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n open_api: Optional[pulumi.Input['GatewayRouteConfigOpenApiPropertiesArgs']] = None,\n predicates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n protocol: Optional[pulumi.Input[Union[str, 'GatewayRouteConfigProtocol']]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['GatewayApiRouteArgs']]]] = None,\n sso_enabled: Optional[pulumi.Input[bool]] = None):\n if app_resource_id is not None:\n pulumi.set(__self__, \"app_resource_id\", app_resource_id)\n if filters is not None:\n pulumi.set(__self__, \"filters\", filters)\n if open_api is not None:\n pulumi.set(__self__, \"open_api\", open_api)\n if predicates is not None:\n pulumi.set(__self__, \"predicates\", predicates)\n if protocol is None:\n protocol = 'HTTP'\n if protocol is not None:\n pulumi.set(__self__, \"protocol\", protocol)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if sso_enabled is not None:\n pulumi.set(__self__, \"sso_enabled\", sso_enabled)", "def initialize(self,inputDict):\n pass", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def init(self, parameters):\n pass", "def init():\n \n # General parameters\n vect_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/' # graphs directory\n csv_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/nodes_for_tracking.csv' # csv file \n dest_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/' # output directory\n verbose = True\n main_params = [vect_path, csv_path, 
dest_path, verbose]\n \n # Linking parameters\n createCSV = True \n forced_matching = True\n search_range = 10\n memory = 3\n adaptive_stop = 5 \n link_params = [createCSV, forced_matching, search_range, memory, \n adaptive_stop]\n \n # Tracking check parameters\n check = True # True to create a check image\n img_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking2/MosaicTest_t070.jpg' # image file on which to draw\n size = 1 # size of the nodes drawing\n check_params = [check, img_path, size]\n \n return main_params, link_params, check_params", "def _populate_input_data_transfer_fields(self):\r\n # Valid fields for the Route Stops input are described here:\r\n # https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route-input-data-types.htm\r\n # Do not transfer RouteName or Sequence as these are explicitly controlled by this tool. Do not transfer\r\n # LocationType because we want all inputs to be Stops. Waypoints don't make sense for this analysis.\r\n int_types = [\"Integer\", \"SmallInteger\"]\r\n numerical_types = [\"Double\", \"Single\"] + int_types\r\n rt_stops_input_fields = {\r\n \"Name\": [\"String\"],\r\n \"AdditionalTime\": numerical_types,\r\n \"AdditionalDistance\": numerical_types,\r\n \"AdditionalCost\": numerical_types,\r\n \"TimeWindowStart\": [\"Date\"],\r\n \"TimeWindowEnd\": [\"Date\"],\r\n \"CurbApproach\": int_types,\r\n \"Bearing\": numerical_types,\r\n \"BearingTol\": numerical_types,\r\n \"NavLatency\": numerical_types,\r\n \"SourceID\": int_types,\r\n \"SourceOID\": int_types,\r\n \"PosAlong\": numerical_types,\r\n \"SideOfEdge\": int_types\r\n }\r\n # Preserve origin and destination input fields that match names and types\r\n origin_transfer_fields = [\r\n f.name for f in arcpy.ListFields(self.origins) if f.name in rt_stops_input_fields and\r\n f.type in rt_stops_input_fields[f.name]]\r\n self.rt_inputs[\"origin_transfer_fields\"] = origin_transfer_fields\r\n if origin_transfer_fields:\r\n LOGGER.info((\r\n \"Supported fields in the input Origins table that will be used in the analysis: \"\r\n f\"{origin_transfer_fields}\"\r\n ))\r\n destination_transfer_fields = [\r\n f.name for f in arcpy.ListFields(self.destinations) if f.name in rt_stops_input_fields and\r\n f.type in rt_stops_input_fields[f.name]]\r\n self.rt_inputs[\"destination_transfer_fields\"] = destination_transfer_fields\r\n if destination_transfer_fields:\r\n LOGGER.info((\r\n \"Supported fields in the input Destinations table that will be used in the analysis: \"\r\n f\"{destination_transfer_fields}\"\r\n ))", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n core_network_arn: Optional[pulumi.Input[str]] = None,\n core_network_attachment_arn: Optional[pulumi.Input[str]] = None,\n customer_gateway_configuration: Optional[pulumi.Input[str]] = None,\n customer_gateway_id: Optional[pulumi.Input[str]] = None,\n enable_acceleration: Optional[pulumi.Input[bool]] = None,\n local_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n local_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n outside_ip_address_type: Optional[pulumi.Input[str]] = None,\n remote_ipv4_network_cidr: Optional[pulumi.Input[str]] = None,\n remote_ipv6_network_cidr: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionRouteArgs']]]] = None,\n static_routes_only: Optional[pulumi.Input[bool]] = None,\n tags: 
Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n transit_gateway_id: Optional[pulumi.Input[str]] = None,\n transport_transit_gateway_attachment_id: Optional[pulumi.Input[str]] = None,\n tunnel1_address: Optional[pulumi.Input[str]] = None,\n tunnel1_bgp_asn: Optional[pulumi.Input[str]] = None,\n tunnel1_bgp_holdtime: Optional[pulumi.Input[int]] = None,\n tunnel1_cgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel1_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel1_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel1_log_options: Optional[pulumi.Input['VpnConnectionTunnel1LogOptionsArgs']] = None,\n tunnel1_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel1_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel1_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel1_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel1_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel1_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel1_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel1_vgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel2_address: Optional[pulumi.Input[str]] = None,\n tunnel2_bgp_asn: Optional[pulumi.Input[str]] = None,\n tunnel2_bgp_holdtime: Optional[pulumi.Input[int]] = None,\n tunnel2_cgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_action: Optional[pulumi.Input[str]] = None,\n tunnel2_dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_enable_tunnel_lifecycle_control: Optional[pulumi.Input[bool]] = None,\n tunnel2_ike_versions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_inside_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_inside_ipv6_cidr: Optional[pulumi.Input[str]] = None,\n tunnel2_log_options: Optional[pulumi.Input['VpnConnectionTunnel2LogOptionsArgs']] = None,\n tunnel2_phase1_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase1_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase1_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_phase2_dh_group_numbers: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n tunnel2_phase2_encryption_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n 
tunnel2_phase2_integrity_algorithms: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tunnel2_phase2_lifetime_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_preshared_key: Optional[pulumi.Input[str]] = None,\n tunnel2_rekey_fuzz_percentage: Optional[pulumi.Input[int]] = None,\n tunnel2_rekey_margin_time_seconds: Optional[pulumi.Input[int]] = None,\n tunnel2_replay_window_size: Optional[pulumi.Input[int]] = None,\n tunnel2_startup_action: Optional[pulumi.Input[str]] = None,\n tunnel2_vgw_inside_address: Optional[pulumi.Input[str]] = None,\n tunnel_inside_ip_version: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n vgw_telemetries: Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionVgwTelemetryArgs']]]] = None,\n vpn_gateway_id: Optional[pulumi.Input[str]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if core_network_arn is not None:\n pulumi.set(__self__, \"core_network_arn\", core_network_arn)\n if core_network_attachment_arn is not None:\n pulumi.set(__self__, \"core_network_attachment_arn\", core_network_attachment_arn)\n if customer_gateway_configuration is not None:\n pulumi.set(__self__, \"customer_gateway_configuration\", customer_gateway_configuration)\n if customer_gateway_id is not None:\n pulumi.set(__self__, \"customer_gateway_id\", customer_gateway_id)\n if enable_acceleration is not None:\n pulumi.set(__self__, \"enable_acceleration\", enable_acceleration)\n if local_ipv4_network_cidr is not None:\n pulumi.set(__self__, \"local_ipv4_network_cidr\", local_ipv4_network_cidr)\n if local_ipv6_network_cidr is not None:\n pulumi.set(__self__, \"local_ipv6_network_cidr\", local_ipv6_network_cidr)\n if outside_ip_address_type is not None:\n pulumi.set(__self__, \"outside_ip_address_type\", outside_ip_address_type)\n if remote_ipv4_network_cidr is not None:\n pulumi.set(__self__, \"remote_ipv4_network_cidr\", remote_ipv4_network_cidr)\n if remote_ipv6_network_cidr is not None:\n pulumi.set(__self__, \"remote_ipv6_network_cidr\", remote_ipv6_network_cidr)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if static_routes_only is not None:\n pulumi.set(__self__, \"static_routes_only\", static_routes_only)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if transit_gateway_attachment_id is not None:\n pulumi.set(__self__, \"transit_gateway_attachment_id\", transit_gateway_attachment_id)\n if transit_gateway_id is not None:\n pulumi.set(__self__, \"transit_gateway_id\", transit_gateway_id)\n if transport_transit_gateway_attachment_id is not None:\n pulumi.set(__self__, \"transport_transit_gateway_attachment_id\", transport_transit_gateway_attachment_id)\n if tunnel1_address is not None:\n pulumi.set(__self__, \"tunnel1_address\", tunnel1_address)\n if tunnel1_bgp_asn is not None:\n pulumi.set(__self__, \"tunnel1_bgp_asn\", tunnel1_bgp_asn)\n if tunnel1_bgp_holdtime is not None:\n pulumi.set(__self__, \"tunnel1_bgp_holdtime\", tunnel1_bgp_holdtime)\n if tunnel1_cgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel1_cgw_inside_address\", tunnel1_cgw_inside_address)\n if tunnel1_dpd_timeout_action is not None:\n pulumi.set(__self__, \"tunnel1_dpd_timeout_action\", tunnel1_dpd_timeout_action)\n if tunnel1_dpd_timeout_seconds is not None:\n pulumi.set(__self__, \"tunnel1_dpd_timeout_seconds\", tunnel1_dpd_timeout_seconds)\n if tunnel1_enable_tunnel_lifecycle_control is not None:\n 
pulumi.set(__self__, \"tunnel1_enable_tunnel_lifecycle_control\", tunnel1_enable_tunnel_lifecycle_control)\n if tunnel1_ike_versions is not None:\n pulumi.set(__self__, \"tunnel1_ike_versions\", tunnel1_ike_versions)\n if tunnel1_inside_cidr is not None:\n pulumi.set(__self__, \"tunnel1_inside_cidr\", tunnel1_inside_cidr)\n if tunnel1_inside_ipv6_cidr is not None:\n pulumi.set(__self__, \"tunnel1_inside_ipv6_cidr\", tunnel1_inside_ipv6_cidr)\n if tunnel1_log_options is not None:\n pulumi.set(__self__, \"tunnel1_log_options\", tunnel1_log_options)\n if tunnel1_phase1_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel1_phase1_dh_group_numbers\", tunnel1_phase1_dh_group_numbers)\n if tunnel1_phase1_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase1_encryption_algorithms\", tunnel1_phase1_encryption_algorithms)\n if tunnel1_phase1_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase1_integrity_algorithms\", tunnel1_phase1_integrity_algorithms)\n if tunnel1_phase1_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel1_phase1_lifetime_seconds\", tunnel1_phase1_lifetime_seconds)\n if tunnel1_phase2_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel1_phase2_dh_group_numbers\", tunnel1_phase2_dh_group_numbers)\n if tunnel1_phase2_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase2_encryption_algorithms\", tunnel1_phase2_encryption_algorithms)\n if tunnel1_phase2_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel1_phase2_integrity_algorithms\", tunnel1_phase2_integrity_algorithms)\n if tunnel1_phase2_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel1_phase2_lifetime_seconds\", tunnel1_phase2_lifetime_seconds)\n if tunnel1_preshared_key is not None:\n pulumi.set(__self__, \"tunnel1_preshared_key\", tunnel1_preshared_key)\n if tunnel1_rekey_fuzz_percentage is not None:\n pulumi.set(__self__, \"tunnel1_rekey_fuzz_percentage\", tunnel1_rekey_fuzz_percentage)\n if tunnel1_rekey_margin_time_seconds is not None:\n pulumi.set(__self__, \"tunnel1_rekey_margin_time_seconds\", tunnel1_rekey_margin_time_seconds)\n if tunnel1_replay_window_size is not None:\n pulumi.set(__self__, \"tunnel1_replay_window_size\", tunnel1_replay_window_size)\n if tunnel1_startup_action is not None:\n pulumi.set(__self__, \"tunnel1_startup_action\", tunnel1_startup_action)\n if tunnel1_vgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel1_vgw_inside_address\", tunnel1_vgw_inside_address)\n if tunnel2_address is not None:\n pulumi.set(__self__, \"tunnel2_address\", tunnel2_address)\n if tunnel2_bgp_asn is not None:\n pulumi.set(__self__, \"tunnel2_bgp_asn\", tunnel2_bgp_asn)\n if tunnel2_bgp_holdtime is not None:\n pulumi.set(__self__, \"tunnel2_bgp_holdtime\", tunnel2_bgp_holdtime)\n if tunnel2_cgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel2_cgw_inside_address\", tunnel2_cgw_inside_address)\n if tunnel2_dpd_timeout_action is not None:\n pulumi.set(__self__, \"tunnel2_dpd_timeout_action\", tunnel2_dpd_timeout_action)\n if tunnel2_dpd_timeout_seconds is not None:\n pulumi.set(__self__, \"tunnel2_dpd_timeout_seconds\", tunnel2_dpd_timeout_seconds)\n if tunnel2_enable_tunnel_lifecycle_control is not None:\n pulumi.set(__self__, \"tunnel2_enable_tunnel_lifecycle_control\", tunnel2_enable_tunnel_lifecycle_control)\n if tunnel2_ike_versions is not None:\n pulumi.set(__self__, \"tunnel2_ike_versions\", tunnel2_ike_versions)\n if tunnel2_inside_cidr is not None:\n pulumi.set(__self__, 
\"tunnel2_inside_cidr\", tunnel2_inside_cidr)\n if tunnel2_inside_ipv6_cidr is not None:\n pulumi.set(__self__, \"tunnel2_inside_ipv6_cidr\", tunnel2_inside_ipv6_cidr)\n if tunnel2_log_options is not None:\n pulumi.set(__self__, \"tunnel2_log_options\", tunnel2_log_options)\n if tunnel2_phase1_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel2_phase1_dh_group_numbers\", tunnel2_phase1_dh_group_numbers)\n if tunnel2_phase1_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase1_encryption_algorithms\", tunnel2_phase1_encryption_algorithms)\n if tunnel2_phase1_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase1_integrity_algorithms\", tunnel2_phase1_integrity_algorithms)\n if tunnel2_phase1_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel2_phase1_lifetime_seconds\", tunnel2_phase1_lifetime_seconds)\n if tunnel2_phase2_dh_group_numbers is not None:\n pulumi.set(__self__, \"tunnel2_phase2_dh_group_numbers\", tunnel2_phase2_dh_group_numbers)\n if tunnel2_phase2_encryption_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase2_encryption_algorithms\", tunnel2_phase2_encryption_algorithms)\n if tunnel2_phase2_integrity_algorithms is not None:\n pulumi.set(__self__, \"tunnel2_phase2_integrity_algorithms\", tunnel2_phase2_integrity_algorithms)\n if tunnel2_phase2_lifetime_seconds is not None:\n pulumi.set(__self__, \"tunnel2_phase2_lifetime_seconds\", tunnel2_phase2_lifetime_seconds)\n if tunnel2_preshared_key is not None:\n pulumi.set(__self__, \"tunnel2_preshared_key\", tunnel2_preshared_key)\n if tunnel2_rekey_fuzz_percentage is not None:\n pulumi.set(__self__, \"tunnel2_rekey_fuzz_percentage\", tunnel2_rekey_fuzz_percentage)\n if tunnel2_rekey_margin_time_seconds is not None:\n pulumi.set(__self__, \"tunnel2_rekey_margin_time_seconds\", tunnel2_rekey_margin_time_seconds)\n if tunnel2_replay_window_size is not None:\n pulumi.set(__self__, \"tunnel2_replay_window_size\", tunnel2_replay_window_size)\n if tunnel2_startup_action is not None:\n pulumi.set(__self__, \"tunnel2_startup_action\", tunnel2_startup_action)\n if tunnel2_vgw_inside_address is not None:\n pulumi.set(__self__, \"tunnel2_vgw_inside_address\", tunnel2_vgw_inside_address)\n if tunnel_inside_ip_version is not None:\n pulumi.set(__self__, \"tunnel_inside_ip_version\", tunnel_inside_ip_version)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if vgw_telemetries is not None:\n pulumi.set(__self__, \"vgw_telemetries\", vgw_telemetries)\n if vpn_gateway_id is not None:\n pulumi.set(__self__, \"vpn_gateway_id\", vpn_gateway_id)", "def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)", "def initialise(self, **kwargs):\n pass", "def __init__(self):\n\n\t\tself.Helpers = Helpers(\"TassAI\", False)\n\n\t\tself.qs = 16\n\t\tself.context = InferenceContext([self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"], self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"], self.Helpers.confs[\"iotJumpWay\"][\"MQTT\"][\"TassAI\"][\"runas\"]], \"\", \"\", \"\")\n\n\t\tself.Helpers.logger.info(\"TassAI Helper Class initialization complete.\")", "def __init__(self):\n self.parameter = [[(0,1),(1,1),(0,0),(1,0)],\n [(1,0),(1,0),(0,1),(0,1)],\n [(1,0),(0,1),(1,0),(0,1)],\n [(0,0),(0,0),(0,0),(0,0)]]\n \"\"\"Distance is number of whole route from origin to destination\"\"\"\n self.distance = 100\n \"\"\"action sets\"\"\"\n self.action = [0,55,75]\n self.maxSpeed = self.action[-1]\n \"\"\"time 
period for each stage\"\"\"\n self.time_interval = 0.5\n \"\"\"Number of stages we want to check. Here we can use this to limit the\n travel time, since sometimes we want driver to arrive in a time window.\n For example, if stage is 4, that means we want to driver finish route within \n 2 hours. \"\"\"\n self.stage = 4\n self.time_block = 0.5\n self.distance_block = 25", "def __init__(__self__, *,\n location: pulumi.Input[str],\n resourcegroup: pulumi.Input[str],\n accesslevel: Optional[pulumi.Input[str]] = None,\n accountname: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"location\", location)\n pulumi.set(__self__, \"resourcegroup\", resourcegroup)\n if accesslevel is not None:\n pulumi.set(__self__, \"accesslevel\", accesslevel)\n if accountname is not None:\n pulumi.set(__self__, \"accountname\", accountname)", "def __init__(__self__,\n resource_name: str,\n args: RouteTableArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self):\n super().__init__()\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)\n actor_actor_matrix_obj.fetchActorActorSimilarityMatrix()", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_cidr_block: Optional[pulumi.Input[str]] = None,\n destination_port_range: Optional[pulumi.Input['TrafficMirrorFilterRuleDestinationPortRangeArgs']] = None,\n protocol: Optional[pulumi.Input[int]] = None,\n rule_action: Optional[pulumi.Input[str]] = None,\n rule_number: Optional[pulumi.Input[int]] = None,\n source_cidr_block: Optional[pulumi.Input[str]] = None,\n source_port_range: Optional[pulumi.Input['TrafficMirrorFilterRuleSourcePortRangeArgs']] = None,\n traffic_direction: Optional[pulumi.Input[str]] = None,\n traffic_mirror_filter_id: Optional[pulumi.Input[str]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if destination_cidr_block is not None:\n pulumi.set(__self__, \"destination_cidr_block\", destination_cidr_block)\n if destination_port_range is not None:\n pulumi.set(__self__, \"destination_port_range\", destination_port_range)\n if protocol is not None:\n pulumi.set(__self__, \"protocol\", protocol)\n if rule_action is not None:\n pulumi.set(__self__, \"rule_action\", rule_action)\n if rule_number is not None:\n pulumi.set(__self__, \"rule_number\", rule_number)\n if source_cidr_block is not None:\n pulumi.set(__self__, \"source_cidr_block\", source_cidr_block)\n if source_port_range is not None:\n pulumi.set(__self__, \"source_port_range\", source_port_range)\n if traffic_direction is not None:\n pulumi.set(__self__, \"traffic_direction\", traffic_direction)\n if traffic_mirror_filter_id is not None:\n pulumi.set(__self__, \"traffic_mirror_filter_id\", traffic_mirror_filter_id)", "def initialize(self):\n remote_event = self.args.get(\"remote_event\", \"deconz_event\")\n remote_event_filter = self.args.get(\"remote_event_filter\", {})\n motion_sensors = self.args.get(\"motion_sensors\", [])\n self._light_group = self.args.get(\"light_group\", \"light.cocina\")\n self._main_constrain = self.args.get(\"toggle_automation\")\n self._delay_re_enable_motion_control = int(\n self.args.get(\"delay_re_enable_motion_control\", 120)\n )\n self._max_delay_motion_off = int(\n self.args.get(\"max_delay_motion_off\", 900)\n )\n\n self._scene_rotation = {\n 
scene_key: i\n for i, scene_key in enumerate(self.args.get(\"rotate_scene_order\"))\n }\n _schedule_config = self.args.get(\"scene_schedule\")\n self.log(\n f\"[DEBUG] APP INIT with schedule_config {_schedule_config}\",\n level=\"WARNING\",\n log=LOGGER,\n )\n\n self._default_scene = self.args.get(\"default_scene\")\n self._scenes = {}\n self._time_windows = {}\n for scene_key, scene_data in self.args.get(\"scene_schedule\").items():\n self._time_windows[scene_key] = (\n scene_data.get(\"from\", \"00:00:00\"),\n scene_data.get(\"to\", \"00:00:00\"),\n )\n self._scenes[scene_key] = (\n scene_data[\"turn_on_service_call\"],\n scene_data[\"wait_to_turn_off\"],\n )\n\n light_st = self.get_state(self._light_group)\n self._light_on = light_st == \"on\"\n self._last_switch_press = 0.0\n self._last_scene = self._default_scene\n\n self._motion_states = {}\n for sensor in motion_sensors:\n self._motion_states[sensor] = self.get_state(sensor) == \"on\"\n self.listen_state(\n self._motion_detected,\n sensor,\n constrain_input_boolean=self._main_constrain,\n )\n self._motion_on = any(self._motion_states.values())\n self._last_light_on = 0.0 if not self._motion_on else monotonic()\n self._motion_light_enabled = True\n\n self.listen_state(self._light_changed, self._light_group)\n self.listen_event(\n self._switch_event, remote_event, **remote_event_filter\n )\n # Add listener to check light off after a long time\n self.listen_state(\n self._no_motion_for_long_time,\n motion_sensors[0],\n new=\"off\",\n duration=self._max_delay_motion_off,\n # constrain_input_boolean=self._main_constrain,\n )\n self.log(\n f\"APP INIT with light {light_st}, motion: {self._motion_states}\",\n level=\"WARNING\",\n log=LOGGER,\n )", "def initialize(self, **kwargs):", "def __init__(self, resource, meta, sam_sys_inputs, site_sys_inputs=None,\n output_request=None, drop_leap=False):\n\n # need to check tilt=lat and azimuth for pv systems\n meta = self._parse_meta(meta)\n sam_sys_inputs = self.set_latitude_tilt_az(sam_sys_inputs, meta)\n\n super().__init__(resource, meta, sam_sys_inputs,\n site_sys_inputs=site_sys_inputs,\n output_request=output_request,\n drop_leap=drop_leap)", "def __init__(self):\n\n # Set a node name - something relevant\n rospy.init_node('waypoint_updater')\n\n # Most recent pose\n self.pose = None\n\n # Map waypoint list \n self.waypoints = None\n\n # Map waypoint list xy only \n self.waypoints_2d = None\n\n # Map waypoint list xy only as KDTree\n self.waypoint_tree = None\n\n # Index at which to stop the vehicle\n # Negative one is a sentinel meaning no stop is required\n self.stopline_waypoint_idx = -1\n\n # Add subscriptions and handlers for relevant messages\n rospy.Subscriber('/base_waypoints', Lane, self.base_waypoints_cb)\n rospy.Subscriber('/current_pose', PoseStamped, self.current_pose_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_waypoint_cb)\n\n # Create publisher for final waypoints\n self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)\n\n # Start loop\n self.loop()", "def __init__(self, router):\n\n self.router = router", "def __init__(self, jobs, stores, deliverers, distances_matrix, evaluator, codec, driver_ends_at_start=True,\n route_initialization_method=\"random\"):\n self.jobs = jobs\n self.stores = stores\n self.deliverers = deliverers\n self.distances_matrix = distances_matrix\n self.evaluator = evaluator\n self.solution = None\n self.codec = codec\n self.driver_ends_at_start = driver_ends_at_start\n self.route_initialization_method = 
route_initialization_method", "def __init__(self, *args, **kwargs):\n if len(args) > 0:\n self._init_from_ascii(*args)\n # The ASCII files store ra in hours, so convert to degrees.\n # RA as a keyword init argument should still be degrees.\n self.ra = 15.0*self.ra_hour\n if len(kwargs) > 0: self._init_from_keywords(**kwargs)", "def __init__(self, CalcType=CALC_TYPE_SRMP_TO_ARM, SR=\"\", RRT=\"\",\n RRQ=\"\", ABIndicator=\"\", ReferenceDate=None, ARM=0,\n SRMP=0, ResponseDate=None, TransId=None):\n # Calculation Type. 0 = SRMP to ARM, 1 = ARM to SRMP *\n self.CalcType = CalcType\n # Three digit state route ID. *\n self.SR = SR\n # Related Route Type *\n self.RRT = RRT\n # Related Route Qualifier *\n self.RRQ = RRQ\n # Ahead / Back indicator for SRMP. \"A\" or null, or \"B\" *\n self.ABIndicator = ABIndicator\n # Input data collection date *\n self.ReferenceDate = ReferenceDate\n # Accumulated Route Mileage. Actual measure *\n self.ARM = ARM\n # State Route Milepost - Posted milepost. May not match actual measure\n # due to route adjustments over time. *\n self.SRMP = SRMP\n # Output date. Use self to match an LRS publication date. *\n self.ResponseDate = ResponseDate\n # Transaction ID. Use a unique ID with batch results. *\n self.TransId = TransId", "def __init__(self, *args):\n \n self.steps = args", "def initialise(self):\n # Can take quite a lot of time due to the homing\n print(\"Initialising spectrograph.\")\n err = self._dll.ShamrockInitialize()\n self.status(\"Initialisation\", err)", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def initialize_from_http_request(self, request):\n self.tool_id = request.path.strip('/').split('/')[0]\n self.path = request.path\n self._set_response_format(request)\n self._set_ip_address(request)\n self._set_tool_address_family(request)\n self._set_user_agent(request)\n self._set_path_qs(request)\n self._set_geolocation(request)\n self.metro = request.get(message.METRO, default_value=None)\n self._set_policy(request)", "def initialize(self, **params):\n\n self._n_x = params['n_x']\n self._n_y = params['n_y']\n self._dx = params['dx']\n self._dy = params['dy']\n\n self._r_corr_in_points = self._r_corr_in_meters // max(self._dx, self._dy)", "def __init__(self, route_idx=None, arr_date=None, dep_time=None, lon=None, dep_date=None, track=None, rt_track=None, id=None, rt_dep_time=None, rt_arr_time=None, name=None, arr_time=None, lat=None, rt_dep_date=None, rt_arr_date=None):\n\n self._route_idx = None\n self._arr_date = None\n self._dep_time = None\n self._lon = None\n self._dep_date = None\n self._track = None\n self._rt_track = None\n self._id = None\n self._rt_dep_time = None\n self._rt_arr_time = None\n self._name = None\n self._arr_time = None\n self._lat = None\n self._rt_dep_date = None\n self._rt_arr_date = None\n\n self.route_idx = route_idx\n if arr_date is not None:\n self.arr_date = arr_date\n if dep_time is not None:\n self.dep_time = dep_time\n self.lon = lon\n if dep_date is not None:\n self.dep_date = dep_date\n self.track = track\n if rt_track is not None:\n self.rt_track = rt_track\n self.id = id\n if rt_dep_time is not None:\n self.rt_dep_time = rt_dep_time\n if rt_arr_time is not None:\n self.rt_arr_time = rt_arr_time\n self.name = name\n if arr_time is not None:\n self.arr_time = arr_time\n self.lat = lat\n if rt_dep_date is not None:\n self.rt_dep_date = rt_dep_date\n if rt_arr_date is not None:\n self.rt_arr_date = rt_arr_date", "def __init__(self, 
tfInputGraph=None, inputMapping=None, outputMapping=None, tfHParms=None):\n super(TFTransformer, self).__init__()\n kwargs = self._input_kwargs\n self.setParams(**kwargs)", "def __init__(self):\r\n\t\tself.params = arcpy.GetParameterInfo()\r\n\r\n\t\tself.input_raster = self.params[0]\r\n\t\tself.approach = self.params[1]\r\n\t\tself.predefined_pattern = self.params[2]\r\n\t\tself.pattern_workspace = self.params[3]\r\n\t\tself.point_matrix_size = self.params[4]\r\n\t\tself.point_vectors = self.params[5]\r\n\t\tself.mapping_field = self.params[6]\r\n\t\tself.move_to_max = self.params[7]\r\n\t\tself.move_to_max_distance = self.params[8]\r\n\t\tself.mh_iteration = self.params[9]\r\n\t\tself.mh_dil_val = self.params[10]\r\n\t\tself.mh_dil_start = self.params[11]\r\n\t\tself.mh_dil_stop = self.params[12]\r\n\t\tself.mh_dil_step = self.params[13]\r\n\t\tself.transform = self.params[14]\r\n\t\tself.size_of_the_cell = self.params[15]\r\n\t\tself.output_sim_matrix = self.params[16]\r\n\t\tself.output_table = self.params[17]\r\n\t\tself.output_raster_workspace = self.params[18]", "def __init__(self, router):\n self._router = router", "def __init__(self):\n \n rospy.init_node('trajectory_planner', anonymous=True)\n \n rospy.Subscriber('/costmap_2d', OccupancyGrid, self.costmap_callback)\n rospy.Subscriber('/exploration_complete', Bool, self.exploration_complete_callback)\n self.trans_listener = tf.TransformListener()\n \n self.traj_pub = rospy.Publisher('/cmd_path', Path, queue_size=1)\n\tself.auto_goal_pub = rospy.Publisher('/auto_goal', Pose2D, queue_size=1)\n self.exp_complete_pub = rospy.Publisher('/exploration_complete', Bool, queue_size=1)\n\n self.number_of_fails = 0", "def initialize(self, request, args, kwargs):\n raise NotImplementedError()", "def initialize(self):\n\n \"*** YOUR CODE HERE\"\n self.path = []\n MyAgent.customFood = None\n MyAgent.foodLeft = 0\n MyAgent.specialWalls = {}\n self.followOne = False\n if self.index == 0:\n MyAgent.finding = []\n MyAgent.finding.append(False)", "def __init__(self):\n rospy.init_node(\"kinect_transformer\")\n self.kinect_depth_sub = rospy.Subscriber(\"kinect/depth/points\", pc2.PointCloud2, self.kinect_cb, queue_size=10)\n self.left_obs_pub = rospy.Publisher(\"left_arm_obstacles\", PointCloud, queue_size=10, latch=True)\n self.right_obs_pub = rospy.Publisher(\"right_arm_obstacles\", PointCloud, queue_size=10, latch=True)\n self.tf = tf.TransformListener()\n self.closest_rgb_points = []\n # create collision checkers with the left and right kin solver instances\n self.left_cc = CollisionChecker([], KDLIKSolver(\"left\"))\n self.right_cc = CollisionChecker([], KDLIKSolver(\"right\"))", "def __init__(self, *arg, **kw):\n #: Input connection mapping\n self._inputs = {}\n for name, port in six.iteritems(self.input_ports):\n self._inputs[name] = port(self, name=name)\n\n #: Output connection mapping\n self._outputs = {}\n for name, port in six.iteritems(self.output_ports):\n self._outputs[name] = port(self, name=name)\n\n #: data cache\n self._output_data = {}", "def _initialize_model_params(self):\n\n if 'model' not in self._raw_data_dict:\n raise Error('The \"model\" key is not found in the configuration file. 
Looks like the parsed file is not '\n 'Object Detection API model configuration file.')\n params = list(self._raw_data_dict['model'].values())[0]\n for rule in mapping_rules:\n self._update_param_using_rule(params, rule)", "def __init__(__self__, *,\n acl_id: pulumi.Input[str],\n dest_cidr: pulumi.Input[str],\n dest_port_range: pulumi.Input[str],\n direction: pulumi.Input[str],\n ip_protocol: pulumi.Input[str],\n policy: pulumi.Input[str],\n source_cidr: pulumi.Input[str],\n source_port_range: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"acl_id\", acl_id)\n pulumi.set(__self__, \"dest_cidr\", dest_cidr)\n pulumi.set(__self__, \"dest_port_range\", dest_port_range)\n pulumi.set(__self__, \"direction\", direction)\n pulumi.set(__self__, \"ip_protocol\", ip_protocol)\n pulumi.set(__self__, \"policy\", policy)\n pulumi.set(__self__, \"source_cidr\", source_cidr)\n pulumi.set(__self__, \"source_port_range\", source_port_range)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)", "def __init__(self, inputs=[]):\n self.inputs = inputs # input_list <- C, Java <- 匈牙利命名法 -> Python 特别不建议\n # self.outputs = outputs # output_list\n self.value = None\n self.outputs = []\n self.gradients = {}\n\n for node in self.inputs:\n node.outputs.append(self) # build a connection relationship", "def __init__(self):\n rospy.init_node(\"navigate_map\") # start node\n\tself.frontier_show = []\n\tself.first = True\n\tself.initialS = None\n\tself.once = False\n self.frontier = []\n self.centroidValue = None\n self.regions = []\n self.detected = False\n\tself.resolution = 0.05\n \tself.start = None\n\tself.transformed_map = None\n \tself.odom_sub = rospy.Subscriber('/odom', Odometry, self.odom_callback)\n \tself.sub = rospy.Subscriber(\"/map\", OccupancyGrid, self.handle_navigate_map)\n \tself.robot_path_pub = rospy.Publisher('/robot_path', Path, queue_size = 5)\n self.cent_pub = rospy.Publisher('/cent_set', GridCells, queue_size = 5)\n\tself.front_pub = rospy.Publisher('/frontier_set', GridCells, queue_size = 5)\n\tself.goal_pos_pub = rospy.Subscriber('/move_base_simple/goal',PoseStamped ,self.goal_call_back)\n\tself.tf_listener = tf.TransformListener()", "def setup(self):\n super().setup()\n self.ctx.restart_calc = None\n self.ctx.inputs = AttributeDict(self.exposed_inputs(XspectraCalculation, 'xspectra'))\n\n self.ctx.inputs.parameters = self.ctx.inputs.parameters.get_dict()", "def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()", "def init_rules_frame(self):\n self.rules_frame_class = RulesInput(self)", "def __init__(self, *args, **kwargs):\n if len(args) > 0: self._init_from_ascii(*args)\n if len(kwargs) > 0: self._init_from_keywords(**kwargs)\n self.ra = ephem.hours(self.ra_str)*180/ephem.pi\n self.dec = ephem.degrees(self.dec_str)*180/ephem.pi\n if self.sn_z is None: self.sn_z = 0.0\n if self.host_z is None: self.host_z = 0.0" ]
[ "0.6117074", "0.59076273", "0.5906306", "0.58633006", "0.5775825", "0.5771856", "0.57236266", "0.5721005", "0.57191586", "0.5712019", "0.5708202", "0.56965554", "0.56650615", "0.56602186", "0.5631149", "0.56289285", "0.56277144", "0.56277144", "0.56152123", "0.5613303", "0.56115943", "0.5600614", "0.5554972", "0.55544186", "0.55434805", "0.55326265", "0.5513063", "0.55073655", "0.54984605", "0.5497366", "0.54752713", "0.5470086", "0.5467844", "0.54373926", "0.543513", "0.54200107", "0.5418522", "0.5389054", "0.53824747", "0.53776693", "0.53702575", "0.53693205", "0.5359966", "0.5359187", "0.53585774", "0.5347081", "0.534632", "0.53440183", "0.5342446", "0.5339945", "0.5331006", "0.53256077", "0.5324572", "0.5324216", "0.5323252", "0.53126407", "0.530903", "0.53086895", "0.53077394", "0.5293158", "0.5293149", "0.52883637", "0.5282647", "0.52667016", "0.5263313", "0.52605915", "0.52538383", "0.5250798", "0.52494335", "0.52464724", "0.5243266", "0.52397376", "0.52391726", "0.523387", "0.5225344", "0.52230376", "0.5220324", "0.52158195", "0.52126557", "0.5208968", "0.5208628", "0.52070314", "0.5206511", "0.5206304", "0.52029014", "0.52023804", "0.51942384", "0.5190732", "0.5188603", "0.5188145", "0.5185584", "0.5182606", "0.51801634", "0.5175357", "0.51723963", "0.5172134", "0.5170957", "0.51692116", "0.5167191", "0.51574475", "0.51571286" ]
0.0
-1
Initialize a Route solver object and set properties.
def initialize_rt_solver(self):
    # For a local network dataset, we need to check out the Network Analyst extension license.
    if not self.is_service:
        arcpy.CheckOutExtension("network")

    # Create a new Route object
    self.logger.debug("Creating Route object...")
    self.rt_solver = arcpy.nax.Route(self.network_data_source)

    # Set the Route analysis properties.
    # Read properties from the rt_config.py config file for all properties not set in the UI as parameters.
    # Route properties documentation: https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route.htm
    # The properties have been extracted to the config file to make them easier to find and set so users don't have
    # to dig through the code to change them.
    self.logger.debug("Setting Route analysis properties from RT config file...")
    for prop, value in RT_PROPS.items():
        if prop in RT_PROPS_SET_BY_TOOL:
            self.logger.warning((
                f"Route config file property {prop} is handled explicitly by the tool parameters and will be "
                "ignored."
            ))
            continue
        try:
            setattr(self.rt_solver, prop, value)
            if hasattr(value, "name"):
                self.logger.debug(f"{prop}: {value.name}")
            else:
                self.logger.debug(f"{prop}: {value}")
        except Exception as ex:  # pylint: disable=broad-except
            # Suppress warnings for older services (pre 11.0) that don't support locate settings and services
            # that don't support accumulating attributes because we don't want the tool to always throw a warning.
            if not (self.is_service and prop in [
                "searchTolerance", "searchToleranceUnits", "accumulateAttributeNames"
            ]):
                self.logger.warning(
                    f"Failed to set property {prop} from RT config file. Default will be used instead.")
                self.logger.warning(str(ex))

    # Set properties explicitly specified in the tool UI as arguments
    self.logger.debug("Setting Route analysis properties specified as tool inputs...")
    self.rt_solver.travelMode = self.travel_mode
    self.logger.debug(f"travelMode: {self.travel_mode}")
    self.rt_solver.timeUnits = self.time_units
    self.logger.debug(f"timeUnits: {self.time_units}")
    self.rt_solver.distanceUnits = self.distance_units
    self.logger.debug(f"distanceUnits: {self.distance_units}")
    self.rt_solver.timeOfDay = self.time_of_day
    self.logger.debug(f"timeOfDay: {self.time_of_day}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.RRTFamilySolver = RRTFamilyPathPlanner()\n self.PRMSolver = PRMPathPlanner()", "def __init__(self):\n\n # Set a node name - something relevant\n rospy.init_node('waypoint_updater')\n\n # Most recent pose\n self.pose = None\n\n # Map waypoint list \n self.waypoints = None\n\n # Map waypoint list xy only \n self.waypoints_2d = None\n\n # Map waypoint list xy only as KDTree\n self.waypoint_tree = None\n\n # Index at which to stop the vehicle\n # Negative one is a sentinel meaning no stop is required\n self.stopline_waypoint_idx = -1\n\n # Add subscriptions and handlers for relevant messages\n rospy.Subscriber('/base_waypoints', Lane, self.base_waypoints_cb)\n rospy.Subscriber('/current_pose', PoseStamped, self.current_pose_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_waypoint_cb)\n\n # Create publisher for final waypoints\n self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)\n\n # Start loop\n self.loop()", "def __init__(self):\n self.parameter = [[(0,1),(1,1),(0,0),(1,0)],\n [(1,0),(1,0),(0,1),(0,1)],\n [(1,0),(0,1),(1,0),(0,1)],\n [(0,0),(0,0),(0,0),(0,0)]]\n \"\"\"Distance is number of whole route from origin to destination\"\"\"\n self.distance = 100\n \"\"\"action sets\"\"\"\n self.action = [0,55,75]\n self.maxSpeed = self.action[-1]\n \"\"\"time period for each stage\"\"\"\n self.time_interval = 0.5\n \"\"\"Number of stages we want to check. Here we can use this to limit the\n travel time, since sometimes we want driver to arrive in a time window.\n For example, if stage is 4, that means we want to driver finish route within \n 2 hours. \"\"\"\n self.stage = 4\n self.time_block = 0.5\n self.distance_block = 25", "def __init__(self, num_routes, beta, phi, theta, b, l, u): \r\n self.num_routes = num_routes\r\n self.beta = beta\r\n self.phi = phi\r\n self.theta = theta\r\n self.b = b\r\n self.l = l\r\n self.u = u\r\n\r\n self.lb_totalflow = self.compute_lb_totalflow()\r\n # Initialize the flow over each route to be 0\r\n self.flow = np.zeros(num_routes) \r\n # Initialize the capacity over a route to be 0\r\n self.capacity = np.zeros(num_routes)", "def __init__(self):\n \n self.linksDict = dict()\n self.nodesDict = dict()\n self.stopsByRoute = dict()\n self.stopsByNode = dict()\n self.routeXref = dict()\n self.transitRoutes = dict()\n self.spIndex = None", "def __init__(self) -> None:\n\n #: The underlying route table config file\n self._route_table = None", "def __init__(self, vrpdata):\n self.vrpdata = vrpdata\n self.objective = 0\n self.routes = []\n self.solutionValid = False", "def __init__(self, router):\n self._router = router", "def __init__(__self__, *,\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if disable_bgp_route_propagation is not None:\n pulumi.set(__self__, \"disable_bgp_route_propagation\", disable_bgp_route_propagation)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if resource_group_name is not None:\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if routes is not None:\n 
pulumi.set(__self__, \"routes\", routes)\n if subnets is not None:\n pulumi.set(__self__, \"subnets\", subnets)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(self, router):\n\n self.router = router", "def test_route_init() -> None:\n rschema = RouteSchema(SpecificLocation())\n route = Route(schema=rschema)\n\n assert route.schema is rschema # Cannot use __eq__\n assert isinstance(route.stops, list)", "def _initialize(self, vp):\n\n # Initialize vehicle properties\n self._mass = vp[\"Mass\"]\n\n # Torque coefficients\n self._a = vp[\"Torque Coefficients\"]\n\n # Gear ratio, effective radius and inertia\n self._GR = vp[\"Gear Ratio\"]\n self._r_eff = vp[\"Effective Radius\"]\n self._J_e = vp[\"Inertia\"]\n\n # Aerodynamic and friction coefficients\n self._c_a = vp[\"Aerodynamic Coefficient\"]\n self._c_rl = vp[\"Friction Coefficient\"]\n\n # Tire forces\n self._c = vp[\"C\"]\n self._F_max = vp[\"Max Force\"]\n\n self._L = vp[\"Wheelbase\"]\n (self._min_steering, self._max_steering) = vp[\"Steering\"]\n (self._min_throttle, self._max_throttle) = vp[\"Throttle\"]\n (self._min_braking, self._max_braking) = vp[\"Braking\"]", "def __init__(self, road):\n # TODO: move speed parameter to class Lane?\n self.road = road\n self.source = road.source\n self.target = road.target\n self.leftAdjacent = None\n self.rightAdjacent = None\n self.leftmostAdjacent = None\n self.rightmostAdjacent = None\n self.length = haversine(self.source.center, self.target.center)\n # self.middleLine = None\n self.carsPosition = {}\n\n # self.update()", "def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def __init__(self, project=None):\n HyppopySolver.__init__(self, project)", "def __init__(__self__, *,\n resource_group_name: pulumi.Input[str],\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if disable_bgp_route_propagation is not None:\n pulumi.set(__self__, \"disable_bgp_route_propagation\", disable_bgp_route_propagation)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(self, jobs, stores, deliverers, distances_matrix, evaluator, codec, driver_ends_at_start=True,\n route_initialization_method=\"random\"):\n self.jobs = jobs\n self.stores = stores\n self.deliverers = 
deliverers\n self.distances_matrix = distances_matrix\n self.evaluator = evaluator\n self.solution = None\n self.codec = codec\n self.driver_ends_at_start = driver_ends_at_start\n self.route_initialization_method = route_initialization_method", "def __init__(self):\n self.routingTable = dict()", "def test_solo_route_init() -> None:\n # Test SoloRoute with VirtualClientConnection (ClientConnection) in constructor\n destination = SpecificLocation()\n virtual_server = VirtualServerConnection(node=Node())\n virtual_client = VirtualClientConnection(server=virtual_server)\n h_solo = SoloRoute(destination=destination, connection=virtual_client)\n\n assert h_solo.schema.destination is destination\n assert h_solo.connection is virtual_client", "def __init__(self):\r\n \r\n # World params\r\n self.spawn_distance = 0\r\n\r\n # Nest planning\r\n self.done_init = False\r\n self.wall_set = None\r\n self.planned_nest_set = None\r\n self.nest_completion_set = None\r\n\r\n # Task mapping\r\n self.uuid_task_map = {}\r\n\r\n self.turn = 0", "def __init__(self, route_idx=None, arr_date=None, dep_time=None, lon=None, dep_date=None, track=None, rt_track=None, id=None, rt_dep_time=None, rt_arr_time=None, name=None, arr_time=None, lat=None, rt_dep_date=None, rt_arr_date=None):\n\n self._route_idx = None\n self._arr_date = None\n self._dep_time = None\n self._lon = None\n self._dep_date = None\n self._track = None\n self._rt_track = None\n self._id = None\n self._rt_dep_time = None\n self._rt_arr_time = None\n self._name = None\n self._arr_time = None\n self._lat = None\n self._rt_dep_date = None\n self._rt_arr_date = None\n\n self.route_idx = route_idx\n if arr_date is not None:\n self.arr_date = arr_date\n if dep_time is not None:\n self.dep_time = dep_time\n self.lon = lon\n if dep_date is not None:\n self.dep_date = dep_date\n self.track = track\n if rt_track is not None:\n self.rt_track = rt_track\n self.id = id\n if rt_dep_time is not None:\n self.rt_dep_time = rt_dep_time\n if rt_arr_time is not None:\n self.rt_arr_time = rt_arr_time\n self.name = name\n if arr_time is not None:\n self.arr_time = arr_time\n self.lat = lat\n if rt_dep_date is not None:\n self.rt_dep_date = rt_dep_date\n if rt_arr_date is not None:\n self.rt_arr_date = rt_arr_date", "def __init__(__self__,\n resource_name: str,\n args: RouteMapArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def init_solver(param):\n return param.solver(learning_rate=param.learning_rate,\n beta1=param.beta1,\n beta2=param.beta2)", "def AssignRoutingEngine(self, Routing):\n self.routing = Routing(self.topo)\n self.Qlearning_enable = 0\n if str(Routing) == \"Routing.Qlearning_SpineLeaf.Qlearning\":\n self.Qlearning_enable = 1\n self.state = [0.0]*len(self.topo.GetLinks())\n #print len(self.topo.GetLinks())\n self.reward = [0.0, 0.0]\n self.stateId = 0\n self.logDir = \"LogInfo/\"\n self.logfname = \"StateLog.csv\"\n self.logf = open(self.logDir + self.logfname, \"w\")\n #self.updatenum=0\n\n # We can get path by\n # path_3_5 = self.routing.GetPath(3,5) # result is a list with node ids", "def __init__(self):\n \n rospy.init_node('trajectory_planner', anonymous=True)\n \n rospy.Subscriber('/costmap_2d', OccupancyGrid, self.costmap_callback)\n rospy.Subscriber('/exploration_complete', Bool, self.exploration_complete_callback)\n self.trans_listener = tf.TransformListener()\n \n self.traj_pub = rospy.Publisher('/cmd_path', Path, queue_size=1)\n\tself.auto_goal_pub = rospy.Publisher('/auto_goal', Pose2D, queue_size=1)\n self.exp_complete_pub = 
rospy.Publisher('/exploration_complete', Bool, queue_size=1)\n\n self.number_of_fails = 0", "def __init__(self, env):\n self._env = env\n self._routes = []\n self._proto = None\n self._port = None\n self._state = False\n self._key = None", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def __init__(self, input_field: str, depth: int):\n self.maze = self._file_to_matrix(input_field, depth)\n\n # Determine the rim of the labyrinth.\n self.rim_x = self.maze.shape[2] - 4\n self.rim_y = self.maze.shape[1] - 4\n\n self.graph = nx.Graph()\n\n # Connect Path / points\n self.path_coordinates = np.argwhere(self.maze == PATH)\n self._build_path()\n\n # Determine Portal points and connect them to corresponding\n # other dimension.\n self.merge_portals_to_path()\n self.connect_portals()\n\n start = tuple(np.argwhere(self.maze == \"AA\")[0])\n goal = tuple(np.argwhere(self.maze == \"ZZ\")[0])\n\n try:\n self.shortest_path_length = nx.shortest_path_length(\n self.graph, start, goal)\n except nx.NetworkXNoPath:\n self.shortest_path_length = None", "def solve(\n self,\n initial_routes=None,\n solver=\"cbc\",\n cspy=False,\n exact=True,\n pricing_strategy=\"PrunePaths\",\n ):\n if cspy:\n self.G.graph[\"subproblem\"] = \"cspy\"\n else:\n self.G.graph[\"subproblem\"] = \"lp\"\n print(self.G.graph[\"name\"], self.G.graph[\"subproblem\"])\n print(\"===========\")\n prob = VehicleRoutingProblem(\n self.G,\n duration=self.max_duration,\n load_capacity=self.max_load,\n drop_penalty=self.penalty,\n pickup_delivery=self.activate_pickup_delivery,\n distribution_collection=self.activate_distribution_collection,\n time_windows=self.activate_time_windows,\n )\n prob.solve(\n initial_routes=initial_routes,\n cspy=cspy,\n exact=exact,\n pricing_strategy=pricing_strategy,\n solver=solver,\n )\n self.best_value, self.best_routes = prob.best_value, prob._best_routes_as_graphs\n self.best_routes_nodes = prob.best_routes", "def __init__(self, params):\n self.time = timedelta()\n self._cars = []\n self._enterQueue = []\n self._lights = []\n self._road = Road()\n self._lastCarGenerationTime = timedelta()\n self.params = params\n self._loggerName = 'kts46.roadModel'\n self._logger = logging.getLogger(self._loggerName)\n self._lastCarId = -1", "def _instantiate_pathway(self, context):\n # DOCUMENT: Projections SPECIFIED IN A PATHWAY MUST BE A MappingProjection\n # DOCUMENT:\n # Each item in Pathway can be a Mechanism or Projection object, class ref, or specification dict,\n # str as name for a default Mechanism,\n # keyword (IDENTITY_MATRIX or FULL_CONNECTIVITY_MATRIX) as specification for a default Projection,\n # or a tuple with any of the above as the first item and a param dict as the second\n pathway = self.paramsCurrent[PATHWAY]\n self._mech_tuples = []\n self._monitoring_mech_tuples = []\n self._target_mech_tuples = []\n\n from PsyNeuLink.Globals.Run import _get_unique_id\n\n self._standardize_config_entries(pathway=pathway, context=context)\n\n # VALIDATE PATHWAY THEN PARSE AND INSTANTIATE MECHANISM ENTRIES ------------------------------------\n self._parse_and_instantiate_mechanism_entries(pathway=pathway, context=context)\n\n # Identify origin and terminal mechanisms in the process and\n # and assign the mechanism's status in the process to its entry in the mechanism's processes dict\n self.firstMechanism = pathway[0][OBJECT_ITEM]\n self.firstMechanism.processes[self] = ORIGIN\n self._origin_mech_tuples = [pathway[0]]\n self.originMechanisms = MechanismList(self, 
self._origin_mech_tuples)\n\n self.lastMechanism = pathway[-1][OBJECT_ITEM]\n if self.lastMechanism is self.firstMechanism:\n self.lastMechanism.processes[self] = SINGLETON\n else:\n self.lastMechanism.processes[self] = TERMINAL\n self._terminal_mech_tuples = [pathway[-1]]\n self.terminalMechanisms = MechanismList(self, self._terminal_mech_tuples)\n\n # # Assign process outputState to last mechanisms in pathway\n # self.outputState = self.lastMechanism.outputState\n\n # PARSE AND INSTANTIATE PROJECTION ENTRIES ------------------------------------\n\n self._parse_and_instantiate_projection_entries(pathway=pathway, context=context)\n\n self.pathway = pathway\n\n self._instantiate__deferred_inits(context=context)\n\n if self.learning:\n self._check_for_target_mechanism()\n if self.targetMechanism:\n self._instantiate_target_input()\n self._learning_enabled = True\n else:\n self._learning_enabled = False\n\n self._allMechanisms = MechanismList(self, self._mech_tuples)\n self.monitoringMechanisms = MechanismList(self, self._monitoring_mech_tuples)\n self.targetMechanisms = MechanismList(self, self._target_mech_tuples)", "def __init__(self, waypoints: Tuple[Waypoint]):\n self._waypoints = waypoints", "def __init__(self, waypoints: Tuple[Waypoint]):\n self._waypoints = waypoints", "def __init__(self, name=\"\"):\n # Name of the SMT solver that this object represents.\n self.name = name", "def __init__(self, root_handler=None):\n self.root = RouteTrieNode(root_handler)", "def __init__(self,\n world: carla.World,\n spawn_location: carla.Location,\n debug=True,\n verbose=True,\n ):\n self.world = world\n self.map = self.world.get_map()\n self.debug_helper = self.world.debug\n\n # get junction by ego spawn waypoint\n self.junction = None\n spawn_waypoint = self.map.get_waypoint(spawn_location,\n project_to_road=True, # must set to True, center of lane\n )\n self.get_junction_by_route(spawn_waypoint)\n\n # # route in list of tuple\n # self.transform_route = []\n # self.waypoint_route = []\n # self.location_route = []\n\n # debug\n self.debug = debug\n # verbose option to visualize everything\n self.verbose = verbose", "def __init__(self):\n\n if not os.path.exists('topology.json'):\n print('Could not find topology object!!!\\n')\n raise Exception\n\n self.topo = load_topo('topology.json')\n self.controllers = {}\n self.init()\n\n # sorted by timeouts\n self.current_reservations = {}\n # initial link capacity\n self.links_capacity = self.build_links_capacity()\n\n self.update_lock = threading.Lock()\n self.timeout_thread = threading.Thread(target=self.reservations_timeout_thread, args=(1, ))\n self.timeout_thread.daemon = True\n self.timeout_thread.start()", "def __init__(self):\n self.mu = None\n self.type = None\n self.direction = None\n self.eq_points = None\n self.init_conds = None\n self.time = None\n self.trajectory = None\n self.initial_point = None\n self.contour_levels = []", "def init(a: str, h: str, c: str, r: bool, A: str, lock: Lock) -> None:\n global host, action, report, router, algorithm\n\n action = a\n algorithm = A\n\n if r:\n report = r\n if h:\n host = h\n if c:\n lock.acquire()\n try:\n router = PyOSRM(c, use_shared_memory=False, algorithm=algorithm)\n LOGGER.debug(\"Router instantiated\")\n finally:\n lock.release()", "def AssignRoutingEngine(self, Routing):\r\n self.routing = Routing(self.topo)\r\n # We can get path by\r\n # path_3_5 = self.routing.GetPath(3,5) # result is a list with node ids\r", "def __init__(self, node_dict={}, link_dict={}, routing_method = 'Dijkstra'):\n\n 
topo_name = \"geant\"\n #preprocess_metadata(topo_name)\n\n # Get nodes and links data\n current_dir = os.path.dirname(__file__)\n db_path = os.path.join(current_dir, topo_name , topo_name + \"DB\")\n node_dict = read_from_json(db_path + \"/nodes.json\")\n link_dict = read_from_json(db_path + \"/links.json\")\n\n super().__init__(name=topo_name, node_dict=node_dict, link_dict=link_dict, routing_method=routing_method)", "def initialization_step(self):\n # Update where agents are\n self.update_agent_location_vector()\n # update task locations\n self.update_task_location_vector()\n # update deadlines\n self.populate_deadline_vector()\n # update distances to each task and orientation to each task\n self.update_agent_distances_vector()\n self.update_agent_orientation_vector()", "def initialise(self, road, cars, speed, grid):\n self.next_features = self.buildFeatures(road, cars, speed, grid)\n if not self.init:\n self.thetas = np.ones(self.next_features.shape[0])/self.next_features.shape[0]\n #accelerate action bias\n self.thetas[0] += self.thetas[0]\n self.init = True\n # Reset the total reward for the episode\n self.total_reward = 0", "def __init__(self, handler=None):\n self.root = RouteTrieNode(handler=handler)", "def __init__(self, **kwargs):\r\n self.pair_type = kwargs[\"pair_type\"]\r\n self.origins = kwargs[\"origins\"]\r\n self.origin_id_field = kwargs[\"origin_id_field\"]\r\n self.destinations = kwargs[\"destinations\"]\r\n self.dest_id_field = kwargs[\"dest_id_field\"]\r\n self.network_data_source = kwargs[\"network_data_source\"]\r\n self.travel_mode = kwargs[\"travel_mode\"]\r\n self.time_units = kwargs[\"time_units\"]\r\n self.distance_units = kwargs[\"distance_units\"]\r\n self.time_of_day = kwargs[\"time_of_day\"]\r\n self.reverse_direction = kwargs[\"reverse_direction\"]\r\n self.scratch_folder = kwargs[\"scratch_folder\"]\r\n self.assigned_dest_field = kwargs[\"assigned_dest_field\"]\r\n self.od_pair_table = kwargs[\"od_pair_table\"]\r\n self.origin_transfer_fields = kwargs[\"origin_transfer_fields\"]\r\n self.destination_transfer_fields = kwargs[\"destination_transfer_fields\"]\r\n self.barriers = []\r\n if \"barriers\" in kwargs:\r\n self.barriers = kwargs[\"barriers\"]\r\n\r\n # Create a job ID and a folder for this job\r\n self._create_job_folder()\r\n\r\n # Setup the class logger. 
Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"RoutePairs\")\r\n\r\n # Get field objects for the origin and destination ID fields since we need this in multiple places\r\n self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]\r\n self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]\r\n\r\n # Set up other instance attributes\r\n self.is_service = helpers.is_nds_service(self.network_data_source)\r\n self.rt_solver = None\r\n self.solve_result = None\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.input_origins_layer_obj = None\r\n self.input_dests_layer_obj = None\r\n self.origin_unique_id_field_name = \"OriginUniqueID\"\r\n self.dest_unique_id_field_name = \"DestinationUniqueID\"\r\n self.od_pairs = None\r\n\r\n # Create a network dataset layer if needed\r\n if not self.is_service:\r\n self._make_nds_layer()\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputRoutes\": \"\",\r\n \"logFile\": self.log_file\r\n }", "def setup_solver(self):\n option = Options()\n if logger.getEffectiveLevel() == logging.DEBUG:\n # option.printLevel = PrintLevel.HIGH\n option.printLevel = PrintLevel.NONE\n else:\n option.printLevel = PrintLevel.NONE\n self.solver_minimizing = SQProblem(self.nV, self.nC)\n self.solver_minimizing.setOptions(option)\n self.solver_maximizing = SQProblem(self.nV, self.nC)\n self.solver_maximizing.setOptions(option)\n\n self.solver_minimizing_recent_index = -2\n self.solver_maximizing_recent_index = -2", "def __init__(self, *args):\n _hypre.HypreTriSolve_swiginit(self, _hypre.new_HypreTriSolve(*args))", "def initialize(self):\n self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def __init__(self):\n self.x_coord = default_init\n self.y_coord = default_init\n self._init_random_coord() # generating random coordinates\n self.x_speed = default_init\n self.y_speed = default_init\n self.degrees = default_init\n self.radius = ship_def_radius", "def __init__(self, nav,\n waypoint=ll.LatLon(50.742810, 1.014469), # somewhere in the solent\n target_radius=2, waypoint_id=None,\n ):\n self.nav = nav\n self.waypoint = waypoint\n self.waypoint_id = waypoint_id\n x, y = self.nav.latlon_to_utm(waypoint.lat.decimal_degree, waypoint.lon.decimal_degree)\n self.waypoint_xy = Point(x, y)\n self.target_area = self.waypoint_xy.buffer(target_radius)", "def initialize(self):\n \n # lumopt.figures_of_merit.modematch object need initialization and\n # forward setting. 
h is a spacemap.utilities.simulation object\n if self._fom_type == 'ModeMatch':\n self.fom.initialize(self._ha)\n self.fom.make_forward_sim(self._ha)", "def __init__(self, nodes, fast=False):\n self.nodes = nodes\n self.fast = fast\n\n self.initial_path = nodes\n self.initial_cost = self.pathCost(nodes)\n # Do not save the initial path as it is not optimised\n self.heuristic_path = self.initial_path\n self.heuristic_cost = self.initial_cost", "def __init__(self):\n super().__init__()\n self.waypoint_vector = [-1, 10]", "def __init__(self, path, reference_structure=None, distorted_structure=None, vasp_run_inputs_dictionary=None):\n\n\t\tself.path = Path.expand(path)\n\n\t\tif (reference_structure == None) or (distorted_structure == None) or (vasp_run_inputs_dictionary == None):\n\t\t\tself.load()\n\t\telse:\n\n\t\t\tStructure.validate(reference_structure)\n\t\t\tStructure.validate(distorted_structure)\n\n\t\t\tif not reference_structure.lattice.equals(distorted_structure.lattice):\n\t\t\t\traise Exception(\"Warning: It's very difficult to interpret polarization results when the lattices of the reference and distorted structures are not equal. This is likely an error.\", reference_structure.lattice, distorted_structure.lattice)\n\n\n\t\t\tself.reference_structure = reference_structure\n\t\t\tself.distorted_structure = distorted_structure\n\t\t\tself.vasp_run_inputs = copy.deepcopy(vasp_run_inputs_dictionary)\n\t\t\tself.vasp_run_list = []\n\n\t\t\tself.save()\n\n\t\tPath.make(path)\n\n\t\tself.initialize_vasp_runs()", "def test_parse_routes(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n problem = problem_builder.build(riders, vehicles, depots)\n model = model_builder.build(problem)\n solution = model.solve()\n routes = Router._parse_routes(problem, solution)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def __init__(__self__,\n resource_name: str,\n args: RouteTableArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, mapper):\n self.map = mapper\n self._router = routes.middleware.RoutesMiddleware(self._dispatch,\n self.map)", "def __init__(self):\n self.action_server = actionlib.SimpleActionServer(\"navigate_2D_action\",\n Navigate2DAction, self.navigate_cb)\n\n self.robot_point_sub = rospy.Subscriber(\"robot/point\", Point, self.update_robot_position)\n self.robot_current_point = None\n self.robot_goal_point = None\n self.distance_threshold = 0.35\n self.feedback_rate = rospy.Rate(1)", "def RoutingInterfaceInitialize(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def __init__(self):\n rospy.init_node('approach')\n\n rospy.Subscriber('/scan', LaserScan, self.scan_callback)\n self.vel_pub = rospy.Publisher('/cmd_vel_mux/input/navi', Twist,\n queue_size=10)\n self.scan = None", "def __init__(self, name, waypoints, position2d_proxy, waypoint_distance_tolerance):\n\n self.name = name\n self.waypoints = waypoints\n self.pp = position2d_proxy\n self.waypoint_distance_tolerance = 
waypoint_distance_tolerance\n\n self.active_waypoint_index = 0\n self.active_waypoint = self.waypoints[self.active_waypoint_index]\n self.first_update = True\n self.finished = False\n self.last_read = None", "def __init__(self, solution, **kwargs):\n self.solution = solution # set solution\n self.parameters = kwargs # set solution parameters", "def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space_graph):\n self.AGENT_NAME = 'Helper'\n self.AGENT_MOVEMENT_TYPE = 'Absolute' # Note the helper needs absolute movements\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = StateSpace()\n self.solution_report = solution_report; # Python is call by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)", "def __init__(self, CalcType=CALC_TYPE_SRMP_TO_ARM, SR=\"\", RRT=\"\",\n RRQ=\"\", ABIndicator=\"\", ReferenceDate=None, ARM=0,\n SRMP=0, ResponseDate=None, TransId=None):\n # Calculation Type. 0 = SRMP to ARM, 1 = ARM to SRMP *\n self.CalcType = CalcType\n # Three digit state route ID. *\n self.SR = SR\n # Related Route Type *\n self.RRT = RRT\n # Related Route Qualifier *\n self.RRQ = RRQ\n # Ahead / Back indicator for SRMP. \"A\" or null, or \"B\" *\n self.ABIndicator = ABIndicator\n # Input data collection date *\n self.ReferenceDate = ReferenceDate\n # Accumulated Route Mileage. Actual measure *\n self.ARM = ARM\n # State Route Milepost - Posted milepost. May not match actual measure\n # due to route adjustments over time. *\n self.SRMP = SRMP\n # Output date. Use self to match an LRS publication date. *\n self.ResponseDate = ResponseDate\n # Transaction ID. Use a unique ID with batch results. 
*\n self.TransId = TransId", "def __init__(self):\n\n self.gm = GradientMapper()\n self.im = SpringMapper()\n self.fm = FullMapper(self.im, self.gm)\n # self.lm = LineMapper(self.fm)\n self.exit = False", "def RoutingInterfaceInitialize(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def __init__(self, variables, constraints):\n self.__variables = variables\n self.__constraints = constraints\n\n self.__make_node_consistent()", "def office_setup_solver(parser, args, params):\n parser.parse_known_args(args)\n control.setup_solver(params)", "def __init__(self, sparse_args=None, solve=True):\n self.solved = False\n self.sparse_args = sparse_args\n self.solved = False\n if solve: self.solve()", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteTableRouteArgs']]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def test_route_schema_init() -> None:\n destination = SpecificLocation()\n rschema = RouteSchema(destination)\n\n assert rschema.destination is not None\n assert rschema.destination._id == destination._id", "def __init__(self):\r\n\t\tself.params = arcpy.GetParameterInfo()\r\n\r\n\t\tself.input_raster = self.params[0]\r\n\t\tself.approach = self.params[1]\r\n\t\tself.predefined_pattern = self.params[2]\r\n\t\tself.pattern_workspace = self.params[3]\r\n\t\tself.point_matrix_size = self.params[4]\r\n\t\tself.point_vectors = self.params[5]\r\n\t\tself.mapping_field = self.params[6]\r\n\t\tself.move_to_max = self.params[7]\r\n\t\tself.move_to_max_distance = self.params[8]\r\n\t\tself.mh_iteration = self.params[9]\r\n\t\tself.mh_dil_val = self.params[10]\r\n\t\tself.mh_dil_start = self.params[11]\r\n\t\tself.mh_dil_stop = self.params[12]\r\n\t\tself.mh_dil_step = self.params[13]\r\n\t\tself.transform = self.params[14]\r\n\t\tself.size_of_the_cell = self.params[15]\r\n\t\tself.output_sim_matrix = self.params[16]\r\n\t\tself.output_table = self.params[17]\r\n\t\tself.output_raster_workspace = self.params[18]", "def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space):\n self.AGENT_MOVEMENT_TYPE = 'Discrete' # HINT: You can change this if you want {Absolute, Discrete, Continuous}\n self.AGENT_NAME = 'Simple'\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = state_space\n self.solution_report = solution_report # Python calls by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)", "def _set_init(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n ## Auxiliar information\n self.ks = None\n self.iss = [0]\n ## Class structural information\n self._setted = False\n self._constant_rel_pos = False\n self.staticneighs = None\n self.staticneighs_set = None", "def __init__(self):\n super(RouteLayer, self).__init__()\n\n routes = [(\"^/ping\", views.ping),\n (\"^/e(co)?(?P<eco_message>[^$]+)$\", views.echo),\n (\"^/p(iada)?\\s*$\", views.get_piada)]\n\n routes.extend(MediaViews(self).routes)\n 
routes.extend(StaticViews(self).routes)\n # routes.extend(GroupAdminViews(self).routes)\n\n self.views = [(re.compile(pattern), callback) for pattern, callback in routes]", "def __init__(self, params):\n\n # check if the ode is provided. If not, raise an error\n\n if \"ode\" not in params.keys():\n raise ValueError(\"Please specify the ODE to solve for the Integrator class\")\n else:\n self.rhs = params[\"ode\"]\n\n if \"type\" not in params.keys():\n params[\"type\"] = \"dopri5\" # set default to RK45\n\n if params[\"type\"] not in [\"dopri5\", \"dop853\"]:\n raise ValueError(\n \"Please specify the correct type of RK solver, dopri5 for RK45, dop853 for RK853\"\n )\n\n if \"rtol\" not in params.keys():\n params[\"rtol\"] = 1e-7 # set to default value\n self.rtol = params[\"rtol\"]\n\n if \"args\" not in params.keys():\n params[\"args\"] = None\n self.args = params[\"args\"]\n\n # set up the integrator\n self.integrator = ode(self.rhs).set_integrator(\n params[\"type\"], rtol=params[\"rtol\"]\n )\n\n super().__init__(params)", "def init(self):\n\n seen_ports = set()\n next_ports = deque([self.In()])\n parameters = set()\n\n while next_ports:\n\n # Get the next port and update seen ports\n port = next_ports.popleft()\n if port in seen_ports:\n continue\n seen_ports.add(port)\n\n # Figure out the next port to check\n block = port.block\n\n # If the port has upstream then it is the port we check\n # Otherwise, In case the block is atomic, we pass to its inputs\n if port.upstream:\n next_ports.extend(port.upstream)\n elif block.atomic:\n next_ports.extend(block.In)\n\n # Add parameters if the block has them\n if isinstance(block, TorchModule):\n parameters.update(map(lambda x: x.tensor, ParameterIterator(block)))\n\n # Sets the optimizer and initializes the LR scheduler\n self.__optimizer = self.__optimizer_fn(parameters)\n self.__scheduler.init(self.__optimizer)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(InitialSceneRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.numberOfTSPTurtles is None:\n self.numberOfTSPTurtles = 0\n else:\n self.numberOfTSPTurtles = 0", "def __init__(self, ox, oy, reso, rr, start, goal):\n self.reso = reso\n self.rr = rr\n self.calc_obstacle_map(ox, oy)\n self.motion = self.get_motion_model()\n self.start = start\n self.now_pose = self.start\n self.goal = goal\n\n self.Inf = 10000\n self.queue = []\n self.km = 0", "def __init__(self, logging=True):\n self.matrix_creator = MatrixCreator()\n self.matrix_computer = MatrixComputer()\n self.equation_parser = EquationParser()\n self.balancing_validator = BalancingValidator(logging=logging)\n self.logger = Logger(active=logging)", "def __init__(self, port):\n pjGateWay.__init__(self, port)\n self.ga = self.way.entry_point", "def __init__(self, problem, options, tr_method):\n\n # User-defined optimal-control problem\n self.problem = problem\n\n # General options\n self.options = options\n\n # Transcription method\n self.tr_method = tr_method\n\n # Construction of the lower and upper constraints boundaries\n self.low, self.upp = self.build_constraints_boundaries()", "def __init__(self, mass, jsa, length):\n RigidBody.counter += 1\n self.length = length\n self.ID = RigidBody.counter\n self.mass_matrix = None\n self.constraints = []\n self.symbolic_variables = []\n self.r_i = BaseObject('r_i', self.ID)\n self.r_j = BaseObject('r_j', self.ID)\n self.u = BaseObject('u', self.ID)\n self.v = BaseObject('v', self.ID)\n self.base_points = 
[self.r_i,\n self.r_j]\n self.base_vectors = [self.u,\n self.v]\n self.r_i.local_coordinates = np.array([0, 0, 0])\n self.r_j.local_coordinates = np.array([0, 0, length])\n self.r_i.global_coordinates = np.array([0, 0, 0])\n self.r_j.global_coordinates = np.array([0, 0, length])\n self.u.local_coordinates = np.array([1, 0, 0])\n self.v.local_coordinates = np.array([0, 1, 0])\n self.u.global_coordinates = np.array([1, 0, 0])\n self.v.global_coordinates = np.array([0, 1, 0])\n self.r_g_loc = np.array([0, 0, 0.5 * length])\n self.make_symbolic_variables()\n self.calculate_mass_matrix(mass, jsa)\n self.rigid_body_constraints()", "def __init__(self,\n vehicle_body: str,\n initial_time: float,\n parameter_vector: list):\n print('Initializing guidance...')\n # Set arguments as attributes\n self.vehicle_body = vehicle_body\n self.initial_time = initial_time\n self.parameter_vector = parameter_vector\n self.thrust_magnitude = parameter_vector[0]\n self.time_interval = parameter_vector[1]\n # Prepare dictionary for thrust angles\n self.thrust_angle_dict = {}\n # Initialize time\n current_time = initial_time\n # Loop over nodes\n for i in range(len(parameter_vector) - 2):\n # Store time as key, thrust angle as value\n self.thrust_angle_dict[current_time] = parameter_vector[i + 2]\n # Increase time\n current_time += self.time_interval\n # Create interpolator settings\n interpolator_settings = interpolators.linear_interpolation(\n boundary_interpolation=interpolators.use_boundary_value)\n # Create the interpolator between nodes and set it as attribute\n self.thrust_angle_interpolator = interpolators.create_one_dimensional_interpolator(self.thrust_angle_dict,\n interpolator_settings)", "def __init__(self, robot, x, y, rz, radius=0.15, frame_id=\"map\"):\n super(NavigateToPose, self).__init__(robot, lambda: pose_constraints(x, y, rz, radius, frame_id))", "def __init__(self, root_handler, path_not_found):\n if self.__test_path(root_handler) and self.__test_path(path_not_found):\n self.route_trie = RouteTrie(root_handler) # Passes root handler to the initialised trie\n self.path_not_found = path_not_found # Stores 404 not found response", "def __init__(self, agent_host, agent_port, mission_type, mission_seed, solution_report, state_space_graph):\n self.AGENT_MOVEMENT_TYPE = 'Continuous'\n self.AGENT_NAME = 'Random'\n\n self.agent_host = agent_host\n self.agent_port = agent_port\n self.mission_seed = mission_seed\n self.mission_type = mission_type\n self.state_space = state_space\n self.solution_report = solution_report # Python makes call by reference !\n self.solution_report.setMissionType(self.mission_type)\n self.solution_report.setMissionSeed(self.mission_seed)", "def __init__(self):\n rospy.init_node('square')\n rospy.Subscriber('/odom', Odometry, self.processOdom)\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\n\n self.sleepy = rospy.Rate(2)\n\n # make dictionary that calls functions\n self.state = {'i':self.forward, ',':self.backward,\n 'l':self.rightTurn, 'j':self.leftTurn,\n 'k':self.stop}\n\n self.x = 0 # position in meters\n self.y = 0 # position in meters\n self.z = 0 # angle in degrees\n self.desiredX = 0\n self.desiredY = 0\n self.desiredZ = 0\n\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.sendMessage()\n\n self.start = time()\n\n # get key interupt things\n self.settings = termios.tcgetattr(sys.stdin)\n self.key = None", "def __init__(self, namespace, waypoints, update_frequency=10.):\n self.current_mode = 
''\n self.previous_mode = ''\n self.namespace = namespace['name']\n self.battery_rate_mean = 1.0\n self.battery_rate_std = 1.0\n self.battery_voltages = list()\n self.low_battery = False\n self.set_battery(namespace['max_fuel'], namespace['min_fuel'],\n namespace['fuel_rate'])\n self._cancel_action = False\n self.external_intervened = False\n self.state = State()\n self.home = HomePosition()\n self.global_pose = NavSatFix()\n self.local_pose = PoseStamped()\n self.heading = 0.0\n self.waypoints = [None]\n self._current_wp = -1\n self._radius = 1e-5\n self._rate = rospy.Rate(update_frequency)\n # UAV specific variables\n self.irr_name = namespace['irr_attached']\n self._irr_ready_to_be_picked = 0\n self.landed = True\n self.home_moved = False\n self.rel_alt = 0.\n self.rangefinder = -1.\n self._alt_radius = 0.5\n self._rel_alt = [0. for _ in range(5)]\n self._rangefinder = [-1. for _ in range(5)]\n self._min_range = -1.\n self.deploy_msg = Int64()\n self.target_heading = [0.0 for _ in range(5)]\n self.target_global_pose = [NavSatFix() for _ in range(5)]\n self.target_imu = [Imu() for _ in range(5)]\n # LHM Controller\n if namespace['retrieve_system'] and (\n \"simulation\" not in rospy.get_param(\"~scenario_type\",\n \"simulation\")):\n self.lhm = LHMExecutor(self.namespace, update_frequency)\n if \"simulation\" in rospy.get_param(\"~scenario_type\", \"simulation\"):\n self.blade_pose = [[0., 0., 0.] for _ in range(10)]\n rospy.Subscriber('/%s/edge_wt_detector' % self.namespace,\n PoseArray,\n self._wt_cb,\n queue_size=1)\n # simulated winch system\n self._lhm_pub = rospy.Publisher('/attach_plugin/attach',\n String,\n queue_size=3)\n\n # Subscribers\n rospy.Subscriber('/%s/mavros/state' % self.namespace,\n State,\n self._state_cb,\n queue_size=1)\n # halt until mavros is connected to a uav\n rospy.loginfo('Waiting for a connection to %s ...' % self.namespace)\n while (not self.state.connected):\n self._rate.sleep()\n rospy.Subscriber('/%s/mavros/home_position/home' % self.namespace,\n HomePosition,\n self._home_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/global_position/rel_alt' % self.namespace,\n Float64,\n self._relative_alt_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/modified_battery' % self.namespace,\n BatteryState,\n self._battery_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/global_position/raw/unfix' %\n self.namespace,\n NavSatFix,\n self._global_pose_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/local_position/pose' % self.namespace,\n PoseStamped,\n self._local_pose_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/rangefinder/rangefinder' % self.namespace,\n Range,\n self._rangefinder_cb,\n queue_size=1)\n rospy.Subscriber('/%s/mavros/global_position/compass_hdg' %\n self.namespace,\n Float64,\n self._heading_cb,\n queue_size=1)\n\n # Service proxies\n rospy.loginfo('Waiting for /%s/mavros/cmd/set_home ...' %\n self.namespace)\n rospy.wait_for_service('/%s/mavros/cmd/set_home' % self.namespace)\n self._set_home_proxy = rospy.ServiceProxy(\n '/%s/mavros/cmd/set_home' % self.namespace, CommandHome)\n\n rospy.loginfo('Waiting for /%s/mavros/set_mode ...' % self.namespace)\n rospy.wait_for_service('/%s/mavros/set_mode' % self.namespace)\n self._set_mode_proxy = rospy.ServiceProxy(\n '/%s/mavros/set_mode' % self.namespace, SetMode)\n rospy.loginfo('Waiting for /%s/mavros/cmd/takeoff ...' 
%\n self.namespace)\n rospy.wait_for_service('/%s/mavros/cmd/takeoff' % self.namespace)\n self._takeoff_proxy = rospy.ServiceProxy(\n '/%s/mavros/cmd/takeoff' % self.namespace, CommandTOL)\n # Publisher\n self._setpoint_pub = rospy.Publisher('/%s/mavros/setpoint_raw/global' %\n self.namespace,\n GlobalPositionTarget,\n queue_size=1)\n\n rospy.sleep(3)\n self.set_current_location_as_home()\n # Adding initial waypoints' configuration\n while self.waypoints[0] is None:\n self._rate.sleep()\n self.waypoints = self.waypoints + waypoints\n # Auto call functions\n rospy.Timer(self._rate.sleep_dur, self.update_wp_position)\n rospy.Timer(self._rate.sleep_dur, self.update_landing_status)\n rospy.Timer(10 * self._rate.sleep_dur, self.intervene_observer)\n # change mode just to fill self.current_mode and self.previous_mode\n self.guided_mode()", "def __init__(self, horizon, deadline, theta, my_graph, solver_type='central', timeout=30*60):\n\n # objective function value for each time the solver runs\n self.obj_value = {}\n # solving time for each time the solver runs\n self.solve_time = {}\n # gap for each time the solver runs\n self.gap = {}\n\n self.x_s = {}\n\n self.belief = {}\n\n # horizon can be mutable in future experiments\n self.horizon = dict()\n self.horizon[0] = horizon\n\n # input parameters (immutable)\n self.g = None\n self.g = my_graph\n self.V, self.n = ext.get_set_vertices(my_graph)\n\n self.theta = theta\n self.deadline = deadline\n\n self.solver_type = solver_type\n self.timeout = timeout\n\n self.threads = {}\n\n self.gamma = 0.99", "def __init__ (self):\n self.start_timer() # Starts calling handle_timer() at correct rate\n self.neighbours = dict()\n self.routesToDest = dict()\n self.hosts = dict()", "def main():\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)", "def __init__(self, model, **kwargs):\n super(CpoSolver, self).__init__()\n self.agent = None\n self.process_infos = CpoProcessInfos()\n self.cpostr = None\n self.expr_map = None\n self.blackbox_map = None\n self.last_result = None\n self.status = STATUS_IDLE\n self.status_lock = threading.Lock()\n self.listeners = []\n self.callbacks = []\n self.operation = None\n self.abort_supported = False\n self.model_published = False\n self.model_sent = False\n self.callbacks_registered = False\n\n # Build effective context from args\n # OO's version\n # context = config._get_effective_context(**kwargs)\n # context.params = model.merge_with_parameters(context.params)\n ## trying to fix CP#303\n ctx = config._get_effective_context()\n if model.parameters:\n ctx.params.set_other(model.parameters)\n ctx = config._get_effective_context(context=ctx, **kwargs)\n\n # If defined, limit the number of threads\n mxt = ctx.solver.max_threads\n if isinstance(mxt, int):\n # Maximize number of workers\n nbw = ctx.params.Workers\n if (nbw is None) or (nbw > mxt):\n ctx.params.Workers = mxt\n print(\"WARNING: Number of workers has been reduced to \" + str(mxt) + \" to comply with platform limitations.\")\n\n # Save attributes\n self.model = model\n self.context = ctx\n\n # Determine appropriate solver agent\n 
self.agent = self._get_solver_agent()\n self.abort_supported = self.agent._is_abort_search_supported()\n\n # Add configured default listeners if any\n # Note: calling solver_created() is not required as it is done by add_listener().\n lstnrs = ctx.solver.listeners\n if lstnrs is not None:\n if is_array(lstnrs):\n for lstnr in lstnrs:\n self._add_listener_from_class(lstnr)\n else:\n self._add_listener_from_class(lstnrs)", "def _set_solver(self):\n self.solver = Solver.select_solver(self.method, self.solver_args)\n if self.method.lower()==\"elastic-net\":\n self.solver.elements=self.basis.elements", "def __init__(self, A, r):\n import numpy as np\n\n self.INFINITY = np.Inf\n self.init_adjacency(A)\n self.remove_route(A, r)\n self.degree = np.zeros(len(A))\n self.tree_nbr= [[] for x in A]", "def _initComponent(self):\n\n self.optimizer = self._initOptimizer()\n self.scheduler = self._initScheduler()", "def __init__(self,\n coefficients=None, traces=None, info=None, system=None,\n residuals=None\n ):\n assert coefficients is not None or traces is not None, \\\n \"\"\"Either coefficients or traces must be supplied to the\n Solutions constructor\"\"\"\n self._coefficients = coefficients\n self._traces = traces\n self.info = info\n self.system = system\n self.residuals = residuals", "def initialize_solution(self, graph):\n start = self.get_starting_node(graph)\n return Solution(graph, start, ant=self)", "def __init__(\n self, router_name=None, disable_auto_logout=None, enable_validation_code=None\n ):\n self.router_name = router_name\n self.disable_auto_logout = disable_auto_logout\n self.enable_validation_code = enable_validation_code", "def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route" ]
[ "0.70928293", "0.6806993", "0.67323405", "0.6620287", "0.65866923", "0.6567693", "0.6503277", "0.6323346", "0.6312729", "0.62945217", "0.6200703", "0.6181314", "0.61419207", "0.6131141", "0.6076377", "0.6046717", "0.6043842", "0.59901327", "0.59900606", "0.5955468", "0.59459114", "0.5925245", "0.58918774", "0.58797276", "0.58737445", "0.58725286", "0.5867947", "0.5865042", "0.585659", "0.5854621", "0.58442503", "0.58417624", "0.58417624", "0.5841436", "0.5826466", "0.58216465", "0.581621", "0.5797767", "0.57897145", "0.5787672", "0.5779846", "0.57595396", "0.5757431", "0.57486033", "0.5743045", "0.57299566", "0.57170737", "0.5712534", "0.5702716", "0.5692845", "0.5681499", "0.568093", "0.5676226", "0.5668018", "0.564562", "0.56437355", "0.56365055", "0.5635555", "0.56344277", "0.56273985", "0.5618601", "0.5617159", "0.5614052", "0.56107557", "0.56031424", "0.559659", "0.5596092", "0.5587338", "0.5581561", "0.55812377", "0.5579615", "0.557918", "0.5549239", "0.5544433", "0.5536568", "0.5536419", "0.55346173", "0.5527367", "0.55267394", "0.55159235", "0.55137116", "0.55127263", "0.5511977", "0.55061114", "0.5501614", "0.54971296", "0.548844", "0.5484429", "0.54804385", "0.546982", "0.5468853", "0.5466287", "0.54660064", "0.5457376", "0.54525805", "0.5448101", "0.5446644", "0.5445878", "0.5435685", "0.5426938" ]
0.75314903
0
Add fields to input Stops with the origin and destination's original unique IDs.
def _add_unique_id_fields(self):
    """Add fields to input Stops with the origin and destination's original unique IDs."""
    # Map each arcpy field type to the matching field-definition type keyword.
    field_types = {"String": "TEXT", "Single": "FLOAT", "Double": "DOUBLE",
                   "SmallInteger": "SHORT", "Integer": "LONG", "OID": "LONG"}
    origin_field_def = [self.origin_unique_id_field_name, field_types[self.origin_id_field_obj.type]]
    if self.origin_id_field_obj.type == "String":
        # Text fields additionally carry an alias and a length.
        origin_field_def += [self.origin_unique_id_field_name, self.origin_id_field_obj.length]
    dest_field_def = [self.dest_unique_id_field_name, field_types[self.dest_id_field_obj.type]]
    if self.dest_id_field_obj.type == "String":
        dest_field_def += [self.dest_unique_id_field_name, self.dest_id_field_obj.length]
    self.rt_solver.addFields(arcpy.nax.RouteInputDataType.Stops, [origin_field_def, dest_field_def])
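A minimal standalone sketch of the field-definition pattern used above, runnable without arcpy; the `SimpleNamespace` stand-in for the arcpy field object and the sample length are assumptions for illustration (the field name "OriginUniqueID" matches the one set elsewhere in this codebase):

from types import SimpleNamespace

# Hypothetical stand-in for an arcpy Field object; only .type and .length are read.
origin_id_field = SimpleNamespace(type="String", length=50)

FIELD_TYPES = {"String": "TEXT", "Single": "FLOAT", "Double": "DOUBLE",
               "SmallInteger": "SHORT", "Integer": "LONG", "OID": "LONG"}

def build_field_def(field_name, id_field):
    # Base definition is [name, type]; text fields get two more entries.
    field_def = [field_name, FIELD_TYPES[id_field.type]]
    if id_field.type == "String":
        field_def += [field_name, id_field.length]
    return field_def

print(build_field_def("OriginUniqueID", origin_id_field))
# -> ['OriginUniqueID', 'TEXT', 'OriginUniqueID', 50]

The repeated name in the four-element form presumably serves as the field alias, with the text length closing out the definition, which is why only String-typed ID fields take the longer list.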
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals\r\n # Use an insertCursor to insert Stops into the Route analysis\r\n destinations = {}\r\n destination_rows = []\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.origin_transfer_fields\r\n ) as icur:\r\n # Loop through origins and insert them into Stops along with their assigned destinations\r\n for origin in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [\"SHAPE@\", self.origin_id_field, self.assigned_dest_field] + self.origin_transfer_fields\r\n ):\r\n dest_id = origin[2]\r\n if dest_id is None:\r\n continue\r\n if dest_id not in destinations:\r\n dest_val = f\"'{dest_id}'\" if isinstance(dest_id, str) else dest_id\r\n with arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [\"SHAPE@\", self.dest_id_field] + self.destination_transfer_fields,\r\n where_clause=f\"{self.dest_id_field} = {dest_val}\"\r\n ) as cur:\r\n try:\r\n destinations[dest_id] = next(cur)\r\n except StopIteration:\r\n # The origin's destination is not present in the destinations table. Just skip the origin.\r\n continue\r\n # Insert origin and destination\r\n destination = destinations[dest_id]\r\n if self.reverse_direction:\r\n route_name = f\"{dest_id} - {origin[1]}\"\r\n origin_sequence = 2\r\n destination_sequence = 1\r\n else:\r\n route_name = f\"{origin[1]} - {dest_id}\"\r\n origin_sequence = 1\r\n destination_sequence = 2\r\n # Define the final origin and destination rows for the input Stops\r\n origin_row = [route_name, origin_sequence, origin[1], origin[0], None] + list(origin)[3:]\r\n destination_row = [route_name, destination_sequence, None, destination[0], destination[1]] + \\\r\n list(destination)[2:]\r\n icur.insertRow(origin_row)\r\n destination_rows.append(destination_row)\r\n\r\n # Insert destinations\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.destination_transfer_fields\r\n ) as dcur:\r\n for row in destination_rows:\r\n dcur.insertRow(row)", "def _insert_stops_many_to_many(self):\r\n # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse\r\n o_data = {} # {Origin ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [self.origin_id_field, \"SHAPE@\"] + self.origin_transfer_fields\r\n ):\r\n o_data[row[0]] = row[1:]\r\n d_data = {} # {Destination ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [self.dest_id_field, \"SHAPE@\"] + self.destination_transfer_fields\r\n ):\r\n d_data[row[0]] = row[1:]\r\n\r\n # Insert origins from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\"] + self.origin_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n origin_data = o_data[origin_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Origin from OD Pairs not found in inputs. 
Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 1, origin_id) + origin_data)\r\n\r\n # Insert destinations from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.dest_unique_id_field_name, \"SHAPE@\"] + self.destination_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n dest_data = d_data[dest_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Destination from OD Pairs not found in inputs. Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 2, dest_id) + dest_data)", "def _populate_input_data_transfer_fields(self):\r\n # Valid fields for the Route Stops input are described here:\r\n # https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route-input-data-types.htm\r\n # Do not transfer RouteName or Sequence as these are explicitly controlled by this tool. Do not transfer\r\n # LocationType because we want all inputs to be Stops. Waypoints don't make sense for this analysis.\r\n int_types = [\"Integer\", \"SmallInteger\"]\r\n numerical_types = [\"Double\", \"Single\"] + int_types\r\n rt_stops_input_fields = {\r\n \"Name\": [\"String\"],\r\n \"AdditionalTime\": numerical_types,\r\n \"AdditionalDistance\": numerical_types,\r\n \"AdditionalCost\": numerical_types,\r\n \"TimeWindowStart\": [\"Date\"],\r\n \"TimeWindowEnd\": [\"Date\"],\r\n \"CurbApproach\": int_types,\r\n \"Bearing\": numerical_types,\r\n \"BearingTol\": numerical_types,\r\n \"NavLatency\": numerical_types,\r\n \"SourceID\": int_types,\r\n \"SourceOID\": int_types,\r\n \"PosAlong\": numerical_types,\r\n \"SideOfEdge\": int_types\r\n }\r\n # Preserve origin and destination input fields that match names and types\r\n origin_transfer_fields = [\r\n f.name for f in arcpy.ListFields(self.origins) if f.name in rt_stops_input_fields and\r\n f.type in rt_stops_input_fields[f.name]]\r\n self.rt_inputs[\"origin_transfer_fields\"] = origin_transfer_fields\r\n if origin_transfer_fields:\r\n LOGGER.info((\r\n \"Supported fields in the input Origins table that will be used in the analysis: \"\r\n f\"{origin_transfer_fields}\"\r\n ))\r\n destination_transfer_fields = [\r\n f.name for f in arcpy.ListFields(self.destinations) if f.name in rt_stops_input_fields and\r\n f.type in rt_stops_input_fields[f.name]]\r\n self.rt_inputs[\"destination_transfer_fields\"] = destination_transfer_fields\r\n if destination_transfer_fields:\r\n LOGGER.info((\r\n \"Supported fields in the input Destinations table that will be used in the analysis: \"\r\n f\"{destination_transfer_fields}\"\r\n ))", "def __init__(__self__, *,\n destination_region_id: pulumi.Input[str],\n destination_zone_id: pulumi.Input[str],\n source_region_id: pulumi.Input[str],\n source_zone_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n group_name: Optional[pulumi.Input[str]] = None,\n rpo: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"destination_region_id\", destination_region_id)\n pulumi.set(__self__, \"destination_zone_id\", destination_zone_id)\n pulumi.set(__self__, \"source_region_id\", source_region_id)\n pulumi.set(__self__, \"source_zone_id\", source_zone_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if 
group_name is not None:\n pulumi.set(__self__, \"group_name\", group_name)\n if rpo is not None:\n pulumi.set(__self__, \"rpo\", rpo)", "def add_destination(self):\n pass", "def __init__(self, trip_update, stops, position_in_list):\n self.trip_update = trip_update\n self.stops = stops\n self.routeID = str(self.trip_update.trip.route_id)\n # A minor quirk in the MTA's data is fixed here. S trains were listed as GS for some reason\n if self.routeID == \"GS\":\n self.routeID = \"S\"\n self.index = position_in_list", "def _select_inputs_many_to_many(self):\r\n # Select the origins present in this chunk of predefined OD pairs\r\n self.logger.debug(\"Selecting origins for this chunk...\")\r\n origins_in_chunk = set([pair[0] for pair in self.od_pairs])\r\n if isinstance(self.od_pairs[0][0], (int, float,)):\r\n origin_string = \", \".join([str(o_id) for o_id in origins_in_chunk])\r\n else:\r\n origin_string = \"'\" + \"', '\".join([str(o_id) for o_id in origins_in_chunk]) + \"'\"\r\n origins_where_clause = f\"{self.origin_id_field} IN ({origin_string})\"\r\n self.logger.debug(f\"Origins where clause: {origins_where_clause}\")\r\n self.input_origins_layer_obj = helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.MakeFeatureLayer,\r\n [self.origins, self.input_origins_layer, origins_where_clause]\r\n ).getOutput(0)\r\n num_origins = int(arcpy.management.GetCount(self.input_origins_layer).getOutput(0))\r\n self.logger.debug(f\"Number of origins selected: {num_origins}\")\r\n # Select the destinations present in this chunk of predefined OD pairs\r\n self.logger.debug(\"Selecting destinations for this chunk...\")\r\n dests_in_chunk = set([pair[1] for pair in self.od_pairs])\r\n if isinstance(self.od_pairs[0][1], (int, float,)):\r\n dest_string = \", \".join([str(d_id) for d_id in dests_in_chunk])\r\n else:\r\n dest_string = \"'\" + \"', '\".join([str(d_id) for d_id in dests_in_chunk]) + \"'\"\r\n dests_where_clause = f\"{self.dest_id_field} IN ({dest_string})\"\r\n self.logger.debug(f\"Destinations where clause: {dests_where_clause}\")\r\n self.input_dests_layer_obj = helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.MakeFeatureLayer,\r\n [self.destinations, self.input_destinations_layer, dests_where_clause]\r\n ).getOutput(0)\r\n num_dests = int(arcpy.management.GetCount(self.input_destinations_layer).getOutput(0))\r\n self.logger.debug(f\"Number of destinations selected: {num_dests}\")", "def destination_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"destination_id\")", "def _add_stops_to_df(self, stop_coords, signal_coords, route_df):\n\n self.stop_nn_indicies, self.stop_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n stop_coords\n )\n\n\n signal_nn_indicies, singal_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n signal_coords)\n\n route_df = route_df.assign(\n is_bus_stop = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_signal = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_stop = ([False] * len(route_df.index))\n )\n \n for i in self.stop_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_bus_stop'] = True\n route_df.at[i, 'is_stop'] = True\n \n for i in signal_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_stop'] = True\n route_df.at[i, 'is_signal'] = True\n\n # route_df.at[0, 'is_bus_stop'] = True\n # route_df.at[-1, 'is_bus_stop'] = True\n\n return route_df", "def __init__(self, origin, destination):\n self.origin = origin\n self.destination = destination", "def create_url(_origin_details, 
travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&date={travel_start_date}&time={travel_start_time}'\n\n # Build iteratively with necessary syntax between destinations\n destination_body = ''\n for i, dest in enumerate(destination_list):\n destination_body = f'{destination_body}&to[{i}]={dest}'\n\n return f'{prefix}{origin_body}{destination_body}'", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n destination_region_id: Optional[pulumi.Input[str]] = None,\n destination_zone_id: Optional[pulumi.Input[str]] = None,\n group_name: Optional[pulumi.Input[str]] = None,\n rpo: Optional[pulumi.Input[int]] = None,\n source_region_id: Optional[pulumi.Input[str]] = None,\n source_zone_id: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if destination_region_id is not None:\n pulumi.set(__self__, \"destination_region_id\", destination_region_id)\n if destination_zone_id is not None:\n pulumi.set(__self__, \"destination_zone_id\", destination_zone_id)\n if group_name is not None:\n pulumi.set(__self__, \"group_name\", group_name)\n if rpo is not None:\n pulumi.set(__self__, \"rpo\", rpo)\n if source_region_id is not None:\n pulumi.set(__self__, \"source_region_id\", source_region_id)\n if source_zone_id is not None:\n pulumi.set(__self__, \"source_zone_id\", source_zone_id)\n if status is not None:\n pulumi.set(__self__, \"status\", status)", "def make_destiID(self):\n\n desti_ob_list = self.list_all_destinations()\n number = 1 + len(desti_ob_list)\n self.destiID_number = \"0\" + str(number)\n\n return self.destiID_number", "def post(self, destination_id, add_to_beginning=\"False\",clear_other_waypoints=\"False\",datasource=\"tranquility\",**kwargs):\n kwargs_dict ={\n\"destination_id\" : destination_id, \"add_to_beginning\" : add_to_beginning, \"clear_other_waypoints\" : clear_other_waypoints, \"datasource\" : datasource, \n }\n kwargs_dict.update(kwargs)\n return EsiRequestObject(self.base_url, self.post_responses) \\\n .post(**kwargs_dict)", "def _set_edit_fields(self):\n # Must always have the 'id' field\n has_id = False\n \n # import pdb;pdb.set_trace()\n \n for field in self.edit_fields:\n if not field['name']:\n raise ValueError(\"The 'name' property in edit_fields may not be empty \")\n break\n \n if field['name'] == 'id':\n has_id = True\n \n # special case\n if 'label' not in field or not field['label']:\n field['label'] = field['name'].replace('_',' ').title()\n \n for k, v in self._get_field_list_dict().items():\n if k not in field:\n field[k] = v\n \n if not has_id:\n #add an id field\n # import pdb;pdb.set_trace()\n id_field = self._get_field_list_dict()\n id_field.update({'name':'id','type':'hidden','default':0})\n self.edit_fields.append(id_field)", "def save_journey():\n destination = request.form.get('destination_id', ''), request.form.get('destination_name', '')\n origin = request.form.get('origin_id', ''), request.form.get('origin_name', '')\n if '' not in destination or '' not in origin:\n trip_db: Cache = g.trip_db\n trip_db.read_db()\n trip_db.write_db((origin, destination))\n print(trip_db.data)\n return redirect('/')", "def put (id, travel_stop):\n travel_stop['source'] = \"otherDB\"\n travel_stop['id'] = id\n travel_stops[id] = travel_stop\n \n return travel_stop, 200", "def populate_stops(self):\n stops = 
self.load_csv('stops.txt')\n stops = self.process_stops(stops)\n\n connection = db.connect()\n for stop in stops:\n try:\n connection.execute(schema.stops.insert(), stop)\n except DataError:\n print \"Missing data for stop: %s\" % (stop)", "def update_ids(self, value, destination_ids_by_source):\n return value", "def add_route(self, distance, start, destination):\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))", "def origin_id(self, origin_id):\n\n self._origin_id = origin_id", "def calculer_indicateur(self):\n source_data = self.source.get_values()\n destionnation_data = self.preparer_data_pour_destionnataire(source_data)\n self.destionnation.insert_values(destionnation_data)", "def _add_rid_to_vrf_list(self, ri):\n if ri.ex_gw_port or ri.router.get('gw_port'):\n driver = self.driver_manager.get_driver(ri.id)\n vrf_name = driver._get_vrf_name(ri)\n if not vrf_name:\n return\n if not self._router_ids_by_vrf.get(vrf_name):\n LOG.debug(\"++ CREATING VRF %s\" % vrf_name)\n driver._do_create_vrf(vrf_name)\n self._router_ids_by_vrf.setdefault(vrf_name, set()).add(\n ri.router['id'])", "def set_destination_to_add_destination(self, destination):\n self.multiple_items_selection_from_kendo_dropdown(self.destination_multiple_kendo_dropdown_locator, destination)\n self.click_element(self.new_destination_header_locator)", "def _build_directions(self):\n d = {'start': self.get_start(), 'end': self.get_end(), 'duration': self.get_duration(),\n 'mode': self.get_primary_mode(), 'price_range': self.get_price_range(), 'legs': self.get_legs(),\n 'start_location': self.get_start_location(), 'end_location': self.get_end_location()}\n self.set_directions(d)", "def destinatarios(self, destinatarios):\n self._destinatarios = destinatarios", "def __createFields(self):\n fields = self.updateFields\n for field in fields:\n self.__createField(field)", "def set_dests(self, increment=1000000):\n modified = 0\n pb = Progress(len(self.graph.routers), 'Setting destinations', increment=increment, callback=lambda: 'Modified {:,d}'.format(modified))\n for router in pb.iterator(self.graph.routers.values()):\n for interface in router.interfaces:\n # Copy destination ASes to avoid messing up original\n idests: Set[int] = set(interface.dests)\n # If last hop, interface has non-IXP AS mapping, and interface has destination ASes\n if not router.succ and idests and interface.asn > 0:\n origin = interface.asn\n # Interface must have exactly 2 destination ASes and one must be its origin AS\n if len(idests) == 2 and origin in idests:\n other_asn = peek(idests - {origin}) # other AS\n # If other AS is likely customer of interface origin AS, and it's a small AS\n if self.bgp.conesize[origin] > self.bgp.conesize[other_asn] and self.bgp.conesize[other_asn] < 5:\n idests.discard(origin)\n modified += 1\n # Add all remaining destination ASes to the router destination AS set\n router.dests.update(idests)", "def __init__(__self__, *,\n group_id: pulumi.Input[str],\n ip_protocol: pulumi.Input[str],\n cidr_ip: Optional[pulumi.Input[str]] = None,\n cidr_ipv6: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_prefix_list_id: Optional[pulumi.Input[str]] = None,\n destination_security_group_id: Optional[pulumi.Input[str]] = None,\n from_port: Optional[pulumi.Input[int]] = None,\n to_port: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"group_id\", group_id)\n pulumi.set(__self__, \"ip_protocol\", 
ip_protocol)\n if cidr_ip is not None:\n pulumi.set(__self__, \"cidr_ip\", cidr_ip)\n if cidr_ipv6 is not None:\n pulumi.set(__self__, \"cidr_ipv6\", cidr_ipv6)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if destination_prefix_list_id is not None:\n pulumi.set(__self__, \"destination_prefix_list_id\", destination_prefix_list_id)\n if destination_security_group_id is not None:\n pulumi.set(__self__, \"destination_security_group_id\", destination_security_group_id)\n if from_port is not None:\n pulumi.set(__self__, \"from_port\", from_port)\n if to_port is not None:\n pulumi.set(__self__, \"to_port\", to_port)", "def origen_destino(self):\n origin = self.html.xpath(self.xpath_origin)\n destination = self.html.xpath(self.xpath_destination)\n origen = map(self.limpieza_texto, origin)\n destino = map(self.limpieza_texto, destination)\n return origen, destino", "def _add_id(self, attrs):\n _id = {}\n _id['id'] = str(attrs.get('name', ''))\n _id['valid_from'] = (\n _get_date_from_string(attrs.get('validFrom', '')))\n _id['created'] = (\n _get_date_from_string(attrs.get('created', '')))\n _id['device'] = str(attrs.get('device', ''))\n self._ids[str(attrs.get('name', ''))] = _id", "def __init__(self, orcid, start, stop):\n self.id = orcid.replace(\"-\", \"\")\n self.start = Date(*start.split(\"-\"))\n self.stop = Date(*stop.split(\"-\")) if stop else Date(None, None, None)", "def set_design_origin(self,origin):\n for d in self.features:\n for i in range(len(self.get_coord(d))):\n self.add_to_coord(d,i,origin)\n return self", "def get_inputs(self, id):", "def action_move_create(self):\n\n res = super(account_invoice, self).action_move_create()\n\n for inv in self:\n if not inv.move_id:\n return res\n for ml in inv.move_id.line_id:\n ml_vals = {\n 'emp_police': inv.pol_numpol,\n 'emp_quittance': inv.prm_numero_quittance,\n 'emp_effet': datetime.datetime.strptime(inv.prm_datedeb, '%Y-%m-%d').date() if inv.prm_datedeb else datetime.datetime.today(),\n 'emp_datech': datetime.datetime.strptime(inv.prm_datefin, '%Y-%m-%d').date() if inv.prm_datefin else datetime.datetime.today(),\n }\n ml.update(ml_vals)\n move_vals = {\n 'num_police': inv.pol_numpol,\n 'num_quittance': inv.prm_numero_quittance,\n 'date_effect': datetime.datetime.strptime(inv.prm_datedeb, '%Y-%m-%d').date() if inv.prm_datedeb else datetime.datetime.today(),\n 'date_end': datetime.datetime.strptime(inv.prm_datefin, '%Y-%m-%d').date() if inv.prm_datefin else datetime.datetime.today(),\n }\n inv.move_id.update(move_vals)\n self._log_event()\n return res", "def _associate_floating_ip(self, context, domain_id, extra, floating_ip_id, floating_ip, port_id):\n\n addresses = [{\n 'version': 4,\n 'address': floating_ip,\n }]\n try:\n names = self._create(context=context,\n addresses=addresses,\n name_format=cfg.CONF[self.name].format,\n extra=extra,\n domain_id=domain_id,\n managed_extra='portid:%s' % (port_id),\n resource_type='a:floatingip',\n resource_id=floating_ip_id)\n except (designate.exceptions.DuplicateRecord, CirrusRecordExists):\n LOG.warn('Could not create record for %s using default format, '\n 'trying fallback format' % (extra['instance_name']))\n names = self._create(context=context,\n addresses=addresses,\n name_format=cfg.CONF[self.name].format_fallback,\n extra=extra,\n domain_id=domain_id,\n managed_extra='portid:%s' % (port_id),\n resource_type='a:floatingip',\n resource_id=floating_ip_id)\n LOG.info(\"Created %s to point at %s\" % (','.join(names), floating_ip))", "def add_fields(self, 
fields):\n for label, data in fields.items():\n self[label] = data", "def connect(self, origin, destination):\n origin_section = self.sections[origin]\n destination_section = self.sections[destination]\n \"The junction of both origin and destination must be the same.\"\n \"\"\"A turn transitions a vehicle from the last segment of the origin to the first\n segment of the destination.\"\"\"\n edge = self.graph.add_edge(origin_section[-1], destination_section[0])\n self.edge_weights[edge] = 0 # The distance between the same location is 0.\n return", "def info_for_model(stop_list, stops, route):\n\n # Need to know where the bus number 1 and 2 are\n # This if and elif were put in due to an error where the bus list for bus 1 would come up empty, but not sure if necessary\n if len(stops[0]) == 0:\n bus_1 = stops[1][len(stops[1]) - 1]\n elif len(stops[0]) != 0:\n bus_1 = stops[0][0]\n bus_2 = stops[1][0]\n\n # Create empty lists to hold the information for each bus\n stops_bus_1 = []\n stops_bus_2 = []\n stops_bus_3 = []\n\n # Ste bus_number to 3, we will start filling the buses from the end, the last bus first\n bus_number = 3\n\n # Populate our lists\n for i in stops[len(stops) - 1]:\n # Get the times for the buses at the given stop\n first_3_buses = get_due_time(str(i), route)\n\n if len(first_3_buses) == 0:\n # print('Something went wrong!')\n continue\n # Add in the delay\n get_delay(first_3_buses)\n\n # Have to check if the bus it at the first stop, in which case, we just say 'Starting stop' for previous_stop\n if i == stop_list[0]:\n previous_stop = 'Starting stop'\n # Else, we get the previous stop\n else:\n previous_stop = stop_list[stop_list.index(i) - 1]\n\n # If the bus is the last one, we will only append to bus_number_3\n if bus_number == 3:\n # If we reach the stop where bus number 2 is, we must append this stop to both bus_number_3 and bus_number2 and\n # decrease the bus_number counter\n if i == bus_2:\n bus_number -= 1\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n else:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n\n # Now, we keep adding bus 2 and bus 3\n elif bus_number == 2:\n # If we reach the stop where bus number 1 is, we must append this stop to both bus_number_3 and bus_number2 and\n # bus_number1 and decrease the bus_number counter\n if i == bus_1:\n bus_number -= 1\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[2]['delay'], 'arrival_hour':first_3_buses[2]['arrivaldatetime'][11:13], 'datetime':first_3_buses[2]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_1.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n else:\n stops_bus_3.append({'stopid':i, 
'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n\n # Here, we are now appending all the buses, until we finally reach the source stop\n elif bus_number == 1:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[2]['delay'], 'arrival_hour':first_3_buses[2]['arrivaldatetime'][11:13], 'datetime':first_3_buses[2]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_1.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n joined = [stops_bus_1, stops_bus_2, stops_bus_3]\n return joined", "def prepare_duplication(self):\n for field in self.fields:\n ofield = self.fields[field]\n\n if self.duplicate:\n if ofield.primary_key:\n self.exclude_field(field)\n continue\n\n if not self.auto_fields:\n # add others if needed\n if hasattr(ofield, 'auto_now') or \\\n hasattr(ofield, 'auto_now_add'):\n if ofield.auto_now or ofield.auto_now_add:\n self.exclude_field(field)\n continue", "def end_points(self, origin, destination):\n # origin and destination are components with bounding-boxes\n # direction is a 2 char code representing starting and ending directions\n # 'h' horizontal, 'v' vertical\n o_coords = origin.bounding_coords()\n d_coords = destination.bounding_coords()\n\n start = {\n \"h\": core.Coords(o_coords.x2, o_coords.y1 + origin.height / 2),\n \"v\": core.Coords(origin.x + (o_coords.x2 - o_coords.x1) / 2, o_coords.y2),\n }\n end = {\n \"h\": core.Coords(d_coords.x1, d_coords.y1 + destination.height / 2),\n \"v\": core.Coords(\n destination.x + (d_coords.x2 - d_coords.x1) / 2, d_coords.y1\n ),\n }\n self.start = start[self.direction[0]]\n self.end = end[self.direction[-1]]\n return (self.start, self.end)", "def lines(self, form, ids=None, done=None):\n moveline_obj = self.pool.get('account.move.line')\n cr,uid = self.cr,self.uid\n ctx = self.context.copy()\n ctx['fiscalyear'] = form['fiscalyear_id']\n if form['filter'] == 'filter_period':\n ctx['period_from'] = form['period_from']\n ctx['period_to'] = form['period_to']\n elif form['filter'] == 'filter_date':\n ctx['date_from'] = form['date_from']\n ctx['date_to'] = form['date_to']\n ctx['state'] = form['target_move']\n\n account_ids = self.pool.get('account.account')._get_children_and_consol(cr, uid, [form['account_id'][0]], context=ctx)\n if not account_ids: return []\n move_query = moveline_obj._query_get(cr, uid, obj='l', context=ctx)\n\n cr.execute(\"\"\"\n select\n min(l.id) as id,\n to_char(date,'MONTH') as name,\n sum(l.debit-l.credit) as balance,\n sum(l.debit) as debit,\n sum(l.credit) as credit \n from\n account_move_line l\n left join\n account_account a on (l.account_id=a.id)\n where \n l.account_id in %s \n AND \"\"\"+move_query+\"\"\"\n group by\n to_char(date,'MONTH'),to_char(date,'MM') \n ORDER BY to_char(date,'MM')\n \"\"\", (tuple(account_ids),))\n\n self.data = cr.dictfetchall()\n return self.data", "def add_pattern(self, start, stop, 
pattern):\n        self.coord2pattern[start] = []\n        self.coord2pattern[start].append(pattern)", "def update_log_forwarding_destinations(\n        self,\n        label: str,\n        sources: list,\n        consumer: str,\n        credentials: dict,\n        address: str,\n        destination_uuid: str,\n    ) -> Session:\n        uri = f\"{self.uri}/log-forwarding-destinations/{destination_uuid}\"\n        data = {\n            \"label\": label,\n            \"sources\": sources,\n            \"consumer\": consumer,\n            \"credentials\": credentials,\n            \"address\": address,\n        }\n        response = self.request(uri=uri, method=\"PUT\", data=data)\n\n        return response", "def align_tour_ids(self, tour_ids: list) -> list:\n        n = self.size()\n\n        # levels[i] is the number of movable cities for city i\n        levels = np.zeros(len(tour_ids), dtype=np.uint8)\n        for i in tour_ids:\n            levels[i] = len(self.get_movable_city_ids(i))\n        # print(levels)\n\n        for i in range(1, n):\n            # Find all possible city indices that the (tour_ids[i-1])-th city is linked to\n            list_ids = self.get_movable_city_ids(tour_ids[i-1])\n\n            # Check if the (tour_ids[i-1])-th city and the (tour_ids[i])-th city are not linked\n            if not self.is_movable(tour_ids[i - 1], tour_ids[i]):\n                # print(tour_ids[i - 1], tour_ids[i])\n                # print(list_ids)\n                sub_list_ids = []\n                # Loop over possible city indices, keeping only cities that are still connectable\n                for j in list_ids:\n                    # Make sure j is not in the earlier part of tour_ids, because we only\n                    # want to change the part of tour_ids from position i onward\n                    if j not in tour_ids[0:i]:\n                        sub_list_ids.append(j)\n                # print(sub_list_ids)\n\n                # If sub_list_ids is empty, we drop this tour\n                if len(sub_list_ids) == 0:\n                    return None\n\n                # Find argmin of levels\n                j_min_in_slice = np.argmin(levels[sub_list_ids])\n                j_min = sub_list_ids[j_min_in_slice]\n                # print(j_min)\n\n                idx_of_j = tour_ids.index(j_min)  # find position of j_min value in tour_ids\n                # swap the two positions\n                tour_ids[i], tour_ids[idx_of_j] = j_min, tour_ids[i]\n\n                # print(tour_ids)\n\n            # Set the level of the (tour_ids[i-1])-th city to 0 once it satisfies the constraint\n            levels[tour_ids[i-1]] = 0\n            # Update the levels of cities connected with the (tour_ids[i-1])-th city:\n            for j in list_ids:\n                # Skip cities whose level is already satisfied\n                if levels[j] > 0:\n                    levels[j] -= 1\n            # print(levels)\n\n        return tour_ids", "def duplicate_elimination_by_origin_id(self, original, view):\n\t\tids = []\n\t\tnames = []\n\t\tfor i in original:\n\t\t\tif i['origin']['id'] not in ids:\n\t\t\t\tids.append(i['origin']['id'])\n\t\t\t\tif view:\n\t\t\t\t\tnames.append(i['origin']['name'])\n\t\tif view:\n\t\t\treturn names\n\t\telse:\n\t\t\treturn ids", "def addDistance(layer, tracks):\r\n\r\n    # Create a field to store Distance\r\n    newColumn(layer, \"Distance\", QVariant.Double)\r\n\r\n    # Select the separate locations (points) of each route and save their properties into lists\r\n    for m in range(0,len(tracks)):\r\n        # Create empty lists to save UTM coordinates, track number, feature ID\r\n        L_north=[]\r\n        L_east=[]\r\n        L_ID=[]\r\n        L_distance=[]\r\n\r\n        layer.selectByExpression(tracks[m], QgsVectorLayer.SetSelection)\r\n        selection = layer.selectedFeatures()\r\n        for feature in selection:\r\n            east=feature['utm_east']\r\n            north=feature['utm_north']\r\n            L_north.append(north)\r\n            L_east.append(east)\r\n            L_ID.append(feature.id())\r\n\r\n        # Calculate the Euclidean distance between a point and its previous point\r\n        for j in range (0,(len(L_north))):\r\n            if j==0:\r\n                distance=0\r\n                L_distance.append(distance)\r\n            else:\r\n                D_north=(L_north[j]-L_north[j-1])**2\r\n                D_East=(L_east[j]-L_east[j-1])**2\r\n                distance=(math.sqrt(D_north+D_East))/1000\r\n
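                # Note: this is a planar Euclidean distance on projected UTM coordinates\r\n                # (metres), divided by 1000 to give kilometres. A minimal equivalent\r\n                # sketch, assuming (east, north) pairs in metres (names illustrative only):\r\n                #   d_km = math.hypot(east2 - east1, north2 - north1) / 1000\r\n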
L_distance.append(distance)\n\n #Update distances to a new field\n updates_distance={}\n for i in range (0,(len(L_north))):\n # Get the distance value from the gpx\n distance=L_distance[i]\n index=L_ID[i]\n\n # Update the empty fields in the shapefile\n indexDi=layer.fields().indexFromName('Distance')\n updates_distance[index] = {indexDi:distance}\n\n layer.dataProvider().changeAttributeValues(updates_distance)\n # Update to propagate the changes\n layer.updateFields()\n layer.removeSelection()\n\n L_north.clear()\n L_east.clear()\n L_ID.clear()\n L_distance.clear()", "def destination_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"destination_instance_ids\")", "def destination_instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"destination_instance_ids\")", "def set_pin(self, xpos1, ypos1, xpos2, ypos2):\n\n distx = xpos1-xpos2\n disty = ypos1-ypos2\n\n if distx < 0 and disty == 0:\n self.def_field['direction'] = 'R'\n if distx > 0 and disty == 0:\n self.def_field['direction'] = 'L'\n\n if distx == 0 and disty > 0:\n self.def_field['direction'] = 'D'\n if distx == 0 and disty < 0:\n self.def_field['direction'] = 'U'\n\n self.def_field['length'] = int(math.sqrt(distx * distx\n + disty * disty))\n self.def_field['x'] = self.offset[0] + xpos1\n self.def_field['y'] = self.offset[1] + ypos1", "def Destination(self) -> _n_0_t_1:", "def convert_data_to_model_input(self, origin_datas, add_unknow_words=True):\n def _word_to_id(word):\n if word in self.word_to_idx:\n return self.word_to_idx[word]\n elif add_unknow_words:\n self.word_to_idx[word] = len(self.vocab)\n self.vocab.append(word)\n self.embeddings.append(np.random.uniform(-0.25, 0.25,\n self.embedding_size))\n return self.word_to_idx[word]\n else:\n # padding id\n return 0\n\n x_inputs = []\n y_inputs = []\n\n for data in origin_datas:\n y_inputs.append(self.labels_to_idx[data[0]])\n\n words = data[1].split(\" \")\n ids = list(map(_word_to_id, words))\n\n x_inputs.append(ids)\n\n return x_inputs, y_inputs", "def register_inputs(self, args_):\n # TODO Should we be able to rebuild?\n def traversal_function(obj):\n if obj.id.value not in self.placeholders:\n self.placeholders[obj.id.value] = obj\n self.input_placeholder_ids.append(obj.id.value)\n\n self.input_placeholder_ids = []\n Role.nested_object_traversal(args_, traversal_function, PlaceHolder)\n self.input_placeholder_ids = tuple(self.input_placeholder_ids)", "def export_trip_stop_information(trip_id,stop_sequence,stop_number,\n prev_stop_distance,cumulative_distance,\n prev_stop_travel_time):\n \n sql=\"\"\"insert into gtf_stoptimes_information (trip_id, stop_sequence,\n trip_stop_number,prev_stop_distance_meters,\n cumulative_distance_meters,travel_time_seconds)\n values\n (%(tid)s,%(seq)s,%(num)s,%(dist)s,%(cdist)s,%(time)s)\"\"\"\n cur = get_cursor()\n if prev_stop_distance is not None:\n prev_stop_distance = str(prev_stop_distance)\n SQLExec(cur,sql,{'tid':trip_id,'seq':stop_sequence,'num':stop_number,\n 'dist':prev_stop_distance,\n 'cdist':str(cumulative_distance),\n 'time':prev_stop_travel_time});\n cur.close()", "def __call__(self, parser, namespace, values, option_string=None):\n ip_split = values.split(\",\")\n [ip_address(ip) for ip in ip_split]\n setattr(namespace, self.dest, ip_split)", "def __init__(__self__, *,\n end_ip: pulumi.Input[str],\n start_ip: pulumi.Input[str]):\n pulumi.set(__self__, \"end_ip\", end_ip)\n pulumi.set(__self__, \"start_ip\", start_ip)", "def 
derive_features(self):\n\n temp = int(self.stop_id)\n\n while temp not in self.stops_latlon.keys():\n if temp < 7692:\n temp += 1\n else:\n while temp not in self.stops_latlon.keys():\n temp -= 1\n\n self.latitude = self.stops_latlon[temp][0]\n self.longitude = self.stops_latlon[temp][1]\n\n self.distance_centre = FormatInput.haversine(self.latitude, self.longitude)\n\n self.cluster = FormatInput.map_stop_to_cluster(self.cluster_map, self.stop_id)\n\n self.holiday = FormatInput.add_holiday(self.date)", "def _create_update_move_finished(self):\n # keep manual entries\n list_move_finished = [(4, move.id) for move in self.move_finished_ids.filtered(\n lambda m: not m.byproduct_id and m.product_id != self.product_id)]\n list_move_finished = []\n moves_finished_values = self._get_moves_finished_values()\n moves_byproduct_dict = {move.byproduct_id.id: move for move in self.move_finished_ids.filtered(lambda m: m.byproduct_id)}\n move_finished = self.move_finished_ids.filtered(lambda m: m.product_id == self.product_id)\n for move_finished_values in moves_finished_values:\n if move_finished_values.get('byproduct_id') in moves_byproduct_dict:\n # update existing entries\n list_move_finished += [(1, moves_byproduct_dict[move_finished_values['byproduct_id']].id, move_finished_values)]\n elif move_finished_values.get('product_id') == self.product_id.id and move_finished:\n list_move_finished += [(1, move_finished.id, move_finished_values)]\n else:\n # add new entries\n list_move_finished += [(0, 0, move_finished_values)]\n self.move_finished_ids = list_move_finished", "def _update_input_after_create_node(self):\n for node in self._normal_node_map.values():\n for src_node_id, input_attr in dict(node.inputs).items():\n node.delete_inputs(src_node_id)\n if not self._is_node_exist(node_id=src_node_id):\n message = f\"The input node could not be found by node id({src_node_id}) \" \\\n f\"while updating the input of the node({node})\"\n logger.warning(message)\n\n continue\n\n src_node = self._get_normal_node(node_id=src_node_id)\n input_attr['shape'] = src_node.output_shape\n input_attr['data_type'] = src_node.output_data_type\n node.add_inputs(src_name=src_node.name, input_attr=input_attr)", "def add_route(g, origin, destination, distance, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n distance = int(distance)\n # Add route both ways\n if(choice_dir == \"y\"):\n g.city_dict[origin_code].add_flights_in((destination_code, distance))\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n \n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n g.city_dict[destination_code].add_flights_out((origin_code, distance))\n # Add route one way \n if(choice_dir == \"n\"):\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n \n \n \n return g", "def buildStopsDict(self):\n \n if len(self.nodesDict) == 0:\n raise Exception('Nodes dictionary is empty!')\n if len(self.linksDict) == 0:\n raise Exception('Links dictionary is empty!')\n \n self.stopsByRoute = dict()\n self.stopsByNode = dict()\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n \n tempStops = \"temp_stops\"\n tempStopsSp = \"temp_stops_sp\"\n \n # Delete temp_stops and temp_stops_sp feature classes if they exist.\n if arcpy.Exists(tempStops):\n arcpy.Delete_management(tempStops)\n if arcpy.Exists(tempStopsSp):\n arcpy.Delete_management(tempStopsSp)\n 
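        # Copy the RTD stops into the working GDB. The Exists/Delete checks above\n        # make the copy repeatable across reruns; an alternative (assuming it fits\n        # the workflow) would be to set arcpy.env.overwriteOutput = True instead.\n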
arcpy.CopyFeatures_management(PublicTransit.RTD_PATH + PublicTransit.RTD_STOPS,\n tempStops)\n \n # Project temp_stops to CA state plane and add XY.\n install_dir = arcpy.GetInstallInfo()['InstallDir']\n out_coordinate_system = os.path.join(install_dir, PublicTransit.NAD_83_DIRECTORY)\n arcpy.Project_management(tempStops, tempStopsSp, out_coordinate_system,\n \"NAD_1983_To_WGS_1984_1\")\n arcpy.AddXY_management(tempStopsSp)\n \n # Create a search cursor to traverse all stops.\n stops = arcpy.SearchCursor(tempStopsSp, \"\", \"\",\n \"CPT_STOPPOINTID; SCH_STOPPOINTSEQNO; \" +\n \"SCH_ROUTEID; SCH_PATTERNID; ROUTE_PATTERN; \" +\n \"SourceOID; POINT_X; POINT_Y\",\n \"ROUTE_PATTERN A; SCH_STOPPOINTSEQNO A\")\n numStops = int(arcpy.GetCount_management(tempStopsSp).getOutput(0))\n print \"Found %d stops\" % numStops\n \n p = index.Property()\n p.overwrite = True\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE,properties=p)\n \n # For each stop determine the nearest network node.\n scount = 0\n icount = 0\n for s in stops:\n # only create stops for routes which exist in RTD\n if not s.ROUTE_PATTERN in self.transitRoutes:\n continue\n scount += 1\n st = TransitStop(s.CPT_STOPPOINTID, s.SCH_ROUTEID, s.SCH_PATTERNID,\n s.ROUTE_PATTERN, s.SourceOID, s.SCH_STOPPOINTSEQNO)\n # If the stop's linkId is in the links dictionary use the link from\n # and to node (these should all be bus routes since MTC's route\n # traversal FC was created for buses only at this time).\n if s.SourceOID in self.linksDict:\n link = self.linksDict[s.SourceOID]\n # Determine which node is nearest and snap to it.\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.fromNode.x,\n link.fromNode.y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.toNode.x,\n link.toNode.y):\n st.tanaNode = link.fromNode.nodeId\n else:\n st.tanaNode = link.toNode.nodeId\n st.inRegion = True\n \n # The stop's link is not in linksDict. These are either stops \n # outside the region or non-bus routes for which there are no\n # route traversal edges. 
Do a link lookup from the Roadways\n # feature class.\n else:\n arcpy.env.workspace = PublicTransit.RTD_PATH\n roadwaysSearch = arcpy.SearchCursor(PublicTransit.ROADWAYS_FC,\n \"LinkId = \" + str(s.SourceOID),\n \"\", \"\", \"F_JNCTID; T_JNCTID\", \"\")\n for r in roadwaysSearch:\n fromNode = self.__getIdHash(r.F_JNCTID)\n toNode = self.__getIdHash(r.T_JNCTID)\n if fromNode in self.nodesDict and toNode in self.nodesDict:\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[fromNode].x,\n self.nodesDict[fromNode].y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[toNode].x,\n self.nodesDict[toNode].y):\n st.tanaNode = fromNode\n else:\n st.tanaNode = toNode\n st.inRegion = True\n else:\n st.inRegion = False\n \n # Add the stop to stopsByRoute and stopsByNode dictionaries\n if s.ROUTE_PATTERN in self.stopsByRoute:\n self.stopsByRoute[s.ROUTE_PATTERN].append(st)\n else:\n self.stopsByRoute[s.ROUTE_PATTERN] = [st]\n if (st.tanaNode in self.stopsByNode):\n self.stopsByNode[st.tanaNode].append(st)\n else:\n self.stopsByNode[st.tanaNode] = [st]\n # add the stop node to the spatial index\n if st.tanaNode in self.nodesDict:\n icount += 1\n self.spIndex.insert(st.stopPointId,\n (self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y,\n self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y))\n del stops", "def __init__( self, source, label, destination ):\n self.source = source;\n self.label = label;\n self.destination = destination;", "def endpoints(self):\n return (self._origin,self._destination)", "def add_new_arrival(self):\n pass", "def create_I_list(self):\r\n self.I_list = ['0']\r\n self.list_label = (\"Enter a list of source currents (A) separated by \"\r\n \"commas (e.g., 1e-3, 2e-6, 3e-4). Range is -0.105 \"\r\n \"to 0.105. 
If empty list, sets 1-point list with \"\r\n \"output current 0.\")\r\n self.window_title = \"Current Biases\"\r\n\r\n if self.spd_type_index == 2 and self.connected:\r\n self.I_list = self.list_box.getText(self, self.window_title,\r\n self.list_label)[0]\r\n self.I_source.write(\"SOUR:LIST:CURR \" + self.I_list)\r\n print(self.I_source.query(\"SOUR:LIST:CURR?\"))\r\n self.I_list_float = list(\r\n map(float, re.sub(',', '', self.I_list).split())\r\n )\r\n if not self.I_list:\r\n self.I_source.write(\"SOUR:LIST:CURR 0\")\r\n self.I_list = ['0']\r\n self.I_list_float = [0]\r\n else:\r\n self.SweepPulseDeltaStartI.setValue(self.I_list_float[0]*1E6)\r\n self.SweepPulseDeltaEndI.setValue(self.I_list_float[-1]*1E6)\r\n# self.spd_points_switch[2] = int(self.I_source.query(\r\n# \"SOUR:LIST:CURR:POIN?\"))\r\n #self.spd_points = int(self.I_source.query(\"SOUR:LIST:CURR:POIN?\"))\r\n self.update_spd_sweep_type()", "def _add_fields(self, fields):\n for field in fields:\n self.add(field)", "def __init__(self, data_info, source_word2id, target_word2id, mem_word2id):\n self.ID = data_info['ID']\n # list of domains by TURN, not by dialogue\n # self.turn_domain = data_info['turn_domain']\n # list of turn indices per dialogue [0,1,2,3,4, 0,1,2,3, etc]\n self.turn_id = data_info['turn_id']\n # dialogue history by turn\n self.dialog_history = data_info['dialog_history']\n # list of domain-slot value pairs by turn\n self.turn_belief = data_info['turn_belief']\n # list of domain gate labels by turn\n self.domain_gate_label = data_info['domain_gate_label']\n # list of slot gate labels by turn\n self.slot_gate_label = data_info['slot_gate_label']\n # list of value gate labels by turn\n self.value_gate_label = data_info['value_gate_label']\n # map of domain_gate_label's to domain\n self.domain_map = data_info['domain_map']\n # map of slot_gate_label's to domain_slots\n self.domain_slot_map = data_info['domain_slot_map']\n # self.turn_uttr = data_info['turn_uttr']\n # list of accumulated slot values by turn\n self.generate_y = data_info[\"generate_y\"]\n self.num_total_seqs = len(self.dialog_history)\n self.src_word2id = source_word2id\n self.trg_word2id = target_word2id\n self.mem_word2id = mem_word2id", "def __rshift__(self, other):\n # softly check if the \"other\" is a Node with inputs\n if hasattr(other, \"inputs\"):\n for iname, iplug in other.inputs.items():\n if iname == self.name:\n target = iplug\n else:\n target = other\n self.connect(target)", "def add_desire(origin,desire_id, desire_type,desire_utility,desire_intensity, desire_params=None):\n des = Desire()\n des.id = desire_id\n des.type = desire_type\n des.utility = desire_utility\n des.intensity = desire_intensity\n if desire_params is not None:\n des.params = desire_params\n \n rospy.loginfo(\"adding desire: \" + des.id)\n\n origin.add_desires_service.call([des])", "def combineGPSandPhoneStops(arg):\r\n\r\n # unpack parameters\r\n user_gps, user_cell, dur_constr, spat_constr_gps, spat_cell_split = arg\r\n\r\n # combine cellular stay if it is close to a gps stay\r\n cell_stays = list(set([(trace[6],trace[7]) for d in user_cell for trace in user_cell[d] if int(trace[9]) >= dur_constr]))\r\n gps_stays = list(set([(trace[6],trace[7]) for d in user_gps for trace in user_gps[d] if int(trace[9]) >= dur_constr]))\r\n pairs_close = set()\r\n for cell_stay in cell_stays:\r\n for gps_stay in gps_stays:\r\n if distance(cell_stay[0],cell_stay[1],gps_stay[0],gps_stay[1]) <= spat_constr_gps:\r\n 
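                # A cellular stay within spat_constr_gps km of some GPS stay is treated\r\n                # as the same physical stop: record the (gps, cell) coordinate pair so\r\n                # the cellular stay can be snapped onto the GPS stay's location below.\r\n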
pairs_close.add((gps_stay[0],gps_stay[1],cell_stay[0],cell_stay[1]))\r\n break\r\n # find all pair[1]s in list, and replace it with pair[0]\r\n for pair in list(pairs_close):\r\n for d in user_cell.keys():\r\n for trace in user_cell[d]:\r\n if trace[6] == pair[2] and trace[7] == pair[3]:\r\n trace[5], trace[6], trace[7] = 99, pair[0], pair[1] #pretend as gps\r\n\r\n user = user_gps\r\n for d in user.keys():\r\n if len(user_cell[d]):\r\n user[d].extend(user_cell[d])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr) #OscillationPairList = oscillation_h1_oscill(user, dur_constr)\r\n # ## when replaced, can only replaced with a gps stay; so let modify exchange ping-pong pair in the pairList\r\n # gpslist_temp = {(trace[6], trace[7]):int(trace[5]) for d in user.keys() for trace in user[d]}\r\n # for pair_i in range(len(OscillationPairList)):\r\n # if gpslist_temp[(OscillationPairList[pair_i][0],OscillationPairList[pair_i][1])] <= spat_constr_gps:# wrong(2,3)\r\n # OscillationPairList[pair_i] = [OscillationPairList[pair_i][2],OscillationPairList[pair_i][3],\r\n # OscillationPairList[pair_i][0],OscillationPairList[pair_i][1]]\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n for d in user:\r\n phone_index = [k for k in range(len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) == 0: # if no phone trace\r\n continue\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr: # passing phone observ\r\n user[d][i].append('checked')\r\n # combine consecutive obsv on a phone stay into two observ\r\n i = min(phone_index) # i has to be a phone index\r\n j = i + 1\r\n while i < len(user[d]) - 1:\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last observation\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n break\r\n if int(user[d][j][5]) > spat_cell_split and user[d][j][6] == user[d][i][6] \\\r\n and user[d][j][7] == user[d][i][7] and j < len(user[d]):\r\n j += 1\r\n else:\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n phone_index = [k for k in range(j, len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) < 3: # if no phone trace\r\n break\r\n i = min(phone_index) ##i has to be a phone index\r\n j = i + 1\r\n i = 0 # remove []\r\n while i < len(user[d]):\r\n if len(user[d][i]) == 0:\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # adress phone stay one by one\r\n flag_changed = True\r\n phone_list_check = []\r\n while (flag_changed):\r\n # print('while........')\r\n flag_changed = False\r\n gps_list = []\r\n phone_list = []\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) <= spat_cell_split:#or user[d][i][2] == 'addedphonestay': #changed on 0428\r\n gps_list.append(user[d][i])\r\n else:\r\n phone_list.append(user[d][i])\r\n\r\n phone_list.extend(phone_list_check)\r\n # when updating duration for phone stay, we have to put back passing obs\r\n phone_list = sorted(phone_list, key=itemgetter(0))\r\n # update phone stay\r\n i = 0\r\n j = i\r\n while i < 
len(phone_list):\r\n if j >= len(phone_list): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n # we don't want to change a pssing into a stay; as we have not process the combine this stay\r\n # this is possible when a stay that prevents two passing is mergeed into gps as gps points\r\n phone_list[k][9] = dur\r\n break\r\n if phone_list[j][6] == phone_list[i][6] and phone_list[j][7] == phone_list[i][7] and j < len(\r\n phone_list):\r\n j += 1\r\n else:\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n phone_list[k][9] = dur\r\n i = j\r\n for trace in phone_list: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] = -1\r\n if len(phone_list) == 1: phone_list[0][9] = -1\r\n\r\n # update check lable\r\n for i in range(len(phone_list)):\r\n if int(phone_list[i][5]) > spat_cell_split and int(phone_list[i][9]) < dur_constr \\\r\n and phone_list[i][-1] != 'checked':\r\n # passing phone observ\r\n phone_list[i].append('checked')\r\n\r\n # put those not checked together with gps\r\n user[d] = gps_list\r\n phone_list_check = []\r\n for i in range(len(phone_list)):\r\n if phone_list[i][-1] == 'checked':\r\n phone_list_check.append(phone_list[i])\r\n else:\r\n user[d].append(phone_list[i])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # find a stay which is not checked\r\n flag_phonestay_notchecked = False\r\n phonestay_left, phonestay_right = -1, -1\r\n for i in range(max(0, phonestay_right+1), len(user[d])):\r\n phonestay_left, phonestay_right = -1, -1\r\n if int(user[d][i][5]) > spat_cell_split \\\r\n and int(user[d][i][9]) >= dur_constr and user[d][i][-1] != 'checked':\r\n phonestay_left = phonestay_right\r\n phonestay_right = i\r\n if phonestay_left != -1 and phonestay_right != -1 \\\r\n and user[d][phonestay_left][9] == user[d][phonestay_right][9]:\r\n flag_phonestay_notchecked = True\r\n\r\n ## modified on 04152019\r\n if flag_phonestay_notchecked == False or len(phone_list) == 0: # if all phone observation are checked, end\r\n break\r\n # if they are not two consecutive observation\r\n if phonestay_right != phonestay_left + 1: # attention: only phonestay_left is addressed\r\n # not consecutive two observations\r\n if any([int(user[d][j][9]) >= dur_constr for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # found a gps stay in betw\r\n # print('23: found a gps stay in betw, just use one gps stay trade one phone stay')\r\n temp = user[d][phonestay_left][6:]\r\n user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # find close gps\r\n # print('24: do not found a gps stay in betw')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n if all([(phone_uncernt + int(user[d][j][5])) > 1000 * distance(user[d][j][3], user[d][j][4],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # total uncerty larger than distance\r\n # this case should be rare, as those close gps may be clustered\r\n # print('241: all gps falling betw are 
close with phone stay')\r\n temp = user[d][phonestay_left][3:] # copy neighbor gps\r\n user[d][phonestay_left][3:] = user[d][phonestay_left + 1][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n flag_changed = True\r\n else:\r\n # print('242: find a gps in betw,\r\n # which is far away with phone stay, contradic with a stay (with phone obsv)')\r\n temp = user[d][phonestay_left][6:]\r\n user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # if they are two consecutive traces\r\n # two consecutive observation\r\n # if phonestay_left != 0 and phonestay_right < len(user[d]) - 1:\r\n # ignore if they are at the beginning or the end of traj\r\n prev_gps = next_gps = 0 # find prevous and next gps\r\n found_prev_gps = False\r\n found_next_gps = False\r\n for prev in range(phonestay_left - 1, -1, -1):\r\n # if int(user[d][prev][5]) <= spat_cell_split: ########## changed on 04282018\r\n if int(user[d][prev][5]) <= spat_cell_split and int(user[d][prev][9]) >= dur_constr:\r\n prev_gps = prev\r\n found_prev_gps = True\r\n break\r\n for nxt in range(phonestay_right + 1, len(user[d])):\r\n if int(user[d][nxt][5]) <= spat_cell_split and int(user[d][nxt][9]) >= dur_constr:\r\n next_gps = nxt\r\n found_next_gps = True\r\n break\r\n\r\n if found_prev_gps and found_next_gps and user[d][prev_gps][6] == user[d][next_gps][6]:\r\n # this is a phone stay within a gps stay\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 \\\r\n and dist > 1000*spat_constr_gps and speed_dep < 200 and speed_retn < 200:\r\n # print('1111: distance larger than acc, and can travel, add phone stay, shorten gps stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else: # merge into gps stay\r\n # print('1112: distance less than acc, or cannot travel, merge into gps stay')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_prev_gps and found_next_gps and user[d][prev_gps][6] != user[d][next_gps][6]:\r\n phone_uncernt_l = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_l = int(user[d][prev_gps][8])\r\n dist_l = 
1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist_l - phone_uncernt_l - gps_uncernt_l) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n phone_uncernt_r = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_r = int(user[d][next_gps][8])\r\n dist_r = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist_r - phone_uncernt_r - gps_uncernt_r) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n comb_l = 0 #revised on 03202019 to pick up one gps stay to combine with; if spatial conti with multi\r\n comb_r = 0\r\n if (dist_l - phone_uncernt_l - gps_uncernt_l) < 0 \\\r\n or dist_l < 1000*spat_constr_gps or speed_dep > 200:\r\n comb_l = 1\r\n if (dist_r - phone_uncernt_r - gps_uncernt_r) < 0 \\\r\n or dist_r < 1000 * spat_constr_gps or speed_retn > 200:\r\n comb_r = 1\r\n if comb_l*comb_r == 1:\r\n if dist_l < dist_r:\r\n comb_r = 0\r\n else:\r\n comb_l = 0\r\n if comb_l:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif comb_r:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else:\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n elif found_prev_gps: # a gps stay #right# before\r\n # print('113: before phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_dep < 200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1132: dist>low_acc, add phone stay')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:\r\n # print('1131: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # 
user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_next_gps: # a gps stay #right# after\r\n # print('112: after phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][next_gps][8])\r\n dist = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_retn<200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1122: dist>low_acc, add phone stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:# remain phone observ, but use gps location\r\n # print('1121: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else: # if don't match any case, just add it\r\n # print('donot match any case, just add it (e.g., consecutive two phone stays)')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n\r\n\r\n # user[d].extend(phone_list_check)\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace[:])\r\n # remove passingby cellular traces\r\n i = 0\r\n while i<len(user[d]):\r\n if user[d][i][5] == 99 and float(user[d][i][9]) < dur_constr:\r\n del user[d][i]\r\n else:\r\n i+=1\r\n # remove passing traces\r\n ## Flag_changed = True\r\n ## while (Flag_changed):\r\n ## Flag_changed = False\r\n # i = 0\r\n # while i < len(user[d]):\r\n # if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr:\r\n # # Flag_changed = True\r\n # del user[d][i]\r\n # else:\r\n # i += 1\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n # update duration\r\n i = 0\r\n j = i\r\n while i < len(user[d]):\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n break\r\n if user[d][j][6] == user[d][i][6] and user[d][j][7] == user[d][i][7] and j < len(\r\n user[d]):\r\n j += 1\r\n else:\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n i = j\r\n for trace in user[d]: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] 
= -1\r\n if len(user[d]) == 1: user[d][0][9] = -1\r\n # remove and add back; because phone stays are distroyed as multiple, should be combined as one\r\n i = 0\r\n while i < len(user[d]):\r\n if user[d][i][2] == 'addedphonestay':\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # add back and sort\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace)\r\n\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # remove temp marks\r\n user[d]=[trace[:12] for trace in user[d]]\r\n\r\n # oscillation\r\n # modify grid\r\n for day in user.keys():\r\n for trace in user[day]:\r\n if float(trace[6]) == -1:\r\n found_stay = False\r\n if found_stay == False:\r\n trace[6] = trace[3] + '000' # in case do not have enough digits\r\n trace[7] = trace[4] + '000'\r\n digits = (trace[6].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2]) / 2)\r\n trace[6] = (trace[6].split('.'))[0] + '.' + digits\r\n # trace[6] = trace[6][:5] + str(int(trace[6][5]) / 2) # 49.950 to 49.952 220 meters\r\n digits = (trace[7].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2:4]) / 25)\r\n trace[7] = (trace[7].split('.'))[0] + '.' + digits\r\n # trace[7] = trace[7][:7] + str(int(trace[7][7:9]) / 25) # -122.3400 to -122.3425 180 meters\r\n\r\n # added to address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr)\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n # end addressing oscillation\r\n # those newly added stays should be combined with close stays\r\n user = cluster_incremental(user, spat_constr_gps, dur_constr=dur_constr)\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n # use only one record for one stay\r\n for d in user:\r\n i = 0\r\n while i < len(user[d]) - 1:\r\n if user[d][i + 1][6] == user[d][i][6] and user[d][i + 1][7] == user[d][i][7] \\\r\n and user[d][i + 1][9] == user[d][i][9] and int(user[d][i][9]) >= dur_constr:\r\n del user[d][i + 1]\r\n else:\r\n i += 1\r\n # mark stay\r\n staylist = set() # get unique staylist\r\n for d in user.keys():\r\n for trace in user[d]:\r\n if float(trace[9]) >= dur_constr:\r\n staylist.add((trace[6], trace[7]))\r\n else: # change back keep full trajectory: do not use center for those are not stays\r\n trace[6], trace[7], trace[8], trace[9] = -1, -1, -1, -1 # for non stay, do not give center\r\n staylist = list(staylist)\r\n for d in user.keys():\r\n for trace in user[d]:\r\n for i in range(len(staylist)):\r\n if trace[6] == staylist[i][0] and trace[7] == staylist[i][1]:\r\n trace[10] = 'stay' + str(i)\r\n break\r\n\r\n return user", "def edit_a_parcel(destination, id):\n query = \"\"\"UPDATE parcels SET destination = %s WHERE id = %s\"\"\"\n tuple =(destination , id)\n db.insert(query, tuple)", "def _action_done(self):\n for ml in self:\n if ml.lot_name_in == ml.lot_name_repeat:\n if ml.lot_id and ml.lot_name_in and ml.product_id.tracking == 'serial':\n ml.lot_id.name = ml.lot_name_in\n ml.lot_id.lot_name_chasis = ml.lot_name\n ml.lot_id.edicion = ml.move_id.edicion\n ml.lot_id.colorinterno = ml.move_id.colorinterno.id\n ml.lot_id.colorexterno = ml.move_id.colorexterno.id\n 
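                # Carry the key, box and disassembled-motor details (n_llaves,\n                # cant_llaves, n_caja, mot_desarmada) from the move line onto\n                # the production lot as well.\n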
ml.lot_id.n_llaves = ml.n_llaves\n ml.lot_id.cant_llaves = ml.cant_llaves\n ml.lot_id.n_caja = ml.n_caja\n ml.lot_id.mot_desarmada = ml.mot_desarmada\n ml.lot_name = ml.lot_name_in\n ml.lot_id.embarque = ml.picking_id.embarque\n for incidence in ml.incidencia:\n ml.lot_id.incidencia = [(4, incidence.id)]\n for incid in ml.lot_id.incidencia:\n incid.lot_id = ml.lot_id.id\n else:\n raise ValidationError(_(\n 'El numero de chasis \"%s\" no esta igual que el repetido') % ml.lot_name_in)\n\n super(StockMoveLine, self)._action_done()", "def reset_id(self, new_id):\n if self.id != new_id:\n self.id = new_id\n self.doc_js_name = new_id\n self.javascript_binding_name = new_id\n for member in self.operations:\n member.doc_js_interface_name = new_id\n for member in self.attributes:\n member.doc_js_interface_name = new_id\n for member in self.constants:\n member.doc_js_interface_name = new_id", "def destination_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"destination_id\")", "def _change_objs_to_IDs(self):\n if self.location:\n self.location = self.location.id\n if self.contents:\n self.contents = [obj.id for obj in self.contents]", "def import_data(self):\n\n # Import ordered names of origins\n origins_file = os.path.join(self.data_directory,'origins.txt')\n self.origins = np.loadtxt(origins_file,dtype=str,ndmin=1)\n\n # Import ordered names of destinations\n destinations_file = os.path.join(self.data_directory,'destinations.txt')\n self.destinations = np.loadtxt(destinations_file,dtype=str,ndmin=1)\n\n # Import origin supply\n originsupply_file = os.path.join(self.data_directory,'origin_supply.txt')\n self.origin_supply = np.loadtxt(originsupply_file,ndmin=1).astype('float64')\n\n # In case origin supply is not a list\n if not isinstance(self.origin_supply,(np.ndarray, np.generic)):\n self.origin_supply = np.array([self.origin_supply])\n\n # Import destination demand\n destinationdemand_file = os.path.join(self.data_directory,'destination_demand.txt')\n self.destination_demand = np.loadtxt(destinationdemand_file,ndmin=1).astype('float64')\n\n # In case destination demand is not a list\n if not isinstance(self.destination_demand,(np.ndarray, np.generic)):\n self.destination_demand = np.array([self.destination_demand])\n\n # Import origin locations\n originlocations_file = os.path.join(self.data_directory,'origin_locations.txt')\n self.origin_locations = np.loadtxt(originlocations_file,ndmin=1)\n\n # Import destination locations\n destinationlocations_file = os.path.join(self.data_directory,'destination_locations.txt')\n self.destination_locations = np.loadtxt(destinationlocations_file,ndmin=1)\n\n # Import initial and final destination sizes\n initialdestinationsizes_file = os.path.join(self.data_directory,'initial_destination_sizes.txt')\n self.initial_destination_sizes = np.loadtxt(initialdestinationsizes_file,ndmin=1)\n\n # In case destination sizes are not a list\n if not isinstance(self.initial_destination_sizes,(np.ndarray, np.generic)):\n self.initial_destination_sizes = np.array([self.initial_destination_sizes])\n\n # Import N,M\n self.N = self.origin_supply.shape[0]\n self.M = self.initial_destination_sizes.shape[0]\n\n # Import cost matrix\n costmatrix_file = os.path.join(self.data_directory,'cost_matrix.txt')\n self.cost_matrix = np.loadtxt(costmatrix_file).astype('float64')\n\n # Reshape cost matrix if necessary\n if self.N == 1:\n self.cost_matrix = np.reshape(self.cost_matrix[:,np.newaxis],(self.N,self.M))\n if self.M == 1:\n self.cost_matrix = 
np.reshape(self.cost_matrix[np.newaxis,:],(self.N,self.M))\n\n # Compute total initial and final destination sizes\n self.total_initial_sizes = np.sum(self.initial_destination_sizes)\n\n # Compute naive total cost\n self.total_cost = 0\n for i in range(self.N):\n for j in range(self.M):\n self.total_cost += self.cost_matrix[i,j]*(self.origin_supply[i]/self.N)", "def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2", "def set_uuid(self, value):\n if self.id.find(value) >= 0:\n return\n\n self._id = graph.append_uuid(self.id, value)\n\n for input_path in self.required_inputs:\n\n input_path.append_uuid(value)\n\n value_references = self.get_value_references(input_path)\n\n for value_reference in value_references.values():\n value_reference.append_uuid(value)\n\n for output_path in self.provided_outputs:\n output_path.append_uuid(value)", "def create_adjacent_stop_pairs(trips):\n\n stop_pairs = []\n\n sorted_trips = trips.sort_values(\n ['TRIPID', 'PROGRNUMBER']).reset_index(drop=True)\n\n # For each trip match up pairs of adjacent stops and calculate how long it\n # took to travel between them\n for trip_id in sorted_trips['TRIPID'].unique():\n\n # Filter down to just this trip\n trip = sorted_trips[sorted_trips['TRIPID'] ==\n trip_id].sort_values(['TRIPID', 'PROGRNUMBER'])\n\n stop_pairs_trip = trip.copy()\n stop_pairs_trip = stop_pairs_trip.rename(columns={\n 'ACTUALTIME_DEP': 'time_departure',\n 'STOPPOINTID': 'departure_stop'\n })\n\n # arrival stop and time are from the next row\n stop_pairs_trip.loc[:,\n 'arrival_stop'] = stop_pairs_trip.shift(-1).loc[:, 'departure_stop']\n stop_pairs_trip.loc[:,\n 'time_arrival'] = stop_pairs_trip.shift(-1).loc[:, 'ACTUALTIME_DEP']\n stop_pairs_trip['travel_time'] = stop_pairs_trip[\"time_arrival\"] - \\\n stop_pairs_trip[\"time_departure\"]\n\n # Only keep pairs with consecutive PROGRNUMBER\n stop_pairs_trip = stop_pairs_trip[stop_pairs_trip['PROGRNUMBER'] == (\n stop_pairs_trip.shift(-1)['PROGRNUMBER'] - 1)]\n\n # Add it to the DF of all stop pairs\n stop_pairs.append(stop_pairs_trip)\n\n return pd.concat(stop_pairs)", "def _add_to_fields(self, tag, data):\n # Addressee data\n if 'Recipient' == tag:\n names = data.split()\n for name in names:\n self.__fields.addressee_line['all_names'].append(name) \n\n # Probable box data\n # Strip out anything that's not a number since we might get some other\n # data inside here also. 
If the box # can be a subnumber (BOX 102-A) then\n # we'll end up putting everything in the # only.\n if 'USPSBoxGroupID' == tag or 'USPSBoxGroupType' == tag or \\\n 'USPSBoxID' == tag or 'USPSBoxType' == tag or \\\n 'OccupancyType' == tag or 'OccupancyIdentifier' == tag or \\\n 'SubaddressType' == tag or 'SubaddressIdentifier' == tag:\n box = re.search('\\d+', data)\n if box is not None:\n self.__fields.probable_box.append(box.group(0)) \n\n # Street data\n # Discarding street number prefix and suffix for now\n if 'AddressNumber' == tag:\n self.__fields.street_line['number'].append(data) \n if 'StreetName' == tag:\n self.__fields.street_line['street_name'].append(data) \n\n # City data\n if 'PlaceName' == tag:\n self.__fields.city_line['city'].append(data) \n if 'StateName' == tag:\n self.__fields.city_line['state'].append(data) \n if 'ZipCode' == tag:\n self.__fields.city_line['zip_code'].append(data)", "def make_fields_unique(self, fields):\n ...", "def update_from(self, src):\n changed = {}\n for name in self.action_fields:\n other_field = getattr(src, name)\n this_field = getattr(self, name)\n if other_field != this_field:\n changed[name] = other_field\n setattr(self, name, other_field)\n return changed", "def __init__(self, coordinates): \n\t\tsuper().__init__(coordinates)\n\t\tself.type = 'source'", "def test_datatransformationsetups_id_put(self):\n pass", "def _route_to_dest(self):\n # Ask the network\n self.route = self.network.determine_route(self.start, self.dest)\n # Set the index to where we are now\n self.route_index = 0", "def clean_fields(self, *args, **kwargs):\n if self.ipi_name:\n self.ipi_name = self.ipi_name.zfill(11)\n if self.ipi_base:\n self.ipi_base = self.ipi_base.replace(\".\", \"\").upper()\n self.ipi_base = re.sub(\n r\"(I).?(\\d{9}).?(\\d)\", r\"\\1-\\2-\\3\", self.ipi_base\n )\n return super().clean_fields(*args, **kwargs)", "def origin_list(self, origin_list: List[Origin]):\n\n self._origin_list = origin_list", "def make_times(self, start, end, delta):\n assert type(start) is float or type(start) is int, \\\n 'Start time not specified with float'\n assert type(end) is float or type(end) is int, \\\n 'End time not specified with a number'\n assert type(delta) is float or type(delta) is int, \\\n 'Time increment not specified with a number'\n assert end >= start, 'End time is before start time'\n self.target_times = []\n step = start\n while step <= end:\n self.target_times.append(step)\n step += delta", "def destination_zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"destination_zone_id\")", "def init_scratchpad(self, in1, in2):\n lst = [str(in1), str(in2)]\n for inpt in range(len(lst)):\n for i in range(1, len(lst[inpt]) + 1):\n self.scratchpad[inpt, -i] = int(lst[inpt][-i])\n # print(self.scratchpad)", "def copy_annotation(self, from_id, to_id):\n if from_id == -1:\n _from = self.global_external_references\n else:\n _from = self.segments.get_by_id(from_id).biological_annotation.external_references\n if to_id == -1:\n to = self.global_external_references\n else:\n to = self.segments.get_by_id(to_id).biological_annotation.external_references\n # the id for global notes\n for extref in _from:\n to.append(extref)", "def __init__( # pylint: disable=too-many-locals, too-many-arguments\r\n self, pair_type_str, origins, origin_id_field, destinations, dest_id_field,\r\n network_data_source, travel_mode, time_units, distance_units,\r\n max_routes, max_processes, out_routes, scratch_folder, reverse_direction=False,\r\n assigned_dest_field=None, 
od_pair_table=None, time_of_day=None, barriers=None\r\n ):\r\n pair_type = helpers.PreassignedODPairType[pair_type_str]\r\n self.origins = origins\r\n self.destinations = destinations\r\n self.out_routes = out_routes\r\n self.scratch_folder = scratch_folder\r\n time_units = helpers.convert_time_units_str_to_enum(time_units)\r\n distance_units = helpers.convert_distance_units_str_to_enum(distance_units)\r\n if not barriers:\r\n barriers = []\r\n self.max_processes = max_processes\r\n if not time_of_day:\r\n time_of_day = None\r\n else:\r\n time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT)\r\n\r\n # Initialize the dictionary of inputs to send to each OD solve\r\n self.rt_inputs = {\r\n \"pair_type\": pair_type,\r\n \"origins\": self.origins,\r\n \"origin_id_field\": origin_id_field,\r\n \"destinations\": self.destinations,\r\n \"dest_id_field\": dest_id_field,\r\n \"network_data_source\": network_data_source,\r\n \"travel_mode\": travel_mode,\r\n \"time_units\": time_units,\r\n \"distance_units\": distance_units,\r\n \"time_of_day\": time_of_day,\r\n \"reverse_direction\": reverse_direction,\r\n \"scratch_folder\": self.scratch_folder,\r\n \"assigned_dest_field\": assigned_dest_field,\r\n \"od_pair_table\": od_pair_table,\r\n \"barriers\": barriers,\r\n \"origin_transfer_fields\": [], # Populate later\r\n \"destination_transfer_fields\": [] # Populate later\r\n }\r\n\r\n # List of intermediate output OD Line files created by each process\r\n self.route_fcs = []\r\n\r\n # Construct OID ranges for chunks of origins and destinations\r\n if pair_type is helpers.PreassignedODPairType.one_to_one:\r\n # Chunks are of the format [first origin ID, second origin ID]\r\n self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes)\r\n elif pair_type is helpers.PreassignedODPairType.many_to_many:\r\n # Chunks are of the format [chunk_num, chunk_size]\r\n num_od_pairs = 0\r\n with open(od_pair_table, \"r\", encoding=\"utf-8\") as f:\r\n for _ in f:\r\n num_od_pairs += 1\r\n num_chunks = ceil(num_od_pairs / max_routes)\r\n self.chunks = [[i, max_routes] for i in range(num_chunks)]\r\n\r\n # Calculate the total number of jobs to use in logging\r\n self.total_jobs = len(self.chunks)\r\n\r\n self.optimized_cost_field = None", "def defineInputs(listOfVal, neurons):\n count = 0\n for row in listOfVal:\n for value in row:\n neurons[0][count].input = value\n count += 1", "def start(self):\n self.id_oi = 1\n self.param_target_distances = [5, 30]\n self.cells_no_links = [cell for cell in self.cell_list if cell.id != self.id_oi]\n self.cell_oi = [cell for cell in self.cell_list if cell.id == self.id_oi][0]", "def add(self, source, destination, port):\n logger.info('Adding path from %s to %s on port %s', source, destination, port)\n rules = [{\"IPProtocol\": \"tcp\", \"ports\": [int(port)]}]\n src_tags, dest_tags, src_ranges, _ = self._extract_service_info(\n source, destination)\n firewall_name = \"bu-%s-%s-%s\" % (destination.network.name, destination.name, port)\n try:\n firewall = self.driver.ex_get_firewall(firewall_name)\n if isinstance(source, CidrBlock):\n if not firewall.source_ranges:\n firewall.source_ranges = []\n firewall.source_ranges.append(str(source.cidr_block))\n logger.info(firewall.source_ranges)\n if isinstance(source, Service):\n if not firewall.source_tags:\n firewall.source_tags = []\n source_tag = \"%s-%s\" % (source.network.name, source.name)\n firewall.source_tags.append(source_tag)\n logger.info(firewall.source_tags)\n firewall = 
self.driver.ex_update_firewall(firewall)\n except ResourceNotFoundError:\n logger.info(\"Firewall %s not found, creating.\", firewall_name)\n firewall = self.driver.ex_create_firewall(firewall_name, allowed=rules,\n network=destination.network.name,\n source_ranges=src_ranges,\n source_tags=src_tags,\n target_tags=dest_tags)\n return Path(destination.network, source, destination, \"tcp\", port)", "def add_pins(self):\n\n for bit in range(self.addr_size):\n self.add_pin(\"addr_{0}\".format(bit),\"INPUT\")\n \n self.add_pin(\"wl_en\", \"INPUT\")\n\n for bit in range(self.num_rows):\n self.add_pin(\"wl_{0}\".format(bit),\"OUTPUT\")\n \n self.add_pin(\"vdd\",\"POWER\")\n self.add_pin(\"gnd\",\"GROUND\")", "def append_to_exclude_id(self):\n self.execute_sql(\n sql.insert_into_select_from(\n into_table=self.tmp_table_exclude_id,\n into_col_list=(self.IDCOLNAME, self.DMLCOLNAME),\n from_table=self.tmp_table_include_id,\n from_col_list=(self.IDCOLNAME, self.DMLCOLNAME),\n enable_outfile_compression=self.enable_outfile_compression,\n )\n )", "def add_inputs(self, inputs):\n self.inputs += inputs", "def save_model(self, request, obj, form, change):\n From = User.objects.get(id=obj.From.id)\n To = User.objects.get(id=obj.To.id)\n From.following_numIn()\n To.followed_numIn()\n obj.save()", "def change_names (fixed_structure, moving_structure, index):\n for chain in moving_structure[0]:\n chain.id = utilities.merged_list[index]+\"-\"+chain.id\n index +=1\n return (fixed_structure, moving_structure, index)" ]
[ "0.6808154", "0.65527296", "0.6456082", "0.5107205", "0.4909032", "0.4837304", "0.48278338", "0.47855443", "0.4784247", "0.47640702", "0.47625896", "0.46876457", "0.4668027", "0.46508414", "0.46428096", "0.46241915", "0.45510978", "0.45495996", "0.45336407", "0.45158657", "0.4504798", "0.44942322", "0.4493436", "0.44904965", "0.4486859", "0.44841206", "0.4482974", "0.44731227", "0.4460108", "0.44594157", "0.44525102", "0.44415516", "0.4397893", "0.43926954", "0.4384025", "0.43824643", "0.43779528", "0.43682483", "0.43671647", "0.43642086", "0.43627033", "0.43592492", "0.43371812", "0.43323416", "0.43267828", "0.432203", "0.43203378", "0.43191072", "0.43191072", "0.43154365", "0.43113536", "0.43088004", "0.43011913", "0.42952892", "0.42925268", "0.4290722", "0.4285203", "0.4284373", "0.42817247", "0.42649525", "0.42641598", "0.42636985", "0.42631635", "0.42589366", "0.4258647", "0.42576018", "0.42554167", "0.42551583", "0.42548364", "0.424878", "0.4244345", "0.42415962", "0.4240576", "0.42373842", "0.42333582", "0.42274302", "0.42246568", "0.4219295", "0.42186528", "0.42175454", "0.4215667", "0.4215613", "0.42122263", "0.42103562", "0.42073202", "0.4199625", "0.41957867", "0.41953772", "0.41924152", "0.41916582", "0.41912875", "0.41849086", "0.41841233", "0.4183971", "0.41797516", "0.4173219", "0.41719562", "0.41687962", "0.4165901", "0.41637674" ]
0.7586507
0
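
The duration-update pass near the end of the document above groups consecutive traces that share the same stay coordinates and stamps every trace in the group with the elapsed time between the group's first and last observation. A minimal standalone sketch of that grouping logic, assuming the same row layout as the snippet (timestamp at index 0, stay latitude/longitude at indices 6 and 7, duration written to index 9; the function name is hypothetical):

def update_durations(traces, lat_idx=6, lon_idx=7, dur_idx=9):
    # Sketch of the i/j two-pointer loop from the document above (assumed row layout).
    i = 0
    while i < len(traces):
        j = i
        # Advance j across the run of consecutive traces at the same (lat, lon) stay center.
        while (j < len(traces)
               and traces[j][lat_idx] == traces[i][lat_idx]
               and traces[j][lon_idx] == traces[i][lon_idx]):
            j += 1
        # Duration of the run: last timestamp in the run minus the first.
        dur = str(int(traces[j - 1][0]) - int(traces[i][0]))
        for k in range(i, j):
            traces[k][dur_idx] = dur
        i = j
    return traces
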
Create layers from the origins so the layer contains only the desired inputs for the chunk.
def _select_inputs_one_to_one(self, origins_criteria): # Select the origins with ObjectIDs in this range self.logger.debug("Selecting origins for this chunk...") origins_oid_field_name = arcpy.Describe(self.origins).oidFieldName origins_where_clause = ( f"{origins_oid_field_name} >= {origins_criteria[0]} " f"And {origins_oid_field_name} <= {origins_criteria[1]}" ) self.logger.debug(f"Origins where clause: {origins_where_clause}") self.input_origins_layer_obj = helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.origins, self.input_origins_layer, origins_where_clause], ).getOutput(0) num_origins = int(arcpy.management.GetCount(self.input_origins_layer_obj).getOutput(0)) self.logger.debug(f"Number of origins selected: {num_origins}") # Make a layer for destinations for quicker access helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.destinations, self.input_destinations_layer], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_layers(self):\n raise NotImplementedError", "def _make_layer(self, X, name, block, num_blocks, out_channels):\n\n for i in range(0, num_blocks):\n X = block(X, name = name + '_block{}'.format(i), out_channels=out_channels)\n return X", "def _build(self, inputs):\n\n # calculate how many slots we need from the 3 dimensions of the incoming conv layer (filter w/h plus depth)\n dims = inputs.get_shape().as_list()\n new_dim = 1\n for d in dims[1:]: # leave first axis as is (batch)\n new_dim = new_dim * d # multiply 'em up\n return tf.reshape(inputs, [-1, new_dim]) # -1=keep this dimension as is (it could be anything as this is the number of samples) and flatten the others", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n CustomTransformerBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n float_type=self.float_type,\n name=(\"layer_%d\" % i)))\n super(CustomTransformer, self).build(unused_input_shapes)", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n TransformerDecoderBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=(\"layer_%d\" % i)))\n super(TransformerDecoder, self).build(unused_input_shapes)", "def flatten_layer(inputs):\n return Spectrum(\n connections=[input_ for depth_slice in inputs\n for row in depth_slice\n for input_ in row]\n )", "def build_inputs_latlon(timesteps: int, dense_units: int):\n\n i = layers.Input(shape=(timesteps, 2), name=\"input_latlon\")\n unstacked = layers.Lambda(lambda x: tf.unstack(x, axis=1))(i)\n d = layers.Dense(\n units=dense_units,\n activation=\"relu\",\n use_bias=True,\n kernel_initializer=initializers.he_uniform(seed=config.SEED),\n name=\"embed_latlon\",\n )\n dense_latlon = [d(x) for x in unstacked]\n e = layers.Lambda(lambda x: tf.stack(x, axis=1))(dense_latlon)\n return (i, e)", "def _source_mask(self, ilens):\n x_masks = make_non_pad_mask(ilens)\n return x_masks.unsqueeze(-2)", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # 
may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is 
an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. 
Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def splitLayer(self, src, dire):\n\n (rowN, colN) = src.shape\n res = []\n ## UNSURE ABOUT SLICING\n if (dire == self.VERTICAL):\n # range(start, stop, step)\n for i in range(0, rowN - self.slideThickness, self.slideThickness):\n # croping is much easier in Python, it is basically just slicing\n tmp = src[i:i+self.slideThickness, 0:colN]\n \n res.append(tmp)\n\n else:\n\n for i in range(0, colN - self.slideThickness, self.slideThickness):\n # croping is much easier in Python, it is basically just slicing\n tmp = src[0:self.slideThickness, i:i+rowN]\n res.append(tmp)\n\n return res", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def create_model_input_default(self, rgb_images, all_boxes, all_masks):\n box_regions = []\n for i in range(all_boxes.size(0)):\n for j in range(all_boxes.size(1)):\n box = all_boxes[i][j]\n if is_dummy_box(box):\n continue\n image = rgb_images[i].cpu()\n\n # Apply object mask to the image\n if self.use_masks:\n image = image.clone()\n mask = all_masks[i][j].cpu()\n image = image * mask\n\n box_region = get_patch_from_image(box, image)\n box_regions.append(box_region)\n\n t = T.Compose([T.ToPILImage(), T.Resize((self.reduced_size, self.reduced_size)), T.ToTensor()])\n box_regions = [t(box.cpu()) for box in box_regions]\n return torch.stack(box_regions)", "def get_batch(self, src, geometries):\n\n batch = []\n for bounds in geometries.bounds.itertuples():\n bot, left = src.index(bounds[1], bounds[2])\n top, right = src.index(bounds[3], bounds[4])\n window = rasterio.windows.Window(left, top, right-left, bot-top)\n batch.append(src.read(indexes=self.indexes, window=window))\n if self.interleave == 'pixel' and len(batch[-1].shape) == 3:\n batch[-1] = np.moveaxis(batch[-1], 0, -1)\n for func,args,kwargs 
in self.preprocess.values():\n batch[-1] = func(batch[-1], *args, **kwargs)\n\n return np.stack(batch)", "def __init__(self,layer_def,input,input_shape,rs,clone_from=None):\n batch_size = int(layer_def.find(\"batchsize\").text)\n image_size = int(layer_def.find(\"imagesize\").text)\n image_channels = int(layer_def.find(\"imagechannels\").text)\n self.layer_name = layer_def.attrib[\"name\"]\n self.init(input, input_shape, batch_size,image_size,image_channels)", "def _make_layer(self, block, planes, blocks, stride=1):\r\n downsample = None\r\n\r\n # determine whether we need to downsample within shortcut\r\n # - if stride != 1, main branch is downsampled spatially\r\n # - in_planes != planes * expansion, this downsample method simply\r\n # expands the channels.\r\n if stride != 1 or self.in_planes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n conv1x1(\r\n self.in_planes,\r\n planes * block.expansion,\r\n stride=stride,\r\n groups=self.num_groups,\r\n indices=self.indices,\r\n mask=self.mask,\r\n ),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n\r\n layers = [None] * blocks\r\n layers[0] = block(\r\n self.in_planes,\r\n planes,\r\n stride=stride,\r\n downsample=downsample,\r\n groups=self.num_groups,\r\n indices=self.indices,\r\n mask=self.mask,\r\n )\r\n\r\n self.in_planes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers[i] = block(\r\n self.in_planes,\r\n planes,\r\n groups=self.num_groups,\r\n indices=self.indices,\r\n mask=self.mask,\r\n )\r\n\r\n return nn.Sequential(*layers)", "def _building_block_v1(inputs, filters, training, projection_shortcut, strides,\n data_format):\n shortcut = inputs\n\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n shortcut = batch_norm(inputs=shortcut, training=training,\n data_format=data_format)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n inputs = batch_norm(inputs, training, data_format)\n inputs += shortcut\n inputs = tf.nn.relu(inputs)\n\n return inputs", "def layers(self): # -> LayerView:\n ...", "def make_feature_layers(self, config):\n raise NotImplementedError", "def _make_layer(self, block, outputs, blocks, stride=1):\n downsample = None\n \n downsample = nn.Sequential(\n nn.Conv2d(self.inputs, outputs * 4,\n kernel_size=1, stride=stride, bias=False,\n dilation=self.dilation),\n nn.BatchNorm2d(outputs * 4),\n )\n\n layers = []\n layers.append(block(self.inputs, outputs, stride, downsample, self.dilation))\n self.inputs = outputs * 4\n for i in range(1, blocks):\n layers.append(block(self.inputs, outputs))\n\n layer = nn.Sequential(*layers)\n\n self.channels.append(outputs * 4)\n self.layers.append(layer)\n\n return layer", "def makeMultiLayerMask( Tables ):\n\n # get data from the corresponding tables\n ElasticModulusData = Tables[ \"ElasticModulus\" ].getRawData( )\n ShearModulusData = Tables[ \"ShearModulus\" ].getRawData( )\n PoissonRatiosData = Tables[ \"PoissonRatios\" ].getRawData( )\n MaterialPropertiesData = Tables[ \"MaterialProperties\" ].getRawData( )\n\n # we're using implicit method to get value from tables since the\n # the last entry represents a string of layers thickness\n GeometryPropertiesData = [ [ Tables[ \"GeometryProperties\" ].getValue( 0, 0 ),\n Tables[ 
\"GeometryProperties\" ].getValue( 0, 1 ),\n Tables[ \"GeometryProperties\" ].getValue( 0, 2 ) ] ]\n\n\n Tables[ \"ElasticModulus\" ].setBufferData( \"Input\", ElasticModulusData )\n Tables[ \"ShearModulus\" ].setBufferData( \"Input\", ShearModulusData )\n Tables[ \"PoissonRatios\" ].setBufferData( \"Input\", PoissonRatiosData )\n Tables[ \"MaterialProperties\" ].setBufferData( \"Input\", MaterialPropertiesData )\n Tables[ \"GeometryProperties\" ].setBufferData( \"Input\", GeometryPropertiesData )", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf", "def _building_block_v2(inputs, filters, training, \n projection_shortcut, strides,\n data_format):\n shortcut = inputs\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n ENDING_POINTS.append(inputs)\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=strides,\n data_format=data_format, time_stride=strides)\n\n inputs = batch_norm(inputs, training, data_format)\n inputs = tf.nn.relu(inputs)\n inputs = conv3d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, \n time_kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut", "def build_inputs(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n latent_dim,\n vocab_sizes,\n noise=False,\n mask=False,\n):\n latlon_input, latlon_embed = build_inputs_latlon(timesteps, latlon_dense_units)\n inputs = [latlon_input]\n embeddings = [latlon_embed]\n for key, val in vocab_sizes.items():\n cat_input, cat_embed = build_inputs_cat(timesteps, val, key)\n inputs.append(cat_input)\n embeddings.append(cat_embed)\n concat_input = layers.Concatenate(axis=2)(embeddings)\n unstacked = layers.Lambda(lambda x: tf.unstack(x, axis=1))(concat_input)\n d = layers.Dense(\n units=concat_dense_units,\n activation=\"relu\",\n kernel_initializer=initializers.he_uniform(seed=1),\n name=\"emb_trajpoint\",\n )\n if noise:\n noise_input = layers.Input(shape=(latent_dim,), name=\"input_noise\")\n inputs.append(noise_input)\n dense_outputs = [d(layers.Concatenate(axis=1)([x, noise_input])) for x in unstacked]\n else:\n dense_outputs = [d(x) for x in unstacked]\n if mask:\n inputs.append(layers.Input(shape=(timesteps, 1), name=\"input_mask\"))\n emb_traj = layers.Lambda(lambda x: tf.stack(x, axis=1))(dense_outputs)\n return (inputs, emb_traj)", "def make_layers(self):\r\n #assuming temporal field is always the first column!\r\n timeCol = self.data.columns[0]\r\n times = self.data[timeCol].unique() \r\n lat = self.data.lat.unique()\r\n lon = self.data.lon.unique()\r\n shape = (len(lat), len(lon))\r\n depths, hours = [None], [None]\r\n if 'depth' in self.data.columns:\r\n depths = self.data.depth.unique()\r\n if 'hour' in self.data.columns:\r\n hours = self.data.hour.unique()\r\n layers, titles = [], []\r\n for t in times:\r\n for h in hours:\r\n for z in depths:\r\n frame = self.data[self.data[timeCol] == t]\r\n\r\n if timeCol == 'time':\r\n sub = 
self.variable + self.unit + ', ' + str(datetime.strptime(t, '%Y-%m-%dT%H:%M:%S').date())\r\n else:\r\n sub = self.variable + self.unit + ', ' + timeCol + ': ' + str(t) \r\n\r\n if h != None:\r\n frame = frame[frame['hour'] == h]\r\n sub = sub + ', hour: ' + str(h) + 'hr'\r\n if z != None:\r\n frame = frame[frame['depth'] == z] \r\n sub = sub + ', depth: %2.2f' % z + ' [m]' \r\n try: \r\n layers.append(frame[self.variable].values.reshape(shape))\r\n titles.append(sub)\r\n except Exception as e:\r\n continue \r\n return layers, titles, lat, lon", "def define_layers(self):\n if self.d != 0:\n # If we have a fixed input size we use it do define the first layer\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n else:\n self.layers = [nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )]\n\n l = 0\n for l in self.layers_sizes():\n self.layers.append(nn.Sequential(nn.Linear(self.h - l, self.h - l - self.delta_h),\n nn.ReLU(), )) # nn.BatchNorm1d( self.h - l - self.delta_h, affine=False)))\n self.layers.append(nn.Sequential(nn.Linear(self.h - l - self.delta_h, 1), nn.ReLU()))", "def split_inputs(x, vocab_sizes):\n x_split = [x[:, :, 0:2]] # lat-lon feature\n start = 2\n for val in vocab_sizes.values():\n x_split.append(x[:, :, start : (start + val)])\n start += val\n # append mask\n x_split.append(x[:, :, [-1]])\n return x_split", "def _make_layer(self, block, layer_num, in_channel, out_channel):\r\n layers = []\r\n darkblk = block(in_channel, out_channel)\r\n layers.append(darkblk)\r\n\r\n for _ in range(1, layer_num):\r\n darkblk = block(out_channel, out_channel)\r\n layers.append(darkblk)\r\n\r\n return nn.SequentialCell(layers)", "def new_chunks(self, inputs, kws=None, **kwargs):\n return self._new_chunks(inputs, kws=kws, **kwargs)", "def _make_res_layer(self,\n block,\n planes,\n blocks,\n stride=1,\n norm_kwargs=None,\n layer_name=''):\n downsample = None\n\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.SequentialCell([\n nn.Conv3d(in_channels=self.inplanes,\n out_channels=planes * block.expansion,\n kernel_size=1,\n stride=(stride, stride, stride),\n has_bias=False),\n nn.BatchNorm3d(num_features=planes * block.expansion,\n **({} if norm_kwargs is None else norm_kwargs))])\n\n layers = []\n layers.append(block(inplanes=self.inplanes,\n planes=planes,\n stride=stride,\n downsample=downsample))\n\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(inplanes=self.inplanes, planes=planes))\n\n return nn.SequentialCell(layers)", "def _make_layer(self, block, planes, blocks, stride=1):\n\n if blocks == 0:\n return nn.Sequential(nn.Identity())\n norm_layer = self._norm_layer\n upsample = None\n if stride != 1:\n upsample = nn.Sequential(\n nn.UpsamplingNearest2d(scale_factor=2),\n SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),\n norm_layer(planes * block.expansion),\n )\n elif self.inplanes != planes * block.expansion:\n upsample = nn.Sequential(\n SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),\n norm_layer(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, upsample, norm_layer, self.large_kernel)]\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, norm_layer=norm_layer, large_kernel=self.large_kernel))\n\n return nn.Sequential(*layers)", "def _make_layer(self, block, planes, blocks, stride=1, sync=False):\n downsample = None\n 
if stride != 1 or self.in_planes != planes * block.expansion:\n downsample = nn.SequentialCell(\n conv1x1x1(self.in_planes, planes * block.expansion, stride),\n _bn(planes * block.expansion, sync)\n )\n\n layers = []\n layers.append(\n block(in_planes=self.in_planes,\n planes=planes,\n stride=stride,\n downsample=downsample,\n sync=sync)\n )\n self.in_planes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.in_planes, planes, sync=sync))\n\n return nn.SequentialCell(layers)", "def layer_construction(self, in_channel, out_channel, stride, num_blocks):\n layer = [ResBlock(in_channel,out_channel,stride)]\n for i in range(0, num_blocks-1):\n layer.append(ResBlock(out_channel * 4, out_channel))\n\n return nn.Sequential(*layer)", "def create_model_input_blacked(self, rgb_images, all_boxes, all_masks):\n images = []\n for i in range(all_boxes.size(0)):\n for j in range(all_boxes.size(1)):\n box = all_boxes[i][j]\n if is_dummy_box(box):\n continue\n\n image = rgb_images[i].cpu()\n\n # Apply object mask to the image\n if self.use_masks:\n mask = all_masks[i][j].cpu()\n image = image * mask\n\n image = TF.to_pil_image(image)\n image = blacken_image(image, box)\n image = TF.resize(image, (self.reduced_size, self.reduced_size))\n image = TF.to_tensor(image)\n images.append(image)\n return torch.stack(images)", "def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for s in strides:\n layers.append(block(self.in_planes, planes, s))\n self.in_planes = planes * block.expansion\n\n return nn.SequentialCell(*layers)", "def generate_legacy_layers(self, images_map, content_retriever):\n pass", "def generate_legacy_layers(self, images_map, content_retriever):\n pass", "def create(self):\n\n if rs.IsLayer(self.name):\n\n return self\n\n mom = \"\"\n \n for s in self.path:\n \n son = s if (mom == \"\") else (mom + \"::\" + s)\n\n mommy = None if mom == \"\" else mom\n\n if not rs.IsLayer(son):\n\n rs.AddLayer(s, color = None, visible = True, locked = False, parent = mommy)\n\n mom = son\n \n return self", "def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3,\n SE=False, expansion=3, stride=1):\n\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n\n # if stride > 1\n # or if block input planes != block output planes (only possible for first block in stack)\n # downsamples skip connection by 1x1-conv filter\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(\n conv1x1(inplanes, outplanes, stride=stride),\n norm_layer(outplanes)\n )\n\n layers = []\n\n # first block in stack can have stride > 1\n layers.append(block(inplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=stride, dropout=self._dropout, downsample=downsample,\n norm_layer=norm_layer, act_layer=act_layer))\n\n # other layers in stack\n # for each layer: inplanes = outplanes, stride=1, downsample=None\n for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=1, dropout=self._dropout, norm_layer=norm_layer,\n act_layer=act_layer))\n\n return nn.Sequential(*layers)", "def placeholder_inputs(sample_size, dim_input):\n\tinput_placeholder = tf.placeholder(tf.float32, [1, dim_input])\n\tobj_placeholder = tf.placeholder(tf.float32, [sample_size, dim_input])\n\n\treturn input_placeholder, obj_placeholder", "def __init__(self, incomings, a=tf.identity, name='MeanLayer'):\n 
super(MeanLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incomings = []\n self.incoming_shapes = []\n \n for incoming in incomings:\n incoming, incoming_shape = get_input(incoming)\n self.incomings.append(incoming)\n self.incoming_shapes.append(incoming_shape)\n \n self.a = a\n self.out = None\n self.name = name", "def decoder_block(layer_in, skip_in, n_filters, dropout=True):\n\n # weight initialization\n init = RandomNormal(stddev=0.02)\n # add upsampling layer\n g = Conv2DTranspose(n_filters, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)(layer_in)\n # add batch normalization\n g = BatchNormalization()(g, training=True)\n # conditionally add dropout\n if dropout:\n g = Dropout(0.5)(g, training=True)\n # merge with skip connection\n g = Concatenate()([g, skip_in])\n # relu activation\n g = Activation('relu')(g)\n\n return g", "def extract_features(self, inputs):\n\n # Stem\n x = relu_fn(self._bn0(self._conv_stem(inputs)))\n\n # Blocks\n for idx, block in enumerate(self._blocks):\n drop_connect_rate = self._global_params.drop_connect_rate\n if drop_connect_rate:\n drop_connect_rate *= float(idx) / len(self._blocks)\n x = block(x, drop_connect_rate=drop_connect_rate)\n\n # Head\n x = relu_fn(self._bn1(self._conv_head(x)))\n\n return x", "def get_4d(slice, copylayers=[], transparancy=0):\n assert slice.ndim < 3\n img = np.zeros(slice.shape)\n img = img[:, :, np.newaxis]\n img = np.repeat(img, 4, 2)\n transparancy = 255 - (255 * transparancy)\n img[:, :, -1] = transparancy\n for layer in copylayers:\n img[:, :, layer] = slice\n return(img)", "def layers(self, layers):\n self._layers = layers\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer", "def _select_inputs_many_to_many(self):\r\n # Select the origins present in this chunk of predefined OD pairs\r\n self.logger.debug(\"Selecting origins for this chunk...\")\r\n origins_in_chunk = set([pair[0] for pair in self.od_pairs])\r\n if isinstance(self.od_pairs[0][0], (int, float,)):\r\n origin_string = \", \".join([str(o_id) for o_id in origins_in_chunk])\r\n else:\r\n origin_string = \"'\" + \"', '\".join([str(o_id) for o_id in origins_in_chunk]) + \"'\"\r\n origins_where_clause = f\"{self.origin_id_field} IN ({origin_string})\"\r\n self.logger.debug(f\"Origins where clause: {origins_where_clause}\")\r\n self.input_origins_layer_obj = helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.MakeFeatureLayer,\r\n [self.origins, self.input_origins_layer, origins_where_clause]\r\n ).getOutput(0)\r\n num_origins = int(arcpy.management.GetCount(self.input_origins_layer).getOutput(0))\r\n self.logger.debug(f\"Number of origins selected: {num_origins}\")\r\n # Select the destinations present in this chunk of predefined OD pairs\r\n self.logger.debug(\"Selecting destinations for this chunk...\")\r\n dests_in_chunk = set([pair[1] for pair in self.od_pairs])\r\n if isinstance(self.od_pairs[0][1], (int, float,)):\r\n dest_string = \", \".join([str(d_id) for d_id in dests_in_chunk])\r\n else:\r\n dest_string = \"'\" + \"', '\".join([str(d_id) for d_id in dests_in_chunk]) + \"'\"\r\n dests_where_clause = f\"{self.dest_id_field} IN ({dest_string})\"\r\n self.logger.debug(f\"Destinations where clause: {dests_where_clause}\")\r\n self.input_dests_layer_obj = helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.MakeFeatureLayer,\r\n [self.destinations, self.input_destinations_layer, dests_where_clause]\r\n ).getOutput(0)\r\n 
num_dests = int(arcpy.management.GetCount(self.input_destinations_layer).getOutput(0))\r\n self.logger.debug(f\"Number of destinations selected: {num_dests}\")", "def inception_module(self, input_layer, name, cols):\n with tf.name_scope(name):\n col_layers = []\n col_layer_sizes = []\n for c, col in enumerate(cols):\n col_layers.append([])\n col_layer_sizes.append([])\n x = input_layer\n for l, layer in enumerate(col):\n ltype, args = layer[0], layer[1:]\n if ltype == 'conv': x = self.conv(x, *args)\n elif ltype == 'pool': x = self.pool(x, *args)\n elif ltype == 'share':\n # Share matching layer from previous column\n x = col_layers[c-1][l]\n else: raise KeyError(\"Invalid layer type for \" +\n \"inception module: '%s'\" % ltype)\n col_layers[c].append(x)\n catdim = 1\n catvals = [layers[-1] for layers in col_layers]\n x = tf.concat(catvals, catdim)\n return x", "def layer_offsets(self):\n ...", "def buildGenerator():\n inputs = tf.keras.layers.Input(shape=[256,256,3])\n\n down_stack = [\n downsample(64, 4, (None, 256, 256, 3), apply_batchnorm=False), # (bs, 128, 128, 64)\n downsample(128, 4, (None, 128, 128, 64)), # (bs, 64, 64, 128)\n downsample(256, 4, (None, 64, 64, 128)), # (bs, 32, 32, 256)\n downsample(512, 4, (None, 32, 32, 256)), # (bs, 16, 16, 512)\n downsample(512, 4, (None, 16, 16, 512)), # (bs, 8, 8, 512)\n downsample(512, 4, (None, 8, 8, 512)), # (bs, 4, 4, 512)\n downsample(512, 4, (None, 4, 4, 512)), # (bs, 2, 2, 512)\n downsample(512, 4, (None, 2, 2, 512)), # (bs, 1, 1, 512)\n ]\n\n up_stack = [\n upsample(512, 4, (None, 1, 1, 512), apply_dropout=True), # (bs, 2, 2, 1024)\n upsample(512, 4, (None, 2, 2, 1024), apply_dropout=True), # (bs, 4, 4, 1024)\n upsample(512, 4, (None, 4, 4, 1024), apply_dropout=True), # (bs, 8, 8, 1024)\n upsample(512, 4, (None, 8, 8, 1024)), # (bs, 16, 16, 1024)\n upsample(256, 4, (None, 16, 16, 1024)), # (bs, 32, 32, 512)\n upsample(128, 4, (None, 32, 32, 512)), # (bs, 64, 64, 256)\n upsample(64, 4, (None, 64, 64, 256)), # (bs, 128, 128, 128)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,\n strides=2,\n padding='same',\n kernel_initializer=initializer,\n activation='tanh') # (bs, 256, 256, 3)\n\n x = inputs\n\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = tf.keras.layers.Concatenate()([x, skip])\n\n x = last(x)\n\n return tf.keras.Model(inputs=inputs, outputs=x)", "def _add_placeholders(self):\n\n\n # encoder part\n self._enc_batch = tf.placeholder(tf.int32, [config.batch_size, None], name='enc_batch')\n self._enc_lens = tf.placeholder(tf.int32, [config.batch_size], name='enc_lens')\n self._enc_padding_mask = tf.placeholder(tf.float32, [config.batch_size, None], name='enc_padding_mask')\n\n\n # decoder part\n self._dec_batch = tf.placeholder(tf.int32, [config.batch_size, None], name='dec_batch')\n self._dec_lens = tf.placeholder(tf.int32, [config.batch_size], name='dec_lens')\n self._dec_padding_mask = tf.placeholder(tf.float32, [config.batch_size, None], name='dec_padding_mask')\n\n #targets\n self._targets = tf.placeholder(tf.float32, [config.batch_size, 2], name='target')", "def _update_layer_input_spaces(self):\n layers = self.layers\n layers[0].set_input_space(self.input_space)\n for i in xrange(1,len(layers)-1):\n layers[i].set_input_space(layers[i-1].get_output_space())\n\n maxout2_outputspace = layers[-2].get_output_space()\n for layer in 
layers[-1]:\n layer.set_input_space(maxout2_outputspace)", "def _create_layer() -> Image:\n data = np.random.random((32, 16))\n return Image(data)", "def building_block(inputs, filters, is_training, projection_shortcut, strides, data_format):\n\n\n shortcut = inputs\n inputs = batch_norm_relu(inputs, is_training, data_format)\n\n\n # The projection shortcut should come after the first batch norm and ReLU\n # since it performs a 1x1 convolution.\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=strides,\n data_format=data_format)\n\n inputs = batch_norm_relu(inputs, is_training, data_format)\n\n inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=3, strides=1,\n data_format=data_format)\n\n return inputs + shortcut", "def _create_chunks(opts, inputs, idx1, idx2):\n # idx2 = 75\n # idx1 = 71\n num_batch = idx2 - idx1\n # img1 = torch.zeros(num_batch, 1, 10, 224, 224)\n # img2 = torch.zeros(num_batch, 1, 10, 224, 224)\n # labels = torch.zeros(num_batch)\n\n feat1_list = []\n label_list = []\n for i in range(num_batch):\n curr_idx = i + idx1\n frames = range(curr_idx - 5, curr_idx + 5)\n temp1 = _load_chunk(opts, inputs, frames)\n feat1_list.append(temp1)\n\n temp_label = inputs[1][curr_idx, :].nonzero()\n if len(temp_label.size()) == 0:\n temp_label = 6\n else:\n if temp_label.size()[0] != 0:\n temp_label = temp_label[0][0]\n label_list.append(temp_label)\n\n feat1 = torch.cat(feat1_list, dim=0)\n labels = torch.LongTensor(label_list)\n return feat1, labels", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. 
call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps", "def copyLayerToMemory(in_lyr):\n\n import ogr\n\t\t\n drv_mem = ogr.GetDriverByName('Memory')\n shp_copy = drv_mem.CreateDataSource('temp')\n copy_lyr = shp_copy.CopyLayer(in_lyr, 'lyr_copy')\n in_lyr.ResetReading()\n copy_lyr.ResetReading()\n return shp_copy, copy_lyr", "def ApplyInputs(ss, en):\n ss.Net.InitExt()\n\n lays = [\"Input\", \"Output\"]\n for lnm in lays :\n ly = leabra.Layer(ss.Net.LayerByName(lnm))\n pats = en.State(ly.Nm)\n if pats != 0:\n ly.ApplyExt(pats)", "def build_boxes(inputs):\n center_x, center_y, width, height, confidence, classes = tf.split(inputs, [1, 1, 1, 1, 1, -1], axis=-1)\n\n top_left_x = center_x - width / 2\n top_left_y = center_y - height / 2\n bottom_right_x = center_x + width / 2\n bottom_right_y = center_y + height / 2\n return tf.concat([top_left_x, top_left_y, bottom_right_x, bottom_right_y, confidence, classes], axis=-1)", "def input_slice(self, inputs):\n result = []\n for i in range(int(len(inputs) / self.window_size)):\n result.append(inputs[i * self.window_size:(i + 1) * self.window_size])\n return result", "def _make_stack(self, block, planes, blocks, stride=1, dilate=False):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n\n # use dilation instead of striding if true\n if dilate:\n self.dilation *= stride\n stride = 1\n\n # apply conv-1x1 to input identity if stride > 1 or output channels != input channels for dim. 
matching\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion)\n )\n\n layers = []\n # first layer\n # input = batch_size x self.inplanes x H x H\n # output = batch_size x planes * block.expansion x H/stride x H/stride\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n # subsequent layers\n for _ in range(1, blocks):\n # input = output = batch_size x planes * block.expansion x H' x H'\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)", "def forward(self, inp):\n outp = []\n x = inp\n if self.resize_input:\n x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)\n if self.normalize_input:\n x = 2 * x - 1\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n if idx == self.last_needed_block:\n break\n return outp", "def call(self, inputs):\n squash = utils.BatchSquash(len(inputs.shape) - 2)\n # Reshape into single batch dim.\n\n net = squash.flatten(inputs)\n for layer in self.layers:\n net = layer(net)\n\n # Restore squashed batch dimensions.\n net = squash.unflatten(net)\n\n # Flatten out all non-batch dimensions.\n net = tf.reshape(net, tf.concat([inputs.shape[:-2], [-1]], axis=0))\n return net", "def build_simple_block(self, incoming_layer, names,\n num_filters, filter_size, stride, pad,\n use_bias=False, nonlin=rectify):\n net = []\n net.append((\n names[0],\n ConvLayer(incoming_layer, num_filters, filter_size, pad, stride,\n flip_filters=False, nonlinearity=None) if use_bias\n else ConvLayer(incoming_layer, num_filters, filter_size, stride, pad, b=None,\n flip_filters=False, nonlinearity=None)\n ))\n \n net.append((\n names[1],\n BatchNormLayer(net[-1][1])\n ))\n if nonlin is not None:\n net.append((\n names[2],\n NonlinearityLayer(net[-1][1], nonlinearity=nonlin)\n ))\n \n return dict(net), net[-1][0]", "def layers(self, features: Dict[str, torch.Tensor]) -> torch.Tensor:\n x: torch.Tensor\n for i, f in enumerate(self.in_features):\n if i == 0:\n x = self.scale_heads[i](features[f])\n else:\n x = x + self.scale_heads[i](features[f])\n x = self.predictor(x)\n decoded_images = torch.nn.functional.interpolate(\n x, scale_factor=self.common_stride, mode=\"bilinear\", align_corners=False\n )\n return decoded_images", "def make_layer(basic_block, num_basic_block, **kwarg):\n layers = []\n for _ in range(num_basic_block):\n layers.append(basic_block(**kwarg))\n return nn.Sequential(*layers)", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert 
len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n\n blocks = [self.layer_11,\n self.layer_12,\n self.layer_13,\n self.layer_14,\n self.layer_15]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def _init_layers(self):\n cls_branch = []\n for _ in range(self.num_reg_fcs):\n cls_branch.append(Linear(self.embed_dims, self.embed_dims))\n cls_branch.append(nn.LayerNorm(self.embed_dims))\n cls_branch.append(nn.ReLU(inplace=True))\n cls_branch.append(Linear(self.embed_dims, self.cls_out_channels))\n fc_cls = nn.Sequential(*cls_branch)\n\n reg_branch = []\n for _ in range(self.num_reg_fcs):\n reg_branch.append(Linear(self.embed_dims, self.embed_dims))\n reg_branch.append(nn.ReLU())\n reg_branch.append(Linear(self.embed_dims, self.code_size))\n reg_branch = nn.Sequential(*reg_branch)\n\n past_traj_reg_branch = []\n for _ in range(self.num_reg_fcs):\n past_traj_reg_branch.append(\n Linear(self.embed_dims, self.embed_dims))\n past_traj_reg_branch.append(nn.ReLU())\n past_traj_reg_branch.append(\n Linear(self.embed_dims, (self.past_steps + self.fut_steps)*2))\n past_traj_reg_branch = nn.Sequential(*past_traj_reg_branch)\n\n def _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n # last reg_branch is used to generate proposal from\n # encode feature map when as_two_stage is True.\n num_pred = (self.transformer.decoder.num_layers + 1) if \\\n self.as_two_stage else self.transformer.decoder.num_layers\n\n if self.with_box_refine:\n self.cls_branches = _get_clones(fc_cls, num_pred)\n self.reg_branches = _get_clones(reg_branch, num_pred)\n self.past_traj_reg_branches = _get_clones(\n past_traj_reg_branch, num_pred)\n else:\n self.cls_branches = nn.ModuleList(\n [fc_cls for _ in range(num_pred)])\n self.reg_branches = nn.ModuleList(\n [reg_branch for _ in range(num_pred)])\n self.past_traj_reg_branches = nn.ModuleList(\n [past_traj_reg_branch for _ in range(num_pred)])\n if not self.as_two_stage:\n self.bev_embedding = nn.Embedding(\n self.bev_h * self.bev_w, self.embed_dims)", "def inception_v4_base(sample_shape, final_endpoint='Inception/Mixed_7d',\n aux_endpoint='Inception/Mixed_6e'):\n name = 'InceptionV4'\n end_points = {}\n net = ffnet.FeedForwardNet()\n\n def final_aux_check(block_name):\n if block_name == final_endpoint:\n return True\n if block_name == aux_endpoint:\n aux = aux_endpoint + '-aux'\n end_points[aux] = net.add(Split(aux, 2))\n return False\n\n # 299 x 299 x 3\n blk = 
name + '/Conv2d_1a_3x3'\n net.add(Conv2D(blk, 32, 3, 2, border_mode='VALID', use_bias=False,\n input_sample_shape=sample_shape))\n net.add(BatchNormalization('%s/BatchNorm' % blk))\n end_points[blk] = net.add(Activation('%s/relu' % blk))\n if final_aux_check(blk):\n return net, end_points\n\n # 149 x 149 x 32\n blk = name + '/Conv2d_2a_3x3'\n end_points[blk] = conv2d(net, blk, 32, 3, border_mode='VALID')\n if final_aux_check(blk):\n return net, end_points\n\n # 147 x 147 x 32\n blk = name + '/Conv2d_2b_3x3'\n end_points[blk] = conv2d(net, blk, 64, 3)\n if final_aux_check(blk):\n return net, end_points\n\n # 147 x 147 x 64\n blk = name + '/Mixed_3a'\n s = net.add(Split('%s/Split' % blk, 2))\n br0 = net.add(MaxPooling2D('%s/Branch_0/MaxPool_0a_3x3' % blk, 3, 2,\n border_mode='VALID'), s)\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_3x3' % blk, 96, 3, 2,\n border_mode='VALID', src=s)\n end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])\n if final_aux_check(blk):\n return net, end_points\n\n # 73 x 73 x 160\n blk = name + '/Mixed_4a'\n s = net.add(Split('%s/Split' % blk, 2))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 64, 1, src=s)\n br0 = conv2d(net, '%s/Branch_0/Conv2d_1a_3x3' % blk, 96, 3,\n border_mode='VALID')\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 64, 1, src=s)\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, 64, (1, 7))\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, 64, (7, 1))\n br1 = conv2d(net, '%s/Branch_1/Conv2d_1a_3x3' % blk, 96, 3,\n border_mode='VALID')\n end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])\n if final_aux_check(blk):\n return net, end_points\n\n # 71 x 71 x 192\n blk = name + '/Mixed_5a'\n s = net.add(Split('%s/Split' % blk, 2))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_1a_3x3' % blk, 192, 3, 2,\n border_mode='VALID', src=s)\n br1 = net.add(MaxPooling2D('%s/Branch_1/MaxPool_1a_3x3' % blk, 3, 2,\n border_mode='VALID'), s)\n end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])\n if final_aux_check(blk):\n return net, end_points\n\n # 35 x 35 x 384\n # 4 x Inception-A blocks\n for idx in range(4):\n blk = name + '/Mixed_5' + chr(ord('b') + idx)\n end_points[blk] = block_inception_a(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n # 35 x 35 x 384\n # Reduction-A block\n blk = name + '/Mixed_6a'\n end_points[blk] = block_reduction_a(blk, net)\n if final_aux_check(blk):\n return net, end_points[blk], end_points\n\n # 17 x 17 x 1024\n # 7 x Inception-B blocks\n for idx in range(7):\n blk = name + '/Mixed_6' + chr(ord('b') + idx)\n end_points[blk] = block_inception_b(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n # 17 x 17 x 1024\n # Reduction-B block\n blk = name + '/Mixed_7a'\n end_points[blk] = block_reduction_b(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n # 8 x 8 x 1536\n # 3 x Inception-C blocks\n for idx in range(3):\n blk = name + '/Mixed_7' + chr(ord('b') + idx)\n end_points[blk] = block_inception_c(blk, net)\n if final_aux_check(blk):\n return net, end_points\n\n assert final_endpoint == blk, \\\n 'final_enpoint = %s is not in the net' % final_endpoint", "def create_structure(self):\n\n float_type = numpy.dtype(theano.config.floatX).type\n\n layer_input = tensor.concatenate([x.output for x in self._input_layers],\n axis=2)\n # Pass rate is the probability of not dropping a unit.\n pass_rate = 1.0 - self._dropout_rate\n pass_rate = float_type(pass_rate)\n sample = self._network.random.uniform(size=layer_input.shape)\n mask 
= tensor.cast(sample < pass_rate, theano.config.floatX)\n # Multiply the output by the inverse of the pass rate before dropping\n # units to compensate the scaling effect.\n scale_correction = 1.0 / pass_rate\n scale_correction = float_type(scale_correction)\n self.output = tensor.switch(self._network.is_training,\n layer_input * scale_correction * mask,\n layer_input)", "def run(layers):", "def create_start_data(self):\n\t\tdef inputMesh(feature_size):\n\t\t\tc1= np.expand_dims(np.array([0,-0.9]),0)\n\t\t\tc2= np.expand_dims(np.array([-0.9,0.9]),0)\n\t\t\tc3= np.expand_dims(np.array([0.9,0.9]),0)\n\t\t\tx1 = np.expand_dims(np.pad(np.array([0,-0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx2 = np.expand_dims(np.pad(np.array([-0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tx3 = np.expand_dims(np.pad(np.array([0.9,0.9]),(0,feature_size-2),'constant',constant_values=(0,0)),0)\n\t\t\tedge_index = np.transpose(np.array([[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])) # COO format\n\t\t\treturn np.concatenate((c1,c2,c3),axis=0), np.concatenate((x1,x2,x3),axis=0),edge_index\n\n\t\tc, x, edge_index = inputMesh(self.params.feature_size)# x is c with zeros appended, x=f ..pixel2mesh\n\t\tdata_list_x = []\n\t\tdata_list_c = []\n\t\tdata_list_pid = []\n\t\tfor i in range(self.params.batch_size):\n\t\t\tdata_list_x.append(Data(x=torch.Tensor(x).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_c.append(Data(x=torch.Tensor(c).type(dtypeF), edge_index=torch.Tensor(edge_index).type(dtypeL)))\n\t\t\tdata_list_pid.append(Data(x=torch.zeros(c.shape[0],1).type(dtypeL).requires_grad_(False)))\n\t\tbatch_x = Batch.from_data_list(data_list_x)\n\t\tbatch_c = Batch.from_data_list(data_list_c)\n\t\tbatch_pid = Batch.from_data_list(data_list_pid)\n\t\treturn batch_x, batch_c, batch_pid", "def placeholder_inputs(batch_size):\n # Note that the shapes of the placeholders match the shapes of the full\n # image and label tensors, except the first dimension is now batch_size\n # rather than the full size of the train or test data sets.\n images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,\n c3d_model.NUM_FRAMES_PER_CLIP,\n c3d_model.CROP_SIZE,\n c3d_model.CROP_SIZE,\n c3d_model.CHANNELS))\n labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))\n return images_placeholder, labels_placeholder", "def __init__(self,\n image_shape,\n z_dim,\n num_blocks,\n action_space,\n hiddens=[],\n dropout=False,\n subsampling=True):\n super().__init__()\n self.image_shape = image_shape\n self.layers = nn.ModuleList()\n self.layers.append(\n ImageInputNetwork(image_shape, z_dim, num_blocks, dropout,\n subsampling))\n self.layers.append(nn.Sequential(\n nn.Linear(128, z_dim),\n nn.ReLU()\n ))\n self.layers.append(nn.Sequential(nn.Linear(z_dim, action_space.n),\n nn.Softmax(1)))\n self.layers.append(nn.Linear(z_dim, 1))", "def __init__(self,\n image_shape,\n z_dim,\n num_blocks,\n action_space,\n hiddens=[],\n dropout=False,\n subsampling=True):\n super().__init__()\n self.layers = nn.ModuleList()\n self.layers.append(\n ImageInputNetwork(image_shape, z_dim, num_blocks, dropout,\n subsampling))\n self.layers.append(ActorNet(action_space, z_dim, hiddens))", "def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = 
np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def __init__(\n self,\n image_size: tuple,\n out_channels: int,\n num_channel_initial: int,\n extract_levels: List[int],\n out_kernel_initializer: str,\n out_activation: str,\n name: str = \"LocalNet\",\n **kwargs,\n ):\n super().__init__(\n image_size=image_size,\n out_channels=out_channels,\n num_channel_initial=num_channel_initial,\n out_kernel_initializer=out_kernel_initializer,\n out_activation=out_activation,\n name=name,\n **kwargs,\n )\n\n # save parameters\n self._extract_levels = extract_levels\n self._extract_max_level = max(self._extract_levels) # E\n self._extract_min_level = min(self._extract_levels) # D\n\n # init layer variables\n num_channels = [\n num_channel_initial * (2 ** level)\n for level in range(self._extract_max_level + 1)\n ] # level 0 to E\n self._downsample_blocks = [\n layer.DownSampleResnetBlock(\n filters=num_channels[i], kernel_size=7 if i == 0 else 3\n )\n for i in range(self._extract_max_level)\n ] # level 0 to E-1\n self._conv3d_block = layer.Conv3dBlock(filters=num_channels[-1]) # level E\n\n self._upsample_blocks = [\n layer.LocalNetUpSampleResnetBlock(num_channels[level])\n for level in range(\n self._extract_max_level - 1, self._extract_min_level - 1, -1\n )\n ] # level D to E-1\n\n self._extract_layers = [\n # if kernels are not initialized by zeros, with init NN, extract may be too large\n layer.Conv3dWithResize(\n output_shape=image_size,\n filters=out_channels,\n kernel_initializer=out_kernel_initializer,\n activation=out_activation,\n )\n for _ in self._extract_levels\n ]", "def build(self, input_tensors, is_training, lengths=None, hparams=None):\n input_tensor = input_tensors[-1]\n net = input_tensor\n while True:\n plate_dimension = net.get_shape()[2]\n if plate_dimension < self._kernel_size:\n break\n\n net = tf_slim.conv2d(\n net,\n min(get_channel_dim(net) * 2, self._max_channels),\n kernel_size=self._kernel_size,\n stride=self._strides,\n padding='same')\n net = tf.nn.leaky_relu(net)\n\n net = tf.keras.layers.Flatten(name='flatten')(net)\n return input_tensors + [net]", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def __init__(self, image_shape, z_dim, num_blocks, dropout=False,\n subsampling=True, 
embedding=128):\n super().__init__()\n\n self.image_shape = image_shape\n self.z_dim = z_dim\n self.num_blocks = num_blocks\n\n self.layers = nn.ModuleList()\n\n channels = self.image_shape[2]\n shape_x = self.image_shape[0]\n shape_y = self.image_shape[1]\n\n if subsampling:\n assert shape_x % (2 ** num_blocks) == 0, \\\n 'Image is not evenly divisible by max pooling layer'\n assert shape_y % (2 ** num_blocks) == 0, \\\n 'Image is not evenly divisible by max pooling layer'\n\n for i in range(num_blocks):\n self.layers.append(\n nn.Conv2d(channels, channels * 4, 3, padding=1))\n self.layers.append(nn.ReLU())\n self.layers.append(nn.MaxPool2d(2, 2))\n\n channels = channels * 4\n shape_x = int(shape_x / 2)\n shape_y = int(shape_y / 2)\n\n self.linear_input = channels * shape_x * shape_y\n self.linear = nn.Linear(channels * shape_x * shape_y, z_dim)\n\n else:\n block_shape = [8, 4, 3]\n block_strides = [4, 2, 1]\n filters = [16, 32, 64]\n for i in range(num_blocks):\n self.layers.append(\n nn.Conv2d(channels, filters[i], block_shape[i],\n stride=block_strides[i]))\n self.layers.append(nn.ReLU())\n\n channels = filters[i]\n # calculation taken from https://pytorch.org/docs/stable\n # nn.html#torch.nn.Conv2d\n shape_x = int(((shape_x - (block_shape[i] - 1) - 1) /\n block_strides[i]) + 1)\n shape_y = int(((shape_y - (block_shape[i] - 1) - 1) /\n block_strides[i]) + 1)\n\n self.linear_input = int(channels * shape_x * shape_y)\n self.linear = nn.Linear(self.linear_input, embedding)", "def map_back(self, output):\r\n origin_images = output['origin_image']\r\n image_info = output['image_info']\r\n bboxes = self.tensor2numpy(output['dt_bboxes'])\r\n batch_size = len(image_info)\r\n\r\n output_list = []\r\n for b_ix in range(batch_size):\r\n\r\n origin_image = origin_images[b_ix]\r\n if origin_image.ndim == 3:\r\n origin_image_h, origin_image_w, _ = origin_image.shape\r\n else:\r\n origin_image_h, origin_image_w = origin_image.shape\r\n\r\n img_info = image_info[b_ix]\r\n unpad_image_h, unpad_image_w = img_info[:2]\r\n scale_h, scale_w = _pair(img_info[2])\r\n keep_ix = np.where(bboxes[:, 0] == b_ix)[0]\r\n\r\n # resize bbox\r\n img_bboxes = bboxes[keep_ix]\r\n img_bboxes[:, 1] /= scale_w\r\n img_bboxes[:, 2] /= scale_h\r\n img_bboxes[:, 3] /= scale_w\r\n img_bboxes[:, 4] /= scale_h\r\n img_bboxes = img_bboxes[:, 1:]\r\n\r\n img_output = {\r\n 'image': origin_image,\r\n 'image_info': img_info,\r\n 'dt_bboxes': img_bboxes\r\n }\r\n output_list.append(img_output)\r\n\r\n return output_list", "def create_inputs_norb(path, is_train: bool,batch_size,epochs):\n\n # Create batched dataset\n dataset = input_fn(path, is_train,batch_size=batch_size, epochs=epochs)\n\n # Create one-shot iterator\n iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)\n\n img, lab, cat, elv, azi, lit = iterator.get_next()\n\n output_dict = {'image': img,\n 'label': lab,\n 'category': cat,\n 'elevation': elv,\n 'azimuth': azi,\n 'lighting': lit}\n\n return output_dict", "def createInputPointCloud(side_size:int, center_x:int, center_y:int, nb_pts:int):\n in_pts = np.zeros(shape=(nb_pts,2), dtype=np.float32)\n side_nb_pts = nb_pts / 4\n ds = side_size / side_nb_pts\n for i in range(nb_pts):\n if i < side_nb_pts:\n in_pts[i][0] = center_x + i * ds - side_size * 0.5\n in_pts[i][1] = center_y + side_size / 2\n elif i < 2 * side_nb_pts:\n in_pts[i][0] = center_x + side_size / 2\n in_pts[i][1] = center_y + (i - 1*side_nb_pts) * ds - side_size * 0.5\n elif i < 3 * side_nb_pts:\n in_pts[i][0] = center_x + (i - 2*side_nb_pts) * ds - 
side_size * 0.5\n in_pts[i][1] = center_y - side_size / 2\n else:\n in_pts[i][0] = center_x - side_size / 2\n in_pts[i][1] = center_y + (i - 3*side_nb_pts) * ds - side_size * 0.5\n return in_pts", "def add_objects_from_layer(self, layer):\n\n objects = layer.get_allowed_geometry()\n\n typ_plural = layer.path[1]\n typ_sofi = gs.plural_to_sofi[typ_plural]\n\n for obj in objects:\n\n # !! REFACTOR TO CALL PROGRAMATICALLY -> ELIMINATE CONDITIONALS !!\n\n if typ_plural in gs.point_elements:\n\n self.add_node(obj, typ_sofi, layer)\n\n if typ_plural in gs.line_elements:\n\n self.add_line_element(obj, typ_sofi, layer)\n\n if typ_plural in gs.spring_elements:\n\n self.add_spring_sn(obj, typ_sofi, layer) \n\n if typ_plural in gs.area_elements:\n\n self.add_area_element(obj, typ_sofi, layer) \n\n return self", "def __init__(self, incomings, a=tf.identity, name='ConcatLayer'):\n super(ConcatLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incomings = []\n self.incoming_shapes = []\n \n for incoming in incomings:\n incoming, incoming_shape = get_input(incoming)\n self.incomings.append(incoming)\n self.incoming_shapes.append(incoming_shape)\n self.name = name\n self.a = a", "def _construct_imggen(self):\n z = Input(shape=(self.latent_dim,))\n z0 = Dense(512)(z)\n deconv_block = deconvnet(z0, self.img_dim, self.dec_param,\n activation='elu')\n gen_img = bn_deconv_layer(deconv_block, self.img_dim[-1], 4, 2,\n activation='sigmoid', batchnorm=False)\n\n generator = Model(z, gen_img)\n return generator", "def _make_targets(self, pargs, origin):\n targets = []\n for pi, ai, co, ci, pa, pch, mask in pargs:\n if len(pi) < 5:\n continue\n\n tr = Target()\n tr.origin = origin\n tr.poly_points = pi\n # tr.bounding_rect = br\n tr.area = ai\n tr.min_enclose_area = co\n tr.centroid = ci\n tr.pactual = pa\n tr.pconvex_hull = pch\n tr.mask = mask\n targets.append(tr)\n\n return targets", "def generate_images_pred(self, inputs, outputs):\n for scale in self.scales:\n disp = outputs[(\"disp\", scale)]\n disp = F.interpolate(\n disp, [self.height, self.width], mode=\"bilinear\", align_corners=False)\n source_scale = 0\n\n _, depth = disp_to_depth(disp, self.min_depth, self.max_depth)\n\n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.frame_ids[1:]):\n\n T = outputs[(\"cam_T_cam\", 0, frame_id)]\n\n # from the authors of https://arxiv.org/abs/1712.00175\n # mean-normalized inverse depth from [62] to discourage shrinking of the estimated depth\n\n axisangle = outputs[(\"axisangle\", 0, frame_id)]\n translation = outputs[(\"translation\", 0, frame_id)]\n\n inv_depth = 1 / depth\n mean_inv_depth = inv_depth.mean(3, True).mean(2, True)\n\n T = transformation_from_parameters(\n axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0)\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\")\n\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]", "def layers(self, x):\n raise NotImplementedError", "def forward(self, src, mask):\n bs = src.shape[0]\n src = src.permute(2, 0, 1)\n m = src \n enc_embed = self.enc_embed.weight.unsqueeze(1).repeat(1, 
bs, 1)\n for layer in self.encoder_layers:\n m = layer(m,\n pos=enc_embed,\n src_mask = mask\n )\n return m.permute(1, 2, 0), enc_embed.permute(1, 2, 0)", "def _make_slices(self, img_stacks, mask_stacks, patient_id, out_pth):\n img_file_name = \"{patient}_{id}_stack\"\n msk_file_name = \"{patient}_{id}_stack_mask\"\n for s in range(1, img_stacks.shape[0] + 1):\n if s < self.stack_size or img_stacks.shape[0] - s <= self.stack_size:\n continue\n slice_idx = np.arange(-1, self.stack_size-1) + s\n im_block = img_stacks[slice_idx,:, :, 1]\n msk_block = mask_stacks[s, :, :, 1] # Output is the mask for the center channel\n np.save(os.path.join(out_pth, img_file_name.format(patient=patient_id, id=s)), im_block)\n np.save(os.path.join(out_pth, msk_file_name.format(patient=patient_id, id=s)), msk_block)", "def make_layer(block, num_blocks, **kwarg):\n layers = []\n for _ in range(num_blocks):\n layers.append(block(**kwarg))\n return nn.Sequential(*layers)", "def make_layer(self, out_channels, num_blocks, first_block=False):\n layers = []\n for i in range(num_blocks):\n first_block = first_block if i == 0 else False\n combine_mode = 'concat' if i == 0 else 'add'\n layers.append(ShuffleUnit(self.in_channels, out_channels, groups=self.groups, first_block=first_block, combine=combine_mode, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp))\n self.in_channels = out_channels\n return nn.Sequential(*layers)", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def __call__(self, **kwargs):\n segname = 'block_{}_expand_relu'\n blocks = [13, 6, 3, 1]\n skips = [self._backbone.get_layer(segname.format(i)) for i in blocks]\n backbone_out = self._backbone.get_layer('block_16_project')\n\n p5 = self._fpn_block(backbone_out.output, skips[0].output)\n p4 = self._fpn_block(p5, skips[1].output)\n p3 = self._fpn_block(p4, skips[2].output)\n p2 = self._fpn_block(p3, skips[3].output)\n\n s5 = self._conv_block(p5, 128)\n s4 = self._conv_block(p4, 128)\n s3 = self._conv_block(p3, 128)\n s2 = self._conv_block(p2, 128)\n\n s5 = tf.keras.layers.UpSampling2D(\n size=(8, 8),\n interpolation='nearest'\n )(s5)\n\n s4 = tf.keras.layers.UpSampling2D(\n size=(4, 4),\n interpolation='nearest'\n )(s4)\n\n s3 = tf.keras.layers.UpSampling2D(\n size=(2, 2),\n interpolation='nearest'\n )(s3)\n\n concat = [s5, s4, s3, s2]\n x = tf.keras.layers.Concatenate()(concat)\n x = tf.keras.layers.Conv2D(\n 64,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.Conv2D(\n 1,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n out = tf.keras.layers.Activation('sigmoid')(x)\n model = tf.keras.models.Model(\n inputs=self._backbone.input,\n outputs=out\n )\n\n return model", "def 
process_layer(layer_def, inputs):\n\n outputs = []\n for n in layer_def['neurons']:\n n_res = n.activate(inputs)\n\n outputs.append(n_res)\n\n return outputs", "def combine_heads_2d(inputs):\n transposed = tf.transpose(inputs, [0, 2, 3, 1, 4])\n Nh, channels = shape_list(transposed)[-2:]\n ret_shape = shape_list(transposed)[:-2] + [Nh * channels]\n return tf.reshape(transposed, ret_shape)", "def projection_block(A_prev, filters, s=2):\n X = K.layers.Conv2D(filters=filters[0],\n kernel_size=1,\n padding='same',\n strides=(s, s),\n kernel_initializer='he_normal')(A_prev)\n\n X = K.layers.BatchNormalization(axis=3)(X)\n\n X = K.layers.Activation('relu')(X)\n\n X = K.layers.Conv2D(filters=filters[1],\n kernel_size=3,\n padding='same',\n # strides=(s, s),\n kernel_initializer='he_normal')(X)\n\n X = K.layers.BatchNormalization()(X)\n\n X = K.layers.Activation('relu')(X)\n\n X = K.layers.Conv2D(filters=filters[2],\n kernel_size=1,\n padding='same',\n kernel_initializer='he_normal')(X)\n\n X = K.layers.BatchNormalization()(X)\n\n shortcut = K.layers.Conv2D(filters=filters[2],\n kernel_size=1,\n padding='same',\n strides=(s, s),\n kernel_initializer='he_normal')(A_prev)\n\n shortcut = K.layers.BatchNormalization()(shortcut)\n\n adding = K.layers.Add()([X, shortcut])\n\n output = K.layers.Activation('relu')(adding)\n\n return output", "def define_layers(self):\n\n if self.D0Flag:\n self.d = self.h\n\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n for l in range(1, self.L):\n self.layers.append(nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )) # nn.BatchNorm1d(self.h, affine=False)))\n\n self.layers.append(nn.Linear(self.h, 1))", "def _clone_layout_placeholders(self, slidelayout):\n latent_ph_types = (PH_TYPE_DT, PH_TYPE_SLDNUM, PH_TYPE_FTR)\n for sp in slidelayout.shapes:\n if not sp.is_placeholder:\n continue\n ph = Placeholder(sp)\n if ph.type in latent_ph_types:\n continue\n self.__clone_layout_placeholder(ph)" ]
[ "0.58452713", "0.57073355", "0.5678293", "0.567758", "0.566787", "0.5503139", "0.54909784", "0.5411151", "0.53651917", "0.5347449", "0.53288347", "0.5283121", "0.52772504", "0.52497953", "0.5245188", "0.5234502", "0.52329576", "0.5224384", "0.51927054", "0.51888996", "0.51857734", "0.5184375", "0.5172577", "0.516792", "0.5163813", "0.51612717", "0.51323724", "0.513159", "0.5125964", "0.51204616", "0.51201797", "0.5076041", "0.50750315", "0.5063795", "0.50622946", "0.50622946", "0.50515777", "0.5037159", "0.50324583", "0.5027079", "0.5024352", "0.5021956", "0.5021393", "0.5003978", "0.5002739", "0.49980864", "0.49895433", "0.49800965", "0.4978081", "0.49770057", "0.4968386", "0.49572572", "0.49563268", "0.4955954", "0.49470133", "0.4938233", "0.49370503", "0.49299324", "0.49291644", "0.49289608", "0.49184543", "0.49161333", "0.49159667", "0.49151215", "0.49134475", "0.49105093", "0.49101156", "0.4907535", "0.49070933", "0.48916045", "0.48795176", "0.48787498", "0.4876511", "0.48758924", "0.48654163", "0.48635727", "0.48598891", "0.48592663", "0.4857704", "0.48562497", "0.48518392", "0.48504534", "0.48476875", "0.48376924", "0.4835255", "0.48345265", "0.483151", "0.48302644", "0.48302418", "0.48277315", "0.48267433", "0.4826403", "0.4823353", "0.48210102", "0.4819774", "0.4818227", "0.481223", "0.48119512", "0.48070782", "0.48060006" ]
0.5049388
37
Retrieve a list of OD pairs included in this chunk.
def _get_od_pairs_for_chunk(self, chunk_definition): # Read the relevant rows from the CSV chunk_num, chunk_size = chunk_definition # Explicitly set data types dtypes = { 0: helpers.PD_FIELD_TYPES[self.origin_id_field_obj.type], 1: helpers.PD_FIELD_TYPES[self.dest_id_field_obj.type] } df_od_pairs = pd.read_csv( self.od_pair_table, header=None, skiprows=chunk_size*chunk_num, nrows=chunk_size, dtype=dtypes ) self.od_pairs = df_od_pairs.values.tolist()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pairs(self):\n return self.pairs", "def pairs(self):\n return self.items() if self.is_a(dict) else self.chunks(2)", "def pairs(self):\n return self.__pairs", "def getOrderList(self):\r\n\t\treturn self.pair.orders", "async def fetch_trading_pairs() -> List[str]:\n raise NotImplementedError", "def items(self):\n out = []\n for y,x in self.coords(False):\n out.append(self.retrieve(y, x))\n return out", "def get_obstList(self,X,Y,Z):\n return []", "def get_obstList(self,X,Y,Z):\n return []", "def create_od_pairs():\n od_pair_shipments = []\n df = pd.read_excel(r'C:\\Users\\93855\\Downloads\\initial\\EDD_assignment.xlsx', sheet_name='OD Pairs',\n usecols=['Origin', 'Destination', 'Duration'])\n for x in range(0, len(df)):\n shipment = ODPair.ODPair(9000 + x, df['Origin'][x], df['Destination'][x])\n od_pair_shipments.append(shipment)\n return od_pair_shipments", "def returnOpenOrders(self, currency_pair=\"all\"):\n pass", "def all_dicoms(self):\n return [dcm_ctr_pair[1:] for dcm_ctr_pair in self.data]", "def show_pairs(self):\n\n all_pairs = []\n for plug in self.plugleads:\n all_pairs.append(plug.pair)\n return all_pairs", "def get_charged_pairs(self):\n charges = [ ai.GetFormalCharge() for ai in self.m.GetAtoms() ]\n # search for the pairs of atoms with smarts like '[N+](=O)[O-]'\n patt = '[+1]~[-1]'\n q = Chem.MolFromSmarts(patt)\n cpairs = np.array( self.m.GetSubstructMatches(q) ).astype(np.int)\n self.charges = charges\n self.cpairs = cpairs", "def get_items(self):\n return (self._start._value, self._end._value, self._value)", "def getPairs(self):\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n allPairs = []\n for i in jsonResponse:\n allPairs.append(i)\n return allPairs", "def items(self):\n ix_obj = list(self.d_buffer.keys())\n ix_obj.sort()\n l_obj = [self.d_buffer[ix] for ix in ix_obj]\n\n return ix_obj, l_obj", "def pairs(self) -> Iterator[tuple[str, list[CommandParser]]]:\n for module, cmds in self._registry[\"by_module\"].items():\n yield (module, cmds)", "async def get_trading_pairs(self) -> List[str]:\n return await self.fetch_trading_pairs()", "def iteritems(self):\n r = self.solr.select('%s:%s %s:* %s:*'\n % (self.index_uuid_field, self.index_uuid,\n self.d_uid_field, self.descriptor_field))\n for doc in r.results:\n d = cPickle.loads(doc[self.descriptor_field])\n yield d.uuid(), d\n for _ in range(r.numFound // 10):\n r = r.next_batch()\n for doc in r.results:\n d = cPickle.loads(doc[self.descriptor_field])\n yield d.uuid(), d", "def pairs(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None):\n return self._iter(txn, args, lo, hi, reverse, max, include)", "def get_soma_objects(self):\n\n msg_store = MessageStoreProxy(database=\"soma2data\", collection=\"soma2\")\n objs = msg_store.query(SOMA2Object._type, message_query={\"map_name\":self.soma_map,\"config\":self.soma_conf})\n print \"queried soma2 objects >> \", objs\n self.soma_objects = ce.get_soma_objects()\n print \"hard coded objects >> \", [self.soma_objects[r].keys() for r in self.soma_objects.keys()]", "def search_all_available_pair(self):\n return self._search({})", "def get_oblist_from_outfit(self, loc):\n from world.fashion.exceptions import FashionError\n from world.fashion.fashion_commands import get_caller_outfit_from_args\n\n try:\n outfit = get_caller_outfit_from_args(self.caller, self.args)\n except FashionError as err:\n raise CommandError(err)\n return [ob for ob in outfit.fashion_items.all() if ob.location == loc]", "def 
toPairs(self):\n result = {}\n stack = []\n start = self.StartSymbols\n end = self.EndSymbols\n for i, symbol in enumerate(self):\n if symbol in start: #open a pair\n stack.append(i)\n elif symbol in end: #close a pair\n result[stack.pop()] = i\n #test whether there are any open pairs left unaccounted for \n if stack:\n raise IndexError, \\\n \"Too many open pairs in structure:\\n%s\" % self\n return Pairs([(key,result[key]) for key in result])", "def _get_all_oshapes(self):\n an_iname = self.node_list[0]\n an_inode = self.builder.nodes[an_iname]\n an_ishape = an_inode.oshapes['loc']\n \n return {'main' : an_ishape,\n 'loc' : an_ishape,\n 'cov' : an_ishape + [an_ishape[-1]]}", "def _get_obs(self):\n pos = []\n z = []\n for i in range(params['memory_size']):\n if self._step - i * params['memory_size'] > 1:\n pos.append(self._track_item['joint_pos'][self._step - i * params['memory_size'] - 1].copy())\n z.append(self._track_item['z'][self._step - i * params['memory_size'] - 1].copy())\n else:\n pos.append(self._track_item['joint_pos'][0].copy())\n if len(self._track_item['z']) < 1:\n z.append(self.z.copy())\n else:\n z.append(self._track_item['z'][0].copy())\n out = pos\n if params['observation_version'] == 1:\n out += z\n ob = {\n 'observation' : np.concatenate(out, -1),\n 'desired_goal' : self.desired_goal.copy(),\n 'achieved_goal' : self.achieved_goal.copy(),\n 'z' : self.z.copy()\n }\n return ob", "def getEquates(self) -> Iterator[ghidra.program.model.symbol.Equate]:\n ...", "def entries(self):\n out = []\n for y,x in self.coords(False):\n out.append((y,x,self.retrieve(y,x)))\n return out", "def get_orders_of_bot(self, pair:str):\n\n\t\tconn = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\tconn.row_factory = sqlite3.Row\n\t\tc = conn.cursor()\n\n\t\tc.execute('SELECT * FROM orders WHERE pair=?', (pair, ))\n\t\torders = c.fetchall()\n\t\t\t\t\t\t\t\t\t\t\t\t\t# list(orders) = [<sqlite3.Row object at 0x0000020664E28670>, <sqlite3.Row object at 0x0000020664E9BA70>, ...]\n\t\t\t\t\t\t\t\t\t\t\t\t\t# dict(order) = {'pair': 'STORMETH', 'side': 'BUY',...}\n\t\treturn orders \t# We need to return None if there is no bot on the pair, so no dict(orders)", "def odors(self, session):\n odors = session.query(Timepoint.odor).filter(\n Timepoint.id.between(self.start_timepoint_id, self.end_timepoint_id))\n return np.array(odors.all()).flatten()", "def getList(self):\n return self.position.exportToList()", "def getBondsWithH(self):\n try:\n return self._bondListWithH\n except AttributeError:\n pass\n bondPointers=self._raw_data[\"BONDS_INC_HYDROGEN\"]\n self._bondListWithH = self._getBonds(bondPointers)\n return self._bondListWithH", "def getContents(self):\r\n cont=[]\r\n for i in range (len(self._indices)):\r\n cont.append(self._dataset.getPoint(self._indices[i]))\r\n return cont", "def items(self):\n items = []\n for item in self.contents:\n items.append((item, self.contents[item]))\n return items", "def ole_objects(self):\n return self.container['ole_objects']", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def get_test_ODs() -> List[dict]:\n ods = gpd.read_file('data/tests/test_OD_lines.geojson')\n ods['orig_point'] = [geom.interpolate(0, normalized=True) for geom in ods['geometry']]\n ods['dest_point'] = [geom.interpolate(1, normalized=True) for geom in ods['geometry']]\n ods['orig_latLon'] = [get_lat_lon_from_geom(geom) for geom in ods['orig_point']]\n ods['dest_latLon'] = [get_lat_lon_from_geom(geom) for geom in ods['dest_point']]\n od_dicts = 
ods.to_dict(orient='records')\n od_dict = {}\n for od in od_dicts:\n od_dict[int(od['OD'])] = od\n return od_dict", "def sections(self):\n return list(self._dict)", "def rows(self, extras: dict) -> list[dict]:\n metadata = self.metadata.dict()\n return [\n {**extras, **metadata, **dict(dose=dose, response=response)}\n for dose, response in zip(self.individual_doses, self.responses, strict=True)\n ]", "def key_pairs(self):\n return self.get('key_pairs')", "def getPairs(self):\n jsonResponse = self.getJson(\"https://api.kraken.com/0/public/AssetPairs\")\n allPairs = []\n for i in jsonResponse[\"result\"]:\n allPairs.append(i.split(\".d\",1)[0])\n return allPairs", "def objects(self):\n _, c = self.get_column(0)\n size = len(c)\n headers = self.headers()\n for i in range(size):\n obj = {}\n for h in headers:\n _, col = self.get_column(h)\n val = col[i]\n obj[h] = val\n yield obj", "def object_lists(self) -> Dict[str, List[Any]]:\n return {name: self.hyperparams[name][2] for name in self.names()\n if self.hyperparams[name][0] == 'object'}", "def _objects(self):\n for d in self._dicts_with_ids():\n yield d['id'], tuple(d[k] for k in self.fields)", "def get_sections(self):\n return tuple(self._sections)", "def get_od_books(search_criteria, od_library_rows):\n\tprint \" in get_od_books \"\n\tbook_lists = []\n\toverdrive_client_app_fields, response = log_into_overdrive()\n\taccess_token = overdrive_client_app_fields['access_token']\n\tprint response.status_code, \" == \", response.reason\n\tif response.status_code > 201:\n\t flash((\"Action was not successful. %s == %s\\n\") % \n\t (response.status_code, response.reason))\n\telif response.status_code == 200 or response.status_code == 201:\n\t client_credentials = response.content\n\t print \"Post to get access token was successful\"\n\t#end \n\n\tfor library_fields in od_library_rows:\n\t\tbook_list = []\n\t\tproduct_url, headers = get_library_product_url(library_fields, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\taccess_token)\n\t\tbook_list = search_for_books(search_criteria, product_url, headers)\n\t\tbook_lists.extend(book_list)\n\t#end for\n\tprint \" number of books in book list == \", len(book_lists), \"\\n\"\n\treturn book_lists", "def getSegments(self) -> List[int]:\n ...", "def get_objects(self):\r\n bucket = self._get_bucket()\r\n objs = []\r\n for key in bucket:\r\n objs.append(key)\r\n return objs", "def yields ( self ) :\n return tuple ( [ i for i in self.alist2 ] )", "def yields ( self ) :\n return tuple ( [ i for i in self.alist2 ] )", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def getEntireASGList( self ):\r\n return self.__trackASG.keys()", "def get_raw(self):\n \n out_items = []\n for order in self.order_lst:\n out_items += [i.get_metadata() for i in order.get_items()]\n \n return out_items", "def get_items(self, start, stop, next_position=None):", "def all_objects():\n objs = {}\n objs['Section'] = list(h.all_sec())\n objs['Segment'] = []\n for sec in objs['Section']:\n objs['Segment'].extend(list(sec.allseg()))\n objs['PointProcess'] = []\n for seg in objs['Segment']:\n objs['PointProcess'].extend(list(seg.point_processes()))\n \n return objs", "def return_open_orders(self, currency_pair):\n return self.api_query('returnOpenOrders', {\"currencyPair\": currency_pair})", "def get_observation_list(self):\n return self.observations", "def obj(self) -> (Symbol, int, int):\n return (self._symbol, self._start, self._end)", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = 
False) -> List:", "def get_sobol_indices(self, order):\n self._set_statistics()\n return self.statistics_object.get_sobol(order)", "def get_orders(self):\n return self.order_lst", "def osd_list(self):\n def unique_list_of_dicts(l):\n return reduce(lambda x, y: x if y in x else x + [y], l, [])\n\n tree = self.osd_tree()\n nodes = tree['nodes']\n if 'stray' in tree:\n nodes += tree['stray']\n for node in nodes:\n if u'depth' in node:\n del node[u'depth']\n nodes = unique_list_of_dicts(nodes)\n osdlists = list(unique_list_of_dicts([node for node in nodes if node['type'] == 'osd']))\n hostlists = list(unique_list_of_dicts([node for node in nodes if node['type'] == 'host']))\n # add host info in osdlist\n for osdlist in osdlists:\n for hostlist in hostlists:\n if osdlist[\"id\"] in hostlist[\"children\"]:\n osdlist[\"host\"] = hostlist[\"name\"]\n break\n return osdlists", "def get_orderbooks(self, pair='btc_jpy'):\n url = 'https://coincheck.com/api/order_books'\n r = requests.get(url, {'pair': pair}, timeout=self.timeout)\n\n return json.loads(r.text)", "def GetObjects(self): \r\n return self.model.GetObjects()", "def ciaPairs(self):\n\n return self._cia_pairs", "def _get_pairs_simple(self, distance):\n pairs = self.data_kd[0].query_pairs(distance)\n pairs = set(frozenset(p) for p in pairs)\n for kd in self.data_kd[1:]:\n newpairs = set(frozenset(p) for p in kd.query_pairs(distance)\n if frozenset(p) in pairs)\n pairs = newpairs\n return pairs", "def get_objects_data(self):\n return dict(result=self.objects)", "def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def items(self):\r\n L = []\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n L.append((o, value))\r\n return L", "def getEquates(self, reference: ghidra.program.model.address.Address, opndPosition: int) -> List[ghidra.program.model.symbol.Equate]:\n ...", "def items(self):\n for key in self._sorted:\n yield key, self._map[key]", "def combined(self):\n d = self._combined\n return [c for key, c in d.items()]", "def get_items(self):\r\n combined_list = []\r\n for prefix, item_list in self.class_map.values():\r\n combined_list.extend(zip(self._get_id_range(prefix, len(item_list)),\r\n item_list))\r\n return combined_list", "def items(self):\n return zip(self.products, self.yields)", "def get_ids_as_slice_or_list(self):\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)", "def exportExtendedDT(self):\n return self.coords, list(self.triangles)", "def get_pairs(self, instance):\n return self._property_schemas.items()", "def readentries(self):\n return list(x for x in self)", "def items(self):\n\t\treturn tuple(self.dist.items())", "def getItemIter(self):\n for key, raw in self.db.getAllItemIter(db=self.sdb, split=False):\n keys = tuple(key.decode(\"utf-8\").split('.'))\n yield (keys, coring.Serder(raw=bytes(raw)))", "def iterkeys(self, essid):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject.key)\n q = q.join(PYR2_DBObject).join(ESSID_DBObject)\n q = q.filter(ESSID_DBObject.essid == essid)\n keys = q.all()\n return (c[0] for c in keys)", "def get_all_trading_pairs(self):\r\n method = self.public_endpoints['all_trading_pairs']['method']\r\n url = self.base_url + self.public_endpoints['all_trading_pairs']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if 
res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def get_index_array(self):\n return self.region_pairs", "def getChunks():", "def read_infodata(self):\n infodata = []\n self.writecmd(self.APP, self.WRITE2_READ2, 4, [0xac, 0x20, 0x00, 0x00])\n infodata += [ord(self.data[0]), ord(self.data[1])]\n self.writecmd(self.APP, self.WRITE2_READ2, 4, [0xac, 0x30, 0x00, 0x00])\n infodata += [ord(self.data[0]), ord(self.data[1])]\n return infodata", "def dataObjects(self):\n\t\treturn self._objects", "def _pco_list(self, hdr, name, pos):\n if name not in ('poly', 'champs', 'offset'):\n raise TypeError('Name must be one of \"poly\", \"champs\", or \"offset\".')\n\n hdr.seek(pos + 16)\n length = unpack(self._bo + 'i', hdr.read(4))[0]\n d = []\n for p in range(length):\n if name == 'poly':\n d.append(self._species(hdr))\n else:\n raise NotImplementedError(\n '{}List is non-null, don\\'t know how to read.'\n ''.format(name.capitalize()))\n hdr.seek(4, 1)\n return d", "def obj_list(self):\n return self._obj_list", "def objects(self):\n\t\treturn self._objects", "def as_OD(self) -> OrderedDict:\n od = OrderedDict()\n for k, v in self._tagged_values_dict.items():\n if self._is_visible(k):\n od[k] = v[1]\n return od", "def get_pairs(self):\n self.get_locations()\n self.choices = {}\n for host, pathogens in self.locations.iteritems():\n if len(pathogens) > 1:\n for pair in combinations(pathogens, 2):\n self.choices.update({pair: host}) # pairs of pathogens in same host", "def retrieve_offsets(self):\n return [], []", "def _source_POs(self):\n if not self._extraPO:\n sources = [self]\n elif self.self_first:\n sources = [self,self._extraPO]\n else:\n sources = [self._extraPO,self]\n return sources", "def get_obs_ids(self):\n return sorted(self.obsinfos.keys())", "def get_entries(self):\n return self.find_by_st(\"urn:schemas-denon-com:device:ACT-Denon:1\")", "def _get_output_objects_info(self):\n if len(self.output_objects) == 0:\n return []\n\n return self.output_objects[0].keys()", "def get_assets(self):\n # The size of the price_data list should not change, even when updated\n price_data_length = len(self.price_data)\n\n for index in itertools.cycle(range(price_data_length)):\n try:\n yield self.price_data[index]\n except IndexError:\n yield None", "def items(self):\n return zip(self._keys, self._values)", "def data_list(self):\n # return [data_object, data_object, ...]\n return self._data_list" ]
[ "0.61472523", "0.58992565", "0.5761564", "0.567055", "0.5611011", "0.55578417", "0.5545772", "0.5545772", "0.5542505", "0.55348593", "0.5519136", "0.55052334", "0.5441358", "0.5352025", "0.53083766", "0.52903664", "0.5279547", "0.5259124", "0.5257976", "0.52337116", "0.52139527", "0.5204154", "0.5191369", "0.5172068", "0.5165982", "0.5163863", "0.51575", "0.51416904", "0.5139521", "0.5123309", "0.5114598", "0.5112202", "0.5104578", "0.50996345", "0.5091783", "0.5076261", "0.50645345", "0.50635463", "0.5062762", "0.50615543", "0.5059927", "0.50580055", "0.5054294", "0.50434285", "0.5031114", "0.5022093", "0.5018718", "0.5011846", "0.5011051", "0.5011051", "0.49890164", "0.49878016", "0.49849942", "0.4978069", "0.49775857", "0.49758655", "0.49749213", "0.49705184", "0.49658924", "0.49651852", "0.4951674", "0.49514985", "0.49357623", "0.49332583", "0.49176502", "0.49140748", "0.49102592", "0.49097815", "0.48923284", "0.48923284", "0.48918316", "0.48756695", "0.4874043", "0.48727345", "0.4869618", "0.48674345", "0.4865579", "0.4855322", "0.48526117", "0.4851206", "0.48458576", "0.4839875", "0.48366612", "0.48364735", "0.48348403", "0.4828678", "0.4828342", "0.48281962", "0.48267347", "0.4823309", "0.4822998", "0.4813672", "0.4809631", "0.48055506", "0.48052868", "0.4797581", "0.47962594", "0.47866777", "0.4784699", "0.47835335" ]
0.6435109
0
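A minimal standalone sketch of the chunked-read pattern in the document field of the record above, assuming plain string dtypes in place of the record's helpers.PD_FIELD_TYPES lookup on arcpy field objects; the function name and defaults here are illustrative, not part of the dataset:

import pandas as pd

def read_od_pairs_chunk(od_pair_csv, chunk_definition, origin_dtype=str, dest_dtype=str):
    """Return the OD pairs for one chunk of a headerless two-column CSV."""
    chunk_num, chunk_size = chunk_definition
    df = pd.read_csv(
        od_pair_csv,
        header=None,                      # the OD pair file has no header row
        skiprows=chunk_size * chunk_num,  # jump to the first row of this chunk
        nrows=chunk_size,                 # read at most one chunk of rows
        dtype={0: origin_dtype, 1: dest_dtype},  # stand-in for the field-type lookup
    )
    return df.values.tolist()

# e.g. the third chunk of 1000 rows:
# pairs = read_od_pairs_chunk("od_pairs.csv", (2, 1000))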
Create layers that include only the origins and destinations relevant to this chunk.
def _select_inputs_many_to_many(self): # Select the origins present in this chunk of predefined OD pairs self.logger.debug("Selecting origins for this chunk...") origins_in_chunk = set([pair[0] for pair in self.od_pairs]) if isinstance(self.od_pairs[0][0], (int, float,)): origin_string = ", ".join([str(o_id) for o_id in origins_in_chunk]) else: origin_string = "'" + "', '".join([str(o_id) for o_id in origins_in_chunk]) + "'" origins_where_clause = f"{self.origin_id_field} IN ({origin_string})" self.logger.debug(f"Origins where clause: {origins_where_clause}") self.input_origins_layer_obj = helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.origins, self.input_origins_layer, origins_where_clause] ).getOutput(0) num_origins = int(arcpy.management.GetCount(self.input_origins_layer).getOutput(0)) self.logger.debug(f"Number of origins selected: {num_origins}") # Select the destinations present in this chunk of predefined OD pairs self.logger.debug("Selecting destinations for this chunk...") dests_in_chunk = set([pair[1] for pair in self.od_pairs]) if isinstance(self.od_pairs[0][1], (int, float,)): dest_string = ", ".join([str(d_id) for d_id in dests_in_chunk]) else: dest_string = "'" + "', '".join([str(d_id) for d_id in dests_in_chunk]) + "'" dests_where_clause = f"{self.dest_id_field} IN ({dest_string})" self.logger.debug(f"Destinations where clause: {dests_where_clause}") self.input_dests_layer_obj = helpers.run_gp_tool( self.logger, arcpy.management.MakeFeatureLayer, [self.destinations, self.input_destinations_layer, dests_where_clause] ).getOutput(0) num_dests = int(arcpy.management.GetCount(self.input_destinations_layer).getOutput(0)) self.logger.debug(f"Number of destinations selected: {num_dests}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
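The document field of this record builds "field IN (...)" where clauses from the origin and destination IDs present in the chunk before selecting features with arcpy's MakeFeatureLayer. An arcpy-free sketch of just that clause construction; the helper name is an assumption, and IDs are sorted here for deterministic output, which the record itself does not do:

def build_in_clause(field_name, ids):
    """Build a SQL-style IN clause from chunk IDs, quoting non-numeric IDs."""
    unique_ids = sorted(set(ids), key=str)  # dedupe; sorting is only for determinism
    if all(isinstance(i, (int, float)) for i in unique_ids):
        id_string = ", ".join(str(i) for i in unique_ids)
    else:
        # single-quote string IDs, matching the record's formatting
        id_string = "'" + "', '".join(str(i) for i in unique_ids) + "'"
    return f"{field_name} IN ({id_string})"

pairs = [(1, 10), (1, 11), (2, 10)]
origins_where = build_in_clause("OriginID", [p[0] for p in pairs])      # "OriginID IN (1, 2)"
dests_where = build_in_clause("DestinationID", [p[1] for p in pairs])   # "DestinationID IN (10, 11)"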
[ "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def create(self):\n\n if rs.IsLayer(self.name):\n\n return self\n\n mom = \"\"\n \n for s in self.path:\n \n son = s if (mom == \"\") else (mom + \"::\" + s)\n\n mommy = None if mom == \"\" else mom\n\n if not rs.IsLayer(son):\n\n rs.AddLayer(s, color = None, visible = True, locked = False, parent = mommy)\n\n mom = son\n \n return self", "def build_layers(self):\n raise NotImplementedError", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf", "def layers(self): # -> LayerView:\n ...", "def init_layers(self):\n\n # get caching layers activated\n caching_layers = G3WCachingLayer.objects.all()\n for caching_layer in caching_layers:\n self.add_layer(str(caching_layer), caching_layer)", "def make_layers(self):\r\n #assuming temporal field is always the first column!\r\n timeCol = self.data.columns[0]\r\n times = self.data[timeCol].unique() \r\n lat = self.data.lat.unique()\r\n lon = self.data.lon.unique()\r\n shape = (len(lat), len(lon))\r\n depths, hours = [None], [None]\r\n if 'depth' in self.data.columns:\r\n depths = self.data.depth.unique()\r\n if 'hour' in self.data.columns:\r\n hours = self.data.hour.unique()\r\n layers, titles = [], []\r\n for t in times:\r\n for h in hours:\r\n for z in depths:\r\n frame = self.data[self.data[timeCol] == t]\r\n\r\n if timeCol == 'time':\r\n sub = self.variable + self.unit + ', ' + str(datetime.strptime(t, '%Y-%m-%dT%H:%M:%S').date())\r\n else:\r\n sub = self.variable + self.unit + ', ' + timeCol + ': ' + str(t) \r\n\r\n if h != None:\r\n frame = frame[frame['hour'] == h]\r\n sub = sub + ', hour: ' + str(h) + 'hr'\r\n if z != None:\r\n frame = frame[frame['depth'] == z] \r\n sub = sub + ', depth: %2.2f' % z + ' [m]' \r\n try: \r\n layers.append(frame[self.variable].values.reshape(shape))\r\n titles.append(sub)\r\n except Exception as e:\r\n continue \r\n return layers, titles, lat, lon", "def generate_legacy_layers(self, images_map, content_retriever):\n pass", "def generate_legacy_layers(self, images_map, content_retriever):\n pass", "def make_feature_layers(self, config):\n raise NotImplementedError", "def _create_layer() -> Image:\n data = np.random.random((32, 16))\n return Image(data)", "def _make_layer(self, X, name, block, num_blocks, out_channels):\n\n for i in range(0, num_blocks):\n X = block(X, name = name + '_block{}'.format(i), out_channels=out_channels)\n return X", "def export_layers(self, dest, show):\n doc = copy.deepcopy(self.document)\n for layer in doc.xpath('//svg:g[@inkscape:groupmode=\"layer\"]', namespaces=inkex.NSS):\n layer.attrib['style'] = 'display:none'\n id = layer.attrib[\"id\"]\n if id in show:\n layer.attrib['style'] = 'display:inline'\n\n doc.write(dest)", "def _make_layer(self, block, outputs, blocks, stride=1):\n downsample = None\n \n downsample = nn.Sequential(\n nn.Conv2d(self.inputs, outputs * 4,\n kernel_size=1, stride=stride, bias=False,\n 
dilation=self.dilation),\n nn.BatchNorm2d(outputs * 4),\n )\n\n layers = []\n layers.append(block(self.inputs, outputs, stride, downsample, self.dilation))\n self.inputs = outputs * 4\n for i in range(1, blocks):\n layers.append(block(self.inputs, outputs))\n\n layer = nn.Sequential(*layers)\n\n self.channels.append(outputs * 4)\n self.layers.append(layer)\n\n return layer", "def _make_layer(self, block, planes, blocks, stride=1):\r\n downsample = None\r\n\r\n # determine whether we need to downsample within shortcut\r\n # - if stride != 1, main branch is downsampled spatially\r\n # - in_planes != planes * expansion, this downsample method simply\r\n # expands the channels.\r\n if stride != 1 or self.in_planes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n conv1x1(\r\n self.in_planes,\r\n planes * block.expansion,\r\n stride=stride,\r\n groups=self.num_groups,\r\n indices=self.indices,\r\n mask=self.mask,\r\n ),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n\r\n layers = [None] * blocks\r\n layers[0] = block(\r\n self.in_planes,\r\n planes,\r\n stride=stride,\r\n downsample=downsample,\r\n groups=self.num_groups,\r\n indices=self.indices,\r\n mask=self.mask,\r\n )\r\n\r\n self.in_planes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers[i] = block(\r\n self.in_planes,\r\n planes,\r\n groups=self.num_groups,\r\n indices=self.indices,\r\n mask=self.mask,\r\n )\r\n\r\n return nn.Sequential(*layers)", "def init_vector_layers(self):\n if self.point_vector_layer:\n QgsMapLayerRegistry.instance().removeMapLayer(self.point_vector_layer.id())\n if self.line_vector_layer:\n QgsMapLayerRegistry.instance().removeMapLayer(self.line_vector_layer.id())\n if self.polygon_vector_layer:\n QgsMapLayerRegistry.instance().removeMapLayer(self.polygon_vector_layer.id())\n\n self.point_vector_layer = QgsVectorLayer(KEY_POINT, \"Vector Items (Points)\", \"memory\")\n self.point_vector_layer.setCrs(QgsCoordinateReferenceSystem(4326), False)\n QgsMapLayerRegistry.instance().addMapLayer(self.point_vector_layer)\n\n self.line_vector_layer = QgsVectorLayer(KEY_LINE, \"Vector Items (Lines)\", \"memory\")\n self.line_vector_layer.setCrs(QgsCoordinateReferenceSystem(4326), False)\n QgsMapLayerRegistry.instance().addMapLayer(self.line_vector_layer)\n\n self.polygon_vector_layer = QgsVectorLayer(KEY_POLYGON, \"Vector Items (Polygons)\", \"memory\")\n self.polygon_vector_layer.setCrs(QgsCoordinateReferenceSystem(4326), False)\n QgsMapLayerRegistry.instance().addMapLayer(self.polygon_vector_layer)\n\n point_data_provider = self.point_vector_layer.dataProvider()\n line_data_provider = self.line_vector_layer.dataProvider()\n polygon_data_provider = self.polygon_vector_layer.dataProvider()\n\n attribute_fields = []\n for attribute in KEY_JSON_PROPERTIES_LIST:\n attribute_fields.append(QgsField(u'vector_' + attribute, QVariant.String))\n\n point_data_provider.addAttributes(attribute_fields)\n line_data_provider.addAttributes(attribute_fields)\n polygon_data_provider.addAttributes(attribute_fields)", "def make_res_layer(self, **kwargs):\n return ResLayer(**kwargs)", "def getMoreComplexInLayerGraph(self):\n makeLayers = self.makeLayers\n addNodesToLayer = self.addNodesToLayer\n addNodeToLayer = self.addNodeToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n addInLayerEdge = self.addInLayerEdge\n setFixedOrderConstraint = self.setFixedOrderConstraint\n\n layers = makeLayers(3)\n leftNodes = addNodesToLayer(4, layers[0])\n middleNodes = addNodesToLayer(3, layers[1])\n rightNode = 
addNodeToLayer(layers[2])\n setFixedOrderConstraint(middleNodes[0])\n setFixedOrderConstraint(middleNodes[1])\n\n eastWestEdgeFromTo(leftNodes[1], middleNodes[0])\n\n eastWestEdgeFromTo(leftNodes[3], middleNodes[1])\n eastWestEdgeFromTo(leftNodes[2], middleNodes[1])\n addInLayerEdge(middleNodes[0], middleNodes[1], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], middleNodes[0])\n addInLayerEdge(middleNodes[0], middleNodes[2], PortSide.WEST)\n\n addInLayerEdge(middleNodes[0], middleNodes[1], PortSide.EAST)\n eastWestEdgeFromTo(middleNodes[0], rightNode)\n\n return self.graph", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def getMoreComplexThreeLayerGraph(self):\n makeLayer = self.makeLayer\n graph = self.graph\n addNodesToLayer = self.addNodesToLayer\n addPortOnSide = self.addPortOnSide\n addEdgeBetweenPorts = self.addEdgeBetweenPorts\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer()\n middleLayer = makeLayer()\n rightLayer = makeLayer()\n\n leftNodes = addNodesToLayer(3, leftLayer)\n middleNodes = addNodesToLayer(2, middleLayer)\n rightNodes = addNodesToLayer(3, rightLayer)\n\n leftMiddleNodePort = addPortOnSide(leftNodes[1], PortSide.EAST)\n middleLowerNodePortEast = addPortOnSide(middleNodes[1], PortSide.EAST)\n middleUpperNodePortEast = addPortOnSide(middleNodes[0], PortSide.EAST)\n rightUpperNodePort = addPortOnSide(rightNodes[0], PortSide.WEST)\n rightMiddleNodePort = addPortOnSide(rightNodes[1], PortSide.WEST)\n\n addEdgeBetweenPorts(middleUpperNodePortEast, rightUpperNodePort)\n addEdgeBetweenPorts(middleUpperNodePortEast, rightMiddleNodePort)\n addEdgeBetweenPorts(middleUpperNodePortEast, rightMiddleNodePort)\n eastWestEdgeFromTo(middleLowerNodePortEast, rightNodes[2])\n eastWestEdgeFromTo(leftMiddleNodePort, middleNodes[0])\n eastWestEdgeFromTo(middleNodes[1], rightUpperNodePort)\n eastWestEdgeFromTo(leftMiddleNodePort, middleNodes[1])\n eastWestEdgeFromTo(leftNodes[2], middleNodes[0])\n eastWestEdgeFromTo(leftNodes[0], middleNodes[0])\n\n return graph", "def build_sample(self, wavelength=None):\n self.init_materials(wavelength)\n\n multi_layer = ba.MultiLayer()\n air_layer = ba.Layer(self.m_air_material)\n avg_layer = ba.Layer(self.m_air_material, self.m_average_layer_thickness)\n substrate_layer = ba.Layer(self.m_substrate_material)\n\n for layout_factory in self.m_layouts:\n avg_layer.addLayout(layout_factory.create_layout(self.m_particle_material))\n\n roughness = ba.LayerRoughness(self.m_roughness, 0.3, 500.0*nm)\n multi_layer.addLayer(air_layer)\n multi_layer.addLayer(avg_layer)\n multi_layer.addLayerWithTopRoughness(substrate_layer, roughness)\n\n return multi_layer", "def create_layer(layer_name, table, flds=None, where=None, shp_prefix=None):\n # if list of names provided, convert to dictionary\n if isinstance(flds, list):\n t = {}\n for item in flds:\n t[item] = item\n flds = t\n\n # add shape fields if desired\n if shp_prefix is not None:\n desc = arcpy.Describe(table)\n if desc.shapeType == \"Polygon\":\n flds[desc.AreaFieldName] = shp_prefix + '_area'\n\n # create field definitions\n fi = arcpy.FieldInfo()\n for fld in arcpy.ListFields(table):\n fld_name = fld.name\n if flds is None:\n fi.addField(fld_name, fld_name, 'VISIBLE', '')\n else:\n value = flds.get(fld_name, None)\n if value is not None:\n fi.addField(fld_name, value, 'VISIBLE', '')\n else:\n fi.addField(fld_name, fld_name, 'HIDDEN', '')\n\n # create the feature layer\n if 
where is None:\n arcpy.MakeFeatureLayer_management(table, layer_name, field_info=fi)\n else:\n arcpy.MakeFeatureLayer_management(table, layer_name, where, field_info=fi)", "def _make_res_layer(self,\n block,\n planes,\n blocks,\n stride=1,\n norm_kwargs=None,\n layer_name=''):\n downsample = None\n\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.SequentialCell([\n nn.Conv3d(in_channels=self.inplanes,\n out_channels=planes * block.expansion,\n kernel_size=1,\n stride=(stride, stride, stride),\n has_bias=False),\n nn.BatchNorm3d(num_features=planes * block.expansion,\n **({} if norm_kwargs is None else norm_kwargs))])\n\n layers = []\n layers.append(block(inplanes=self.inplanes,\n planes=planes,\n stride=stride,\n downsample=downsample))\n\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(inplanes=self.inplanes, planes=planes))\n\n return nn.SequentialCell(layers)", "def _make_layer(self, block, planes, blocks, stride=1, sync=False):\n downsample = None\n if stride != 1 or self.in_planes != planes * block.expansion:\n downsample = nn.SequentialCell(\n conv1x1x1(self.in_planes, planes * block.expansion, stride),\n _bn(planes * block.expansion, sync)\n )\n\n layers = []\n layers.append(\n block(in_planes=self.in_planes,\n planes=planes,\n stride=stride,\n downsample=downsample,\n sync=sync)\n )\n self.in_planes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.in_planes, planes, sync=sync))\n\n return nn.SequentialCell(layers)", "def layer_construction(self, in_channel, out_channel, stride, num_blocks):\n layer = [ResBlock(in_channel,out_channel,stride)]\n for i in range(0, num_blocks-1):\n layer.append(ResBlock(out_channel * 4, out_channel))\n\n return nn.Sequential(*layer)", "def add_objects_from_layer(self, layer):\n\n objects = layer.get_allowed_geometry()\n\n typ_plural = layer.path[1]\n typ_sofi = gs.plural_to_sofi[typ_plural]\n\n for obj in objects:\n\n # !! 
REFACTOR TO CALL PROGRAMMATICALLY -> ELIMINATE CONDITIONALS !!\n\n if typ_plural in gs.point_elements:\n\n self.add_node(obj, typ_sofi, layer)\n\n if typ_plural in gs.line_elements:\n\n self.add_line_element(obj, typ_sofi, layer)\n\n if typ_plural in gs.spring_elements:\n\n self.add_spring_sn(obj, typ_sofi, layer) \n\n if typ_plural in gs.area_elements:\n\n self.add_area_element(obj, typ_sofi, layer) \n\n return self", "def build_layers(node: md.Document, *, disambiguate_names: bool = True) -> list[dict]:\n layers = []\n names = []\n for i, folder in enumerate(get(node, \"Folder\")):\n name = val(get1(folder, \"name\"))\n geojson = build_feature_collection(folder, name)\n if geojson[\"features\"]:\n layers.append(geojson)\n names.append(name)\n\n if not layers:\n # No folders, so use the root node\n name = val(get1(node, \"name\"))\n geojson = build_feature_collection(node, name)\n if geojson[\"features\"]:\n layers.append(geojson)\n names.append(name)\n\n if disambiguate_names:\n new_names = disambiguate(names)\n new_layers = []\n for i, layer in enumerate(layers):\n layer[\"name\"] = new_names[i]\n new_layers.append(layer)\n layers = new_layers\n\n return layers", "def createMemoryLayer(self):\n suffix = \"\"\n name = \"Vector Bender\"\n while len(QgsProject.instance().mapLayersByName(name + suffix)) > 0:\n if suffix == \"\":\n suffix = \" 1\"\n else:\n suffix = \" \" + str(int(suffix) + 1)\n newMemoryLayer = QgsVectorLayer(\"Linestring\", name + suffix, \"memory\")\n newMemoryLayer.loadNamedStyle(join(dirname(__file__), \"PairStyle.qml\"), False)\n QgsProject.instance().addMapLayer(newMemoryLayer)\n self.updateLayersComboboxes()\n index = self.comboBox_pairsLayer.findData(newMemoryLayer.id())\n self.comboBox_pairsLayer.setCurrentIndex(index)\n newMemoryLayer.startEditing()", "def _make_layer(self, block, layer_num, in_channel, out_channel):\r\n layers = []\r\n darkblk = block(in_channel, out_channel)\r\n layers.append(darkblk)\r\n\r\n for _ in range(1, layer_num):\r\n darkblk = block(out_channel, out_channel)\r\n layers.append(darkblk)\r\n\r\n return nn.SequentialCell(layers)", "def define_layers(self):\n if self.d != 0:\n # If we have a fixed input size we use it to define the first layer\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n else:\n self.layers = [nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )]\n\n l = 0\n for l in self.layers_sizes():\n self.layers.append(nn.Sequential(nn.Linear(self.h - l, self.h - l - self.delta_h),\n nn.ReLU(), )) # nn.BatchNorm1d( self.h - l - self.delta_h, affine=False)))\n self.layers.append(nn.Sequential(nn.Linear(self.h - l - self.delta_h, 1), nn.ReLU()))", "def expand_vertex_layers(x):\n base_src = x['__id']\n base_tgt = x['__id']\n bitfield = shift_and_bitstrings(x['layers'], x['layers'])\n return expand_causal_edges_from_bitfield(bitfield, base_src, base_tgt, max_id)", "def generate_openlayers(self):\n\n args = {}\n args['title'] = self.options.title\n args['bingkey'] = self.options.bingkey\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = self.tileext\n args['publishurl'] = self.options.url\n args['copyright'] = self.options.copyright\n if self.options.tmscompatible:\n args['tmsoffset'] = \"-1\"\n else:\n args['tmsoffset'] = \"\"\n if self.options.profile == 'raster':\n args['rasterzoomlevels'] = self.tmaxz+1\n 
args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]\n\n s = r\"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml\"\n <head>\n <title>%(title)s</title>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n .olImageLoadError { display: none; }\n .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }\n </style>\"\"\" % args # noqa\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>\n \"\"\" % args\n\n s += \"\"\"\n <script src=\"http://www.openlayers.org/api/2.12/OpenLayers.js\"></script>\n <script>\n var map;\n var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n var emptyTileURL = \"http://www.maptiler.org/img/none.png\";\n OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;\n\n function init(){\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:3857\",\n displayProjection: new OpenLayers.Projection(\"EPSG:4326\"),\n numZoomLevels: 20\n };\n map = new OpenLayers.Map(options);\n\n // Create Google Mercator layers\n var gmap = new OpenLayers.Layer.Google(\"Google Streets\",\n {\n type: google.maps.MapTypeId.ROADMAP,\n sphericalMercator: true\n });\n var gsat = new OpenLayers.Layer.Google(\"Google Satellite\",\n {\n type: google.maps.MapTypeId.SATELLITE,\n sphericalMercator: true\n });\n var ghyb = new OpenLayers.Layer.Google(\"Google Hybrid\",\n {\n type: google.maps.MapTypeId.HYBRID,\n sphericalMercator: true\n });\n var gter = new OpenLayers.Layer.Google(\"Google Terrain\",\n {\n type: google.maps.MapTypeId.TERRAIN,\n sphericalMercator: true\n });\n\n // Create Bing layers\n var broad = new OpenLayers.Layer.Bing({\n name: \"Bing Roads\",\n key: \"%(bingkey)s\",\n type: \"Road\",\n sphericalMercator: true\n });\n var baer = new OpenLayers.Layer.Bing({\n name: \"Bing Aerial\",\n key: \"%(bingkey)s\",\n type: \"Aerial\",\n sphericalMercator: true\n });\n var bhyb = new OpenLayers.Layer.Bing({\n name: \"Bing Hybrid\",\n key: \"%(bingkey)s\",\n type: \"AerialWithLabels\",\n sphericalMercator: true\n });\n\n // Create OSM layer\n var osm = new OpenLayers.Layer.OSM(\"OpenStreetMap\");\n\n \"\"\" % args # noqa\n\t\t \n if self.options.xyz:\n s += \"\"\"\t\t \n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.XYZ(\"XYZ Overlay\",\n \"${z}/${x}/${y}.png\", {\n transitionEffect: 'resize',\n isBaseLayer: false\n });\n\t\t\t\t \n\t\t \"\"\" % args # noqa\n else:\n s += \"\"\"\t\t \n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n\t\t\t\t \n\t\t \"\"\" % args # noqa\t\t \n\t\t \n s += \"\"\" \n if 
(OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([gmap, gsat, ghyb, gter,\n broad, baer, bhyb,\n osm, tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));\n \"\"\" % args # noqa\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n projection: \"EPSG:4326\"\n };\n map = new OpenLayers.Map(options);\n\n var wms = new OpenLayers.Layer.WMS(\"VMap0\",\n \"http://tilecache.osgeo.org/wms-c/Basic.py?\",\n {\n layers: 'basic',\n format: 'image/png'\n }\n );\n var tmsoverlay = new OpenLayers.Layer.TMS(\"TMS Overlay\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n isBaseLayer: false,\n getURL: getURL\n });\n if (OpenLayers.Util.alphaHack() == false) {\n tmsoverlay.setOpacity(0.7);\n }\n\n map.addLayers([wms,tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent(mapBounds);\n \"\"\" % args # noqa\n\n elif self.options.profile == 'raster':\n s += \"\"\"\n var options = {\n div: \"map\",\n controls: [],\n maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),\n maxResolution: %(rastermaxresolution)f,\n numZoomLevels: %(rasterzoomlevels)d\n };\n map = new OpenLayers.Map(options);\n\n var layer = new OpenLayers.Layer.TMS(\"TMS Layer\", \"\",\n {\n serviceVersion: '.',\n layername: '.',\n alpha: true,\n type: '%(tileformat)s',\n getURL: getURL\n });\n\n map.addLayer(layer);\n map.zoomToExtent(mapBounds);\n \"\"\" % args # noqa\n\n s += \"\"\"\n map.addControls([new OpenLayers.Control.PanZoomBar(),\n new OpenLayers.Control.Navigation(),\n new OpenLayers.Control.MousePosition(),\n new OpenLayers.Control.ArgParser(),\n new OpenLayers.Control.Attribution()]);\n }\n \"\"\" % args\n\n if self.options.profile == 'mercator' and self.options.xyz is None:\n s += \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {\n z+=1;\n }\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" % args # noqa\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom()%(tmsoffset)s;\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) 
&& (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" % args # noqa\n\n elif self.options.profile == 'raster':\n s += \"\"\"\n function getURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.getServerResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.getServerZoom();\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (OpenLayers.Util.isArray(url)) {\n url = this.selectUrl(path, url);\n }\n if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {\n return url + path;\n } else {\n return emptyTileURL;\n }\n }\n \"\"\" % args # noqa\n\n s += \"\"\"\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n if (map.updateSize) { map.updateSize(); };\n }\n\n onresize=function(){ resize(); };\n\n </script>\n </head>\n <body onload=\"init()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"http://www.klokan.cz/projects/gdal2tiles/\">GDAL2Tiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. 
THANK YOU -->\n </div>\n <div id=\"map\"></div>\n <script type=\"text/javascript\" >resize()</script>\n </body>\n </html>\"\"\" % args # noqa\n\n return s", "def warp(self):\n if not self.url or not self.sourceSRS:\n return\n self.map = mapscript.mapObj()\n self.map.setSize(int(self.url.getArgument(\"width\")),int(self.url.getArgument(\"height\")))\n (minx,miny,maxx,maxy) = map(lambda x: float(x), self.url.getArgument(\"bbox\").split(\",\"))\n self.map.extent = mapscript.rectObj(minx,miny,maxx,maxy)\n self.map.web.imagepath=tempfile.mkdtemp()\n self.map.setProjection(self.targetSRS.__str__())\n self.layer = mapscript.layerObj(self.map)\n self.layer.type = mapscript.MS_LAYER_RASTER\n self.layer.connection = self.url.getConnection()\n self.layer.status = mapscript.MS_DEFAULT\n self.layer.setConnectionType(mapscript.MS_WMS,None)\n self.layer.setMetaData(\"wms_srs\",self.sourceSRS.__str__())\n self.layer.setMetaData(\"wms_name\", self.url.getArgument(\"layers\"))\n self.layer.setMetaData(\"wms_server_version\",self.url.getArgument(\"version\"))\n\n # WMS 1.3.0 is not supported by MapServer < 6.0 \n # http://trac.osgeo.org/mapserver/ticket/3039\n if self.url.getArgument(\"version\") == \"1.3.0\":\n self.layer.setMetaData(\"wms_server_version\",\"1.1.1\")\n \n if self.sourceSRS.authority == \"CRS\" and self.sourceSRS.code == \"84\":\n self.layer.setMetaData(\"wms_srs\",\"EPSG:4326\")\n \n\n self.layer.setMetaData(\"wms_exceptions_format\",self.url.getArgument(\"exceptions\"))\n self.layer.setMetaData(\"wms_formatlist\",self.url.getArgument(\"format\"))\n self.layer.setMetaData(\"wms_style\",self.url.getArgument(\"style\"))\n self.layer.setMetaData(\"wms_transparent\",self.url.getArgument(\"transparent\"))\n self.layer.setProjection(self.sourceSRS.__str__())\n self.layer.debug = 5\n\n if self.url.getArgument(\"format\") == \"image/png\":\n self.map.outputformat.imagemode = mapscript.MS_IMAGEMODE_RGBA\n if self.url.getArgument(\"format\") == \"image/jpg\":\n self.layer.setMetaData(\"wms_formatlist\",\"image/jpeg\")\n self.map.selectOutputFormat(\"image/jpeg\")\n else:\n self.map.selectOutputFormat(self.url.getArgument(\"format\"))\n self.map.outputformat.transparent= 1\n\n try:\n # draw the map\n #self.map.save(\"/tmp/pokus2.map\")\n image = self.map.draw()\n if image:\n return image\n except :\n\n # something failed during the layer drawing. 
try to print the\n # error to stderr as well as generate new image with the error\n # message\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exc(file=sys.stderr)\n traceback.print_tb(exc_traceback, limit=1, file=sys.stderr)\n\n self.map.removeLayer(0)\n self.map.setFontSet(os.path.join(os.path.abspath(os.path.dirname(__file__)),\"fonts.txt\"))\n self.map.outputformat.transparent= 0\n\n self.layer = mapscript.layerObj(self.map)\n self.layer.type = mapscript.MS_LAYER_ANNOTATION\n #self.layer.transform = mapscript.MS_OFF\n\n line = mapscript.lineObj()\n line.add(mapscript.pointObj(minx+(maxx-minx)/2.,miny+(maxy-miny)/2.))\n feature = mapscript.shapeObj()\n feature.add(line)\n self.layer.addFeature(feature)\n self.layer.labelcache = mapscript.MS_TRUE\n \n\n classobj = mapscript.classObj(self.layer)\n text = \"\"\n \n ## try to guess, where the problem is\n for i in textwrap.wrap(str(exc_value),70):\n text += i+\"\\n\"\n classobj.setText(text)\n\n classobj.label.font = \"sans\"\n classobj.label.type = mapscript.MS_TRUETYPE\n classobj.label.antialias = mapscript.MS_FALSE\n classobj.label.size = 12\n classobj.label.position = mapscript.MS_CC\n #classobj.label.partials = mapscript.MS_FALSE\n classobj.label.force = mapscript.MS_TRUE\n\n\n self.layer.status = mapscript.MS_ON\n #self.map.save(\"/tmp/pokus3.map\")\n image = self.map.draw()\n return image", "def copyLayerToMemory(in_lyr):\n\n import ogr\n\t\t\n drv_mem = ogr.GetDriverByName('Memory')\n shp_copy = drv_mem.CreateDataSource('temp')\n copy_lyr = shp_copy.CopyLayer(in_lyr, 'lyr_copy')\n in_lyr.ResetReading()\n copy_lyr.ResetReading()\n return shp_copy, copy_lyr", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == 
self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for s in strides:\n layers.append(block(self.in_planes, planes, s))\n self.in_planes = planes * block.expansion\n\n return nn.SequentialCell(*layers)", "def _make_layer(self, block, planes, blocks, stride=1):\n\n if blocks == 0:\n return nn.Sequential(nn.Identity())\n norm_layer = self._norm_layer\n upsample = None\n if stride != 1:\n upsample = nn.Sequential(\n nn.UpsamplingNearest2d(scale_factor=2),\n SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),\n norm_layer(planes * block.expansion),\n )\n elif self.inplanes != planes * block.expansion:\n upsample = nn.Sequential(\n SpectralNorm(conv1x1(self.inplanes, planes * block.expansion)),\n norm_layer(planes * block.expansion),\n )\n\n layers = [block(self.inplanes, planes, stride, upsample, norm_layer, self.large_kernel)]\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, norm_layer=norm_layer, large_kernel=self.large_kernel))\n\n return nn.Sequential(*layers)", "def makeMultiLayerMask( Tables ):\n\n # get data from the corresponding tables\n ElasticModulusData = Tables[ \"ElasticModulus\" ].getRawData( )\n ShearModulusData = Tables[ \"ShearModulus\" ].getRawData( )\n PoissonRatiosData = Tables[ \"PoissonRatios\" ].getRawData( )\n MaterialPropertiesData = Tables[ \"MaterialProperties\" ].getRawData( )\n\n # we're using an implicit method to get values from the tables since\n # the last entry represents a string of layer thicknesses\n GeometryPropertiesData = [ [ Tables[ \"GeometryProperties\" ].getValue( 0, 0 ),\n Tables[ \"GeometryProperties\" ].getValue( 0, 1 ),\n Tables[ \"GeometryProperties\" ].getValue( 0, 2 ) ] ]\n\n\n Tables[ \"ElasticModulus\" ].setBufferData( \"Input\", ElasticModulusData )\n Tables[ \"ShearModulus\" ].setBufferData( \"Input\", ShearModulusData )\n Tables[ \"PoissonRatios\" ].setBufferData( \"Input\", PoissonRatiosData )\n Tables[ \"MaterialProperties\" ].setBufferData( \"Input\", MaterialPropertiesData )\n Tables[ \"GeometryProperties\" ].setBufferData( \"Input\", GeometryPropertiesData )", "def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps", "def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n 
self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n 
tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def custom_layer_factory(self):\n raise NotImplementedError(\n '[custom_layer_factory] must be implemented by the subclass.')", "def get_layers(self, content_retriever):\n pass", "def get_layers(self, content_retriever):\n pass", "def get_layer_objects(self, layer, x0, y0, x1, y1, srid):\n l = Layer.objects.filter(code=layer).first()\n if not l:\n return {}\n features = []\n bbox = self.get_bbox(x0, y0, x1, y1, srid)\n for d in GeoData.objects.filter(layer=l, data__geo_within=bbox):\n features += [geojson.Feature(\n id=str(d[\"id\"]),\n geometry=self.transform(d[\"data\"], self.db_proj, srid),\n properties={\n \"object\": str(d.object.id),\n \"label\": d.label.encode(\"utf-8\") if d.label else \"\"\n }\n )]\n return geojson.FeatureCollection(features=features, crs=srid)", "def generate_map(self):\n\n # Create main streets first\n self.create_main_streets()\n\n # Then create the commercial buildings in the center of town\n self.create_commercial_center()\n\n # Then create the neighborhoods that populate the rest of the city\n while(self.create_neighborhood()):\n pass\n\n # Clean up any invalid buildings that were created\n self.delete_inaccessible_buildings()", "def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n TransformerDecoderBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=(\"layer_%d\" % i)))\n super(TransformerDecoder, self).build(unused_input_shapes)", "def get_water_network():\n\n j = request.json\n\n region = ee.Geometry(j['region'])\n start = j['start']\n stop = j['stop']\n scale = j['scale']\n crs = j['crs']\n\n # get water mask\n water_vector = get_water_mask_vector(region, scale, start, stop)\n\n # skeletonize\n output = generate_skeleton_from_voronoi(scale, water_vector)\n centerline = output[\"centerline\"]\n\n centerline = centerline.map(\n lambda line: line.set('length', line.length(scale / 10)))\n\n centerline = centerline.map(transform_feature(crs, scale))\n\n # create response\n data = centerline.getInfo()\n\n return Response(json.dumps(data), status=200, mimetype='application/json')", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n 
self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data\n\n blocks = [self.layer_11,\n self.layer_12,\n self.layer_13,\n self.layer_14,\n self.layer_15]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def define_layers(self):\n\n if self.D0Flag:\n self.d = self.h\n\n self.layers = [nn.Sequential(nn.Linear(self.d, self.h),\n nn.ReLU(), )] # nn.BatchNorm1d(self.h, affine=False))]\n for l in range(1, self.L):\n self.layers.append(nn.Sequential(nn.Linear(self.h, self.h),\n nn.ReLU(), )) # nn.BatchNorm1d(self.h, affine=False)))\n\n self.layers.append(nn.Linear(self.h, 1))", "def setup_layer_structure(self):\n self.page_rank_convolution_1 = self.layer(self.feature_number, self.args.layers[0], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_2 = self.layer(self.args.layers[0], self.args.layers[1], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_3 = self.layer(self.args.layers[1], self.class_number, self.args.iterations, self.args.alpha)", "def _init_world(self):\n self.world.restricted_world = {\n 'not_road': [],\n 'cross_road': [],\n }\n for polygon in self._data_loader.data.get_polygons(0):\n polygon_name = polygon['label']\n polygon_points = polygon['points']\n if polygon_name in {'not_road', 'cross_road'}:\n self.world.restricted_world[polygon_name].append(geometry.Polygon(\n self._data_loader.convertIMG2PLAY(polygon_points)\n ))", "def _build_point_cloud_graph(self):\n depths = self.depths_placeholder[tf.newaxis]\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n intrinsics = intrinsics_vec_to_matrix(intrinsics)\n\n depths_pad = tf.pad(depths, [[0,0],[0,0],[0,1],[0,1]], \"CONSTANT\")\n\n depths_grad = \\\n (depths_pad[:, :, 1:, :-1] - depths_pad[:, :, :-1, :-1])**2 
+ \\\n (depths_pad[:, :, :-1, 1:] - depths_pad[:, :, :-1, :-1])**2\n\n # don't use large depths for point cloud and ignore boundary regions\n valid = (depths < 3.0) & (depths_grad < 0.001)\n\n batch, num, ht, wd = tf.unstack(tf.shape(depths), num=4)\n Ts = VideoSE3Transformation(matrix=poses)\n X0 = projective_ops.backproject(depths, intrinsics)\n \n # transform point cloud into world coordinates\n X1 = Ts.inv()(X0)\n\n crop_h = 12\n crop_w = 32\n\n X1 = X1[:, :, crop_h:-crop_h, crop_w:-crop_w]\n valid = valid[:, :, crop_h:-crop_h, crop_w:-crop_w]\n images = images[:, :, crop_h:-crop_h, crop_w:-crop_w, ::-1]\n \n X1 = tf.reshape(X1, [-1, 3])\n colors = tf.reshape(images, [-1, 3])\n\n valid_inds = tf.where(tf.reshape(valid, [-1]))\n valid_inds = tf.reshape(valid_inds, [-1])\n\n X1 = tf.gather(X1, valid_inds, axis=0)\n colors = tf.gather(colors, valid_inds, axis=0)\n\n self.outputs['point_cloud'] = (X1, colors)", "def build(self):\n\n layers = GiraffeLayer.get_all_structural()\n \n for layer in layers:\n\n self.add_objects_from_layer(layer)\n\n return self", "def get_4d(slice, copylayers=[], transparancy=0):\n assert slice.ndim < 3\n img = np.zeros(slice.shape)\n img = img[:, :, np.newaxis]\n img = np.repeat(img, 4, 2)\n transparancy = 255 - (255 * transparancy)\n img[:, :, -1] = transparancy\n for layer in copylayers:\n img[:, :, layer] = slice\n return(img)", "def CreateLayer(self,layername):\n\t\treturn self.acad.ActiveDocument.Layers.Add(layername)", "def multipleInBetweenLayerEdgesIntoNodeWithNoFixedPortOrder(self):\n graph = self.graph\n makeLayer = self.makeLayer\n addNodesToLayer = self.addNodesToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n\n leftLayer = makeLayer(graph)\n leftNodes = addNodesToLayer(2, leftLayer)\n rightLayer = makeLayer(graph)\n rightNodes = addNodesToLayer(2, rightLayer)\n\n self.addInLayerEdge(rightNodes[0], rightNodes[1], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n eastWestEdgeFromTo(leftNodes[0], rightNodes[1])\n\n return graph", "def createEmptyLayer(self):\n # , wt.greeting: False , wt.ects: False, wt.preReqs: False, wt.courseCodeMentioned: False\n layer = {wt.questionWord: \"\", wt.pronoun: \"\", wt.verb: \"\", wt.websiteName: \"\", wt.timeWord: \"\", wt.about: \"\",\n wt.weather: \"\", wt.when: \"\", wt.keywords: [], wt.courseID: \"\", wt.structureUnitCode: \"\",\n wt.sentence: [], wt.hangman: \"\", wt.what: \"\"}\n return layer", "def refreshLayerLists(self):\n self.layers = self.iface.legendInterface().layers()\n self.lineLayerIndexMap = dict()\n self.pointLayerIndexMap = dict()\n self.lineLayerList = [] # holds the filtered layer names\n self.pointLayerList = [] # holds the filtered layer names\n for i, layer in enumerate(self.layers):\n try:\n if layer.geometryType() == 0: # 0: point, 1: line\n self.pointLayerIndexMap[len(self.pointLayerList)] = i # put the index pair in the dictionary\n self.pointLayerList.append(layer.name()) # add the layer name to the list\n elif layer.geometryType() == 1: # 0: point, 1: line\n self.lineLayerIndexMap[len(self.lineLayerList)] = i # put the index pair in the dictionary\n self.lineLayerList.append(layer.name()) # add the layer name to the list\n except AttributeError:\n # if the above checks failed, i.e. 
because of a raster layer, skip it\n continue", "def generatePolygons():", "def load_registration_as_layers() -> None:\n viewer = getattr(widget, \"viewer\").value\n registration_directory = pathlib.Path(\n getattr(widget, \"registration_output_folder\").value\n )\n add_registered_image_layers(\n viewer, registration_directory=registration_directory\n )", "def _create_zones(self, output_path):\n for z_id in self.zone_ids:\n Z = Zone(z_id, self.operator, output_path, rs=self.rs1)\n Z.read_daily_demand(self.daily_OD_demand) # , self.daily_pickup_demand\n self.zones.append(Z)", "def generate_openlayers( self ):\n\n args = {}\n args['title'] = self.options.title\n args['googlemapskey'] = self.options.googlekey\n args['yahooappid'] = self.options.yahookey\n args['south'], args['west'], args['north'], args['east'] = self.swne\n args['minzoom'] = self.tminz\n args['maxzoom'] = self.tmaxz\n args['tilesize'] = self.tilesize\n args['tileformat'] = format_extension[self.image_output.format]\n if self.image_output.format == \"PNG\":\n args['has_alpha'] = 'true'\n else:\n args['has_alpha'] = 'false'\n args['publishurl'] = \"\" if self.options.url is None else self.options.url\n args['copyright'] = self.options.copyright\n if self.options.profile in ('raster', 'gearth'):\n args['rasterzoomlevels'] = self.tmaxz+1\n args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]\n\n s = \"\"\"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html xmlns=\"http://www.w3.org/1999/xhtml>\"\n <head>\n <title>%(title)s</title>\n <meta http-equiv='imagetoolbar' content='no'/>\n <style type=\"text/css\"> v\\:* {behavior:url(#default#VML);}\n html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }\n body { margin: 10px; background: #fff; }\n h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }\n #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }\n #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}\n #map { height: 95%%; border: 1px solid #888; }\n </style>\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n <script src='http://dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=6.1'></script>\n <script src='http://maps.google.com/maps?file=api&amp;v=2&amp;key=%(googlemapskey)s' type='text/javascript'></script>\n <script src=\"http://api.maps.yahoo.com/ajaxymap?v=3.0&amp;appid=%(yahooappid)s\"></script>\"\"\" % args\n\n s += \"\"\"\n <script src=\"http://www.openlayers.org/api/2.7/OpenLayers.js\" type=\"text/javascript\"></script>\n <script type=\"text/javascript\">\n var map;\n var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);\n var mapMinZoom = %(minzoom)s;\n var mapMaxZoom = %(maxzoom)s;\n\n // avoid pink tiles\n OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;\n OpenLayers.Util.onImageLoadErrorColor = \"transparent\";\n\n function init(){\"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n var options = {\n controls: [],\n projection: new OpenLayers.Projection(\"EPSG:900913\"),\n displayProjection: new OpenLayers.Projection(\"EPSG:4326\"),\n units: \"m\",\n maxResolution: 156543.0339,\n maxExtent: new OpenLayers.Bounds(-20037508, -20037508, 20037508, 20037508.34)\n };\n map = new OpenLayers.Map('map', options);\n\n // create Google Mercator layers\n var gmap = new OpenLayers.Layer.Google(\"Google Streets\",\n { sphericalMercator: true, 
numZoomLevels: 20} );\n var gsat = new OpenLayers.Layer.Google(\"Google Satellite\",\n {type: G_SATELLITE_MAP, sphericalMercator: true, numZoomLevels: 20} );\n var ghyb = new OpenLayers.Layer.Google(\"Google Hybrid\",\n {type: G_HYBRID_MAP, sphericalMercator: true, numZoomLevels: 20});\n var gter = new OpenLayers.Layer.Google(\"Google Terrain\",\n {type: G_PHYSICAL_MAP, sphericalMercator: true, numZoomLevels: 20 });\n\n // create Virtual Earth layers\n OpenLayers.Layer.VirtualEarth.prototype.MAX_ZOOM_LEVEL=19;\n OpenLayers.Layer.VirtualEarth.prototype.RESOLUTIONS=OpenLayers.Layer.Google.prototype.RESOLUTIONS\n var veroad = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Roads\",\n {'type': VEMapStyle.Road, 'sphericalMercator': true, numZoomLevels: 20});\n var veaer = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Aerial\",\n {'type': VEMapStyle.Aerial, 'sphericalMercator': true, numZoomLevels: 20 });\n var vehyb = new OpenLayers.Layer.VirtualEarth(\"Virtual Earth Hybrid\",\n {'type': VEMapStyle.Hybrid, 'sphericalMercator': true});\n\n // create Yahoo layer\n var yahoo = new OpenLayers.Layer.Yahoo(\"Yahoo Street\",\n {'sphericalMercator': true});\n var yahoosat = new OpenLayers.Layer.Yahoo(\"Yahoo Satellite\",\n {'type': YAHOO_MAP_SAT, 'sphericalMercator': true});\n var yahoohyb = new OpenLayers.Layer.Yahoo(\"Yahoo Hybrid\",\n {'type': YAHOO_MAP_HYB, 'sphericalMercator': true});\n\n // create OSM/OAM layer\n var osm = new OpenLayers.Layer.TMS( \"OpenStreetMap\",\n \"http://tile.openstreetmap.org/\",\n { type: 'png', getURL: osm_getTileURL, displayOutsideMaxExtent: true,\n attribution: '<a href=\"http://www.openstreetmap.org/\">OpenStreetMap</a>'} );\n var oam = new OpenLayers.Layer.TMS( \"OpenAerialMap\",\n \"http://tile.openaerialmap.org/tiles/1.0.0/openaerialmap-900913/\",\n { type: 'png', getURL: osm_getTileURL } );\n\n // create TMS Overlay layer\n var tmsoverlay = new OpenLayers.Layer.TMS( \"TMS Overlay\", \"\",\n { // url: '', serviceVersion: '.', layername: '.',\n type: '%(tileformat)s', getURL: overlay_getTileURL, alpha: %(has_alpha)s,\n isBaseLayer: false\n });\n if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); }\n\n map.addLayers([gmap, gsat, ghyb, gter, veroad, veaer, vehyb,\n yahoo, yahoosat, yahoohyb, osm, oam,\n tmsoverlay]);\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent( mapBounds.transform(map.displayProjection, map.projection ) );\n \"\"\" % args\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n var options = {\n controls: [],\n projection: new OpenLayers.Projection(\"EPSG:4326\"),\n maxResolution: 0.703125,\n maxExtent: new OpenLayers.Bounds(-180, -90, 180, 90)\n };\n map = new OpenLayers.Map('map', options);\n\n layer = new OpenLayers.Layer.WMS( \"Blue Marble\",\n \"http://labs.metacarta.com/wms-c/Basic.py?\", {layers: 'satellite' } );\n map.addLayer(layer);\n wms = new OpenLayers.Layer.WMS( \"VMap0\",\n \"http://labs.metacarta.com/wms-c/Basic.py?\", {layers: 'basic', format: 'image/png' } );\n map.addLayer(wms);\n\n var tmsoverlay = new OpenLayers.Layer.TMS( \"TMS Overlay\", \"\",\n {\n serviceVersion: '.', layername: '.', alpha: %(has_alpha)s,\n type: '%(tileformat)s', getURL: overlay_getTileURL,\n isBaseLayer: false\n });\n map.addLayer(tmsoverlay);\n if (OpenLayers.Util.alphaHack() == false) { tmsoverlay.setOpacity(0.7); }\n\n var switcherControl = new OpenLayers.Control.LayerSwitcher();\n 
map.addControl(switcherControl);\n switcherControl.maximizeControl();\n\n map.zoomToExtent( mapBounds );\n \"\"\" % args\n\n elif self.options.profile in ('raster', 'gearth'):\n s += \"\"\"\n var options = {\n controls: [],\n maxExtent: new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s ),\n maxResolution: %(rastermaxresolution)f,\n numZoomLevels: %(rasterzoomlevels)d\n };\n map = new OpenLayers.Map('map', options);\n\n var layer = new OpenLayers.Layer.TMS( \"TMS Layer\",\"\",\n { url: '', serviceVersion: '.', layername: '.', alpha: %(has_alpha)s,\n type: '%(tileformat)s', getURL: overlay_getTileURL\n });\n map.addLayer(layer);\n map.zoomToExtent( mapBounds );\n \"\"\" % args\n\n\n s += \"\"\"\n map.addControl(new OpenLayers.Control.PanZoomBar());\n map.addControl(new OpenLayers.Control.MousePosition());\n map.addControl(new OpenLayers.Control.MouseDefaults());\n map.addControl(new OpenLayers.Control.KeyboardDefaults());\n }\n \"\"\" % args\n\n if self.options.profile == 'mercator':\n s += \"\"\"\n function osm_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((this.maxExtent.top - bounds.top) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n var limit = Math.pow(2, z);\n\n if (y < 0 || y >= limit) {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n } else {\n x = ((x %% limit) + limit) %% limit;\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n }\n }\n\n function overlay_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n if (this.map.baseLayer.name == 'Virtual Earth Roads' || this.map.baseLayer.name == 'Virtual Earth Aerial' || this.map.baseLayer.name == 'Virtual Earth Hybrid') {\n z = z + 1;\n }\n if (mapBounds.intersectsBounds( bounds ) && z >= mapMinZoom && z <= mapMaxZoom ) {\n //console.log( this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type);\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n elif self.options.profile == 'geodetic':\n s += \"\"\"\n function overlay_getTileURL(bounds) {\n bounds = this.adjustBounds(bounds);\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n var path = this.serviceVersion + \"/\" + this.layername + \"/\" + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n var url = this.url;\n if (mapBounds.intersectsBounds( bounds ) && z >= mapMinZoom && z <= mapMaxZoom) {\n // console.log( this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type);\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n elif self.options.profile in ('raster','gearth'):\n s += \"\"\"\n function overlay_getTileURL(bounds) {\n var res = this.map.getResolution();\n var x = Math.round((bounds.left - this.maxExtent.left) / (res * this.tileSize.w));\n var y = Math.round((bounds.bottom - this.maxExtent.bottom) / (res * this.tileSize.h));\n var z = this.map.getZoom();\n if (x >= 0 && y >= 0) 
{\n return this.url + z + \"/\" + x + \"/\" + y + \".\" + this.type;\n } else {\n return \"https://github.com/mj10777/mapmbtiles/img/none.png\";\n }\n }\n \"\"\" % args\n\n s += \"\"\"\n function getWindowHeight() {\n if (self.innerHeight) return self.innerHeight;\n if (document.documentElement && document.documentElement.clientHeight)\n return document.documentElement.clientHeight;\n if (document.body) return document.body.clientHeight;\n return 0;\n }\n\n function getWindowWidth() {\n if (self.innerWidth) return self.innerWidth;\n if (document.documentElement && document.documentElement.clientWidth)\n return document.documentElement.clientWidth;\n if (document.body) return document.body.clientWidth;\n return 0;\n }\n\n function resize() {\n var map = document.getElementById(\"map\");\n var header = document.getElementById(\"header\");\n var subheader = document.getElementById(\"subheader\");\n map.style.height = (getWindowHeight()-80) + \"px\";\n map.style.width = (getWindowWidth()-20) + \"px\";\n header.style.width = (getWindowWidth()-20) + \"px\";\n subheader.style.width = (getWindowWidth()-20) + \"px\";\n if (map.updateSize) { map.updateSize(); };\n }\n\n onresize=function(){ resize(); };\n\n </script>\n </head>\n <body onload=\"init()\">\n <div id=\"header\"><h1>%(title)s</h1></div>\n <div id=\"subheader\">Generated by <a href=\"https://github.com/mj10777/mapmbtiles\">MapMbTiles</a>/<a href=\"http://www.klokan.cz/projects/gdal2mbtiles/\">GDAL2MbTiles</a>, Copyright &copy; 2008 <a href=\"http://www.klokan.cz/\">Klokan Petr Pridal</a>, <a href=\"http://www.gdal.org/\">GDAL</a> &amp; <a href=\"http://www.osgeo.org/\">OSGeo</a> <a href=\"http://code.google.com/soc/\">GSoC</a>\n <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. 
THANK YOU -->\n </div>\n <div id=\"map\"></div>\n <script type=\"text/javascript\" >resize()</script>\n </body>\n </html>\"\"\" % args\n\n return s", "def layer_offsets(self):\n ...", "def dummy_layer(request):\n easting = np.linspace(-1, 3, 5)\n northing = np.linspace(7, 13, 4)\n shape = (northing.size, easting.size)\n reference = 0\n surface = np.arange(20, dtype=float).reshape(*shape)\n density = 2670 * np.ones(shape)\n if request.param == \"xarray\":\n easting = xr.DataArray(easting, dims=(\"easting\",))\n northing = xr.DataArray(northing, dims=(\"northing\",))\n reference, surface = xr.DataArray(reference), xr.DataArray(surface)\n density = xr.DataArray(density)\n return (easting, northing), surface, reference, density", "async def generate_snapshot(self, include_world=True):\n if self.has_marker:\n y, x = -self.mlat, self.mlng\n else:\n y, x = -self.clat, self.clng\n zoom = self.zoom\n world = MapController.get_world_image()\n top, bottom = y - 256/(2**zoom), y + 256/(2**zoom)\n left, right = x - 256/(2**zoom), x + 256/(2**zoom)\n logging.info(f'Cropping world at {left} {top} {right} {bottom}')\n snapshot = world.crop((left, top, right, bottom))\n if top - bottom <= 256:\n snapshot = snapshot.resize((256, 256), resample=Image.NEAREST)\n else:\n snapshot = snapshot.resize((256, 256), resample=Image.BILINEAR)\n\n if self.has_marker:\n marker = MapController.get_marker_image()\n snapshot.paste(marker, (112, 96), marker.getchannel('A'))\n\n if include_world:\n # Expand the canvas and put the world map under the inset\n result = Image.new('RGBA', (256, 400))\n result.paste(snapshot)\n result.paste(world.resize((256, 144)), (0, 256))\n\n # Draw a marker at the same place at the world map\n if self.has_marker:\n result.paste(marker, (int(x//32)-16, int(y//32)-32+256), marker.getchannel('A'))\n\n # Draw an overlay indicating the inset on the world map\n overlay = Image.new('RGBA', (256, 400))\n draw = ImageDraw.Draw(overlay)\n fill_color = (255, 255, 0, 64) # Transparent yellow\n outline_color = (255, 255, 0, 96) # More solid yellow\n draw.rectangle((max(0, int(left//32)),\n max(0, int(top//32))+256,\n min(256, int(right//32)),\n min(144, int(bottom//32))+256),\n fill=fill_color,\n outline=outline_color,\n width=1)\n draw.line((0, 256, max(0, int(left//32)), min(144, int(bottom//32))+256), (255, 255, 0, 96), 1)\n draw.line((256, 256, min(256, int(right//32)), min(144, int(bottom//32))+256), (255, 255, 0, 96), 1)\n result = Image.alpha_composite(result, overlay)\n else:\n result = snapshot\n \n output = BytesIO()\n result.save(output, format='png')\n output.seek(0)\n return output", "def build(self, unused_input_shapes):\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n CustomTransformerBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n float_type=self.float_type,\n name=(\"layer_%d\" % i)))\n super(CustomTransformer, self).build(unused_input_shapes)", "def make_layered_psd_from_images():\n\n\t\n\tdoc = open_document(FILEPATHS[0], show=False)\n\tdoc_root = doc.rootNode()\n\t\n\tdocs = []\n\tdocs.append(doc)\n\n\tall_layers = get_layers(doc)\n\tfor i in range(1, len(FILEPATHS)):\n\t\tdocx = open_document(FILEPATHS[i], 
show=False)\n\t\tdocs.append(docx)\n\t\tdocx_layers = get_layers(docx)\n\t\tfor layer in docx_layers:\n\t\t\tall_layers.append(layer.clone())\n\t\t\t# doc.rootNode().addChildNode(layer, parent_node)\n\tdoc_root.setChildNodes(all_layers)\n\n\tprint('Debug: all nodes: %s' % doc.rootNode().childNodes())\n\t# doc.refreshProjection()\n\n\tsave_filepath = filepath = QtWidgets.QFileDialog.getSaveFileName()[0]\n\tr = doc.saveAs(save_filepath)\n\tprint('Debug: saved: %s' % save_filepath)\n\t\n\tfor doc in docs:\n\t\tprint('Debug: closing %s' % doc)\n\t\tdoc.close()\n\n\tprint('Debug: Script done')", "def init_encoder(self):\n\n vgg = models.vgg16(pretrained=True)\n\n blocks = [self.layer_1,\n self.layer_2,\n self.layer_3,\n self.layer_4,\n self.layer_5]\n\n ranges = [[0, 4], [5, 9], [10, 16], [17, 23], [24, 29]]\n features = list(vgg.features.children())\n\n vgg_layers = []\n for _layer in features:\n if isinstance(_layer, nn.Conv2d):\n vgg_layers.append(_layer)\n\n merged_layers = []\n for idx, conv_block in enumerate(blocks):\n if idx < 2:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit]\n else:\n units = [conv_block.conv1.cbr_unit,\n conv_block.conv2.cbr_unit,\n conv_block.conv3.cbr_unit]\n for _unit in units:\n for _layer in _unit:\n if isinstance(_layer, nn.Conv2d):\n merged_layers.append(_layer)\n\n assert len(vgg_layers) == len(merged_layers)\n\n for l1, l2 in zip(vgg_layers, merged_layers):\n if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):\n assert l1.weight.size() == l2.weight.size()\n assert l1.bias.size() == l2.bias.size()\n l2.weight.data = l1.weight.data\n l2.bias.data = l1.bias.data", "def get_layers(self):\n layers = set()\n for element in itertools.chain(self.polygons, self.paths):\n layers.update(element.layers)\n for reference in self.references:\n layers.update(reference.ref_cell.get_layers())\n for label in self.labels:\n layers.add(label.layer)\n return layers", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. 
call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def layers_from_h5(ctx, out_dir, layers, hsds):\n excl_h5 = ctx.obj['EXCL_H5']\n if layers is not None:\n layers = {layer: os.path.join(out_dir, \"{}.tif\".format(layer))\n for layer in layers}\n ExclusionsConverter.extract_layers(excl_h5, layers, hsds=hsds)\n else:\n ExclusionsConverter.extract_all_layers(excl_h5, out_dir, hsds=hsds)", "def _init_layers(self) -> None:\n self.convs_all_levels = nn.ModuleList()\n for i in range(self.start_level, self.end_level + 1):\n convs_per_level = nn.Sequential()\n convs_per_level.add_module(\n f'conv{i}',\n ConvModule(\n self.in_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n inplace=False,\n bias=False))\n self.convs_all_levels.append(convs_per_level)\n\n conv_branch = []\n for _ in range(self.num_stacked_convs):\n conv_branch.append(\n ConvModule(\n self.feat_channels,\n self.feat_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=False))\n self.conv_branch = nn.Sequential(*conv_branch)\n\n self.conv_pred = nn.Conv2d(\n self.feat_channels, self.out_channels, 1, stride=1)", "def make_res_layer(self, **kwargs):\n return ResLayer(\n groups=self.groups,\n base_width=self.base_width,\n base_channels=self.base_channels,\n **kwargs)", "def _init_layers(self):\n cls_branch = []\n for _ in range(self.num_reg_fcs):\n cls_branch.append(Linear(self.embed_dims, self.embed_dims))\n cls_branch.append(nn.LayerNorm(self.embed_dims))\n cls_branch.append(nn.ReLU(inplace=True))\n cls_branch.append(Linear(self.embed_dims, self.cls_out_channels))\n fc_cls = nn.Sequential(*cls_branch)\n\n reg_branch = []\n for _ in range(self.num_reg_fcs):\n reg_branch.append(Linear(self.embed_dims, self.embed_dims))\n reg_branch.append(nn.ReLU())\n reg_branch.append(Linear(self.embed_dims, self.code_size))\n reg_branch = nn.Sequential(*reg_branch)\n\n past_traj_reg_branch = []\n for _ in range(self.num_reg_fcs):\n past_traj_reg_branch.append(\n Linear(self.embed_dims, self.embed_dims))\n past_traj_reg_branch.append(nn.ReLU())\n past_traj_reg_branch.append(\n Linear(self.embed_dims, (self.past_steps + self.fut_steps)*2))\n past_traj_reg_branch = nn.Sequential(*past_traj_reg_branch)\n\n def _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n # last reg_branch is used to generate proposal from\n # encode feature map when as_two_stage is True.\n num_pred = (self.transformer.decoder.num_layers + 1) if \\\n self.as_two_stage else self.transformer.decoder.num_layers\n\n if 
self.with_box_refine:\n self.cls_branches = _get_clones(fc_cls, num_pred)\n self.reg_branches = _get_clones(reg_branch, num_pred)\n self.past_traj_reg_branches = _get_clones(\n past_traj_reg_branch, num_pred)\n else:\n self.cls_branches = nn.ModuleList(\n [fc_cls for _ in range(num_pred)])\n self.reg_branches = nn.ModuleList(\n [reg_branch for _ in range(num_pred)])\n self.past_traj_reg_branches = nn.ModuleList(\n [past_traj_reg_branch for _ in range(num_pred)])\n if not self.as_two_stage:\n self.bev_embedding = nn.Embedding(\n self.bev_h * self.bev_w, self.embed_dims)", "def make_layers(self, n_repetitions: int = 1) -> List[List[tuple]]:\n if n_repetitions <= 0:\n raise ValueError(\"The number of repetitions must be positve\")\n\n root = [self.items]\n graph_layers = [root] + [[]] * (self.depth * 2)\n\n for _ in range(n_repetitions):\n layers = self.random_layers()\n for h in range(1, len(layers)):\n graph_layers[h] = graph_layers[h] + layers[h]\n\n return graph_layers", "def ogrCreateLayer(sourceLayer, pgConn, destinationLayer):\r\n print \" Creating {0}\".format(destinationLayer)\r\n newLayer = pgConn.CreateLayer(destinationLayer)\r\n\r\n lyrDefn = sourceLayer.GetLayerDefn()\r\n for i in range( lyrDefn.GetFieldCount() ):\r\n ##print \"Creating field: {0}\".format(lyrDefn.GetFieldDefn( i ).GetName())\r\n\r\n fieldName = lyrDefn.GetFieldDefn( i ).GetName()\r\n fieldType = lyrDefn.GetFieldDefn( i ).GetType()\r\n newField = ogr.FieldDefn(fieldName, fieldType)\r\n newLayer.CreateField(newField)", "def comp_add_wireframe_freestyle(self):\n scene = self.set_as_active()\n scene.use_nodes = True\n tree = scene.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_alphaover = tree.nodes.new('CompositorNodeAlphaOver')\n node_alphaover.location = -25, 50\n\n node_rlwire = tree.nodes.new('CompositorNodeRLayers')\n node_rlwire.location = -400, 250\n node_rlwire.scene = scene\n node_rlwire.layer = w_var.rlname\n\n node_rlclay = tree.nodes.new('CompositorNodeRLayers')\n node_rlclay.location = -400, -75\n node_rlclay.scene = scene\n node_rlclay.layer = w_var.rlname_other\n\n node_comp = tree.nodes.new('CompositorNodeComposite')\n node_comp.location = 400, 65\n\n node_viewer = tree.nodes.new('CompositorNodeViewer')\n node_viewer.location = 400, -125\n\n # connecting the nodes\n links = tree.links\n links.new(node_rlwire.outputs[0], node_alphaover.inputs[1])\n links.new(node_rlclay.outputs[0], node_alphaover.inputs[2])\n\n if w_var.cb_ao:\n node_mixcolor_wire = tree.nodes.new('CompositorNodeMixRGB')\n node_mixcolor_wire.location = -125, 150\n node_mixcolor_wire.blend_type = 'MULTIPLY'\n node_mixcolor_wire.inputs[0].default_value = 0.730\n\n node_mixcolor_clay = tree.nodes.new('CompositorNodeMixRGB')\n node_mixcolor_clay.location = -125, -100\n node_mixcolor_clay.blend_type = 'MULTIPLY'\n node_mixcolor_clay.inputs[0].default_value = 0.730\n\n node_alphaover.location = 125, 75\n\n links.new(node_rlwire.outputs[0], node_mixcolor_wire.inputs[1])\n links.new(node_rlwire.outputs[10], node_mixcolor_wire.inputs[2])\n\n links.new(node_rlclay.outputs[0], node_mixcolor_clay.inputs[1])\n links.new(node_rlclay.outputs[10], node_mixcolor_clay.inputs[2])\n\n links.new(node_mixcolor_wire.outputs[0], node_alphaover.inputs[1])\n links.new(node_mixcolor_clay.outputs[0], node_alphaover.inputs[2])\n\n links.new(node_alphaover.outputs[0], node_comp.inputs[0])\n links.new(node_alphaover.outputs[0], node_viewer.inputs[0])\n\n else:\n links.new(node_alphaover.outputs[0], node_comp.inputs[0])\n 
links.new(node_alphaover.outputs[0], node_viewer.inputs[0])\n\n for node in tree.nodes:\n node.select = False", "def consume_layer(self, reports):\n layer_list = []\n layer_count = 1\n for report in reports:\n layer = create_image_layer(report)\n layer.layer_index = layer_count\n layer_list.append(layer)\n layer_count += 1\n return layer_list", "def create_tourism_raster(self):\n self.create_raster('flickr',\n pixeltype='32BF', noData=0,\n value_col='pictures')", "def make_source(self):\n sources = []\n for feature in self.regions_json['features']:\n sources.append(dict(type= 'FeatureCollection', features = [feature]))\n return sources", "def _get_layers(self) :\n \n return self._layers", "def render_map(self):\n # first we create a blank image, on which we will draw the base map\n width = self.image_size[0]\n height = self.image_size[1]\n # ex: size of the image 1080 height, 1920 width, 3 channels of colour\n base_map = np.zeros((height, width, 3), np.uint8)\n base_map[:, :] = self.background_color\n\n # we draw each shape of the dictionary on the blank image\n for shape_id in self.shape_dict_filt:\n shape = self.shape_dict_filt[shape_id]\n points = shape.points\n pts = np.array(points, np.int32)\n cv2.polylines(base_map, [pts], True, shape.color_line,\n shape.line_thick, cv2.LINE_AA)\n\n self.map_file = base_map", "def get_water_network_properties():\n\n j = request.json\n\n region = ee.Geometry(j['region'])\n start = j['start']\n stop = j['stop']\n scale = j['scale']\n\n step = j['step']\n\n crs = j['crs']\n\n error = ee.ErrorMargin(scale / 2, 'meters')\n\n if 'network' in j:\n raise Exception(\n 'TODO: re-using existing networks is not supported yet')\n\n # get water mask\n water_vector = get_water_mask_vector(region, scale, start, stop)\n\n # skeletonize\n output = generate_skeleton_from_voronoi(scale, water_vector)\n centerline = ee.FeatureCollection(output[\"centerline\"])\n distance = ee.Image(output[\"distance\"])\n\n # generate width at every offset\n centerline = centerline.map(\n lambda line: line.set(\"length\", line.length(error)))\n\n short_lines = centerline.filter(ee.Filter.lte('length', step))\n long_lines = centerline.filter(ee.Filter.gt('length', step))\n\n def process_long_line(line):\n line_length = line.length(error)\n distances = ee.List.sequence(0, line_length, step)\n segments = line.geometry().cutLines(distances, error)\n\n def generate_line_middle_point(pair):\n pair = ee.List(pair)\n\n s = ee.Geometry(pair.get(0))\n offset = ee.Number(pair.get(1))\n\n centroid = ee.Geometry.Point(s.coordinates().get(0))\n\n return ee.Feature(centroid) \\\n .set(\"lineId\", line.id()) \\\n .set(\"offset\", offset)\n\n segments = segments.geometries().zip(distances) \\\n .map(generate_line_middle_point)\n\n return ee.FeatureCollection(segments)\n\n long_line_points = long_lines.map(process_long_line).flatten()\n\n def process_short_line(line):\n geom = line.geometry(error, 'EPSG:4326')\n\n geom = ee.Geometry.Point(geom.coordinates().get(0), 'EPSG:4326')\n\n return ee.Feature(geom) \\\n .set(\"lineId\", line.id()) \\\n .set(\"offset\", 0)\n\n short_line_points = short_lines.map(process_short_line)\n\n points = long_line_points.merge(short_line_points)\n\n fa = ee.Image('WWF/HydroSHEDS/15ACC')\n dem = ee.Image('JAXA/ALOS/AW3D30_V1_1').select('MED')\n\n def add_flow_accumulation(pt):\n flow_accumulation = fa.reduceRegion(\n reducer=ee.Reducer.max(),\n geometry=pt.geometry().buffer(scale * 10),\n scale=scale).values().get(0)\n\n return pt \\\n .set(\"flow_accumulation\", 
ee.Number(flow_accumulation))\n\n def add_width(pt):\n width = distance.reduceRegion(\n reducer=ee.Reducer.max(),\n geometry=pt.geometry(),\n scale=scale).values().get(0)\n\n return pt \\\n .set(\"width\", ee.Number(width).multiply(scale).multiply(2))\n\n def add_elevation(pt):\n elevation = dem.reduceRegion(\n reducer=ee.Reducer.median(),\n geometry=pt.geometry().buffer(scale * 10),\n scale=scale).values().get(0)\n\n return pt \\\n .set(\"elevation\", ee.Number(elevation))\n\n points = points.map(add_width)\n points = points.map(add_elevation)\n points = points.map(add_flow_accumulation)\n\n points = points.map(transform_feature(crs, scale))\n\n # create response\n data = points.getInfo()\n\n return Response(json.dumps(data), status=200, mimetype='application/json')", "def render(self):\r\n super().render()\r\n layers, titles, latVect, lonVect = self.make_layers()\r\n LON, LAT = np.meshgrid(lonVect, latVect)\r\n lon = LON.flatten()\r\n lat = LAT.flatten()\r\n for i in range(len(layers)):\r\n vals = layers[i].flatten()\r\n hovertext = []\r\n for k in range(len(vals)):\r\n hovertext.append('lon: {:.2f}<br>lat: {:.2f}<br>{}: {:.1e}'.format(lon[k], lat[k], self.variable + self.unit,vals[k]))\r\n if self.levels == 0:\r\n data = [\r\n go.Heatmap(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n zmin=self.vmin,\r\n zmax=self.vmax,\r\n hoverinfo='text',\r\n text=hovertext \r\n )\r\n ]\r\n elif self.levels > 0:\r\n data = [\r\n go.Contour(\r\n x=lon,\r\n y=lat,\r\n z=vals,\r\n colorscale=self.cmap,\r\n hoverinfo='text',\r\n text=hovertext, \r\n connectgaps=False,\r\n contours=dict(\r\n coloring='heatmap',\r\n showlabels=True,\r\n start=self.vmin,\r\n end=self.vmax,\r\n size=(self.vmax-self.vmin) / float(self.levels)\r\n )\r\n # line=dict(smoothing=0.85) \r\n )\r\n ] \r\n\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel}\r\n ) \r\n\r\n\r\n\r\n if self.surface3D:\r\n data = [\r\n go.Surface(\r\n x=lonVect,\r\n y=latVect,\r\n z=layers[i],\r\n colorscale=self.cmap,\r\n # hoverinfo='text',\r\n # text=hovertext \r\n )\r\n ]\r\n\r\n layout = go.Layout(\r\n autosize=False,\r\n title=titles[i],\r\n width=self.width,\r\n height=self.height,\r\n scene = dict(\r\n xaxis={'title': self.xlabel},\r\n yaxis={'title': self.ylabel},\r\n zaxis={'title': self.variable + self.unit}\r\n )\r\n ) \r\n\r\n\r\n self._save_plotly_(go, data, layout)", "def create_blank_tile(overviews, tile, nb_canaux, out_raster_srs):\n origin_x = overviews['crs']['boundingBox']['xmin'] + tile['x'] * tile['resolution'] * overviews['tileSize']['width']\n origin_y = overviews['crs']['boundingBox']['ymax'] - tile['y'] * tile['resolution'] * overviews['tileSize']['height']\n target_ds = gdal.GetDriverByName('MEM').Create('',\n overviews['tileSize']['width'], overviews['tileSize']['height'],\n nb_canaux, gdal.GDT_Byte)\n target_ds.SetGeoTransform((origin_x, tile['resolution'], 0,\n origin_y, 0, -tile['resolution']))\n target_ds.SetProjection(out_raster_srs.ExportToWkt())\n target_ds.FlushCache()\n return target_ds", "def build(self, *args, **kwargs):\n self._layer_counter = 0\n r = self._build_impl(*args, **kwargs)\n \n # Call the init functions \n if self._build_counter == 0:\n for initlayer in self._layers_to_init:\n if initlayer['initfnkwargs']:\n initlayer['initfn'](initlayer['layer'], **initlayer['initfnkwargs'])\n else:\n initlayer['initfn'](initlayer['layer'])\n \n self._build_counter += 1\n 
return r", "def create_map_with_layers(movies):\n map = folium.Map(tiles=\"OpenStreetMap\")\n layers = {}\n for movie in movies:\n lid, layer_ = layer(movie[0], layers) # lid - layer id\n layer_.add_child(folium.CircleMarker(location=[movie[2].latitude, movie[2].longitude],\n radius=7,\n popup= movie[0] + \"\\n\" + movie[1],\n fill_color= fill_colour_of_layer(lid),\n color = 'white',\n fill_opacity= 0.75))\n for layer_id in layers:\n map.add_child(layers[layer_id])\n map.add_child(folium.LayerControl())\n return map", "def makeSurf(self):\n layer = None\n\n if self.color is not None:\n layer = pygame.Surface(self.destSurf.get_size(), flags=pygame.HWSURFACE)\n layer = layer.convert()\n layer.fill(self.color)\n else:\n layer = pygame.Surface(self.destSurf.get_size(), flags=pygame.SRCALPHA|pygame.HWSURFACE, depth=32)\n layer.convert_alpha()\n layer.fill((0, 0, 0, 0))\n\n return layer", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def NewLayer(self, event):\n pass", "def create_new_layer_with_world_capitals(map, path):\n capitals_layer = folium.FeatureGroup(name=\"World capitals\")\n data = pandas.read_csv(path)\n for lat, long in zip(data['CapitalLatitude'], data['CapitalLongitude']):\n capitals_layer.add_child(folium.CircleMarker(location=(lat, long),\n radius=6,\n color='red'))\n map.add_child(capitals_layer)", "def set_up_rlayer(self, rlname, rlname_other=None, include_layers=None,\n exclude_layers=None, mask_layers=None):\n scene = self.set_as_active()\n layer_numbers = constants.layer_numbers\n w_var.rlname = rlname\n\n if include_layers is None:\n include_layers = w_var.layer_numbers_all_used\n\n if exclude_layers is None:\n exclude_layers = []\n\n if mask_layers is None:\n mask_layers = []\n\n if w_var.cb_clear_rlayers:\n for layer in scene.render.layers[:-1]:\n scene.render.layers.remove(layer)\n\n scene.render.layers.active.name = rlname\n scene.render.layers.active.use = True\n\n new_rlayer = scene.render.layers.active\n\n # if not clearing render layers: creates new one\n else:\n new_rlayer = scene.render.layers.new(rlname)\n scene.render.layers.active = new_rlayer\n\n # there needs to be two render layers in the same scene for freestyle compositing\n if w_var.cb_composited:\n w_var.rlname_other = rlname_other\n other_rlayer = scene.render.layers.new(rlname_other)\n other_rlayer.layers[19] = True\n scene.render.layers[rlname_other].layers_zmask = (False,) * 20\n\n if w_var.cb_ao:\n scene.render.layers[rlname].use_pass_ambient_occlusion = True\n\n if w_var.cb_composited:\n scene.render.layers[rlname_other].use_pass_ambient_occlusion = True\n\n # because I can't deactivate a layer if it is the only active one\n new_rlayer.layers[19] = True\n \n scene.render.layers[rlname].layers_exclude = (False,) * 20\n scene.render.layers[rlname].layers_zmask = (False,) * 20\n\n for i in layer_numbers:\n if w_var.cb_composited:\n if i in w_var.layer_numbers_affected:\n scene.render.layers[rlname].layers[i] = True\n scene.render.layers[rlname_other].layers_zmask[i] = True\n\n else:\n scene.render.layers[rlname].layers[i] = False\n\n if i in w_var.layer_numbers_other:\n scene.render.layers[rlname_other].layers[i] = True\n\n else:\n scene.render.layers[rlname_other].layers[i] = False\n\n else:\n if i in include_layers:\n scene.render.layers[rlname].layers[i] = True\n\n else:\n scene.render.layers[rlname].layers[i] = False\n\n if i in mask_layers:\n 
scene.render.layers[rlname].layers_zmask[i] = True\n\n if i in exclude_layers:\n scene.render.layers[rlname].layers_exclude[i] = True", "def generate_base_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Base Tiles:\"\n if self.options.verbose:\n #mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n #px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n #print \"Pixel coordinates:\", px, py, (mx, my)\n print\n print \"Tiles generated from the max zoom level:\"\n print \"----------------------------------------\"\n print\n\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n querysize = self.querysize\n\n # Just the center tile\n #tminx = tminx+ (tmaxx - tminx)/2\n #tminy = tminy+ (tmaxy - tminy)/2\n #tmaxx = tminx\n #tmaxy = tminy\n\n #print tminx, tminy, tmaxx, tmaxy\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n #print tcount\n ti = 0\n i_y_column_count=((tmaxy-tminy)+1)\n ds = self.out_ds\n tz = self.tmaxz\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 281596 ] tmaxx[ 281744 ] ; ((tmaxx-tmaxy)+1) x_tiles[ 23393 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tmaxy)+1) x_tiles[\",tcount,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 352409 ] tminy[ 352253 ] ; ((tmaxy-tminy)) y_tiles[ 157 ] 352409-(352253-1)\n print \"\\ttz=[\",tz,\"] : ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy+1)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] y_tiles[\",tcount,\"]\"\n return\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n break\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; x =\",tx,\" ; y_tms =\",ty_tms, \"; y_osm =\",ty_osm\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n if self.options.profile in ('mercator','geodetic'):\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:900913\n b = self.mercator.TileBounds(tx, ty_tms, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty_tms, tz)\n\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1])\n nativesize = wb[0]+wb[2] # Pixel size 
in the raster covering query geo extent\n if self.options.verbose:\n print \"\\tNative Extent (querysize\",nativesize,\"): \", rb, wb\n\n querysize = self.querysize\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n else: # 'raster' or 'gearth' or 'garmin' profile:\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty_tms == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty_tms * tsize) - rysize\n\n wx, wy = 0, 0\n\n wxsize, wysize = int(rxsize/float(tsize) * querysize), int(rysize/float(tsize) * querysize)\n if wysize != querysize:\n wy = querysize - wysize\n xyzzy = Xyzzy(querysize, rx, ry, rxsize, rysize, wx, wy, wxsize, wysize)\n try:\n if self.options.verbose:\n print ti,'/',tcount,' total ; z =',tz,' ; x =',tx,' ; y_tms =',ty_tms,' ; y_osm =',ty_osm\n print \"\\tReadRaster Extent: \", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)\n self.write_base_tile(tx, ty, tz, xyzzy)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3,\n SE=False, expansion=3, stride=1):\n\n norm_layer = self._norm_layer\n act_layer = self._act_layer\n downsample = None\n\n # if stride > 1\n # or if block input planes != block output planes (only possible for first block in stack)\n # downsamples skip connection by 1x1-conv filter\n if stride != 1 or inplanes != outplanes:\n downsample = nn.Sequential(\n conv1x1(inplanes, outplanes, stride=stride),\n norm_layer(outplanes)\n )\n\n layers = []\n\n # first block in stack can have stride > 1\n layers.append(block(inplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=stride, dropout=self._dropout, downsample=downsample,\n norm_layer=norm_layer, act_layer=act_layer))\n\n # other layers in stack\n # for each layer: inplanes = outplanes, stride=1, downsample=None\n for _ in range(1, num_layers):\n layers.append(block(outplanes, outplanes, expansion=expansion, kernel_size=kernel_size,\n SE=SE, stride=1, dropout=self._dropout, norm_layer=norm_layer,\n act_layer=act_layer))\n\n return nn.Sequential(*layers)", "def _build(self):\n with tf.variable_scope (self.name + '_architecutre') as scope:\n images_square = unflatten_layer ( self.images )\n visualize_images(images_square)\n\n # Conv Layer 1\n conv1_out, params = conv_2d_layer ( input = images_square,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'enc_conv_1',\n visualize = True )\n process_params(params, name = self.name)\n e1_params = params\n pool1_out = max_pool_2d_layer ( input = conv1_out, name = 'enc_pool_1')\n # lrn1_out = local_response_normalization_layer (pool1_out, name = 'lrn_1' )\n\n # Conv Layer 2\n conv2_out, params = conv_2d_layer ( input = pool1_out,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 
'enc_conv_2' )\n process_params(params, name = self.name)\n e2_params = params\n pool2_out = max_pool_2d_layer ( input = conv2_out, name = 'enc_pool_2')\n # lrn2_out = local_response_normalization_layer (pool2_out, name = 'lrn_2' )\n\n flattened = flatten_layer(pool2_out)\n\n # Dropout Layer 1 \n flattened_dropout = dropout_layer ( input = flattened,\n prob = self.dropout_prob,\n name = 'enc_dropout_1') \n\n # Dot Product Layer 1\n fc1_out, params = dot_product_layer ( input = flattened_dropout,\n neurons = HIDDEN_1,\n name = 'enc_dot_1')\n process_params(params, name = self.name)\n e3_params = params \n\n # Dropout Layer 2 \n fc1_out_dropout = dropout_layer ( input = fc1_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_2')\n # Dot Product Layer 2\n fc2_out, params = dot_product_layer ( input = fc1_out_dropout, \n neurons = HIDDEN_2,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n e4_params = params \n\n # Dropout Layer 3 \n fc2_out_dropout = dropout_layer ( input = fc2_out,\n prob = self.dropout_prob,\n name = 'enc_dropout_3')\n \n # Dot Product Layer 2\n self.codeword, params = dot_product_layer ( input = fc2_out_dropout, \n neurons = CODEWORD_LENGTH,\n activation = CODE_ACTIVATION,\n name = 'enc_dot_2')\n process_params(params, name = self.name)\n process_codeword_normalization_regularizer(self.codeword, \n coeff = AUTOENCODER_CODEWORD_COEFF,\n name = self.name)\n e5_params = params \n # tf.summary.histogram('codewords', self.codeword)\n # self.hash = threshold_layer ( input = self.codeword,\n # name = 'hash')\n # process_hash_regularizer(self.codeword, coeff = AUTOENCODER_HASH_COEFF,\n # name = self.name)\n\n # Decoder ... \n decoder_1_out, params = dot_product_layer ( input = self.codeword, \n neurons = HIDDEN_2,\n params = [tf.transpose(e5_params[0]), None],\n name = 'decoder_dot_1')\n d1_params = params\n process_params([params[1]], name = self.name)\n \n dec_1_out_dropout = dropout_layer ( input = decoder_1_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_1')\n\n decoder_2_out, params = dot_product_layer ( input = dec_1_out_dropout, \n neurons = HIDDEN_1,\n params = [tf.transpose(e4_params[0]), None],\n name = 'decoder_dot_2')\n d2_params = params\n process_params([params[1]], name = self.name)\n \n # dropout 2\n dec_2_out_dropout = dropout_layer ( input = decoder_2_out,\n prob = self.dropout_prob,\n name = 'dec_dropout_2')\n\n decoder_3_out, params = dot_product_layer ( input = dec_2_out_dropout, \n neurons = 1250,\n params = [tf.transpose(e3_params[0]), None],\n name = 'decoder_dot_3')\n d3_params = params\n process_params([params[1]], name = self.name)\n\n # DeConv Layer 1\n # The output shapes need to be changed according to architecture.\n\n dec_3_square = unflatten_layer ( decoder_3_out, channels = CONV_2_N )\n upsample_1 = upsampling_layer (dec_3_square, size = (10,10), name = 'dec_upsampling_1')\n\n deconv1_out, params = deconv_2d_layer ( input = upsample_1,\n neurons = CONV_1_N,\n filter_size = CONV_2_FILT,\n output_shape = (12,12),\n # n_outs = MINI_BATCH_SIZE,\n stride = (1,1,1,1), \n params = [e2_params[0], None], \n name = 'dec_deconv_1' )\n\n process_params([params[1]], name = self.name)\n d4_params = params\n\n # DeConv Layer 2\n upsample_2 = upsampling_layer (deconv1_out, size = (24,24), name = 'dec_upsampling_2')\n decoded_images_square, params = deconv_2d_layer ( input = upsample_2,\n neurons = 1,\n filter_size = CONV_1_FILT,\n stride = (1,1,1,1),\n output_shape = (28,28),\n # n_outs = MINI_BATCH_SIZE, \n params = [e1_params[0], 
None], \n activation = 'tanh', \n name = 'dec_deconv_2' )\n \n process_params([params[1]], name = self.name)\n d5_params = params \n \n self.decoded = flatten_layer (decoded_images_square, in_shp = [-1, 28, 28, 1])\n visualize_images(decoded_images_square, name = 'decoded')\n # This is because transpose don't initialize.\n self.params = [ [e5_params[0], d1_params[1] ],\n [e4_params[0], d2_params[1] ],\n [e3_params[0], d3_params[1] ],\n [e2_params[0], d4_params[1] ],\n [e1_params[0], d5_params[1] ] ]\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + '_decoder_error') as scope:\n reconstruction_error = rmse(self.images, self.decoded) \n tf.add_to_collection( self.name + '_objectives', reconstruction_error ) \n tf.summary.scalar('reconstruction_error', reconstruction_error)\n\n self._cook_optimizer( \n lr = AUTOENCODER_LR, \n optimizer = AUTOENCODER_OPTIMIZER,\n l1_coeff = AUTOENCODER_L1_COEFF,\n l2_coeff = AUTOENCODER_WEIGHT_DECAY_COEFF)", "def construct_layer(\n self,\n input_layer: \"NeuralNetworkLayer\",\n output_layer: \"NeuralNetworkLayer\",\n **kwargs\n ):\n # Add Nodes\n for node_number in range(self.num_nodes):\n node_object = Circle(\n radius=self.node_radius,\n color=self.node_color,\n stroke_width=self.node_stroke_width,\n )\n self.node_group.add(node_object)\n # Space the nodes\n # Assumes Vertical orientation\n for node_index, node_object in enumerate(self.node_group):\n location = node_index * self.node_spacing\n node_object.move_to([0, location, 0])\n # Create Surrounding Rectangle\n self.surrounding_rectangle = SurroundingRectangle(\n self.node_group,\n color=self.rectangle_color,\n fill_color=self.rectangle_fill_color,\n fill_opacity=1.0,\n buff=self.layer_buffer,\n stroke_width=self.rectangle_stroke_width,\n )\n self.surrounding_rectangle.set_z_index(1)\n # Add the objects to the class\n self.add(self.surrounding_rectangle, self.node_group)\n\n self.construct_activation_function()\n super().construct_layer(input_layer, output_layer, **kwargs)" ]
[ "0.61868954", "0.61661166", "0.60497373", "0.5842511", "0.5815913", "0.5705256", "0.5667303", "0.5603573", "0.5603573", "0.54255944", "0.5378894", "0.53700405", "0.53343993", "0.5305683", "0.528266", "0.5267994", "0.5221907", "0.52087885", "0.5189929", "0.51773155", "0.5176752", "0.5171672", "0.51704335", "0.5145638", "0.51340353", "0.5132355", "0.5126892", "0.51038104", "0.5101504", "0.5095765", "0.50754875", "0.50675356", "0.50555813", "0.50555587", "0.5041564", "0.50255543", "0.5024545", "0.50223196", "0.5015579", "0.49942163", "0.4983593", "0.49814162", "0.49814162", "0.49790466", "0.4977685", "0.4977685", "0.4972259", "0.4953861", "0.49524114", "0.4950385", "0.49483687", "0.49429253", "0.4940997", "0.49373683", "0.49336484", "0.4920454", "0.49195486", "0.4914589", "0.49016976", "0.49016932", "0.48997554", "0.48912528", "0.48854163", "0.48733392", "0.48634642", "0.48616475", "0.48496866", "0.48465297", "0.4838005", "0.48349354", "0.48296657", "0.48222286", "0.48189214", "0.4809928", "0.48087624", "0.47987375", "0.47903225", "0.477815", "0.4766599", "0.4764125", "0.47585112", "0.4758164", "0.47577477", "0.4756661", "0.47447252", "0.47401017", "0.47364047", "0.4735375", "0.47292417", "0.47282925", "0.47278473", "0.47261998", "0.47261158", "0.4722241", "0.4717359", "0.47161067", "0.47153854", "0.47114524", "0.4710819", "0.4710668", "0.4707866" ]
0.0
-1
Insert the origins and destinations as Stops for the Route analysis for the one-to-one case.
def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals # Use an insertCursor to insert Stops into the Route analysis destinations = {} destination_rows = [] with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.origin_unique_id_field_name, "SHAPE@", self.dest_unique_id_field_name] + self.origin_transfer_fields ) as icur: # Loop through origins and insert them into Stops along with their assigned destinations for origin in arcpy.da.SearchCursor( # pylint: disable=no-member self.input_origins_layer, ["SHAPE@", self.origin_id_field, self.assigned_dest_field] + self.origin_transfer_fields ): dest_id = origin[2] if dest_id is None: continue if dest_id not in destinations: dest_val = f"'{dest_id}'" if isinstance(dest_id, str) else dest_id with arcpy.da.SearchCursor( # pylint: disable=no-member self.input_destinations_layer, ["SHAPE@", self.dest_id_field] + self.destination_transfer_fields, where_clause=f"{self.dest_id_field} = {dest_val}" ) as cur: try: destinations[dest_id] = next(cur) except StopIteration: # The origin's destination is not present in the destinations table. Just skip the origin. continue # Insert origin and destination destination = destinations[dest_id] if self.reverse_direction: route_name = f"{dest_id} - {origin[1]}" origin_sequence = 2 destination_sequence = 1 else: route_name = f"{origin[1]} - {dest_id}" origin_sequence = 1 destination_sequence = 2 # Define the final origin and destination rows for the input Stops origin_row = [route_name, origin_sequence, origin[1], origin[0], None] + list(origin)[3:] destination_row = [route_name, destination_sequence, None, destination[0], destination[1]] + \ list(destination)[2:] icur.insertRow(origin_row) destination_rows.append(destination_row) # Insert destinations with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.origin_unique_id_field_name, "SHAPE@", self.dest_unique_id_field_name] + self.destination_transfer_fields ) as dcur: for row in destination_rows: dcur.insertRow(row)
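For context on the positive document above: its core mechanism is arcpy.nax's `insertCursor` on the Route solver's Stops input, where both stops of an origin-destination pair share a `RouteName` and are ordered by `Sequence`. A minimal, self-contained sketch of that pattern follows; it is illustrative only — the network dataset path, coordinates, and IDs are hypothetical assumptions, not taken from the dataset record.

```python
# Minimal sketch of the Stops-insertion pattern used in the document above.
# Hypothetical assumptions: nd_path points at a real network dataset, and the
# two coordinates are placeholder WGS84 points.
import arcpy

nd_path = r"C:\data\Network.gdb\Transportation\Streets_ND"  # hypothetical path
solver = arcpy.nax.Route(nd_path)

sr = arcpy.SpatialReference(4326)
origin = arcpy.PointGeometry(arcpy.Point(-117.19, 34.06), sr)       # placeholder
destination = arcpy.PointGeometry(arcpy.Point(-117.17, 34.05), sr)  # placeholder

# Both stops of a pair share one RouteName ("<origin id> - <dest id>") so the
# solver treats them as a single route; Sequence puts the origin (1) before
# the destination (2), mirroring the non-reversed branch of the document.
fields = ["RouteName", "Sequence", "SHAPE@"]
with solver.insertCursor(arcpy.nax.RouteInputDataType.Stops, fields) as cur:
    cur.insertRow(["O1 - D1", 1, origin])
    cur.insertRow(["O1 - D1", 2, destination])

result = solver.solve()
print(result.solveSucceeded)
```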
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_stops_many_to_many(self):\r\n # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse\r\n o_data = {} # {Origin ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [self.origin_id_field, \"SHAPE@\"] + self.origin_transfer_fields\r\n ):\r\n o_data[row[0]] = row[1:]\r\n d_data = {} # {Destination ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [self.dest_id_field, \"SHAPE@\"] + self.destination_transfer_fields\r\n ):\r\n d_data[row[0]] = row[1:]\r\n\r\n # Insert origins from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\"] + self.origin_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n origin_data = o_data[origin_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Origin from OD Pairs not found in inputs. Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 1, origin_id) + origin_data)\r\n\r\n # Insert destinations from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.dest_unique_id_field_name, \"SHAPE@\"] + self.destination_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n dest_data = d_data[dest_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Destination from OD Pairs not found in inputs. 
Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 2, dest_id) + dest_data)", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def set_dests(self, increment=1000000):\n modified = 0\n pb = Progress(len(self.graph.routers), 'Setting destinations', increment=increment, callback=lambda: 'Modified {:,d}'.format(modified))\n for router in pb.iterator(self.graph.routers.values()):\n for interface in router.interfaces:\n # Copy destination ASes to avoid messing up original\n idests: Set[int] = set(interface.dests)\n # If last hop, interface has non-IXP AS mapping, and interface has destination ASes\n if not router.succ and idests and interface.asn > 0:\n origin = interface.asn\n # Interface must have exactly 2 destination ASes and one must be its origin AS\n if len(idests) == 2 and origin in idests:\n other_asn = peek(idests - {origin}) # other AS\n # If other AS is likely customer of interface origin AS, and it's a small AS\n if self.bgp.conesize[origin] > self.bgp.conesize[other_asn] and self.bgp.conesize[other_asn] < 5:\n idests.discard(origin)\n modified += 1\n # Add all remaining destination ASes to the router destination AS set\n router.dests.update(idests)", "def add_route(self, distance, start, destination):\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))", "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n# distance_matrix = 
compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n# if assignment:\n# print_solution(manager, routing, assignment)\n if assignment:\n address1,address2,address3,address4,address5,address6,address7,address8,address9,address10=\\\n set_address_path(manager, routing, assignment,destinations_1)\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def generate_outgoing_flights(self):\n flights = []\n for date in self.leave_dates:\n for src in self.src_airports:\n for dst in self.dst_airports:\n flights.append( Flight(src, dst, date) )\n\n return flights", "def endpoints(self):\n return (self._origin,self._destination)", "def generate_trivial_tours(self):\n self.routes = []\n for c in range(1, self.vrpdata.NumCust+1):\n self.routes.append(VRP_Route([c]))\n return self.get_objective()", "def make_connections(self):\n return\n destinations={}\n sources={}\n for gsq in self.gatesqs:\n destinations[self.local2global(gsq)]=set()\n sources[self.local2global(gsq)]=set()\n if rm.all_sols=='timeout':\n return\n for sol in self.all_sols:\n for sa in sol:\n start,indv,path,covered,end=sa\n destinations[self.local2global(start)].add((self.local2global(end),tuple(path)))\n sources[self.local2global(end)].add((self.local2global(start),tuple(path)))\n self.sources=sources\n self.destinations=destinations", "def set_destination(self, start_waypoint, end_waypoint, time=False):\n\n self.create_samples(start_waypoint, end_waypoint)\n\n route_trace = self._trace_route(time=time)\n assert route_trace\n\n self._local_planner.set_global_plan(route_trace)", "def add_routes(self):\n pass", "def add_route(g, origin, destination, distance, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n distance = int(distance)\n # Add route both ways\n if(choice_dir == \"y\"):\n g.city_dict[origin_code].add_flights_in((destination_code, distance))\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n \n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n g.city_dict[destination_code].add_flights_out((origin_code, distance))\n # Add route one way \n if(choice_dir == \"n\"):\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n \n \n \n return g", "def add_destination(self):\n pass", "def setup_intervlan_host_routes(self):\n if self.routers:\n for src in self.host_information:\n src_host = self.host_information[src]['host']\n src_vlan = self.host_information[src]['vlan']\n src_ip = self.host_information[src]['ip']\n for dst in self.host_information:\n if src != dst:\n dst_host = 
self.host_information[dst]['host']\n dst_vlan = self.host_information[dst]['vlan']\n dst_ip = self.host_information[dst]['ip']\n if src_vlan != dst_vlan and self.is_routed_vlans(src_vlan, dst_vlan):\n src_faucet_vip = self.faucet_vips[src_vlan]\n dst_faucet_vip = self.faucet_vips[dst_vlan]\n self.add_host_route(src_host, dst_ip, src_faucet_vip.ip)\n self.add_host_route(dst_host, src_ip, dst_faucet_vip.ip)", "def register_traffic(self, intent):\n servers = collections.defaultdict(list)\n clients = collections.defaultdict(list)\n _trules = []\n for rule in intent:\n srchost = self.get_ep_host(rule['src'])\n dsthost = self.get_ep_host(rule['dst'])\n\n if not srchost:\n log.error(\"No host found for running traffic from IP : %s\",\n rule['src'])\n continue\n elif not dsthost:\n log.error(\"No host found for running traffic from IP : %s\",\n rule['dst'])\n continue\n\n servers[dsthost].append(rule)\n clients[srchost].append(rule)\n\n trule = self.create_traffic_rule(rule)\n _trules.append(trule)\n\n # Register at endpoint and create local representation.\n if config.get_param('TRAFFIC_START_SERVERS_FIRST'):\n # Start Servers first and then Clients.\n host_rules_map = [servers, clients]\n else:\n # Start Servers / Clients in single call.\n # May result in some cool off time required before the\n # traffic settles.\n for host, rules in clients.items():\n servers[host].extend(rules)\n host_rules_map = [servers]\n\n def _register_traffic_rules(host, rules):\n with LydianClient(host) as dclient:\n dclient.controller.register_traffic(rules)\n\n # Start Server before the client.\n for host_rules in host_rules_map:\n collection = [(host, (host, rules), {})\n for host, rules in host_rules.items()]\n ThreadPool(_register_traffic_rules, collection)\n\n self.rules_app.add_rules(_trules) # Persist rules to local db", "def roadSegments(locations, API_key=\"Avah46_M-gfFeQ3P1w09Qq1ElAV9ZEHFDm9b8JRCRa8qPP5uVn21hDqAPVJgV4i_\"): \n \n # Base URL\n uri = 'http://dev.virtualearth.net/' # Resource URL \n path = 'REST/v1/Routes?'\n \n \n # URL Parameters\n params = { 'wayPoint.0' : locations[0]+',Singapore',\n 'wayPoint.1' : locations[1]+',Singapore',\n 'routeAttributes':'routePath',\n 'key' : API_Key} # by default 'optimize' : 'time'} # this is by default\n \n url = uri+path\n\n results = requests.get(\n url,\n params = params\n ).json()# ['resourceSets']\n\n # Retrieving values\n statusCode = results['statusCode']\n if statusCode == 200:\n # print(statusCode)\n\n # TODO review the exceptions and modify these basic exception handlings\n try:\n travelDistance = results['resourceSets'][0]['resources'][0]['travelDistance']\n except:\n travelDistance = 0\n try:\n travelDuration = results['resourceSets'][0]['resources'][0]['travelDuration']\n except:\n travelDuration = 0\n try:\n travelDurationTraffic = results['resourceSets'][0]['resources'][0]['travelDurationTraffic']\n except:\n travelDurationTraffic = 0\n\n try:\n numberSegments = len(results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems'])\n except:\n numberSegments = 0\n try:\n itineraryItems = results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems']\n except:\n itineraryItems = 'No items'\n\n pathCoord = results['resourceSets'][0]['resources'][0]['routePath']['line']['coordinates']\n\n roadName = []\n travelDistances = []\n travelDurations = []\n maneuverType = []\n\n for seg in itineraryItems:\n for i in range(len(seg['details'])):\n # print(i)\n try:\n roadName.append(seg['details'][i]['names'])\n except:\n 
roadName.append(0)\n try:\n travelDistances.append(seg['travelDistance'])\n except:\n travelDistances.append(0)\n\n try:\n travelDurations.append(seg['travelDuration'])\n except:\n travelDurations.append(0)\n try:\n maneuverType.append(seg['details'][i]['maneuverType'])\n except:\n maneuverType.append(0)\n\n\n return statusCode,travelDistance,travelDuration,travelDurationTraffic,numberSegments,roadName, \\\n travelDistances, travelDurations, maneuverType, pathCoord\n\n else:\n print(\"Unsuccessful route calculation.\")", "def create_ig_route(self, config):\n for vpc_id, vpc_config in config.iteritems():\n for route in vpc_config[\"RouteTables\"]:\n resource = self.ec2.RouteTable(route[\"RouteTableId\"])\n for route in resource.routes:\n route_exists = False\n for ig in vpc_config[\"InternetGateways\"]:\n route_exists = False\n if ig[\"InternetGatewayId\"] == route[\"GatewayId\"]:\n route_exists = True\n break\n if not route_exists:\n resource.create_route(\n DestinationCidrBlock=\"0.0.0.0/0\",\n GatewayId=ig[\"InternetGatewayId\"],\n )", "def create_url(_origin_details, travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&date={travel_start_date}&time={travel_start_time}'\n\n # Build iteratively with necessary syntax between destinations\n destination_body = ''\n for i, dest in enumerate(destination_list):\n destination_body = f'{destination_body}&to[{i}]={dest}'\n\n return f'{prefix}{origin_body}{destination_body}'", "def _update_destinations(self):\r\n destinations = set()\r\n\r\n for passenger in self.passengers:\r\n destinations.add(passenger.get_destination_floor())\r\n\r\n self.destinations = destinations", "def _create_zones(self, output_path):\n for z_id in self.zone_ids:\n Z = Zone(z_id, self.operator, output_path, rs=self.rs1)\n Z.read_daily_demand(self.daily_OD_demand) # , self.daily_pickup_demand\n self.zones.append(Z)", "def get_self_origin(self, routes):\n # TODO\n outroutes = {}\n\n for ip in routes.keys():\n if routes[ip][SORG]:\n outroutes[ip] = routes[ip]\n\n return outroutes", "def filter_relationships(self, srcip, routes):\n outroutes = []\n return outroutes", "def connect_portals(self):\n portal_coords = [tuple(coord)\n for coord in np.argwhere(self.maze.isalpha())]\n for portal_coord in portal_coords:\n\n portal_x = portal_coord[2]\n portal_y = portal_coord[1]\n portal_z = portal_coord[0]\n\n x_on_left = portal_x <= 3\n x_on_right = portal_x >= self.rim_x\n x_on_outside = x_on_left or x_on_right\n\n y_on_left = portal_y <= 3\n y_on_right = portal_y >= self.rim_y\n y_on_outside = y_on_left or y_on_right\n\n if x_on_outside or y_on_outside:\n portal_type = \"upward\"\n else:\n portal_type = \"downward\"\n\n for other_portal_coord in portal_coords:\n other_x = other_portal_coord[2]\n other_y = other_portal_coord[1]\n if (other_x == portal_x) and (other_y == portal_y):\n continue\n elif self.maze[portal_coord] == self.maze[other_portal_coord]:\n other_z = other_portal_coord[0]\n\n # Look for a the correspondig portal with a z-coord 1 lower\n if portal_type == \"upwards\":\n if portal_z == other_z + 1:\n self.graph.add_edge(\n portal_coord, other_portal_coord)\n\n # Look for a the correspondig portal with a z-coord 1 higher\n elif portal_type == \"downward\":\n if portal_z == other_z - 1:\n self.graph.add_edge(\n portal_coord, other_portal_coord)", "def _add_route(self, connections):\n route = ArduinoSwitchControlRoute(connections)\n if route.input.label 
not in self.routes:\n self.routes[route.input.label] = {route.output.label: [route]}\n elif route.output.label not in self.routes[route.input.label]:\n self.routes[route.input.label][route.output.label] = [route]\n else:\n self.routes[route.input.label][route.output.label].append(route)", "def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def _route_chunk(data, host_url, annotations='duration', retries=10, extra_params=None):\n\t# offsets are used to make correct indice of the result dataframe\n\tsources, destinations, sources_offset, destinations_offset = data\n\tsources_count = len(sources)\n\tdestinations_count = len(destinations)\n\n\t# OSRM takes all points as one list, and then numbers of sources & dests in it\n\tall_points = sources + destinations\n\tencoded = encode_poly([(p.y, p.x) for p in all_points])\n\n\t# numerate sources & dests. 
sources come first\n\tsource_numbers = ';'.join(map(str, range(sources_count)))\n\tdestination_numbers = ';'.join(map(str,\n\t\trange(sources_count, sources_count + destinations_count)))\n\n\n\textra_params = extra_params or {}\n\tparams = {\n\t\t'sources': source_numbers,\n\t\t'destinations': destination_numbers,\n\t\t'generate_hints': 'false',\n\t\t'annotations': annotations,\n\t\t**extra_params\n\t}\n\n\tencoded_params = urllib.parse.quote_plus(urllib.parse.urlencode(params))\n\t# if we pass url and params separately to requests.get, it will make a malformed URL\n\tencoded_url = f'{host_url}/table/v1/driving/polyline({encoded})?{encoded_params}'\n\tresp = get_retry(encoded_url, {}, retries)\n\n\tif resp.status_code != 200:\n\t\traise RuntimeError(f'OSRM server responded with {resp.status_code} code. Content: {resp.content}')\n\n\tresp_data = resp.json()\n\tif resp_data.get('code', 'Ok') != 'Ok':\n\t\traise RuntimeError(f'OSRM server responded with error message: {resp_data[\"message\"]}')\n\n\t# if 'duration' is requested, then take resp_data['durations'], or resp_data['distances'] if distances.\n\t# also, 'duration,distance' might be requested, then take both and concatenate results (= join columns)\n\tresults = []\n\t\n\tfor key in annotations.split(','):\n\t\tdf = pd.DataFrame(resp_data[f'{key}s']).reset_index().rename(columns={'index': 'source'}).melt(id_vars='source', var_name='destination', value_name=key)\n\t\tdf[key] = df[key].astype(float)\n\t\tif len(results) > 0:\n\t\t\t# only append the data column\n\t\t\tresults.append(df[[key]])\n\t\telse:\n\t\t\tresults.append(df)\n\n\tresult_df = pd.concat(results, axis=1)\n\n\t# snapping distances\n\tresult_df['source_snap'] = result_df.source.map(pd.DataFrame(resp_data['sources'])['distance'])\n\tresult_df['destination_snap'] = result_df.destination.map(pd.DataFrame(resp_data['destinations'])['distance'])\n\n\t# instead of join/merge lookup\n\tresult_df['geometry'] = result_df['source'].map({i: g for i, g in enumerate(sources)})\n\tresult_df['geometry_dest'] = result_df['destination'].map({i: g for i, g in enumerate(destinations)})\n\n\t# shift back by the given offset\n\tresult_df['destination'] = result_df['destination'].astype(int) + destinations_offset\n\tresult_df['source'] = result_df['source'].astype(int) + sources_offset\n\treturn result_df", "def _add_stops_to_df(self, stop_coords, signal_coords, route_df):\n\n self.stop_nn_indicies, self.stop_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n stop_coords\n )\n\n\n signal_nn_indicies, singal_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n signal_coords)\n\n route_df = route_df.assign(\n is_bus_stop = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_signal = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_stop = ([False] * len(route_df.index))\n )\n \n for i in self.stop_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_bus_stop'] = True\n route_df.at[i, 'is_stop'] = True\n \n for i in signal_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_stop'] = True\n route_df.at[i, 'is_signal'] = True\n\n # route_df.at[0, 'is_bus_stop'] = True\n # route_df.at[-1, 'is_bus_stop'] = True\n\n return route_df", "def remove_route(g, origin, destination, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n \n # Removes both directions and returns \n if(choice_dir == \"y\"):\n \n \n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_in = g.city_dict[key].get_flights_in()\n 
new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != destination_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != origin_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n \n # Removes one direction and returns\n if(choice_dir == \"n\"):\n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n g.city_dict[key].set_flights_in(new_flights_in)\n \n return g", "def buildStopsDict(self):\n \n if len(self.nodesDict) == 0:\n raise Exception('Nodes dictionary is empty!')\n if len(self.linksDict) == 0:\n raise Exception('Links dictionary is empty!')\n \n self.stopsByRoute = dict()\n self.stopsByNode = dict()\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n \n tempStops = \"temp_stops\"\n tempStopsSp = \"temp_stops_sp\"\n \n # Delete temp_stops and temp_stops_sp feature classes if they exist.\n if arcpy.Exists(tempStops):\n arcpy.Delete_management(tempStops)\n if arcpy.Exists(tempStopsSp):\n arcpy.Delete_management(tempStopsSp)\n arcpy.CopyFeatures_management(PublicTransit.RTD_PATH + PublicTransit.RTD_STOPS,\n tempStops)\n \n # Project temp_stops to CA state plane and add XY.\n install_dir = arcpy.GetInstallInfo()['InstallDir']\n out_coordinate_system = os.path.join(install_dir, PublicTransit.NAD_83_DIRECTORY)\n arcpy.Project_management(tempStops, tempStopsSp, out_coordinate_system,\n \"NAD_1983_To_WGS_1984_1\")\n arcpy.AddXY_management(tempStopsSp)\n \n # Create a search cursor to traverse all stops.\n stops = arcpy.SearchCursor(tempStopsSp, \"\", \"\",\n \"CPT_STOPPOINTID; SCH_STOPPOINTSEQNO; \" +\n \"SCH_ROUTEID; SCH_PATTERNID; ROUTE_PATTERN; \" +\n \"SourceOID; POINT_X; POINT_Y\",\n \"ROUTE_PATTERN A; SCH_STOPPOINTSEQNO A\")\n numStops = int(arcpy.GetCount_management(tempStopsSp).getOutput(0))\n print \"Found %d stops\" % numStops\n \n p = index.Property()\n p.overwrite = True\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE,properties=p)\n \n # For each stop determine the nearest network node.\n scount = 0\n icount = 0\n for s in stops:\n # only create stops for routes which exist in RTD\n if not s.ROUTE_PATTERN in self.transitRoutes:\n continue\n scount += 1\n st = TransitStop(s.CPT_STOPPOINTID, s.SCH_ROUTEID, s.SCH_PATTERNID,\n s.ROUTE_PATTERN, s.SourceOID, s.SCH_STOPPOINTSEQNO)\n # If the stop's linkId is in the links dictionary use the link from\n # and to node (these should all be bus routes since MTC's route\n # traversal 
FC was created for buses only at this time).\n if s.SourceOID in self.linksDict:\n link = self.linksDict[s.SourceOID]\n # Determine which node is nearest and snap to it.\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.fromNode.x,\n link.fromNode.y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.toNode.x,\n link.toNode.y):\n st.tanaNode = link.fromNode.nodeId\n else:\n st.tanaNode = link.toNode.nodeId\n st.inRegion = True\n \n # The stop's link is not in linksDict. These are either stops \n # outside the region or non-bus routes for which there are no\n # route traversal edges. Do a link lookup from the Roadways\n # feature class.\n else:\n arcpy.env.workspace = PublicTransit.RTD_PATH\n roadwaysSearch = arcpy.SearchCursor(PublicTransit.ROADWAYS_FC,\n \"LinkId = \" + str(s.SourceOID),\n \"\", \"\", \"F_JNCTID; T_JNCTID\", \"\")\n for r in roadwaysSearch:\n fromNode = self.__getIdHash(r.F_JNCTID)\n toNode = self.__getIdHash(r.T_JNCTID)\n if fromNode in self.nodesDict and toNode in self.nodesDict:\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[fromNode].x,\n self.nodesDict[fromNode].y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[toNode].x,\n self.nodesDict[toNode].y):\n st.tanaNode = fromNode\n else:\n st.tanaNode = toNode\n st.inRegion = True\n else:\n st.inRegion = False\n \n # Add the stop to stopsByRoute and stopsByNode dictionaries\n if s.ROUTE_PATTERN in self.stopsByRoute:\n self.stopsByRoute[s.ROUTE_PATTERN].append(st)\n else:\n self.stopsByRoute[s.ROUTE_PATTERN] = [st]\n if (st.tanaNode in self.stopsByNode):\n self.stopsByNode[st.tanaNode].append(st)\n else:\n self.stopsByNode[st.tanaNode] = [st]\n # add the stop node to the spatial index\n if st.tanaNode in self.nodesDict:\n icount += 1\n self.spIndex.insert(st.stopPointId,\n (self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y,\n self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y))\n del stops", "def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path", "def plan_trip():\n origins = []\n destinations = []\n\n origin_stop = request.args.get('origin', False)\n destination_stop = request.args.get('destination', False)\n origin_is_suburb = request.args.get('origin_suburb', False)\n dest_is_suburb = request.args.get('dest_suburb', False)\n origin_is_suburb = bool(origin_is_suburb)\n dest_is_suburb = bool(dest_is_suburb)\n if origin_stop and destination_stop:\n client = api.connection()\n origins = client.find_stops_by_name('any', origin_stop, True)\n\n if 
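Both snapping branches in buildStopsDict reduce to the same comparison: take whichever endpoint of the stop's link is closer, with ties going to the from-node (the <= test above). A hedged sketch of that helper, assuming node objects carry x/y in the same projected coordinates as the stop:

import math

def nearest_end_node(stop_x, stop_y, from_node, to_node):
    # Closer endpoint wins; <= keeps the original tie-breaking toward from_node.
    d_from = math.hypot(stop_x - from_node.x, stop_y - from_node.y)
    d_to = math.hypot(stop_x - to_node.x, stop_y - to_node.y)
    return from_node if d_from <= d_to else to_node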
client.error == 404:\n            return render_template(\n                \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n            )\n\n        destinations = client.find_stops_by_name('any', destination_stop, True)\n        if client.error == 404:\n            return render_template(\n                \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n            )\n\n        origins = stop_information_generator(\n            origins.locations, [], origin_stop, origin_is_suburb\n        )\n        destinations = stop_information_generator(\n            destinations.locations, [], destination_stop, dest_is_suburb\n        )\n\n    return render_template(\n        \"trip-planner.jinja2\", origins=origins, destinations=destinations, err=200\n    )", "def advertise_route_to_neighbors(self, destination):\n        distance_vector = self.hosts_to_ports[destination]\n        self.handle_proper_packet(distance_vector.port, destination, distance_vector.latency, True)\n        self.handle_poison_packet(distance_vector.port, destination)", "def possible_routes(srcLat, srcLon, destLat, destLon, searchPreference, dateTime):\n\n    dateTime = dateTime.split(\",\")\n\n    routes = Db().get_best_route(srcLat, srcLon, destLat, destLon)\n    try:\n        best_routes = get_three_best_routes(routes, searchPreference, dateTime)\n    except IndexError:\n        best_routes = \"No Journey Found\"\n\n    # Get the address for map display purposes\n    try:\n        for i in range(len(best_routes)):\n            # address is a dataframe, hence the use of .loc\n            address = Db().get_single_address(best_routes[i][2]).loc[0,\"Address\"]\n            best_routes[i].append(address)\n    except IndexError:\n        # In case the source is outside Dublin\n        best_routes = \"No Journey Found\"\n\n    return json.dumps(best_routes, ensure_ascii=False)", "def _route_to_dest(self):\n        # Ask the network\n        self.route = self.network.determine_route(self.start, self.dest)\n        # Set the index to where we are now\n        self.route_index = 0", "def lookup_routes(self, daddr):\n        outroutes = []\n        for entry in self.routes:\n            for varat in entry[\"varats\"]:\n                ip = varat[\"network\"].split(\".\")\n                netmask = varat[\"netmask\"].split(\".\")\n\n                mask_bit = \"\".join([ format(int(quad), \"08b\") for quad in netmask ])\n                num_ones = mask_bit.count(\"1\")\n                ip_bin = \"\".join([ format(int(quad), \"08b\") for quad in ip ])\n                ip_start = ip_bin[:num_ones]\n                daddr_bin = \"\".join([ format(int(quad), \"08b\") for quad in daddr.split(\".\") ])\n                if daddr_bin.startswith(ip_start):\n                    outroutes.append({\"peer\": entry[\"peer\"], \"us\": entry[\"us\"], \"ghoti\": num_ones, \"msg\": varat})\n\n        #print(\"outroutessssssssssssssssssssss\", outroutes)\n        return outroutes", "def add_edges(self):\n        for u in self.G.nodes():\n            for v in self.G.nodes():\n                if u != v and u != \"Sink\" and v != \"Source\":\n                    self.G.add_edge(\n                        u, v, cost=self.manhattan(u, v), time=self.manhattan(u, v)\n                    )", "def airline_connections(region):\n\n    outgoing_routes = region.airlines\n    ingoing_routes = []\n\n    for other_region in regions.values():\n        if other_region == region: continue\n\n        for airline in other_region.airlines:\n            if airline.destination == region:\n                ingoing_routes.append(airline)\n\n    return (outgoing_routes, ingoing_routes)", "def connect(self, origin, destination):\n        origin_section = self.sections[origin]\n        destination_section = self.sections[destination]\n        \"The junction of both origin and destination must be the same.\"\n        \"\"\"A turn transitions a vehicle from the last segment of the origin to the first\n        segment of the destination.\"\"\"\n        edge = self.graph.add_edge(origin_section[-1], destination_section[0])\n        self.edge_weights[edge] = 0 # The distance between the same location is 0.\n        return", "def 
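lookup_routes expands dotted quads into bit strings to test prefix containment by hand. The standard library's ipaddress module performs the same test directly; a sketch under the snippet's entry layout (network/netmask keys), with matching_routes as an illustrative name:

import ipaddress

def matching_routes(varats, daddr):
    # Keep every candidate whose network contains daddr, recording the prefix
    # length (the role played by "ghoti" above) for longest-prefix selection.
    out = []
    addr = ipaddress.ip_address(daddr)
    for varat in varats:
        net = ipaddress.ip_network(varat["network"] + "/" + varat["netmask"], strict=False)
        if addr in net:
            out.append((net.prefixlen, varat))
    return out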
get_self_origin(self, routes):\n outroutes = []\n all_non_self = True\n for route in routes:\n if route[SORG]:\n outroutes.append(route)\n all_non_self = False\n if all_non_self:\n return routes\n return outroutes", "def destination_floors(self):\r\n return self.destinations", "def topology_complete(self):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def get_self_origin(self, routes):\n outroutes = []\n if len(routes) == 0:\n return routes\n\n for route in routes:\n if route[\"msg\"][\"selfOrigin\"]:\n outroutes.append(route)\n \n if len(outroutes) == 0:\n outroutes = routes\n \n return outroutes", "def route_matrix(client, origins, destinations, **kwargs):\n\n sep_pattern = re.compile(r'[,;|]')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n\n is_origins_str = isinstance(origins, str)\n is_origins_list = isinstance(origins, list)\n if not any([is_origins_str, is_origins_list]):\n raise ValueError('\"origins\" must be str or list!')\n elif is_origins_str:\n sep_origins = sep_pattern.split(origins)\n u_origins = origins.decode('utf-8')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n match = CN_pattern.search(u_origins)\n if match:\n if len(sep_origins) > 5:\n raise ValueError('\"origins\" incorrect! upper limits is 5.')\n else:\n origins = '|'.join(sep_origins)\n else:\n if len(sep_origins) > 10:\n raise ValueError('\"origins\"incorrect! upper limits is 5.')\n else:\n temp = [','.join(sep_origins[(2*x):(2*x+2)][::-1])\n for x in range(0, len(sep_origins)/2)]\n origins = '|'.join(temp)\n\n else:\n # element in list is CN_pattern characters.\n if len(origins[0]) == 1:\n origins = '|'.join(origins)\n # element in list is list/tuple of lng,lat.\n else:\n origins = [map(str, l) for l in origins]\n origins = '|'.join([','.join(l[::-1]) for l in origins])\n\n is_destinations_str = isinstance(destinations, str)\n is_destinations_list = isinstance(destinations, list)\n\n if not any([is_destinations_str, is_destinations_list]):\n raise ValueError('\"destinations\" must be str or list!')\n elif is_destinations_str:\n sep_destinations = sep_pattern.split(destinations)\n u_destinations = destinations.decode('utf-8')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n match = CN_pattern.search(u_destinations)\n if match:\n if len(sep_destinations) > 5:\n raise ValueError('\"destinations\" incorrect! upper limits is \\\n 5.')\n else:\n destinations = '|'.join(sep_destinations)\n else:\n if len(sep_destinations) > 10:\n raise ValueError('\"destinations\"incorrect! 
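The two get_self_origin variants above share one idiom: filter routes on a flag, but fall back to the unfiltered list when nothing passes. A compact sketch of that pattern (filter_with_fallback is a hypothetical name):

def filter_with_fallback(routes, keep):
    # keep is a predicate; an empty result means no self-origin routes,
    # in which case the input is returned unchanged.
    filtered = [r for r in routes if keep(r)]
    return filtered if filtered else routes

# e.g. filter_with_fallback(routes, lambda r: r["msg"]["selfOrigin"])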
upper limits is 5.')\n else:\n temp = [','.join(sep_destinations[(2*x):(2*x+2)][::-1])\n for x in range(0, len(sep_destinations)/2)]\n destinations = '|'.join(temp)\n\n else:\n if len(destinations[0]) == 1:\n destinations = '|'.join(destinations)\n else:\n # first, map to str.\n destinations = [map(str, l) for l in destinations]\n destinations = '|'.join([','.join(l[::-1]) for l in destinations])\n\n kwargs.update({'server_name': 'direction', 'version': 'v1',\n 'subserver_name': 'routematrix', 'origins': origins,\n 'destinations': destinations})\n\n return client.get(kwargs)", "def _build_directions(self):\n d = {'start': self.get_start(), 'end': self.get_end(), 'duration': self.get_duration(),\n 'mode': self.get_primary_mode(), 'price_range': self.get_price_range(), 'legs': self.get_legs(),\n 'start_location': self.get_start_location(), 'end_location': self.get_end_location()}\n self.set_directions(d)", "def add_building_output_locations2(self,areasList,start,end,step): \n print \"Getting buildings locations...\"\n \n dictionaries = []\n dictionary = {}\n \n for a in areasList:\n \n dictionaries.append(self.grid.get_building_output_locations(a[0],a[1]))\n \n for dict in dictionaries:\n for row in dict.iteritems(): \n dictionary[row[0]] = row[1] \n\n print \"Number of buildings = %s\" % (len(dictionary))\n\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def _find_routes(self, start_node, previous_nodes=None):\n if previous_nodes is None:\n previous_nodes = []\n\n routes = []\n for con in self.connections:\n if start_node == con.end:\n con.flip()\n if start_node == con.start:\n # if the connection ends in a box output,\n # add the connection (as a route of length 1)\n if con.end.is_box_output():\n routes.append([con])\n elif con.end.is_box_input():\n raise Exception(\"Route in connections detected, \"\n \"that ends at an input.\")\n elif con.end.is_switch_output():\n # check if there is conflict with previous nodes\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n # check orientation\n if con.end.switch.orientation == 1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n # Set orientation of the switch\n con.end.switch.orientation = -1\n # Add the node to the previous nodes and call the method\n # for the next node\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n next_step = self._find_routes(\n con.end.switch.input,\n previous_nodes=previous_nodes\n )\n # Merge the current connection with the resulting routes\n for route in next_step:\n routes.append([con] + route)\n # proceed the analogously for a switch input\n elif con.end.is_switch_input():\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n if con.end.switch.orientation == -1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n con.end.switch.orientation = 1\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n\n # continue with both outputs\n next_step0 = self._find_routes(\n con.end.switch.output[0],\n previous_nodes=previous_nodes\n )\n\n next_step1 = self._find_routes(\n con.end.switch.output[1],\n previous_nodes=previous_nodes\n )\n\n for route in next_step0:\n routes.append([con] + route)\n for 
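Most of route_matrix is input normalisation into the pipe-and-comma wire format, with each coordinate pair reversed (the l[::-1] above). That branch in isolation, as a sketch with an illustrative name:

def coords_to_param(points):
    # [(a, b), ...] -> "b,a|b,a": reverse each pair, comma-join it, pipe-join the pairs.
    return "|".join(",".join(map(str, reversed(p))) for p in points)

# coords_to_param([(39.9, 116.4), (31.2, 121.5)]) == '116.4,39.9|121.5,31.2'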
route in next_step1:\n routes.append([con] + route)\n\n else:\n raise TypeError(f\"Node {con.end} not recognised\")\n\n return routes", "def getCornuCoord_forDistances(distanceFile, cornuCoordsFile):\n dataToWrite = []\n not_common = []\n coords = getStartEndCoords(distanceFile,cornuCoordsFile)\n #print(coords)\n routes = {}\n #routes['data'] = []\n with open(distanceFile, \"r\", encoding=\"utf8\") as csvfile:\n distances = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for line in distances:\n tmp = {}\n start = line[0][4:].strip()\n normStart = norm.normalize_alphabet(start)\n startRegion = line[1].strip().split(\",\")\n startKey = ','.join([start] + startRegion).strip()\n startKey_norm = ','.join([normStart] + startRegion).strip()\n end = line[2][4:].strip()\n normEnd = norm.normalize_alphabet(end)\n endRegion = line[3].strip().split(\",\")\n endKey = ','.join([end] + endRegion).strip()\n endKey_norm = ','.join([normEnd] + endRegion).strip()\n routes[startKey + \"+\" + endKey] = {}\n routes[startKey + \"+\" + endKey]['start'] = {}\n routes[startKey + \"+\" + endKey]['start']['URI'] = coords[startKey]['cornuUri'] if startKey in coords else coords[startKey_norm]['cornuUri']\n routes[startKey + \"+\" + endKey]['start']['lat'] = coords[startKey]['lat'] if startKey in coords else coords[startKey_norm]['lat']\n routes[startKey + \"+\" + endKey]['start']['lon'] = coords[startKey]['lon'] if startKey in coords else coords[startKey_norm]['lon']\n routes[startKey + \"+\" + endKey]['start']['region'] = coords[startKey]['region'] if startKey in coords else coords[startKey_norm]['region']\n routes[startKey + \"+\" + endKey]['end'] = {}\n routes[startKey + \"+\" + endKey]['end']['URI'] = coords[endKey]['cornuUri'] if endKey in coords else coords[endKey_norm]['cornuUri']\n routes[startKey + \"+\" + endKey]['end']['lat'] = coords[endKey]['lat'] if endKey in coords else coords[endKey_norm]['lat']\n routes[startKey + \"+\" + endKey]['end']['lon'] = coords[endKey]['lon'] if endKey in coords else coords[endKey_norm]['lon']\n routes[startKey + \"+\" + endKey]['end']['region'] = coords[endKey]['region'] if endKey in coords else coords[endKey_norm]['region']\n routes[startKey + \"+\" + endKey]['distance'] = line[-1][5:].strip()\n #routes['data'].append(tmp)\n with open('../Data/Distances_withCoords_normalized_with_cornuRegion_json_noNorm_noAL_origkey90', 'w') as outfile:\n json.dump(routes, outfile, ensure_ascii=False, indent=4)", "def combineGPSandPhoneStops(arg):\r\n\r\n # unpack parameters\r\n user_gps, user_cell, dur_constr, spat_constr_gps, spat_cell_split = arg\r\n\r\n # combine cellular stay if it is close to a gps stay\r\n cell_stays = list(set([(trace[6],trace[7]) for d in user_cell for trace in user_cell[d] if int(trace[9]) >= dur_constr]))\r\n gps_stays = list(set([(trace[6],trace[7]) for d in user_gps for trace in user_gps[d] if int(trace[9]) >= dur_constr]))\r\n pairs_close = set()\r\n for cell_stay in cell_stays:\r\n for gps_stay in gps_stays:\r\n if distance(cell_stay[0],cell_stay[1],gps_stay[0],gps_stay[1]) <= spat_constr_gps:\r\n pairs_close.add((gps_stay[0],gps_stay[1],cell_stay[0],cell_stay[1]))\r\n break\r\n # find all pair[1]s in list, and replace it with pair[0]\r\n for pair in list(pairs_close):\r\n for d in user_cell.keys():\r\n for trace in user_cell[d]:\r\n if trace[6] == pair[2] and trace[7] == pair[3]:\r\n trace[5], trace[6], trace[7] = 99, pair[0], pair[1] #pretend as gps\r\n\r\n user = user_gps\r\n for d in user.keys():\r\n if len(user_cell[d]):\r\n 
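getCornuCoord_forDistances repeats the same conditional lookup (raw key if present, otherwise the normalized spelling) for every field it copies. A small helper removes the duplication; coords is the mapping built by getStartEndCoords above, and lookup is a hypothetical name:

def lookup(coords, key, key_norm, field):
    # Prefer the key as written; fall back to its normalized form.
    entry = coords[key] if key in coords else coords[key_norm]
    return entry[field]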
user[d].extend(user_cell[d])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr) #OscillationPairList = oscillation_h1_oscill(user, dur_constr)\r\n # ## when replaced, can only replaced with a gps stay; so let modify exchange ping-pong pair in the pairList\r\n # gpslist_temp = {(trace[6], trace[7]):int(trace[5]) for d in user.keys() for trace in user[d]}\r\n # for pair_i in range(len(OscillationPairList)):\r\n # if gpslist_temp[(OscillationPairList[pair_i][0],OscillationPairList[pair_i][1])] <= spat_constr_gps:# wrong(2,3)\r\n # OscillationPairList[pair_i] = [OscillationPairList[pair_i][2],OscillationPairList[pair_i][3],\r\n # OscillationPairList[pair_i][0],OscillationPairList[pair_i][1]]\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n for d in user:\r\n phone_index = [k for k in range(len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) == 0: # if no phone trace\r\n continue\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr: # passing phone observ\r\n user[d][i].append('checked')\r\n # combine consecutive obsv on a phone stay into two observ\r\n i = min(phone_index) # i has to be a phone index\r\n j = i + 1\r\n while i < len(user[d]) - 1:\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last observation\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n break\r\n if int(user[d][j][5]) > spat_cell_split and user[d][j][6] == user[d][i][6] \\\r\n and user[d][j][7] == user[d][i][7] and j < len(user[d]):\r\n j += 1\r\n else:\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n phone_index = [k for k in range(j, len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) < 3: # if no phone trace\r\n break\r\n i = min(phone_index) ##i has to be a phone index\r\n j = i + 1\r\n i = 0 # remove []\r\n while i < len(user[d]):\r\n if len(user[d][i]) == 0:\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # adress phone stay one by one\r\n flag_changed = True\r\n phone_list_check = []\r\n while (flag_changed):\r\n # print('while........')\r\n flag_changed = False\r\n gps_list = []\r\n phone_list = []\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) <= spat_cell_split:#or user[d][i][2] == 'addedphonestay': #changed on 0428\r\n gps_list.append(user[d][i])\r\n else:\r\n phone_list.append(user[d][i])\r\n\r\n phone_list.extend(phone_list_check)\r\n # when updating duration for phone stay, we have to put back passing obs\r\n phone_list = sorted(phone_list, key=itemgetter(0))\r\n # update phone stay\r\n i = 0\r\n j = i\r\n while i < len(phone_list):\r\n if j >= len(phone_list): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n # we don't want to change a pssing into a stay; as we have not process the combine this stay\r\n # this is possible when a stay that prevents two passing is mergeed into gps as gps points\r\n 
phone_list[k][9] = dur\r\n break\r\n if phone_list[j][6] == phone_list[i][6] and phone_list[j][7] == phone_list[i][7] and j < len(\r\n phone_list):\r\n j += 1\r\n else:\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n phone_list[k][9] = dur\r\n i = j\r\n for trace in phone_list: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] = -1\r\n if len(phone_list) == 1: phone_list[0][9] = -1\r\n\r\n # update check lable\r\n for i in range(len(phone_list)):\r\n if int(phone_list[i][5]) > spat_cell_split and int(phone_list[i][9]) < dur_constr \\\r\n and phone_list[i][-1] != 'checked':\r\n # passing phone observ\r\n phone_list[i].append('checked')\r\n\r\n # put those not checked together with gps\r\n user[d] = gps_list\r\n phone_list_check = []\r\n for i in range(len(phone_list)):\r\n if phone_list[i][-1] == 'checked':\r\n phone_list_check.append(phone_list[i])\r\n else:\r\n user[d].append(phone_list[i])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # find a stay which is not checked\r\n flag_phonestay_notchecked = False\r\n phonestay_left, phonestay_right = -1, -1\r\n for i in range(max(0, phonestay_right+1), len(user[d])):\r\n phonestay_left, phonestay_right = -1, -1\r\n if int(user[d][i][5]) > spat_cell_split \\\r\n and int(user[d][i][9]) >= dur_constr and user[d][i][-1] != 'checked':\r\n phonestay_left = phonestay_right\r\n phonestay_right = i\r\n if phonestay_left != -1 and phonestay_right != -1 \\\r\n and user[d][phonestay_left][9] == user[d][phonestay_right][9]:\r\n flag_phonestay_notchecked = True\r\n\r\n ## modified on 04152019\r\n if flag_phonestay_notchecked == False or len(phone_list) == 0: # if all phone observation are checked, end\r\n break\r\n # if they are not two consecutive observation\r\n if phonestay_right != phonestay_left + 1: # attention: only phonestay_left is addressed\r\n # not consecutive two observations\r\n if any([int(user[d][j][9]) >= dur_constr for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # found a gps stay in betw\r\n # print('23: found a gps stay in betw, just use one gps stay trade one phone stay')\r\n temp = user[d][phonestay_left][6:]\r\n user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # find close gps\r\n # print('24: do not found a gps stay in betw')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n if all([(phone_uncernt + int(user[d][j][5])) > 1000 * distance(user[d][j][3], user[d][j][4],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # total uncerty larger than distance\r\n # this case should be rare, as those close gps may be clustered\r\n # print('241: all gps falling betw are close with phone stay')\r\n temp = user[d][phonestay_left][3:] # copy neighbor gps\r\n user[d][phonestay_left][3:] = user[d][phonestay_left + 1][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n flag_changed = True\r\n else:\r\n # print('242: find a gps in betw,\r\n # which is far away with phone stay, contradic with a stay (with phone obsv)')\r\n temp = user[d][phonestay_left][6:]\r\n 
user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # if they are two consecutive traces\r\n # two consecutive observation\r\n # if phonestay_left != 0 and phonestay_right < len(user[d]) - 1:\r\n # ignore if they are at the beginning or the end of traj\r\n prev_gps = next_gps = 0 # find prevous and next gps\r\n found_prev_gps = False\r\n found_next_gps = False\r\n for prev in range(phonestay_left - 1, -1, -1):\r\n # if int(user[d][prev][5]) <= spat_cell_split: ########## changed on 04282018\r\n if int(user[d][prev][5]) <= spat_cell_split and int(user[d][prev][9]) >= dur_constr:\r\n prev_gps = prev\r\n found_prev_gps = True\r\n break\r\n for nxt in range(phonestay_right + 1, len(user[d])):\r\n if int(user[d][nxt][5]) <= spat_cell_split and int(user[d][nxt][9]) >= dur_constr:\r\n next_gps = nxt\r\n found_next_gps = True\r\n break\r\n\r\n if found_prev_gps and found_next_gps and user[d][prev_gps][6] == user[d][next_gps][6]:\r\n # this is a phone stay within a gps stay\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 \\\r\n and dist > 1000*spat_constr_gps and speed_dep < 200 and speed_retn < 200:\r\n # print('1111: distance larger than acc, and can travel, add phone stay, shorten gps stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else: # merge into gps stay\r\n # print('1112: distance less than acc, or cannot travel, merge into gps stay')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_prev_gps and found_next_gps and user[d][prev_gps][6] != user[d][next_gps][6]:\r\n phone_uncernt_l = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_l = int(user[d][prev_gps][8])\r\n dist_l = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist_l - phone_uncernt_l - gps_uncernt_l) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n phone_uncernt_r = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_r = 
int(user[d][next_gps][8])\r\n dist_r = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist_r - phone_uncernt_r - gps_uncernt_r) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n comb_l = 0 #revised on 03202019 to pick up one gps stay to combine with; if spatial conti with multi\r\n comb_r = 0\r\n if (dist_l - phone_uncernt_l - gps_uncernt_l) < 0 \\\r\n or dist_l < 1000*spat_constr_gps or speed_dep > 200:\r\n comb_l = 1\r\n if (dist_r - phone_uncernt_r - gps_uncernt_r) < 0 \\\r\n or dist_r < 1000 * spat_constr_gps or speed_retn > 200:\r\n comb_r = 1\r\n if comb_l*comb_r == 1:\r\n if dist_l < dist_r:\r\n comb_r = 0\r\n else:\r\n comb_l = 0\r\n if comb_l:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif comb_r:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else:\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n elif found_prev_gps: # a gps stay #right# before\r\n # print('113: before phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_dep < 200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1132: dist>low_acc, add phone stay')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:\r\n # print('1131: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_next_gps: # a gps stay #right# after\r\n # print('112: after phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), 
int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][next_gps][8])\r\n dist = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_retn<200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1122: dist>low_acc, add phone stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:# remain phone observ, but use gps location\r\n # print('1121: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else: # if don't match any case, just add it\r\n # print('donot match any case, just add it (e.g., consecutive two phone stays)')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n\r\n\r\n # user[d].extend(phone_list_check)\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace[:])\r\n # remove passingby cellular traces\r\n i = 0\r\n while i<len(user[d]):\r\n if user[d][i][5] == 99 and float(user[d][i][9]) < dur_constr:\r\n del user[d][i]\r\n else:\r\n i+=1\r\n # remove passing traces\r\n ## Flag_changed = True\r\n ## while (Flag_changed):\r\n ## Flag_changed = False\r\n # i = 0\r\n # while i < len(user[d]):\r\n # if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr:\r\n # # Flag_changed = True\r\n # del user[d][i]\r\n # else:\r\n # i += 1\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n # update duration\r\n i = 0\r\n j = i\r\n while i < len(user[d]):\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n break\r\n if user[d][j][6] == user[d][i][6] and user[d][j][7] == user[d][i][7] and j < len(\r\n user[d]):\r\n j += 1\r\n else:\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n i = j\r\n for trace in user[d]: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] = -1\r\n if len(user[d]) == 1: user[d][0][9] = -1\r\n # remove and add back; because phone stays are distroyed as multiple, should be combined as one\r\n i = 0\r\n while i < len(user[d]):\r\n if user[d][i][2] == 'addedphonestay':\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # add back and sort\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace)\r\n\r\n user[d] = 
sorted(user[d], key=itemgetter(0))\r\n\r\n # remove temp marks\r\n user[d]=[trace[:12] for trace in user[d]]\r\n\r\n # oscillation\r\n # modify grid\r\n for day in user.keys():\r\n for trace in user[day]:\r\n if float(trace[6]) == -1:\r\n found_stay = False\r\n if found_stay == False:\r\n trace[6] = trace[3] + '000' # in case do not have enough digits\r\n trace[7] = trace[4] + '000'\r\n digits = (trace[6].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2]) / 2)\r\n trace[6] = (trace[6].split('.'))[0] + '.' + digits\r\n # trace[6] = trace[6][:5] + str(int(trace[6][5]) / 2) # 49.950 to 49.952 220 meters\r\n digits = (trace[7].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2:4]) / 25)\r\n trace[7] = (trace[7].split('.'))[0] + '.' + digits\r\n # trace[7] = trace[7][:7] + str(int(trace[7][7:9]) / 25) # -122.3400 to -122.3425 180 meters\r\n\r\n # added to address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr)\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n # end addressing oscillation\r\n # those newly added stays should be combined with close stays\r\n user = cluster_incremental(user, spat_constr_gps, dur_constr=dur_constr)\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n # use only one record for one stay\r\n for d in user:\r\n i = 0\r\n while i < len(user[d]) - 1:\r\n if user[d][i + 1][6] == user[d][i][6] and user[d][i + 1][7] == user[d][i][7] \\\r\n and user[d][i + 1][9] == user[d][i][9] and int(user[d][i][9]) >= dur_constr:\r\n del user[d][i + 1]\r\n else:\r\n i += 1\r\n # mark stay\r\n staylist = set() # get unique staylist\r\n for d in user.keys():\r\n for trace in user[d]:\r\n if float(trace[9]) >= dur_constr:\r\n staylist.add((trace[6], trace[7]))\r\n else: # change back keep full trajectory: do not use center for those are not stays\r\n trace[6], trace[7], trace[8], trace[9] = -1, -1, -1, -1 # for non stay, do not give center\r\n staylist = list(staylist)\r\n for d in user.keys():\r\n for trace in user[d]:\r\n for i in range(len(staylist)):\r\n if trace[6] == staylist[i][0] and trace[7] == staylist[i][1]:\r\n trace[10] = 'stay' + str(i)\r\n break\r\n\r\n return user", "def destinations(self, destinations):\n\n self._destinations = destinations", "def bus_routes_direction():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file) #useful for monitoring progress of function\n reader = csv.reader(open(file))\n for line in reader:\n route = extract_route_and_direction(line[3]) # Journey ID field\n if route not in route_list and route != \"\": # error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def test_host_routes_two_subnets_with_segments_association(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n host_routes = [{'destination': cidrs[1], 'nexthop': gateway_ips[0]},\n {'destination': cidrs[0], 'nexthop': gateway_ips[1]}]\n net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips,\n cidrs)\n\n net_req = self.new_show_request('networks', net['id'])\n raw_res = net_req.get_response(self.api)\n net_res = 
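The duration-update passes above all use the same two-pointer scan: advance j while consecutive traces share a stay centre, then stamp the run's elapsed time on every member. itertools.groupby expresses the scan directly; a hedged sketch assuming the snippet's row layout (start time in field 0, stay centre in fields 6 and 7, duration in field 9):

from itertools import groupby

def update_durations(traces):
    # Consecutive rows with the same (lat, lon) form one stay; its duration is
    # the last member's start time minus the first member's start time.
    for _, run in groupby(traces, key=lambda t: (t[6], t[7])):
        run = list(run)
        dur = int(run[-1][0]) - int(run[0][0])
        for t in run:
            t[9] = dur
    return traces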
self.deserialize(self.fmt, raw_res)\n for subnet_id in net_res['network']['subnets']:\n sub_req = self.new_show_request('subnets', subnet_id)\n raw_res = sub_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)['subnet']\n self.assertIn(sub_res['cidr'], cidrs)\n self.assertIn(sub_res['gateway_ip'], gateway_ips)\n self.assertIn(sub_res['host_routes'][0], host_routes)", "def skyroute(startroute, endroute, grid):\n # Find the start and end point\n start = startroute[-1]\n end = endroute[-1]\n path = astar(grid, start, end)\n\n # If a path is found, stitch the routes together\n if path != None: \n for location in reversed(startroute[:-1]):\n path.insert(0, location)\n\n for location in reversed(endroute[:-1]):\n path.insert(len(path), location)\n\n return path", "def main():\n tl = TwoLocus(in_path='/csbiodata/public/www.csbio.unc.edu/htdocs/sgreens/pairwise_origins/')\n # tl = TwoLocus()\n # tl.preprocess(glob.glob('OR_ss_origins/*.hap'))\n print len(tl.list_available_strains())\n exit()\n # print len(tl.list_available_strains())\n # tl.preprocess(['cc_origins.csv'])\n # tl.preprocess(['ccv_origins.csv'])\n classical = [s for s in\n [\"129P1/ReJ\", # \"129P3/J\", \"129S1SvlmJ\", \"129S6\", \"129T2/SvEmsJ\", \"129X1/SvJ\", \"A/J\", \"A/WySnJ\",\n \"AEJ/GnLeJ\", \"AEJ/GnRk\", \"AKR/J\", \"ALR/LtJ\", \"ALS/LtJ\", \"BALB/cByJ\", \"BALB/cJ\", \"BDP/J\", \"BPH/2J\",\n # \"BPL/1J\", \"BPN/3J\", \"BTBR T<+>tf/J\", \"BUB/BnJ\", \"BXSB/MpJ\", \"C3H/HeJ\", \"C3HeB/FeJ\", \"C57BL/10J\",\n # \"C57BL/10ScNJ\", \"C57BL/10SAAAJ\", \"C57BL/6CR\", \"C57BL/6J\", \"C57BL/6NCI\", \"C57BL/6Tc\", \"C57BLKS/J\",\n # \"C57BR/cdJ\", \"C57L/J\", \"C58/J\", \"CBA/CaJ\", \"CBA/J\", \"CE/J\", \"CHMU/LeJ\", \"DBA/1J\", \"DBA/1LacJ\",\n # \"DBA/2DeJ\", \"DBA/2HaSmnJ\", \"DBA/2J\", \"DDK/Pas\", \"DDY/JclSidSeyFrkJ\", \"DLS/LeJ\", \"EL/SuzSeyFrkJ\",\n # \"FVB/NJ\", \"HPG/BmJ\", \"I/LnJ\", \"IBWSP2\", \"IBWSR2\", \"ICOLD2\", \"IHOT1\", \"IHOT2\", \"ILS\", \"ISS\", \"JE/LeJ\",\n # \"KK/HlJ\", \"LG/J\", \"LP/J\", \"LT/SvEiJ\", \"MRL/MpJ\", \"NOD/ShiLtJ\", \"NON/ShiLtJ\", \"NONcNZO10/LtJ\",\n # \"NONcNZO5/LtJ\", \"NOR/LtJ\", \"NU/J\", \"NZB/BlNJ\", \"NZL/LtJ\", \"NZM2410/J\", \"NZO/HlLtJ\", \"NZW/LacJ\", \"P/J\",\n # \"PL/J\", \"PN/nBSwUmabJ\", \"RF/J\", \"RHJ/LeJ\", \"RIIIS/J\", \"RSV/LeJ\", \"SB/LeJ\", \"SEA/GnJ\", \"SEC/1GnLeJ\",\n # \"SEC/1ReJ\", \"SH1/LeJ\", \"SI/Col Tyrp1 Dnahc11/J\", \"SJL/Bm\", \"SJL/J\", \"SM/J\", \"SSL/LeJ\", \"ST/bJ\",\n \"STX/Le\", ] # \"SWR/J\", \"TALLYHO/JngJ\", \"TKDU/DnJ\", \"TSJ/LeJ\", \"YBR/EiJ\", \"ZRDCT Rax<+>ChUmdJ\"]\n if tl.is_available(s)]\n wild_derived = [s for s in\n ['22MO',\n # 'BIK/g', 'BULS', 'BUSNA', 'BZO', 'CALB/RkJ', 'CASA/RkJ', 'CAST/EiJ', 'CIM', 'CKN', 'CKS',\n 'CZECHI/EiJ', 'CZECHII/EiJ', 'DCA', 'DCP', 'DDO', 'DEB', 'DGA', 'DIK', 'DJO', 'DKN', 'DMZ', 'DOT',\n # 'IS/CamRkJ', 'JF1/Ms', 'LEWES/EiJ', 'MBK', 'MBS', 'MCZ', 'MDG', 'MDGI', 'MDH', 'MGA', 'MH',\n # 'MOLD/RkJ', 'MOLF/EiJ', 'MOLG/DnJ', 'MOR/RkJ', 'MPB', 'MSM/Ms', 'PERA/EiJ', 'PERC/EiJ', 'POHN/Deh',\n # 'PWD/PhJ', 'PWK/PhJ', 'RBA/DnJ', 'RBB/DnJ', 'RBF/DnJ', 'SF/CamEiJ', 'SKIVE/EiJ', 'SOD1/EiJ',\n # 'STLT', 'STRA', 'STRB', 'STUF', 'STUP', 'STUS', 'TIRANO/EiJ', 'WLA', 'WMP', 'WSB/EiJ',\n 'ZALENDE/EiJ'] if tl.is_available(s)]\n tl.contingency_table(classical, wild_derived, '/csbiohome01/sgreens/Projects/intervals/contingency.csv')\n exit()\n x = TwoLocus(chrom_sizes=[20e6, 20e6])\n x.preprocess([\"test2.csv\"])\n x.unique_combos(['A', 'B', 'D'], ['C', 'E'])\n x.sources_at_point_pair('1', 1, '1', 
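skyroute's insert loops amount to a single concatenation: the A* path already contains both shared endpoints, so the stored start route is prepended and the stored end route is walked back out in reverse. A minimal equivalent sketch (stitch is an illustrative name):

def stitch(startroute, endroute, path):
    # Mirrors the insert(0, ...) and insert(len(path), ...) loops above.
    if path is None:
        return None
    return startroute[:-1] + path + list(reversed(endroute[:-1]))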
10000000, ['A'])\n # x.interlocus_dependence([chr(c) for c in xrange(ord('A'), ord('J')+1)])\n # exit()\n\n x = TwoLocus(chrom_sizes=[20 * 10 ** 6, 20 * 10 ** 6])\n x.preprocess([\"test.csv\"])\n rez = x.pairwise_frequencies([\"A\"])\n\n areas = x.calculate_genomic_area(rez[0], rez[1])\n total = 0.0\n\n for combo in subspecies.iter_combos():\n print \"\\t{:15s}({:4d}):{:1.5f}\".format(subspecies.to_string(combo), combo,\n areas[str(subspecies.to_string(combo))])\n total += areas[str(subspecies.to_string(combo))]\n print \"\\t{:21s}:{:1.5f}\".format(\"Total\", total)\n\n sys.exit(1)\n # for code, combo in combos.iteritems():\n # print \"\\n\", rez[1]\n # print \"\\t{} ({}):\\n{}\".format(combo, code, rez[0][code])", "def detour(src, dst, pitstop):\n options = on_path([src, dst],query='shell gas station', size=10,urgency=0)\n ret = []\n for place in options:\n title = place['title']\n x = place['latlon']\n addr = place['address']\n A_X = dist(src, x); X_B = dist(x, dst)\n consumer_dist = A_X['distance'] + X_B['distance']\n tour_time = A_X['trafficTime']+X_B['trafficTime']\n last_mile_dist = 2*dist(pitstop, x)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n ret.append({\"distance\" : consumer_dist,\n \"latlon\" : x,\n \"title\" : title,\n \"time\" : tour_time,\n \"address\" : addr,\n \"carbon\" : carbon_print})\n ret = sorted(ret, key=lambda loc: loc.get('distance'))\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst carbon\n consumer_dist = dist(src, dst)['distance']\n last_mile_dist = 2*dist(pitstop, dst)['distance']\n total_trip_dist = consumer_dist + last_mile_dist\n carbon_print = total_trip_dist/(1e3 * .621 * .70548)\n #print(total_trip_dist, consumer_dist, last_mile_dist)\n\n # worst case time A - C - B\n A_C = dist(src, pitstop)\n C_B = dist(pitstop, dst)\n total_time = A_C['trafficTime'] + C_B['trafficTime']\n return {\"meetpoints\" : ret, 'worst_time' : total_time, \"worst_carbon\" : carbon_print}", "def add_airport_arrivals(result, airport_cities_d):\n geo_airport_cities=create_airports(airport_cities_d)\n result=get_airport_start_end(result, geo_airport_cities)\n return result", "def build_url(start, end, transit_mode):\n transit = \"\"\n traffic = \"best_guess\"\n depart = \"now\"\n if transit_mode:\n transit = transit_mode\n direc_url = g_api_base_url + dir_url + \"origin=\" + start + \"&destination=\" + end + trans_url \\\n + transit + goog_dir_key\n dist_url = g_api_base_url + dis_url + units_i + or_dis_url + start + des_url + end + trans_url \\\n + transit + traffic_url + traffic + depart_url + depart + goog_dis_key\n direc_url = direc_url.replace(\" \",\"+\")\n print(\"directions :\"+ direc_url)\n dist_url = dist_url.replace(\" \",\"+\")\n return direc_url, dist_url", "def add_new_source_sink(self):\n source = self.source()\n sink = self.sink()\n for arc in self.out_arcs_lists[source]:\n self.arc_info[arc][\"lower_bound\"] = 0\n self.arc_info[arc][\"upper_bound\"] = float('inf')\n for arc in self.in_arcs_lists[sink]:\n self.arc_info[arc][\"lower_bound\"] = 0\n self.arc_info[arc][\"upper_bound\"] = float('inf')\n for vert in self.vertices:\n if vert != source and vert != sink:\n if self.get_arc(source, vert) is None:\n self.add_inexact_edge(source, vert, 0, float('inf'))\n if self.get_arc(vert, sink) is None:\n self.add_inexact_edge(vert, sink, 0, float('inf'))", "def writeRouteSequence(self):\n print \"writing route sequence\"\n f = open(PublicTransit.OUTFILE_NAME, 'wb')\n if 
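build_url assembles query strings by manual concatenation and then patches spaces with replace(); urllib.parse.urlencode handles escaping and joining in one step. A hedged sketch with illustrative parameter names rather than the exact URL fragments used above:

from urllib.parse import urlencode

def directions_url(base, start, end, transit_mode=""):
    # urlencode percent-escapes values, so a trailing replace(" ", "+") is unnecessary.
    params = {"origin": start, "destination": end, "mode": transit_mode}
    return base + "directions/json?" + urlencode(params)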
(PublicTransit.LINE_FILE_TYPE == LineFileType.PTLINE):\n lines = [\";;<<PT>><<LINE>>;;\" + os.linesep]\n elif (PublicTransit.LINE_FILE_TYPE == LineFileType.TRNBUILD):\n lines = [\";;<<Trnbuild>>;;\" + os.linesep]\n\n for t in self.transitRoutes:\n if t in self.stopsByRoute:\n i = 0\n self.transitRoutes[t].nodeSequence = []\n prevLinkId = -1\n # Bus routes have a link sequence from BusRouteTraversalEdges. Others just have stops.\n if (len(self.transitRoutes[t].linkSequence) > 0):\n for link in self.transitRoutes[t].linkSequence:\n # make sure this link is within the region (i.e., it is in linksDict)\n if (link in self.linksDict):\n nodeToAppend = -1\n if (i == 0):\n nodeToAppend = self.stopsByRoute[t][0].tanaNode\n if (nodeToAppend == -1):\n if (self.linksDict[link].oneWay == \"FT\"):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (self.linksDict[link].oneWay == \"TF\"):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n else: # open in both directions; determine traversal direction\n nodeToAppend = -self.linksDict[link].fromNode.nodeId \n elif (i == 1):\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n if (self.linksDict[link].oneWay == \"FT\"):\n if (self.stopsByRoute[t][0].tanaNode != self.linksDict[link].fromNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].fromNode.nodeId)\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (self.linksDict[link].oneWay == \"TF\"):\n if (self.stopsByRoute[t][0].tanaNode != self.linksDict[link].toNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].toNode.nodeId)\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n else: # open in both directions\n if (abs(self.transitRoutes[t].nodeSequence[0]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[0]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (self.transitRoutes[t].linkSequence[0] in self.linksDict and \n self.linksDict[self.transitRoutes[t].linkSequence[0]].toNode.nodeId == self.linksDict[link].fromNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].fromNode.nodeId)\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (self.transitRoutes[t].linkSequence[0] in self.linksDict and \n self.linksDict[self.transitRoutes[t].linkSequence[0]].fromNode.nodeId == self.linksDict[link].toNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].toNode.nodeId)\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n elif (prevLinkId != link and prevLinkId != -1): # ensure there are no repeated links\n if (self.linksDict[link].oneWay == \"FT\"):\n if (len(self.transitRoutes[t].nodeSequence) > 0 and \n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (len(self.transitRoutes[t].nodeSequence) > 0):\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n\n elif (self.linksDict[link].oneWay == \"TF\"):\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (len(self.transitRoutes[t].nodeSequence) > 0):\n 
self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n else: # open in both directions\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n # determine direction based on the previous node in the sequence. If the previous\n # node is the same as this link's from node, append the toNode; otherwise append the fromNode.\n if (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n # previous link doesn't connect to this because the previous link was a duplicate\n else:\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n # remove the last node in the sequence and check if the one before connects to this one\n if (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n # if the node is a stop on this route, set the node ID positive\n if (nodeToAppend != -1):\n if (i > 0 and abs(nodeToAppend) in [st.tanaNode for st in self.stopsByRoute[t]]):\n nodeToAppend = -1 * nodeToAppend\n self.transitRoutes[t].nodeSequence.append(nodeToAppend)\n prevLinkId = link\n \n i += 1\n # if the last node is not a stop, remove it\n if (len(self.transitRoutes[t].nodeSequence) > 0 and self.transitRoutes[t].nodeSequence[-1] < 0):\n del(self.transitRoutes[t].nodeSequence[-1])\n \n # if there are no links for the route, just record the stops as the nodes\n else:\n self.transitRoutes[t].nodeSequence = [n.tanaNode for n in self.stopsByRoute[t] if n.tanaNode != -1]\n \n # Only write routes with a node sequence.\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n lines.append(self.__getPrintString(t, PublicTransit.LINE_FILE_TYPE) + os.linesep)\n else:\n print \"No node sequence for \" + str(t) + \" (\" + self.transitRoutes[t].new_name + \")\"\n f.writelines(lines)\n f.close()", "def insert(self, v: Route) -> insertion_response: # pylint: disable=arguments-differ, undefined-variable\n # Check if it was already in the list\n i = hash(v.addr)\n if i not in self._destinations:\n self._destinations[i] = [v]\n elif v not in self._destinations[i]:\n self._destinations[i].append(v)\n else:\n # If it was not possible to enter the path then return None\n return None\n return v", "def route(self, is_check_lanes=True):\n print 'route'\n # TODO: if too mant vtypes, better go through id_modes\n exectime_start = time.clock()\n\n net = self.get_scenario().net\n edges = net.edges\n vtypes = self.parent.vtypes\n\n ids_edges = []\n ids_trip = []\n costs = []\n for id_vtype in self.get_vtypes():\n id_mode = vtypes.ids_mode[id_vtype]\n\n # no routing for pedestrians\n if id_mode != net.modes.get_id_mode('pedestrian'):\n weights = edges.get_times(id_mode=id_mode,\n speed_max=vtypes.speeds_max[id_vtype],\n is_check_lanes=is_check_lanes)\n\n ids_trip_vtype = self.get_trips_for_vtype(id_vtype)\n # print ' id_vtype,id_mode',id_vtype,id_mode#,ids_trip_vtype\n # print ' weights',weights\n ids_edge_depart = self.ids_edge_depart[ids_trip_vtype]\n 
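The insert method above maintains a hash-keyed multimap by hand; collections.defaultdict provides the create-bucket-on-first-use behaviour for free. A sketch, assuming routes hash by their addr as in the snippet:

from collections import defaultdict

class RouteTable:
    def __init__(self):
        self._destinations = defaultdict(list)

    def insert(self, route):
        bucket = self._destinations[hash(route.addr)]
        if route in bucket:
            return None  # duplicate, mirroring the snippet's failure value
        bucket.append(route)
        return route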
ids_edge_arrival = self.ids_edge_arrival[ids_trip_vtype]\n\n for id_trip, id_edge_depart, id_edge_arrival in zip(ids_trip_vtype, ids_edge_depart, ids_edge_arrival):\n cost, route = routing.get_mincostroute_edge2edge(id_edge_depart,\n id_edge_arrival,\n edges=edges,\n weights=weights)\n if len(route) > 0:\n ids_edges.append(route)\n ids_trip.append(id_trip)\n costs.append(cost)\n\n ids_route = self.routes.get_value().add_rows(ids_trip=ids_trip,\n ids_edges=ids_edges,\n costs=costs,\n )\n self.add_routes(ids_trip, ids_route)\n print ' exectime', time.clock()-exectime_start\n return ids_trip, ids_route", "def create_route(stop_list):\n delivery_route = []\n stop_list = stop_list\n current_location = 0\n shortest_distance = sys.maxsize\n shortest_trip = None\n\n while len(stop_list) != 1:\n # calculate possible next trips from current location\n possible = determine_next_stop(current_location, stop_list)\n\n for key, value in possible.items():\n if value < shortest_distance:\n shortest_trip = key\n shortest_distance = value\n\n # adds the shortest next stop to delivery route\n delivery_route.append(shortest_trip[1])\n\n # makes the next shortest stop the current location\n current_location = shortest_trip[1]\n\n # removes current location from stop list\n stop_list.remove(shortest_trip[1])\n\n # resets shortest_distance variable\n shortest_distance = sys.maxsize\n\n # adds last stop to delivery route\n delivery_route.append(stop_list[0])\n\n return delivery_route", "def add_returned_route_on_gw(self, context, router_id, port):\n LOG.debug('OVNL3RouterPlugin::')\n ovn_router_name = utils.ovn_gateway_name(router_id)\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip['subnet_id']\n subnet = self._plugin.get_subnet(context, subnet_id)\n route = {'destination': subnet['cidr'], 'nexthop': ovn_const.OVN_LROUTER_TRANSIT_PORT_IP}\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.add_static_route(ovn_router_name,\n ip_prefix=route['destination'],\n nexthop=route['nexthop']))", "def directions_calc(self):\n \n # create route_dict, {'radio_button_name': {'geometries': list of coords,\n # 'values': list of values}}\n route_dict = self._selectInput()\n \n # generate lists with locations and values\n (start_layer_name,\n end_layer_name) = [x.objectName() for x in self.radio_buttons]\n \n locations_list = list(product(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n values_list = list(product(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n \n # If row-by-row in two-layer mode, then only zip the locations\n if all([button.isChecked() for button in self.radio_buttons]) and self.dlg.routing_twolayer_rowbyrow.isChecked():\n locations_list = list(zip(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n\n values_list = list(zip(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n\n # Add via point if specified\n route_via = None\n if self.dlg.routing_via_label.text() != 'Long,Lat':\n route_via = [float(x) for x in self.dlg.routing_via_label.text().split(\",\")]\n \n message_bar, progress_widget = progressbar.pushProgressBar(self.iface)\n \n responses = []\n delete_values = []\n for i, coords_tuple in enumerate(locations_list):\n if coords_tuple[0] == coords_tuple[-1]:\n # Skip when same location\n delete_values.append(i)\n continue\n if route_via:\n # add via coords\n coords_tuple = list(coords_tuple)\n coords_tuple.insert(1, route_via)\n 
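create_route is a greedy nearest-neighbour pass: repeatedly jump to the cheapest remaining stop. The same idea with min() and a key function; distance is an assumed cost lookup standing in for determine_next_stop's table:

def greedy_route(stops, distance, start=0):
    # Visit the cheapest unvisited stop until none remain.
    route, current, remaining = [], start, list(stops)
    while remaining:
        nxt = min(remaining, key=lambda s: distance(current, s))
        route.append(nxt)
        remaining.remove(nxt)
        current = nxt
    return route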
\n # Update progress bar\n percent = (i/len(locations_list)) * 100\n message_bar.setValue(percent)\n \n # Make the request\n self.params['coordinates'] = convert.build_coords(coords_tuple)\n responses.append(self.client.request(self.url, self.params))\n \n # Delete entries in values_list where coords where the same\n values_list = [value for idx, value in enumerate(values_list) if idx not in delete_values]\n \n # Only proceed when there actual responses\n if responses: \n layer_out = self._addLine(responses, values_list)\n layer_out.updateExtents()\n \n QgsProject.instance().addMapLayer(layer_out)\n \n self.iface.messageBar().popWidget(progress_widget)", "def chunk_user_route(detail_of_trip):\n\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n # since I can't get javascript to load, here's a hacky way of loading json\n # that details the route based on the user's point A and point B\n # detail_of_trip = api.directions(\n # (40.760350, -73.976209),\n # (40.754009, -73.981097),\n # mode=\"walking\"\n # )[0]\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n # now that I have javascript sending over the json, load json that details\n # the route based on the user's point A and point B\n\n # -------------- This section is for interpolation/splitting using shapely\n first = True # to see if this is the start position for the entire route\n line_points = [] # stores all the points to the route based on dict passed\n\n for leg in detail_of_trip['legs']:\n for step in leg['steps']:\n # Create a list of two element lists that represent points along the\n # route. via google. line_points = [ [lat1, lng1], [lat2, lng2],...]\n # Only add the starting point the first time. Every other iteration\n # we will just tack on the end points to our line.\n if first:\n line_points.append([step['start_location']['lat'], step['start_location']['lng']])\n first = False\n line_points.append([step['end_location']['lat'], step['end_location']['lng']])\n\n # Now load those points into a geometry, here shapely's LineString type.\n route_line = LineString(line_points)\n return (route_line, line_points)", "def stop_areas(self):\n seen_ids = set()\n for route in self.routes:\n for stop in route:\n st = stop.stoparea\n if st.id not in seen_ids:\n seen_ids.add(st.id)\n yield st", "def destinatarios(self, destinatarios):\n self._destinatarios = destinatarios", "def __init__(self, origin, destination):\n self.origin = origin\n self.destination = destination", "def set_address_path(manager, routing, assignment,data_locations):\n assignment.ObjectiveValue()\n index = routing.Start(0)\n route_distance = 0\n address_list=[]\n while not routing.IsEnd(index):\n cur_node=manager.IndexToNode(index)\n# print('what are: index,cur_node=',index,cur_node)\n address_list.append(data_locations[cur_node])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n manager.IndexToNode(index)\n# print('almost there: ',address_list)\n address1=address_list[0]\n address2=address_list[1]\n address3=address_list[2]\n address4=address_list[3]\n address5=address_list[4]\n address6=address_list[5]\n address7=address_list[6]\n address8=address_list[7]\n address9=address_list[8]\n address10=address_list[9]\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def _build_path(self):\n for point_3d in self.path_coordinates:\n 
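chunk_user_route flattens Google-style legs and steps into a point list before building the LineString: only the very first step contributes its start point, and every step contributes its end point. That flattening in isolation, assuming the same directions-dict shape as the snippet:

from shapely.geometry import LineString

def route_to_line(detail_of_trip):
    points = []
    for leg in detail_of_trip["legs"]:
        for step in leg["steps"]:
            if not points:  # seed with the route's very first start location
                points.append([step["start_location"]["lat"],
                               step["start_location"]["lng"]])
            points.append([step["end_location"]["lat"],
                           step["end_location"]["lng"]])
    return LineString(points), points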
self.connect_point_with_neighbors(point_3d)", "def _post_process_route_fcs(self):\r\n # Create the final output feature class\r\n desc = arcpy.Describe(self.route_fcs[0])\r\n helpers.run_gp_tool(\r\n LOGGER,\r\n arcpy.management.CreateFeatureclass, [\r\n os.path.dirname(self.out_routes),\r\n os.path.basename(self.out_routes),\r\n \"POLYLINE\",\r\n self.route_fcs[0], # template feature class to transfer full schema\r\n \"SAME_AS_TEMPLATE\",\r\n \"SAME_AS_TEMPLATE\",\r\n desc.spatialReference\r\n ]\r\n )\r\n\r\n # Insert the rows from all the individual output feature classes into the final output\r\n fields = [\"SHAPE@\"] + [f.name for f in desc.fields]\r\n with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member\r\n for fc in self.route_fcs:\r\n for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member\r\n cur.insertRow(row)", "def writePathways( self ):\n\n self.logger.info( 'writePathways: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerPathway.writePathways()\n\n self.logger.info( 'writePathways: DONE' )", "def _build_stations(self, stop_list):\n # stations = [] TODO: What is this for\n dists = self._euclidian_distances(stop_list)\n stations = self._calculate_y_lines(dists)\n return stations", "def test_add_outgoing_connection():\n\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n i2 = Intersection(center, radius, speed_limit)\n i2.add_connection(10.0, 20, 2, 2, 40, 'test2')\n\n start = Coordinates(1,1)\n end = Coordinates(7, 9)\n len = 15\n out_ln = 2\n in_ln = 1\n ang = 3 * math.pi / 2\n\n road = Road(start, end, len, out_ln, in_ln, ang, 20, 'Test')\n\n l = i.get_connections()\n\n assert not l\n\n i.add_outgoing_connection(road)\n\n assert l\n assert l[0].get_length() == 15\n\n l2 = i2.get_connections()\n\n assert l2\n\n i2.add_outgoing_connection(road)\n\n assert l2\n assert l2[1].get_length() == 15", "def addConnection(analyzer, origin, destination, distance):\n edge = gr.getEdge(analyzer['connections'], origin, destination)\n if edge is None:\n gr.addEdge(analyzer['connections'], origin, destination, distance)\n return analyzer", "def _add_unique_id_fields(self):\r\n field_types = {\"String\": \"TEXT\", \"Single\": \"FLOAT\", \"Double\": \"DOUBLE\", \"SmallInteger\": \"SHORT\",\r\n \"Integer\": \"LONG\", \"OID\": \"LONG\"}\r\n origin_field_def = [self.origin_unique_id_field_name, field_types[self.origin_id_field_obj.type]]\r\n if self.origin_id_field_obj.type == \"String\":\r\n origin_field_def += [self.origin_unique_id_field_name, self.origin_id_field_obj.length]\r\n dest_field_def = [self.dest_unique_id_field_name, field_types[self.dest_id_field_obj.type]]\r\n if self.dest_id_field_obj.type == \"String\":\r\n dest_field_def += [self.dest_unique_id_field_name, self.dest_id_field_obj.length]\r\n self.rt_solver.addFields(arcpy.nax.RouteInputDataType.Stops, [origin_field_def, dest_field_def])", "def processTradeRoutes(self):\n try:\n nextRound = self.currentRound+1\n resultslist = []\n for trID in self.tradeRoutes.keys():\n myTradeRoute = self.tradeRoutes[trID]\n (systemFromID, systemToID, tradeRouteType) = string.split(trID, '-')\n systemFrom = self.systems[systemFromID]\n systemTo = self.systems[systemToID]\n cancel = 0\n warpReq = 0\n # choose trade route type\n if tradeRouteType == 'GEN':\n # update what system sends based on what it makes\n myTradeRoute.AL = systemFrom.prodAL\n myTradeRoute.EC = systemFrom.prodEC\n myTradeRoute.IA = 
systemFrom.prodIA\n \n # check if trade route is adjacent or requires warp gate capacity\n if systemTo.id in systemFrom.warpGateSystems:\n warpReq = myTradeRoute.getWarpRequired()\n if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n cancel = 1\n elif systemTo.id not in systemFrom.connectedSystems:\n cancel = 1\n \n if (systemFrom.AL >= myTradeRoute.AL and\n systemFrom.EC >= myTradeRoute.EC and\n systemFrom.IA >= myTradeRoute.IA and \n cancel == 0):\n # process trade route\n systemFrom.AL -= myTradeRoute.AL\n systemFrom.EC -= myTradeRoute.EC\n systemFrom.IA -= myTradeRoute.IA\n systemTo.AL += myTradeRoute.AL\n systemTo.EC += myTradeRoute.EC\n systemTo.IA += myTradeRoute.IA\n # deduct properly if empires are different\n empireFrom = self.empires[systemFrom.myEmpireID]\n empireTo = self.empires[systemTo.myEmpireID]\n if empireFrom <> empireTo:\n empireFrom.AL -= myTradeRoute.AL\n empireFrom.EC -= myTradeRoute.EC\n empireFrom.IA -= myTradeRoute.IA\n empireTo.AL += myTradeRoute.AL\n empireTo.EC += myTradeRoute.EC\n empireTo.IA += myTradeRoute.IA\n \n if warpReq > 0:\n systemFrom.usedWGC += warpReq\n systemTo.usedWGC += warpReq\n \n # mail trade route completion\n resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n self.mailTradeInfo('completed', myTradeRoute, nextRound)\n else:\n cancel = 1\n \n # check if route should be cancelled\n if cancel == 1:\n resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n elif myTradeRoute.oneTime == 1:\n resultslist.append('one time trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n \n return str(resultslist)\n except:\n return 'galaxy->processTradeRoutes error'", "def destinations(self, offset=None, page_size=None, min_times_seen=None,\r\n max_times_seen=None, first_seen_before=None,\r\n first_seen_after=None, last_seen_before=None,\r\n last_seen_after=None, sort_field=None, sort_order=None):\r\n params = base.get_params(None, locals())\r\n url = self.get_url() + '/destinations'\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def __init__(self, origin_table, destination_table):\n self.origin = origin_table\n self.destination = destination_table\n self._intersection = None", "def populateTable(self):\n\n output_list = self.output_ports.split(', ')\n\n for i in output_list:\n values = i.split('-')\n nextHopPort = values[0]\n linkCost = values[1]\n destId = values[2]\n learnedFrom = 0 # As it was learned from ConfigFile\n row = routing_row.RoutingRow(nextHopPort, destId, linkCost, destId, learnedFrom)\n self.addToRoutingTable(row)", "def get_origin_routes(self, routes):\n outroutes = []\n current_best = \"UNK\"\n # iterate through routes in given list updating the current best if a better\n # option is discovered\n for route in routes:\n if route[ORIG] == current_best:\n outroutes.append(route)\n elif (route[ORIG] == \"EGP\" and current_best != \"IGP\") or route[ORIG] == \"IGP\":\n # if the current best is worse than EGP and the current is EGP,\n # update best and start a new list\n # if the current best is worse than IGP and the current is IGP,\n # update best and start a new list\n current_best = route[ORIG]\n outroutes = [route]\n\n return outroutes", "def compute_waypoints(self, source_loc, destination_loc):\n start_waypoint = self._map.get_waypoint(\n source_loc,\n project_to_road=True,\n 
lane_type=carla.LaneType.Driving)\n end_waypoint = self._map.get_waypoint(\n destination_loc,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n assert start_waypoint and end_waypoint, 'Map could not find waypoints'\n route = self._grp.trace_route(\n start_waypoint.transform.location,\n end_waypoint.transform.location)\n # TODO(ionel): The planner returns several options in intersections.\n # We always take the first one, but this is not correct.\n return deque([to_pylot_transform(waypoint[0].transform)\n for waypoint in route])", "def strategy_location_mapper(details):\n place = normalize_address(details['name'])\n current = os.path.dirname(__file__)\n for agency in details['agency']:\n path = os.path.join(current, 'NextBusData', agency + '.json')\n with open(path) as data:\n name_2_stop_id = json.load(data)\n stop_ids = name_2_stop_id.get(place, \"Unavailable\")\n details['stop_ids'] = stop_ids\n if details['stop_ids'] != \"Unavailable\":\n details['agency'] = agency\n return details\n return details", "def pedestrian_route(\n self,\n origin: List,\n destination: List,\n via: Optional[List[Tuple]] = None,\n origin_place_options: Optional[PlaceOptions] = None,\n destination_place_options: Optional[PlaceOptions] = None,\n via_place_options: Optional[PlaceOptions] = None,\n destination_waypoint_options: Optional[WayPointOptions] = None,\n via_waypoint_options: Optional[WayPointOptions] = None,\n departure_time: Optional[datetime] = None,\n routing_mode: str = \"fast\",\n alternatives: int = 0,\n units: str = \"metric\",\n lang: str = \"en-US\",\n return_results: Optional[List] = None,\n spans: Optional[List] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n exclude: Optional[List[str]] = None,\n ) -> RoutingResponse: # noqa E501\n resp = self.routing_api.route(\n transport_mode=\"pedestrian\",\n origin=origin,\n destination=destination,\n via=via,\n origin_place_options=origin_place_options,\n destination_place_options=destination_place_options,\n via_place_options=via_place_options,\n destination_waypoint_options=destination_waypoint_options,\n via_waypoint_options=via_waypoint_options,\n departure_time=departure_time,\n routing_mode=routing_mode,\n alternatives=alternatives,\n units=units,\n lang=lang,\n return_results=return_results,\n spans=spans,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n exclude=exclude,\n )\n return RoutingResponse.new(resp.json())", "def add_neighbours(self, router1, router2):\n router1 = self.routers[router1]\n router2 = self.routers[router2]\n\n router1.add_neighbour(router2)\n router2.add_neighbour(router1)", "def global_plan(\n world: carla.World, # pylint: disable=no-member\n origin: carla.Location, # pylint: disable=no-member\n destination: carla.Location, # pylint: disable=no-member\n) -> Tuple[Sequence[carla.Waypoint], Sequence[Any], float]: # pylint: disable=no-member\n try:\n from agents.navigation.global_route_planner import GlobalRoutePlanner # pylint: disable=import-error\n from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO # pylint: disable=import-error\n except ImportError:\n raise ImportError(\n \"Missing CARLA installation, \"\n \"make sure the environment variable CARLA_ROOT is provided \"\n \"and that the PythonAPI is `easy_install`ed\")\n\n # Setup global planner.\n grp_dao = GlobalRoutePlannerDAO(wmap=world.get_map(), sampling_resolution=1)\n grp = GlobalRoutePlanner(grp_dao)\n grp.setup()\n # Generate plan.\n waypoints, 
roadoptions = zip(*grp.trace_route(origin, destination))\n # Accummulate pairwise distance.\n distances = [0.0]\n for i in range(1, len(waypoints)):\n loc_tm1 = waypoints[i - 1].transform.location\n loc_tm1 = np.asarray([loc_tm1.x, loc_tm1.y, loc_tm1.z])\n loc_t = waypoints[i].transform.location\n loc_t = np.asarray([loc_t.x, loc_t.y, loc_t.z])\n distances.append(np.linalg.norm(loc_tm1 - loc_t))\n\n return waypoints, roadoptions, distances", "def route_layout(self):\n self.route_pins()\n self.route_internal()\n self.route_supplies()", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = 
len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def _populate_input_data_transfer_fields(self):\r\n # Valid fields for the Route Stops input are described here:\r\n # https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route-input-data-types.htm\r\n # Do not transfer RouteName or Sequence as these are explicitly controlled by this tool. Do not transfer\r\n # LocationType because we want all inputs to be Stops. 
Waypoints don't make sense for this analysis.\r\n int_types = [\"Integer\", \"SmallInteger\"]\r\n numerical_types = [\"Double\", \"Single\"] + int_types\r\n rt_stops_input_fields = {\r\n \"Name\": [\"String\"],\r\n \"AdditionalTime\": numerical_types,\r\n \"AdditionalDistance\": numerical_types,\r\n \"AdditionalCost\": numerical_types,\r\n \"TimeWindowStart\": [\"Date\"],\r\n \"TimeWindowEnd\": [\"Date\"],\r\n \"CurbApproach\": int_types,\r\n \"Bearing\": numerical_types,\r\n \"BearingTol\": numerical_types,\r\n \"NavLatency\": numerical_types,\r\n \"SourceID\": int_types,\r\n \"SourceOID\": int_types,\r\n \"PosAlong\": numerical_types,\r\n \"SideOfEdge\": int_types\r\n }\r\n # Preserve origin and destination input fields that match names and types\r\n origin_transfer_fields = [\r\n f.name for f in arcpy.ListFields(self.origins) if f.name in rt_stops_input_fields and\r\n f.type in rt_stops_input_fields[f.name]]\r\n self.rt_inputs[\"origin_transfer_fields\"] = origin_transfer_fields\r\n if origin_transfer_fields:\r\n LOGGER.info((\r\n \"Supported fields in the input Origins table that will be used in the analysis: \"\r\n f\"{origin_transfer_fields}\"\r\n ))\r\n destination_transfer_fields = [\r\n f.name for f in arcpy.ListFields(self.destinations) if f.name in rt_stops_input_fields and\r\n f.type in rt_stops_input_fields[f.name]]\r\n self.rt_inputs[\"destination_transfer_fields\"] = destination_transfer_fields\r\n if destination_transfer_fields:\r\n LOGGER.info((\r\n \"Supported fields in the input Destinations table that will be used in the analysis: \"\r\n f\"{destination_transfer_fields}\"\r\n ))", "def savings2routes(self,r1,r2):\n newRoute = VRP_Route(r1.route+r2.route)\n newRoute.update_route(self.vrpdata) # compute distance, quantity for newRoute, check whether valid\n if newRoute.tourValid:\n return r1.distance + r2.distance - newRoute.distance\n return -1", "def _trace_route(self, start_waypoint, end_waypoint):\n\n # Setting up global router\n if self._grp is None:\n dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)\n grp = GlobalRoutePlanner(dao)\n grp.setup()\n self._grp = grp\n\n # Obtain route plan\n route = self._grp.trace_route(\n start_waypoint.transform.location,\n end_waypoint.transform.location)\n\n return route", "def preprocess(args, id2info, mapping):\n polyline_spans = []\n keys = list(id2info.keys())\n assert 'AV' in keys\n assert 'AGENT' in keys\n keys.remove('AV')\n keys.remove('AGENT')\n keys = ['AGENT', 'AV'] + keys\n vectors = []\n two_seconds = mapping['two_seconds']\n mapping['trajs'] = []\n mapping['agents'] = []\n for id in keys:\n polyline = {}\n\n info = id2info[id]\n start = len(vectors)\n if args.no_agents:\n if id != 'AV' and id != 'AGENT':\n break\n\n agent = []\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n agent.append((line[X], line[Y]))\n\n if args.visualize:\n traj = np.zeros([args.hidden_size])\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n traj = traj[:i * 2].copy()\n break\n traj[i * 2], traj[i * 2 + 1] = line[X], line[Y]\n if i == len(info) - 1:\n traj = traj[:(i + 1) * 2].copy()\n traj = traj.reshape((-1, 2))\n mapping['trajs'].append(traj)\n\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n x, y = line[X], line[Y]\n if i > 0:\n # print(x-line_pre[X], y-line_pre[Y])\n vector = [line_pre[X], line_pre[Y], x, y, line[TIMESTAMP], line[OBJECT_TYPE] == 'AV',\n line[OBJECT_TYPE] == 'AGENT', 
line[OBJECT_TYPE] == 'OTHERS', len(polyline_spans), i]\n vectors.append(get_pad_vector(vector))\n line_pre = line\n\n end = len(vectors)\n if end - start == 0:\n assert id != 'AV' and id != 'AGENT'\n else:\n mapping['agents'].append(np.array(agent))\n\n polyline_spans.append([start, end])\n\n assert_(len(mapping['agents']) == len(polyline_spans))\n\n assert len(vectors) <= max_vector_num\n\n t = len(vectors)\n mapping['map_start_polyline_idx'] = len(polyline_spans)\n if args.use_map:\n vectors, polyline_spans = get_sub_map(args, mapping['cent_x'], mapping['cent_y'], mapping['city_name'],\n vectors=vectors,\n polyline_spans=polyline_spans, mapping=mapping)\n\n # logging('len(vectors)', t, len(vectors), prob=0.01)\n\n matrix = np.array(vectors)\n # matrix = np.array(vectors, dtype=float)\n # del vectors\n\n # matrix = torch.zeros([len(vectors), args.hidden_size])\n # for i, vector in enumerate(vectors):\n # for j, each in enumerate(vector):\n # matrix[i][j].fill_(each)\n\n labels = []\n info = id2info['AGENT']\n info = info[mapping['agent_pred_index']:]\n if not args.do_test:\n if 'set_predict' in args.other_params:\n pass\n else:\n assert len(info) == 30\n for line in info:\n labels.append(line[X])\n labels.append(line[Y])\n\n if 'set_predict' in args.other_params:\n if 'test' in args.data_dir[0]:\n labels = [0.0 for _ in range(60)]\n\n if 'goals_2D' in args.other_params:\n point_label = np.array(labels[-2:])\n mapping['goals_2D_labels'] = np.argmin(get_dis(mapping['goals_2D'], point_label))\n\n if 'lane_scoring' in args.other_params:\n stage_one_label = 0\n polygons = mapping['polygons']\n min_dis = 10000.0\n for i, polygon in enumerate(polygons):\n temp = np.min(get_dis(polygon, point_label))\n if temp < min_dis:\n min_dis = temp\n stage_one_label = i\n\n mapping['stage_one_label'] = stage_one_label\n\n mapping.update(dict(\n matrix=matrix,\n labels=np.array(labels).reshape([30, 2]),\n polyline_spans=[slice(each[0], each[1]) for each in polyline_spans],\n labels_is_valid=np.ones(args.future_frame_num, dtype=np.int64),\n eval_time=30,\n ))\n\n return mapping", "def process_vrps(self):\n self.origins = set()\n for afi in (\"ipv4\", \"ipv6\"):\n self.info(\"Creating prefix-lists for {} address-family\"\n .format(afi))\n self.covered[afi] = [\"seq {seq} permit {prefix} le {maxLength}\"\n .format(seq=seq, **entry)\n for seq, entry\n in enumerate(self.vrps.covered(afi))]\n origins = self.vrps.origins(afi)\n self.for_origin[afi] = {}\n for asn in origins:\n self.for_origin[afi][asn] = [\"seq {seq} permit {prefix} le {maxLength}\" # noqa: E501\n .format(seq=seq, **entry)\n for seq, entry\n in enumerate(self.vrps.for_origin(asn, afi))] # noqa: E501\n self.origins.update(origins)", "def directions(self):\n return []", "def get_destination(self, request, format=None):\n user = request.user\n cn = request.DATA.get('center', \"\")\n imei = request.DATA.get('imei', \"\")\n center_dict = dict(get_center_cache('unicode_name'))\n\n if not cn:\n return Response({'error': 'center key is missing from request',\n 'error_code': 'ECDCMCN005','success': False},\n status=status.HTTP_200_OK)\n\n #Validate origin/destination center\n if cn not in center_dict.keys():\n return Response({'error': 'Invalid center passed {}'.format(cn),\n 'error_code' : 'ECDCICN006', 'success': False},\n status=status.HTTP_200_OK)\n\n station = connection.Station.find_one({'user':user.username , 'cn' : cn})\n\n if not station:\n return Response({'error':'station not found for the user',\n 'error_code':'ECDCNST007','success': False},\n 
status=status.HTTP_200_OK)\n\n #restrict user if he want to access insta\n # bagging from different device\n if imei != station.get('imei') and station.get('lock'):\n return Response({'error': 'Session already exist for user',\n 'error_code': 'ECDCSEU001', 'success': False})\n\n station['lock'] = True\n station['imei'] = imei\n data = dict()\n data['destinations'] = station.bag_cn\n data['station'] = station.get('name')\n data['success'] = True\n station.save()\n return Response(data, status=status.HTTP_200_OK)", "def FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, TimeTableList, TimeTableIndex, StationHourIndex, PathInfo=[]):\r\n PathInfo = PathInfo + [ConnectionInfo]\r\n\r\n if Cond.IfTestRouteSearch:\r\n \tStations = GetAllStationsOfRoute(PathInfo)\r\n \tprint \"\\nStations of Path (%s): ++++++++\" % len(Stations)\r\n \tprint Stations\r\n \tprint \"Route Information:\"\r\n \tprint PrettyStringRouteInfo(PathInfo)\r\n\r\n # check successful termination\r\n # if len(PathInfo) > 1 and ConnectionInfo[ConnInfoInd['station_to']] == EndStation: \r\n if CheckIfPathTerminatesSuccessfully(ConnectionInfo, PathInfo, RouteConditions, EndStation):\r\n \tif Cond.IfTestRouteSearch:\r\n \t\tprint \"End Station is reached!\"\t\r\n \treturn [PathInfo]\r\n\r\n # current (this iteration's) path length\r\n CurPathLen = len(PathInfo)\r\n\r\n # get next connections\r\n start_station = ConnectionInfo[ConnInfoInd['station_to']]\r\n departure_hour = ConnectionInfo[ConnInfoInd['arrival_hour']] \t\r\n departure_min = ConnectionInfo[ConnInfoInd['arrival_min']]\r\n\r\n # TEST BU2019\r\n if False:\r\n\t print 'ConnInfoInd: ' + str(ConnectionInfo)\r\n\t print 'start_station,departure_hour,departure_min: %s, %s, %s' % (start_station, departure_hour, departure_min)\r\n\t time.sleep(0.1)\r\n \r\n # mandatory conditions\r\n WaitLimit = RouteConditions[Cond.MaxWaitingTimeAtStation][0]\r\n \r\n # get next connections from the station\r\n ConnectionInfoList = GetListOfNextConnections(TimeTableList, TimeTableIndex, StationHourIndex, start_station, departure_hour, departure_min, WaitLimit)\r\n\r\n # insert on-foot connections (Zu Fuss, ZF) to nearby stations into ConnectionInfoList\r\n # cancel (Tunc 4/3/2019)\r\n if False:\r\n\t StationMeasurementTime = ReqStationMeasureTime\r\n\t \r\n\t if Cond.MaxNumberOfSubsequentStationPassagesOnFoot in RouteConditions \\\r\n\t \tand RouteConditions[Cond.MaxNumberOfSubsequentStationPassagesOnFoot][0] > 0:\r\n\r\n\t\t if RouteConditions.has_key(Cond.MeasureStations):\r\n\t\t \tStationMeasurementTime = RouteConditions[Cond.MeasureStations][1]\r\n\t\t Connections = GetOnFootStationChangeConnections(start_station, departure_hour, departure_min, StationMeasurementTime)\r\n\t\t \r\n\t\t if Connections:\t\t# i.e. 
if Connections is not None\r\n\t\t \t(OnFootConnections1, OnFootConnections2) = Connections \r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections1)\r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections2)\r\n\r\n if Cond.IfTestRouteSearch:\r\n\t\tprint \"Next connections:\"\r\n\t\tfor c in ConnectionInfoList:\r\n\t\t\tprint c\r\n\t\ttime.sleep(Cond.TestWaitingTime)\r\n\r\n if not ConnectionInfoList:\t\t# Endstation: Node w/o successor nodes\r\n \treturn []\r\n\r\n PathInfoList = []\r\n\r\n for ConnectionInfo in ConnectionInfoList:\r\n\t\tres = Cond.CheckIfConnectionShouldBeSelected(ConnectionInfo, PathInfo, EndStation, RouteConditions)\r\n\r\n\t\t# test\r\n\t\tif Cond.IfTestRouteSearch:\r\n\t\t\tif res == None or res == False:\r\n\t\t\t\tprint \"CheckIfConnectionShouldBeSelected: %s\" % res\r\n\r\n\t \tif res == None: return[] \r\n\t \tif res == False: continue\r\n\r\n\t \t# recursive call\r\n\t\textended_paths = FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, \\\r\n\t\t\tTimeTableList, TimeTableIndex, StationHourIndex, PathInfo)\r\n\r\n\t\t# report status\r\n\t\tif Cond.ReportDuringRouteSearch in RouteConditions:\r\n\t\t\tTimeIntv = default_timer() - Cond.SearchStartTime\r\n\t\t\tRouteSearchReportingIntervalInSeconds = RouteConditions[Cond.ReportDuringRouteSearch][0]\r\n\t\t\tif TimeIntv > Cond.RouteSearchReportCounter * RouteSearchReportingIntervalInSeconds:\r\n\t\t\t\tCond.RouteSearchReportCounter += 1 \r\n\t\t\t\tprint \"%s seconds passed... \" % \"{:.2f}\".format(TimeIntv)\r\n\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\t\r\n\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\r\n\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t# append to path list\r\n\t\tfor p in extended_paths:\r\n\t\t\t# no need to recheck route unless current connection is the last one \r\n\t\t\t# LastConnection = (ConnectionInfo == p[-1])\r\n\t\t\tLastConnection = (CurPathLen == len(p) -1 and ConnectionInfo == p[-1])\r\n\t\t\t\r\n\t\t\tif LastConnection:\r\n\r\n\t\t\t\tif Cond.CheckIfRouteShouldBeSelected(p, RouteConditions):\r\n\t\t\t\t\tPathInfoList.append(p)\r\n\t\t\t\t\tCond.SelectedRoutes.append(ApplyAllRouteInfoCorrections(p))\r\n\r\n\t\t\t\t\t# evaluate route\r\n\t\t\t\t\t# cancel for BU2019\r\n\r\n\t\t\t\t\tif Cond.IfTestRouteSearch:\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\\n\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\t\r\n\t\t\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t\t\t\t# test\r\n\t\t\t\t\tIncrementDicValue(Cond.RouteCountPerRouteLength, CurPathLen)\r\n\t\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t# not last connection, no need to recheck the route\r\n\t\t\t\t# PathInfoList.append(p)\r\n\t\t\t\t# IncrementDicValue(SelectedRoutesPerLevel, CurPathLen)\r\n\t\t\t\tpass\r\n \r\n return PathInfoList", "def addSNMPRoutes(self, routingtable):\n\n ipCidrRouteDest = \"\"\n ipCidrRouteNextHopAS = \"\"\n ipCidrRouteMetric1 = 0\n ipCidrRouteMetric2 = 0\n ipCidrRouteMetric3 = 
0\n ipCidrRouteMetric4 = 0\n ipCidrRouteMetric5 = 0\n ipCidrRouteStatus = 0\n ipCidrRouteMask = \"\"\n ipCidrRouteTos = 0\n ipCidrRouteNextHop = \"\"\n ipCidrRouteIfIndex = 0\n ipCidrRouteType = 0\n ipCidrRouteProto = 0\n ipCidrRouteAge = 0\n ipCidrRouteInfo = 0\n\n for loop_rtIndex in routingtable:\n for ifAttr in routingtable[loop_rtIndex]:\n if ifAttr == 1:\n ipCidrRouteDest = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 10:\n ipCidrRouteNextHopAS = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 11:\n ipCidrRouteMetric1 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 12:\n ipCidrRouteMetric2 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 13:\n ipCidrRouteMetric3 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 14:\n ipCidrRouteMetric4 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 15:\n ipCidrRouteMetric5 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 16:\n ipCidrRouteStatus = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 2:\n ipCidrRouteMask = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 3:\n ipCidrRouteTos = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 4:\n ipCidrRouteNextHop = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 5:\n ipCidrRouteIfIndex = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 6:\n ipCidrRouteType = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 7:\n ipCidrRouteProto = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 8:\n ipCidrRouteAge = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 9:\n ipCidrRouteInfo = routingtable[loop_rtIndex][ifAttr]\n\n self.routingtable[loop_rtIndex] = device_routingtable( \\\n ipCidrRouteDest, ipCidrRouteNextHopAS, ipCidrRouteMetric1, \\\n ipCidrRouteMetric2, ipCidrRouteMetric3, ipCidrRouteMetric4, \\\n ipCidrRouteMetric5, ipCidrRouteStatus, ipCidrRouteMask, \\\n ipCidrRouteTos, ipCidrRouteNextHop, ipCidrRouteIfIndex, \\\n ipCidrRouteType, ipCidrRouteProto, ipCidrRouteAge, \\\n ipCidrRouteInfo)", "def lookup_routes(self, daddr):\n # TODO\n outroutes = []\n\n net_pre = daddr[0 : daddr.index('.')] + '.0.0.0'\n\n #print(self.routes)\n\n for ip in self.routes.keys():\n network = self.routes[ip][NTWK]\n net_pre_2 = network[0:network.index('.')] + '.0.0.0'\n if net_pre_2 == net_pre:\n outroutes.append(ip)\n return outroutes", "def add_locations(self):\n for _ in range(0, self.num_locations):\n detector_id = self.generate_id()\n detector_direction = self.generate_direction()\n detector_point = self.generate_point()\n self.dataset[detector_id] = (detector_direction, detector_point)\n assert len(self.dataset) == self.num_locations", "def _build_legs(self):\n if self._primary_mode == 'transit':\n for transit_leg in self._best_trip.get_transit_legs():\n self._legs.append(transit_leg.get_directions())\n else:\n self._legs.append(self._best_trip.get_directions())" ]
[ "0.65625894", "0.5583581", "0.5395718", "0.5393464", "0.5392419", "0.53610194", "0.5345976", "0.52621037", "0.5238447", "0.521201", "0.52106446", "0.52027905", "0.518865", "0.5178128", "0.51780194", "0.51352537", "0.51346874", "0.5124585", "0.51178783", "0.51078916", "0.5107856", "0.510141", "0.50961626", "0.50841933", "0.50798804", "0.50780416", "0.5056264", "0.50527537", "0.5047856", "0.50461626", "0.5018767", "0.50128245", "0.50045407", "0.50036883", "0.50014997", "0.4974354", "0.4957224", "0.49541637", "0.49465528", "0.49384865", "0.4937752", "0.49336717", "0.49328843", "0.49091184", "0.49041966", "0.4899423", "0.48968905", "0.48768246", "0.48680758", "0.48656312", "0.48536053", "0.48472926", "0.48367682", "0.48353854", "0.48338473", "0.48319697", "0.48179477", "0.48165166", "0.4804951", "0.48025396", "0.4802522", "0.47985998", "0.4796322", "0.47942787", "0.47825468", "0.47799972", "0.4779197", "0.47775385", "0.47751293", "0.47710252", "0.47685644", "0.4754938", "0.47517502", "0.4743539", "0.47421408", "0.4741905", "0.47405797", "0.47383463", "0.47378868", "0.47372517", "0.47255278", "0.4702506", "0.47022167", "0.47001973", "0.46977645", "0.4696914", "0.46911982", "0.46875587", "0.46859288", "0.46831143", "0.46825296", "0.46820125", "0.46801412", "0.4679962", "0.46762183", "0.46725258", "0.467135", "0.46680552", "0.46664816", "0.46638212" ]
0.7464433
0
Insert each predefined OD pair into the Route analysis for the many-to-many case.
def _insert_stops_many_to_many(self): # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse o_data = {} # {Origin ID: [Shape, transferred fields]} for row in arcpy.da.SearchCursor( # pylint: disable=no-member self.input_origins_layer, [self.origin_id_field, "SHAPE@"] + self.origin_transfer_fields ): o_data[row[0]] = row[1:] d_data = {} # {Destination ID: [Shape, transferred fields]} for row in arcpy.da.SearchCursor( # pylint: disable=no-member self.input_destinations_layer, [self.dest_id_field, "SHAPE@"] + self.destination_transfer_fields ): d_data[row[0]] = row[1:] # Insert origins from each OD pair into the Route analysis with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.origin_unique_id_field_name, "SHAPE@"] + self.origin_transfer_fields ) as icur: for od_pair in self.od_pairs: origin_id, dest_id = od_pair try: origin_data = o_data[origin_id] except KeyError: # This should never happen because we should have preprocessed this out. self.logger.debug( f"Origin from OD Pairs not found in inputs. Skipped pair {od_pair}.") continue route_name = f"{origin_id} - {dest_id}" icur.insertRow((route_name, 1, origin_id) + origin_data) # Insert destinations from each OD pair into the Route analysis with self.rt_solver.insertCursor( arcpy.nax.RouteInputDataType.Stops, ["RouteName", "Sequence", self.dest_unique_id_field_name, "SHAPE@"] + self.destination_transfer_fields ) as icur: for od_pair in self.od_pairs: origin_id, dest_id = od_pair try: dest_data = d_data[dest_id] except KeyError: # This should never happen because we should have preprocessed this out. self.logger.debug( f"Destination from OD Pairs not found in inputs. Skipped pair {od_pair}.") continue route_name = f"{origin_id} - {dest_id}" icur.insertRow((route_name, 2, dest_id) + dest_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_parties(self, *parties) -> None:\n\n for party in parties:\n self._route_table['route_table'][party.get_id()] = party.to_entry_point(\n )", "def assign_aovs(self, aovs: List[AOV]):\n\t\tfor aov in aovs:\n\t\t\tself.assign_aov(aov)", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def initial_dvec_and_forw_insert():\n global DATA\n for every_neighbor in DATA[\"neighbor\"]:\n DATA[\"distanc_vec\"].append([every_neighbor[0], every_neighbor[1]])\n # parent to direct neighbors is always\n # the router itself that's why DATA[\"router_id\"]\n # as third argument\n DATA[\"forw_table\"].append(\n [every_neighbor[0], every_neighbor[1], DATA[\"router_id\"]])", "def InitWayR(session):\n global way_r\n q = session.query(melt.StreetAssoc)\n way_r = set([it.osm_way for it in q.all()])", "def add_route(self, distance, start, destination):\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))", "def insertAirport(airport, matchlist):\n graph = DSAGraph()\n for i in range(1,len(airport)):\n if airport[i][10] == \"plane\":\n From = airport[i][1]\n To = airport[i][5]\n weight = convertTravelTime(airport[i][9]) \n importToGraph(graph, From, To, weight, matchlist)\n return graph", "def add_many(self, pair_list):\n\n for pair in pair_list:\n plug = PlugLead(pair)\n self.check_conflicts(plug)\n self.plugleads.append(plug)", "def _addToKnowledge(pop):\n perspective = pop.getPerspectiveName()\n if perspective not in _byPerspective:\n _byPerspective[perspective] = []\n _byPerspective[perspective].append(pop)\n nodeOid = pop.getNodeOid()\n if nodeOid not in _byNodeOid:\n _byNodeOid[nodeOid] = sets.Set()\n _byNodeOid[nodeOid].add(perspective)\n apoOid = pop.getApoOid()\n if apoOid not in _byApoOid:\n _byApoOid[apoOid] = {}\n _byApoOid[apoOid][perspective] = pop", "def _add_route(self, connections):\n route = ArduinoSwitchControlRoute(connections)\n if route.input.label not in self.routes:\n 
self.routes[route.input.label] = {route.output.label: [route]}\n elif route.output.label not in self.routes[route.input.label]:\n self.routes[route.input.label][route.output.label] = [route]\n else:\n self.routes[route.input.label][route.output.label].append(route)", "def setAtomPairs(self):\n atomPairs = []\n for item in self.condensedProperDihedrals:\n dih = item[0]\n atom1 = dih.atoms[0]\n atom2 = dih.atoms[3]\n pair = [atom1, atom2]\n if atomPairs.count(pair) == 0:\n atomPairs.append(pair)\n self.atomPairs = atomPairs # [[atom1, atom2], ...]\n self.printDebug(\"atomPairs done\")", "def add_locations(self):\n for _ in range(0, self.num_locations):\n detector_id = self.generate_id()\n detector_direction = self.generate_direction()\n detector_point = self.generate_point()\n self.dataset[detector_id] = (detector_direction, detector_point)\n assert len(self.dataset) == self.num_locations", "def create_routes_from_things(things):\n for thing in things.values():\n create_routes_from_thing(thing)", "def generate_trivial_tours(self):\n self.routes = []\n for c in range(1, self.vrpdata.NumCust+1):\n self.routes.append(VRP_Route([c]))\n return self.get_objective()", "def buildRoutesDict(self):\n \n # create route number and name xref dictionary\n arcpy.env.workspace = PublicTransit.RTD_PATH\n routes = arcpy.SearchCursor(PublicTransit.BUS_ROUTES, \"\", \"\", \"RouteID; Name\", \"\")\n self.routeXref = dict()\n for route in routes:\n self.routeXref[route.RouteID] = route.Name\n self.routeXref[route.Name] = route.RouteID\n del routes\n \n #get mode lookup table\n mode_table = self.getModeLookupTable()\n \n # Query the RTD database for the route name, operator, mode, and headways.\n # We are querying for weekday routes (DAYTYPE_CLASS Weekday field = 'Y')\n conn = pyodbc.connect(PublicTransit.DB_CONN_STRING)\n cursor = conn.cursor()\n self.transitRoutes = dict()\n qry = \"\"\"\n WITH t AS\n (\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR,\n CASE\n WHEN HOUR_CLASS >= 3 and HOUR_CLASS < 6 THEN 'EA'\n WHEN HOUR_CLASS >= 6 and HOUR_CLASS < 10 THEN 'AM'\n WHEN HOUR_CLASS >= 10 and HOUR_CLASS < 15 THEN 'MD'\n WHEN HOUR_CLASS >= 15 and HOUR_CLASS < 19 THEN 'PM'\n WHEN (HOUR_CLASS BETWEEN 19 AND 24) OR HOUR_CLASS < 3 THEN 'EV'\n END AS tod,\n [HOURLY_FREQUENCY(Daily until HOUR_CLASS update)], HOUR_CLASS\n FROM dbo.[ROUTE HEADWAY AND FREQUENCY]\n WHERE DAYTYPE_CLASS IN\n (SELECT dc.CLASS FROM dbo.DAYTYPE_CLASS dc WHERE WEEKDAY = 'Y')\n )\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod,\n 60.0 / ROUND(AVG(CAST([HOURLY_FREQUENCY(Daily until HOUR_CLASS update)] AS FLOAT)), 0) as headway\n FROM t\n GROUP BY CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod\n ORDER BY SCH_ROUTEID, SCH_PATTERNID, tod\"\"\"\n \n used_route_names = []\n # Iterate through result set and apply attributes.\n for row in cursor.execute(qry):\n routePattern = str(row.SCH_ROUTEID) + \"_\" + str(row.SCH_PATTERNID)\n if routePattern not in self.transitRoutes:\n self.transitRoutes[routePattern] = TransitRoute(routePattern,\n routeId = row.SCH_ROUTEID,\n patternId = row.SCH_PATTERNID)\n self.transitRoutes[routePattern].new_name = self.__cleanRouteName(row.CPT_AGENCYID + \"_\" + row.SCH_ROUTEDESIGNATOR[:(11 - 1 - len(row.CPT_AGENCYID))],used_route_names) #12 is the maximum name length\n self.transitRoutes[routePattern].agency = row.AGENCYNAME\n mode = -1\n for mode_row in mode_table:\n if row.CPT_AGENCYID == 
mode_row[\"CPT_AGENCYID\"] and row.CPT_MODE == mode_row[\"CPT_MODE\"]:\n if mode_row[\"SCH_ROUTEDESIGNATOR\"] != \"NA\":\n if row.SCH_ROUTEDESIGNATOR == mode_row[\"SCH_ROUTEDESIGNATOR\"]:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n break #this is as detailed as we can get\n else:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n self.transitRoutes[routePattern].mode = mode\n self.transitRoutes[routePattern].mode_group = Mode.getModeName(mode_group)\n # set headways\n if row.tod == 'EA':\n self.transitRoutes[routePattern].eaHeadway = row.headway\n elif row.tod == 'AM':\n self.transitRoutes[routePattern].amHeadway = row.headway\n elif row.tod == 'MD':\n self.transitRoutes[routePattern].mdHeadway = row.headway\n elif row.tod == 'PM':\n self.transitRoutes[routePattern].pmHeadway = row.headway\n elif row.tod == 'EV':\n self.transitRoutes[routePattern].evHeadway = row.headway\n conn.close()", "def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches\r\n # Select the inputs to process\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._select_inputs_one_to_one(chunk_definition)\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._get_od_pairs_for_chunk(chunk_definition)\r\n self._select_inputs_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n # Initialize the Route solver object\r\n self.initialize_rt_solver()\r\n self._add_unique_id_fields()\r\n\r\n # Insert the origins and destinations\r\n self.logger.debug(f\"Route solver fields transferred from Origins: {self.origin_transfer_fields}\")\r\n self.logger.debug(f\"Route solver fields transferred from Destinations: {self.destination_transfer_fields}\")\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._insert_stops_one_to_one()\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._insert_stops_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0:\r\n # There were no valid destinations for this set of origins\r\n self.logger.debug(\"No valid destinations for this set of origins. Skipping Route calculation.\")\r\n return\r\n\r\n # Load barriers\r\n # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of\r\n # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the\r\n # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers,\r\n # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal\r\n # solves. 
However, since barriers is likely an unusual case, deal with this only if it becomes a problem.\r\n for barrier_fc in self.barriers:\r\n self.logger.debug(f\"Loading barriers feature class {barrier_fc}...\")\r\n shape_type = arcpy.Describe(barrier_fc).shapeType\r\n if shape_type == \"Polygon\":\r\n class_type = arcpy.nax.RouteInputDataType.PolygonBarriers\r\n elif shape_type == \"Polyline\":\r\n class_type = arcpy.nax.RouteInputDataType.LineBarriers\r\n elif shape_type == \"Point\":\r\n class_type = arcpy.nax.RouteInputDataType.PointBarriers\r\n else:\r\n self.logger.warning(\r\n f\"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored.\"\r\n )\r\n continue\r\n barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True)\r\n self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True)\r\n\r\n # Solve the Route analysis\r\n self.logger.debug(\"Solving Route...\")\r\n solve_start = time.time()\r\n self.solve_result = self.rt_solver.solve()\r\n solve_end = time.time()\r\n self.logger.debug(f\"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.\")\r\n\r\n # Handle solve messages\r\n solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)]\r\n for msg in solve_msgs:\r\n self.logger.debug(msg)\r\n\r\n # Update the result dictionary\r\n self.job_result[\"solveMessages\"] = solve_msgs\r\n if not self.solve_result.solveSucceeded:\r\n self.logger.debug(\"Solve failed.\")\r\n return\r\n self.logger.debug(\"Solve succeeded.\")\r\n self.job_result[\"solveSucceeded\"] = True\r\n\r\n # Save output\r\n self._export_to_feature_class(chunk_definition)\r\n\r\n self.logger.debug(\"Finished calculating Route.\")", "def update_parties(self, *parties) -> None:\n\n for party in parties:\n self._route_table['route_table'][party.get_id()] = party.to_entry_point(\n )", "def _add_related(related, dep, all_related, index, connector=None):\n doc = {}\n doc[\"relationForm\"] = dep\n doc[\"rawName\"] = related\n doc[\"tokenIndex\"] = int(index)\n doc[\"offsetStart\"] = A.lookup[int(index)][\"start\"]\n doc[\"offsetEnd\"] = A.lookup[int(index)][\"end\"]\n doc[\"connector\"] = \"\" if connector is None else connector\n if not doc in all_related:\n all_related.append(doc)\n return all_related", "def _process_connections(self, connections):\n # create connection\n for con in connections:\n self._add_connection(con)\n\n for inp_lab, inp in self.inputs.items():\n # use self._find_routes() to find routes from input inp\n routes_inp = self._find_routes(inp)\n # create routes\n for route in routes_inp:\n self._add_route(route)\n # sort the routes dictionary\n self._sort_routes()", "def add_building_output_locations2(self,areasList,start,end,step): \n print \"Getting buildings locations...\"\n \n dictionaries = []\n dictionary = {}\n \n for a in areasList:\n \n dictionaries.append(self.grid.get_building_output_locations(a[0],a[1]))\n \n for dict in dictionaries:\n for row in dict.iteritems(): \n dictionary[row[0]] = row[1] \n\n print \"Number of buildings = %s\" % (len(dictionary))\n\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def choose_routes(self, veh_ids, route_choices):\n for 
i, veh_id in enumerate(veh_ids):\n if route_choices[i] is not None:\n self.traci_connection.vehicle.setRoute(\n vehID=veh_id, edgeList=route_choices[i])\n self.vehicles.set_route(veh_id, route_choices[i])", "def add_corridor_edges(G, road_set, irrationals, singleGraph):\r\n road_dict = {}\r\n for road in road_set:\r\n sensor_rows = run_on_file('d07_stations_2008_11_26.txt')\r\n if road not in irrationals:\r\n if road%2==0:\r\n dir1='E'#for even numbered roads, east is ascending\r\n dir2='W'\r\n else:\r\n dir1='N'#for odd numbered roads, north is ascending\r\n dir2='S'\r\n else:\r\n if road%2==0:\r\n dir1='N'#for most even numbered roads, east is ascending\r\n dir2='S'\r\n else:\r\n dir1='E'#for most odd numbered roads, north is ascending\r\n dir2='W'\r\n id_list_1=find_ids(sensor_rows, 0, 100000000, road, dir1)\r\n sensor_rows = run_on_file('d07_stations_2008_11_26.txt')\r\n id_list_2=find_ids(sensor_rows,0,100000000, road, dir2)\r\n road_dict[(road, dir1)]=id_list_1\r\n road_dict[(road, dir2)]=id_list_2\r\n sorted_list_1=sorted(id_list_1, key=itemgetter(1))\r\n sorted_list_2=sorted(id_list_2, key=itemgetter(1), reverse=True)\r\n if len(sorted_list_1)<2:\r\n print 'bad road', road\r\n for i in range(0, len(sorted_list_1)-1):\r\n G.add_edge(sorted_list_1[i][0], sorted_list_1[i+1][0])\r\n singleGraph.add_edge(sorted_list_1[i][0], sorted_list_1[i+1][0])\r\n for i in range(0, len(sorted_list_2)-1):\r\n G.add_edge(sorted_list_2[i][0], sorted_list_2[i+1][0]) \r\n singleGraph.add_edge(sorted_list_2[i][0], sorted_list_2[i+1][0]) \r\n print road_dict\r\n return G, road_dict, singleGraph", "def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals\r\n # Use an insertCursor to insert Stops into the Route analysis\r\n destinations = {}\r\n destination_rows = []\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.origin_transfer_fields\r\n ) as icur:\r\n # Loop through origins and insert them into Stops along with their assigned destinations\r\n for origin in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [\"SHAPE@\", self.origin_id_field, self.assigned_dest_field] + self.origin_transfer_fields\r\n ):\r\n dest_id = origin[2]\r\n if dest_id is None:\r\n continue\r\n if dest_id not in destinations:\r\n dest_val = f\"'{dest_id}'\" if isinstance(dest_id, str) else dest_id\r\n with arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [\"SHAPE@\", self.dest_id_field] + self.destination_transfer_fields,\r\n where_clause=f\"{self.dest_id_field} = {dest_val}\"\r\n ) as cur:\r\n try:\r\n destinations[dest_id] = next(cur)\r\n except StopIteration:\r\n # The origin's destination is not present in the destinations table. 
Just skip the origin.\r\n continue\r\n # Insert origin and destination\r\n destination = destinations[dest_id]\r\n if self.reverse_direction:\r\n route_name = f\"{dest_id} - {origin[1]}\"\r\n origin_sequence = 2\r\n destination_sequence = 1\r\n else:\r\n route_name = f\"{origin[1]} - {dest_id}\"\r\n origin_sequence = 1\r\n destination_sequence = 2\r\n # Define the final origin and destination rows for the input Stops\r\n origin_row = [route_name, origin_sequence, origin[1], origin[0], None] + list(origin)[3:]\r\n destination_row = [route_name, destination_sequence, None, destination[0], destination[1]] + \\\r\n list(destination)[2:]\r\n icur.insertRow(origin_row)\r\n destination_rows.append(destination_row)\r\n\r\n # Insert destinations\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.destination_transfer_fields\r\n ) as dcur:\r\n for row in destination_rows:\r\n dcur.insertRow(row)", "def writePathways( self ):\n\n self.logger.info( 'writePathways: START' )\n\n # Generate inserts for meabolic pathways.\n self.importerPathway.writePathways()\n\n self.logger.info( 'writePathways: DONE' )", "def get_waypoints(self, maneuver_id, event):\n action = etree.SubElement(event, \"Action\")\n action_name = f\"Action for Manuever ID {maneuver_id}\"\n action.set(\"name\", action_name)\n private_action = etree.SubElement(action, \"PrivateAction\")\n routing_act = etree.SubElement(private_action, \"RoutingAction\")\n assign_route = etree.SubElement(routing_act, \"AssignRouteAction\")\n route = etree.SubElement(assign_route, \"Route\")\n route.set(\"name\", \"OSC Generated Route\")\n route.set(\"closed\", \"false\")\n\n waypoint_layer = QgsProject.instance().mapLayersByName(\"Waypoint Maneuvers\")[0]\n query = f\"\\\"Maneuver ID\\\" is '{maneuver_id}'\"\n request = QgsFeatureRequest().setFilterExpression(query)\n for feature in waypoint_layer.getFeatures(request):\n waypoint = etree.SubElement(route, \"Waypoint\")\n waypoint.set(\"routeStrategy\", feature[\"Route Strategy\"])\n position = etree.SubElement(waypoint, \"Position\")\n world_position = etree.SubElement(position, \"WorldPosition\")\n world_position.set(\"x\", str(feature[\"Pos X\"]))\n world_position.set(\"y\", str(feature[\"Pos Y\"]))\n world_position.set(\"z\", str(feature[\"Pos Z\"]))\n world_position.set(\"h\", str(feature[\"Orientation\"]))", "def get_pairs(self):\n self.get_locations()\n self.choices = {}\n for host, pathogens in self.locations.iteritems():\n if len(pathogens) > 1:\n for pair in combinations(pathogens, 2):\n self.choices.update({pair: host}) # pairs of pathogens in same host", "def _generate_ribs(self):\n for fw in self._fw_rules:\n source_tag = fw['source_tag']\n dest_tag = fw['dest_tag']\n\n for source_vm_index in self._tag_owners[source_tag]:\n for dest_vm_index in self._tag_owners[dest_tag]:\n # Add to each vertex access ability nodes\n self._graph[source_vm_index].add(dest_vm_index)", "def add_returned_route_on_gw(self, context, router_id, port):\n LOG.debug('OVNL3RouterPlugin::')\n ovn_router_name = utils.ovn_gateway_name(router_id)\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip['subnet_id']\n subnet = self._plugin.get_subnet(context, subnet_id)\n route = {'destination': subnet['cidr'], 'nexthop': ovn_const.OVN_LROUTER_TRANSIT_PORT_IP}\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.add_static_route(ovn_router_name,\n 
ip_prefix=route['destination'],\n nexthop=route['nexthop']))", "def do_setup(self, ants): \n log_filter = LogFilter()\n getLogger().addFilter(log_filter)\n\n self.hills = []\n self.directions = []\n\n self.seen = [] #areas that have been seen, use this to avoid repetition\n self.unseen = []\n self.stepped_on = []\n\n self.intent = {}\n self.lc = {} #center of mass for a location\n self.i = {} #number of iterations for an ant\n\n for row in range(ants.rows):\n for col in range(ants.cols):\n self.unseen.append((row, col))\n self.intent[(row,col)] = Intent.GATHER\n\n self.lc[(row,col)] = (-1.0,-1.0) #set up center of mass\n self.i[(row,col)] = -1", "def to_agent(details):\n agent = {place_detail_keys[i]: \"N/A\" for i in range(0, len(place_detail_keys))}\n for key in place_detail_keys:\n if key in details:\n agent[key] = details[key]\n return agent", "def __init__( # pylint: disable=too-many-locals, too-many-arguments\r\n self, pair_type_str, origins, origin_id_field, destinations, dest_id_field,\r\n network_data_source, travel_mode, time_units, distance_units,\r\n max_routes, max_processes, out_routes, scratch_folder, reverse_direction=False,\r\n assigned_dest_field=None, od_pair_table=None, time_of_day=None, barriers=None\r\n ):\r\n pair_type = helpers.PreassignedODPairType[pair_type_str]\r\n self.origins = origins\r\n self.destinations = destinations\r\n self.out_routes = out_routes\r\n self.scratch_folder = scratch_folder\r\n time_units = helpers.convert_time_units_str_to_enum(time_units)\r\n distance_units = helpers.convert_distance_units_str_to_enum(distance_units)\r\n if not barriers:\r\n barriers = []\r\n self.max_processes = max_processes\r\n if not time_of_day:\r\n time_of_day = None\r\n else:\r\n time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT)\r\n\r\n # Initialize the dictionary of inputs to send to each OD solve\r\n self.rt_inputs = {\r\n \"pair_type\": pair_type,\r\n \"origins\": self.origins,\r\n \"origin_id_field\": origin_id_field,\r\n \"destinations\": self.destinations,\r\n \"dest_id_field\": dest_id_field,\r\n \"network_data_source\": network_data_source,\r\n \"travel_mode\": travel_mode,\r\n \"time_units\": time_units,\r\n \"distance_units\": distance_units,\r\n \"time_of_day\": time_of_day,\r\n \"reverse_direction\": reverse_direction,\r\n \"scratch_folder\": self.scratch_folder,\r\n \"assigned_dest_field\": assigned_dest_field,\r\n \"od_pair_table\": od_pair_table,\r\n \"barriers\": barriers,\r\n \"origin_transfer_fields\": [], # Populate later\r\n \"destination_transfer_fields\": [] # Populate later\r\n }\r\n\r\n # List of intermediate output OD Line files created by each process\r\n self.route_fcs = []\r\n\r\n # Construct OID ranges for chunks of origins and destinations\r\n if pair_type is helpers.PreassignedODPairType.one_to_one:\r\n # Chunks are of the format [first origin ID, second origin ID]\r\n self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes)\r\n elif pair_type is helpers.PreassignedODPairType.many_to_many:\r\n # Chunks are of the format [chunk_num, chunk_size]\r\n num_od_pairs = 0\r\n with open(od_pair_table, \"r\", encoding=\"utf-8\") as f:\r\n for _ in f:\r\n num_od_pairs += 1\r\n num_chunks = ceil(num_od_pairs / max_routes)\r\n self.chunks = [[i, max_routes] for i in range(num_chunks)]\r\n\r\n # Calculate the total number of jobs to use in logging\r\n self.total_jobs = len(self.chunks)\r\n\r\n self.optimized_cost_field = None", "def add_routes(self, mapper):\n pass", "def _add_connections(top, matches, 
conn_type):\n for sorted_conn in matches:\n to_add_conn = CONNS[conn_type](\n connection_members=[top.sites[idx] for idx in sorted_conn]\n )\n top.add_connection(to_add_conn, update_types=False)", "def add_element_to_action(context_action_dict, sent_full_dict, type_object, final_index_current_verb, init_index_next_verb, verb_dct=None, question=None):\n for obj_dct in sent_full_dict['context'][type_object]:\n if final_index_current_verb <= obj_dct['indexes'][0] <= init_index_next_verb:\n if type_object == 'agents':\n handling_agent_componentes(context_action_dict, sent_full_dict, verb_dct, obj_dct)\n else:\n context_action_dict[question].append(copy.copy(obj_dct))\n return context_action_dict", "def add_routes(self):\n pass", "def routes_for_od(self, r, s):\n\n for path in self.paths_for_od(r, s):\n route = self._road_network.network.copy()\n\n for edge in route.edges_iter():\n u,v = edge\n if BaseRouter._edge_in_path(path, edge):\n route.edge[u][v]['pathweight'] = 0.1\n else:\n route.edge[u][v]['pathweight'] = 1.0\n\n # Set node weights\n for node in route:\n try:\n dist_from_path = nx.shortest_path_length(route,\n node,\n path[-1],\n weight='pathweight')\n except nx.NetworkXNoPath:\n dist_from_path = float('inf')\n\n route.node[node]['weight'] = math.exp(-self._beta*dist_from_path)\n\n \n # Create policy from weights\n def edge_weight(u,v):\n return route.node[v]['weight']*math.exp(\n -self._beta*route.edge[u][v]['pathweight'])\n\n for node in route:\n edges = route.edges(node)\n total_edge_weight = reduce(\n lambda x,y: x+y,\n [edge_weight(u,v) for u,v in edges],\n 0.0)\n\n for u,v in edges:\n route[u][v]['weight'] = \\\n edge_weight(u, v) / float(total_edge_weight)\n\n yield Route(route, path)", "def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path", "def append(self, rewards, criteria):\n for val in rewards:\n agent_id = list(val.keys())[0]\n if agent_id not in self.data.keys():\n self.data.update({agent_id: self.data_template(self._data_key)})\n agent_data = self.data[agent_id]\n for _k in self._data_key:\n agent_data[_k].append(val[agent_id][_k])\n\n for dict_val in criteria:\n for _ag_id, val in dict_val.items():\n if _ag_id not in self.data.keys():\n self.data.update({_ag_id: dict()})\n for k, v in val.items():\n if k not in self.data[_ag_id].keys():\n self.data[_ag_id].update({k: [v]})\n else:\n self.data[_ag_id][k].append(v)", "def add_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.add_edge(i, j)", "def _select_inputs_many_to_many(self):\r\n # Select the 
origins present in this chunk of predefined OD pairs\r\n self.logger.debug(\"Selecting origins for this chunk...\")\r\n origins_in_chunk = set([pair[0] for pair in self.od_pairs])\r\n if isinstance(self.od_pairs[0][0], (int, float,)):\r\n origin_string = \", \".join([str(o_id) for o_id in origins_in_chunk])\r\n else:\r\n origin_string = \"'\" + \"', '\".join([str(o_id) for o_id in origins_in_chunk]) + \"'\"\r\n origins_where_clause = f\"{self.origin_id_field} IN ({origin_string})\"\r\n self.logger.debug(f\"Origins where clause: {origins_where_clause}\")\r\n self.input_origins_layer_obj = helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.MakeFeatureLayer,\r\n [self.origins, self.input_origins_layer, origins_where_clause]\r\n ).getOutput(0)\r\n num_origins = int(arcpy.management.GetCount(self.input_origins_layer).getOutput(0))\r\n self.logger.debug(f\"Number of origins selected: {num_origins}\")\r\n # Select the destinations present in this chunk of predefined OD pairs\r\n self.logger.debug(\"Selecting destinations for this chunk...\")\r\n dests_in_chunk = set([pair[1] for pair in self.od_pairs])\r\n if isinstance(self.od_pairs[0][1], (int, float,)):\r\n dest_string = \", \".join([str(d_id) for d_id in dests_in_chunk])\r\n else:\r\n dest_string = \"'\" + \"', '\".join([str(d_id) for d_id in dests_in_chunk]) + \"'\"\r\n dests_where_clause = f\"{self.dest_id_field} IN ({dest_string})\"\r\n self.logger.debug(f\"Destinations where clause: {dests_where_clause}\")\r\n self.input_dests_layer_obj = helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.MakeFeatureLayer,\r\n [self.destinations, self.input_destinations_layer, dests_where_clause]\r\n ).getOutput(0)\r\n num_dests = int(arcpy.management.GetCount(self.input_destinations_layer).getOutput(0))\r\n self.logger.debug(f\"Number of destinations selected: {num_dests}\")", "def insert(self, words, handler):\n current_node = self.root\n\n for word in words:\n if word not in current_node.children:\n current_node.children[word] = RouteTrieNode()\n current_node = current_node.children[word]\n\n current_node.handler = handler", "def _add_mapping(self, mother_element: GraphElement,\n daughter_element: GraphElement) -> None:\n pass", "def create_ig_route(self, config):\n for vpc_id, vpc_config in config.iteritems():\n for route in vpc_config[\"RouteTables\"]:\n resource = self.ec2.RouteTable(route[\"RouteTableId\"])\n for route in resource.routes:\n route_exists = False\n for ig in vpc_config[\"InternetGateways\"]:\n route_exists = False\n if ig[\"InternetGatewayId\"] == route[\"GatewayId\"]:\n route_exists = True\n break\n if not route_exists:\n resource.create_route(\n DestinationCidrBlock=\"0.0.0.0/0\",\n GatewayId=ig[\"InternetGatewayId\"],\n )", "def setup_intervlan_host_routes(self):\n if self.routers:\n for src in self.host_information:\n src_host = self.host_information[src]['host']\n src_vlan = self.host_information[src]['vlan']\n src_ip = self.host_information[src]['ip']\n for dst in self.host_information:\n if src != dst:\n dst_host = self.host_information[dst]['host']\n dst_vlan = self.host_information[dst]['vlan']\n dst_ip = self.host_information[dst]['ip']\n if src_vlan != dst_vlan and self.is_routed_vlans(src_vlan, dst_vlan):\n src_faucet_vip = self.faucet_vips[src_vlan]\n dst_faucet_vip = self.faucet_vips[dst_vlan]\n self.add_host_route(src_host, dst_ip, src_faucet_vip.ip)\n self.add_host_route(dst_host, src_ip, dst_faucet_vip.ip)", "def store_matches(\n self,\n parameter_handler: ImproperTorsionHandler,\n topology: 
Topology,\n ) -> None:\n if self.key_map:\n self.key_map = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n parameter_handler._assert_correct_connectivity(\n val,\n [\n (0, 1),\n (1, 2),\n (1, 3),\n ],\n )\n n_terms = len(val.parameter_type.k)\n for n in range(n_terms):\n smirks = val.parameter_type.smirks\n non_central_indices = [key[0], key[2], key[3]]\n\n for permuted_key in [\n (\n non_central_indices[i],\n non_central_indices[j],\n non_central_indices[k],\n )\n for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]\n ]:\n topology_key = ImproperTorsionKey(\n atom_indices=(key[1], *permuted_key),\n mult=n,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n associated_handler=\"ImproperTorsions\",\n )\n self.key_map[topology_key] = potential_key", "def _add_descriptors(related):\n\n for r in related:\n r[\"descriptors\"] = []\n for edge in G.edges(data=True):\n sibling_idx = _get_connected(edge, r[\"tokenIndex\"])\n if sibling_idx and (A.lookup[int(sibling_idx)][\"pos\"] == \"JJ\" or edge[2][\"dep\"] in [\"amod\", \"compound\"]):\n r[\"descriptors\"].append(\n {\n \"tokenIndex\": sibling_idx,\n \"rawName\": A.lookup[int(sibling_idx)][\"word\"]\n }\n )\n\n if sibling_idx and \"NN\" in A.lookup[int(sibling_idx)][\"pos\"] and \"amod\" in edge[2][\"dep\"]:\n additional_related = _get_cousin(sibling_idx, [\"nmod\"])\n for add in set(additional_related):\n related = _add_related(add, \"nmod\", related, A.index_lookup[add],\n connector=G.nodes[sibling_idx]['word'])\n return related", "def add_edge(i, j):\n if (i, j) in edges or (j, i) in edges:\n # Si ya esta agregado en la lista no agrega nada\n return\n edges.add( (i, j) )\n edge_points.append(points[ [i, j] ])", "def orifices_from_hydamo(self, orifices):\n # Convert to dflowfm input\n geconverteerd = hydamo_to_dflowfm.generate_orifices(orifices)\n # Add to dict\n for orifice in geconverteerd.itertuples():\n self.structures.add_orifice(\n id=orifice.code,\n branchid=orifice.branch_id,\n chainage=orifice.branch_offset, \n crestlevel=orifice.laagstedoorstroomhoogte,\n crestwidth=orifice.laagstedoorstroombreedte,\n gateloweredgelevel=orifice.schuifhoogte,\n uselimitflowpos=orifice.uselimitflow,\n limitflowpos=orifice.limitflow,\n uselimitflowneg=orifice.uselimitflow,\n limitflowneg=orifice.limitflow,\n corrcoeff=orifice.afvoercoefficient \n )", "def add_edges_from(self, edges_to_add, **attr):\n for e in edges_to_add:\n if len(e) == 3:\n u, v, d = e\n else:\n u, v = e\n d = {}\n u, v = sorted([e[0], e[1]])\n d = {**attr, **d}\n self.add_edge(u, v, **d)", "def store_matches(\n self,\n parameter_handler: ProperTorsionHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n self.key_map: dict[ProperTorsionKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n n_terms = len(val.parameter_type.phase)\n for n in range(n_terms):\n smirks = param.smirks\n if param.k_bondorder:\n # The relevant bond order is that of the _central_ bond in the torsion\n bond = topology.get_bond_between(key[1], key[2])\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = ProperTorsionKey(\n atom_indices=key,\n mult=n,\n bond_order=fractional_bond_order,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n 
associated_handler=\"ProperTorsions\",\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=list(topology.propers),\n )", "def store_potentials(self, parameter_handler: AngleHandler) -> None:\n for potential_key in self.key_map.values():\n smirks = potential_key.id\n parameter = parameter_handler.parameters[smirks]\n potential = Potential(\n parameters={\n parameter_name: getattr(parameter, parameter_name)\n for parameter_name in self.potential_parameters()\n },\n )\n self.potentials[potential_key] = potential", "def decision_process(self) -> None:\n # order routes by preference\n self.adj_rib_in.preference_ordering()\n # for each route insert the best in the loc_rib\n for destination in self.adj_rib_in:\n best_route = destination[0]\n # if there as been a change insert the new route in the adj-rib-out\n old_best = None\n if self.loc_rib.exists(best_route):\n old_best = self.loc_rib[best_route]\n if self.loc_rib.insert(best_route) is not None:\n for neigh in self.nodes_rib_out:\n # Case 1, the RIB out doesn't contains a route for the destination\n if not self.nodes_rib_out[neigh].exists(best_route):\n # Insert the new best as an Advertisement\n self.nodes_rib_out[neigh].insert(best_route)\n # If the Old best is not none insert it as a withdraw\n if old_best is not None and \\\n not self.nodes_rib_out[neigh].exists_withdraws(best_route):\n self.nodes_rib_out[neigh].insert_withdraw(old_best)\n # Case 2, The Rib contains a Route for the detination\n else:\n # Remove the route from the advertisements\n self.nodes_rib_out[neigh].remove(old_best)\n if len(self.nodes_rib_out[neigh][old_best]) == 0:\n del self.nodes_rib_out[neigh][old_best]\n # If the route in the withdraws is equal to the new best don't do anything\n # Otherwise insert the new route as an advertisement\n if self.nodes_rib_out[neigh].exists_withdraws(best_route) and \\\n best_route in self.nodes_rib_out[neigh].get_withdraws(best_route):\n self.nodes_rib_out[neigh].remove_from_withdraws(best_route)\n else:\n self.nodes_rib_out[neigh].insert(best_route)\n # Evaluation if something has to be removed from the LOC rib and withdrawd\n for destination in self.loc_rib:\n if not self.adj_rib_in.exists(destination):\n del self.loc_rib[destination]\n for neigh in self.nodes_rib_out:\n # if self.nodes_rib_out[neigh].exists(destination):\n # del self.nodes_rib_out[neigh][destination]\n self.nodes_rib_out[neigh].insert_withdraw(destination)", "def add_tag(self, tag):\n\n # directional relation: tag is the blank of everything in the list\n self.relations[tag] = {\n \"overlord\": [],\n \"hegemon\": [], # for tributary\n \"tributary\": [],\n \"vassal\": [],\n \"guaranteeing\": [],\n \"guarantor\": [],\n \"alliance\": [],\n \"senior\": [],\n \"junior\": [],\n \"marriage\": []\n }", "def test_add_trivalent_lone_pair_virtual_site(self, molecule):\n # Do not modify the original molecule.\n molecule = copy.deepcopy(molecule)\n\n atom1 = molecule.atoms[0]\n atom2 = molecule.atoms[1]\n atom3 = molecule.atoms[2]\n atom4 = molecule.atoms[3]\n distance = 0.3 * unit.angstrom\n out_of_plane_angle = 30 * unit.degree\n in_plane_angle = 0.2 * unit.radian\n vsite1_index = molecule.add_trivalent_lone_pair_virtual_site(\n [atom1, atom2, atom3, atom4], distance, out_of_plane_angle, in_plane_angle\n )\n # Test for assertion when giving too few atoms\n with pytest.raises(AssertionError) as excinfo:\n 
vsite1_index = molecule.add_trivalent_lone_pair_virtual_site(\n [atom1, atom2, atom3], distance, out_of_plane_angle, in_plane_angle\n )\n molecule_dict = molecule.to_dict()\n molecule2 = Molecule.from_dict(molecule_dict)\n assert molecule.to_dict() == molecule2.to_dict()", "def route_layout(self):\n self.route_pins()\n self.route_internal()\n self.route_supplies()", "def add_epoxy(atom, atom_list, added_functional_groups, ct):\n global anywhere_map\n global edge_map\n current_size = len(atom_list)\n list_of_n = [x[0] for x in identify_bonds(atom, atom_list) if (x[0].atom_name != \"CY\")]\n if (len(list_of_n) != 0):\n atom2 = random.choice(list_of_n)\n epoxy_atom = Atom( current_size + 1, 'OE', 'E1A', str(added_functional_groups + 1), float(\"{0:.3f}\".format(abs(atom.x - atom2.x) / 2 + min(atom.x, atom2.x))), float(\"{0:.3f}\".format(abs(atom.y - atom2.y) / 2 + min(atom.y, atom2.y))), float(\"{0:.3f}\".format(ct * 1.46 * math.sin(math.radians(60)))) )\n atom_list.append(epoxy_atom)\n if ((len(identify_bonds(epoxy_atom, atom_list)) == 2) and ((identify_bonds(epoxy_atom, atom_list)[0][0].atom_number == atom.atom_number) or (identify_bonds(epoxy_atom, atom_list)[1][0].atom_number == atom.atom_number)) and ((identify_bonds(epoxy_atom, atom_list)[0][0].atom_number == atom2.atom_number) or ((identify_bonds(epoxy_atom, atom_list)[1][0].atom_number == atom2.atom_number)))):\n CY = Atom(atom.atom_number, 'CY', 'E1A', epoxy_atom.residue_number, atom.x, atom.y, atom.z)\n CY2 = Atom(atom2.atom_number, 'CZ', 'E1A', epoxy_atom.residue_number, atom2.x, atom2.y, atom2.z)\n atom_list.remove(atom); atom_list.remove(atom2)\n atom_list.append(CY); atom_list.append(CY2) \n if atom in edge_map: edge_map.remove(atom)\n if atom in anywhere_map: anywhere_map.remove(atom)\n if atom2 in edge_map: edge_map.remove(atom2)\n if atom2 in anywhere_map: anywhere_map.remove(atom2)\n else:\n atom_list.remove(epoxy_atom)\n del epoxy_atom\n return atom_list", "def set_routing(self, rinfo):\n\n self.routing = [ self.Routing(*r) for r in rinfo ]", "def _add_relationships(self, element: Element) -> None:\n elements: Set[str] = {v.id for v in self.element_views}\n\n for relationship in element.get_efferent_relationships():\n if relationship.destination.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n )\n\n for relationship in element.get_afferent_relationships():\n if relationship.source.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n )", "def meshRelationships(Objects):\r\n # Create some variables to be used to store objects\r\n foreheadVariable = []\r\n noseBridgeVariable = []\r\n noseVariable = []\r\n eyeVariable = []\r\n mouthLoopVariable = []\r\n mouthVariable = []\r\n cheekVariable = []\r\n chinVariable = []\r\n earVariable = []\r\n backHeadVariable = []\r\n lowerBackHeadVariable = []\r\n\r\n # Create the relationshipList\r\n relationshipList = []\r\n\r\n for forehead in Objects:\r\n if \"TubxForehead_geo_\" in forehead:\r\n foreheadVariable.append(forehead)\r\n\r\n for noseBridge in Objects:\r\n if \"TubxNoseBridge_geo_\" in noseBridge:\r\n noseBridgeVariable.append(noseBridge)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, noseBridge, forehead)\r\n\r\n for eye in Objects:\r\n if \"TubxEye_geo_\" in eye:\r\n eyeVariable.append(eye)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, eye, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n 
createRelationships(relationshipList, eye, noseBridge)\r\n\r\n for nose in Objects:\r\n if \"TubxNose_geo_\" in nose:\r\n noseVariable.append(nose)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, nose, noseBridge)\r\n\r\n for mouthLoop in Objects:\r\n if \"TubxMouthLoop_geo_\" in mouthLoop:\r\n mouthLoopVariable.append(mouthLoop)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList, mouthLoop, nose)\r\n\r\n for mouth in Objects:\r\n if \"TubxMouth_geo_\" in mouth:\r\n mouthVariable.append(mouth)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, mouth, mouthLoop)\r\n\r\n for cheek in Objects:\r\n if \"TubxCheek_geo_\" in cheek:\r\n cheekVariable.append(cheek)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, cheek, mouthLoop)\r\n\r\n for chin in Objects:\r\n if \"TubxChin_geo_\" in chin:\r\n chinVariable.append(chin)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, chin, mouthLoop)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, chin, cheek)\r\n\r\n for ear in Objects:\r\n if \"TubxEar_geo_\" in ear:\r\n earVariable.append(ear)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, ear, forehead)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, ear, cheek)\r\n\r\n for backhead in Objects:\r\n if \"TubxBackHead_geo_\" in backhead:\r\n backHeadVariable.append(backhead)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, backhead, forehead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, backhead, ear)\r\n\r\n for lowerbackhead in Objects:\r\n if \"TubxLowerBackHead_geo_\" in lowerbackhead:\r\n lowerBackHeadVariable.append(lowerbackhead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, lowerbackhead, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, lowerbackhead, backhead)\r\n\r\n for default in Objects:\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, default, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, default, noseBridge)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList,default,nose)\r\n for eye in eyeVariable:\r\n createRelationships(relationshipList, default, eye)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, default, mouthLoop)\r\n for mouth in mouthVariable:\r\n createRelationships(relationshipList, default, mouth)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, default, cheek)\r\n for chin in chinVariable:\r\n createRelationships(relationshipList, default, chin)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, default, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, default, backhead)\r\n for lowerbackhead in lowerBackHeadVariable:\r\n createRelationships(relationshipList, default, lowerbackhead)\r\n\r\n return relationshipList", "def _general_set_neighs(self, key):\n if type(key) == list:\n self._set_neighs_general_list(key)\n elif type(key) == np.ndarray:\n self._set_neighs_general_array(key)\n elif type(key) in inttypes:\n self._set_neighs_number(key)\n else:\n# print key\n raise TypeError(\"Incorrect neighs input in pst.Neighs_Info\")", "def _link(self):\n for exp_atom in self['exp'].atoms:\n if exp_atom.isTolerated():\n continue\n for 
model_atom in self[exp_atom.model_compound.name].atoms:\n inv = exp_atom.get_active_invariom()\n if inv in model_atom.invarioms.keys():\n exp_atom.set_invariom_atom(model_atom)\n model_atom.set_invariom_atom(exp_atom)\n exp_atom.set_active_invariom(inv)\n model_atom.set_active_invariom(inv)\n break", "def add_route(app, *args):\n for route in args:\n app.router.add_route(route[0], route[1], route[2])", "def add_adjacent_vert(self, new_vert, vert_dict, action):\n self.adjacent_vert_dict[new_vert] = action\n vert_dict[new_vert.get_id()].pointed_vert_dict[self] = action", "def addNeighbor(self, neighbor):", "def add_neighbours(self, router1, router2):\n router1 = self.routers[router1]\n router2 = self.routers[router2]\n\n router1.add_neighbour(router2)\n router2.add_neighbour(router1)", "def test_relation_ways_inserted():\n park = query_row(db_conf, 'osm_landusages', -9201)\n assert park['type'] == 'park'\n assert park['name'] == '9209'\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9209)\n assert road['type'] == 'secondary'\n assert road['name'] == '9209'\n road = query_row(db_conf, 'osm_roads', 9210)\n assert road['type'] == 'residential'\n assert road['name'] == '9210'\n\n park = query_row(db_conf, 'osm_landusages', -9301)\n assert park['type'] == 'park'\n assert park['name'] == '' # no name on relation\n\n # outer ways of multipolygon stand for their own\n road = query_row(db_conf, 'osm_roads', 9309)\n assert road['type'] == 'secondary'\n assert road['name'] == '9309'\n road = query_row(db_conf, 'osm_roads', 9310)\n assert road['type'] == 'residential'\n assert road['name'] == '9310'", "def optimizedRoutePossibilities2(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tpath = find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tif couple[0] in graph[path[-1]]:\n\t\t\t\tyield path", "def _extract_opinions(self):\n self.data['adjectives'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, ADJ))\n self.data['adverbs'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, ADV))\n self.data['verbs'] = self.data['sentences'].apply(lambda x: self._extract_pos(x, VERB))", "def insert(self, route, handler):\n curr = self.root\n for part in route:\n curr = curr.insert(part)\n curr.handler = handler", "def _build_pairs_for_eval(self):\n rec = list()\n for idx1 in range(len(self)):\n idx2, is_similar = self._get_sec_idx_and_is_similar(idx1)\n rec.append((idx2, is_similar))\n self._pairs_for_eval = rec", "def _populate_rules(self,rules):\n rule2_index = {}\n for rule in rules:\n relation = rule.get_attribute('relation')[0] # vals are now lists\n ##: Time signals of IS_INCLUDED should not be used in relative time evaluation. They may cause confusion.\n ##: E.g., ... after 3 days in hospital.... \"3 days in\" is picked instead of \"after 3 days\" \n if relation=='IS_INCLUDED':\n continue\n \n signal = rule.get_attribute('signal')[0]\n confidence = float(rule.get_attribute('confidence')[0])\n rule2_index[signal] = (relation, confidence)\n return rule2_index", "def add(self, *items):\n for p in items:\n if not isinstance(p, Point):\n raise TypeError(\"items added to a Route must be a Point. 
got `{}`.\"\\\n .format(type(p)))\n super().add(p)", "def edge_mapping(self):\n ...", "def intents_clustering(self):\n self.phrs2intents = {}\n number_of_other = 10000;\n for i in range(len(self.data)):\n for ut in self.data[i]['utterances']:\n if ut['speaker'] == 'USER':\n if 'segments' in ut.keys():\n for seg in ut['segments']:\n if 'annotations' in seg.keys():\n for anno in seg['annotations']:\n name = anno['name']\n if ut['text'] not in self.phrs2intents.keys():\n self.phrs2intents[ ut['text'] ] = [name]\n elif name not in self.phrs2intents[ ut['text'] ]:\n self.phrs2intents[ ut['text'] ].append(name)\n else:\n if number_of_other > 0:\n self.phrs2intents[ ut['text'] ] = ['other']\n number_of_other -= 1\n self.X = np.array(list(self.phrs2intents.keys()))", "def __addNewASG2Tracker( self, metaModelName, ASGgraph ): \r\n self.__trackASG[ self.__sanitizeMetaModelName( metaModelName ) ] = \\\r\n [ASGgraph, cloningMachine(ASGgraph.nodeTypes)]", "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "def addSNMPRoutes(self, routingtable):\n\n ipCidrRouteDest = \"\"\n ipCidrRouteNextHopAS = \"\"\n ipCidrRouteMetric1 = 0\n ipCidrRouteMetric2 = 0\n ipCidrRouteMetric3 = 0\n ipCidrRouteMetric4 = 0\n ipCidrRouteMetric5 = 0\n ipCidrRouteStatus = 0\n ipCidrRouteMask = \"\"\n ipCidrRouteTos = 0\n ipCidrRouteNextHop = \"\"\n ipCidrRouteIfIndex = 0\n ipCidrRouteType = 0\n ipCidrRouteProto = 0\n ipCidrRouteAge = 0\n ipCidrRouteInfo = 0\n\n for loop_rtIndex in routingtable:\n for ifAttr in routingtable[loop_rtIndex]:\n if ifAttr == 1:\n ipCidrRouteDest = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 10:\n ipCidrRouteNextHopAS = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 11:\n ipCidrRouteMetric1 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 12:\n ipCidrRouteMetric2 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 13:\n ipCidrRouteMetric3 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 14:\n ipCidrRouteMetric4 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 15:\n ipCidrRouteMetric5 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 16:\n ipCidrRouteStatus = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 2:\n ipCidrRouteMask = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 3:\n ipCidrRouteTos = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 4:\n ipCidrRouteNextHop = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 5:\n ipCidrRouteIfIndex = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 6:\n ipCidrRouteType = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 7:\n ipCidrRouteProto = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 8:\n ipCidrRouteAge = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 9:\n ipCidrRouteInfo = routingtable[loop_rtIndex][ifAttr]\n\n self.routingtable[loop_rtIndex] = device_routingtable( \\\n ipCidrRouteDest, ipCidrRouteNextHopAS, ipCidrRouteMetric1, \\\n ipCidrRouteMetric2, ipCidrRouteMetric3, ipCidrRouteMetric4, \\\n ipCidrRouteMetric5, ipCidrRouteStatus, ipCidrRouteMask, \\\n ipCidrRouteTos, ipCidrRouteNextHop, ipCidrRouteIfIndex, \\\n ipCidrRouteType, ipCidrRouteProto, ipCidrRouteAge, \\\n ipCidrRouteInfo)", "def random_interroute_insertion(customer, solution, customers):\n posbyroute = factible_positions_in_routes(customer, solution, customers)\n #print(\"posbyroute = \",posbyroute)\n if posbyroute:\n nroute,positions = 
random.choice(posbyroute)\n ripos = random.choice(positions)\n solution.routes[nroute].insert(ripos, [customer], customers)\n else:\n #print(\"New route!\")\n route = Aroute()\n route.insert(0, [customer], customers)\n solution.append(route)\n #print(\"solution = \", solution)", "def init_linkage():\n for case in AutoCase.objects.all():\n case.autolink()\n case.save()", "def add_route(g, origin, destination, distance, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n distance = int(distance)\n # Add route both ways\n if(choice_dir == \"y\"):\n g.city_dict[origin_code].add_flights_in((destination_code, distance))\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n \n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n g.city_dict[destination_code].add_flights_out((origin_code, distance))\n # Add route one way \n if(choice_dir == \"n\"):\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n \n \n \n return g", "def __routes(self, with_return):\n nonzeo_pois = list(filter(None, self.pois.keys()))\n\n for path in itertools.permutations(nonzeo_pois):\n steps = self.poi_distance(0, path[0])\n for i, j in zip(path, path[1:]):\n steps += self.poi_distance(i, j)\n if with_return:\n steps += self.poi_distance(path[-1], 0)\n yield steps", "def _add_switchs(self):\r\n lst = self.model.get_all_switch()\r\n\r\n for itm in lst:\r\n self._add_switch(itm)", "def all_routing_tree_2(G, tors1, tors2, table_file_name):\n \n table = OrderedDict({})\n for s in G.nodes():\n table[s] = OrderedDict({})\n for s in tors1:\n for d in tors2:\n if s != d:\n routing(G, s, d, table)\n for d in tors1:\n for s in tors2:\n if s != d:\n routing(G, s, d, table)\n\n with open(table_file_name, 'w') as file:\n file.write(json.dumps(table))\n return table", "def find_routes_to_observations(self, agent):\n best_ends_for_observations = {i: None for i in range(len(\n self.observation_areas[\n agent]))} # We want to find best route to each observation area, given that we have found any\n best_costs = {i: np.inf for i in range(len(self.observation_areas[agent]))}\n while len(self.observations[agent]) < self.N_subtrees:\n self.add_node(agent)\n\n observations_made = {i: [] for i in range(\n len(self.observation_areas[agent]))} # {i : [observation_nodes]} for each observation area\n\n for node in self.all_nodes[agent]:\n if node.observed:\n area = node.observation_area\n for area_index in range(len(self.observation_areas[agent])):\n if area == self.observation_areas[agent][area_index]:\n observations_made[area_index].append(\n node) # Append node to observation area \"area_index\" to keep track of which nodes were observed where\n\n for i in observations_made.keys():\n all_obs_nodes = observations_made[i]\n for node_temp in all_obs_nodes:\n cost_temp = np.sum(node_temp.path_costs.copy() + node_temp.terminal_costs.copy())\n if cost_temp < best_costs[i]:\n best_costs[i] = cost_temp\n best_ends_for_observations[i] = node_temp\n\n return best_ends_for_observations", "def _makeEdges(self):\n self.edges = set()\n\n for i in range(self.size):\n self.edges.add(makePair(self.tour[i - 1], self.tour[i]))", "def test_get_all_routes(self):\n\n post = {\n 'ip': 'test_ip',\n 'next_hop': 'test_nexthop',\n 'communities': 'test_commu'\n }\n post2 = {\n 'ip': 'test_ip2',\n 'next_hop': 'test_nexthop2',\n 'communities': 'test_commu2'\n }\n route1_id = self.database.add_route(post)\n 
route2_id = self.database.add_route(post2)\n post3 = self.database.get_all_routes()\n self.assertFalse(len(post3) > 2,\n 'Database was not empty before this function')\n self.database.delete_route({'_id': route1_id})\n self.database.delete_route({'_id': route2_id})\n for r in post3:\n if r['ip'] == post['ip']:\n self.assertEqual(r['ip'], post['ip'], 'insertion failed')\n self.assertEqual(r['next_hop'], post['next_hop'],\n 'insertion failed')\n self.assertEqual(r['communities'], post['communities'],\n 'insertion failed')\n else:\n self.assertEqual(r['ip'], post2['ip'], 'insertion failed')\n self.assertEqual(r['next_hop'], post2['next_hop'],\n 'insertion failed')\n self.assertEqual(r['communities'], post2['communities'],\n 'insertion failed')", "def _build_routes(self, routes, allow_redundant_targets=True):\n routes = routes or ()\n joins = {}\n targets_seen = set()\n\n for route in routes:\n if isinstance(route, dict):\n source_label = route.get('source')\n target_label = route.get('target')\n field_label = route.get('field')\n symmetrical = route.get('symmetrical')\n else:\n warnings.warn('Routes are now defined as dicts',\n DeprecationWarning)\n source_label, target_label, field_label, symmetrical = route\n\n # get models\n source = self.get_model(source_label, local=False)\n target = self.get_model(target_label, local=False)\n\n field = None\n\n # get field\n if field_label:\n model_name, field_name = field_label.split('.', 1)\n model_name = model_name.lower()\n\n # determine which model the join field specified exists on\n if model_name == source.__name__.lower():\n field = self.get_field(field_name, source)\n elif model_name == target.__name__.lower():\n field = self.get_field(field_name, target)\n else:\n raise TypeError('model for join field, \"{0}\", '\n 'does not exist'.format(field_name))\n\n if isinstance(field, RelatedObject):\n field = field.field\n\n if not allow_redundant_targets:\n if target in targets_seen:\n raise ValueError('Model {0} cannot be the target of '\n 'more than one route in this list'\n .format(target_label))\n else:\n targets_seen.add(target)\n\n # The `joins` hash defines pairs which are explicitly joined\n # via the specified field. 
If no field is defined, then the\n # join field is implied or does not matter; the route is reduced\n # to a straight lookup.\n joins[(source, target)] = field\n\n if symmetrical:\n if not allow_redundant_targets:\n if source in targets_seen:\n raise ValueError('Model {0} cannot be the target of '\n 'more than one route in this list'\n .format(source_label))\n else:\n targets_seen.add(source)\n\n joins[(target, source)] = field\n\n return joins", "def roads_all(osm_path): \n return (retrieve(osm_path,'lines',['highway'])).rename(columns={'highway': 'asset'})", "def _addrule(self, nonterm, program, params, info):\n rule = Rule(nonterm, program, params, info)\n\n if not nonterm in self.rules:\n self.rules[nonterm] = []\n \n self.rules[nonterm].append(rule)", "def add(self, *nonterminals):\n # type: (Iterable[Type[Nonterminal]]) -> None\n for nonterm in nonterminals:\n if nonterm in self:\n continue\n _NonterminalSet._control_nonterminal(nonterm)\n super().add(nonterm)\n self._assign_map[nonterm] = set()", "def load_routes():\n\n print (\"routes\")\n\n Route.query.delete()\n\n with open(\"seed_data/routes_seed.psv\") as routes:\n for row in routes:\n route, route_acronym = row.strip().split(\"|\")\n\n # Checks if seed is empty, if so, inserts a Null cell into the db\n acronym = None if route_acronym == 'None' else route_acronym\n\n route = Route(route=route,\n route_acronym=acronym)\n\n\n db.session.add(route)\n\n db.session.commit()", "def add_d_axioms(self):\n # #1\n items = self.literal_proxies.lit_to_bnode.items()\n items = ((lt, bn) for lt, bn in items if lt.dt is not None)\n for lt, bn in items:\n self.graph.add((bn, rdf_type, lt.dt))\n\n for t in RDFS_D_Axiomatic_Triples:\n self.graph.add(t)", "def _set_neighs_general_array(self, key):\n key = np.array([key]) if type(key) in inttypes else key\n sh = key.shape\n ## If only array of neighs\n if len(sh) == 0:\n self._set_neighs_number(key)\n# self._setted = False\n# if self.staticneighs:\n# self.idxs = np.array([[]])\n# else:\n# self.idxs = np.array([[[]]])\n elif len(sh) == 1:\n self._set_neighs_array_lvl1(key)\n ## If only iss and neighs\n elif len(sh) == 2:\n self._set_neighs_array_lvl2(key)\n elif len(sh) == 3:\n self._set_neighs_array_lvl3(key)", "def moveOPeople(self, li: list):\r\n busyOpts = sum(li[7:-2])\r\n freeOpts = self.NOPTS - busyOpts\r\n\r\n li[21] += li[20]\r\n li[20] = 0\r\n li = self.skipO2n(li, 19)\r\n\r\n li[19] = li[18]\r\n li[18] = 0\r\n li = self.skipO2n(li, 17)\r\n\r\n li[17] = li[16]\r\n li[16] = 0\r\n li = self.skipO2n(li, 15)\r\n\r\n li[15] = li[14]\r\n li[14] = 0\r\n li = self.skipO2n(li, 13)\r\n\r\n li[13] = li[12]\r\n li[12] = 0\r\n li = self.skipO2n(li, 11)\r\n\r\n li[11] = li[10]\r\n li[10] = 0\r\n li = self.skipO2n(li, 9)\r\n\r\n li[9] = li[8]\r\n li[8] = 0\r\n li = self.skipO2n(li, 7)\r\n li[7] = 0\r\n\r\n\r\n # # Add the number of free optomologists to O1\r\n toAdd = min(freeOpts, li[6])\r\n li[7] += toAdd\r\n li[6] -= toAdd\r\n\r\n return li", "def generate_antonym_pairs(config: SettingConfig) -> dict:\n print(f\"Generating initial antonym pairs from RoWordNet @ {datetime.now()}\")\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = 
wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get the outbound relations of type antonym from\n outbound_relations = filter(lambda x: x[1] == 'near_antonym', wn.outbound_relations(synset_id))\n\n # Iterate outbound relations\n for relation in outbound_relations:\n # Get the synset corresponding to the target of the outbound relation\n target_synset = wn.synset(relation[0])\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_cross_synset_pairs(synset, target_synset)\n\n # Add the current set of pairs\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n # Return the whole dictionary\n print(f\"Successfully generated antonym paris @ {datetime.now()}\")\n return pairs", "def add_edges(self, *edges):\r\n for eds in edges:\r\n if not isinstance(eds, list):\r\n eds = [eds] # Make list of one\r\n for edge in eds:\r\n points = edge.get_points()\r\n self.add_points(points)", "def social_infrastructure_point(osm_path): \n df_all = retrieve(osm_path,'points',['other_tags']).rename(columns={'other_tags': 'asset'}) \n \n #get requested healthcare assets categorized under the key 'healthcare' with correct formatting \n df_h = healthcare_filter(df_all)\n \n #get requested healthcare assets categorized under the key 'amenity' \n df_a = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'amenity' in df_all[\"asset\"][row]: \n if not 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_a = df_a.append(df_all.loc[row]) #if so, save in df\n \n if '\"amenity\"=>\"doctors\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"amenity\"=>\"pharmacy\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'pharmacy'\n elif '\"amenity\"=>\"hospital\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'hospital'\n elif '\"amenity\"=>\"clinic\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'clinic'\n elif '\"amenity\"=>\"dentist\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'dentist'\n else:\n df_a = df_a.drop(index=row)\n \n df_social_points = df_a.append(df_h)\n \n return df_social_points.reset_index(drop=True)", "def roads(osm_path): \n return (retrieve(osm_path,'lines',['highway'],**{'highway':[\"='motorway' or \",\"='motorway_link' or \",\"='trunk' or \",\"='trunk_link' or \",\"='primary' or \",\"='primary_link' or \",\"='secondary' or \",\"='secondary_link' or \",\"='tertiary' or \",\"='tertiary_link' or \",\"='residential' or \",\"='road' or \",\"='unclassified' or \",\"='living_street'\"]})).rename(columns={'highway': 'asset'})", "def ApplyAllRouteInfoCorrections(RouteInfo):\r\n\t# correction for (1)\r\n\tCorrectedRouteInfo = []\r\n\tPrevLineID = None \r\n\tPrevFahrtID = None\r\n\tHstNrLimit = 1000\r\n\tCorrectedConnInfo = None \r\n\tIfCorrectNextConn = False\r\n\r\n\t# check connnection by connection\r\n\tfor ConnInfo in RouteInfo:\r\n\t\thaltestelle_ab = ConnInfo[ConnInfoInd['station_from']]\r\n\t\thaltestelle_an = ConnInfo[ConnInfoInd['station_to']]\r\n\t\tCurrLineID = ConnInfo[ConnInfoInd['line_id']]\r\n\t\tankunft_std = 
ConnInfo[ConnInfoInd['arrival_hour']]\r\n\t\tankunft_min = ConnInfo[ConnInfoInd['arrival_min']]\r\n\t\tfahrt_id = ConnInfo[ConnInfoInd['travel_id']]\r\n\r\n\t\tif haltestelle_an < HstNrLimit:\r\n\t\t\tCorrectedConnInfo = list(ConnInfo)\r\n\t\t\tIfCorrectNextConn = True \r\n\t\t\tcontinue\r\n\r\n\t\tif haltestelle_ab < HstNrLimit:\r\n\t\t\tif ConnInfo[ConnInfoInd['line_id']]:\r\n\t\t\t\tCorrectedConnInfo[ConnInfoInd['line_id']] = ConnInfo[ConnInfoInd['line_id']]\r\n\t\t\telse:\r\n\t\t\t\tif fahrt_id == PrevFahrtID and PrevLineID:\r\n\t\t\t\t\tCorrectedConnInfo[ConnInfoInd['line_id']] = PrevLineID\r\n\r\n\t\t\tCorrectedConnInfo[ConnInfoInd['arrival_hour']] = ankunft_std\r\n\t\t\tCorrectedConnInfo[ConnInfoInd['arrival_min']] = ankunft_min\r\n\t\t\tCorrectedConnInfo[ConnInfoInd['station_to']] = haltestelle_an \r\n\t\t\tCorrectedRouteInfo.append(CorrectedConnInfo)\r\n\t\t\tIfCorrectNextConn = False \r\n\t\t\tcontinue\r\n\r\n\t\tCorrectedRouteInfo.append(ConnInfo)\r\n\t\tPrevLineID = CurrLineID\r\n\t\tPrevFahrtID = fahrt_id\r\n\r\n\t# return corrected RouteInfo\r\n\treturn CorrectedRouteInfo" ]
[ "0.55640215", "0.50584227", "0.50430346", "0.502575", "0.5020248", "0.50056565", "0.49825788", "0.4962253", "0.49563637", "0.4931308", "0.4920993", "0.49143758", "0.4910077", "0.48764232", "0.48703808", "0.48517448", "0.48504838", "0.4847047", "0.48325107", "0.4819616", "0.4808247", "0.4800137", "0.47835636", "0.47761127", "0.4776082", "0.47490698", "0.47436094", "0.47410846", "0.47388276", "0.4722531", "0.4710839", "0.470741", "0.46994036", "0.46972165", "0.46972114", "0.4697161", "0.46831313", "0.46804634", "0.46801814", "0.4677026", "0.46766508", "0.46739203", "0.46563962", "0.46541885", "0.46512407", "0.46392775", "0.46368062", "0.46320203", "0.46317253", "0.46281287", "0.461443", "0.46104714", "0.46073657", "0.46005747", "0.45976114", "0.45935085", "0.45931807", "0.45825952", "0.45816314", "0.45801973", "0.45791155", "0.4578571", "0.4565769", "0.45633993", "0.45589837", "0.4555899", "0.45521212", "0.4546398", "0.4546246", "0.454038", "0.4540232", "0.45381033", "0.4535201", "0.45351303", "0.45334047", "0.45298997", "0.45276797", "0.45270473", "0.4517619", "0.4517172", "0.45040187", "0.45039922", "0.45000952", "0.44971785", "0.44888774", "0.44839433", "0.44815144", "0.4480876", "0.44806135", "0.44715175", "0.44695202", "0.44620687", "0.44599625", "0.44489846", "0.44385085", "0.4437671", "0.44341695", "0.44310197", "0.44307306", "0.44304803" ]
0.612654
0
Create and solve a Route analysis for the designated preassigned origin-destination pairs.
def solve(self, chunk_definition):  # pylint: disable=too-many-locals, too-many-statements, too-many-branches
    # Select the inputs to process
    if self.pair_type is helpers.PreassignedODPairType.one_to_one:
        self._select_inputs_one_to_one(chunk_definition)
    elif self.pair_type is helpers.PreassignedODPairType.many_to_many:
        self._get_od_pairs_for_chunk(chunk_definition)
        self._select_inputs_many_to_many()
    else:
        raise NotImplementedError(f"Invalid PreassignedODPairType: {self.pair_type}")

    # Initialize the Route solver object
    self.initialize_rt_solver()
    self._add_unique_id_fields()

    # Insert the origins and destinations
    self.logger.debug(f"Route solver fields transferred from Origins: {self.origin_transfer_fields}")
    self.logger.debug(f"Route solver fields transferred from Destinations: {self.destination_transfer_fields}")
    if self.pair_type is helpers.PreassignedODPairType.one_to_one:
        self._insert_stops_one_to_one()
    elif self.pair_type is helpers.PreassignedODPairType.many_to_many:
        self._insert_stops_many_to_many()
    else:
        raise NotImplementedError(f"Invalid PreassignedODPairType: {self.pair_type}")

    if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0:
        # There were no valid destinations for this set of origins
        self.logger.debug("No valid destinations for this set of origins. Skipping Route calculation.")
        return

    # Load barriers
    # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of
    # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the
    # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers,
    # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal
    # solves. However, since barriers is likely an unusual case, deal with this only if it becomes a problem.
    for barrier_fc in self.barriers:
        self.logger.debug(f"Loading barriers feature class {barrier_fc}...")
        shape_type = arcpy.Describe(barrier_fc).shapeType
        if shape_type == "Polygon":
            class_type = arcpy.nax.RouteInputDataType.PolygonBarriers
        elif shape_type == "Polyline":
            class_type = arcpy.nax.RouteInputDataType.LineBarriers
        elif shape_type == "Point":
            class_type = arcpy.nax.RouteInputDataType.PointBarriers
        else:
            self.logger.warning(
                f"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored."
            )
            continue
        barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True)
        self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True)

    # Solve the Route analysis
    self.logger.debug("Solving Route...")
    solve_start = time.time()
    self.solve_result = self.rt_solver.solve()
    solve_end = time.time()
    self.logger.debug(f"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.")

    # Handle solve messages
    solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)]
    for msg in solve_msgs:
        self.logger.debug(msg)

    # Update the result dictionary
    self.job_result["solveMessages"] = solve_msgs
    if not self.solve_result.solveSucceeded:
        self.logger.debug("Solve failed.")
        return
    self.logger.debug("Solve succeeded.")
    self.job_result["solveSucceeded"] = True

    # Save output
    self._export_to_feature_class(chunk_definition)
    self.logger.debug("Finished calculating Route.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n# distance_matrix = compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n# if assignment:\n# print_solution(manager, routing, assignment)\n if assignment:\n address1,address2,address3,address4,address5,address6,address7,address8,address9,address10=\\\n set_address_path(manager, routing, assignment,destinations_1)\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def main():\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = 
compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)", "def global_plan(\n world: carla.World, # pylint: disable=no-member\n origin: carla.Location, # pylint: disable=no-member\n destination: carla.Location, # pylint: disable=no-member\n) -> Tuple[Sequence[carla.Waypoint], Sequence[Any], float]: # pylint: disable=no-member\n try:\n from agents.navigation.global_route_planner import GlobalRoutePlanner # pylint: disable=import-error\n from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO # pylint: disable=import-error\n except ImportError:\n raise ImportError(\n \"Missing CARLA installation, \"\n \"make sure the environment variable CARLA_ROOT is provided \"\n \"and that the PythonAPI is `easy_install`ed\")\n\n # Setup global planner.\n grp_dao = GlobalRoutePlannerDAO(wmap=world.get_map(), sampling_resolution=1)\n grp = GlobalRoutePlanner(grp_dao)\n grp.setup()\n # Generate plan.\n waypoints, roadoptions = zip(*grp.trace_route(origin, destination))\n # Accummulate pairwise distance.\n distances = [0.0]\n for i in range(1, len(waypoints)):\n loc_tm1 = waypoints[i - 1].transform.location\n loc_tm1 = np.asarray([loc_tm1.x, loc_tm1.y, loc_tm1.z])\n loc_t = waypoints[i].transform.location\n loc_t = np.asarray([loc_t.x, loc_t.y, loc_t.z])\n distances.append(np.linalg.norm(loc_tm1 - loc_t))\n\n return waypoints, roadoptions, distances", "def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals\r\n # Use an insertCursor to insert Stops into the Route analysis\r\n destinations = {}\r\n destination_rows = []\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.origin_transfer_fields\r\n ) as icur:\r\n # Loop through origins and insert them into Stops along with their assigned destinations\r\n for origin in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [\"SHAPE@\", self.origin_id_field, self.assigned_dest_field] + self.origin_transfer_fields\r\n ):\r\n dest_id = origin[2]\r\n if dest_id is None:\r\n continue\r\n if dest_id not in destinations:\r\n dest_val = f\"'{dest_id}'\" if isinstance(dest_id, str) else dest_id\r\n with arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [\"SHAPE@\", self.dest_id_field] + self.destination_transfer_fields,\r\n where_clause=f\"{self.dest_id_field} = {dest_val}\"\r\n ) as cur:\r\n try:\r\n destinations[dest_id] = next(cur)\r\n except StopIteration:\r\n # The origin's destination is not present in the destinations table. 
Just skip the origin.\r\n continue\r\n # Insert origin and destination\r\n destination = destinations[dest_id]\r\n if self.reverse_direction:\r\n route_name = f\"{dest_id} - {origin[1]}\"\r\n origin_sequence = 2\r\n destination_sequence = 1\r\n else:\r\n route_name = f\"{origin[1]} - {dest_id}\"\r\n origin_sequence = 1\r\n destination_sequence = 2\r\n # Define the final origin and destination rows for the input Stops\r\n origin_row = [route_name, origin_sequence, origin[1], origin[0], None] + list(origin)[3:]\r\n destination_row = [route_name, destination_sequence, None, destination[0], destination[1]] + \\\r\n list(destination)[2:]\r\n icur.insertRow(origin_row)\r\n destination_rows.append(destination_row)\r\n\r\n # Insert destinations\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.destination_transfer_fields\r\n ) as dcur:\r\n for row in destination_rows:\r\n dcur.insertRow(row)", "def __init__( # pylint: disable=too-many-locals, too-many-arguments\r\n self, pair_type_str, origins, origin_id_field, destinations, dest_id_field,\r\n network_data_source, travel_mode, time_units, distance_units,\r\n max_routes, max_processes, out_routes, scratch_folder, reverse_direction=False,\r\n assigned_dest_field=None, od_pair_table=None, time_of_day=None, barriers=None\r\n ):\r\n pair_type = helpers.PreassignedODPairType[pair_type_str]\r\n self.origins = origins\r\n self.destinations = destinations\r\n self.out_routes = out_routes\r\n self.scratch_folder = scratch_folder\r\n time_units = helpers.convert_time_units_str_to_enum(time_units)\r\n distance_units = helpers.convert_distance_units_str_to_enum(distance_units)\r\n if not barriers:\r\n barriers = []\r\n self.max_processes = max_processes\r\n if not time_of_day:\r\n time_of_day = None\r\n else:\r\n time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT)\r\n\r\n # Initialize the dictionary of inputs to send to each OD solve\r\n self.rt_inputs = {\r\n \"pair_type\": pair_type,\r\n \"origins\": self.origins,\r\n \"origin_id_field\": origin_id_field,\r\n \"destinations\": self.destinations,\r\n \"dest_id_field\": dest_id_field,\r\n \"network_data_source\": network_data_source,\r\n \"travel_mode\": travel_mode,\r\n \"time_units\": time_units,\r\n \"distance_units\": distance_units,\r\n \"time_of_day\": time_of_day,\r\n \"reverse_direction\": reverse_direction,\r\n \"scratch_folder\": self.scratch_folder,\r\n \"assigned_dest_field\": assigned_dest_field,\r\n \"od_pair_table\": od_pair_table,\r\n \"barriers\": barriers,\r\n \"origin_transfer_fields\": [], # Populate later\r\n \"destination_transfer_fields\": [] # Populate later\r\n }\r\n\r\n # List of intermediate output OD Line files created by each process\r\n self.route_fcs = []\r\n\r\n # Construct OID ranges for chunks of origins and destinations\r\n if pair_type is helpers.PreassignedODPairType.one_to_one:\r\n # Chunks are of the format [first origin ID, second origin ID]\r\n self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes)\r\n elif pair_type is helpers.PreassignedODPairType.many_to_many:\r\n # Chunks are of the format [chunk_num, chunk_size]\r\n num_od_pairs = 0\r\n with open(od_pair_table, \"r\", encoding=\"utf-8\") as f:\r\n for _ in f:\r\n num_od_pairs += 1\r\n num_chunks = ceil(num_od_pairs / max_routes)\r\n self.chunks = [[i, max_routes] for i in range(num_chunks)]\r\n\r\n # Calculate the total number 
of jobs to use in logging\r\n self.total_jobs = len(self.chunks)\r\n\r\n self.optimized_cost_field = None", "def solve(self, indices_to_visit: List[int] = None) -> Dict[str, Any]:\n if indices_to_visit is None:\n indices_to_visit = list(range(len(self.matrix)))\n \n # make sure home location is in the listed, and that the list is sorted\n if self.home_index not in indices_to_visit:\n indices_to_visit.append(self.home_index)\n indices_to_visit.sort()\n \n data = self._create_data_model(indices_to_visit)\n\n # create routing index manager\n manager = RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['home'])\n\n # create routing model\n routing = RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n # returns distance between two nodes\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n dist = data['distance_matrix'][from_node][to_node]\n\n return dist\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # define cost of each arc\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # set first solution heuristic\n search_params = pywrapcp.DefaultRoutingSearchParameters()\n search_params.first_solution_strategy = (routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # solve problem\n assignment = routing.SolveWithParameters(search_params)\n\n return self._extract_solution(manager, routing, assignment, indices_to_visit)", "def __init__(self, **kwargs):\r\n self.pair_type = kwargs[\"pair_type\"]\r\n self.origins = kwargs[\"origins\"]\r\n self.origin_id_field = kwargs[\"origin_id_field\"]\r\n self.destinations = kwargs[\"destinations\"]\r\n self.dest_id_field = kwargs[\"dest_id_field\"]\r\n self.network_data_source = kwargs[\"network_data_source\"]\r\n self.travel_mode = kwargs[\"travel_mode\"]\r\n self.time_units = kwargs[\"time_units\"]\r\n self.distance_units = kwargs[\"distance_units\"]\r\n self.time_of_day = kwargs[\"time_of_day\"]\r\n self.reverse_direction = kwargs[\"reverse_direction\"]\r\n self.scratch_folder = kwargs[\"scratch_folder\"]\r\n self.assigned_dest_field = kwargs[\"assigned_dest_field\"]\r\n self.od_pair_table = kwargs[\"od_pair_table\"]\r\n self.origin_transfer_fields = kwargs[\"origin_transfer_fields\"]\r\n self.destination_transfer_fields = kwargs[\"destination_transfer_fields\"]\r\n self.barriers = []\r\n if \"barriers\" in kwargs:\r\n self.barriers = kwargs[\"barriers\"]\r\n\r\n # Create a job ID and a folder for this job\r\n self._create_job_folder()\r\n\r\n # Setup the class logger. 
Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"RoutePairs\")\r\n\r\n # Get field objects for the origin and destination ID fields since we need this in multiple places\r\n self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]\r\n self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]\r\n\r\n # Set up other instance attributes\r\n self.is_service = helpers.is_nds_service(self.network_data_source)\r\n self.rt_solver = None\r\n self.solve_result = None\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.input_origins_layer_obj = None\r\n self.input_dests_layer_obj = None\r\n self.origin_unique_id_field_name = \"OriginUniqueID\"\r\n self.dest_unique_id_field_name = \"DestinationUniqueID\"\r\n self.od_pairs = None\r\n\r\n # Create a network dataset layer if needed\r\n if not self.is_service:\r\n self._make_nds_layer()\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputRoutes\": \"\",\r\n \"logFile\": self.log_file\r\n }", "def solve_tsp(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n drop_off_dict = {}\n car_path = []\n home_map = {}\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n\n start_in_home = start in home_indexes\n if start in home_indexes:\n home_indexes.remove(start)\n home_indexes.insert(0, start)\n home_count = 0;\n\n for home in home_indexes:\n #print(home, end = \" \")\n home_map[home_count] = home\n home_count += 1\n # Instantiate the data problem.\n #print(len(home_map))\n data = create_data_model(home_indexes, 0)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n #print(manager.NodeToIndex(15))\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n #print(home_map[to_index], end = \" \")\n from_index = manager.IndexToNode(from_index)\n to_index = manager.IndexToNode(to_index)\n dist_to = all_paths.get(home_map[from_index])[0][home_map[to_index]]\n #if from_index >= 25 or to_index >= 25:\n # print(\"from\" if from_index >= 25 else \"to\", end = \" \")\n #dist_to = all_paths[from_index][0][to_index]\n return dist_to\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n \"\"\"\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n \"\"\"\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n 
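# Note: GUIDED_LOCAL_SEARCH is a metaheuristic with no built-in stopping\n    # condition, so the time limit set on the next line is required; without\n    # one, routing.SolveWithParameters() would search indefinitely.\n    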
search_parameters.time_limit.seconds = 3\n #search_parameters.log_search = True\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # if assignment:\n # print_solution(manager, routing, assignment)\n # Print solution on console.\n\n if start in home_indexes:\n drop_off_dict[start] = [start]\n\n\n index = routing.Start(0)\n car_path.append(start)\n\n while not routing.IsEnd(index):\n previous_index = manager.IndexToNode(index)\n index = assignment.Value(routing.NextVar(index))\n\n car_path.pop();\n to_index = manager.IndexToNode(index)\n path_to = all_paths.get(home_map[previous_index])[1][home_map[to_index]]\n drop_off_dict[home_map[to_index]] = [home_map[to_index]]\n #print(to_index, end = ' ')\n car_path.extend(path_to)\n #route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n # for i in car_path:\n # print(i)\n if start in drop_off_dict.keys() and not start_in_home:\n drop_off_dict.pop(start, None)\n\n return car_path, drop_off_dict", "def directions_calc(self):\n \n # create route_dict, {'radio_button_name': {'geometries': list of coords,\n # 'values': list of values}}\n route_dict = self._selectInput()\n \n # generate lists with locations and values\n (start_layer_name,\n end_layer_name) = [x.objectName() for x in self.radio_buttons]\n \n locations_list = list(product(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n values_list = list(product(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n \n # If row-by-row in two-layer mode, then only zip the locations\n if all([button.isChecked() for button in self.radio_buttons]) and self.dlg.routing_twolayer_rowbyrow.isChecked():\n locations_list = list(zip(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n\n values_list = list(zip(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n\n # Add via point if specified\n route_via = None\n if self.dlg.routing_via_label.text() != 'Long,Lat':\n route_via = [float(x) for x in self.dlg.routing_via_label.text().split(\",\")]\n \n message_bar, progress_widget = progressbar.pushProgressBar(self.iface)\n \n responses = []\n delete_values = []\n for i, coords_tuple in enumerate(locations_list):\n if coords_tuple[0] == coords_tuple[-1]:\n # Skip when same location\n delete_values.append(i)\n continue\n if route_via:\n # add via coords\n coords_tuple = list(coords_tuple)\n coords_tuple.insert(1, route_via)\n \n # Update progress bar\n percent = (i/len(locations_list)) * 100\n message_bar.setValue(percent)\n \n # Make the request\n self.params['coordinates'] = convert.build_coords(coords_tuple)\n responses.append(self.client.request(self.url, self.params))\n \n # Delete entries in values_list where coords where the same\n values_list = [value for idx, value in enumerate(values_list) if idx not in delete_values]\n \n # Only proceed when there actual responses\n if responses: \n layer_out = self._addLine(responses, values_list)\n layer_out.updateExtents()\n \n QgsProject.instance().addMapLayer(layer_out)\n \n self.iface.messageBar().popWidget(progress_widget)", "def get_routing_solution(self):\n G = self.base_digraph\n s1 = self.sources[0]\n s2 = self.sources[1]\n t1 = self.destinations[0]\n t2 = self.destinations[1]\n\n try:\n m = Model('routing')\n m.setParam('OutputFlag', False)\n\n # variables,\n # We have one variable per edge per session\n # e is the dict of dict for the variables\n e = {}\n r = {}\n for i 
in [1,2]:\n e[i] = {}\n r[i] = m.addVar()\n for u,v in G.edges():\n e[i][u,v] = m.addVar(lb=0)\n\n m.update()\n\n obj = quicksum(r.values())\n m.setObjective(obj, GRB.MAXIMIZE)\n\n # constraints\n # 1. conservation of flow at all intermediate nodes\n # 2. capacity constraints for each edge\n\n for u,v in G.edges():\n m.addConstr(e[1][u,v] + e[2][u,v] <= G[u][v]['capacity'])\n\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s2)) == r[2])\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s2)) == 0)\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s1)) == 0)\n m.addConstr(quicksum(e[1][u,v] for u,v in G.in_edges(t1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.in_edges(t2)) == r[2])\n\n for n in G.nodes():\n if n not in [s1, s2, t1, t2]:\n for i in [1, 2]:\n inflow = quicksum(e[i][u,v] for u,v in G.in_edges(n))\n outflow = quicksum(e[i][u,v] for u,v in G.out_edges(n))\n m.addConstr(inflow == outflow)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n for u, v in G.edges():\n G[u][v]['Routing'] = {}\n G[u][v]['Routing'][1] = e[1][u,v].x\n G[u][v]['Routing'][2] = e[2][u,v].x\n return (m.objVal, r[1].x, r[2].x)\n else:\n # something went wrong: no optimal solution was obtained\n print(\"Something was wrong, no optimal solution obtained\")\n return None, None, None\n\n except GurobiError:\n print('Error Report from Gurobi')", "def pedestrian_route(\n self,\n origin: List,\n destination: List,\n via: Optional[List[Tuple]] = None,\n origin_place_options: Optional[PlaceOptions] = None,\n destination_place_options: Optional[PlaceOptions] = None,\n via_place_options: Optional[PlaceOptions] = None,\n destination_waypoint_options: Optional[WayPointOptions] = None,\n via_waypoint_options: Optional[WayPointOptions] = None,\n departure_time: Optional[datetime] = None,\n routing_mode: str = \"fast\",\n alternatives: int = 0,\n units: str = \"metric\",\n lang: str = \"en-US\",\n return_results: Optional[List] = None,\n spans: Optional[List] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n exclude: Optional[List[str]] = None,\n ) -> RoutingResponse: # noqa E501\n resp = self.routing_api.route(\n transport_mode=\"pedestrian\",\n origin=origin,\n destination=destination,\n via=via,\n origin_place_options=origin_place_options,\n destination_place_options=destination_place_options,\n via_place_options=via_place_options,\n destination_waypoint_options=destination_waypoint_options,\n via_waypoint_options=via_waypoint_options,\n departure_time=departure_time,\n routing_mode=routing_mode,\n alternatives=alternatives,\n units=units,\n lang=lang,\n return_results=return_results,\n spans=spans,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n exclude=exclude,\n )\n return RoutingResponse.new(resp.json())", "def main():\r\n # Instantiate the data problem.\r\n data = create_data_model()\r\n\r\n # Create the routing index manager.\r\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n # Create Routing Model.\r\n routing = pywrapcp.RoutingModel(manager)\r\n\r\n\r\n # Create and register a transit callback.\r\n def distance_callback(from_index, to_index):\r\n \"\"\"Returns the distance between the two nodes.\"\"\"\r\n # Convert from routing variable Index to distance matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return 
data['distance_matrix'][from_node][to_node]\r\n\r\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n # Define cost of each arc.\r\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n\r\n # Add Capacity constraint.\r\n def demand_callback(from_index):\r\n \"\"\"Returns the demand of the node.\"\"\"\r\n # Convert from routing variable Index to demands NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n return data['demands'][from_node]\r\n\r\n demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n demand_callback)\r\n routing.AddDimensionWithVehicleCapacity(\r\n demand_callback_index,\r\n 0, # null capacity slack\r\n data['vehicle_capacities'], # vehicle maximum capacities\r\n True, # start cumul to zero\r\n 'Capacity')\r\n\r\n # Setting first solution heuristic.\r\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n search_parameters.first_solution_strategy = (\r\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n\r\n # Solve the problem.\r\n assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n # Print solution on console.\r\n if assignment:\r\n print_solution(data, manager, routing, assignment)", "def solve(\n self,\n initial_routes=None,\n solver=\"cbc\",\n cspy=False,\n exact=True,\n pricing_strategy=\"PrunePaths\",\n ):\n if cspy:\n self.G.graph[\"subproblem\"] = \"cspy\"\n else:\n self.G.graph[\"subproblem\"] = \"lp\"\n print(self.G.graph[\"name\"], self.G.graph[\"subproblem\"])\n print(\"===========\")\n prob = VehicleRoutingProblem(\n self.G,\n duration=self.max_duration,\n load_capacity=self.max_load,\n drop_penalty=self.penalty,\n pickup_delivery=self.activate_pickup_delivery,\n distribution_collection=self.activate_distribution_collection,\n time_windows=self.activate_time_windows,\n )\n prob.solve(\n initial_routes=initial_routes,\n cspy=cspy,\n exact=exact,\n pricing_strategy=pricing_strategy,\n solver=solver,\n )\n self.best_value, self.best_routes = prob.best_value, prob._best_routes_as_graphs\n self.best_routes_nodes = prob.best_routes", "def consolidation_heuristics(to_print = False):\n # Instantiate the data problem.\n data = create_data_model()\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n def pending_callback(from_index, to_index):\n \"\"\"Returns the postponement cost of serving the destination node.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['post'][to_node]\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n pending_callback_index = routing.RegisterTransitCallback(pending_callback)\n # Define cost of each arc.\n for i in range(data['num_vehicles']-1):\n routing.SetArcCostEvaluatorOfVehicle(transit_callback_index, i) #Transit cost\n routing.SetFixedCostOfVehicle(data['fixed_cost'], i) #Fixed cost\n routing.SetArcCostEvaluatorOfVehicle(pending_callback_index, data['num_vehicles']-1) 
#Postponement and/or NonService cost\n # Add Capacity constraint.\n def demand_callback(from_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index) \n return data['demands'][from_node]\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n # Add time constraint.\n def time_callback(from_index,to_index): #\n \"\"\"Returns the travel time between the two nodes.\"\"\"\n # Convert from routing variable Index to NodeIndex in time\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return time_matrix[from_node][to_node] \n time_callback_index = routing.RegisterTransitCallback(time_callback) \n routing.AddDimensionWithVehicleCapacity(\n time_callback_index,\n 0, # null capacity slack\n data['time_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Time')\n # Set the solution heuristic procedure.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 5 #10 # 60 #20 #3000\n search_parameters.log_search = True\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n # Print solution on console.\n if assignment:\n sent, sol_results, routes_results = print_solution(data, manager, routing, assignment) \n return sent, sol_results, routes_results", "def add_route(g, origin, destination, distance, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n distance = int(distance)\n # Add route both ways\n if(choice_dir == \"y\"):\n g.city_dict[origin_code].add_flights_in((destination_code, distance))\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n \n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n g.city_dict[destination_code].add_flights_out((origin_code, distance))\n # Add route one way \n if(choice_dir == \"n\"):\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n \n \n \n return g", "def buildRoutesDict(self):\n \n # create route number and name xref dictionary\n arcpy.env.workspace = PublicTransit.RTD_PATH\n routes = arcpy.SearchCursor(PublicTransit.BUS_ROUTES, \"\", \"\", \"RouteID; Name\", \"\")\n self.routeXref = dict()\n for route in routes:\n self.routeXref[route.RouteID] = route.Name\n self.routeXref[route.Name] = route.RouteID\n del routes\n \n #get mode lookup table\n mode_table = self.getModeLookupTable()\n \n # Query the RTD database for the route name, operator, mode, and headways.\n # We are querying for weekday routes (DAYTYPE_CLASS Weekday field = 'Y')\n conn = pyodbc.connect(PublicTransit.DB_CONN_STRING)\n cursor = conn.cursor()\n self.transitRoutes = dict()\n qry = \"\"\"\n WITH t AS\n (\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR,\n CASE\n WHEN HOUR_CLASS >= 3 and HOUR_CLASS < 6 THEN 'EA'\n WHEN HOUR_CLASS >= 6 and HOUR_CLASS < 10 THEN 'AM'\n WHEN HOUR_CLASS >= 10 and HOUR_CLASS < 15 THEN 'MD'\n WHEN HOUR_CLASS >= 15 and HOUR_CLASS < 19 THEN 'PM'\n WHEN (HOUR_CLASS BETWEEN 19 AND 24) OR 
HOUR_CLASS < 3 THEN 'EV'\n END AS tod,\n [HOURLY_FREQUENCY(Daily until HOUR_CLASS update)], HOUR_CLASS\n FROM dbo.[ROUTE HEADWAY AND FREQUENCY]\n WHERE DAYTYPE_CLASS IN\n (SELECT dc.CLASS FROM dbo.DAYTYPE_CLASS dc WHERE WEEKDAY = 'Y')\n )\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod,\n 60.0 / ROUND(AVG(CAST([HOURLY_FREQUENCY(Daily until HOUR_CLASS update)] AS FLOAT)), 0) as headway\n FROM t\n GROUP BY CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod\n ORDER BY SCH_ROUTEID, SCH_PATTERNID, tod\"\"\"\n \n used_route_names = []\n # Iterate through result set and apply attributes.\n for row in cursor.execute(qry):\n routePattern = str(row.SCH_ROUTEID) + \"_\" + str(row.SCH_PATTERNID)\n if routePattern not in self.transitRoutes:\n self.transitRoutes[routePattern] = TransitRoute(routePattern,\n routeId = row.SCH_ROUTEID,\n patternId = row.SCH_PATTERNID)\n self.transitRoutes[routePattern].new_name = self.__cleanRouteName(row.CPT_AGENCYID + \"_\" + row.SCH_ROUTEDESIGNATOR[:(11 - 1 - len(row.CPT_AGENCYID))],used_route_names) #12 is the maximum name length\n self.transitRoutes[routePattern].agency = row.AGENCYNAME\n mode = -1\n for mode_row in mode_table:\n if row.CPT_AGENCYID == mode_row[\"CPT_AGENCYID\"] and row.CPT_MODE == mode_row[\"CPT_MODE\"]:\n if mode_row[\"SCH_ROUTEDESIGNATOR\"] != \"NA\":\n if row.SCH_ROUTEDESIGNATOR == mode_row[\"SCH_ROUTEDESIGNATOR\"]:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n break #this is as detailed as we can get\n else:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n self.transitRoutes[routePattern].mode = mode\n self.transitRoutes[routePattern].mode_group = Mode.getModeName(mode_group)\n # set headways\n if row.tod == 'EA':\n self.transitRoutes[routePattern].eaHeadway = row.headway\n elif row.tod == 'AM':\n self.transitRoutes[routePattern].amHeadway = row.headway\n elif row.tod == 'MD':\n self.transitRoutes[routePattern].mdHeadway = row.headway\n elif row.tod == 'PM':\n self.transitRoutes[routePattern].pmHeadway = row.headway\n elif row.tod == 'EV':\n self.transitRoutes[routePattern].evHeadway = row.headway\n conn.close()", "def compute_waypoints(self, source_loc, destination_loc):\n start_waypoint = self._map.get_waypoint(\n source_loc,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n end_waypoint = self._map.get_waypoint(\n destination_loc,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n assert start_waypoint and end_waypoint, 'Map could not find waypoints'\n route = self._grp.trace_route(\n start_waypoint.transform.location,\n end_waypoint.transform.location)\n # TODO(ionel): The planner returns several options in intersections.\n # We always take the first one, but this is not correct.\n return deque([to_pylot_transform(waypoint[0].transform)\n for waypoint in route])", "def solve_route(inputs, chunk):\r\n rt = Route(**inputs)\r\n if inputs[\"pair_type\"] is helpers.PreassignedODPairType.one_to_one:\r\n rt.logger.info(f\"Processing origins OID {chunk[0]} to {chunk[1]} as job id {rt.job_id}\")\r\n elif inputs[\"pair_type\"] is helpers.PreassignedODPairType.many_to_many:\r\n rt.logger.info(f\"Processing chunk {chunk[0]} as job id {rt.job_id}\")\r\n rt.solve(chunk)\r\n rt.teardown_logger()\r\n return rt.job_result", "def processTradeRoutes(self):\n try:\n nextRound = self.currentRound+1\n resultslist = []\n for trID in 
self.tradeRoutes.keys():\n myTradeRoute = self.tradeRoutes[trID]\n (systemFromID, systemToID, tradeRouteType) = string.split(trID, '-')\n systemFrom = self.systems[systemFromID]\n systemTo = self.systems[systemToID]\n cancel = 0\n warpReq = 0\n # choose trade route type\n if tradeRouteType == 'GEN':\n # update what system sends based on what it makes\n myTradeRoute.AL = systemFrom.prodAL\n myTradeRoute.EC = systemFrom.prodEC\n myTradeRoute.IA = systemFrom.prodIA\n \n # check if trade route is adjacent or requires warp gate capacity\n if systemTo.id in systemFrom.warpGateSystems:\n warpReq = myTradeRoute.getWarpRequired()\n if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n cancel = 1\n elif systemTo.id not in systemFrom.connectedSystems:\n cancel = 1\n \n if (systemFrom.AL >= myTradeRoute.AL and\n systemFrom.EC >= myTradeRoute.EC and\n systemFrom.IA >= myTradeRoute.IA and \n cancel == 0):\n # process trade route\n systemFrom.AL -= myTradeRoute.AL\n systemFrom.EC -= myTradeRoute.EC\n systemFrom.IA -= myTradeRoute.IA\n systemTo.AL += myTradeRoute.AL\n systemTo.EC += myTradeRoute.EC\n systemTo.IA += myTradeRoute.IA\n # deduct properly if empires are different\n empireFrom = self.empires[systemFrom.myEmpireID]\n empireTo = self.empires[systemTo.myEmpireID]\n if empireFrom <> empireTo:\n empireFrom.AL -= myTradeRoute.AL\n empireFrom.EC -= myTradeRoute.EC\n empireFrom.IA -= myTradeRoute.IA\n empireTo.AL += myTradeRoute.AL\n empireTo.EC += myTradeRoute.EC\n empireTo.IA += myTradeRoute.IA\n \n if warpReq > 0:\n systemFrom.usedWGC += warpReq\n systemTo.usedWGC += warpReq\n \n # mail trade route completion\n resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n self.mailTradeInfo('completed', myTradeRoute, nextRound)\n else:\n cancel = 1\n \n # check if route should be cancelled\n if cancel == 1:\n resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n elif myTradeRoute.oneTime == 1:\n resultslist.append('one time trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n \n return str(resultslist)\n except:\n return 'galaxy->processTradeRoutes error'", "def rout(pour_point, uh_box, fdr_data, fdr_atts, rout_dict):\n log.info(\"Starting routing program for point: %s\", pour_point)\n # ---------------------------------------------------------------- #\n # Unpack a few structures\n uh_t = uh_box['time']\n uh_box = uh_box['func']\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Find Basin Dims and ID\n basin_id = fdr_data[rout_dict['BASIN_ID_VAR']][pour_point.routy, pour_point.routx]\n\n log.info('Input Latitude: %f' % pour_point.lat)\n log.info('Input Longitude: %f' % pour_point.lon)\n log.info('Global Basid ID: %i' % basin_id)\n\n y_inds, x_inds = np.nonzero(fdr_data[rout_dict['BASIN_ID_VAR']] == basin_id)\n y = np.arange(len(fdr_data[rout_dict['LATITUDE_VAR']]))\n x = np.arange(len(fdr_data[rout_dict['LONGITUDE_VAR']]))\n\n x_min = min(x[x_inds])\n x_max = max(x[x_inds])+1\n y_min = min(y[y_inds])\n y_max = max(y[y_inds])+1\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Create the Basin Dictionary, a subset of the fdr_data\n basin = {}\n basin['lat'] = fdr_data[rout_dict['LATITUDE_VAR']][y_min:y_max]\n basin['lon'] = 
fdr_data[rout_dict['LONGITUDE_VAR']][x_min:x_max]\n basin['basin_id'] = fdr_data[rout_dict['BASIN_ID_VAR']][y_min:y_max, x_min:x_max]\n basin['flow_direction'] = fdr_data[rout_dict['FLOW_DIRECTION_VAR']][y_min:y_max, x_min:x_max]\n basin['flow_distance'] = fdr_data[rout_dict['FLOW_DISTANCE_VAR']][y_min:y_max, x_min:x_max]\n basin['velocity'] = fdr_data['velocity'][y_min:y_max, x_min:x_max]\n basin['diffusion'] = fdr_data['diffusion'][y_min:y_max, x_min:x_max]\n\n log.debug('Grid cells in subset: %i' % basin['velocity'].size)\n\n pour_point.basiny, pour_point.basinx = latlon2yx(plats=pour_point.lat,\n plons=pour_point.lon,\n glats=basin['lat'],\n glons=basin['lon'])\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Create the rout_data Dictionary\n rout_data = {'lat': basin['lat'], 'lon': basin['lon']}\n\n # ---------------------------------------------------------------- #\n # Determine flow direction syntax\n if 'VIC' in fdr_atts[rout_dict['FLOW_DIRECTION_VAR']]:\n # VIC Directions: http://www.hydro.washington.edu/Lettenmaier/Models/VIC/Documentation/Routing/FlowDirection.shtml\n dy = {1: -1, 2: -1, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0, 8: -1}\n dx = {1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: -1, 7: -1, 8: -1}\n log.debug('Using VIC flow directions (1-8).')\n else:\n # ARCMAP Directions: http://webhelp.esri.com/arcgisdesktop/9.2/index.cfm?TopicName=flow_direction\n dy = {1: 0, 2: 1, 4: 1, 8: 1, 16: 0, 32: -1, 64: -1, 128: -1}\n dx = {1: 1, 2: 1, 4: 0, 8: -1, 16: -1, 32: -1, 64: 0, 128: 1}\n log.debug('Using ARCMAP flow directions (1-128).')\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Find timestep (timestep is determined from uh_BOX input file)\n input_interval = find_ts(uh_t)\n rout_data['unit_hydrograph_dt'] = input_interval\n t_cell = int(rout_dict['CELL_FLOWDAYS']*SECSPERDAY/input_interval)\n t_uh = int(rout_dict['BASIN_FLOWDAYS']*SECSPERDAY/input_interval)\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Read direction grid and find to_col (to_x) and to_row (to_y)\n to_y, to_x = read_direction(basin['flow_direction'], dy, dx)\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Find all grid cells upstream of pour point\n catchment, rout_data['fraction'] = search_catchment(to_y, to_x, pour_point,\n basin['basin_id'],\n basin_id)\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Make uh for each grid cell upstream of basin pour point\n # (linear routing model - Saint-Venant equation)\n uh = make_uh(input_interval, t_cell, catchment['y_inds'],\n catchment['x_inds'], basin['velocity'], basin['diffusion'],\n basin['flow_distance'])\n\n # ---------------------------------------------------------------- #\n # Make uh_river by incrementally moving upstream combining uh functions\n uh_river = make_grid_uh_river(t_uh, t_cell, uh, to_y, to_x, pour_point,\n catchment['y_inds'], catchment['x_inds'],\n catchment['count_ds'])\n\n # ---------------------------------------------------------------- #\n # Make uh_s for each grid cell upstream of basin pour point\n # (combine IRFs for all grid cells in flow path)\n uh_s = 
make_grid_uh(t_uh, t_cell, uh_river, uh_box, to_y, to_x,\n catchment['y_inds'], catchment['x_inds'],\n catchment['count_ds'])\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Aggregate to output timestep\n rout_data['unit_hydrograph'], rout_data['timesteps'] = adjust_uh_timestep(uh_s, t_uh,\n input_interval,\n rout_dict['OUTPUT_INTERVAL'],\n catchment['x_inds'],\n catchment['y_inds'])\n # ---------------------------------------------------------------- #\n return rout_data", "def savings2routes(self,r1,r2):\n newRoute = VRP_Route(r1.route+r2.route)\n newRoute.update_route(self.vrpdata) # compute distance, quantity for newRoute, check whether valid\n if newRoute.tourValid:\n return r1.distance + r2.distance - newRoute.distance\n return -1", "def main():\r\n # Instantiate the data problem.\r\n data = create_data_model()\r\n\r\n # Create the routing index manager.\r\n manager = pywrapcp.RoutingIndexManager(\r\n len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n # Create Routing Model.\r\n routing = pywrapcp.RoutingModel(manager)\r\n\r\n# ADD THE DISTANCE CALLBACK\r\n # ADD THE DEMAND CALLBACK AND CAPACITY CONSTRAINTS\r\n # In addition to the distance callback, the solver also requires a demand callback, \r\n # which returns the demand at each location, and a dimension for the capacity constraints.\r\n \r\n # Create and register a transit callback.\r\n def distance_callback(from_index, to_index):\r\n \"\"\"Returns the distance between the two nodes.\"\"\"\r\n # Convert from routing variable Index to distance matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return data['distance_matrix'][from_node][to_node]\r\n \r\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n # Define cost of each arc.\r\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n#!!! NB\r\n # Unlike the distance callback, which takes a pair of locations as inputs, \r\n # the demand callback only depends on the location (from_node) of the delivery.\r\n # The code also creates a dimension for capacities, we use the AddDimensionWithVehicleCapacity method, \r\n # which takes a vector of capacities.\r\n # Since all the vehicle capacities in this example are the same, you could use the \r\n # AddDimension method, which takes a single upper bound for all vehicle quantities.
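\r\n    # For illustration only (a hypothetical single-capacity variant, assuming\r\n    # every vehicle shares data['vehicle_capacities'][0]):\r\n    # routing.AddDimension(demand_callback_index, 0,\r\n    #                      data['vehicle_capacities'][0], True, 'Capacity')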
\r\n # But AddDimensionWithVehicleCapacity handles the more general case in which different \r\n # vehicles have different capacities.\r\n \r\n # Add Capacity constraint.\r\n def demand_callback(from_index):\r\n \"\"\"Returns the demand of the node.\"\"\"\r\n # Convert from routing variable Index to demands NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n return data['demands'][from_node]\r\n\r\n demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n demand_callback)\r\n routing.AddDimensionWithVehicleCapacity(\r\n demand_callback_index,\r\n 0, # null capacity slack, modify it if you accept unmet demand\r\n data['vehicle_capacities'], # vehicle maximum capacities set by the user\r\n True, # start cumul to zero\r\n 'Capacity')\r\n \r\n # you can find other search options here:\r\n # https://developers.google.com/optimization/routing/routing_options\r\n \r\n # Setting first solution heuristic:\r\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n# search_parameters.first_solution_strategy = (\r\n# routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n # Setting metaheuristic search method:\r\n search_parameters.local_search_metaheuristic = (\r\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\r\n # Setting time limit to the method\r\n search_parameters.time_limit.seconds = 30\r\n \r\n # Solve the problem.\r\n assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n # Search status \r\n print('\\n')\r\n solver_index = routing.status()\r\n description = ['ROUTING_NOT_SOLVED','ROUTING_SUCCESS','ROUTING_FAIL',\r\n 'ROUTING_FAIL_TIMEOUT','ROUTING_INVALID']\r\n print(\"Solver status:\",description[solver_index],'\\n')\r\n \r\n # Print solution on console.\r\n if assignment:\r\n print_solution(data, manager, routing, assignment)", "def route(self, is_check_lanes=True):\n print 'route'\n # TODO: if too many vtypes, better go through id_modes\n exectime_start = time.clock()\n\n net = self.get_scenario().net\n edges = net.edges\n vtypes = self.parent.vtypes\n\n ids_edges = []\n ids_trip = []\n costs = []\n for id_vtype in self.get_vtypes():\n id_mode = vtypes.ids_mode[id_vtype]\n\n # no routing for pedestrians\n if id_mode != net.modes.get_id_mode('pedestrian'):\n weights = edges.get_times(id_mode=id_mode,\n speed_max=vtypes.speeds_max[id_vtype],\n is_check_lanes=is_check_lanes)\n\n ids_trip_vtype = self.get_trips_for_vtype(id_vtype)\n # print ' id_vtype,id_mode',id_vtype,id_mode#,ids_trip_vtype\n # print ' weights',weights\n ids_edge_depart = self.ids_edge_depart[ids_trip_vtype]\n ids_edge_arrival = self.ids_edge_arrival[ids_trip_vtype]\n\n for id_trip, id_edge_depart, id_edge_arrival in zip(ids_trip_vtype, ids_edge_depart, ids_edge_arrival):\n cost, route = routing.get_mincostroute_edge2edge(id_edge_depart,\n id_edge_arrival,\n edges=edges,\n weights=weights)\n if len(route) > 0:\n ids_edges.append(route)\n ids_trip.append(id_trip)\n costs.append(cost)\n\n ids_route = self.routes.get_value().add_rows(ids_trip=ids_trip,\n ids_edges=ids_edges,\n costs=costs,\n )\n self.add_routes(ids_trip, ids_route)\n print ' exectime', time.clock()-exectime_start\n return ids_trip, ids_route", "def test_PRP(initial):\n return plan_route((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n (3,0),(3,1),(3,2),(3,3)])", "def buildStopsDict(self):\n \n if len(self.nodesDict) == 0:\n raise Exception('Nodes dictionary is empty!')\n if
len(self.linksDict) == 0:\n raise Exception('Links dictionary is empty!')\n \n self.stopsByRoute = dict()\n self.stopsByNode = dict()\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n \n tempStops = \"temp_stops\"\n tempStopsSp = \"temp_stops_sp\"\n \n # Delete temp_stops and temp_stops_sp feature classes if they exist.\n if arcpy.Exists(tempStops):\n arcpy.Delete_management(tempStops)\n if arcpy.Exists(tempStopsSp):\n arcpy.Delete_management(tempStopsSp)\n arcpy.CopyFeatures_management(PublicTransit.RTD_PATH + PublicTransit.RTD_STOPS,\n tempStops)\n \n # Project temp_stops to CA state plane and add XY.\n install_dir = arcpy.GetInstallInfo()['InstallDir']\n out_coordinate_system = os.path.join(install_dir, PublicTransit.NAD_83_DIRECTORY)\n arcpy.Project_management(tempStops, tempStopsSp, out_coordinate_system,\n \"NAD_1983_To_WGS_1984_1\")\n arcpy.AddXY_management(tempStopsSp)\n \n # Create a search cursor to traverse all stops.\n stops = arcpy.SearchCursor(tempStopsSp, \"\", \"\",\n \"CPT_STOPPOINTID; SCH_STOPPOINTSEQNO; \" +\n \"SCH_ROUTEID; SCH_PATTERNID; ROUTE_PATTERN; \" +\n \"SourceOID; POINT_X; POINT_Y\",\n \"ROUTE_PATTERN A; SCH_STOPPOINTSEQNO A\")\n numStops = int(arcpy.GetCount_management(tempStopsSp).getOutput(0))\n print \"Found %d stops\" % numStops\n \n p = index.Property()\n p.overwrite = True\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE,properties=p)\n \n # For each stop determine the nearest network node.\n scount = 0\n icount = 0\n for s in stops:\n # only create stops for routes which exist in RTD\n if not s.ROUTE_PATTERN in self.transitRoutes:\n continue\n scount += 1\n st = TransitStop(s.CPT_STOPPOINTID, s.SCH_ROUTEID, s.SCH_PATTERNID,\n s.ROUTE_PATTERN, s.SourceOID, s.SCH_STOPPOINTSEQNO)\n # If the stop's linkId is in the links dictionary use the link from\n # and to node (these should all be bus routes since MTC's route\n # traversal FC was created for buses only at this time).\n if s.SourceOID in self.linksDict:\n link = self.linksDict[s.SourceOID]\n # Determine which node is nearest and snap to it.\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.fromNode.x,\n link.fromNode.y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.toNode.x,\n link.toNode.y):\n st.tanaNode = link.fromNode.nodeId\n else:\n st.tanaNode = link.toNode.nodeId\n st.inRegion = True\n \n # The stop's link is not in linksDict. These are either stops \n # outside the region or non-bus routes for which there are no\n # route traversal edges. 
Do a link lookup from the Roadways\n # feature class.\n else:\n arcpy.env.workspace = PublicTransit.RTD_PATH\n roadwaysSearch = arcpy.SearchCursor(PublicTransit.ROADWAYS_FC,\n \"LinkId = \" + str(s.SourceOID),\n \"\", \"\", \"F_JNCTID; T_JNCTID\", \"\")\n for r in roadwaysSearch:\n fromNode = self.__getIdHash(r.F_JNCTID)\n toNode = self.__getIdHash(r.T_JNCTID)\n if fromNode in self.nodesDict and toNode in self.nodesDict:\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[fromNode].x,\n self.nodesDict[fromNode].y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[toNode].x,\n self.nodesDict[toNode].y):\n st.tanaNode = fromNode\n else:\n st.tanaNode = toNode\n st.inRegion = True\n else:\n st.inRegion = False\n \n # Add the stop to stopsByRoute and stopsByNode dictionaries\n if s.ROUTE_PATTERN in self.stopsByRoute:\n self.stopsByRoute[s.ROUTE_PATTERN].append(st)\n else:\n self.stopsByRoute[s.ROUTE_PATTERN] = [st]\n if (st.tanaNode in self.stopsByNode):\n self.stopsByNode[st.tanaNode].append(st)\n else:\n self.stopsByNode[st.tanaNode] = [st]\n # add the stop node to the spatial index\n if st.tanaNode in self.nodesDict:\n icount += 1\n self.spIndex.insert(st.stopPointId,\n (self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y,\n self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y))\n del stops", "def analyseCoordination(self):\n #create a list of criteria that correspond to maximal path length\n #max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path 
length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))", "def obtain_results(self,assignment,show=False):\n\t\t# logger.debug('Objective: {} meters'.format(assignment.ObjectiveValue()))\n\t\tindex=self.routing.Start(0)\n\t\tplan_output='Route for vehicle 0:\\n'\n\t\troute_distance=0\n\t\tnodes=list()\n\t\twhile True:\n\t\t\tnode=self.manager.IndexToNode(index)\n\t\t\tnodes.append(node)\n\t\t\tplan_output+=' {} ->'.format(node)\n\t\t\tprevious_index=index\n\t\t\tif self.routing.IsEnd(index): break\n\t\t\tindex=assignment.Value(self.routing.NextVar(index))\n\t\t\troute_distance+=self.routing.GetArcCostForVehicle(previous_index,index,0)\n\t\t# plan_output+=' {}\\n'.format(manager.IndexToNode(index))\n\t\t# plan_output+='Route distance: {} meters\\n'.format(route_distance)\n\t\t# logger.debug('plan_output:\\n%s',plan_output)\n\t\t# logger.debug('nodes:\\n%s',nodes)\n\t\tif show:\n\t\t\tplt.scatter(self.city_coors[:,0],self.city_coors[:,1])\n\t\t\t# plt.scatter(*depot_point)\n\t\t\tplt.plot(self.city_coors[nodes][:,0],self.city_coors[nodes][:,1])\n\t\t\tplt.show()\n\t\treturn nodes", "def plan_trip():\n origins = []\n destinations = []\n\n origin_stop = request.args.get('origin', False)\n destination_stop = request.args.get('destination', False)\n origin_is_suburb = request.args.get('origin_suburb', False)\n dest_is_suburb = request.args.get('dest_suburb', False)\n origin_is_suburb = bool(origin_is_suburb)\n dest_is_suburb = bool(dest_is_suburb)\n if origin_stop and destination_stop:\n client = api.connection()\n origins = client.find_stops_by_name('any', origin_stop, True)\n\n if client.error == 404:\n render_template(\n \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n )\n\n destinations = client.find_stops_by_name('any', destination_stop, True)\n if client.error == 404:\n render_template(\n \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n )\n\n origins = stop_information_generator(\n origins.locations, [], origin_stop, origin_is_suburb\n )\n destinations = stop_information_generator(\n destinations.locations, [], destination_stop, dest_is_suburb\n )\n\n return render_template(\n \"trip-planner.jinja2\", origins=origins, destinations=destinations, err=200\n )", "def route_info(g, journey):\n distance = 0\n cost = 0.00\n time = 0\n check = 0\n \n for i in range(0, len(journey) - 1):\n city_name = journey[i]\n city_next = journey[i + 1]\n code_city = g.convert[city_name] \n code_next = g.convert[city_next]\n \n for flight in g.city_dict[code_city].get_flights_out():\n if(flight[0] == code_next):\n distance = distance + flight[1]\n time = time + route_info_helper(g, code_city, code_next, flight[1])\n if(i < 7):\n cost = cost + (distance * (0.35 - (i * 0.05)))\n \n check = check + 1\n if((check + 1) == len(journey)):\n return distance, cost, time\n else:\n print(\"Invalid Route\")\n return 0, 0, 0", "def initialize_rt_solver(self):\r\n # For a local network dataset, we need to checkout the Network Analyst extension license.\r\n if not self.is_service:\r\n arcpy.CheckOutExtension(\"network\")\r\n\r\n # 
Create a new Route object\r\n self.logger.debug(\"Creating Route object...\")\r\n self.rt_solver = arcpy.nax.Route(self.network_data_source)\r\n\r\n # Set the Route analysis properties.\r\n # Read properties from the rt_config.py config file for all properties not set in the UI as parameters.\r\n # Route properties documentation: https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route.htm\r\n # The properties have been extracted to the config file to make them easier to find and set so users don't have\r\n # to dig through the code to change them.\r\n self.logger.debug(\"Setting Route analysis properties from RT config file...\")\r\n for prop, value in RT_PROPS.items():\r\n if prop in RT_PROPS_SET_BY_TOOL:\r\n self.logger.warning((\r\n f\"Route config file property {prop} is handled explicitly by the tool parameters and will be \"\r\n \"ignored.\"\r\n ))\r\n continue\r\n try:\r\n setattr(self.rt_solver, prop, value)\r\n if hasattr(value, \"name\"):\r\n self.logger.debug(f\"{prop}: {value.name}\")\r\n else:\r\n self.logger.debug(f\"{prop}: {value}\")\r\n except Exception as ex: # pylint: disable=broad-except\r\n # Suppress warnings for older services (pre 11.0) that don't support locate settings and services\r\n # that don't support accumulating attributes because we don't want the tool to always throw a warning.\r\n if not (self.is_service and prop in [\r\n \"searchTolerance\", \"searchToleranceUnits\", \"accumulateAttributeNames\"\r\n ]):\r\n self.logger.warning(\r\n f\"Failed to set property {prop} from RT config file. Default will be used instead.\")\r\n self.logger.warning(str(ex))\r\n # Set properties explicitly specified in the tool UI as arguments\r\n self.logger.debug(\"Setting Route analysis properties specified tool inputs...\")\r\n self.rt_solver.travelMode = self.travel_mode\r\n self.logger.debug(f\"travelMode: {self.travel_mode}\")\r\n self.rt_solver.timeUnits = self.time_units\r\n self.logger.debug(f\"timeUnits: {self.time_units}\")\r\n self.rt_solver.distanceUnits = self.distance_units\r\n self.logger.debug(f\"distanceUnits: {self.distance_units}\")\r\n self.rt_solver.timeOfDay = self.time_of_day\r\n self.logger.debug(f\"timeOfDay: {self.time_of_day}\")", "def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path", "def make_connections(self):\n return\n destinations={}\n sources={}\n for gsq in self.gatesqs:\n destinations[self.local2global(gsq)]=set()\n sources[self.local2global(gsq)]=set()\n if rm.all_sols=='timeout':\n return\n for sol in self.all_sols:\n for 
sa in sol:\n start,indv,path,covered,end=sa\n destinations[self.local2global(start)].add((self.local2global(end),tuple(path)))\n sources[self.local2global(end)].add((self.local2global(start),tuple(path)))\n self.sources=sources\n self.destinations=destinations", "def google_vrp(data):\n # Instantiate the data problem.\n #data = flora_data_model() # TODO: create_data_model()\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Add Distance constraint.\n dimension_name = 'Distance'\n routing.AddDimension(\n transit_callback_index,\n 0, # no slack\n 300000, # vehicle maximum travel distance\n True, # start cumul to zero\n dimension_name)\n distance_dimension = routing.GetDimensionOrDie(dimension_name)\n distance_dimension.SetGlobalSpanCostCoefficient(100)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n if solution:\n print_solution(data, manager, routing, solution)\n # get solution and return it\n sol = get_solution(data, manager, routing, solution)\n return sol\n else:\n print('No solution found!')", "def test_parse_routes(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n problem = problem_builder.build(riders, vehicles, depots)\n model = model_builder.build(problem)\n solution = model.solve()\n routes = Router._parse_routes(problem, solution)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def set_destination(self, start_waypoint, end_waypoint, time=False):\n\n self.create_samples(start_waypoint, end_waypoint)\n\n route_trace = self._trace_route(time=time)\n assert route_trace\n\n self._local_planner.set_global_plan(route_trace)", "def possible_routes(srcLat, srcLon, destLat, destLon, searchPreference, dateTime):\n\n dateTime = dateTime.split(\",\")\n\n routes = Db().get_best_route(srcLat, srcLon, destLat, destLon)\n try:\n best_routes = get_three_best_routes(routes, searchPreference, dateTime)\n except IndexError:\n best_routes = \"No Journey Found\"\n\n # Get the address for map display purposes\n try:\n for i in range(len(best_routes)):\n # address is a dataframe, hence
the use of .loc\n address = Db().get_single_address(best_routes[i][2]).loc[0,\"Address\"]\n best_routes[i].append(address)\n except IndexError:\n # In case the source is outside Dublin\n best_routes = \"No Journey Found\"\n\n return json.dumps(best_routes, ensure_ascii=False)", "def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):\n \n waypoint_co2 = {}\n waypoint_durations = {}\n\n # get all prefectures referential\n db_connector = Connector()\n with db_connector:\n results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)\n all_waypoints = pd.DataFrame(results.fetchall())\n\n # Vérification si les trajets péfecture à préfecture ont été déjà calculés\n db_connector = Connector()\n with db_connector:\n saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n # Dans le précalcul des trajets optimaux, utilisation de la date courante\n travel_date = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n bad_waypoints = []\n\n if saved_waypoints.rowcount > 0 and not is_force_compute:\n print(\"le référentiel des voyage existe déjà\")\n else:\n try:\n bdd_management.truncate_journey()\n\n for (from_city, to_city) in combinations(all_waypoints[0].values, 2):\n try:\n if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:\n continue\n\n route = requests.get(API_NAVITIA.format(\n int(from_city), int(to_city), travel_date, API_KEY))\n response = json.loads(route.text)\n\n mid_duration = 0\n mid_co2 = 0\n for journey in response[\"journeys\"]:\n mid_duration += journey[\"duration\"]\n mid_co2 += journey[\"co2_emission\"][\"value\"]\n\n waypoint_co2[frozenset([from_city, to_city])\n ] = mid_co2/len(response[\"journeys\"])\n waypoint_durations[frozenset(\n [from_city, to_city])] = mid_duration/len(response[\"journeys\"])\n\n except Exception as e:\n print(\"Error with finding the route between %s and %s : %s\" %\n (from_city, to_city, response[\"error\"][\"message\"]))\n if 'no destination point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(to_city))\n\n if 'no origin point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(from_city))\n\n for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response[\"error\"][\"message\"]):\n if not int(bad_insee_code) in bad_waypoints:\n bad_waypoints.append(int(bad_insee_code))\n\n # Enregistrement des trajets point à point (préfecture à préfecture)\n db_connector = Connector()\n with db_connector:\n for (waypoint1, waypoint2) in waypoint_co2.keys():\n waypoint = [waypoint1,\n waypoint2,\n str(waypoint_co2[frozenset([waypoint1, waypoint2])]),\n str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]\n \n db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)\n # commit trajets unitaires dans la bdd\n db_connector.commit()\n\n # enregistrement des préfectures non trouvée (pas de gare)\n print(bad_waypoints)\n db_connector = Connector()\n with db_connector:\n for bad_city in bad_waypoints:\n db_connector.execute_nonquery(\n sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))\n #db_connector.commit()\n except Exception as e:\n print('Erreur durant la génération des trajets de préfecture en préfecture. 
Rollback performed')\n\n waypoint_co2 = {}\n waypoint_durations = {}\n processed_waypoints = set()\n\n db_connector = Connector()\n with db_connector:\n waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n for row in waypoints:\n waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]\n waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]\n processed_waypoints.update([row[0], row[1]])\n\n travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )\n\n # take most represented trip order\n journey_groups = Counter(chain(*travel_results))\n top_journeys = journey_groups.most_common(1)[0][0]\n\n print('The most representative trip is:')\n print(top_journeys)\n\n # compute the actual travel schedule for the most optimized trip\n\n print('Starting the trip computation on %s' %\n (datetime_str_to_datetime_str(trip_start_date)))\n travel_date = trip_start_date\n\n db_connector = Connector()\n with db_connector:\n try:\n # empty the table containing the trip information\n bdd_management.truncate_roadtrip()\n\n for i in range(len(top_journeys)-1):\n try:\n from_city_insee = top_journeys[i]\n to_city_insee = top_journeys[i+1]\n route = requests.get(API_NAVITIA.format(\n int(from_city_insee), int(to_city_insee), travel_date, API_KEY))\n travels = json.loads(route.text)\n\n # Check the received journeys to identify the one best suited to the search\n best_travel = travels[\"journeys\"][0]\n for travel in travels[\"journeys\"]:\n if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):\n best_travel = travel\n if best_travel['arrival_date_time'] > travel['arrival_date_time']:\n best_travel = travel\n\n # save leg 'i' to the database\n save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)\n\n # the next leg must have a departure date later than this leg's arrival\n travel_date = best_travel['arrival_date_time']\n\n except Exception as e:\n print(\"!! Error while computing the leg between '%s' and '%s'\" %\n (from_city_insee, to_city_insee))\n\n # Write the trip summary\n resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)\n resume = resume.fetchone()\n\n resume_description = \"\"\"Trip departure on {}. Arrival on {}. \n The trip takes {} for a total of {:d} kgeC\"\"\".format(\n datetime_str_to_datetime_str(trip_start_date),\n datetime_str_to_datetime_str(travel_date),\n str(timedelta(seconds=resume[0])) ,\n trunc( resume[1]/1000))\n\n store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])\n\n db_connector.commit()\n\n except Exception as e:\n db_connector.rollback()\n print('Error while creating the trip. Rollback performed!!!')\n\n print('print map with road-trip data')\n visualization.generate_visualization()\n\n print('Travel complete. 
Have a nice trip!!!')", "def set_address_path(manager, routing, assignment,data_locations):\n assignment.ObjectiveValue()\n index = routing.Start(0)\n route_distance = 0\n address_list=[]\n while not routing.IsEnd(index):\n cur_node=manager.IndexToNode(index)\n# print('what are: index,cur_node=',index,cur_node)\n address_list.append(data_locations[cur_node])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n manager.IndexToNode(index)\n# print('almost there: ',address_list)\n address1=address_list[0]\n address2=address_list[1]\n address3=address_list[2]\n address4=address_list[3]\n address5=address_list[4]\n address6=address_list[5]\n address7=address_list[6]\n address8=address_list[7]\n address9=address_list[8]\n address10=address_list[9]\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def _create_new_route(self, tick):\n if self.target_node_id is None:\n self.source_node_id = random.choice(Network.nodes).getID()\n else:\n self.source_node_id = self.target_node_id # We start where we stopped\n # random target\n self.target_node_id = random.choice(Network.nodes).getID()\n self.current_route_id = self.id + \"-\" + str(self.rounds)\n self.current_router_result = CustomRouter.minimal_route(self.source_node_id, self.target_node_id)\n \n if len(self.current_router_result.edges) > 0:\n traci.route.add(self.current_route_id, self.current_router_result.edges)\n return self.current_route_id\n else:\n # try again\n return self._create_new_route(tick)", "def run_router(self, detour_scale):\n\n # Double check source and target are not the same node; if so, we are done!\n for k,v in self.rg.map.items():\n if v.source and v.target:\n debug.error(\"Grid cell is source and target! 
{}\".format(k))\n return False\n \n # returns the path in tracks\n (path,cost) = self.rg.route(detour_scale)\n if path:\n debug.info(1,\"Found path: cost={0} \".format(cost))\n debug.info(1,str(path))\n\n self.paths.append(path)\n self.add_route(path)\n \n path_set = grid_utils.flatten_set(path)\n self.path_blockages.append(path_set)\n else:\n self.write_debug_gds(\"failed_route.gds\")\n # clean up so we can try a reroute\n self.rg.reinit()\n return False\n return True", "def main(route_id, con):\n # Instantiate the data problem.\n data = create_data_model(con, route_id)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters() # TODO: change parameters\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n if solution:\n print_solution(manager, routing, solution)", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = 
distance[ idx_start ][ idx_destination ]\n\n \n # If no solution is found, return None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def solve_route_in_parallel(self):\r\n # Validate Route settings. Essentially, create a dummy Route class instance and set up the\r\n # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel processes\r\n # that are guaranteed to all fail.\r\n self._validate_route_settings()\r\n\r\n # Check if the input origins and destinations have any fields we should use in the route analysis\r\n self._populate_input_data_transfer_fields()\r\n\r\n # Compute Route in parallel\r\n LOGGER.info(f\"Beginning parallelized Route solves ({self.total_jobs} chunks)\")\r\n completed_jobs = 0 # Track the number of jobs completed so far to use in logging\r\n # Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes that solve the routes\r\n with futures.ProcessPoolExecutor(max_workers=self.max_processes) as executor:\r\n # Each parallel process calls the solve_route() function with the rt_inputs dictionary for the\r\n # given origin ranges and their assigned destinations.\r\n jobs = {executor.submit(solve_route, self.rt_inputs, range): range for range in self.chunks}\r\n # As each job is completed, add some logging information and store the results to post-process later\r\n for future in futures.as_completed(jobs):\r\n try:\r\n # The Route job returns a results dictionary. Retrieve it.\r\n result = future.result()\r\n except Exception: # pylint: disable=broad-except\r\n # If we couldn't retrieve the result, some terrible error happened and the job errored.\r\n # Note: This does not mean solve failed. It means some unexpected error was thrown. The most likely\r\n # causes are:\r\n # a) If you're calling a service, the service was temporarily down.\r\n # b) You had a temporary file read/write or resource issue on your machine.\r\n # c) If you're actively updating the code, you introduced an error.\r\n # To make the tool more robust against temporary glitches, retry submitting the job up to the number\r\n # of times designated in helpers.MAX_RETRIES. If the job is still erroring after that many retries,\r\n # fail the entire tool run.\r\n errs = traceback.format_exc().splitlines()\r\n failed_range = jobs[future]\r\n LOGGER.debug((\r\n f\"Failed to get results for Route chunk {failed_range} from the parallel process. Will retry \"\r\n f\"up to {helpers.MAX_RETRIES} times. Errors: {errs}\"\r\n ))\r\n job_failed = True\r\n num_retries = 0\r\n while job_failed and num_retries < helpers.MAX_RETRIES:\r\n num_retries += 1\r\n try:\r\n future = executor.submit(solve_route, self.rt_inputs, failed_range)\r\n result = future.result()\r\n job_failed = False\r\n LOGGER.debug(f\"Route chunk {failed_range} succeeded after {num_retries} retries.\")\r\n except Exception: # pylint: disable=broad-except\r\n # Update exception info to the latest error\r\n errs = traceback.format_exc().splitlines()\r\n if job_failed:\r\n # The job errored and did not succeed after retries. 
Fail the tool run because something\r\n # terrible is happening.\r\n LOGGER.debug(f\"Route chunk {failed_range} continued to error after {num_retries} retries.\")\r\n LOGGER.error(\"Failed to get Route result from parallel processing.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise\r\n\r\n # If we got this far, the job completed successfully and we retrieved results.\r\n completed_jobs += 1\r\n LOGGER.info(\r\n f\"Finished Route calculation {completed_jobs} of {self.total_jobs}.\")\r\n\r\n # Parse the results dictionary and store components for post-processing.\r\n if result[\"solveSucceeded\"]:\r\n self.route_fcs.append(result[\"outputRoutes\"])\r\n else:\r\n # Typically, a solve fails because no destinations were found for any of the origins in the chunk,\r\n # and this is a perfectly legitimate failure. It is not an error. However, there may be other, less\r\n # likely reasons for solve failure. Write solve messages to the main GP message thread in debug\r\n # mode only in case the user is having problems. The user can also check the individual OD log\r\n # files.\r\n LOGGER.debug(f\"Solve failed for job id {result['jobId']}.\")\r\n LOGGER.debug(result[\"solveMessages\"])\r\n\r\n # Post-process outputs\r\n if self.route_fcs:\r\n LOGGER.info(\"Post-processing Route results...\")\r\n self.route_fcs = sorted(self.route_fcs)\r\n self._post_process_route_fcs()\r\n else:\r\n LOGGER.warning(\"All Route solves failed, so no output was produced.\")\r\n\r\n # Clean up\r\n # Delete the job folders if the job succeeded\r\n if DELETE_INTERMEDIATE_OUTPUTS:\r\n LOGGER.info(\"Deleting intermediate outputs...\")\r\n try:\r\n shutil.rmtree(self.scratch_folder, ignore_errors=True)\r\n except Exception: # pylint: disable=broad-except\r\n # If deletion doesn't work, just throw a warning and move on. 
This does not need to kill the tool.\r\n LOGGER.warning(f\"Unable to delete intermediate Route output folder {self.scratch_folder}.\")\r\n\r\n LOGGER.info(\"Finished calculating Routes.\")", "def create_url(_origin_details, travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&date={travel_start_date}&time={travel_start_time}'\n\n # Build iteratively with necessary syntax between destinations\n destination_body = ''\n for i, dest in enumerate(destination_list):\n destination_body = f'{destination_body}&to[{i}]={dest}'\n\n return f'{prefix}{origin_body}{destination_body}'", "def car_route(\n self,\n origin: List,\n destination: List,\n via: Optional[List[Tuple]] = None,\n origin_place_options: Optional[PlaceOptions] = None,\n destination_place_options: Optional[PlaceOptions] = None,\n via_place_options: Optional[PlaceOptions] = None,\n destination_waypoint_options: Optional[WayPointOptions] = None,\n via_waypoint_options: Optional[WayPointOptions] = None,\n departure_time: Optional[datetime] = None,\n routing_mode: str = \"fast\",\n alternatives: int = 0,\n units: str = \"metric\",\n lang: str = \"en-US\",\n return_results: Optional[List] = None,\n spans: Optional[List] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n exclude: Optional[List[str]] = None,\n ) -> RoutingResponse: # noqa: E501\n resp = self.routing_api.route(\n transport_mode=\"car\",\n origin=origin,\n destination=destination,\n via=via,\n origin_place_options=origin_place_options,\n destination_place_options=destination_place_options,\n via_place_options=via_place_options,\n destination_waypoint_options=destination_waypoint_options,\n via_waypoint_options=via_waypoint_options,\n departure_time=departure_time,\n routing_mode=routing_mode,\n alternatives=alternatives,\n units=units,\n lang=lang,\n return_results=return_results,\n spans=spans,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n exclude=exclude,\n )\n return RoutingResponse.new(resp.json())", "def __init__(self, origin, destination):\n self.origin = origin\n self.destination = destination", "def __init__(self):\n self.RRTFamilySolver = RRTFamilyPathPlanner()\n self.PRMSolver = PRMPathPlanner()", "def _parse_routepart(self, data):\n points = [self._parse_trip_point(point) for point in data.findall('./itdPoint')]\n\n path = []\n for coords in data.findall('./itdPathCoordinates/itdCoordinateBaseElemList/itdCoordinateBaseElem'):\n path.append(Coordinates(int(coords.find('y').text) / 1000000, int(coords.find('x').text) / 1000000))\n\n motdata = self._parse_mot(data.find('./itdMeansOfTransport'))\n\n if motdata is None or data.attrib['type'] == 'IT':\n waytype = {\n '98': 'walk',\n '99': 'walk',\n '100': 'walk',\n '101': 'bike',\n '104': 'car',\n '105': 'taxi'\n }[data.find('./itdMeansOfTransport').attrib['type']]\n # 98 = secured connection (gesicherter Anschluss)\n\n way = Way(WayType(waytype), points[0].stop, points[1].stop)\n way.distance = data.attrib.get('distance')\n if way.distance is not None:\n way.distance = float(way.distance)\n duration = data.attrib.get('timeMinute', None)\n if duration is not None:\n way.duration = timedelta(minutes=int(duration))\n if path:\n way.path = path\n return way\n\n else:\n origin, destination, line, ridenum, ridedir, canceled = motdata\n\n if data.find('./genAttrList/genAttrElem[value=\"HIGHSPEEDTRAIN\"]') is not None:\n line.linetype = 
LineType('train.longdistance.highspeed')\n elif data.find('./genAttrList/genAttrElem[value=\"LONG_DISTANCE_TRAIN\"]') is not None:\n line.linetype = LineType('train.longdistance')\n\n train_line = line.linetype in self.train_station_lines\n\n # Build Ride object with known stops\n ride = Ride(line, ridenum)\n ride.canceled = canceled\n ride.direction = ridedir\n for infotext in data.findall('./infoTextList/infoTextListElem'):\n ride.infotexts.append(infotext)\n\n first = None\n last = None\n waypoints = False\n if data.find('./itdStopSeq'):\n new_points = [self._parse_trip_point(point, train_line=train_line) for point in data.findall('./itdStopSeq/itdPoint')]\n if not new_points or new_points[0].stop != points[0].stop:\n new_points.insert(0, points[0])\n if new_points[-1].stop != points[1].stop:\n new_points.append(points[1])\n points = new_points\n waypoints = True\n\n for p in points:\n if not waypoints and first is None:\n ride.append(None)\n pointer = ride.append(p)\n if first is None:\n first = pointer\n last = pointer\n\n if origin is not None:\n if origin != ride[0].stop:\n ride.prepend(None)\n ride.prepend(TimeAndPlace(Platform(origin)))\n else:\n ride.prepend(None)\n\n if destination is not None:\n if destination != ride[-1].stop:\n ride.append(None)\n ride.append(TimeAndPlace(Platform(destination)))\n else:\n ride.append(None)\n\n segment = ride[first:last]\n paths = self._split_path(path, [p.platform.coords for p in segment])[:-1]\n for i, point in segment.items():\n if not paths:\n break\n segment.ride._paths[i] = paths.pop(0)\n return segment", "def a_star(self, mapdata, start, goal):\n\n print \"Inside A star\"\n rospy.loginfo(\"Generate path from (%d,%d) to (%d,%d)\" % (start[0], start[1], goal[0], goal[1]))\n if not PathPlanner.is_cell_walkable(mapdata, goal[0], goal[1]):\n rospy.logerr(\"not walkable goal\")\n return []\n #calculated from goal\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n frontier_msg = GridCells()\n frontier_cells = []\n for e in frontier.elements:\n frontier_cells.append(PathPlanner.grid_to_world(mapdata, e[1][0], e[1][1]))\n frontier_msg.header = mapdata.header\n frontier_msg.header.stamp = rospy.get_rostime()\n frontier_msg.cell_width = mapdata.info.resolution\n frontier_msg.cell_height = mapdata.info.resolution\n frontier_msg.cells = frontier_cells\n expanded_msg = GridCells()\n expanded_cells = []\n for e in cost_so_far: \n expanded_cells.append(PathPlanner.grid_to_world(mapdata, e[0], e[1]))\n \n expanded_msg.header = mapdata.header\n expanded_msg.header.stamp = rospy.get_rostime()\n expanded_msg.cell_width = mapdata.info.resolution\n expanded_msg.cell_height = mapdata.info.resolution\n expanded_msg.cells = expanded_cells\n self.expanded_pub.publish(expanded_msg)\n rospy.sleep(0.01)\n\n current = frontier.get()\n\n #creates path\n if current == goal:\n entry = goal\n listOfCoord = []\n while entry != None:\n listOfCoord.append(entry)\n entry = came_from[entry]\n listOfCoord.reverse()\n self.expanded_pub.publish(PathPlanner.createGridcells(mapdata, listOfCoord))\n return listOfCoord\n \n for next in PathPlanner.neighbors_of_8(mapdata, current[0], current[1]):\n new_cost = cost_so_far[current] + 1 #assume cost to move each unit is 1\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost + PathPlanner.euclidean_distance(next[0], next[1], goal[0], goal[1])\n 
frontier.put(next, priority)\n came_from[next] = current\n\n \n return []", "def generate_trivial_tours(self):\n self.routes = []\n for c in range(1, self.vrpdata.NumCust+1):\n self.routes.append(VRP_Route([c]))\n return self.get_objective()", "def __init__(self):\n self.parameter = [[(0,1),(1,1),(0,0),(1,0)],\n [(1,0),(1,0),(0,1),(0,1)],\n [(1,0),(0,1),(1,0),(0,1)],\n [(0,0),(0,0),(0,0),(0,0)]]\n \"\"\"Distance is the length of the whole route from origin to destination\"\"\"\n self.distance = 100\n \"\"\"action sets\"\"\"\n self.action = [0,55,75]\n self.maxSpeed = self.action[-1]\n \"\"\"time period for each stage\"\"\"\n self.time_interval = 0.5\n \"\"\"Number of stages we want to check. Here we can use this to limit the\n travel time, since sometimes we want the driver to arrive within a time window.\n For example, if stage is 4, that means we want the driver to finish the route within \n 2 hours. \"\"\"\n self.stage = 4\n self.time_block = 0.5\n self.distance_block = 25", "def _trace_route(self, start_waypoint, end_waypoint):\n\n # Setting up global router\n if self._grp is None:\n dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)\n grp = GlobalRoutePlanner(dao)\n grp.setup()\n self._grp = grp\n\n # Obtain route plan\n route = self._grp.trace_route(\n start_waypoint.transform.location,\n end_waypoint.transform.location)\n\n return route", "def remove_route(g, origin, destination, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n \n # Removes both directions and returns \n if(choice_dir == \"y\"):\n \n \n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != destination_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != origin_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n \n # Removes one direction and returns\n if(choice_dir == \"n\"):\n for key in g.city_dict:\n if(key == origin_code):\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != destination_code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_out(new_flights_out)\n \n if(key == destination_code):\n old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != origin_code):\n new_flights_in.append(flight)\n g.city_dict[key].set_flights_in(new_flights_in)\n \n return g", "def add_route(self, distance, start, destination):\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = 
[starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # 
nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + 
walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and very_final_path[len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and very_final_path2[len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def plan_path(self, msg):\n # Request the map\n # In case of error, return an empty path\n mapdata = PathPlanner.request_map()\n\n if mapdata is None:\n return Path()\n # Calculate the C-space and publish it\n cspacedata = self.calc_cspace(mapdata, 3)\n # Execute A*\n start = PathPlanner.world_to_grid(mapdata, msg.start.pose.position)\n goal = PathPlanner.world_to_grid(mapdata, msg.goal.pose.position)\n \n path = self.a_star(cspacedata, start, goal) #, self.c_space_array, self.frontier, self.expanded)\n \n # Optimize waypoints\n waypoints = PathPlanner.optimize_path(path)\n # print waypoints\n waypoints.remove(waypoints[0])\n # print waypoints\n\n self.path_pub.publish(self.path_to_message(cspacedata, waypoints))\n # Return a Path message\n return self.path_to_message(cspacedata, waypoints)", "def FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, TimeTableList, TimeTableIndex, StationHourIndex, PathInfo=[]):\r\n PathInfo = PathInfo + [ConnectionInfo]\r\n\r\n if Cond.IfTestRouteSearch:\r\n \tStations = GetAllStationsOfRoute(PathInfo)\r\n \tprint \"\\nStations of Path (%s): ++++++++\" % len(Stations)\r\n \tprint Stations\r\n \tprint \"Route Information:\"\r\n \tprint PrettyStringRouteInfo(PathInfo)\r\n\r\n # check successful termination\r\n # if len(PathInfo) > 1 and ConnectionInfo[ConnInfoInd['station_to']] == EndStation: \r\n if CheckIfPathTerminatesSuccessfully(ConnectionInfo, PathInfo, RouteConditions, EndStation):\r\n \tif Cond.IfTestRouteSearch:\r\n \t\tprint \"End Station is reached!\"\t\r\n \treturn 
[PathInfo]\r\n\r\n # current (this iteration's) path length\r\n CurPathLen = len(PathInfo)\r\n\r\n # get next connections\r\n start_station = ConnectionInfo[ConnInfoInd['station_to']]\r\n departure_hour = ConnectionInfo[ConnInfoInd['arrival_hour']] \t\r\n departure_min = ConnectionInfo[ConnInfoInd['arrival_min']]\r\n\r\n # TEST BU2019\r\n if False:\r\n\t print 'ConnInfoInd: ' + str(ConnectionInfo)\r\n\t print 'start_station,departure_hour,departure_min: %s, %s, %s' % (start_station, departure_hour, departure_min)\r\n\t time.sleep(0.1)\r\n \r\n # mandatory conditions\r\n WaitLimit = RouteConditions[Cond.MaxWaitingTimeAtStation][0]\r\n \r\n # get next connections from the station\r\n ConnectionInfoList = GetListOfNextConnections(TimeTableList, TimeTableIndex, StationHourIndex, start_station, departure_hour, departure_min, WaitLimit)\r\n\r\n # insert on-foot connections (Zu Fuss, ZF) to nearby stations into ConnectionInfoList\r\n # cancel (Tunc 4/3/2019)\r\n if False:\r\n\t StationMeasurementTime = ReqStationMeasureTime\r\n\t \r\n\t if Cond.MaxNumberOfSubsequentStationPassagesOnFoot in RouteConditions \\\r\n\t \tand RouteConditions[Cond.MaxNumberOfSubsequentStationPassagesOnFoot][0] > 0:\r\n\r\n\t\t if RouteConditions.has_key(Cond.MeasureStations):\r\n\t\t \tStationMeasurementTime = RouteConditions[Cond.MeasureStations][1]\r\n\t\t Connections = GetOnFootStationChangeConnections(start_station, departure_hour, departure_min, StationMeasurementTime)\r\n\t\t \r\n\t\t if Connections:\t\t# i.e. if Connections is not None\r\n\t\t \t(OnFootConnections1, OnFootConnections2) = Connections \r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections1)\r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections2)\r\n\r\n if Cond.IfTestRouteSearch:\r\n\t\tprint \"Next connections:\"\r\n\t\tfor c in ConnectionInfoList:\r\n\t\t\tprint c\r\n\t\ttime.sleep(Cond.TestWaitingTime)\r\n\r\n if not ConnectionInfoList:\t\t# Endstation: Node w/o successor nodes\r\n \treturn []\r\n\r\n PathInfoList = []\r\n\r\n for ConnectionInfo in ConnectionInfoList:\r\n\t\tres = Cond.CheckIfConnectionShouldBeSelected(ConnectionInfo, PathInfo, EndStation, RouteConditions)\r\n\r\n\t\t# test\r\n\t\tif Cond.IfTestRouteSearch:\r\n\t\t\tif res == None or res == False:\r\n\t\t\t\tprint \"CheckIfConnectionShouldBeSelected: %s\" % res\r\n\r\n\t \tif res == None: return[] \r\n\t \tif res == False: continue\r\n\r\n\t \t# recursive call\r\n\t\textended_paths = FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, \\\r\n\t\t\tTimeTableList, TimeTableIndex, StationHourIndex, PathInfo)\r\n\r\n\t\t# report status\r\n\t\tif Cond.ReportDuringRouteSearch in RouteConditions:\r\n\t\t\tTimeIntv = default_timer() - Cond.SearchStartTime\r\n\t\t\tRouteSearchReportingIntervalInSeconds = RouteConditions[Cond.ReportDuringRouteSearch][0]\r\n\t\t\tif TimeIntv > Cond.RouteSearchReportCounter * RouteSearchReportingIntervalInSeconds:\r\n\t\t\t\tCond.RouteSearchReportCounter += 1 \r\n\t\t\t\tprint \"%s seconds passed... 
\" % \"{:.2f}\".format(TimeIntv)\r\n\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\t\r\n\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\r\n\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t# append to path list\r\n\t\tfor p in extended_paths:\r\n\t\t\t# no need to recheck route unless current connection is the last one \r\n\t\t\t# LastConnection = (ConnectionInfo == p[-1])\r\n\t\t\tLastConnection = (CurPathLen == len(p) -1 and ConnectionInfo == p[-1])\r\n\t\t\t\r\n\t\t\tif LastConnection:\r\n\r\n\t\t\t\tif Cond.CheckIfRouteShouldBeSelected(p, RouteConditions):\r\n\t\t\t\t\tPathInfoList.append(p)\r\n\t\t\t\t\tCond.SelectedRoutes.append(ApplyAllRouteInfoCorrections(p))\r\n\r\n\t\t\t\t\t# evaluate route\r\n\t\t\t\t\t# cancel for BU2019\r\n\r\n\t\t\t\t\tif Cond.IfTestRouteSearch:\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\\n\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\t\r\n\t\t\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t\t\t\t# test\r\n\t\t\t\t\tIncrementDicValue(Cond.RouteCountPerRouteLength, CurPathLen)\r\n\t\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t# not last connection, no need to recheck the route\r\n\t\t\t\t# PathInfoList.append(p)\r\n\t\t\t\t# IncrementDicValue(SelectedRoutesPerLevel, CurPathLen)\r\n\t\t\t\tpass\r\n \r\n return PathInfoList", "def execute_waypoint_sequence(detail_of_trip):\n\n # rets (route_line, line_points)\n sliced_route_and_line_points = chunk_user_route(detail_of_trip)\n\n sliced_route = sliced_route_and_line_points[0]\n line_points = sliced_route_and_line_points[1]\n\n # Interpolate/Break into 1/10 segments\n segmented_points = interpolate_points(sliced_route, line_points)\n waypoints = find_crime_areas(segmented_points)\n\n # print \"segmented_points\", json.dumps(segmented_points, indent=2)\n print \"\\n\\n\\n\\n\" # compensating for the giant GET request\n return waypoints", "def findRoute(self, x1, y1, x2, y2):\r\n\r\n\t\t# Check to see if the start and end node are the same\r\n\t\tif x1 == x2 and y1 == y2:\r\n\t\t\treturn [(x1, y1)]\r\n\r\n\t\troot_node = DijkstraNode(x1, y1, None, 0)\r\n\t\troot_node.neighbours = self.getNeighbours(x1, y1)\r\n\r\n\t\t# Create a dictionary to store all of the nodes\r\n\t\tall_nodes = {(x1, y1): root_node}\r\n\t\t# If no starting place is found return nothing\r\n\t\tif len(root_node.neighbours) == 0:\r\n\t\t\treturn []\r\n\t\tcurrent_node = root_node\r\n\t\twhile (x2, y2) not in all_nodes:\r\n\r\n\t\t\t# If the algorithm hasn't found the target node and cannot explore further then return empty path\r\n\t\t\tif current_node is None:\r\n\t\t\t\treturn []\r\n\r\n\t\t\tcurrent_node.neighbours = self.getNeighbours(current_node.x, current_node.y)\r\n\r\n\t\t\t# The distance from the root node through the current node to the neighbour\r\n\t\t\tcurrent_neighbour_dist = current_node.dist + 1\r\n\r\n\t\t\tfor neighbour in current_node.neighbours:\r\n\t\t\t\tif neighbour in all_nodes:\r\n\t\t\t\t\tneighbour_node = all_nodes[neighbour]\r\n\t\t\t\t\tif current_neighbour_dist < neighbour_node.dist:\r\n\t\t\t\t\t\t# The new 
best path is through the current node\r\n\t\t\t\t\t\tneighbour_node.parent = current_node\r\n\t\t\t\t\t\tneighbour_node.dist = current_neighbour_dist\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Add a new node if it doesn't exist within the currently explored nodes\r\n\t\t\t\t\tall_nodes[neighbour] = DijkstraNode(neighbour[0], neighbour[1], current_node, current_neighbour_dist)\r\n\r\n\t\t\t# Mark the current node as being explored as you have checked all the neighbours\r\n\t\t\tcurrent_node.explored = True\r\n\r\n\t\t\t# Gets a list of all of the unexplored nodes to check for the next node to explore\r\n\t\t\tunexplored_nodes = [node for _, node in all_nodes.items() if not node.explored]\r\n\r\n\t\t\tif len(unexplored_nodes) > 0:\r\n\t\t\t\t# Go to the next node with the smallest distance that hasn't been explored\r\n\t\t\t\tcurrent_node = min(unexplored_nodes, key=lambda node: node.dist)\r\n\t\t\telse:\r\n\t\t\t\tcurrent_node = None\r\n\r\n\t\t# Make your way back from the target node\r\n\t\tcurrent_node = all_nodes[(x2, y2)]\r\n\t\t# Initialise a list to hold the path going from the target to the root\r\n\t\treversed_path = []\r\n\t\t# This will end when the root node tries to travel to a None node\r\n\t\twhile current_node is not None:\r\n\t\t\t# Add the current node to the list\r\n\t\t\treversed_path.append((current_node.x, current_node.y))\r\n\t\t\t# Travel to the parent node\r\n\t\t\tcurrent_node = current_node.parent\r\n\t\t\t# current_node will be None at the root because the parent of the root node is 'None'\r\n\r\n\t\t# Return the list in the correct order\r\n\t\treturn list(reversed(reversed_path))", "def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def __init__(self, route_idx=None, arr_date=None, dep_time=None, lon=None, dep_date=None, track=None, rt_track=None, id=None, rt_dep_time=None, rt_arr_time=None, name=None, arr_time=None, lat=None, rt_dep_date=None, rt_arr_date=None):\n\n self._route_idx = None\n self._arr_date = None\n self._dep_time = None\n self._lon = None\n self._dep_date = None\n self._track = None\n self._rt_track = None\n self._id = None\n self._rt_dep_time = None\n self._rt_arr_time = None\n self._name = None\n self._arr_time = None\n self._lat = None\n self._rt_dep_date = None\n self._rt_arr_date = None\n\n self.route_idx = route_idx\n if arr_date is not None:\n self.arr_date = arr_date\n if dep_time is not None:\n self.dep_time = dep_time\n self.lon = lon\n if dep_date is not None:\n self.dep_date = dep_date\n self.track = track\n if rt_track is not None:\n self.rt_track = rt_track\n self.id = id\n if rt_dep_time is not None:\n self.rt_dep_time = rt_dep_time\n if rt_arr_time is not None:\n self.rt_arr_time = rt_arr_time\n self.name = name\n if arr_time is not None:\n self.arr_time = arr_time\n self.lat = lat\n if 
rt_dep_date is not None:\n self.rt_dep_date = rt_dep_date\n if rt_arr_date is not None:\n self.rt_arr_date = rt_arr_date", "def find_path(self, origin, destination, max_time = 1):\r\n \r\n # Before we start, let's check we need to do something\r\n if origin == destination or self._heuristic_weight(origin, destination) == 0:\r\n return None\r\n \r\n # Add the starting point to the \"open\" list\r\n self.open_list.append(origin)\r\n self.g_cost[origin] = 0\r\n self.h_cost[origin] = self.f_cost[origin] = self._heuristic_weight(origin, destination)\r\n \r\n self.start_time = lib.clock()\r\n nearest_parent = {}\r\n self.path = PATH_INEXISTENT\r\n \r\n #while (lib.clock() - self.start_time) < max_time:\r\n while len(self.open_list):\r\n # The \"parent\" node, around which we look, is always the first node of the \"open\" list\r\n # This node is transferred to the \"closed\" list\r\n current_parent = self.open_list[0]\r\n self.closed_list.append(current_parent)\r\n del self.open_list[0]\r\n\r\n # The \"parent\" node is the destination : the path has been found.\r\n if current_parent == destination:\r\n self.path = PATH_FOUND\r\n break\r\n\r\n # Set the first element of the open list as the one that has the smallest F-cost\r\n for (i, node) in enumerate(self.open_list):\r\n if self.f_cost[self.open_list[0]] > self.f_cost[node]:\r\n (self.open_list[i], self.open_list[0]) = (self.open_list[0], node)\r\n \r\n # Check the adjacent nodes\r\n children = [road.end for road in current_parent.leaving_roads]\r\n \r\n for child in children:\r\n # Not already in the closed list neither in the open list\r\n if not (child in self.closed_list) and not (child in self.open_list):\r\n # Compute its G-cost, H-cost and F-cost\r\n self.g_cost[child] = self.g_cost[current_parent] + road.weight\r\n self.h_cost[child] = self._heuristic_weight(child, destination)\r\n self.f_cost[child] = self.g_cost[child] + self.h_cost[child]\r\n \r\n nearest_parent[child] = current_parent\r\n \r\n # Add the node to the open list, keeping the order (the first node has the smallest F-cost)\r\n if len(self.open_list) and (self.f_cost[self.open_list[0]] > self.f_cost[child]):\r\n self.open_list.insert(0, child)\r\n else:\r\n self.open_list.append(child)\r\n\r\n # Already in the open list : check to see if this path is a better one than the currently known path\r\n elif child in self.open_list:\r\n # Compute the G-cost of this possible new path\r\n current_g_cost = self.g_cost[current_parent] + road.weight\r\n \r\n # This path is shorter (lower G-cost) : store this path as default to reach this node\r\n if current_g_cost < self.g_cost[child]:\r\n # Set this path as the shortest path to reach this node\r\n nearest_parent[child] = current_parent\r\n self.g_cost[child] = current_g_cost\r\n self.f_cost[child] = self.g_cost[current_parent] + self.h_cost[child] # Do not forget to update the F-cost !\r\n \r\n # Check if the open list is still in the right order\r\n if self.f_cost[self.open_list[0]] > self.f_cost[child]:\r\n i = self.open_list.index(child)\r\n (self.open_list[0], self.open_list[i]) = (self.open_list[i], self.open_list[0])\r\n\r\n # Save the path if it exists.\r\n if self.path == PATH_FOUND:\r\n \r\n current_node = destination\r\n self.path = []\r\n self.path_length = 0\r\n \r\n while current_node != origin:\r\n self.path.insert(0, current_node)\r\n if current_node in nearest_parent:\r\n current_node = nearest_parent[current_node]\r\n else:\r\n raise Exception('ERROR (in gps.find_path()): ill-formed parent list, a node has no 
parent.')\r\n \r\n self.path_length += 1\r\n return self._build_path()\r\n\r\n return None", "def read_routes(routes_source: TextIO, airports: AirportDict) -> RouteDict:\n #RouteDict = Dict[str, Set[str]]\n routes_list = routes_source.readlines()\n d = {}\n src_index = ROUTE_DATA_INDEXES['Source airport']\n dst_index = ROUTE_DATA_INDEXES['Destination airport']\n \n for i in range(len(routes_list)):\n source_airport = get_routes_information(routes_list[i], src_index)\n destination_airport = get_routes_information(routes_list[i], dst_index)\n \n if source_airport in airports and destination_airport in airports\\\n and source_airport not in d:\n \n routes = set() # it's a set\n routes.add(destination_airport)\n d[source_airport] = routes\n \n elif source_airport in airports and destination_airport in \\\n airports and source_airport in d:\n d[source_airport].add(destination_airport)\n return d", "def search_path(self):\n\n nodes = [self.start]\n final_node = None\n \n count = 0\n while True:\n count += 1\n\n if count % self.pick_target == 0:\n pick = self.goal.pos[:2]\n else:\n pick = self.car.random_pos()[:2]\n \n nearest = self.get_nearest_node(nodes, pick)\n\n if count % self.check_dubins == 0:\n solutions = self.dubins.find_tangents(nearest.pos, self.goal.pos)\n dubins_route, cost, valid = self.dubins.best_tangent(solutions)\n \n if valid:\n final_node = nearest\n break\n\n phi = self.get_steering_angle(nearest.pos, pick)\n pos = nearest.pos\n branch = [pos[:2]]\n \n for i in range(self.max_steps):\n pos = self.car.step(pos, phi)\n branch.append(pos[:2])\n \n # check safety of route-----------------------\n if phi == 0:\n safe = self.dubins.is_straight_route_safe(nearest.pos, pos)\n else:\n d, c, r = self.car.get_params(nearest.pos, phi)\n safe = self.dubins.is_turning_route_safe(nearest.pos, pos, d, c, r)\n # --------------------------------------------\n \n if not safe:\n continue\n \n new_node = Node(pos, phi, i+1)\n \n if new_node in nodes:\n continue\n \n new_node.branch = branch\n new_node.parent = nearest\n nodes.append(new_node)\n \n route = self.backtracking(final_node) + dubins_route\n path = self.car.get_path(self.car.start_pos, route)\n print('Total iteration:', count)\n \n return path, nodes", "def handle_set_destination(self, data):\n #If the origin_id is 0, it has not been specified and we must find\n #the closest node to where we are now\n self.dest_node = data.dest_id\n if data.origin_id == 0:\n #Will set self.current_node\n self.get_nearest_node(data.dest_id)\n else:\n self.current_node = data.origin_id\n if self.current_node == data.dest_id:\n self.at_dest = True\n msg = (\"We're already there!\")\n return srvs.SetDestinationResponse(True, msg)\n rospy.wait_for_service('/get_trajectory')\n get_traj = rospy.ServiceProxy('/get_trajectory', srvs.GetTrajectory)\n trajectory = get_traj(False, self.current_node, data.dest_id).trajectory\n self.np_trajectory = to_numpy_trajectory(trajectory)\n self.loop = False\n self.at_dest = False\n msg = (\"Trajectory to destination of vehicle #%i \" % self.vehicle_id +\n \"successfully set.\")\n return srvs.SetDestinationResponse(True, msg)", "def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. 
If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def _instantiate_pathway(self, context):\n # DOCUMENT: Projections SPECIFIED IN A PATHWAY MUST BE A MappingProjection\n # DOCUMENT:\n # Each item in Pathway can be a Mechanism or Projection object, class ref, or specification dict,\n # str as name for a default Mechanism,\n # keyword (IDENTITY_MATRIX or FULL_CONNECTIVITY_MATRIX) as specification for a default Projection,\n # or a tuple with any of the above as the first item and a param dict as the second\n pathway = self.paramsCurrent[PATHWAY]\n self._mech_tuples = []\n self._monitoring_mech_tuples = []\n self._target_mech_tuples = []\n\n from PsyNeuLink.Globals.Run import _get_unique_id\n\n self._standardize_config_entries(pathway=pathway, context=context)\n\n # VALIDATE PATHWAY THEN PARSE AND INSTANTIATE MECHANISM ENTRIES ------------------------------------\n self._parse_and_instantiate_mechanism_entries(pathway=pathway, context=context)\n\n # Identify origin and terminal mechanisms in the process and\n # and assign the mechanism's status in the process to its entry in the mechanism's processes dict\n self.firstMechanism = pathway[0][OBJECT_ITEM]\n self.firstMechanism.processes[self] = ORIGIN\n self._origin_mech_tuples = [pathway[0]]\n self.originMechanisms = MechanismList(self, self._origin_mech_tuples)\n\n self.lastMechanism = pathway[-1][OBJECT_ITEM]\n if self.lastMechanism is self.firstMechanism:\n self.lastMechanism.processes[self] = SINGLETON\n else:\n self.lastMechanism.processes[self] = TERMINAL\n self._terminal_mech_tuples = [pathway[-1]]\n self.terminalMechanisms = MechanismList(self, self._terminal_mech_tuples)\n\n # # Assign process outputState to last mechanisms in pathway\n # self.outputState = self.lastMechanism.outputState\n\n # PARSE AND INSTANTIATE PROJECTION ENTRIES ------------------------------------\n\n self._parse_and_instantiate_projection_entries(pathway=pathway, 
context=context)\n\n self.pathway = pathway\n\n self._instantiate__deferred_inits(context=context)\n\n if self.learning:\n self._check_for_target_mechanism()\n if self.targetMechanism:\n self._instantiate_target_input()\n self._learning_enabled = True\n else:\n self._learning_enabled = False\n\n self._allMechanisms = MechanismList(self, self._mech_tuples)\n self.monitoringMechanisms = MechanismList(self, self._monitoring_mech_tuples)\n self.targetMechanisms = MechanismList(self, self._target_mech_tuples)", "def work(params) -> Union[None, float]:\n try:\n # either HTTP or bindings\n if host:\n path = action if action == \"route\" else \"sources_to_targets\"\n params_str = delimit_tuple(\n tuple((delimit_tuple(x) for x in params)), delimiter=\";\"\n )\n route = requests.get(f\"{host}/{path}/v1/driving/{params_str}\")\n else:\n route = router.route(params) if action == \"route\" else None\n except (RuntimeError, requests.exceptions.BaseHTTPError):\n return None\n\n if (\n random() > 0.95\n ): # assume that large number of routes will be tested, only print sample in debug mode\n LOGGER.debug(f\"Calculated route between {params[0]} and {params[1]}\")\n\n if report:\n result = route.json()\n if action == \"route\":\n try:\n dist = sum([x[\"distance\"] for x in result[\"routes\"]])\n except KeyError:\n LOGGER.critical(\n f\"No route was found from {params[0]} to {params[1]}. \"\n f\"Try regenerating the locations or specify a more narrow bounding box.\"\n )\n return None\n else:\n dists = [\n inner[\"distance\"]\n for outer in route[\"sources_to_targets\"]\n for inner in outer\n ]\n dist: float = mean(filter(lambda x: x is not None, dists))\n\n return dist", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] }\n\n return car_path, drop_off_dict", "def find_route(risk_map: MAP, destination: COORDINATE) -> int:\n # Time for some Dijkstra! 
https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm\n # Per the problem description cost is added when a position is entered, so we start at 0\n heap = [(0, (0, 0))] # distance (risk), node\n min_risk = defaultdict(lambda: math.inf, {(0, 0): 0})\n seen = set()\n while heap:\n # Use heapq, as we want to select the next unvisited node with the lowest potential distance\n risk, node = heappop(heap)\n\n if node == destination:\n return risk\n\n if node in seen:\n continue\n\n # For each node we visit, we check its unvisited neighbors & find the shortest distance\n # (risk) to the starting node & update the current node accordingly\n seen.add(node)\n for neighbor_coord in iter_neighbors(*node):\n if neighbor_coord not in risk_map:\n continue\n\n tentative_risk = risk + risk_map[neighbor_coord]\n if tentative_risk < min_risk[neighbor_coord]:\n min_risk[neighbor_coord] = tentative_risk\n heappush(heap, (tentative_risk, neighbor_coord))", "def test_planning():\n\n joints1 = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n joints2 = [4.80, 2.92, 1.00, 4.20, 1.45, 1.32]\n\n\n path_planner = PathPlanner(\"manipulator\")\n\n print path_planner.group.get_end_effector_link()\n\n while True:\n raw_input(\"Press Enter to move to position 1\")\n plan = path_planner.plan_to_config(joints1)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)\n\n raw_input(\"Press Enter to move to position 2\")\n plan = path_planner.plan_to_config(joints2)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)", "def print_solution(self):\n print(f'Objective: {self.solution.ObjectiveValue()}')\n total_distance = 0\n total_load = 0\n max_route_distance = 0\n for vehicle_id in range(self.data['num_vehicles']):\n index = self.routingManager.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not self.routingManager.IsEnd(index):\n node_index = self.manager.IndexToNode(index)\n route_load += self.data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(self.data['names'][node_index], route_load)\n\n previous_index = index\n index = self.solution.Value(self.routingManager.NextVar(index))\n route_distance += self.routingManager.GetArcCostForVehicle(\n previous_index, index, vehicle_id\n )\n print(route_distance)\n\n plan_output += '{0}, Load({1}) \\n '.format(self.data['names'][self.manager.IndexToNode(index)], route_load)\n\n plan_output += 'Distance of the route: {}\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n\n print('Total distance of all routes: {}km'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))", "def solve_inputfile(inputfile):\n graph,source,homes,indexToLoc = graph_file_io.graph_from_input(inputfile)\n cost, dropoffs, route, num_clusters = optimal_route(graph,homes,source)\n named_route = [indexToLoc[r] for r in route]\n named_dropoffs = get_named_dict(dropoffs,indexToLoc)\n return named_route, named_dropoffs", "def optimal_route(graph,homes,source):\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = 
range(1,number_of_homes+1)\n optimal_cost = np.Inf\n optimal_dropoffs = dict()\n optimal_route = []\n optimal_num_clusters = 0\n\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n if cost < optimal_cost:\n optimal_cost = cost\n optimal_route = route \n optimal_dropoffs = dropoffs\n optimal_num_clusters = num_clusters\n\n return optimal_cost, optimal_dropoffs, optimal_route, optimal_num_clusters", "def two_user_route_statistics(i,j, source_data, destination_data, source_destination_data, delta=1.2):\n\toccupancy_ratio = 0.0\n\tminimum_distance_so_far = 0.0\n\tcommon_travel_distance = 0.0\n\n\ttry:\n\t\tif source_destination_data[j][i] + source_data[i][j] <= 1.2*source_destination_data[i][i] and source_destination_data[j][i] + destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[j][i] + source_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\tsecond = ((source_destination_data[j][i] + destination_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\toccupancy_ratio = (first+second)/2\n\t\t\tcommon_travel_distance = source_destination_data[j][i]\n\t\t\tminimum_distance_so_far = source_data[i][j] + source_destination_data[j][i] + destination_data[i][j]\n\n\t\tif source_destination_data[i][j] + destination_data[j][i] <= 1.2*source_destination_data[i][i] and source_destination_data[i][j] + source_data[j][i] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[i][j] + destination_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\t\t\n\t\t\tsecond = ((source_destination_data[i][j] + source_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\n\t\t\ttotal_distance = source_data[j][i] + source_destination_data[i][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[i][j]+source_destination_data[j][j]+destination_data[j][i] <= 1.2*source_destination_data[i][i]:\n\t\t\tfirst = (1)\n\t\t\tsecond = (source_destination_data[j][j]/(source_data[i][j]+source_destination_data[j][j]+destination_data[j][i]))\n\n\t\t\ttotal_distance = source_data[i][j] + source_destination_data[j][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[j][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[j][i]+source_destination_data[i][i]+destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = (source_destination_data[i][i]/(source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]))\n\t\t\tsecond = (1)\n\n\t\t\ttotal_distance = source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][i]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\texcept Exception as e:\n\t\toccupancy_ratio = 1.0\n\t\tminimum_distance_so_far = 0.0\n\t\tcommon_travel_distance = 0.0\n\n\n\treturn 
occupancy_ratio, common_travel_distance, minimum_distance_so_far", "def new_journey(self, starting_point, end_point, mode=None):\n self.starting_point = starting_point\n self.end_point = end_point\n self.mode = mode if mode else self.default_mode\n self._heading = \"\"\n self._footer = \"\"\n self._steps = []\n self._found = False\n\n # Let's make sure that mode is valid\n if self.mode.lower() not in self.valid_modes:\n self._heading = \"The mode of travel must be either {}.\".format(\n \", \".join(x for x in self.valid_modes[:-1]) + \" or \" + self.valid_modes[-1]\n )\n return self\n\n # Grab the directions, check for an error\n try:\n self._directions = Directions().directions(self.starting_point, self.end_point, self.mode)\n except (NoResults, InvalidRequest, GmapException) as e:\n self._heading = \"We couldn't find ({}) directions from: {}, to {}.\".format(\n self.mode,\n self.starting_point,\n self.end_point\n )\n except (RateLimitExceeded, RequestDenied) as e:\n self._heading = \"Google is a little busy at the moment, or for some reason our request has been \" \\\n \"denied. Wait a while, and then try again.\"\n else:\n if self._directions:\n self._found = True\n self._heading = \"These are the steps for the ({}) journey from {} to {}.\".format(\n self.mode,\n self._directions[0]['legs'][0]['start_address'],\n self._directions[0]['legs'][0]['end_address'],\n )\n self._steps = [\n \"{:3}. {} ({} / {})\".format(\n counter + 1,\n Markup(step['html_instructions']).striptags(),\n step['distance']['text'],\n step['duration']['text']\n ) for counter, step in enumerate(self._directions[0]['legs'][0]['steps'])\n ]\n self._footer = self._directions[0]['copyrights']\n\n return self", "def move_astar(self, dt, destination):\r\n destination_grid_position = self.map.get_grid_position_of_point(destination)\r\n if self.destination_queue or self.is_moving:\r\n self.move(dt)\r\n return\r\n # Determine grid position of destination\r\n if destination_grid_position == self.grid_position:\r\n return\r\n def reconstruct_path(came_from, current_node):\r\n total_path = []\r\n current_node_coordinates_center = self.map.grid_coordinates[current_node[0]][current_node[1]]\r\n total_path.append(Point(current_node_coordinates_center[0] + self.map.grid_width / 2,\r\n current_node_coordinates_center[1] + self.map.grid_width / 2))\r\n while current_node in came_from.keys():\r\n current_node = came_from[current_node]\r\n current_node_coordinates_center = self.map.grid_coordinates[current_node[0]][current_node[1]]\r\n total_path.append(Point(current_node_coordinates_center[0] + self.map.grid_width / 2,\r\n current_node_coordinates_center[1] + self.map.grid_width / 2))\r\n return total_path\r\n def heuristic_function(grid_position):\r\n \"\"\"Calculates euclidean distance\"\"\"\r\n grid_position_coordinates_center = self.map.grid_coordinates[grid_position[0]][grid_position[1]]\r\n destination_grid_position_coordinates_center = self.map.grid_coordinates[destination_grid_position[0]][destination_grid_position[1]]\r\n return int(euclidean_distance(Point(grid_position_coordinates_center[0] + self.map.grid_width / 2, grid_position_coordinates_center[1] + self.map.grid_width / 2),\r\n Point(destination_grid_position_coordinates_center[0] + self.map.grid_width / 2, destination_grid_position_coordinates_center[1] + self.map.grid_width / 2)))\r\n open_set = [self.grid_position]\r\n came_from = {}\r\n g_scores = {self.grid_position: 0}\r\n f_scores = {self.grid_position: heuristic_function(self.grid_position)}\r\n current_node = 
None\r\n distance_to_corner_neighbor = sqrt(2 * (abs(self.map.grid_width) ** 2))\r\n while open_set:\r\n # Determine lowest f-score node\r\n lowest_f_score_node = None\r\n for node in open_set:\r\n if lowest_f_score_node is None:\r\n lowest_f_score_node = node\r\n elif f_scores[node] < f_scores[lowest_f_score_node]:\r\n lowest_f_score_node = node\r\n current_node = lowest_f_score_node\r\n if current_node == destination_grid_position:\r\n total_path = reconstruct_path(came_from, current_node)\r\n for i in range(0, len(total_path)):\r\n next_node = total_path.pop()\r\n self.add_to_destination_queue(next_node)\r\n self.move(dt)\r\n return\r\n open_set.remove(current_node)\r\n corner_neighbors = self.map.get_neighboring_corner_grid_positions(current_node)\r\n edge_neighbors = self.map.get_neighboring_edge_grid_positions(current_node)\r\n for corner_neighbor in corner_neighbors:\r\n if corner_neighbor[1] < 0 or corner_neighbor[1] > self.map.ncols - 1:\r\n continue\r\n if corner_neighbor[0] < 0 or corner_neighbor[0] > self.map.nrows - 1:\r\n continue\r\n if self.enable_collisions:\r\n if self.map.map_array[corner_neighbor[0]][corner_neighbor[1]] == 1:\r\n continue\r\n # This is to prevent movement diagonally through two touching wall's corners\r\n corner_neighbor_row = corner_neighbor[0]\r\n corner_neighbor_col = corner_neighbor[1]\r\n current_node_row = current_node[0]\r\n current_node_col = current_node[1]\r\n relative_row_position_to_current_node = corner_neighbor_row - current_node_row\r\n relative_col_position_to_current_node = corner_neighbor_col - current_node_col\r\n if self.map.map_array[current_node_row][current_node_col + relative_col_position_to_current_node] == 1 and self.map.map_array[current_node_row + relative_row_position_to_current_node][current_node_col] == 1:\r\n continue\r\n tentative_g_score = g_scores[current_node] + distance_to_corner_neighbor\r\n better_path = False\r\n if g_scores.setdefault(corner_neighbor, None) is None:\r\n better_path = True\r\n elif tentative_g_score < g_scores[corner_neighbor]:\r\n better_path = True\r\n if better_path:\r\n came_from[corner_neighbor] = current_node\r\n g_scores[corner_neighbor] = tentative_g_score\r\n f_scores[corner_neighbor] = tentative_g_score + heuristic_function(corner_neighbor)\r\n if corner_neighbor not in open_set:\r\n open_set.append(corner_neighbor)\r\n for edge_neighbor in edge_neighbors:\r\n if edge_neighbor[1] < 0 or edge_neighbor[1] > self.map.ncols - 1:\r\n continue\r\n if edge_neighbor[0] < 0 or edge_neighbor[0] > self.map.nrows - 1:\r\n continue\r\n if self.map.map_array[edge_neighbor[0]][edge_neighbor[1]] == 1 and self.enable_collisions:\r\n continue\r\n tentative_g_score = g_scores[current_node] + self.map.grid_width\r\n better_path = False\r\n if g_scores.setdefault(edge_neighbor, None) is None:\r\n better_path = True\r\n elif tentative_g_score < g_scores[edge_neighbor]:\r\n better_path = True\r\n if better_path:\r\n came_from[edge_neighbor] = current_node\r\n g_scores[edge_neighbor] = tentative_g_score\r\n f_scores[edge_neighbor] = tentative_g_score + heuristic_function(edge_neighbor)\r\n if edge_neighbor not in open_set:\r\n open_set.append(edge_neighbor)\r\n return", "def initCoordination(self, fromTime, toTime, fluct, criterion_type):\n #reset\n self.MsgReceiveCount_interval = 0\n self.MsgSendCount_interval = 0\n self.origins = []\n self.pathLengths = []\n self.globalMin = []\n self.globalMinSchedIdx = []\n self.overall_min = 0\n self.overall_max_path_length = 0\n self.min_path = []\n 
self.min_path_schedules = []\n self.chosenSchedule = []\n self.schedules = []\n self.EConsumptionChosenSchedule = []\n self.chosenScheduleIndex = -1\n\n\n #save data\n self.fromTime = fromTime\n self.toTime = toTime\n self.noOfTimesteps = (self.toTime - self.fromTime) / self.stepSize + 1\n self.EFluctuationCurve = fluct\n self.OPTcriterion = criterion_type\n\n self.overall_max_path_length = 0\n if self.OPTcriterion == 'maxmindiff':\n self.overall_min = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n self.overall_min = 0\n for a in range(len(self.EFluctuationCurve)):\n self.overall_min += abs(self.EFluctuationCurve[a])\n\n #self.globalMin = [max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n\n\n #calc schedule pool and schedule load curves\n if not self.isGasBoiler():\n self.calcSchedulePool(fromTime, toTime)\n self.calcScheduleConsumptionCurves()\n return", "def _calculate_emissions(self):\n parameters = self._get_pollutants_for_vehicle()\n\n self.routes = RouteSet()\n\n if \"routes\" not in self._json_data:\n log.debug(\"Error in returned JSON data from web service.\")\n log.debug(\"data: {}\".format(self._json_data))\n return\n\n # Create a \"set\" of Routes. The planner web service will\n # return 2-4 routes with different paths.\n for idx, r in enumerate(self._json_data[\"routes\"][\"features\"]):\n attributes = r.get(\"attributes\")\n route = Route(distance=attributes.get(\"Total_Meters\"),\n minutes=attributes.get(\"Total_Minutes\"),\n path=r.get(\"geometry\").get(\"paths\")[0], id = idx)\n self.routes.add(route)\n\n log.debug(\"Nr of routes: {}\".format(len(self.routes)))\n for i, route in enumerate(self.routes):\n # A list of x,y,z points that all together represents the route\n path_coordinates = route.path\n distances = []\n\n # Nifty little trick to loop over 'path_coordinates',\n # but keep a reference to the 'prev' item to calculate the\n # distance between them\n iter_points = iter(path_coordinates)\n prev = next(iter_points)\n for point in path_coordinates:\n if not distances:\n # first point\n distances.append(Planner._get_distance_3d(prev, point) / 1000)\n else:\n distances.append(distances[-1] + Planner._get_distance_3d(prev, point) / 1000)\n\n point_slope = Planner._get_slope(prev, point)\n\n # Calculate emission for each pollutants the user has asked for\n for p in self._pollutants:\n parms = [x for x in parameters if x.pollutant.name.startswith(p)]\n calc_emission = self.get_emission(parms, point_slope)\n route.add_pollutant(p, calc_emission)\n\n prev = point\n\n route.add_distances(distances)", "def _extract_solution(self, manager: RoutingIndexManager, routing: RoutingModel, assignment: Assignment, indices_to_visit: List[int]) -> Dict[str, Any]:\n sln = {\"objective\": assignment.ObjectiveValue()}\n \n stop_indices = []\n index = routing.Start(0)\n while not routing.IsEnd(index):\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n sln[\"order\"] = stop_indices\n return sln", "def routes_with_criteria(self, src, target, criteria):\n\n # BFS\n routes = []\n q = deque() # <- [ ... 
] <-\n stops = 0\n distance = 0 # not true for this app, but it works out in the conditional check\n q.append((src, stops, distance, [src]))\n\n while q:\n # this city, stops to this city, distance to this city, route to this city\n city, stops, distance, route = q.popleft()\n if target == city and distance: # no self-loops!\n r = list(route)\n routes.append(r)\n for dest, cost in self.G[city].items():\n if criteria(stops + 1, distance + cost):\n new_route = list(route)\n new_route.append(dest)\n q.append((dest, stops + 1, distance + cost, new_route))\n return routes", "def __init__(__self__, *,\n destination_region_id: pulumi.Input[str],\n destination_zone_id: pulumi.Input[str],\n source_region_id: pulumi.Input[str],\n source_zone_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n group_name: Optional[pulumi.Input[str]] = None,\n rpo: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"destination_region_id\", destination_region_id)\n pulumi.set(__self__, \"destination_zone_id\", destination_zone_id)\n pulumi.set(__self__, \"source_region_id\", source_region_id)\n pulumi.set(__self__, \"source_zone_id\", source_zone_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if group_name is not None:\n pulumi.set(__self__, \"group_name\", group_name)\n if rpo is not None:\n pulumi.set(__self__, \"rpo\", rpo)", "def get_pathologic_covering_routes(n_pl, n_target, attacker_strategy, target_values):\n # computes the coefficient used by the greedy oracle to choose routes\n targets_coeff = np.transpose(np.multiply(attacker_strategy, target_values))\n\n # randomly selects the player for which the non optimal choice will be made\n wrong_pl = randint(1, n_pl)\n\n # generate the non optimal route randomly\n n_covered_targets = randint(n_pl,n_target-1)\n non_opt_action = np.zeros(n_target)\n for i in range(0, n_covered_targets):\n random_covered_target = randint(0, n_target-1)\n non_opt_action[random_covered_target] = 1\n\n # compute the value of the non optimal route\n non_opt_val = get_value_single_route(non_opt_action, targets_coeff)\n\n # generate routes that individually have values smaller than the best greedy route but taken together perform\n # at least as well. [[0,1,...],[...],...] 
a[r][t]=1 iff t is covered by r.\n # The returned list should have n_pl - 1 routes\n opt_routes = get_opt_routes(n_pl, non_opt_action)\n\n I={}\n for pl in range(1, n_pl+1):\n\n n_r = randint(0, MAX_ROUTES)\n temp = lil_matrix((n_r+1, n_target), dtype='int8')\n\n if pl == wrong_pl:\n # put the non opt route in the bucket\n for t in non_opt_action.nonzero():\n temp[0,t] = 1\n else:\n for t in opt_routes.pop().nonzero():\n temp[0,t] = 1\n\n # generate other random routes with single value less than the non_opt_value\n for r in range(1, n_r):\n new_route = get_r_limited_val(non_opt_val, targets_coeff)\n\n for t in new_route.nonzero():\n temp[r,t] = 1\n\n I[pl] = temp.tocsr()\n\n return I", "def main():\n\n print('Drones capacity = {}'.format(DRONES_CAPACITY))\n\n # Instantiate the data of the problem\n data = create_data_model(MAX_POINT_DEMAND, USE_CACHE)\n\n # Create the routing index manager\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model\n routing = pywrapcp.RoutingModel(manager)\n\n # Defining weights of the edges\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Adding capacity constraints.\n def demand_callback(from_index):\n \"\"\"Returns the demand for tests of the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['demands'][from_node]\n\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n\n def counter_callback(from_index):\n \"\"\"Returns the number of stops done at the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['counter'][from_node]\n\n counter_callback_index = routing.RegisterUnaryTransitCallback(\n counter_callback)\n\n # Limiting the number of tests each drone can carry\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n\n # Limiting the overall number of nodes a drone can serve in one tour\n routing.AddDimensionWithVehicleCapacity(\n counter_callback_index,\n 0, # null capacity slack\n data['vehicle_max_number_of_stops'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Counter')\n\n # Setting parameters of the solver\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = HEURISTIC_TIME_LIMIT\n search_parameters.log_search = True\n\n\n print('START SOLVING')\n assignment = routing.SolveWithParameters(search_parameters)\n\n if assignment:\n print_and_save_solution(data, manager, routing, assignment)", "def _route_to_dest(self):\n # Ask the network\n self.route = self.network.determine_route(self.start, self.dest)\n # Set the index to where we are now\n self.route_index = 0", "def create_path_new(self):\n\n \n first_lasts = []\n first_lasts.append([0,0])\n matrices = 
[]\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n nodes_visited = []\n nodes_visited.append([])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n prev_gps = (-1.0,-1.0)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n new_edges = self.find_edges((lat,lon),prev_gps)\n for add_edge in new_edges:\n edge_sets[matrices_index][add_edge] = 1\n else:\n edge_sets[matrices_index][edge_num] = 1\n\n if coords[0] == -1:\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n nodes_visited.append([])\n matrices_index += 1\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n normalized = normalize_simple(self.graph.lines[cur_line])\n prev_gps = (lat,lon)\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n #for coords in nodes_visited[best_index]:\n # self.graph.node_visit(self.trip_id,coords)\n\n #if self.trip_id not in self.graph.trip_id2line_num:\n # self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],first_lasts[best_index]", "def search_paths_agent_to_goal(self, robot_x, robot_y, goal_x, goal_y, G, road_node_Nos, road_node_info,\n road_lines, road_directions, road_lines_num, node_edges):\n # add target node\n target_node_coordinate = np.zeros((1, 2))\n target_node_coordinate[0][0] = goal_x\n target_node_coordinate[0][1] = goal_y\n target_node = None\n\n for (key, value) in road_node_info.items():\n if math.sqrt((value[0]-target_node_coordinate[0][0])**2 + (value[1]-target_node_coordinate[0][1])**2) <= 0.01:\n target_node = key\n\n if target_node == 0:\n print(target_node)\n raise Exception(\"wrong target node\", target_node)\n\n # Check whether the robot is on the road node or not\n at_node = False\n for (key, value) in road_node_info.items():\n if key == 0:\n continue\n if value[0] == robot_x and value[1] == robot_y:\n at_node = True\n agent_node_No = key\n\n if at_node == False:\n # add agent node\n agent_node_No = 0\n agent_node_coordinate = np.zeros((1, 2))\n agent_node_coordinate[0][0] = robot_x\n agent_node_coordinate[0][1] = 
robot_y\n agent_node = dict(zip([agent_node_No], agent_node_coordinate))\n road_node_info.update(agent_node)\n\n # add node\n env_node_Nos = [agent_node_No] + road_node_Nos\n G.add_nodes_from(env_node_Nos)\n\n # add edges from agent to the nearest road line\n # calculate the distance from the agent to the lines\n agent_line_dist = []\n for i in range(road_lines_num):\n cross = (road_lines[i][2] - road_lines[i][0]) * (agent_node_coordinate[0][0] - road_lines[i][0]) \\\n + (road_lines[i][3] - road_lines[i][1]) * (agent_node_coordinate[0][1] - road_lines[i][1])\n if cross <= 0:\n agent_line_dist.append(np.sqrt((agent_node_coordinate[0][0] - road_lines[i][0]) ** 2\n + (agent_node_coordinate[0][1] - road_lines[i][1]) ** 2))\n continue\n\n d2 = (road_lines[i][2] - road_lines[i][0]) ** 2 + (road_lines[i][3] - road_lines[i][1]) ** 2\n if cross >= d2:\n agent_line_dist.append(np.sqrt((agent_node_coordinate[0][0] - road_lines[i][2]) ** 2\n + (agent_node_coordinate[0][1] - road_lines[i][3]) ** 2))\n continue\n r = cross / d2\n p0 = road_lines[i][0] + (road_lines[i][2] - road_lines[i][0]) * r\n p1 = road_lines[i][1] + (road_lines[i][3] - road_lines[i][1]) * r\n agent_line_dist.append(\n np.sqrt((agent_node_coordinate[0][0] - p0) ** 2 + (agent_node_coordinate[0][1] - p1) ** 2))\n\n # find the nearest line index\n agent_line_dist_shortest = float(\"inf\")\n agent_line_shortest_index = 0\n\n for index, item in enumerate(agent_line_dist):\n if item < agent_line_dist_shortest:\n agent_line_shortest_index = index\n agent_line_dist_shortest = item\n\n # find the shortest line's node\n agent_line_shortest_node0 = None\n agent_line_shortest_node1 = None\n\n for (key, value) in road_node_info.items():\n if value[0] == road_lines[agent_line_shortest_index][0] and value[1] == \\\n road_lines[agent_line_shortest_index][1]:\n agent_line_shortest_node0 = key\n if value[0] == road_lines[agent_line_shortest_index][2] and value[1] == \\\n road_lines[agent_line_shortest_index][3]:\n agent_line_shortest_node1 = key\n\n # add new edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.append([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.append([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.append([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.append([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n G.add_edges_from(node_edges)\n simple_paths_list = list()\n if agent_node_No not in G or target_node not in G:\n has_path = False\n G.clear()\n else:\n if nx.has_path(G, source=agent_node_No, target=target_node):\n simple_paths = 
nx.shortest_simple_paths(G, source=agent_node_No, target=target_node, weight='len')\n\n for path in simple_paths:\n simple_paths_list.append(path)\n\n for path in simple_paths_list:\n if path[1] == agent_line_shortest_node1:\n path[0] = agent_line_shortest_node0\n elif path[1] == agent_line_shortest_node0:\n path[0] = agent_line_shortest_node1\n else:\n raise ValueError('First node Error!')\n\n remove_paths_list = list()\n for path in simple_paths_list:\n for path_rest in simple_paths_list[simple_paths_list.index(path) + 1:]:\n if path == path_rest[- len(path):]:\n remove_paths_list.append(path_rest)\n\n for remove_path in remove_paths_list:\n if remove_path in simple_paths_list:\n simple_paths_list.remove(remove_path)\n\n # Choose 1 simple paths\n if len(simple_paths_list) > 1:\n simple_paths_list = simple_paths_list[0:1]\n\n # remove edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n has_path = True\n G.clear()\n else:\n # remove edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n has_path = False\n G.clear()\n else:\n G.add_edges_from(node_edges)\n simple_paths_list = list()\n # 
Check whether the agent node is on the road network\n if agent_node_No not in G or target_node not in G:\n has_path = False\n G.clear()\n else:\n # Check whether a path exists between the agent node and the target\n if nx.has_path(G, source=agent_node_No, target=target_node):\n # Extract all simple paths\n simple_paths = nx.shortest_simple_paths(G, source=agent_node_No, target=target_node, weight='len')\n\n for path in simple_paths:\n simple_paths_list.append(path)\n\n # Remove paths that contain loops\n remove_paths_list = list()\n for path in simple_paths_list:\n for path_rest in simple_paths_list[simple_paths_list.index(path) + 1:]:\n if path == path_rest[- len(path):]:\n remove_paths_list.append(path_rest)\n\n for remove_path in remove_paths_list:\n if remove_path in simple_paths_list:\n simple_paths_list.remove(remove_path)\n\n # Keep at most 2 paths\n if len(simple_paths_list) > 2:\n simple_paths_list = simple_paths_list[0:2]\n\n # Confirm a path exists\n has_path = True\n G.clear()\n else:\n # No path exists\n has_path = False\n G.clear()\n\n return simple_paths_list, has_path", "def connect(self, origin, destination):\n origin_section = self.sections[origin]\n destination_section = self.sections[destination]\n \"The junction of both origin and destination must be the same.\"\n \"\"\"A turn transitions a vehicle from the last segment of the origin to the first\n segment of the destination.\"\"\"\n edge = self.graph.add_edge(origin_section[-1], destination_section[0])\n self.edge_weights[edge] = 0 # The distance between the same location is 0.\n return", "def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) 
not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def print_solution(data, manager, routing, assignment):\n\n html_name = './app/templates/res.html'\n depot_latlon = str2ll(depot)\n gmap = gmplot.GoogleMapPlotter(depot_latlon[0], depot_latlon[1], 15, api)\n\n total_distance = 0\n total_load = 0\n\n routes = []\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n\n route = []\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n\n route.append(node_index)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n route.append(0)\n\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n print(plan_output)\n\n total_distance += route_distance\n total_load += route_load\n routes.append([route, route_distance, route_load])\n \n # coordinates = []\n # for i in range(len(route)-1):\n # src = data['addresses'][route[i]]\n # dst = data['addresses'][route[i+1]]\n \n # latlon = str2ll(src)\n # if route[i] == 0:\n # color = 'pink'\n # else:\n # color = colors[vehicle_id%len(colors)]\n\n # gmap.marker(latlon[0], latlon[1], color)\n \n # now = datetime.now()\n # directions_result = gmaps.directions(src,\n # dst,\n # mode=\"driving\",\n # departure_time=now)[0]\n \n # polyline = directions_result['overview_polyline']['points']\n # coordinates = np.asarray(decode_polyline(polyline))\n # gmap.plot(coordinates[:, 0], coordinates[:, 1], colors[vehicle_id%len(colors)], edge_width=2)\n \n\n \n # if gmap is not None:\n # #save plot as html\n # gmap.draw(html_name)\n\n print('Total distance of all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n\n return routes\n\n #draw html for this", "def print_and_save_solution(data, manager, routing, assignment):\n total_distance = 0\n total_load = 0\n routes = []\n for vehicle_id in range(data['num_vehicles']):\n route = []\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n point = data['all_points'][node_index]\n point['load'] = route_load\n route.append(point)\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n if route_load > 0:\n routes.append({\n 'stops': route,\n 'distance': route_distance,\n 'load': route_load,\n 'number_of_stops': len(route)\n })\n print('Total distance of all routes: 
{}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n with open('./results/capacity_{}_{}.json'.format(DRONES_CAPACITY, MAX_POINT_DEMAND), 'w') as f:\n simplejson.dump({\n 'routes': routes,\n 'total_load': total_load,\n 'total_distance': total_distance,\n 'number_of_drones_used': len(routes),\n 'total_numer_of_stops': sum([x['number_of_stops'] for x in routes])\n }, f)", "def __createNewRoute(self, tick):\n # import here because python can not handle circular-dependencies\n if self.targetID is None:\n self.sourceID = random.choice(Network.nodes).getID()\n else:\n self.sourceID = self.targetID # We start where we stopped\n # random target\n self.targetID = random.choice(Network.nodes).getID()\n self.currentRouteID = self.id + \"-\" + str(self.rounds)\n self.currentRouterResult = CustomRouter.route(self.sourceID, self.targetID, tick, self)\n if len(self.currentRouterResult.route) > 0:\n traci.route.add(self.currentRouteID, self.currentRouterResult.route)\n # set color to red\n return self.currentRouteID\n else:\n # recursion aka. try again as this should work!\n return self.__createNewRoute(tick)", "def get_robo_route(start=(41.814884, -87.664603), chicago_path=\"chicago.xml\", pothole_path=\"potholes.xml\"):\n\n\t#Acquire the graph of Chicago\n\tprint(\"Opening Chicago\")\n\tChicago = open_chicago_graph(chicago_path)\n\tCpp = CppGraph(Chicago)\n\tCpp.set_start(start)\n\t\n\t#Acquire the set of all potholes that need to be filled\n\tprint(\"Opening Potholes\")\n\tpotholes = open_potholes(pothole_path)\n\t\n\t#Get the shortest paths connecting the potholes and the robot facility\n\tprint(\"Adding potholes to Chicago\")\n\tshort_pathN(Chicago, Cpp, potholes)\n\t\n\t#Find the optimal way to traverse the graph starting from the robot's facility\n\tprint(\"Finding the route\")\n\troute = Cpp.solve()\n\t\n\t#Save the route the robot will take as an image\n\tfig, ax = ox.plot_graph_route(G, route, save=True, filename=\"graph\")\n\treturn route", "def print_solution(data, manager, routing, assignment):\n total_distance = 0\n total_load = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n # print(plan_output)\n total_distance += route_distance\n total_load += route_load\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/route/route_vehicle{vehicle_id}.txt\", \"w\") as file:\n file.write(plan_output)\n file.close()\n print(\"aaa\")\n print('Total cost for all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/load_dist_{data['num_vehicles']}vehicles.txt\", \"w\") as file:\n out_file = \"\"\n out_file += str(total_load) + \",\" + str(total_distance)\n file.write(out_file)\n file.close() # OPEN AND ANALYZE LATER 
WITH PANDAS", "def AssignRoutingEngine(self, Routing):\n self.routing = Routing(self.topo)\n self.Qlearning_enable = 0\n if str(Routing) == \"Routing.Qlearning_SpineLeaf.Qlearning\":\n self.Qlearning_enable = 1\n self.state = [0.0]*len(self.topo.GetLinks())\n #print len(self.topo.GetLinks())\n self.reward = [0.0, 0.0]\n self.stateId = 0\n self.logDir = \"LogInfo/\"\n self.logfname = \"StateLog.csv\"\n self.logf = open(self.logDir + self.logfname, \"w\")\n #self.updatenum=0\n\n # We can get path by\n # path_3_5 = self.routing.GetPath(3,5) # result is a list with node ids", "def launch_parallel_rt_pairs():\r\n # Create the parser\r\n parser = argparse.ArgumentParser(description=globals().get(\"__doc__\", \"\"), fromfile_prefix_chars='@')\r\n\r\n # Define Arguments supported by the command line utility\r\n\r\n # --pair-type parameter\r\n help_string = \"The type of origin-destination pair assignment to use. Either one_to_one or many_to_many.\"\r\n parser.add_argument(\"-pt\", \"--pair-type\", action=\"store\", dest=\"pair_type_str\", help=help_string, required=True)\r\n\r\n # --origins parameter\r\n help_string = \"The full catalog path to the feature class containing the origins.\"\r\n parser.add_argument(\"-o\", \"--origins\", action=\"store\", dest=\"origins\", help=help_string, required=True)\r\n\r\n # --origins-id-field parameter\r\n help_string = \"The name of the unique ID field in origins.\"\r\n parser.add_argument(\r\n \"-oif\", \"--origins-id-field\", action=\"store\", dest=\"origin_id_field\", help=help_string, required=True)\r\n\r\n # --destinations parameter\r\n help_string = \"The full catalog path to the feature class containing the destinations.\"\r\n parser.add_argument(\"-d\", \"--destinations\", action=\"store\", dest=\"destinations\", help=help_string, required=True)\r\n\r\n # --destinations-id-field parameter\r\n help_string = \"The name of the unique ID field in destinations.\"\r\n parser.add_argument(\r\n \"-dif\", \"--destinations-id-field\", action=\"store\", dest=\"dest_id_field\", help=help_string, required=True)\r\n\r\n # --network-data-source parameter\r\n help_string = \"The full catalog path to the network dataset or a portal url that will be used for the analysis.\"\r\n parser.add_argument(\r\n \"-n\", \"--network-data-source\", action=\"store\", dest=\"network_data_source\", help=help_string, required=True)\r\n\r\n # --travel-mode parameter\r\n help_string = (\r\n \"The name or JSON string representation of the travel mode from the network data source that will be used for \"\r\n \"the analysis.\"\r\n )\r\n parser.add_argument(\"-tm\", \"--travel-mode\", action=\"store\", dest=\"travel_mode\", help=help_string, required=True)\r\n\r\n # --time-units parameter\r\n help_string = \"String name of the time units for the analysis. These units will be used in the output.\"\r\n parser.add_argument(\"-tu\", \"--time-units\", action=\"store\", dest=\"time_units\", help=help_string, required=True)\r\n\r\n # --distance-units parameter\r\n help_string = \"String name of the distance units for the analysis. 
These units will be used in the output.\"\r\n parser.add_argument(\r\n \"-du\", \"--distance-units\", action=\"store\", dest=\"distance_units\", help=help_string, required=True)\r\n\r\n # --max-routes parameter\r\n help_string = \"Maximum number of routes that can be in one chunk for parallel processing of Route solves.\"\r\n parser.add_argument(\r\n \"-mr\", \"--max-routes\", action=\"store\", dest=\"max_routes\", type=int, help=help_string, required=True)\r\n\r\n # --max-processes parameter\r\n help_string = \"Maximum number parallel processes to use for the Route solves.\"\r\n parser.add_argument(\r\n \"-mp\", \"--max-processes\", action=\"store\", dest=\"max_processes\", type=int, help=help_string, required=True)\r\n\r\n # --reverse-direction parameter\r\n help_string = \"Whether to reverse the direction of travel (destination to origin).\"\r\n parser.add_argument(\r\n \"-rd\", \"--reverse-direction\", action=\"store\", type=lambda x: bool(strtobool(x)),\r\n dest=\"reverse_direction\", help=help_string, required=True)\r\n\r\n # --out-routes parameter\r\n help_string = \"The full catalog path to the output routes feature class.\"\r\n parser.add_argument(\"-r\", \"--out-routes\", action=\"store\", dest=\"out_routes\", help=help_string, required=True)\r\n\r\n # --scratch-folder parameter\r\n help_string = \"The full catalog path to the scratch folder where intermediate outputs will be stored.\"\r\n parser.add_argument(\r\n \"-sf\", \"--scratch-folder\", action=\"store\", dest=\"scratch_folder\", help=help_string, required=True)\r\n\r\n # --assigned-dest-field parameter\r\n help_string = (\"The name of the field in origins indicating the assigned destination. \"\r\n \"Required for one_to_one pair-type\")\r\n parser.add_argument(\r\n \"-adf\", \"--assigned-dest-field\", action=\"store\", dest=\"assigned_dest_field\", help=help_string, required=False)\r\n\r\n # --od-pair-table parameter\r\n help_string = \"CSV file holding preassigned OD pairs. Required for many_to_many pair-type.\"\r\n parser.add_argument(\r\n \"-odp\", \"--od-pair-table\", action=\"store\", dest=\"od_pair_table\", help=help_string, required=False)\r\n\r\n # --time-of-day parameter\r\n help_string = (f\"The time of day for the analysis. Must be in {helpers.DATETIME_FORMAT} format. 
Set to None for \"\r\n \"time neutral.\")\r\n parser.add_argument(\"-tod\", \"--time-of-day\", action=\"store\", dest=\"time_of_day\", help=help_string, required=False)\r\n\r\n # --barriers parameter\r\n help_string = \"A list of catalog paths to the feature classes containing barriers to use in the Route.\"\r\n parser.add_argument(\r\n \"-b\", \"--barriers\", action=\"store\", dest=\"barriers\", help=help_string, nargs='*', required=False)\r\n\r\n try:\r\n # Get arguments as dictionary.\r\n args = vars(parser.parse_args())\r\n\r\n # Initialize a parallel Route calculator class\r\n rt_calculator = ParallelRoutePairCalculator(**args)\r\n # Solve the Route in parallel chunks\r\n start_time = time.time()\r\n rt_calculator.solve_route_in_parallel()\r\n LOGGER.info(f\"Parallel Route calculation completed in {round((time.time() - start_time) / 60, 2)} minutes\")\r\n\r\n except Exception: # pylint: disable=broad-except\r\n LOGGER.error(\"Error in parallelization subprocess.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise", "def solve(\n self,\n projections: Projections,\n initial_squad: Squad,\n next_gw: int = None,\n force_chips: Dict[int, str] = None,\n force_players: Dict[str, list] = None,\n force_transfers: Dict[int, dict] = None,\n price_changes: Dict[str, Iterable] = None,\n time_limit: float = None,\n optimizer: type = pulp.GUROBI,\n message: bool = True\n ):\n if next_gw is None:\n next_gw = sorted([int(column.split('_')[0]) for column in projections.columns if column.endswith('Pts')])[0]\n # Set up useful references\n initial_players = initial_squad.players\n initial_itb = initial_squad.itb\n initial_fts = initial_squad.fts\n active_chip = initial_squad.active_chip\n players = projections.index\n positions = ('G', 'D', 'M', 'F')\n teams = projections['Team'].unique()\n gw_interval = list(range(next_gw, next_gw + self.horizon))\n\n # Initialise optimisation model\n prob = LpProblem('FPL_transfer_optimisation')\n\n # Initialise decision variables\n default_args = {'index': players, 'columns': gw_interval, 'column_type': 'gw', 'model': prob}\n lineup = DecisionMatrix.lp_variable('lineup', **default_args)\n bench_gk = DecisionMatrix.lp_variable('bench_gk', **default_args)\n bench_1 = DecisionMatrix.lp_variable('bench_1', **default_args)\n bench_2 = DecisionMatrix.lp_variable('bench_2', **default_args)\n bench_3 = DecisionMatrix.lp_variable('bench_3', **default_args)\n squad = DecisionMatrix.lp_variable('squad', **default_args)\n prob += squad == lineup + bench_gk + bench_1 + bench_2 + bench_3\n squad[next_gw - 1] = pd.Series(squad.index).isin(initial_players).astype(int)\n captain = DecisionMatrix.lp_variable('captain', **default_args)\n vice_captain = DecisionMatrix.lp_variable('vice_captain', **default_args)\n transfer_in = DecisionMatrix.lp_variable('transfer_in', **default_args)\n transfer_out = DecisionMatrix.lp_variable('transfer_out', **default_args)\n itb = DecisionSeries(data=[initial_itb], index=[next_gw - 1], model=prob)\n # itb is previous GW's itb + revenue from outgoing players + cost of incoming players\n for i, gw in enumerate(gw_interval):\n itb[gw] = itb[gw - 1] + (transfer_out[gw] * projections['SV']).sum() - \\\n (transfer_in[gw] * projections['BV']).sum() - self.budget_decay_rate\n\n # Add problem constraints to optimisation model\n prob += squad == squad.lag(1) + transfer_in - transfer_out # New squad is previous squad plus transfers\n prob += squad.drop(next_gw - 1, axis=1) <= 1 # Each player can only appear in 
the squad once\n prob += lineup.sum() == 11 # Lineup contains 11 players\n prob += bench_gk.sum() == 1 # There is 1 bench GK;\n prob += bench_1.sum() == 1 # 1 1st bench slot;\n prob += bench_2.sum() == 1 # 1 2nd bench slot;\n prob += bench_3.sum() == 1 # 1 3rd bench slot;\n prob += captain.sum() == 1 # 1 Captain;\n prob += transfer_out.sum() == transfer_in.sum() # Transfers in must be same as transfers out\n\n prob += vice_captain.sum() == 1 # 1 vice-captain\n prob += captain <= lineup # Captain must be in lineup\n prob += vice_captain <= lineup # Vice-captain must be in lineup\n prob += captain + vice_captain <= 1 # Captain and vice-captain must be different players\n for position, limit in zip(positions, (2, 5, 5, 3)):\n prob += squad[projections['Pos'] == position].sum() == limit # Set squad position structure\n for team in teams:\n prob += squad[projections['Team'] == team].sum() <= 3 # No more than 3 players from each team\n if self.exclude_everton:\n prob += squad[projections['Team'] == 'Everton'].sum() == 0 # Option to exclude Everton players\n prob += bench_gk <= (projections['Pos'] == 'G') # Bench GK must be a goalkeeper\n prob += (lineup * (projections['Pos'] == 'G')).sum() == 1 # There must be 1 goalkeeper in lineup\n prob += (lineup * (projections['Pos'] == 'D')).sum() >= 3 # There must be at least 3 defenders in lineup\n prob += itb[[False] + [True] * self.horizon] >= 0 # The itb amount must be non-negative for future GWs\n\n # Set up transfer logic\n transfer_args = {'index': gw_interval, 'column_type': 'gw', 'model': prob, 'cat': 'Integer'}\n aux = DecisionSeries.lp_variable('aux', **transfer_args)\n free_transfers = DecisionSeries(data=[initial_fts], index=[next_gw - 1], model=prob) + DecisionSeries.\\\n lp_variable('free_transfers', **transfer_args)\n penalised_transfers = DecisionSeries.lp_variable('penalised_transfers', **transfer_args)\n transfer_counts = transfer_in.sum()\n frees_minus_transfers = free_transfers.lag(1) - transfer_counts\n lower_bound = aux * 15 - 14\n upper_bound = aux * 2\n if initial_fts > 1:\n prob += transfer_counts[next_gw] >= 1\n prob += frees_minus_transfers >= lower_bound\n prob += frees_minus_transfers <= upper_bound\n prob += free_transfers == aux + 1\n # penalised_transfers is max(transfers - frees, 0)\n prob += penalised_transfers >= -frees_minus_transfers\n prob += penalised_transfers >= 0\n\n ev_values = projections[[f'{gw}_Pts' for gw in gw_interval]] # Restructure projections data for easier\n ev_values.columns = gw_interval # manipulation\n objective = ((lineup + captain) * ev_values).sum() # Objective function is sum of lineup and captain pts\n objective += (vice_captain * self.vc_weight * ev_values).sum() # Add vice-captain weight\n for loc, bench_slot in enumerate((bench_gk, bench_1, bench_2, bench_3)):\n objective += (bench_slot * ev_values).sum() * self.bench_weights[:, loc] # Add bench weights to objective\n if force_transfers is None:\n objective -= penalised_transfers * 4 # Take away 4 points from each hit taken\n if force_chips is not None:\n self.force_chips = force_chips\n for gw in force_chips:\n if force_chips[gw] == 'wildcard':\n objective[gw] += penalised_transfers[gw] * 4 # Remove penalised points in wildcard week\n\n if force_players is not None:\n for player in force_players['include']:\n prob += squad.T[player].drop(next_gw - 1) == 1\n for player in force_players['exclude']:\n prob += squad.T[player].drop(next_gw - 1) == 0\n if 'include_for_gw' in force_players:\n for gw in force_players['include_for_gw']:\n try:\n 
prob += squad[force_players['include_for_gw'][gw], gw] == 1\n except ValueError:\n pass\n if 'exclude_for_gw' in force_players:\n for gw in force_players['exclude_for_gw']:\n try:\n prob += squad[force_players['exclude_for_gw'][gw], gw] == 0\n except ValueError:\n pass\n self.rolls = frees_minus_transfers + penalised_transfers\n prob += self.rolls <= 2\n\n if self.penalties is not None:\n if time_decay in self.penalties:\n self.penalties[time_decay] = self.penalties.pop(time_decay) # Apply time decay after other penalties\n for penalty, parameter in self.penalties.items():\n objective = penalty(objective, self, parameter) # Apply external penalty functions\n\n # Apply price change EV\n if price_changes is not None:\n gws_remaining = 38 - next_gw + 1\n for player in price_changes['rise']:\n objective[next_gw] += self.million_value / 30 * squad[player, next_gw] * gws_remaining\n for player in price_changes['drop']:\n objective[next_gw] -= self.million_value / 10 * squad[player, next_gw] * gws_remaining\n\n prob.model += objective.sum()\n prob.solve(time_limit=time_limit, optimizer=optimizer, message=message)\n\n return Solution(lineup, bench_gk, bench_1, bench_2, bench_3, captain, vice_captain, objective, transfer_in,\n transfer_out, itb, projections, free_transfers, penalised_transfers, force_chips)", "def main():\n aoc_input = aoc_01_input.get_input()\n\n current_direction = 'N'\n steps_north = 0\n steps_east = 0\n\n # For part 2: Store all the coords visited in a list\n all_coords_list = []\n # A variable to save HQ coordinates in\n hq_coords = None\n\n for instruction in aoc_input:\n # One instruction is eg 'R2' or 'L44'\n input_turn = instruction[0]\n input_steps = int(instruction[1:])\n\n current_direction = change_direction(current_direction, input_turn)\n\n if current_direction == 'N':\n\n for k in range(input_steps):\n current_coords = [steps_north + k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north += input_steps\n\n elif current_direction == 'E':\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east + k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east += input_steps\n\n elif current_direction == 'S':\n\n for k in range(input_steps):\n current_coords = [steps_north - k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north -= input_steps\n\n else:\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east - k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east -= input_steps\n\n current_coords = [steps_north, steps_east]\n\n total_distance = abs(steps_north) + abs(steps_east)\n\n total_distance_part2 = abs(hq_coords[0]) + abs(hq_coords[1])\n\n print('Part 1: {}'.format(total_distance))\n print('Part 2: {}'.format(total_distance_part2))\n\n # print('Part 1: {}'.format(get_root(aoc_input[:])['name']))\n # print('Part 2: {}'.format(find_imbalance(aoc_input[:])))" ]
[ "0.67139935", "0.6400536", "0.6300604", "0.6229401", "0.6149414", "0.61029875", "0.60279167", "0.59973127", "0.59507513", "0.59370536", "0.5883438", "0.58514273", "0.58248085", "0.58187276", "0.57969856", "0.5756195", "0.5724864", "0.5687748", "0.5662441", "0.56054395", "0.55930066", "0.5592561", "0.5544223", "0.5541067", "0.5511938", "0.54838413", "0.5479299", "0.54677516", "0.546655", "0.5440963", "0.5432419", "0.5429251", "0.54179245", "0.5416701", "0.5379249", "0.5372565", "0.5360505", "0.53584176", "0.5356277", "0.53514653", "0.53411585", "0.533751", "0.53259814", "0.5320016", "0.5309212", "0.5297523", "0.5259747", "0.5258913", "0.52530456", "0.52475166", "0.52367574", "0.52358866", "0.52311975", "0.52276707", "0.52131623", "0.5209132", "0.5207967", "0.520126", "0.51866317", "0.5183807", "0.51821005", "0.51703346", "0.5158411", "0.5158305", "0.5152149", "0.51438206", "0.51406795", "0.5139854", "0.51304", "0.5130195", "0.5121465", "0.5120349", "0.51164114", "0.5116351", "0.51131326", "0.51090175", "0.50992423", "0.50940377", "0.50925785", "0.5079936", "0.507841", "0.5077404", "0.50740397", "0.5052282", "0.50496864", "0.5049232", "0.5049132", "0.5048645", "0.5045748", "0.5044012", "0.5043811", "0.5035428", "0.501729", "0.50152993", "0.50074106", "0.49966478", "0.49927586", "0.4982465", "0.49773714", "0.49756187" ]
0.6315577
2
Export the Route result to a feature class.
def _export_to_feature_class(self, chunk_definition):
    # Make output gdb
    rt_workspace = self._create_output_gdb()

    # Export routes
    output_routes = os.path.join(rt_workspace, f"Routes_{chunk_definition[0]}_{chunk_definition[1]}")
    self.logger.debug(f"Exporting Route Routes output to {output_routes}...")
    self.solve_result.export(arcpy.nax.RouteOutputDataType.Routes, output_routes)

    # Export stops
    output_stops = os.path.join(rt_workspace, f"Stops_{chunk_definition[0]}_{chunk_definition[1]}")
    self.logger.debug(f"Exporting Route Stops output to {output_stops}...")
    self.solve_result.export(arcpy.nax.RouteOutputDataType.Stops, output_stops)

    # Join the input ID fields to Routes.
    # The new FirstStopID and LastStopID fields were added at Pro 3.1 / Enterprise 11.1 so that relationships
    # between IDs/OIDs in output classes are more reliable. Use these fields if they exist in the output.
    # Otherwise, use FirstStopOID and LastStopOID, which are mostly reliable but not perfect. For best results,
    # use the most recent ArcGIS software.
    if "FirstStopID" in self.solve_result.fieldNames(arcpy.nax.RouteOutputDataType.Routes):
        id_field_prefix = "ID"
    else:
        id_field_prefix = "OID"
    if self.reverse_direction:
        first_stop_field = self.dest_unique_id_field_name
        second_stop_field = self.origin_unique_id_field_name
    else:
        first_stop_field = self.origin_unique_id_field_name
        second_stop_field = self.dest_unique_id_field_name
    with arcpy.EnvManager(overwriteOutput=True):
        helpers.run_gp_tool(
            self.logger,
            arcpy.management.JoinField,
            [output_routes, f"FirstStop{id_field_prefix}", output_stops, "ObjectID", [first_stop_field]]
        )
        helpers.run_gp_tool(
            self.logger,
            arcpy.management.JoinField,
            [output_routes, f"LastStop{id_field_prefix}", output_stops, "ObjectID", [second_stop_field]]
        )
    self.job_result["outputRoutes"] = output_routes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_process_route_fcs(self):\r\n # Create the final output feature class\r\n desc = arcpy.Describe(self.route_fcs[0])\r\n helpers.run_gp_tool(\r\n LOGGER,\r\n arcpy.management.CreateFeatureclass, [\r\n os.path.dirname(self.out_routes),\r\n os.path.basename(self.out_routes),\r\n \"POLYLINE\",\r\n self.route_fcs[0], # template feature class to transfer full schema\r\n \"SAME_AS_TEMPLATE\",\r\n \"SAME_AS_TEMPLATE\",\r\n desc.spatialReference\r\n ]\r\n )\r\n\r\n # Insert the rows from all the individual output feature classes into the final output\r\n fields = [\"SHAPE@\"] + [f.name for f in desc.fields]\r\n with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member\r\n for fc in self.route_fcs:\r\n for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member\r\n cur.insertRow(row)", "def test_output(self):\n new_route = self.route.output(\"test data\", transform=\"transformed\")\n assert new_route != self.route\n assert new_route.route[\"output\"] == \"test data\"\n assert new_route.route[\"transform\"] == \"transformed\"", "def Point_to_FeatureClass(self, fc):\n\n\n feature_class = []\n for index, traectory in enumerate(self.__traectory_list):\n point_row = arcpy.Point(X=traectory[0], Y=traectory[1], Z=traectory[2], ID=index)\n feature_class.append(arcpy.PointGeometry(point_row, arcpy.SpatialReference(2436)))\n arcpy.CopyFeatures_management(feature_class, (self.workspace + '\\\\' + fc))\n print 'Complete Creating a Point Feature Class'\n\n return None", "def export(self):\n logger.info('Exporting...')\n trips = []\n for veh_id, trip in tqdm(self.history.items()):\n veh_type = self.vehicles[veh_id].type\n if veh_type is VehicleType.Public:\n road_network = self.transit_roads\n else:\n road_network = self.roads\n trips.append({\n 'vendor': veh_type.value,\n 'segments': road_network.segments(trip, step=0.5)\n })\n\n coords = [(e.pt.x, e.pt.y) for e in self.transit_roads.stops.values()]\n stops = self.transit_roads.to_latlon_bulk(coords)\n\n return {\n 'meta': {\n 'lat': float(self.transit_roads.place_meta['lat']),\n 'lng': float(self.transit_roads.place_meta['lon']),\n 'start_time': self.history_window[0],\n 'end_time': self.history_window[1]\n },\n 'trips': trips,\n 'stops': stops\n }", "def route(self):\n pass", "def get_route_features(route_url, area_id, lat, long):\n\n # Open page html with BeautifulSoup\n route_doc = urlopen(route_url, context=ctx)\n if route_doc.getcode() != 200:\n print(\"Error on page: \", route_doc.getcode())\n cursor.execute('UPDATE Routes SET error=%s, WHERE url=%s',\n (route_doc.getcode(), route_url))\n conn.commit()\n else:\n route_html = route_doc.read()\n # Parses html with BS package\n route_soup = BeautifulSoup(route_html, 'html.parser')\n\n # metadata includes name, url, lat, long, stars, and votes\n data = get_route_metadata(route_url, route_soup, lat, long)\n\n # Includes sport, trad, tr, etc.\n route_type = get_route_type(route_soup)\n # Includes route difficulty according to different grading systems\n route_diff = get_route_diff(route_soup)\n name = data['name']\n\n # Updates user\n print(' Gathering route data on:', name)\n print(' - ', route_url)\n\n # Combines all dictionaries\n data.update(route_type)\n data.update(route_diff)\n data['area_id'] = area_id\n\n # Sends to function that updates new DB\n write_to_sql(data)\n\n cursor.execute('SELECT route_id FROM Routes WHERE url = %s',\n (route_url,))\n route_id = cursor.fetchone()[0]\n\n # Includes output from analysis of the route description and\n # 
comments\n get_text(route_soup, name, route_id)", "def _to_arcpy_featureset(self):\r\n if HAS_ARCPY:\r\n import uuid, string, random\r\n l = []\r\n for i in range(3):\r\n l.append(random.choice(string.ascii_letters))\r\n l = \"\".join(l)\r\n out_name = l\r\n res = self.to_featureclass(out_location='in_memory',\r\n out_name=out_name)\r\n\r\n feature_set = arcpy.FeatureSet()\r\n feature_set.load(res)\r\n return feature_set\r\n else:\r\n raise Exception(\"ArcPy must be present to convert to arcpy.FeatureSet object\")", "def __repr__(self):\n return '<Route {}>'.format(self.name)", "def feature_class():\n if deployment_settings.get_security_map() and not s3_has_role(\"MapAdmin\"):\n unauthorised()\n\n tablename = \"%s_%s\" % (module, resourcename)\n table = db[tablename]\n\n # Model options\n table.gps_marker.comment = DIV( _class=\"tooltip\",\n _title=\"%s|%s\" % (T(\"GPS Marker\"),\n T(\"Defines the icon used for display of features on handheld GPS.\")))\n\n # CRUD Strings\n LIST_FEATURE_CLASS = T(\"List Feature Classes\")\n s3.crud_strings[tablename] = Storage(\n title_create = ADD_FEATURE_CLASS,\n title_display = T(\"Feature Class Details\"),\n title_list = T(\"Feature Classes\"),\n title_update = T(\"Edit Feature Class\"),\n title_search = T(\"Search Feature Class\"),\n subtitle_create = T(\"Add New Feature Class\"),\n subtitle_list = LIST_FEATURE_CLASS,\n label_list_button = LIST_FEATURE_CLASS,\n label_create_button = ADD_FEATURE_CLASS,\n label_delete_button = T(\"Delete Feature Class\"),\n msg_record_created = T(\"Feature Class added\"),\n msg_record_modified = T(\"Feature Class updated\"),\n msg_record_deleted = T(\"Feature Class deleted\"),\n msg_list_empty = T(\"No Feature Classes currently defined\"))\n\n output = s3_rest_controller(module, resourcename)\n\n if not \"gis\" in response.view and response.view != \"popup.html\":\n response.view = \"gis/\" + response.view\n\n return output", "def out_featuretxt(self):\n return self.outputfrominput(inputformat='csv', stripextension='.csv', addextension='.features.csv')", "def normalise(self) -> \"Route\":\n pass", "def result_writer(result_poly):\n val = {}\n val[\"type\"] = \"FeatureCollection\"\n val[\"features\"] = result_poly\n with open(output_file_path, 'w') as outfile:\n json.dump(val, outfile, indent=3)\n outfile.close()", "def export(self, fname):\n\n # discard any data with null feature values\n self.discard()\n\n # set target as last column\n self.target = self.getFeatureData('Weather Type')\n\n # remove non-exportable features\n for n in ['Station ID', 'Station Name', 'Date', 'Weather Type']:\n if self._isFIdx(n):\n self.delete(n)\n\n # convert all data to float\n self.data = self.data.astype(float)\n\n # export to file\n pickle.dump(self, open(fname, 'wb'))\n\n return 0", "def get_features(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n # Prepare the output\n self.response = self.model.features_df\n self.response[\"sort_order\"] = pd.Series([i+1 for i in range(len(self.response.index))], index=self.response.index)\n self.response = self.response[[\"model_name\", \"sort_order\", \"name\", \"variable_type\", \"data_type\",\\\n \"feature_strategy\", \"strategy_args\"]]\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"features\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def 
add_route_to_map(gdf_best_route: gpd.GeoDataFrame, basemap):\n #create a list of colors\n colors = ['orange', 'darkred', 'darkblue', 'purple', 'darkgreen', '#364e4a', 'cadetblues']\n \n # make a feature group for every route\n # merge them to a feature group\n for i, row in gdf_best_route.iterrows():\n fg = folium.FeatureGroup(f\"Route {row['order']} from {row['start_city']} to {row['end_city']}\")\n # add the simple route\n fg.add_child(folium.PolyLine(\n locations=row[\"folium_geom\"], \n popup=f\"From {row['start_city']} to {row['end_city']}\",\n tooltip=f\"Route {row['order']}\",\n color=colors[i], \n dash_array='10',\n weight=4))\n basemap.add_child(fg)\n \n return None", "def to_featureclass(self,\r\n out_location, out_name,\r\n overwrite=True, skip_invalid=True):\r\n from .io import to_featureclass\r\n return to_featureclass(df=self,\r\n out_location=out_location,\r\n out_name=out_name,\r\n overwrite=overwrite, skip_invalid=skip_invalid)", "def export_megas(self, path_out):\n with open(path_out, 'w') as fout:\n import csv\n writer = csv.writer(fout)\n writer.writerow([\"ROUTE\",'MEGA_STOP_ID',\"LAT\",\"LON\"])\n for route, mega in self.megas.items():\n for stop in mega:\n writer.writerow([route]+ list(stop.to_csv()))", "def export(self, location=None):\n self.location = self.set_location(location)\n\n # Export all Stormpath data.\n for export_type in self.EXPORTS:\n getattr(self, 'export_' + export_type)()", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def gen_features(log_file_path: str, out_path: str):\n raise RuntimeError(\"Feature extraction is not supported yet in AutoScheduler dialect\")", "def _export_model(\n self,\n precision: ModelPrecision = ModelPrecision.FP32,\n export_format: ExportType = ExportType.ONNX,\n dump_features: bool = True,\n ):\n # copied from OTX inference_task.py\n self._data_cfg = ConfigDict(\n data=ConfigDict(\n train=ConfigDict(\n otx_dataset=None,\n labels=self._labels,\n ),\n test=ConfigDict(\n otx_dataset=None,\n labels=self._labels,\n ),\n )\n )\n self._init_task(export=True)\n\n cfg = self.configure(False, None)\n\n self._precision[0] = precision\n export_options: Dict[str, Any] = {}\n export_options[\"deploy_cfg\"] = self._init_deploy_cfg(cfg)\n assert len(self._precision) == 1\n export_options[\"precision\"] = str(self._precision[0])\n export_options[\"type\"] = str(export_format)\n\n export_options[\"deploy_cfg\"][\"dump_features\"] = dump_features\n if dump_features:\n output_names = export_options[\"deploy_cfg\"][\"ir_config\"][\"output_names\"]\n if \"feature_vector\" not in output_names:\n output_names.append(\"feature_vector\")\n if export_options[\"deploy_cfg\"][\"codebase_config\"][\"task\"] != \"Segmentation\":\n if \"saliency_map\" not in output_names:\n output_names.append(\"saliency_map\")\n export_options[\"model_builder\"] = getattr(self, \"model_builder\", build_segmentor)\n\n if self._precision[0] == ModelPrecision.FP16:\n export_options[\"deploy_cfg\"][\"backend_config\"][\"mo_options\"][\"flags\"].append(\"--compress_to_fp16\")\n\n backend_cfg_backup = {}\n if export_format == ExportType.ONNX:\n backend_cfg_backup = export_options[\"deploy_cfg\"][\"backend_config\"]\n export_options[\"deploy_cfg\"][\"backend_config\"] = {\"type\": \"onnxruntime\"}\n export_options[\"deploy_cfg\"][\"ir_config\"][\"dynamic_axes\"][\"input\"] = {0: \"batch\"}\n\n exporter = SegmentationExporter()\n results = exporter.run(\n cfg,\n **export_options,\n )\n\n if export_format == ExportType.ONNX:\n 
results[\"inference_parameters\"] = {}\n results[\"inference_parameters\"][\"mean_values\"] = \" \".join(\n map(str, backend_cfg_backup[\"mo_options\"][\"args\"][\"--mean_values\"])\n )\n results[\"inference_parameters\"][\"scale_values\"] = \" \".join(\n map(str, backend_cfg_backup[\"mo_options\"][\"args\"][\"--scale_values\"])\n )\n\n return results", "def export_type(cls):\n cls.export_in_rule_data = True\n return cls", "def extract_features(self, *args, **kwargs):\n return self(*args, **kwargs)", "def test_route():\n coordinates = \"54.971288,82.875554|54.988211,82.906425|55.038288,82.937137|55.060150,82.984610\"\n return jsonify(data=coordinates, status=OK_STATUS)", "def export_routes_xml(self, filepath=None, method_routechoice=None, encoding='UTF-8'):\n if method_routechoice is None:\n method_routechoice = self.get_route_first\n\n if filepath is None:\n filepath = self.get_routefilepath()\n print 'export_routes_xml', filepath\n try:\n fd = open(filepath, 'w')\n except:\n print 'WARNING in write_obj_to_xml: could not open', filepath\n return False\n\n xmltag_routes, xmltag_veh, attrname_id = (\"routes\", \"vehicle\", \"ids_sumo\")\n xmltag_trip = \"trip\"\n xmltag_rou = \"route\"\n\n fd.write('<?xml version=\"1.0\" encoding=\"%s\"?>\\n' % encoding)\n fd.write(xm.begin(xmltag_routes))\n indent = 2\n\n #ids_modes_used = set(self.parent.vtypes.ids_mode[self.ids_vtype.get_value()])\n self.parent.vtypes.write_xml(fd, indent=indent,\n ids=set(self.ids_vtype.get_value()),\n is_print_begin_end=False\n )\n\n ids_mode = self.parent.vtypes.ids_mode\n id_pedestrian = MODES['pedestrian']\n routes = self.routes.get_value()\n\n # here we could write the route info\n # but we do write it inside each trip so that it can be parsed\n # in the same way as duarouter output\n # routes.write_xml( fd, indent=indent,\n # attrconfigs_excluded = [routes.costs, routes.probabilities],\n # is_print_begin_end = False)\n\n # let's write trip info manually\n tripconfigs = [self.ids_vtype,\n self.times_depart,\n self.ids_edge_depart,\n self.ids_edge_arrival,\n self.inds_lane_depart,\n self.positions_depart,\n self.speeds_depart,\n self.inds_lane_arrival,\n self.positions_arrival,\n self.speeds_arrival,\n ]\n\n routeconfigs = [routes.ids_edges,\n routes.colors,\n ]\n\n attrconfig_id = getattr(self.get_attrsman(), attrname_id)\n xmltag_id = attrconfig_id.xmltag\n\n for id_trip in self.times_depart.get_ids_sorted():\n\n if ids_mode[self.ids_vtype[id_trip]] == id_pedestrian:\n self.write_persontrip_xml(fd, id_trip,\n method_routechoice=method_routechoice,\n indent=indent+2)\n\n else:\n id_route = method_routechoice(id_trip)\n if id_route >= 0: # a valid route has been found\n # init vehicle route only if valid route exists\n fd.write(xm.start(xmltag_veh, indent+2))\n else:\n # init trip instead of route\n fd.write(xm.start(xmltag_trip, indent+2))\n\n # print ' make tag and id',_id\n fd.write(xm.num(xmltag_id, attrconfig_id[id_trip]))\n\n # print ' write columns',len(scalarcolconfigs)>0,len(idcolconfig_include_tab)>0,len(objcolconfigs)>0\n for attrconfig in tripconfigs:\n # print ' attrconfig',attrconfig.attrname\n attrconfig.write_xml(fd, id_trip)\n\n if id_route >= 0: # a valid route has been found\n # write route id\n #fd.write(xm.num('route', id_route ))\n\n # instead of route id we write entire route here\n fd.write(xm.stop())\n fd.write(xm.start(xmltag_rou, indent+4))\n for attrconfig in routeconfigs:\n # print ' attrconfig',attrconfig.attrname\n attrconfig.write_xml(fd, id_route)\n\n # end route and vehicle\n 
fd.write(xm.stopit())\n fd.write(xm.end(xmltag_veh, indent+2))\n\n else:\n # end trip without route\n fd.write(xm.stopit())\n\n fd.write(xm.end(xmltag_routes))\n fd.close()\n return filepath", "def extract_features(input_path, output_path, spellbook):\n\n input_df = pd.read_csv(input_path)\n spellbook = SpellBook.read_json(spellbook)\n output_df = spellbook.cast(input_df)\n output_df.to_csv(output_path, index=False)\n print(output_df)", "def export_features(self, filename: str, handler: int = 0) -> int:\n return self.get_func(\"DC_ExportFeatures\", None, c_int)(filename, handler)", "def get_route(self,\n spawn_location,\n distance: float = 10.,\n turning_flag=-1, # left -> -1, straight -> 0, right -> 1\n resolution: float = 1.):\n\n waypoint_route = []\n transform_route = []\n location_route = []\n\n spawn_waypoint = self.map.get_waypoint(spawn_location,\n project_to_road=True, # must set to True, center of lane\n )\n # start waypoint: beginning waypoint of the route,\n start_waypoint = spawn_waypoint.next(3.0)[0] # assuming that next waypoint is not in junction\n # exit_waypoint: the first waypoint after leaving the junction\n exit_waypoint = generate_target_waypoint(waypoint=start_waypoint, turn=turning_flag)\n # end_waypoint: end waypoint of the route\n end_waypoint, _ = get_waypoint_in_distance(exit_waypoint, distance)\n\n # list of carla.Location for route generation\n raw_route = [start_waypoint.transform.location,\n exit_waypoint.transform.location,\n end_waypoint.transform.location,\n ]\n\n waypoint_route = interpolate_trajectory(world=self.world,\n waypoints_trajectory=raw_route,\n hop_resolution=resolution)\n\n # get route of transform\n for wp, road_option in waypoint_route:\n transform_route.append((wp.transform, road_option))\n\n # get route of location\n for wp, road_option in waypoint_route:\n location_route.append((wp.transform.location, road_option))\n\n # ==================== visualization ====================\n key_waypoints = [start_waypoint,\n exit_waypoint,\n end_waypoint,\n ]\n\n for wp in key_waypoints:\n draw_waypoint(self.world, wp, color=(red, red))\n\n if self.verbose:\n # visualize complete route\n for wp, _ in waypoint_route:\n draw_waypoint(self.world, wp, color=(magenta, magenta))\n\n return waypoint_route, location_route, transform_route", "def create_route_response(self, ApiId: str, RouteId: str, RouteResponseKey: str, ModelSelectionExpression: str = None, ResponseModels: Dict = None, ResponseParameters: Dict = None) -> Dict:\n pass", "def set_route_class_to_add_destination(self, route_class):\n self.multiple_items_selection_from_kendo_dropdown(self.route_class_multiple_kendo_dropdown_locator, route_class)\n self.click_element(self.new_destination_header_locator)", "def export_as(self, version):\n raise NotImplementedError(\"export_as is not implemented\")", "def _write_feature(self, feature, rec_id, out_handle):\n if feature.strand == 1:\n strand = '+'\n elif feature.strand == -1:\n strand = '-'\n else:\n strand = '.'\n # remove any standard features from the qualifiers\n quals = feature.qualifiers.copy()\n for std_qual in [\"source\", \"score\", \"phase\"]:\n if quals.has_key(std_qual) and len(quals[std_qual]) == 1:\n del quals[std_qual]\n parts = [str(rec_id),\n feature.qualifiers.get(\"source\", [\"feature\"])[0],\n (feature.type if feature.type else \"sequence_feature\"),\n str(feature.location.nofuzzy_start),\n str(feature.location.nofuzzy_end),\n feature.qualifiers.get(\"score\", [\".\"])[0],\n strand,\n str(feature.qualifiers.get(\"phase\", 
[\".\"])[0]),\n self._format_keyvals(quals)]\n out_handle.write(\"\\t\".join(parts) + \"\\n\")\n for sub_feature in feature.sub_features:\n self._write_feature(sub_feature, rec_id, out_handle)", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def output_layer(self, features, **kwargs):\n raise NotImplementedError", "def extract_feature(self, article) :\n pass", "def output_features(self) -> List[str]:\n return self._pipeline.features", "def export_coreml(self, filename):\n import coremltools\n # First define three internal helper functions\n\n\n # Internal helper function\n def _create_vision_feature_print_screen():\n prob_name = self.target + 'Probability'\n\n #\n # Setup the top level (pipeline classifier) spec\n #\n top_spec = coremltools.proto.Model_pb2.Model()\n top_spec.specificationVersion = 3\n\n desc = top_spec.description\n desc.output.add().name = prob_name\n desc.output.add().name = self.target\n\n desc.predictedFeatureName = self.target\n desc.predictedProbabilitiesName = prob_name\n\n input = desc.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')\n input.type.imageType.colorSpace = BGR_VALUE\n\n #\n # VisionFeaturePrint extractor\n #\n pipelineClassifier = top_spec.pipelineClassifier\n scene_print = pipelineClassifier.pipeline.models.add()\n scene_print.specificationVersion = 3\n scene_print.visionFeaturePrint.scene.version = 1\n\n input = scene_print.description.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n input.type.imageType.colorSpace = BGR_VALUE\n\n output = scene_print.description.output.add()\n output.name = \"output_name\"\n DOUBLE_ARRAY_VALUE = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')\n output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n output.type.multiArrayType.shape.append(2048)\n\n #\n # Neural Network Classifier, which is just logistic regression, in order to use GPUs\n #\n temp = top_spec.pipelineClassifier.pipeline.models.add()\n temp.specificationVersion = 3\n\n # Empty inner product layer\n nn_spec = temp.neuralNetworkClassifier\n feature_layer = nn_spec.layers.add()\n feature_layer.name = \"feature_layer\"\n feature_layer.input.append(\"output_name\")\n feature_layer.output.append(\"softmax_input\")\n fc_layer_params = feature_layer.innerProduct\n fc_layer_params.inputChannels = 2048\n\n # Softmax layer\n softmax = nn_spec.layers.add()\n softmax.name = \"softmax\"\n softmax.softmax.MergeFromString(b'')\n softmax.input.append(\"softmax_input\")\n softmax.output.append(prob_name)\n\n input = temp.description.input.add()\n input.name = \"output_name\"\n input.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n input.type.multiArrayType.shape.append(2048)\n\n # Set outputs\n desc = temp.description\n prob_output = desc.output.add()\n prob_output.name = prob_name\n label_output = desc.output.add()\n label_output.name = self.target\n\n if type(self.classifier.classes[0]) == int:\n prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')\n label_output.type.int64Type.MergeFromString(b'')\n else:\n prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')\n label_output.type.stringType.MergeFromString(b'')\n\n temp.description.predictedFeatureName = self.target\n temp.description.predictedProbabilitiesName = prob_name\n\n return top_spec\n\n\n # Internal 
helper function\n def _update_last_two_layers(nn_spec):\n # Replace the softmax layer with new coeffients\n num_classes = self.num_classes\n fc_layer = nn_spec.layers[-2]\n fc_layer_params = fc_layer.innerProduct\n fc_layer_params.outputChannels = self.classifier.num_classes\n inputChannels = fc_layer_params.inputChannels\n fc_layer_params.hasBias = True\n\n coefs = self.classifier.coefficients\n weights = fc_layer_params.weights\n bias = fc_layer_params.bias\n del weights.floatValue[:]\n del bias.floatValue[:]\n\n import numpy as np\n W = np.array(coefs[coefs['index'] != None]['value'], ndmin = 2).reshape(\n inputChannels, num_classes - 1, order = 'F')\n b = coefs[coefs['index'] == None]['value']\n Wa = np.hstack((np.zeros((inputChannels, 1)), W))\n weights.floatValue.extend(Wa.flatten(order = 'F'))\n bias.floatValue.extend([0.0] + list(b))\n\n # Internal helper function\n def _set_inputs_outputs_and_metadata(spec, nn_spec):\n # Replace the classifier with the new classes\n class_labels = self.classifier.classes\n\n probOutput = spec.description.output[0]\n classLabel = spec.description.output[1]\n probOutput.type.dictionaryType.MergeFromString(b'')\n if type(class_labels[0]) == int:\n nn_spec.ClearField('int64ClassLabels')\n probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')\n classLabel.type.int64Type.MergeFromString(b'')\n del nn_spec.int64ClassLabels.vector[:]\n for c in class_labels:\n nn_spec.int64ClassLabels.vector.append(c)\n else:\n nn_spec.ClearField('stringClassLabels')\n probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')\n classLabel.type.stringType.MergeFromString(b'')\n del nn_spec.stringClassLabels.vector[:]\n for c in class_labels:\n nn_spec.stringClassLabels.vector.append(c)\n\n prob_name = self.target + 'Probability'\n label_name = self.target\n old_output_name = nn_spec.layers[-1].name\n coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)\n coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)\n if nn_spec.layers[-1].name == old_output_name:\n nn_spec.layers[-1].name = prob_name\n if nn_spec.labelProbabilityLayerName == old_output_name:\n nn_spec.labelProbabilityLayerName = prob_name\n coremltools.models.utils.rename_feature(spec, 'data', self.feature)\n if len(nn_spec.preprocessing) > 0:\n nn_spec.preprocessing[0].featureName = self.feature\n\n mlmodel = coremltools.models.MLModel(spec)\n model_type = 'image classifier (%s)' % self.model\n mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)\n mlmodel.input_description[self.feature] = u'Input image'\n mlmodel.output_description[prob_name] = 'Prediction probabilities'\n mlmodel.output_description[label_name] = 'Class label of top prediction'\n _coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {\n 'model': self.model,\n 'target': self.target,\n 'features': self.feature,\n 'max_iterations': str(self.max_iterations),\n }, version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION)\n\n return mlmodel\n\n\n # main part of the export_coreml function\n if self.model in _pre_trained_models.MODELS:\n ptModel = _pre_trained_models.MODELS[self.model]()\n feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel)\n\n coreml_model = feature_extractor.get_coreml_model()\n spec = coreml_model.get_spec()\n nn_spec = spec.neuralNetworkClassifier\n else: # model == VisionFeaturePrint_Screen\n spec = _create_vision_feature_print_screen()\n nn_spec = 
spec.pipelineClassifier.pipeline.models[1].neuralNetworkClassifier\n\n _update_last_two_layers(nn_spec)\n mlmodel = _set_inputs_outputs_and_metadata(spec, nn_spec)\n mlmodel.save(filename)", "def test_get_students_features_csv(self):\r\n url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url + '/csv', {})\r\n self.assertEqual(response['Content-Type'], 'text/csv')", "def as_geojson_feature(result: ResponseObject, properties: List[str] = None) -> ResponseObject:\n result['geojson']['coordinates'] = [float(i) for i in result['geojson']['coordinates']]\n return {\n 'type': 'Feature',\n 'geometry': result['geojson'],\n 'properties': {k: result.get(k) for k in properties or []},\n }", "def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)", "def exports():", "def features_result_page():\n #  Get all fields from form\n module = request.forms.getall('module')\n version = request.forms.getall('version')\n software = request.forms.getall('sw')\n\n # Build html\n module, version, software, result = do_features_request(module_type=module,\n version=version, software=software)\n\n # Build template page\n with open(\"./header.html\") as header, open('./features.tpl') as features, open('./footer.html') as footer:\n template_html = header.read() + features.read() + footer.read()\n\n if not result:\n result = []\n\n output = template(template_html, module=module, version=version, sw=software, result=result)\n\n return output", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def createOutput(self, outputFC):\n\n #### Validate Output Workspace ####\n ERROR.checkOutputPath(outputFC)\n\n #### Shorthand Attributes ####\n ssdo = self.ssdo\n caseField = self.caseField\n\n #### Create Output Feature Class ####\n ARCPY.SetProgressor(\"default\", ARCPY.GetIDMessage(84003))\n tempCFLayer = \"tmpCFLayer\"\n\n try:\n DM.MakeFeatureLayer(ssdo.inputFC, 
tempCFLayer)\n first = True\n for key, value in self.cf.iteritems():\n oids = value[0]\n for oid in oids:\n sqlString = ssdo.oidName + '=' + str(oid)\n if first:\n DM.SelectLayerByAttribute(tempCFLayer, \n \"NEW_SELECTION\",\n sqlString)\n first = False\n else:\n DM.SelectLayerByAttribute(tempCFLayer,\n \"ADD_TO_SELECTION\", \n sqlString)\n\n UTILS.clearExtent(DM.CopyFeatures(tempCFLayer, outputFC))\n except:\n ARCPY.AddIDMessage(\"ERROR\", 210, outputFC)\n raise SystemExit()\n\n #### Set Attribute ####\n self.outputFC = outputFC", "def feature():\n pass", "def get_features(self, request, **kwargs):\n\n if hasattr(request, 'GET'):\n reference, start, stop = parse_das_segment(request)\n query_seg = {'id': self._meta.ref_prefix + reference, 'start':start, 'stop':stop}\n\n query_method = getattr(self, \"%s_query\" % self._meta.filetype, None)\n if query_method:\n hits = query_method(**query_seg)\n else:\n raise NotImplementedError(\"No query function implemented for\\\n filetype %s\" % self._meta.filetype)\n options = {'query': query_seg, \n 'method': self._meta.method, \n 'request_string': request.META['QUERY_STRING'],\n 'request_path': request.path,\n }\n \"\"\"\n to_be_serialized = [self.build_bundle(data = i.__dict__,\n request=request) for i in hits]\n \n content = feature_serializer(request, hits, **query_seg) \n \"\"\"\n to_be_serialized = []\n for i in hits:\n to_be_serialized.append(Bundle(data=i.__dict__, request=request))\n #self.full_dehydrate(hits)\n content = self.serialize(request, to_be_serialized, 'xml',\n options=options)\n response = HttpResponse(content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response", "def __features2response(self, features):\n dmp_display.DmpDisplay().display(features)\n\n return defines.ReturnCode.SUCC", "def extractFeatures(self, datum):\n abstract", "def convert_outputs(self):\n self.out('relaxed_structure', self.ctx.workchain.outputs.output_structure)\n self.out('total_energy', get_total_energy(self.ctx.workchain.outputs.output_parameters))\n self.out('forces', get_forces_from_trajectory(self.ctx.workchain.outputs.output_trajectory))\n self.out('stress', get_stress_from_trajectory(self.ctx.workchain.outputs.output_trajectory))", "def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def buildRoute(rte):\n route = myPyGPX.Route()\n for rtept in rte.getElementsByTagName(\"rtept\"):\n (lat,lon, t, ele, name, description) = parsePoint(rtept)\n route.addPoint(myPyGPX.RoutePoint(lat, lon, ele, name, description)) \n return route", "def export(self, folder):\n raise NotImplementedError('Not implemented for %s' % self.__class__.__name__)", "def routes(self) -> pulumi.Output[Sequence['outputs.RouteTableRoute']]:\n return pulumi.get(self, \"routes\")", "def 
join_route_successfully(self):\n response = self.client.post(\n self.join_route_url, self.route, format='json',\n HTTP_AUTHORIZATION='token {}'.format(self.token_two))\n\n return response", "def save_feature(self):\n feature_dict = {\n 'name': self.name,\n 'preActionDes': self.pre_action_des,\n 'inActionDes': self.in_action_des,\n 'postActionDes': self.post_action_des,\n 'actionable': self.actionable,\n 'usable': self.usable,\n 'state': self.state,\n 'featureId': self.feature_id\n }\n return feature_dict", "def export(self, class_name, method_name,\n export_data=False, export_dir='.', export_filename='data.json',\n export_append_checksum=False, embed_data=True, **kwargs):\n # Arguments:\n self.class_name = class_name\n self.method_name = method_name\n\n # Estimator:\n est = self.estimator\n\n self.estimators = [est.estimators_[idx] for idx\n in range(est.n_estimators)]\n self.n_estimators = len(self.estimators)\n self.n_features = est.estimators_[0].n_features_\n self.n_classes = est.n_classes_\n\n if self.target_method == 'predict':\n # Exported:\n if export_data and os.path.isdir(export_dir):\n self.export_data(export_dir, export_filename,\n export_append_checksum)\n return self.predict('exported')\n # Embedded:\n return self.predict('embedded')", "def route_creation():\r\n city_ids = json.loads(open(\"cities.json\").read())\r\n cities = []\r\n for id in city_ids:\r\n cities.append(fetch_weather(id))\r\n return Route(cities)", "def export(output, model_path, run_id, mlflow_home):\n mlflow.azureml.export(output=output, model_path=model_path, run_id=run_id,\n mlflow_home=os.path.abspath(mlflow_home) if mlflow_home else None)", "def to_representation(self, instance):\n # prepare OrderedDict geojson structure\n feature = OrderedDict()\n # the list of fields that will be processed by get_properties\n # we will remove fields that have been already processed\n # to increase performance on large numbers\n fields = list(self.fields.values())\n\n # optional id attribute\n if self.Meta.id_field:\n field = self.fields[self.Meta.id_field]\n value = field.get_attribute(instance)\n feature[self.Meta.identifier] = field.to_representation(value)\n fields.remove(field)\n\n # required type attribute\n # must be \"Feature\" according to GeoJSON spec\n feature[\"type\"] = \"Feature\"\n\n # required geometry attribute\n # MUST be present in output according to GeoJSON spec\n field = self.fields[self.Meta.geo_field]\n geo_value = field.get_attribute(instance)\n feature[\"geometry\"] = field.to_representation(geo_value)\n fields.remove(field)\n # Bounding Box\n # if auto_bbox feature is enabled\n # bbox will be determined automatically automatically\n if self.Meta.auto_bbox and geo_value:\n feature[\"bbox\"] = geo_value.extent\n # otherwise it can be determined via another field\n elif self.Meta.bbox_geo_field:\n field = self.fields[self.Meta.bbox_geo_field]\n value = field.get_attribute(instance)\n feature[\"bbox\"] = value.extent if hasattr(value, 'extent') else None\n fields.remove(field)\n\n # GeoJSON properties\n feature[\"properties\"] = self.get_properties(instance, fields)\n\n return feature", "def __init__(self, feature, how_to_behave):\n self.feature = feature\n self.results = feature.results\n self.how_to_behave = how_to_behave", "def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n 
filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')", "def train(features, result):\n clf = grid_search(result)\n clf.fit(features, result)\n return clf", "def extract_features(self, write_to_file=False):\r\n logging.info(f\"Extracting features from {self.name}...\")\r\n sift = cv.SIFT_create()\r\n self.keypoints, self.descriptors = sift.detectAndCompute(self.image, None)\r\n logging.info(f\"Feature extraction complete.\")\r\n if write_to_file:\r\n self.write_features()\r\n return None", "def export(self, outdir=os.getcwd(), filename='biogridpy_response'):\r\n\r\n suffix = self.output_format\r\n \r\n #json out includes headers in response\r\n if (self.output_format == 'json' or\r\n self.output_format == 'jsonExtended'):\r\n filepath = os.path.join(outdir, filename + \".\" + suffix)\r\n try:\r\n with open(filepath, 'w') as outfile:\r\n json.dump(self._byteify2(self.result), outfile)\r\n except AttributeError:\r\n with open(filepath, 'w') as outfile:\r\n json.dump(self._byteify3(self.result), outfile)\r\n #tab out need to add headers\r\n elif (self.output_format == 'tab2' or\r\n self.output_format == 'extendedTab2' or\r\n self.output_format == 'tab1'):\r\n filepath = os.path.join(outdir, filename + \".\" + suffix + \".txt\")\r\n with open(filepath, 'w') as outfile:\r\n outfile.write('#' + '\\t'.join(self.headers))\r\n outfile.write(self.result)", "def node_route_data(th_object, topology_info, file_name):\n route_information(th_object, topology_info, file_name, \"20\", \"91\", \"extracted_data/Route_data/\")\n route_information(th_object, topology_info, file_name, \"22\", \"91\", \"extracted_data/Route_data/\")\n route_information(th_object, topology_info, file_name, \"22\", \"71\", \"extracted_data/Route_data/\")", "def createFeatureClass(self, location, name=\"\"):\n if not validWorkspace(location):\n raise IncorrectWorkspaceType(\"Incorrect workspace - feature class must be created in a local geodatabase\")\n if name != \"\":\n self.name = name\n self.featureClassLocation = location\n featureset = arcpy.CreateFeatureclass_management(out_path=self.featureClassLocation,\n out_name=self.name,\n geometry_type=self.geometryType,\n spatial_reference=self.sr)\n self.__createFields()\n return featureset", "def to_service(self):\n pass", "def get_waypoints(self, maneuver_id, event):\n action = etree.SubElement(event, \"Action\")\n action_name = f\"Action for Manuever ID {maneuver_id}\"\n action.set(\"name\", action_name)\n private_action = etree.SubElement(action, \"PrivateAction\")\n routing_act = etree.SubElement(private_action, \"RoutingAction\")\n assign_route = etree.SubElement(routing_act, \"AssignRouteAction\")\n route = etree.SubElement(assign_route, \"Route\")\n route.set(\"name\", \"OSC Generated Route\")\n route.set(\"closed\", \"false\")\n\n waypoint_layer = QgsProject.instance().mapLayersByName(\"Waypoint Maneuvers\")[0]\n query = f\"\\\"Maneuver ID\\\" is '{maneuver_id}'\"\n request = QgsFeatureRequest().setFilterExpression(query)\n for feature in waypoint_layer.getFeatures(request):\n waypoint = etree.SubElement(route, \"Waypoint\")\n waypoint.set(\"routeStrategy\", feature[\"Route Strategy\"])\n position = etree.SubElement(waypoint, \"Position\")\n world_position = etree.SubElement(position, \"WorldPosition\")\n world_position.set(\"x\", str(feature[\"Pos X\"]))\n 
world_position.set(\"y\", str(feature[\"Pos Y\"]))\n world_position.set(\"z\", str(feature[\"Pos Z\"]))\n world_position.set(\"h\", str(feature[\"Orientation\"]))", "def export(self, path=\"/tmp/result.txt\", format=0):\n\n try:\n out = open(path, 'wa')\n except Exception, e:\n print(\"Something wrong : %s.\" % e)\n\n if format == 0:\n for ip in self.subnets:\n out.write(\"route add -net %s/24 gw $VPNGW \\n\" % ip)\n for ip in self.single_ips:\n out.write(\"route add -host %s gw $VPNGW \\n\" % ip)\n elif format == 1:\n for domain in self.domain_list:\n if not self.__is_ip_address(domain):\n out.write('server=/%s/8.8.8.8\\n' % domain)\n\n elif format == 2:\n for ip in self.iplist:\n out.write(\"route add -host %s gw $OLDGW \\n\" % ip)\n\n elif format == 3:\n for domain, ip in self.fixed_domain_ip_dict.iteritems():\n out.write('address=/%s/%s\\n' % (domain, ip))\n\n else:\n print(\"Invalid format option\")\n out.close()\n return\n\n out.close()\n print(\"Exported to %s ...\" % path)", "def export(self, stream):\n pass", "def add_features_to_output(m: onnx.ModelProto) -> None:\n del m.graph.output[:]\n m.graph.output.extend(m.graph.value_info)", "def writeRoutesCSV(filename, routes):\n if filename[-4:] != \".csv\": # Make sure the filename is a .csv\n filename += \".csv\"\n try:\n with open(os.path.join(\"input\", filename), \"w\", newline='') as f:\n writer = csv.writer(f, delimiter=\",\")\n writer.writerows(routes)\n except (OSError, FileNotFoundError):\n return False\n else:\n return True", "def add_routes(self):\n pass", "def add_route(self, route: Route, routing_url: str, methods: typing.Iterable[str] = (\"GET\",)):\n # Create an endpoint name for the route.\n route.routing_url = routing_url\n route.methods = methods\n # Add it to the list of routes to add later.\n self.routes.append(route)\n # Add the self to the route.\n route.bp = self\n\n return route", "def reach_points_as_features(self) -> List[Feature]:\n return [pt.as_feature for pt in self._reach_points]", "def to_representation(self, instance):\n # Load the paginated descendant features\n if instance is None:\n # This happens when OPTIONS is called from browsable API\n return None\n self.add_sources(instance)\n\n ret = OrderedDict()\n fields = self._readable_fields\n\n for field in fields:\n attribute = field.get_attribute(instance)\n assert attribute is not None, (\n 'field.get_attribute return None for instance %s, field %s'\n % (instance, field))\n field_ret = field.to_representation(attribute)\n if isinstance(field, ListSerializer):\n # Wrap lists of related resources in a ReturnList, so that the\n # renderer has access to the serializer\n field_ret = ReturnList(field_ret, serializer=field)\n ret[field.field_name] = field_ret\n\n return ReturnDict(ret, serializer=self)", "def export_gltf(self, path):\n with open(path, 'w') as gltf_f:\n json.dump(self.to_dict(), gltf_f)", "def _parse_routepart(self, data):\n points = [self._parse_trip_point(point) for point in data.findall('./itdPoint')]\n\n path = []\n for coords in data.findall('./itdPathCoordinates/itdCoordinateBaseElemList/itdCoordinateBaseElem'):\n path.append(Coordinates(int(coords.find('y').text) / 1000000, int(coords.find('x').text) / 1000000))\n\n motdata = self._parse_mot(data.find('./itdMeansOfTransport'))\n\n if motdata is None or data.attrib['type'] == 'IT':\n waytype = {\n '98': 'walk',\n '99': 'walk',\n '100': 'walk',\n '101': 'bike',\n '104': 'car',\n '105': 'taxi'\n }[data.find('./itdMeansOfTransport').attrib['type']]\n # 98 = gesichter anschluss\n\n way = 
Way(WayType(waytype), points[0].stop, points[1].stop)\n way.distance = data.attrib.get('distance')\n if way.distance is not None:\n way.distance = float(way.distance)\n duration = data.attrib.get('timeMinute', None)\n if duration is not None:\n way.duration = timedelta(minutes=int(duration))\n if path:\n way.path = path\n return way\n\n else:\n origin, destination, line, ridenum, ridedir, canceled = motdata\n\n if data.find('./genAttrList/genAttrElem[value=\"HIGHSPEEDTRAIN\"]') is not None:\n line.linetype = LineType('train.longdistance.highspeed')\n elif data.find('./genAttrList/genAttrElem[value=\"LONG_DISTANCE_TRAIN\"]') is not None:\n line.linetype = LineType('train.longdistance')\n\n train_line = line.linetype in self.train_station_lines\n\n # Build Ride Objekt with known stops\n ride = Ride(line, ridenum)\n ride.canceled = canceled\n ride.direction = ridedir\n for infotext in data.findall('./infoTextList/infoTextListElem'):\n ride.infotexts.append(infotext)\n\n first = None\n last = None\n waypoints = False\n if data.find('./itdStopSeq'):\n new_points = [self._parse_trip_point(point, train_line=train_line) for point in data.findall('./itdStopSeq/itdPoint')]\n if not new_points or new_points[0].stop != new_points[0].stop:\n new_points.insert(0, points[0])\n if new_points[-1].stop != points[1].stop:\n new_points.append(points[1])\n points = new_points\n waypoints = True\n\n for p in points:\n if not waypoints and first is None:\n ride.append(None)\n pointer = ride.append(p)\n if first is None:\n first = pointer\n last = pointer\n\n if origin is not None:\n if origin != ride[0].stop:\n ride.prepend(None)\n ride.prepend(TimeAndPlace(Platform(origin)))\n else:\n ride.prepend(None)\n\n if destination is not None:\n if destination != ride[-1].stop:\n ride.append(None)\n ride.append(TimeAndPlace(Platform(destination)))\n else:\n ride.append(None)\n\n segment = ride[first:last]\n paths = self._split_path(path, [p.platform.coords for p in segment])[:-1]\n for i, point in segment.items():\n if not paths:\n break\n segment.ride._paths[i] = paths.pop(0)\n return segment", "def export_geojson(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".geojson\",\n filetypes=((\"geo json\", \"*.geojson\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n self.tabs.window.aistracker.create_geojson_map(outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def gtfs_routes(gtfs, output_f):\n\n\t# Load up the stop times so we can find which are the best routes.\n\t#TODO\n\tstop_times_file = [x for x in gtfs.namelist() if 'stop_times' in x][0]\n\n\tstoptimes_c = csv.reader((gtfs.open(stop_times_file, 'r')))\n\theader = stoptimes_c.next()\n\ttrip_id_col = header.index('trip_id')\n\tarrtime_col = header.index('arrival_time')\n\tdeptime_col = header.index('departure_time')\n\tstopseq_col = header.index('stop_sequence')\n\ttrip_times = {}\n\tfor row in stoptimes_c:\n\t\tif row[trip_id_col] not in trip_times:\n\t\t\t# earliest seq, latest seq, earliest seq dep time, latest seq dep time\n\t\t\ttrip_times[row[trip_id_col]] = [None, None, None, None]\n\n\t\tarrtime = time_as_timedelta(row[arrtime_col])\n\t\tdeptime = time_as_timedelta(row[deptime_col])\n\t\tif arrtime is None or deptime is None:\n\t\t\t# bad data, skip!\n\t\t\tcontinue\n\t\tseq = int(row[stopseq_col])\n\n\t\t# Find if this is an earlier item in the sequence\n\t\tif trip_times[row[trip_id_col]][0] is None or trip_times[row[trip_id_col]][0] > seq:\n\t\t\ttrip_times[row[trip_id_col]][0] = 
seq\n\t\t\ttrip_times[row[trip_id_col]][2] = deptime\n\n\t\t# Find if this is an later item in the sequence\n\t\tif trip_times[row[trip_id_col]][1] is None or trip_times[row[trip_id_col]][1] < seq:\n\t\t\ttrip_times[row[trip_id_col]][1] = seq\n\t\t\ttrip_times[row[trip_id_col]][3] = arrtime\n\n\t# Load the shapes into a map that we can lookup.\n\t# We should do all the geometry processing here so that we only have to do\n\t# this once-off.\n\t#TODO\n\tshapes_file = [x for x in gtfs.namelist() if 'shapes' in x][0]\n\tshapes_c = csv.reader(swallow_windows_unicode(gtfs.open(shapes_file, 'r')))\n\n\theader = shapes_c.next()\n\tshape_id_col = header.index('shape_id')\n\tshape_lat_col = header.index('shape_pt_lat')\n\tshape_lng_col = header.index('shape_pt_lon')\n\tshape_seq_col = header.index('shape_pt_sequence')\n\tshape_dist_col = header.index('shape_dist_traveled') if 'shape_dist_traveled' in header else None\n\n\tshapes = {}\n\tshape_lengths = {}\n\tfor row in shapes_c:\n\t\tif row[shape_id_col] not in shapes:\n\t\t\tshapes[row[shape_id_col]] = {}\n\n\t\tshapes[row[shape_id_col]][int(row[shape_seq_col])] = (Decimal(row[shape_lng_col]), Decimal(row[shape_lat_col]))\n\n\t\t# Calculate length according to GTFS\n\t\t# This could also be calculated by the geometry, but we trust GTFS, right...\n\t\tif shape_dist_col is not None and row[shape_dist_col]:\n\t\t\tlength = Decimal(row[shape_dist_col])\n\t\t\tif row[shape_id_col] not in shape_lengths or shape_lengths[row[shape_id_col]] < length:\n\t\t\t\tshape_lengths[row[shape_id_col]] = length\n\n\t# translate the shapes into a LineString for use by the GeoJSON module\n\tfor shape_id in shapes.iterkeys():\n\t\tshape_keys = shapes[shape_id].keys()\n\t\tshape_keys.sort()\n\t\tshape = []\n\t\tfor ordinal in shape_keys:\n\t\t\tshape.append(shapes[shape_id][ordinal])\n\n\t\tshapes[shape_id] = shape\n\n\t# Make a matching dict between routes and shapes\n\ttrips = {}\n\ttrips_ref = {}\n\troute_time = {}\n\n\t#TODO\n\ttrips_file = [x for x in gtfs.namelist() if 'trips' in x][0]\n\n\ttrips_c = csv.reader(swallow_windows_unicode(gtfs.open(trips_file, 'r')))\n\theader = trips_c.next()\n\troute_id_col = header.index('route_id')\n\tshape_id_col = header.index('shape_id')\n\ttrip_id_col = header.index('trip_id')\n\tfor row in trips_c:\n\t\t# reference count the shapes\n\t\tif row[route_id_col] not in trips_ref:\n\t\t\t# route is unknown, create dict\n\t\t\ttrips_ref[row[route_id_col]] = {}\n\t\t\troute_time[row[route_id_col]] = trip_times[row[trip_id_col]]\n\n\t\tif row[shape_id_col] not in trips_ref[row[route_id_col]]:\n\t\t\t# shape is unknown, create counter\n\t\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] = 0\n\n\t\t# increment counter\n\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] += 1\n\n\t# now we're done, iterate through the reference-counters and find the best\n\t# shape\n\tfor route_id, candidate_shapes in trips_ref.iteritems():\n\t\tpopular_shape, popular_shape_refs = None, 0\n\t\tfor shape_id, refs in candidate_shapes.iteritems():\n\t\t\tif refs > popular_shape_refs:\n\t\t\t\tpopular_shape, popular_shape_refs = shape_id, refs\n\n\t\t# now we should have the route's shape\n\t\tassert popular_shape is not None, 'Couldn\\'t find a shape for route %r' % route_id\n\t\ttrips[route_id] = popular_shape\n\n\t# Cleanup unused variables\n\tdel trip_times\n\n\t# lets setup our output file\n\toutput_layer = geojson.FeatureCollection([])\n\t# assume WGS84 CRS\n\toutput_layer.crs = geojson.crs.Named('urn:ogc:def:crs:OGC:1.3:CRS84')\n\n\t# now we have all 
the shapes available, translate the routes\n\t#TODO\n\troutes_file = [x for x in gtfs.namelist() if 'routes' in x][0]\n\n\troutes_c = csv.reader(swallow_windows_unicode(gtfs.open(routes_file, 'r')))\n\theader = routes_c.next()\n\troute_id_col = header.index('route_id')\n\n\tfor row in routes_c:\n\t\t# make dict of other properties\n\t\tprops = dict()\n\t\tfor i, h in enumerate(header):\n\t\t\tif row[i] != '':\n\t\t\t\tprops[h] = row[i]\n\n\t\tif row[route_id_col] not in trips:\n\t\t\t# Route has no trips!\n\t\t\tprint \"Warning: route has no trips, skipping: %r\" % (row,)\n\t\t\tcontinue\n\n\t\tprops['shape_id'] = trips[row[route_id_col]]\n\t\tprops['shape_refs'] = trips_ref[row[route_id_col]][props['shape_id']]\n\t\tif shape_dist_col is not None and len(shape_lengths) > 0:\n\t\t\tprops['shape_length'] = shape_lengths[props['shape_id']]\n\t\tprops['duration_sec'] = (route_time[row[route_id_col]][3] - route_time[row[route_id_col]][2]).total_seconds()\n\n\t\toutput_layer.features.append(geojson.Feature(\n\t\t\tgeometry=geojson.LineString(\n\t\t\t\tcoordinates=shapes[trips[row[route_id_col]]]\n\t\t\t),\n\t\t\tproperties=props,\n\t\t\tid=row[route_id_col]\n\t\t))\n\n\t# now flush the GeoJSON layer to a file.\n\tgeojson.dump(output_layer, output_f, cls=DecimalEncoder)", "def response(status, response_def):\n def decorator(fn): # pylint: disable=missing-docstring\n meta = RouteMeta.load(fn)\n meta.set_response(status, response_def)\n meta.save()\n return fn\n return decorator", "def generate_feature(dataset_path, output_path, num_threads):\n settings = BeatSettings()\n\n if output_path is not None:\n settings.dataset.feature_save_path = output_path\n\n beat.app.generate_feature(dataset_path, beat_settings=settings, num_threads=num_threads)", "def project(self, feature):\n return feature", "def export_feature_values(\n self,\n ) -> Callable[\n [featurestore_service.ExportFeatureValuesRequest],\n Awaitable[operations_pb2.Operation],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"export_feature_values\" not in self._stubs:\n self._stubs[\"export_feature_values\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues\",\n request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"export_feature_values\"]", "def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)", "def features_2_result_page():\n #  Get all fields from form\n features = request.forms.getall('features')\n\n # Build html\n features_2, result = do_features_request_2(features=features)\n\n # Build template page\n with open(\"./header.html\") as header, open('./features_2.tpl') as features, open('./footer.html') as footer:\n template_html = header.read() + features.read() + footer.read()\n\n if not result:\n result = []\n\n output = template(template_html, features=features_2, result=result)\n\n return output", "def extract(cls, path, outdir):\r\n raise NotImplementedError()", "def get_features(self, state):\n temp_feats = 
np.zeros([8, NUM_VALUES])\n\n board, head = state\n head_pos, direction = head\n\n # the route for each region\n forward = ['F']\n left = ['L']\n right = ['R']\n forward_region = ['F', 'F', 'F']\n forward_left_region = ['L', 'R', 'F', 'F', 'L', 'L', 'F']\n forward_right_region = ['R', 'L', 'F', 'F', 'R', 'R', 'F']\n right_region = ['R', 'F', 'R', 'R']\n left_region = ['L', 'F', 'L', 'L']\n\n routes = [forward, left, right, forward_region, forward_left_region,\n forward_right_region, right_region, left_region]\n\n # for each route, count how many of each objects it contains\n for route_ind, route in enumerate(routes):\n temp_pos = head_pos\n temp_direction = direction\n for step in route:\n temp_direction = bp.Policy.TURNS[temp_direction][step]\n temp_pos = temp_pos.move(temp_direction)\n r = temp_pos[0]\n c = temp_pos[1]\n temp_feats[route_ind, board[r, c] + 1] += 1\n # we add one in the index since the minimum value is -1\n\n feats = temp_feats.flatten()\n\n return feats", "def export_gps_route( trip_id, trip_date, vehicle_id, \n gtfs_error, offset_seconds,\n gps_data ):\n\n sql1 = \"\"\"insert into gps_segments (\n trip_id, trip_date, vehicle_id,\n schedule_error, schedule_offset_seconds\n ) VALUES (\n %(trip_id)s,%(trip_date)s,%(vehicle_id)s,\n %(gtfs_error)s, %(offset)s\n ) RETURNING gps_segment_id\"\"\"\n\n sql2 = \"\"\"insert into tracked_routes (\n gps_segment_id, lat, lon, reported_update_time\n ) VALUES (\n %(seg_id)s,%(lat)s,%(lon)s,%(reported_update_time)s\n )\"\"\"\n cur = get_cursor()\n\n \n SQLExec(cur,sql1,\n {'trip_id':trip_id,'trip_date':trip_date,'vehicle_id':vehicle_id,\n 'gtfs_error':str(gtfs_error),'offset':offset_seconds});\n segment_id = list(cur.fetchall())[0][0];\n \n for lat,lon,reported_update_time in gps_data:\n SQLExec(cur,sql2,\n {'lat':lat,'lon':lon,\n 'reported_update_time':reported_update_time,\n 'seg_id':str(segment_id)});\n\n cur.close()\n return segment_id", "def GetRouteAndSave(self, route, params=None):\n url = Url(route, params)\n return json.loads(self.GetAndSave(url, DEFAULT_SUFFIX))", "def getGTFSRouteData(route_id):\n cur = get_cursor();\n SQLExec(cur,\"select * from gtf_routes where route_id=%(id)s\",{'id':str(route_id)});\n ret = cur.next();\n cur.close();\n return ret;", "def output(self):\n print(\">>>>>\\n\")\n print(self.input()[0].path)\n return GCSTarget(self.input()[0].path + '.label.csv')", "def export(self, out_filename='RESULT.csv'):\n\n query = input(\n \"Enter output filename (Default = {}):\\n>\".format(out_filename))\n # Set filename to the default value if user doesn't enter one\n if query == '':\n query = out_filename\n\n with open(query, encoding='utf-8', mode='w', newline='\\n') as out_file:\n writer = csv.DictWriter(out_file, fieldnames=self.headers)\n\n writer.writeheader() # Write the headers to the first row\n for record in self.records:\n writer.writerow(record.attributes)\n\n print(\"Successfully output to {}\".format(query))", "def export_as(self, version):\n # its a new version please update the paths\n version.update_paths()\n # set the extension to '.comp'\n version.extension = self.extensions[0]\n version.created_with = self.name\n\n raise NotImplementedError(\n 'export_as() is not implemented yet for Fusion'\n )\n\n # create a local copy\n self.create_local_copy(version)", "def obtain_results(self,assignment,show=False):\n\t\t# logger.debug('Objective: {} meters'.format(assignment.ObjectiveValue()))\n\t\tindex=self.routing.Start(0)\n\t\tplan_output='Route for vehicle 
0:\\n'\n\t\troute_distance=0\n\t\tnodes=list()\n\t\twhile True:\n\t\t\tnode=self.manager.IndexToNode(index)\n\t\t\tnodes.append(node)\n\t\t\tplan_output+=' {} ->'.format(node)\n\t\t\tprevious_index=index\n\t\t\tif self.routing.IsEnd(index): break\n\t\t\tindex=assignment.Value(self.routing.NextVar(index))\n\t\t\troute_distance+=self.routing.GetArcCostForVehicle(previous_index,index,0)\n\t\t# plan_output+=' {}\\n'.format(manager.IndexToNode(index))\n\t\t# plan_output+='Route distance: {} meters\\n'.format(route_distance)\n\t\t# logger.debug('plan_output:\\n%s',plan_output)\n\t\t# logger.debug('nodes:\\n%s',nodes)\n\t\tif show:\n\t\t\tplt.scatter(self.city_coors[:,0],self.city_coors[:,1])\n\t\t\t# plt.scatter(*depot_point)\n\t\t\tplt.plot(self.city_coors[nodes][:,0],self.city_coors[nodes][:,1])\n\t\t\tplt.show()\n\t\treturn nodes", "def on_action_4_triggered(self):\n # TODO: not implemented yet\n model = self.model2\n self.doExport(model)\n #raise NotImplementedError", "def test_to_geojson(self):\n fc = self.read_feature()\n dest_filename = str(self.datadir.join('test.geojson'))\n fc.to_geojson(dest_filename)\n fc_check = read_feature_collection(dest_filename)\n self.check_feature(fc_check.features[0])", "def test_get_routes(self):\n routes = self.stop.routes\n self.assertEqual(type(routes), type([]))\n [self.assertEqual(type(i), BusRoute) for i in routes]\n routes[0].__repr__()\n routes[0].__str__()\n routes[0].__unicode__()", "def trace_route(self, start_waypoint, end_waypoint, sampling_distance=10.0):\n self.target_waypoint = end_waypoint\n\n # Setting up global router\n if self._grp is None:\n dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), sampling_distance)\n grp = GlobalRoutePlanner(dao)\n grp.setup()\n self._grp = grp\n\n # Obtain route plan\n route = self._grp.trace_route(start_waypoint.transform.location, end_waypoint.transform.location)\n self.route = route\n return route", "def to_featureset(self):\r\n from arcgis.features import FeatureSet\r\n return FeatureSet.from_dataframe(self)" ]
[ "0.6221787", "0.5916604", "0.53928643", "0.5349487", "0.5265204", "0.52557456", "0.51746285", "0.51711124", "0.51549613", "0.5122801", "0.51089555", "0.50449544", "0.5029637", "0.50276506", "0.5018805", "0.4998023", "0.49829662", "0.49803904", "0.49756134", "0.49702242", "0.49286768", "0.48718175", "0.4831356", "0.4831141", "0.48309848", "0.48194715", "0.48189428", "0.48159936", "0.48134634", "0.47990984", "0.47932795", "0.47905624", "0.47592545", "0.47592545", "0.47570395", "0.4732999", "0.47177234", "0.47078508", "0.47010556", "0.46987742", "0.46949032", "0.46930036", "0.46846396", "0.46829888", "0.46648023", "0.46578908", "0.4638038", "0.4615688", "0.461384", "0.46094066", "0.46063155", "0.4601502", "0.45963472", "0.45955065", "0.45840397", "0.45838603", "0.45655495", "0.45624346", "0.455939", "0.4559014", "0.45457858", "0.4545152", "0.45332423", "0.45309764", "0.45265245", "0.45230052", "0.45180246", "0.4517591", "0.45153368", "0.45063803", "0.45035866", "0.45031908", "0.4500744", "0.4497914", "0.4491424", "0.44886267", "0.44864494", "0.4486304", "0.44860193", "0.44854072", "0.44844007", "0.4484104", "0.44815862", "0.44803292", "0.44790894", "0.44757032", "0.4474664", "0.44658354", "0.44564706", "0.44558516", "0.44543067", "0.44526112", "0.44502896", "0.44455704", "0.44443667", "0.44423583", "0.4441504", "0.44399977", "0.4439846", "0.44391355" ]
0.72400814
0
Solve a Route analysis for the given inputs for the given chunk of preassigned OD pairs.
def solve_route(inputs, chunk):
    rt = Route(**inputs)
    if inputs["pair_type"] is helpers.PreassignedODPairType.one_to_one:
        rt.logger.info(f"Processing origins OID {chunk[0]} to {chunk[1]} as job id {rt.job_id}")
    elif inputs["pair_type"] is helpers.PreassignedODPairType.many_to_many:
        rt.logger.info(f"Processing chunk {chunk[0]} as job id {rt.job_id}")
    rt.solve(chunk)
    rt.teardown_logger()
    return rt.job_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches\r\n # Select the inputs to process\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._select_inputs_one_to_one(chunk_definition)\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._get_od_pairs_for_chunk(chunk_definition)\r\n self._select_inputs_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n # Initialize the Route solver object\r\n self.initialize_rt_solver()\r\n self._add_unique_id_fields()\r\n\r\n # Insert the origins and destinations\r\n self.logger.debug(f\"Route solver fields transferred from Origins: {self.origin_transfer_fields}\")\r\n self.logger.debug(f\"Route solver fields transferred from Destinations: {self.destination_transfer_fields}\")\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._insert_stops_one_to_one()\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._insert_stops_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0:\r\n # There were no valid destinations for this set of origins\r\n self.logger.debug(\"No valid destinations for this set of origins. Skipping Route calculation.\")\r\n return\r\n\r\n # Load barriers\r\n # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of\r\n # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the\r\n # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers,\r\n # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal\r\n # solves. 
However, since barriers is likely an unusual case, deal with this only if it becomes a problem.\r\n for barrier_fc in self.barriers:\r\n self.logger.debug(f\"Loading barriers feature class {barrier_fc}...\")\r\n shape_type = arcpy.Describe(barrier_fc).shapeType\r\n if shape_type == \"Polygon\":\r\n class_type = arcpy.nax.RouteInputDataType.PolygonBarriers\r\n elif shape_type == \"Polyline\":\r\n class_type = arcpy.nax.RouteInputDataType.LineBarriers\r\n elif shape_type == \"Point\":\r\n class_type = arcpy.nax.RouteInputDataType.PointBarriers\r\n else:\r\n self.logger.warning(\r\n f\"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored.\"\r\n )\r\n continue\r\n barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True)\r\n self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True)\r\n\r\n # Solve the Route analysis\r\n self.logger.debug(\"Solving Route...\")\r\n solve_start = time.time()\r\n self.solve_result = self.rt_solver.solve()\r\n solve_end = time.time()\r\n self.logger.debug(f\"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.\")\r\n\r\n # Handle solve messages\r\n solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)]\r\n for msg in solve_msgs:\r\n self.logger.debug(msg)\r\n\r\n # Update the result dictionary\r\n self.job_result[\"solveMessages\"] = solve_msgs\r\n if not self.solve_result.solveSucceeded:\r\n self.logger.debug(\"Solve failed.\")\r\n return\r\n self.logger.debug(\"Solve succeeded.\")\r\n self.job_result[\"solveSucceeded\"] = True\r\n\r\n # Save output\r\n self._export_to_feature_class(chunk_definition)\r\n\r\n self.logger.debug(\"Finished calculating Route.\")", "def main():\n aoc_input = aoc_01_input.get_input()\n\n current_direction = 'N'\n steps_north = 0\n steps_east = 0\n\n # For part 2: Store all the coords visited in a list\n all_coords_list = []\n # A variable to save HQ coordinates in\n hq_coords = None\n\n for instruction in aoc_input:\n # One instruction is eg 'R2' or 'L44'\n input_turn = instruction[0]\n input_steps = int(instruction[1:])\n\n current_direction = change_direction(current_direction, input_turn)\n\n if current_direction == 'N':\n\n for k in range(input_steps):\n current_coords = [steps_north + k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north += input_steps\n\n elif current_direction == 'E':\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east + k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east += input_steps\n\n elif current_direction == 'S':\n\n for k in range(input_steps):\n current_coords = [steps_north - k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north -= input_steps\n\n else:\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east - k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east -= input_steps\n\n current_coords = [steps_north, steps_east]\n\n total_distance = abs(steps_north) + abs(steps_east)\n\n total_distance_part2 = abs(hq_coords[0]) + abs(hq_coords[1])\n\n print('Part 1: {}'.format(total_distance))\n print('Part 2: {}'.format(total_distance_part2))\n\n # print('Part 1: {}'.format(get_root(aoc_input[:])['name']))\n # print('Part 2: {}'.format(find_imbalance(aoc_input[:])))", "def 
solve_inputfile(inputfile):\n graph,source,homes,indexToLoc = graph_file_io.graph_from_input(inputfile)\n cost, dropoffs, route, num_clusters = optimal_route(graph,homes,source)\n named_route = [indexToLoc[r] for r in route]\n named_dropoffs = get_named_dict(dropoffs,indexToLoc)\n return named_route, named_dropoffs", "def __init__( # pylint: disable=too-many-locals, too-many-arguments\r\n self, pair_type_str, origins, origin_id_field, destinations, dest_id_field,\r\n network_data_source, travel_mode, time_units, distance_units,\r\n max_routes, max_processes, out_routes, scratch_folder, reverse_direction=False,\r\n assigned_dest_field=None, od_pair_table=None, time_of_day=None, barriers=None\r\n ):\r\n pair_type = helpers.PreassignedODPairType[pair_type_str]\r\n self.origins = origins\r\n self.destinations = destinations\r\n self.out_routes = out_routes\r\n self.scratch_folder = scratch_folder\r\n time_units = helpers.convert_time_units_str_to_enum(time_units)\r\n distance_units = helpers.convert_distance_units_str_to_enum(distance_units)\r\n if not barriers:\r\n barriers = []\r\n self.max_processes = max_processes\r\n if not time_of_day:\r\n time_of_day = None\r\n else:\r\n time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT)\r\n\r\n # Initialize the dictionary of inputs to send to each OD solve\r\n self.rt_inputs = {\r\n \"pair_type\": pair_type,\r\n \"origins\": self.origins,\r\n \"origin_id_field\": origin_id_field,\r\n \"destinations\": self.destinations,\r\n \"dest_id_field\": dest_id_field,\r\n \"network_data_source\": network_data_source,\r\n \"travel_mode\": travel_mode,\r\n \"time_units\": time_units,\r\n \"distance_units\": distance_units,\r\n \"time_of_day\": time_of_day,\r\n \"reverse_direction\": reverse_direction,\r\n \"scratch_folder\": self.scratch_folder,\r\n \"assigned_dest_field\": assigned_dest_field,\r\n \"od_pair_table\": od_pair_table,\r\n \"barriers\": barriers,\r\n \"origin_transfer_fields\": [], # Populate later\r\n \"destination_transfer_fields\": [] # Populate later\r\n }\r\n\r\n # List of intermediate output OD Line files created by each process\r\n self.route_fcs = []\r\n\r\n # Construct OID ranges for chunks of origins and destinations\r\n if pair_type is helpers.PreassignedODPairType.one_to_one:\r\n # Chunks are of the format [first origin ID, second origin ID]\r\n self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes)\r\n elif pair_type is helpers.PreassignedODPairType.many_to_many:\r\n # Chunks are of the format [chunk_num, chunk_size]\r\n num_od_pairs = 0\r\n with open(od_pair_table, \"r\", encoding=\"utf-8\") as f:\r\n for _ in f:\r\n num_od_pairs += 1\r\n num_chunks = ceil(num_od_pairs / max_routes)\r\n self.chunks = [[i, max_routes] for i in range(num_chunks)]\r\n\r\n # Calculate the total number of jobs to use in logging\r\n self.total_jobs = len(self.chunks)\r\n\r\n self.optimized_cost_field = None", "def solve_tsp(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n drop_off_dict = {}\n car_path = []\n home_map = {}\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n\n start_in_home = start in home_indexes\n if start in home_indexes:\n home_indexes.remove(start)\n home_indexes.insert(0, start)\n home_count = 0;\n\n for home in home_indexes:\n #print(home, end = \" \")\n 
home_map[home_count] = home\n home_count += 1\n # Instantiate the data problem.\n #print(len(home_map))\n data = create_data_model(home_indexes, 0)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n #print(manager.NodeToIndex(15))\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n #print(home_map[to_index], end = \" \")\n from_index = manager.IndexToNode(from_index)\n to_index = manager.IndexToNode(to_index)\n dist_to = all_paths.get(home_map[from_index])[0][home_map[to_index]]\n #if from_index >= 25 or to_index >= 25:\n # print(\"from\" if from_index >= 25 else \"to\", end = \" \")\n #dist_to = all_paths[from_index][0][to_index]\n return dist_to\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n \"\"\"\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n \"\"\"\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 3\n #search_parameters.log_search = True\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # if assignment:\n # print_solution(manager, routing, assignment)\n # Print solution on console.\n\n if start in home_indexes:\n drop_off_dict[start] = [start]\n\n\n index = routing.Start(0)\n car_path.append(start)\n\n while not routing.IsEnd(index):\n previous_index = manager.IndexToNode(index)\n index = assignment.Value(routing.NextVar(index))\n\n car_path.pop();\n to_index = manager.IndexToNode(index)\n path_to = all_paths.get(home_map[previous_index])[1][home_map[to_index]]\n drop_off_dict[home_map[to_index]] = [home_map[to_index]]\n #print(to_index, end = ' ')\n car_path.extend(path_to)\n #route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n # for i in car_path:\n # print(i)\n if start in drop_off_dict.keys() and not start_in_home:\n drop_off_dict.pop(start, None)\n\n return car_path, drop_off_dict", "def solve_inputs(input_reader, output_writer):\n for input_values in parse_input(input_reader):\n solve_problem(output_writer=output_writer, **input_values)", "def solve(input):\n T = int(input.readline())\n\n \"\"\" line = A, B \"\"\"\n for i in range(T):\n line = input.readline().rstrip()\n A, B = line.split()\n length = len(A)\n A = int(A)\n B = int(B)\n print \"Case #{0}: {1}\".format(i+1, do_solve(A, B, length))", "def solve_route_in_parallel(self):\r\n # Validate Route settings. Essentially, create a dummy Route class instance and set up the\r\n # solver object to ensure this at least works. 
Do this up front before spinning up a bunch of parallel processes\r\n # that are guaranteed to all fail.\r\n self._validate_route_settings()\r\n\r\n # Check if the input origins and destinations have any fields we should use in the route analysis\r\n self._populate_input_data_transfer_fields()\r\n\r\n # Compute Route in parallel\r\n LOGGER.info(f\"Beginning parallelized Route solves ({self.total_jobs} chunks)\")\r\n completed_jobs = 0 # Track the number of jobs completed so far to use in logging\r\n # Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes that solve the routes\r\n with futures.ProcessPoolExecutor(max_workers=self.max_processes) as executor:\r\n # Each parallel process calls the solve_route() function with the rt_inputs dictionary for the\r\n # given origin ranges and their assigned destinations.\r\n jobs = {executor.submit(solve_route, self.rt_inputs, range): range for range in self.chunks}\r\n # As each job is completed, add some logging information and store the results to post-process later\r\n for future in futures.as_completed(jobs):\r\n try:\r\n # The Route job returns a results dictionary. Retrieve it.\r\n result = future.result()\r\n except Exception: # pylint: disable=broad-except\r\n # If we couldn't retrieve the result, some terrible error happened and the job errored.\r\n # Note: This does not mean solve failed. It means some unexpected error was thrown. The most likely\r\n # causes are:\r\n # a) If you're calling a service, the service was temporarily down.\r\n # b) You had a temporary file read/write or resource issue on your machine.\r\n # c) If you're actively updating the code, you introduced an error.\r\n # To make the tool more robust against temporary glitches, retry submitting the job up to the number\r\n # of times designated in helpers.MAX_RETRIES. If the job is still erroring after that many retries,\r\n # fail the entire tool run.\r\n errs = traceback.format_exc().splitlines()\r\n failed_range = jobs[future]\r\n LOGGER.debug((\r\n f\"Failed to get results for Route chunk {failed_range} from the parallel process. Will retry \"\r\n f\"up to {helpers.MAX_RETRIES} times. Errors: {errs}\"\r\n ))\r\n job_failed = True\r\n num_retries = 0\r\n while job_failed and num_retries < helpers.MAX_RETRIES:\r\n num_retries += 1\r\n try:\r\n future = executor.submit(solve_route, self.rt_inputs, failed_range)\r\n result = future.result()\r\n job_failed = False\r\n LOGGER.debug(f\"Route chunk {failed_range} succeeded after {num_retries} retries.\")\r\n except Exception: # pylint: disable=broad-except\r\n # Update exception info to the latest error\r\n errs = traceback.format_exc().splitlines()\r\n if job_failed:\r\n # The job errored and did not succeed after retries. 
Fail the tool run because something\r\n # terrible is happening.\r\n LOGGER.debug(f\"Route chunk {failed_range} continued to error after {num_retries} retries.\")\r\n LOGGER.error(\"Failed to get Route result from parallel processing.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise\r\n\r\n # If we got this far, the job completed successfully and we retrieved results.\r\n completed_jobs += 1\r\n LOGGER.info(\r\n f\"Finished Route calculation {completed_jobs} of {self.total_jobs}.\")\r\n\r\n # Parse the results dictionary and store components for post-processing.\r\n if result[\"solveSucceeded\"]:\r\n self.route_fcs.append(result[\"outputRoutes\"])\r\n else:\r\n # Typically, a solve fails because no destinations were found for any of the origins in the chunk,\r\n # and this is a perfectly legitimate failure. It is not an error. However, they may be other, less\r\n # likely, reasons for solve failure. Write solve messages to the main GP message thread in debug\r\n # mode only in case the user is having problems. The user can also check the individual OD log\r\n # files.\r\n LOGGER.debug(f\"Solve failed for job id {result['jobId']}.\")\r\n LOGGER.debug(result[\"solveMessages\"])\r\n\r\n # Post-process outputs\r\n if self.route_fcs:\r\n LOGGER.info(\"Post-processing Route results...\")\r\n self.route_fcs = sorted(self.route_fcs)\r\n self._post_process_route_fcs()\r\n else:\r\n LOGGER.warning(\"All Route solves failed, so no output was produced.\")\r\n\r\n # Clean up\r\n # Delete the job folders if the job succeeded\r\n if DELETE_INTERMEDIATE_OUTPUTS:\r\n LOGGER.info(\"Deleting intermediate outputs...\")\r\n try:\r\n shutil.rmtree(self.scratch_folder, ignore_errors=True)\r\n except Exception: # pylint: disable=broad-except\r\n # If deletion doesn't work, just throw a warning and move on. 
This does not need to kill the tool.\r\n LOGGER.warning(f\"Unable to delete intermediate Route output folder {self.scratch_folder}.\")\r\n\r\n LOGGER.info(\"Finished calculating Routes.\")", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n locations = student_utils.convert_locations_to_indices(list_of_locations, list_of_locations)\n homes = student_utils.convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n\n start_time = time.time()\n\n if params[0] == 'naive':\n car_path, drop_off = naive_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy':\n car_path, drop_off = greedy_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'three_opt':\n car_path, drop_off = three_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'ant_colony':\n car_path, drop_off = ant_colony(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_three_opt':\n car_path, drop_off = greedy_clustering_three_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'mst':\n car_path, drop_off = mst_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'two_opt':\n car_path, drop_off = two_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_two_opt':\n car_path, drop_off = greedy_clustering_two_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n else:\n pass", "def solve_part1(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_same_precedence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)", "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n# distance_matrix = compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n 
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n# if assignment:\n# print_solution(manager, routing, assignment)\n if assignment:\n address1,address2,address3,address4,address5,address6,address7,address8,address9,address10=\\\n set_address_path(manager, routing, assignment,destinations_1)\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def main():\r\n # Instantiate the data problem.\r\n data = create_data_model()\r\n\r\n # Create the routing index manager.\r\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n # Create Routing Model.\r\n routing = pywrapcp.RoutingModel(manager)\r\n\r\n\r\n # Create and register a transit callback.\r\n def distance_callback(from_index, to_index):\r\n \"\"\"Returns the distance between the two nodes.\"\"\"\r\n # Convert from routing variable Index to distance matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return data['distance_matrix'][from_node][to_node]\r\n\r\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n # Define cost of each arc.\r\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n\r\n # Add Capacity constraint.\r\n def demand_callback(from_index):\r\n \"\"\"Returns the demand of the node.\"\"\"\r\n # Convert from routing variable Index to demands NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n return data['demands'][from_node]\r\n\r\n demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n demand_callback)\r\n routing.AddDimensionWithVehicleCapacity(\r\n demand_callback_index,\r\n 0, # null capacity slack\r\n data['vehicle_capacities'], # vehicle maximum capacities\r\n True, # start cumul to zero\r\n 'Capacity')\r\n\r\n # Setting first solution heuristic.\r\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n search_parameters.first_solution_strategy = (\r\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n\r\n # Solve the problem.\r\n assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n # Print solution on console.\r\n if assignment:\r\n print_solution(data, manager, routing, assignment)", "def solve(puzzle_input):\r\n return {'a': part_a(puzzle_input), 'b': part_b(puzzle_input)}", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = 
adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 
1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node 
in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def solve(self, indices_to_visit: List[int] = None) -> Dict[str, Any]:\n if indices_to_visit is None:\n indices_to_visit = list(range(len(self.matrix)))\n \n # make sure home location is in the listed, and that the list is sorted\n if self.home_index not in indices_to_visit:\n indices_to_visit.append(self.home_index)\n indices_to_visit.sort()\n \n data = self._create_data_model(indices_to_visit)\n\n # create routing index manager\n manager = RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['home'])\n\n # create routing model\n routing = RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n # returns distance between two nodes\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n dist = data['distance_matrix'][from_node][to_node]\n\n return dist\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # define cost of each arc\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # set first solution heuristic\n search_params = pywrapcp.DefaultRoutingSearchParameters()\n search_params.first_solution_strategy = (routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # solve problem\n assignment = routing.SolveWithParameters(search_params)\n\n return self._extract_solution(manager, routing, assignment, indices_to_visit)", "def process_solution(self, solution: dict) -> (list, float):\n start_time = time() * 1000\n nodes = self.application.nodes()\n start = np.min(nodes)\n # fill route with None values\n route = [None] * len(self.application)\n # get nodes from sample\n # NOTE: Prevent duplicate node entries by enforcing only one occurrence per node along route\n logging.info(str(solution.items()))\n\n for (node, timestep), val in solution.items():\n if val:\n logging.info((node, timestep))\n if val and (node not in route):\n route[timestep] = node\n\n # check whether every timestep has only 1 node flagged\n for i in nodes:\n relevant_nodes = []\n relevant_timesteps = []\n for (node, timestep) in solution.keys():\n if node == i:\n relevant_nodes.append(solution[(node, timestep)])\n if timestep == i:\n relevant_timesteps.append(solution[(node, timestep)])\n if 
sum(relevant_nodes) != 1 or sum(relevant_timesteps) != 1:\n # timestep or nodes have more than 1 or 0 flags\n return None, round(time() * 1000 - start_time, 3)\n\n # check validity of solution\n if sum(value == 1 for value in solution.values()) > len(route):\n logging.warning(\"Result is longer than route! This might be problematic!\")\n return None, round(time() * 1000 - start_time, 3)\n\n # run heuristic replacing None values\n if None in route:\n # get not assigned nodes\n nodes_unassigned = [node for node in list(nodes) if node not in route]\n nodes_unassigned = list(np.random.permutation(nodes_unassigned))\n for idx, node in enumerate(route):\n if node is None:\n route[idx] = nodes_unassigned[0]\n nodes_unassigned.remove(route[idx])\n\n # cycle solution to start at provided start location\n if start is not None and route[0] != start:\n # rotate to put the start in front\n idx = route.index(start)\n route = route[idx:] + route[:idx]\n\n # print route\n parsed_route = ' ->\\n'.join([f' Node {visit}' for visit in route])\n logging.info(f\"Route found:\\n{parsed_route}\")\n return route, round(time() * 1000 - start_time, 3)", "def _get_route(self, inp):\n inp = self.connector_by_label(inp)\n inp_routes = []\n # get the routes starting at the input and the maximum route length\n max_length = 0\n for routes in self.routes[inp.label].values():\n for route in routes:\n inp_routes.append(route)\n if len(route) > max_length:\n max_length = len(route)\n\n # to find the outputs, the switches on possible routes are\n # successively measured. Routes that do not fit the measured switch\n # states are eliminated, such that following switches do not have\n # to be read.\n outputs = [] # list for the outputs\n routes = inp_routes\n measured_switch_states = {} # store already measured switches\n for k in range(max_length):\n routes_left = [] # list to store remaining routes\n if len(routes) == 0:\n break\n for route in routes:\n # action only required if start or end of the\n # connection route[k] is a switch output\n if route[k].start.is_switch_output():\n # check if the switch is already measured\n if route[k].start.switch.label in measured_switch_states:\n state = measured_switch_states[\n route[k].start.switch.label]\n else:\n # measure switch and store result\n state = route[k].start.switch.mode()\n measured_switch_states[\n route[k].start.switch.label] = state\n # got to next route (such that this one is not added to\n # the remaining routes)\n if route[k].start.output_nr != state:\n continue\n # if a output is reached, add it to outputs\n if route[k].end.is_box_output():\n outputs.append(route[k].end.label)\n continue\n # continue analogously to above\n elif route[k].end.is_switch_output():\n if route[k].end.switch.label in measured_switch_states:\n state = measured_switch_states[\n route[k].end.switch.label]\n else:\n state = route[k].end.switch.mode()\n measured_switch_states[\n route[k].end.switch.label] = state\n if route[k].end.output_nr != state:\n continue\n # the route has not been eliminated,\n # add it to the remaining routes\n routes_left.append(route)\n # set routes to routes_left before starting the next iteration\n routes = routes_left\n\n # returns\n if len(outputs) == 0:\n return None\n elif len(outputs) == 1:\n return outputs[0]\n else:\n outputs = [out for out in self.outputs if out in outputs]\n return outputs", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n 
\"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] }\n\n return car_path, drop_off_dict", "def part1():\n camera_output = IntCodeProcessor(path='day17input.txt').execute_program()\n camera_output = ints_to_string(camera_output)\n grid = camera_output.splitlines()\n\n scaffolds = set()\n for row_index, row in enumerate(grid):\n for col_index, element in enumerate(row): \n if element == '#':\n scaffolds.add((row_index,col_index))\n\n intersections = [loc for loc in scaffolds if set(locations_adjacent_to(loc)) <= scaffolds]\n parameters = [loc[0] * loc[1] for loc in intersections]\n\n print(f'Part 1 answer: {sum(parameters)}')", "def part1(input_lines):\n # This is a DAG problem. We need to form a dependency graph.\n tower = get_tower(input_lines)\n return find_root(tower)", "def solve(self):", "def __init__(self, **kwargs):\r\n self.pair_type = kwargs[\"pair_type\"]\r\n self.origins = kwargs[\"origins\"]\r\n self.origin_id_field = kwargs[\"origin_id_field\"]\r\n self.destinations = kwargs[\"destinations\"]\r\n self.dest_id_field = kwargs[\"dest_id_field\"]\r\n self.network_data_source = kwargs[\"network_data_source\"]\r\n self.travel_mode = kwargs[\"travel_mode\"]\r\n self.time_units = kwargs[\"time_units\"]\r\n self.distance_units = kwargs[\"distance_units\"]\r\n self.time_of_day = kwargs[\"time_of_day\"]\r\n self.reverse_direction = kwargs[\"reverse_direction\"]\r\n self.scratch_folder = kwargs[\"scratch_folder\"]\r\n self.assigned_dest_field = kwargs[\"assigned_dest_field\"]\r\n self.od_pair_table = kwargs[\"od_pair_table\"]\r\n self.origin_transfer_fields = kwargs[\"origin_transfer_fields\"]\r\n self.destination_transfer_fields = kwargs[\"destination_transfer_fields\"]\r\n self.barriers = []\r\n if \"barriers\" in kwargs:\r\n self.barriers = kwargs[\"barriers\"]\r\n\r\n # Create a job ID and a folder for this job\r\n self._create_job_folder()\r\n\r\n # Setup the class logger. 
Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"RoutePairs\")\r\n\r\n # Get field objects for the origin and destination ID fields since we need this in multiple places\r\n self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]\r\n self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]\r\n\r\n # Set up other instance attributes\r\n self.is_service = helpers.is_nds_service(self.network_data_source)\r\n self.rt_solver = None\r\n self.solve_result = None\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.input_origins_layer_obj = None\r\n self.input_dests_layer_obj = None\r\n self.origin_unique_id_field_name = \"OriginUniqueID\"\r\n self.dest_unique_id_field_name = \"DestinationUniqueID\"\r\n self.od_pairs = None\r\n\r\n # Create a network dataset layer if needed\r\n if not self.is_service:\r\n self._make_nds_layer()\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputRoutes\": \"\",\r\n \"logFile\": self.log_file\r\n }", "def exercise():\n pi_good = get_pdb_inputs(pdb_str=pdb_str_answer, restraints=False)\n map_data = get_map(xrs=pi_good.xrs)\n xrs_good = pi_good.xrs.deep_copy_scatterers()\n pi_good.ph.write_pdb_file(file_name=\"answer.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())\n #\n pi_poor = get_pdb_inputs(pdb_str=pdb_str_poor, restraints=True)\n pi_poor.ph.write_pdb_file(file_name=\"poor.pdb\")\n xrs_poor = pi_poor.xrs.deep_copy_scatterers()\n #\n d = xrs_good.distances(other=xrs_poor)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)>2\n assert flex.mean(d)>0.7\n #\n xrs_refined = xrs_poor\n for i in range(3):\n ero = individual_sites.easy(\n map_data = map_data,\n xray_structure = xrs_refined,\n pdb_hierarchy = pi_poor.ph,\n geometry_restraints_manager = pi_poor.grm)\n xrs_refined = ero.xray_structure\n # comapre\n d = xrs_good.distances(other=xrs_refined)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)<0.15\n assert flex.mean(d)<0.03\n ero.pdb_hierarchy.write_pdb_file(file_name=\"refined.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def solution2(inp):\n rules, mticket, nearby = inp.strip().split(\"\\n\\n\")\n rules = rules.split(\"\\n\")\n nearby = nearby.split(\"\\n\")[1:]\n mticket = list(map(int, mticket.split(\"\\n\")[1].split(\",\")))\n rrules = []\n for rule in rules:\n a, b = rule.split(\" or \")\n name = a.strip().split(\":\")[0]\n r1 
= a.strip().split(\" \")[-1]\n r2 = b.strip()\n def to_range(r):\n i, j = list(map(int, r.split(\"-\")))\n return range(i, j + 1)\n rrules.append((to_range(r1), to_range(r2), name))\n\n nearby = [list(map(int, ticket.split(\",\"))) for ticket in nearby]\n s = 0\n to_remove = []\n for i, ticket in enumerate(nearby):\n for v in ticket:\n valid = False\n for r in rrules:\n valid |= v in r[0] or v in r[1]\n if not valid:\n to_remove.append(i)\n nearby = list(map(lambda x: x[1], filter(lambda x: x[0] not in to_remove, enumerate(nearby))))\n indices = list(range(len(rrules)))\n keys = {}\n for rule in rrules:\n rule_idx = []\n for i in indices:\n if all(ticket[i] in rule[0] or ticket[i] in rule[1] for ticket in nearby):\n rule_idx.append(i)\n keys[rule[2]] = rule_idx\n\n stack = list(keys.items())\n def resolve(j, avail):\n f, cand = stack[j]\n for i in avail.intersection(cand):\n if len(avail) == 1:\n return [i]\n avail.remove(i)\n res = resolve(j + 1, avail)\n avail.add(i)\n if res != False:\n return [i] + res\n return False\n solver = resolve(0, set(range(len(rrules))))\n names = list(map(lambda x: x[0], stack))\n return reduce(lambda x, y: x * y, [mticket[v] for k, v in zip(names, solver) if k.startswith(\"departure\")], 1)", "def _solve_puzzle_parts(self):\n reindeer = self._parse_input()\n race_points = {name: 0 for name in reindeer}\n max_distance = 0\n for time_elapsed in range(1, self.time_limit + 1):\n distances = {\n name: Solver._get_distance(reindeer[name], time_elapsed)\n for name in reindeer\n }\n max_distance = max(distances.values())\n race_points.update({\n name: race_points[name] + 1\n for name in race_points if distances[name] == max_distance\n })\n return max_distance, max(race_points.values())", "def consolidation_heuristics(to_print = False):\n # Instantiate the data problem.\n data = create_data_model()\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n def pending_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['post'][to_node]\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n pending_callback_index = routing.RegisterTransitCallback(pending_callback)\n # Define cost of each arc.\n for i in range(data['num_vehicles']-1):\n routing.SetArcCostEvaluatorOfVehicle(transit_callback_index, i) #Transit cost\n routing.SetFixedCostOfVehicle(data['fixed_cost'], i) #Fixed cost\n routing.SetArcCostEvaluatorOfVehicle(pending_callback_index, data['num_vehicles']-1) #Postponement and/or NonService cost\n # Add Capacity constraint.\n def demand_callback(from_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index) \n return data['demands'][from_node]\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n 
demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n # Add time constraint.\n def time_callback(from_index,to_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to NodeIndex in time\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return time_matrix[from_node][to_node] \n time_callback_index = routing.RegisterTransitCallback(time_callback) \n routing.AddDimensionWithVehicleCapacity(\n time_callback_index,\n 0, # null capacity slack\n data['time_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Time')\n # Setting solution heuristic-procedure.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 5 #10 # 60 #20 #3000\n search_parameters.log_search = True\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n # Print solution on console.\n if assignment:\n sent, sol_results, routes_results = print_solution(data, manager, routing, assignment) \n return sent, sol_results, routes_results", "def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):\n\n M = np.identity(4)\n OC_parameters = np.zeros(4)\n\n for element in beamline:\n M = np.dot(element.M1, M)\n\n # Since the X and Y are decoupled, we can treat them separately.\n M_x = M[0:2, 0:2]\n M_y = M[2:4, 2:4]\n\n L1 = [[OC1.length/2], [1]]\n L2 = [[OC2.length/2], [1]]\n\n M_OC1 = np.array(OC1.M1)[0:2, 0:2]\n M_OC2 = np.array(OC2.M1)[0:2, 0:2]\n\n # The following part solve the cx_1 and cx_2\n M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])\n M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])\n M_OC_x = np.hstack((M1_x, L2))\n\n OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])\n # The end of the X-part\n\n # The following part solve the cy_1 and cy_2\n M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])\n M2_y = np.linalg.multi_dot([M_OC2, M_y, M_OC1])\n M_OC_y = np.hstack((M1_y, L2))\n\n OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])\n # The end of the Y-part\n\n\n return OC_parameters", "def solve(customerCount, vehicleCount, vehicleCapacity, depotIndex, customers):\n \n N, locations, locations_r, distances, closest = precalculate(customers)\n \n #print locations\n #print locations_r\n angle_order = range(1, N)\n angle_order.sort(key=lambda i: (locations_r[i, 1], locations_r[i, 0])) \n \n vehicleTours = best_order(customerCount, customers, vehicleCount, vehicleCapacity, angle_order)\n if not vehicleTours:\n vehicleTours = solve0(customerCount, vehicleCount, vehicleCapacity, depotIndex, customers)\n check(customerCount, customers, vehicleCapacity, vehicleTours)\n vehicleTours = get_shortest_paths('file_path XXX', customers, depotIndex, vehicleTours)\n check(customerCount, customers, vehicleCapacity, vehicleTours)\n \n vehicleTours0 = copy.deepcopy(vehicleTours)\n dist0 = total_dist(customers, depotIndex, vehicleTours)\n if False:\n for _ in range(100):\n vehicleTours = copy.deepcopy(vehicleTours0) \n adjust_tours(customers, vehicleCapacity, vehicleCount, vehicleTours)\n vehicleTours = get_shortest_paths('file_path XXX', customers, depotIndex, vehicleTours)\n #check(customerCount, customers, 
vehicleCapacity, vehicleTours)\n            if not is_valid(customerCount, customers, vehicleCapacity, vehicleTours):\n                continue\n            dist = total_dist(customers, depotIndex, vehicleTours)\n            if dist < dist0:\n                print('%s => %s' % (dist0, dist))\n                vehicleTours0 = vehicleTours[:]\n                dist0 = dist\n    \n    \n    vehicleTours = copy.deepcopy(vehicleTours0) \n    check(customerCount, customers, vehicleCapacity, vehicleTours)\n    while len(vehicleTours) < vehicleCount:\n        vehicleTours.append([])\n        \n    print('*', vehicleTours) \n    \n    return vehicleTours", "def SOM(args):\n\n    # Obtain the normalized set of cities (w/ coord in [0,1])\n    cities = pd.read_csv(Path(args.data_dir) / 'data1.csv')\n\n    iteration = args.iteration\n    learning_rate = args.learning_rate\n    decay = args.decay\n\n    out_dir = Path(args.out_dir)\n    out_dir.mkdir_p()\n\n    cities_nm = cities.copy()\n\n    cities_nm[['x', 'y']] = normalize(cities_nm[['x', 'y']])\n    cities_nm.to_csv(out_dir/'cities_nm.csv')\n    cities.to_csv(out_dir/'cities.csv')\n\n\n    depot = cities_nm.query('city==0')[['x','y']].to_numpy()\n    # The population size is 8 times the number of cities\n    #n = cities_cp.shape[0] * 2# a single route's neurons\n    n=100\n    # Generate an adequate network of neurons:\n    #network = generate_network(n)\n    neuron_chains =init_neurons(size=n,depot=depot)\n    print('--> Network of {} neurons created. Starting the iterations:'.format(n))\n    best_routes=np.array([0])\n\n    #save\n    losses_sum_log={}# losses_sum value for each loop\n    min_losses_sum_log = {}## saves the route losses at the minimum\n    min_losses_log={}# stores the distance values of the four routes in the best case\n    min_routes_log={}\n    best_id=0\n    min_losses_sum=0\n\n    for i in tqdm(range(iteration)):\n        if not i % args.neuro_plot_freq:\n            print('\t> Iteration {}/{}'.format(i, iteration), end=\"\r\")\n        # Choose a random city\n        sample = cities_nm.sample(1)\n        if int(sample['city']) in args.depot_idxs:\n            continue\n        city = sample[['x', 'y']].values# random sampling\n        group_idx,winner_idx = select_closest_gpid(neuron_chains, city)\n\n        # Generate a filter that applies changes to the winner's gaussian\n        gaussian = get_neighborhood(center=winner_idx, radix=n//10, domain=neuron_chains[0].shape[0])\n        # Update the network's weights (closer to the city)\n        neuron_chains[group_idx] += gaussian[:,np.newaxis] * learning_rate * (city - neuron_chains[group_idx])\n        # Decay the variables\n        learning_rate = learning_rate * decay\n        n = n * decay\n\n\n        if i % args.evaluate_freq==0:\n            cities_od = rebuild_cities(cities_nm,neuron_chains,args.num_depots)\n            cities_od[['x','y']] =cities.reindex(cities_od['city'])[['x','y']]\n            losses = routes_distances(cities_od)\n            losses_sum = sum(losses)\n            losses_sum_log[i] = losses_sum\n\n            if min_losses_sum == 0 or min_losses_sum > losses_sum:\n                min_losses_sum = losses_sum\n                best_id = i\n                routes = get_routes(cities_od)\n                routes = [list(item.astype(np.float64)) for item in routes]\n                min_routes_log[i] = routes\n\n                min_losses_sum_log[i] = losses_sum\n                min_losses_log[i] = losses\n            cities_od.to_csv(out_dir/'data_out_{:04d}.csv'.format(i))\n            save_neuron_chains(neuron_chains,out_dir/\"neuron_chains_{:04d}.npy\".format(i))\n\n        #end for\n\n        # Check if any parameter has completely decayed.\n        if n < 1:\n            print('Radius has completely decayed, finishing execution',\n            'at {} iterations'.format(i))\n            break\n        if learning_rate < 0.001:\n            print('Learning rate has completely decayed, finishing execution',\n            'at {} iterations'.format(i))\n            break\n\n\n    print('Completed {} iterations.'.format(iteration))\n\n    results = {}\n\n    results['losses_sum_log']=losses_sum_log\n    results['best_id'] = best_id\n\n    
results['min_losses_sum_log']=min_losses_sum_log\n results['min_losses_log']=min_losses_log\n results['min_routes_log'] = min_routes_log\n\n\n p = Path(out_dir/'results.json')\n with open(p, 'w') as fp:\n json.dump(results, fp)\n print('ok')\n\n\n return results", "def main():\n input_source = \"../input1.txt\"\n # Make list, since the generator has to be used multiple times\n d = list(data_parser(input_source))\n return (solver_1star(d),solver_2star(d))", "def directions_calc(self):\n \n # create route_dict, {'radio_button_name': {'geometries': list of coords,\n # 'values': list of values}}\n route_dict = self._selectInput()\n \n # generate lists with locations and values\n (start_layer_name,\n end_layer_name) = [x.objectName() for x in self.radio_buttons]\n \n locations_list = list(product(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n values_list = list(product(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n \n # If row-by-row in two-layer mode, then only zip the locations\n if all([button.isChecked() for button in self.radio_buttons]) and self.dlg.routing_twolayer_rowbyrow.isChecked():\n locations_list = list(zip(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n\n values_list = list(zip(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n\n # Add via point if specified\n route_via = None\n if self.dlg.routing_via_label.text() != 'Long,Lat':\n route_via = [float(x) for x in self.dlg.routing_via_label.text().split(\",\")]\n \n message_bar, progress_widget = progressbar.pushProgressBar(self.iface)\n \n responses = []\n delete_values = []\n for i, coords_tuple in enumerate(locations_list):\n if coords_tuple[0] == coords_tuple[-1]:\n # Skip when same location\n delete_values.append(i)\n continue\n if route_via:\n # add via coords\n coords_tuple = list(coords_tuple)\n coords_tuple.insert(1, route_via)\n \n # Update progress bar\n percent = (i/len(locations_list)) * 100\n message_bar.setValue(percent)\n \n # Make the request\n self.params['coordinates'] = convert.build_coords(coords_tuple)\n responses.append(self.client.request(self.url, self.params))\n \n # Delete entries in values_list where coords where the same\n values_list = [value for idx, value in enumerate(values_list) if idx not in delete_values]\n \n # Only proceed when there actual responses\n if responses: \n layer_out = self._addLine(responses, values_list)\n layer_out.updateExtents()\n \n QgsProject.instance().addMapLayer(layer_out)\n \n self.iface.messageBar().popWidget(progress_widget)", "def main():\n A = np.array([\n [40, 36],\n [36, 45]\n ])\n b = np.array([-64, -66])\n c = 27\n solve(Task1TargetFunction(A, b, c))", "def solve_part2(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_addition_precendence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)", "def run_ann(detected_objects, nn):\n\n inputs = []\n\n # Find goombas\n sorted_goombas = sorted_objects(detected_objects, 'goomba')\n if len(sorted_goombas) > 0:\n goomba1 = sorted_goombas[0]\n\n inputs.append(goomba1['norm_pos'][0])\n inputs.append(goomba1['norm_pos'][1])\n else:\n inputs.append(INFINITY_DIST)\n inputs.append(INFINITY_DIST)\n\n if len(sorted_goombas) > 1:\n goomba2 = sorted_goombas[1]\n inputs.append(goomba2['norm_pos'][0])\n inputs.append(goomba2['norm_pos'][1])\n else:\n inputs.append(INFINITY_DIST)\n 
inputs.append(INFINITY_DIST)\n\n    # Find koopa troopas\n    sorted_koopa = sorted_objects(detected_objects, 'koopa-troopa')\n    if len(sorted_koopa) > 0:\n        koopa1 = sorted_koopa[0]\n        inputs.append(koopa1['norm_pos'][0])\n        inputs.append(koopa1['norm_pos'][1])\n    else:\n        inputs.append(INFINITY_DIST)\n        inputs.append(INFINITY_DIST)\n\n    # Find obstacles\n    sorted_obstacles = sorted_objects(detected_objects, 'obstacle')\n    if len(sorted_obstacles) > 0:\n        object1 = sorted_obstacles[0]\n        inputs.append(object1['norm_pos'][0])\n    else:\n        inputs.append(INFINITY_DIST)\n\n    # Find pipes\n    sorted_pipes = sorted_objects(detected_objects, 'pipe')\n    if len(sorted_pipes) > 0:\n        pipe = sorted_pipes[0]\n        inputs.append(pipe['norm_pos'][0])\n    else:\n        inputs.append(INFINITY_DIST)\n\n    # Find holes\n    sorted_holes = sorted_objects(detected_objects, 'hole')\n    if len(sorted_holes) > 0:\n        hole = sorted_holes[0]\n        inputs.append(hole['norm_pos'][0])\n        inputs.append(hole['width'] / player.NES_WIDTH)\n    else:\n        inputs.append(INFINITY_DIST)\n        inputs.append(0)\n\n    action = feed_forward_net(nn, inputs)\n\n    return action", "def rout(pour_point, uh_box, fdr_data, fdr_atts, rout_dict):\n    log.info(\"Starting routing program for point: %s\", pour_point)\n    # ---------------------------------------------------------------- #\n    # Unpack a few structures\n    uh_t = uh_box['time']\n    uh_box = uh_box['func']\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Find Basin Dims and ID\n    basin_id = fdr_data[rout_dict['BASIN_ID_VAR']][pour_point.routy, pour_point.routx]\n\n    log.info('Input Latitude: %f' % pour_point.lat)\n    log.info('Input Longitude: %f' % pour_point.lon)\n    log.info('Global Basin ID: %i' % basin_id)\n\n    y_inds, x_inds = np.nonzero(fdr_data[rout_dict['BASIN_ID_VAR']] == basin_id)\n    y = np.arange(len(fdr_data[rout_dict['LATITUDE_VAR']]))\n    x = np.arange(len(fdr_data[rout_dict['LONGITUDE_VAR']]))\n\n    x_min = min(x[x_inds])\n    x_max = max(x[x_inds])+1\n    y_min = min(y[y_inds])\n    y_max = max(y[y_inds])+1\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Create the Basin Dictionary, a subset of the fdr_data\n    basin = {}\n    basin['lat'] = fdr_data[rout_dict['LATITUDE_VAR']][y_min:y_max]\n    basin['lon'] = fdr_data[rout_dict['LONGITUDE_VAR']][x_min:x_max]\n    basin['basin_id'] = fdr_data[rout_dict['BASIN_ID_VAR']][y_min:y_max, x_min:x_max]\n    basin['flow_direction'] = fdr_data[rout_dict['FLOW_DIRECTION_VAR']][y_min:y_max, x_min:x_max]\n    basin['flow_distance'] = fdr_data[rout_dict['FLOW_DISTANCE_VAR']][y_min:y_max, x_min:x_max]\n    basin['velocity'] = fdr_data['velocity'][y_min:y_max, x_min:x_max]\n    basin['diffusion'] = fdr_data['diffusion'][y_min:y_max, x_min:x_max]\n\n    log.debug('Grid cells in subset: %i' % basin['velocity'].size)\n\n    pour_point.basiny, pour_point.basinx = latlon2yx(plats=pour_point.lat,\n                                                     plons=pour_point.lon,\n                                                     glats=basin['lat'],\n                                                     glons=basin['lon'])\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Create the rout_data Dictionary\n    rout_data = {'lat': basin['lat'], 'lon': basin['lon']}\n\n    # ---------------------------------------------------------------- #\n    # Determine flow direction syntax\n    if 'VIC' in fdr_atts[rout_dict['FLOW_DIRECTION_VAR']]:\n        # VIC Directions: 
http://www.hydro.washington.edu/Lettenmaier/Models/VIC/Documentation/Routing/FlowDirection.shtml\n        dy = {1: -1, 2: -1, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0, 8: -1}\n        dx = {1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: -1, 7: -1, 8: - 1}\n        log.debug('Using VIC flow directions (1-8).')\n    else:\n        # ARCMAP Directions: http://webhelp.esri.com/arcgisdesktop/9.2/index.cfm?TopicName=flow_direction\n        dy = {1: 0, 2: 1, 4: 1, 8: 1, 16: 0, 32: -1, 64: -1, 128: -1}\n        dx = {1: 1, 2: 1, 4: 0, 8: -1, 16: -1, 32: -1, 64: 0, 128: 1}\n        log.debug('Using ARCMAP flow directions (1-128).')\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Find timestep (timestep is determined from uh_BOX input file)\n    input_interval = find_ts(uh_t)\n    rout_data['unit_hydrograph_dt'] = input_interval\n    t_cell = int(rout_dict['CELL_FLOWDAYS']*SECSPERDAY/input_interval)\n    t_uh = int(rout_dict['BASIN_FLOWDAYS']*SECSPERDAY/input_interval)\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Read direction grid and find to_col (to_x) and to_row (to_y)\n    to_y, to_x = read_direction(basin['flow_direction'], dy, dx)\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Find all grid cells upstream of pour point\n    catchment, rout_data['fraction'] = search_catchment(to_y, to_x, pour_point,\n                                                        basin['basin_id'],\n                                                        basin_id)\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Make uh for each grid cell upstream of basin pour point\n    # (linear routing model - Saint-Venant equation)\n    uh = make_uh(input_interval, t_cell, catchment['y_inds'],\n                 catchment['x_inds'], basin['velocity'], basin['diffusion'],\n                 basin['flow_distance'])\n\n    # ---------------------------------------------------------------- #\n    # Make uh_river by incrementally moving upstream combining uh functions\n    uh_river = make_grid_uh_river(t_uh, t_cell, uh, to_y, to_x, pour_point,\n                                  catchment['y_inds'], catchment['x_inds'],\n                                  catchment['count_ds'])\n\n    # ---------------------------------------------------------------- #\n    # Make uh_s for each grid cell upstream of basin pour point\n    # (combine IRFs for all grid cells in flow path)\n    uh_s = make_grid_uh(t_uh, t_cell, uh_river, uh_box, to_y, to_x,\n                        catchment['y_inds'], catchment['x_inds'],\n                        catchment['count_ds'])\n    # ---------------------------------------------------------------- #\n\n    # ---------------------------------------------------------------- #\n    # Aggregate to output timestep\n    rout_data['unit_hydrograph'], rout_data['timesteps'] = adjust_uh_timestep(uh_s, t_uh,\n                                                                              input_interval,\n                                                                              rout_dict['OUTPUT_INTERVAL'],\n                                                                              catchment['x_inds'],\n                                                                              catchment['y_inds'])\n    # ---------------------------------------------------------------- #\n    return rout_data", "def main():\r\n    # Instantiate the data problem.\r\n    data = create_data_model()\r\n\r\n    # Create the routing index manager.\r\n    manager = pywrapcp.RoutingIndexManager(\r\n        len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n    # Create Routing Model.\r\n    routing = pywrapcp.RoutingModel(manager)\r\n\r\n# ADD THE DISTANCE CALLBACK\r\n    # ADD THE DEMAND CALLBACK AND CAPACITY CONSTRAINTS\r\n    # In addition to the distance callback, the solver also requires a demand callback, \r\n    # which returns the demand at 
each location, and a dimension for the capacity constraints.\r\n    \r\n    # Create and register a transit callback.\r\n    def distance_callback(from_index, to_index):\r\n        \"\"\"Returns the distance between the two nodes.\"\"\"\r\n        # Convert from routing variable Index to distance matrix NodeIndex.\r\n        from_node = manager.IndexToNode(from_index)\r\n        to_node = manager.IndexToNode(to_index)\r\n        return data['distance_matrix'][from_node][to_node]\r\n    \r\n    transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n    # Define cost of each arc.\r\n    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n#!!! NB\r\n    # Unlike the distance callback, which takes a pair of locations as inputs, \r\n    # the demand callback only depends on the location (from_node) of the delivery.\r\n    # The code also creates a dimension for capacities; we use the AddDimensionWithVehicleCapacity method, \r\n    # which takes a vector of capacities.\r\n    # Since all the vehicle capacities in this example are the same, you could use the \r\n    # AddDimension method, which takes a single upper bound for all vehicle quantities. \r\n    # But AddDimensionWithVehicleCapacity handles the more general case in which different \r\n    # vehicles have different capacities.\r\n    \r\n    # Add Capacity constraint.\r\n    def demand_callback(from_index):\r\n        \"\"\"Returns the demand of the node.\"\"\"\r\n        # Convert from routing variable Index to demands NodeIndex.\r\n        from_node = manager.IndexToNode(from_index)\r\n        return data['demands'][from_node]\r\n\r\n    demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n        demand_callback)\r\n    routing.AddDimensionWithVehicleCapacity(\r\n        demand_callback_index,\r\n        0,  # null capacity slack, modify it if you accept unmet demand\r\n        data['vehicle_capacities'],  # vehicle maximum capacities set by the user\r\n        True,  # start cumul to zero\r\n        'Capacity')\r\n    \r\n    # you can find other search methods here:\r\n    # https://developers.google.com/optimization/routing/routing_options\r\n    \r\n    # Setting first solution heuristic:\r\n    search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n#    search_parameters.first_solution_strategy = (\r\n#        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n    # Setting metaheuristic search method:\r\n    search_parameters.local_search_metaheuristic = (\r\n        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\r\n    # Setting time limit to the method\r\n    search_parameters.time_limit.seconds = 30\r\n    \r\n    # Solve the problem.\r\n    assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n    # Search status \r\n    print('\\n')\r\n    solver_index = routing.status()\r\n    description = ['ROUTING_NOT_SOLVED','ROUTING_SUCCESS','ROUTING_FAIL',\r\n                   'ROUTING_FAIL_TIMEOUT','ROUTING_INVALID']\r\n    print(\"Solver status:\",description[solver_index],'\\n')\r\n    \r\n    # Print solution on console.\r\n    if assignment:\r\n        print_solution(data, manager, routing, assignment)", "def solve(self, matrix, offset=0, temp_start=_DEFAULT_temp_start,\n              temp_end=_DEFAULT_temp_end, tau=_DEFAULT_tau, beta=_DEFAULT_beta,\n              maximize=_DEFAULT_maximize, algorithm=_DEFAULT_algorithm):\n        self.api_instance = qc_qubosolv_api.ProblemApi(\n            qc_qubosolv_api.ApiClient(self.configuration)\n        )\n        self.body = qc_qubosolv_api.Task()\n\n        if isinstance(matrix, np.ndarray):\n            self.body.matrix = matrix.tolist()\n        else:\n            self.body.matrix = matrix\n\n        self.body.parameter = qc_qubosolv_api.Parameter()\n        self.body.parameter.temp_start = temp_start\n        self.body.parameter.temp_end = 
temp_end\n        self.body.parameter.tau = tau\n        self.body.parameter.beta = beta\n        self.body.parameter.maximize = maximize\n        self.body.parameter.algorithm = algorithm\n        response_json = self.api_instance.task_post(self.body)\n        self.response = SolverResult(json.loads(response_json), offset)\n        return self.response", "def solve():\n    n = int(input())\n    m = int(input())\n    Ks = list(map(int, input().split()))\n\n    # Exhaustive search over all combinations with a quadruple nested loop\n    for a in range(n):\n        for b in range(n):\n            for c in range(n):\n                for d in range(n):\n                    if Ks[a]+Ks[b]+Ks[c]+Ks[d] == m:\n                        print(\"Yes\")\n                        return\n    print(\"No\")", "def main():\n    # Instantiate the data problem.\n    data = create_data_model()\n\n    # NEW SPOT TO MAKE distance_matrix\n    distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n    manager = pywrapcp.RoutingIndexManager(\n        len(destinations_1), data['num_vehicles'], data['depot'])\n\n#    # Create the routing index manager.\n#    manager = pywrapcp.RoutingIndexManager(\n#        len(data['locations']), data['num_vehicles'], data['depot'])\n\n    # Create Routing Model.\n    routing = pywrapcp.RoutingModel(manager)", "def solve(self):\n        for step in self.run.values():\n            step.solve()", "def route(self, ori, dest, pois):\n        #find one route from ori to dest\n        departure_time = int(time.time())\n        routes = util.query_routes(origin=ori, \n                                   destination=dest,\n                                   departure_time=departure_time)\n        if routes is None or routes['status'] != \"OK\":\n            print(',=====', routes)\n            return None\n\n        route = routes[\"routes\"][0] #get the first route\n\n        #get the points in the route to search the potential poi\n        points = util.extract_points(route)\n\n        if points is None or len(points) ==0:\n            print(\"Error in extracting points\")\n            return None\n        #get the candidates in the route\n        candidates = []\n        way_points = pois.split(\"|\")\n        for point in points:\n            information = {}\n            information[\"location\"] = point\n            for way_p in way_points:\n                response = util.get_nearby_points(location=point, keyword=way_p)\n                if response is None or response[\"status\"] != \"OK\":\n                    information[way_p] = []\n                    continue\n                ps = []\n                for result in response[\"results\"]:\n                    poi = {\"geometry\": result[\"geometry\"],\n                           \"name\": result[\"name\"],\n                           \"price_level\": result.get(\"price_level\", None),\n                           \"rating\": result.get(\"rating\", None),\n                           \"vicinity\": result[\"vicinity\"]}\n                    ps.append(poi)\n                information[way_p] = ps\n            candidates.append(information)\n        \n        cost_matrix = waypoint.find_waypoints([candidates], way_points)\n        cost_matrix.sort(key=lambda x:x[1])\n\n        top_candidate = cost_matrix[0]\n        json.dump(top_candidate, open('./top_candidate.json','w'))\n        final_route = self.get_direction(ori, dest, top_candidate)\n        json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n        return final_route, top_candidate", "def processPart1(inputs):\n    total = 0\n    inputs = inputs if type(inputs) is list else [inputs]\n    for input in inputs:\n        [w, h, l] = map(int, input.split('x'))\n        areas = [w * h, h * l, w * l]\n        total = total + sum(map(lambda x: x * 2, areas)) + (min(areas))\n    return total", "def main() -> None:\n    with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n        for solution in solve(input_file):\n            print(solution)", "def main() -> None:\n    with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n        for solution in solve(input_file):\n            print(solution)", "def main() -> None:\n    with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n        for solution in solve(input_file):\n            print(solution)", "def solve_driv(v, ene, s, n, h):\n\n    xs = np.array([(k+1)*h for k in 
range(n)])\n h2 = h*h\n k = np.sqrt(2.0*ene)\n \n vs = [v(x)-ene for x in xs]\n\n mat = laplacian_mat(n) -2.0 * h2 * scipy.sparse.diags(vs, 0) + bc_outgoing_mat(n, h, k)\n vec = np.array([-2.0*h*h*s(x) for x in xs])\n\n ys = scipy.sparse.linalg.spsolve(mat, vec)\n return (xs, ys)", "def parse_street_waynodes(input, use_highway):\r\n way_key = use_highway and name_highway_key or name_key\r\n rels, ways, nodes = ParserOSM().parse(input, way_key=way_key)\r\n \r\n return ways, nodes", "def forward(self, inputs):\n x = equiangular_calculator(inputs, self.ratio)\n x = x.permute(0, 3, 1, 2)\n\n if self.return_indices:\n x, indices = F.max_pool2d(x, self.kernel_size, return_indices=self.return_indices)\n else:\n x = F.max_pool2d(x, self.kernel_size)\n x = reformat(x)\n\n if self.return_indices:\n output = x, indices\n else:\n output = x\n\n return output", "def process_directed_scans(self, label: str, pivots: Union[List[int], List[List[int]]]):\n for rotor_dict_index in self.species_dict[label].rotors_dict.keys():\n rotor_dict = self.species_dict[label].rotors_dict[rotor_dict_index] # avoid modifying the iterator\n if rotor_dict['pivots'] == pivots:\n # identified a directed scan (either continuous or brute force, they're treated the same here)\n dihedrals = [[float(dihedral) for dihedral in dihedral_string_tuple]\n for dihedral_string_tuple in rotor_dict['directed_scan'].keys()]\n sorted_dihedrals = sorted(dihedrals)\n min_energy = extremum_list([directed_scan_dihedral['energy']\n for directed_scan_dihedral in rotor_dict['directed_scan'].values()],\n return_min=True)\n trshed_points = 0\n if rotor_dict['directed_scan_type'] == 'ess':\n # parse the single output file\n results = parser.parse_nd_scan_energies(path=rotor_dict['scan_path'])[0]\n else:\n results = {'directed_scan_type': rotor_dict['directed_scan_type'],\n 'scans': rotor_dict['scan'],\n 'directed_scan': rotor_dict['directed_scan']}\n for dihedral_list in sorted_dihedrals:\n dihedrals_key = tuple(f'{dihedral:.2f}' for dihedral in dihedral_list)\n dihedral_dict = results['directed_scan'][dihedrals_key]\n if dihedral_dict['trsh']:\n trshed_points += 1\n if dihedral_dict['energy'] is not None:\n dihedral_dict['energy'] -= min_energy # set 0 at the minimal energy\n folder_name = 'rxns' if self.species_dict[label].is_ts else 'Species'\n rotor_yaml_file_path = os.path.join(self.project_directory, 'output', folder_name, label, 'rotors',\n f'{pivots}_{rotor_dict[\"directed_scan_type\"]}.yml')\n plotter.save_nd_rotor_yaml(results, path=rotor_yaml_file_path)\n self.species_dict[label].rotors_dict[rotor_dict_index]['scan_path'] = rotor_yaml_file_path\n if trshed_points:\n logger.warning(f'Directed rotor scan for species {label} between pivots {rotor_dict[\"pivots\"]} '\n f'had {trshed_points} points that required optimization troubleshooting.')\n rotor_path = os.path.join(self.project_directory, 'output', folder_name, label, 'rotors')\n if len(results['scans']) == 1:\n plotter.plot_1d_rotor_scan(\n results=results,\n path=rotor_path,\n scan=rotor_dict['scan'][0],\n label=label,\n original_dihedral=self.species_dict[label].rotors_dict[rotor_dict_index]['original_dihedrals'],\n )\n elif len(results['scans']) == 2:\n plotter.plot_2d_rotor_scan(results=results, path=rotor_path)\n else:\n logger.debug('Not plotting ND rotors with N > 2')", "def solution1(inp):\n rules, _, nearby = inp.strip().split(\"\\n\\n\")\n rules = rules.split(\"\\n\")\n nearby = nearby.split(\"\\n\")[1:]\n\n rrules = []\n for rule in rules:\n a, b = rule.split(\" or \")\n r1 = 
a.strip().split(\" \")[-1]\n r2 = b.strip()\n def to_range(r):\n i, j = list(map(int, r.split(\"-\")))\n return range(i, j + 1)\n rrules.append((to_range(r1), to_range(r2)))\n\n s = 0\n for ticket in nearby:\n ticket = list(map(int, ticket.split(\",\")))\n for v in ticket:\n valid = False\n for r in rrules:\n valid |= v in r[0] or v in r[1]\n if not valid:\n s += v\n return s", "def optimal_route(graph,homes,source):\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = range(1,number_of_homes+1)\n optimal_cost = np.Inf\n optimal_dropoffs = dict()\n optimal_route = []\n optimal_num_clusters = 0\n\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n if cost < optimal_cost:\n optimal_cost = cost\n optimal_route = route \n optimal_dropoffs = dropoffs\n optimal_num_clusters = num_clusters\n\n return optimal_cost, optimal_dropoffs, optimal_route, optimal_num_clusters", "def calculate_paths(shape: Tuple[int, int], point: Tuple[int, int]) -> int:\n\tn, m = map(int, input().split())\n\tf = [[0] * (m+1) for i in range(n+1)]\n\tf[1][1] = 1\n\tfor i in range(2, n+1):\n\t\tfor j in range(2, m + 1):\n\t\t\tf[i][j] = f[i-2][j-2] + f[i-2][j-1]\n\treturn n + m", "def calculate_routes(outposts, vehicles, graph, starting_point=0, **kwargs):\n number_of_vehicles = len(vehicles)\n number_of_nodes = len(outposts) - 1\n\n # Source: https://stackoverflow.com/questions/28965734/general-bars-and-stars\n vehicles_partitions = []\n total_load = outposts.load.sum()\n capacities = list(vehicles.capacity)\n for combination in itertools.combinations(range(number_of_nodes+number_of_vehicles-1), number_of_vehicles-1):\n current_partition = [b-a-1 for a, b in zip((-1,) + combination, combination+(number_of_nodes+number_of_vehicles-1,))]\n current_partition = sorted(current_partition)\n if current_partition not in vehicles_partitions:\n vehicle_presence_vector = [0 if number==0 else 1 for number in current_partition]\n total_capacity = np.dot(vehicle_presence_vector, capacities)\n if total_capacity >= total_load:\n vehicles_partitions.append(current_partition)\n\n dwave_solver = DWaveEngine.default()\n if 'use_capacity_constraints' in kwargs:\n use_capacity_constraints = kwargs['use_capacity_constraints']\n del kwargs['use_capacity_constraints']\n else:\n use_capacity_constraints = True\n print(\"All partitions:\", vehicles_partitions)\n best_solution = None\n for current_partition in vehicles_partitions:\n print(\"Current partition: \", current_partition)\n problem = Problem(vehicles=vehicles,\n outposts=outposts,\n vehicles_partition=current_partition,\n graph=graph,\n starting_point=starting_point,\n use_capacity_constraints=use_capacity_constraints)\n current_solution = dwave_solver.solve(problem)\n if current_solution is None:\n print(\"No valid solutions found with D-Wave\")\n elif best_solution is None:\n best_solution = current_solution\n else:\n current_cost = sum(sub_solution[2] for sub_solution in current_solution)\n best_cost = best_solution.total_cost\n if current_cost < best_cost:\n best_solution = 
current_solution\n\n if best_solution is None:\n return None\n return best_solution.to_dataframe()", "def solve(self):\n ...", "def evaluate(inp, coll = False):\n distance = 0\n errors = 0\n waypoints = [[0,0]] #The robot originates in 0,0\n waypoints.extend(inp) #Add the generated waypoints inbetween start and goal\n waypoints.append([goal[0],goal[1]]) #Add the goal waypoint to complete the route\n for i in range(0, len(waypoints) -1):\n distance += sqrt(pow(waypoints[i][0] - waypoints[i+1][0], 2) + pow(waypoints[i][1] - waypoints[i+1][1], 2)) #Calculates the distance between this waypoint ond the next\n \n errors += collisions(waypoints[i][0], waypoints[i][1], \n waypoints[i+1][0], waypoints[i+1][1])\n\n if collisions:\n return distance + errors*total_distance*2, errors\n return distance + errors*total_distance*2", "def main():\n\n rules, evolutions = [int(i) for i in input().strip().split()]\n\n rule = {}\n for _ in range(rules):\n start, finish = input().strip().split(' -> ')\n rule[start] = finish\n\n print(lindenmayor(rule, evolutions, input().strip()))", "def main_script(input_vertices, input_num, input_prob, input_run=0):\n\n\tinput_vert_map = choose_values(input_vertices, input_run)\n\tinput_Q = coupler(input_vert_map, input_num)\n\tinput_results = solve_on_isakov(input_Q)\n\tinput_run_first_group, input_run_second_group = obtain_groups(input_results, input_vertices)\n\tinput_num_edges = count_edges(input_run_first_group, input_run_second_group, input_prob)\n\treturn input_num_edges, input_run_first_group, input_run_second_group", "def sol_to_integer ( S, D, R, A, rows = None, cols = None, verbose = False ) :\n\n\tn_vars = len(S)\n\t(c0,max0,mean0) = sol_cost(S,D,R)\n\t\n\tintS = array([0]* (n_vars))\n\tfor j in range(n_vars) :\n\t\tif S[j] < D[j] :\n\t\t\tintS[j] = int(ceil(S[j]))\n\t\telse :\n\t\t\tintS[j] = int(floor(S[j]))\n\t\t\t\t\t\t\n\t(c1,max1,mean1) = sol_cost(intS,D,R)\n\t\n\n\t########## FIRST HEURISTIC #######################################\n\t\n\tintS2 = intS.copy() # solution with first heuristics\n\n\t# no displacements are initially fixed\n\tfixed = array([False]* n_vars)\n\tcomplete = array([False]* n_constraints)\n\t\n\tif verbose :\n\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\t\t\n\t# indices of displacements in decreasing order of reliability\n\tsorted_inds = argsort(-R)\n\t\n\t# complete remaining square in order of less reliable displacements not already set\n\tfor j in range(n_vars) :\n\t\tif not fixed[sorted_inds[j]] :\n\t\t\tconstr_inds = (where(abs(A[:,sorted_inds[j]]) == 1))[0] # indices of constraints containing the displacement considered \n\t\t\tind = constr_inds[0] # adjust the first constraint (all other constraints have at least another displacement not fixed)\n\t\t\terr = dot(A[ind],intS2)\n\t\t\tvars_inds = (where(abs(A[ind]) == 1))[0] # indices of displacements in constraint ind\n\t\t\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\t\t\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\t\t\tif A[ind][modi] > 0 :\n\t\t\t\tintS2[modi] -= err\n\t\t\telse :\n\t\t\t\tintS2[modi] += err\n\t\t\tfixed[vars_inds] = True\n\t\t\tcomplete[ind] = True\n\n\t\t\tif verbose :\n\t\t\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\t\t\n\t\t\t# complete all squares that miss just one displacement\n\t\t\tstop = False\n\t\t\twhile not stop :\n\t\t\t\tstop = True\n\t\t\t\tfor i in range(n_constraints) :\n\t\t\t\t\tif not complete[i] :\n\t\t\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of 
displacements in constraint i\n\t\t\t\t\t\tif len(where(fixed[vars_inds] == True)[0]) == 3 : # just one displacement is missing\n\t\t\t\t\t\t\terr = dot(A[i],intS2)\n\t\t\t\t\t\t\tmodi = vars_inds[(where(fixed[vars_inds] == False))[0][0]] # index of displacement not set yet\n\t\t\t\t\t\t\tif A[i][modi] > 0 :\n\t\t\t\t\t\t\t\tintS2[modi] -= err\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tintS2[modi] += err\n\t\t\t\t\t\t\tfixed[modi] = True\n\t\t\t\t\t\t\tcomplete[i] = True\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif verbose :\n\t\t\t\t\t\t\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\t\t\n\t\t\t\t\t\t\tstop = False # one more displacement has been set: check for other squares to complete\n\t\t\t\n\tif verbose :\n\t\tdraw_algo_state(rows,cols,fixed,complete)\t\n\t\n\tcomplete = array([True]* n_constraints)\n\tif check_constraints(A,intS2,complete) :\n\t\t#raise ValueError('')\n\t\t(c2,max2,mean2) = sol_cost(intS2,D,R)\n\telse :\n\t\t(c2,max2,mean2) = (float('inf'),float('inf'),float('inf'))\n\n\n\t########## SECOND HEURISTIC #######################################\n\n\tintS3 = intS.copy() # solution with second heuristics\n\n\t# no displacements are initially fixed\n\tfixed = array([False]* n_vars)\n\n\tcomplete_bool = array([False]* n_constraints)\n\n\tincomplete = set(range(n_constraints)) # constraints indices to be completed\n\tcomplete = set([]) # constraints indices completed\n\tR_squares = array([0.0]* n_constraints) # maximum displacement reliability of constraints \n\t\n\tfor i in range(n_constraints) :\n\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\tR_squares[i] = sort(R[vars_inds])[len(R[vars_inds])-1] # set maximum displacement reliability of constraint i\n\t\t\n\t# indices of constraints in decreasing order of reliability\n\tsorted_inds = argsort(-R_squares)\n\t\n\t# complete the square with highest reliability\n\terr = dot(A[sorted_inds[0]],intS3)\n\tvars_inds = (where(abs(A[sorted_inds[0]]) == 1))[0] # indices of displacements in constraint with highest reliability\n\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\tif A[sorted_inds[0]][modi] > 0 :\n\t\tintS3[modi] -= err\n\telse :\n\t\tintS3[modi] += err\n\tfixed[vars_inds] = True\n\tcomplete |= set([sorted_inds[0]])\n\tincomplete -= set([sorted_inds[0]])\n\n\tif verbose :\n\t\tcomplete_bool[list(complete)] = True\n\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\twhile incomplete <> set([]) :\n\t\tfor k in range(1,n_constraints) :\n\t\t\tsq = sorted_inds[k] # candidate constraint to be completed\n\t\t\tif ( sq in incomplete) :\n\t\t\t\tvars_inds = (where(abs(A[sq]) == 1))[0] # indices of displacements in constraint sq\n\t\t\t\tadjacent = False\t\t\t\t\n\t\t\t\tfor j in vars_inds :\n\t\t\t\t\tconstr_inds = (where(abs(A[:,j]) == 1))[0] # indices of constraints containing the displacement considered (and adjacent to constraint sq) \n\t\t\t\t\tif (set(constr_inds) & complete) <> set([]) : # al least one of the adjacent contraint is complete\n\t\t\t\t\t\tadjacent = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif adjacent :\n\t\t\t\t\tbreak\n\t\t# sq is the next squares adjacent to already completed squares with the highest reliability\n\t\t\n\t\t# complete sq\n\t\terr = dot(A[sq],intS3)\n\t\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\t\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\t\tif A[sq][modi] > 0 :\n\t\t\tintS3[modi] -= err\n\t\telse 
:\n\t\t\tintS3[modi] += err\n\t\tfixed[vars_inds] = True\n\t\tcomplete |= set([sq])\n\t\tincomplete -= set([sq])\n\t\t\n\t\tif verbose :\n\t\t\tcomplete_bool[list(complete)] = True\n\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\t\t# complete all squares that miss just one displacement\n\t\tstop = False\n\t\twhile not stop :\n\t\t\tstop = True\n\t\t\tfor i in range(n_constraints) :\n\t\t\t\tif i in incomplete :\n\t\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\t\t\t\tif len(where(fixed[vars_inds] == True)[0]) == 3 : # just one displacement is missing\n\t\t\t\t\t\terr = dot(A[i],intS3)\n\t\t\t\t\t\tmodi = vars_inds[(where(fixed[vars_inds] == False))[0][0]] # index of displacement not set yet\n\t\t\t\t\t\tif A[i][modi] > 0 :\n\t\t\t\t\t\t\tintS3[modi] -= err\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tintS3[modi] += err\n\t\t\t\t\t\tfixed[modi] = True\n\t\t\t\t\t\tcomplete |= set([i])\n\t\t\t\t\t\tincomplete -= set([i])\n\t\t\t\t\t\t\n\t\t\t\t\t\tif verbose :\n\t\t\t\t\t\t\tcomplete_bool[list(complete)] = True\n\t\t\t\t\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\n\t\t\t\t\t\tstop = False # one more displacement has been set: check for other squares to complete\n\t\n\tcomplete_bool[list(complete)] = True\n\tif check_constraints(A,intS3,complete_bool) :\n\t\t#raise ValueError('')\n\t\t(c3,max3,mean3) = sol_cost(intS3,D,R)\n\telse :\n\t\t(c3,max3,mean3) = (float('inf'),float('inf'),float('inf'))\n\t\t\n\n\t########## THIRD HEURISTICS #######################################\n\t\n\tintS4 = intS.copy() # solution with second heuristics\n\t\n\t\n\t# find the order in which the constraints have to be processed\n\n\tincomplete = set(range(n_constraints)) # constraints indices to be completed\n\tcomplete = set([]) # constraints indices completed\n\tR_squares = array([0.0]* n_constraints) # maximum displacement reliability of constraints \n\t\n\tfor i in range(n_constraints) :\n\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\tR_squares[i] = sort(R[vars_inds])[len(R[vars_inds])-1] # set maximum displacement reliability of constraint i\n\t\t\n\t# indices of constraints in decreasing order of reliability\n\tsorted_inds = argsort(-R_squares)\n\t\n\ti0 = sorted_inds[0]/(cols-1)\n\ti1 = sorted_inds[0]/(cols-1)\n\tj0 = sorted_inds[0]%(cols-1)\n\tj1 = sorted_inds[0]%(cols-1)\n\tcomplete |= set([sorted_inds[0]])\n\tincomplete -= set([sorted_inds[0]])\n\tconstr_order = array([sorted_inds[0]])\n\twhile incomplete <> set([]) :\n\t\tconstr_inds = array([], dtype=int)\n\t\tup = False\n\t\tdown = False\n\t\tleft = False\n\t\tRight = False\t\n\t\tif i0 > 0 :\n\t\t\tf = (i0-1)*(cols-1) + j0\n\t\t\tl = f + (j1-j0+1)\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l)))\n\t\t\tup = True\n\t\tif i1 < (rows-2) :\n\t\t\tf = (i1+1)*(cols-1) + j0\n\t\t\tl = f + (j1-j0+1)\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l)))\n\t\t\tdown = True\n\t\tif j0 > 0 :\n\t\t\tf = i0*(cols-1) + j0 - 1\n\t\t\tl = (i1+1)*(cols-1) + j0 - 1\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l,(cols-1))))\n\t\t\tleft = True\n\t\tif j1 < (cols-2) :\n\t\t\tf = i0*(cols-1) + j1 + 1\n\t\t\tl = (i1+1)*(cols-1) + j1 + 1\n\t\t\tconstr_inds = append(constr_inds,array(range(f,l,(cols-1))))\n\t\t\tright = True\n\t\tif up and left :\n\t\t\tconstr_inds = append(constr_inds,array([(i0-1)*(cols-1) + j0 - 1]))\n\t\tif up and right :\n\t\t\tconstr_inds = append(constr_inds,array([(i0-1)*(cols-1) + j1 + 1]))\n\t\tif down and left 
:\n\t\t\tconstr_inds = append(constr_inds,array([(i1+1)*(cols-1) + j0 - 1]))\n\t\tif down and right :\n\t\t\tconstr_inds = append(constr_inds,array([(i1+1)*(cols-1) + j1 + 1]))\n\t\tif up :\n\t\t\ti0 -= 1\n\t\tif down :\n\t\t\ti1 += 1\n\t\tif left :\n\t\t\tj0 -= 1\n\t\tif right :\n\t\t\tj1 += 1\n\t\tcomplete |= set(constr_inds)\n\t\tincomplete -= set(constr_inds)\n\t\tconstr_order = append(constr_order,constr_inds[argsort(-R_squares[constr_inds])])\t\n\n\n\t# process the contraints\n\n\t# no displacements are initially fixed\n\tfixed = array([False]* n_vars)\n\n\tcomplete_bool = array([False]* n_constraints)\n\n\tincomplete = set(range(n_constraints)) # constraints indices to be completed\n\tcomplete = set([]) # constraints indices completed\n\n\t# complete the square with highest reliability\n\terr = dot(A[constr_order[0]],intS4)\n\tvars_inds = (where(abs(A[sorted_inds[0]]) == 1))[0] # indices of displacements in constraint with highest reliability\n\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\tif A[constr_order[0]][modi] > 0 :\n\t\tintS4[modi] -= err\n\telse :\n\t\tintS4[modi] += err\n\tfixed[vars_inds] = True\n\tcomplete |= set([constr_order[0]])\n\tincomplete -= set([constr_order[0]])\n\n\tif verbose :\n\t\tcomplete_bool[list(complete)] = True\n\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\twhile incomplete <> set([]) :\n\t\tfor k in range(1,n_constraints) :\n\t\t\tsq = constr_order[k] # candidate constraint to be completed\n\t\t\tif ( sq in incomplete) :\n\t\t\t\tvars_inds = (where(abs(A[sq]) == 1))[0] # indices of displacements in constraint sq\n\t\t\t\tbreak\n\t\t# sq is the next squares adjacent to already completed squares with the highest reliability\n\t\t\n\t\t# complete sq\n\t\terr = dot(A[sq],intS4)\n\t\ttemp_inds = vars_inds[fixed[vars_inds] == False]\n\t\tmodi = temp_inds[R[temp_inds] == min(R[temp_inds])][0] # index of displacement not set yet\n\t\tif A[sq][modi] > 0 :\n\t\t\tintS4[modi] -= err\n\t\telse :\n\t\t\tintS4[modi] += err\n\t\tfixed[vars_inds] = True\n\t\tcomplete |= set([sq])\n\t\tincomplete -= set([sq])\n\t\t\n\t\tif verbose :\n\t\t\tcomplete_bool[list(complete)] = True\n\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\t\n\t\t# complete all squares that miss just one displacement\n\t\tstop = False\n\t\twhile not stop :\n\t\t\tstop = True\n\t\t\tfor i in range(n_constraints) :\n\t\t\t\tif i in incomplete :\n\t\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0] # indices of displacements in constraint i\n\t\t\t\t\tif len(where(fixed[vars_inds] == True)[0]) == 3 : # just one displacement is missing\n\t\t\t\t\t\terr = dot(A[i],intS4)\n\t\t\t\t\t\tmodi = vars_inds[(where(fixed[vars_inds] == False))[0][0]] # index of displacement not set yet\n\t\t\t\t\t\tif A[i][modi] > 0 :\n\t\t\t\t\t\t\tintS4[modi] -= err\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tintS4[modi] += err\n\t\t\t\t\t\tfixed[modi] = True\n\t\t\t\t\t\tcomplete |= set([i])\n\t\t\t\t\t\tincomplete -= set([i])\n\t\t\t\t\t\t\n\t\t\t\t\t\tif verbose :\n\t\t\t\t\t\t\tcomplete_bool[list(complete)] = True\n\t\t\t\t\t\t\tdraw_algo_state(rows,cols,fixed,complete_bool)\t\n\t\t\n\t\t\t\t\t\tstop = False # one more displacement has been set: check for other squares to complete\n\t\n\tcomplete_bool[list(complete)] = True\n\tif check_constraints(A,intS4,complete_bool) :\n\t\t#raise ValueError('')\n\t\t(c4,max4,mean4) = sol_cost(intS4,D,R)\n\telse :\n\t\t(c4,max4,mean4) = 
(float('inf'),float('inf'),float('inf'))\n\n\t########## FINAL RESULT #######################################\n\t\n\tif c3 < c2 :\n\t\tif c3 < c4 :\n\t\t\t#print 'The second heuristics gives the best result'\n\t\t\tintS2 = intS3\n\t\t\t(c2,max2,mean2) = (c3,max3,mean3)\n\t\telse :\n\t\t\t#print 'The third heuristics gives the best result'\n\t\t\tintS2 = intS4\n\t\t\t(c2,max2,mean2) = (c4,max4,mean4)\n\telif c4 < c2 :\n\t\t#print 'The third heuristics gives the best result'\n\t\tintS2 = intS4\n\t\t(c2,max2,mean2) = (c4,max4,mean4)\n\telse :\n\t\t#print 'The first heuristics gives the best result'\n\t\tpass\n\t\t\t\t\n\treturn (c0,max0,mean0,c1,max1,mean1,intS2,c2,max2,mean2)", "def solve(input: List[List[Literal]]) -> List[Tuple[str, bool]]:\n start = Solver._determine_guess(input)\n result = Solver._solve_step(input, start[1][0], start[1][1])\n if result is None:\n negated = not start[1][1]\n result = Solver._solve_step(input, start[1][0], negated)\n return result", "def a_star(my_map, start_locs, goal_locs, h_values, agent, constraints):\n\n ##############################\n # Task 1.1: Extend the A* search to search in the space-time domain\n # rather than space domain, only.\n # Build constraint table if there are constraints\n\n constraint_table = build_constraint_table(constraints, agent)\n\n open_list = []\n closed_list = dict()\n nodes_opened = 0\n max_opened = 500\n start_loc = start_locs[0]\n goal_loc = goal_locs[0]\n if len(start_locs) > 1: # If there is more than 1 start location then this is a multi-cell agent\n multi = True\n else:\n multi = False\n\n # determine when the last constraint is on the goal node (or any of the goal node cells in the case of multi-cell)\n earliest_goal_timestep = 0\n if len(constraint_table) != 0:\n for time in [item for item in sorted(list(constraint_table.keys()), reverse=True)]:\n flat_list = [item for sublist in constraint_table[time] for item in sublist]\n if(goal_locs[0] in flat_list):\n earliest_goal_timestep = time\n break\n elif(multi): # if multi cell check if any of the agents goal cells are constrained \n if(goal_locs[1] in flat_list): \n earliest_goal_timestep = time\n break\n\n h_value = h_values[start_loc]\n goal_orientation = orientation(goal_locs)\n\n root = {'loc': start_loc,'orientation': orientation(start_locs), 'g_val': 0, 'h_val': h_value, 'time': 0, 'parent': None}\n push_node(open_list, root)\n closed_list[(root['loc'], root['time'], root['orientation'])] = root\n\n while len(open_list ) > 0 and nodes_opened < max_opened:\n curr = pop_node(open_list)\n nodes_opened = nodes_opened + 1\n \n if curr['loc'] == goal_loc and curr['orientation'] == goal_orientation and curr['time'] >= earliest_goal_timestep:\n return get_path(curr)\n ############################\n child_orient = curr['orientation']\n for dir in range(7):\n if dir < 5:\n child_loc = move(curr['loc'], dir)\n elif not multi: \n continue\n\n if dir == 5:\n # clockwise rotation \n child_orient = curr['orientation'] - 1\n if child_orient < 1:\n child_orient = 4\n if dir == 6:\n # counter-clockwise rotation \n child_orient = curr['orientation'] + 1\n if child_orient > 4:\n child_orient = 1\n \n if test_map(my_map, child_loc[0], child_loc[1], child_orient, dir):\n continue\n \n # check if the head location is constrained \n if is_constrained(curr['loc'], child_loc, child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if this is a multi cell agent check if the tail is constrained \n if multi:\n # check the next tail location \n row_t, col_t, _, _ = 
find_tail_positions(curr['loc'][0], curr['loc'][1], curr['orientation'], dir)\n next_row_t, next_col_t, next_row_t_inter, next_col_t_inter = find_tail_positions(child_loc[0], child_loc[1], child_orient, dir)\n\n if is_constrained((row_t,col_t), (next_row_t, next_col_t), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if the agent is rotating check if the intermediate location is constrained\n if dir == 5 or dir == 6: \n if is_constrained((row_t,col_t), (next_row_t_inter, next_col_t_inter), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n child = {'loc': child_loc,\n 'orientation': child_orient,\n 'g_val': curr['g_val'] + 1,\n 'h_val': h_values[child_loc] + orient_cost(child_orient, goal_orientation),\n 'time': curr['time'] + 1,\n 'parent': curr}\n\n if (child['loc'], child['time'], child['orientation']) in closed_list:\n existing_node = closed_list[(child['loc'], child['time'], child['orientation'])]\n \n if compare_nodes(child, existing_node):\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n else:\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n \n return None # Failed to find solutions", "def main():\n # Instantiate a mixed-integer solver.\n solver = pywraplp.Solver('SolveAssignmentProblemMIP',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n # Number of teams (h and i)\n n = 9\n # Number of rooms (j)\n r = 3\n # Number of timeslots (k)\n t = 4\n # Number of matches\n m = 4\n\n # List of teams\n teams = [i for i in range(9)]\n\n x = {}\n\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if (h == i):\n x[h, i, j, k] = solver.IntVar(0, 0, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n else:\n x[h, i, j, k] = solver.IntVar(0, 1, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n\n # # Objective\n # solver.Minimize(solver.Sum([cost[i][j] * x[i,j] for i in range(num_workers)\n # for j in range(num_tasks)]))\n\n # Constraints\n\n # 2 Ensures that the matrix is the same across the diagonal\n for h in range(n):\n for j in range(r):\n for k in range(t):\n solver.Add((x[h, i, j, k] == x[i, h, j, k]))\n\n # 3 No pair plays each other more than once\n for h in range(n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t)]) <= 1)\n\n # 4 No team can be in more than one place at a time\n for h in range(n):\n for k in range(t):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n) for j in range(r)]) <= 2)\n\n # 5 Each team plays exactly m matches\n for i in range(n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t) for h in range(n)]) == 2 * m)\n\n # 6 Need 3 teams in a room at each timeslot\n for j in range(r):\n for k in range(t - 1):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n - 1) for h in range(i + 1, n)]) == 3)\n\n # Need 3 teams in a room at each timeslot\n for g in range(n - 2):\n for h in range(g + 1, n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum(\n [x[g, h, j, k] + x[h, i, j, k] + x[g, i, j, k] for j in range(r) for k in range(t)]) != 2)\n\n sol = solver.Solve()\n\n print('Total cost = ', solver.Objective().Value())\n print()\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if x[h, i, j, k].solution_value() > 0:\n print('teams %i,%i assigned to room %i at time %i.' 
% (h, i, j, k))\n\n    print()\n    print(\"Time = \", solver.WallTime(), \" milliseconds\")", "def _run():\n    matching_terms = {'a', 'b'}\n    source_counts = {'a': 10, 'b': 50, 'c': 25}\n    target_counts = {'a': 4, 'b': 73, 'c': 15}\n    source_chunk = ['a', 'b']\n    target_chunk = ['a', 'c', 'b']\n    source_distance = score.find_distance(\n        matching_terms, source_chunk, source_counts)\n    target_distance = score.find_distance(\n        matching_terms, target_chunk, target_counts)\n    match_score = score.vanilla(\n        matching_terms, source_distance, target_distance, source_counts,\n        target_counts)\n    print('Calculated score:', match_score)", "def determineNextMove(player_location, opponentLocation, coins):\n    global route, currentcoin, meta_route, best_weight, best_path, coins_to_search, index\n    if opponentLocation in coins_to_search:\n        coins_to_search, meta_route, route = change_way(coins, opponentLocation, player_location)[:3]\n        index = 0\n    elif currentcoin == player_location: \n        if len(route) != 0:\n            old_dist = algo.dijkstra(mazeMap, player_location)[1][meta_route[index+1]]\n        coins_to_search2, meta_route2, route2, new_dist = change_way(coins, opponentLocation, player_location)\n\n        #dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n        #coins_to_search = get_n_shortest(3, coins, player_location, dists_matrix)\n        \t\n        #ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n        #for c in coins_to_search:\n            #if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n                # coins_to_search.remove(c)\n                #break\n        \t\t\n        #best_weight = float(\"inf\")\n        #best_path = []\n        #exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n        #meta_route2 = [player_location] + best_path\n        #route2 = u.location_list_to_route(meta_route2, route_matrix)\n        #new_dist = dist_matrix[player_location][meta_route2[1]]\n\t\t\n        if len(route) == 0 or old_dist - new_dist > 3:\n            route = route2\n            meta_route = meta_route2 \n            index = 0\n        index += 1\n        currentcoin = meta_route[index]\n    #api.debug(route)\n    return u.direction(player_location, route.pop(0))", "def solve_part2(start):\n    inputs = load_inputs(False)\n    all_matches = []\n    tiles = inputs.keys()\n    for elem in tiles:\n        matches = defaultdict(list)\n        for elem2 in tiles:\n            if elem != elem2 and compare_tile(inputs[elem], inputs[elem2]):\n                l = matches[elem]\n                l.append(elem2)\n        if matches[elem]:\n            all_matches.append(matches[elem])\n\n    # start from an arbitrary corner\n    # find a match, rotate me so that the match is along the right side\n    # fill in properly oriented match\n    # repeat, for row = 1+, consider top-match and left-match\n\n    # for every rotation / orientation, look for the pattern", "def get_routing_solution(self):\n        G = self.base_digraph\n        s1 = self.sources[0]\n        s2 = self.sources[1]\n        t1 = self.destinations[0]\n        t2 = self.destinations[1]\n\n        try:\n            m = Model('routing')\n            m.setParam('OutputFlag', False)\n\n            # variables,\n            # We have one variable per edge per session\n            # e is the dict of dict for the variables\n            e = {}\n            r = {}\n            for i in [1,2]:\n                e[i] = {}\n                r[i] = m.addVar()\n                for u,v in G.edges():\n                    e[i][u,v] = m.addVar(lb=0)\n\n            m.update()\n\n            obj = quicksum(r.values())\n            m.setObjective(obj, GRB.MAXIMIZE)\n\n            # constraints\n            # 1. conservation of flow at all intermediate nodes\n            # 2. 
capacity constraints for each edge\n\n            for u,v in G.edges():\n                m.addConstr(e[1][u,v] + e[2][u,v] <= G[u][v]['capacity'])\n\n            m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s1)) == r[1])\n            m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s2)) == r[2])\n            m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s2)) == 0)\n            m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s1)) == 0)\n            m.addConstr(quicksum(e[1][u,v] for u,v in G.in_edges(t1)) == r[1])\n            m.addConstr(quicksum(e[2][u,v] for u,v in G.in_edges(t2)) == r[2])\n\n            for n in G.nodes():\n                if n not in [s1, s2, t1, t2]:\n                    for i in [1, 2]:\n                        inflow = quicksum(e[i][u,v] for u,v in G.in_edges(n))\n                        outflow = quicksum(e[i][u,v] for u,v in G.out_edges(n))\n                        m.addConstr(inflow == outflow)\n\n            m.optimize()\n\n            if m.status == GRB.status.OPTIMAL:\n                for u, v in G.edges():\n                    G[u][v]['Routing'] = {}\n                    G[u][v]['Routing'][1] = e[1][u,v].x\n                    G[u][v]['Routing'][2] = e[2][u,v].x\n                return (m.objVal, r[1].x, r[2].x)\n            else:\n                # something went wrong...err...\n                print(\"Something was wrong, no optimal solution obtained\")\n                return None, None, None\n\n        except GurobiError:\n            print('Error Report from Gurobi')", "def cost_function_SO4(params: list):\n    cost = 0\n    SO4 = SO4_circuit(params[0], params[1], params[2], params[3], params[4], params[5])\n\n    for i in range(4):\n        for j in range(4):\n            cost += abs(SO4[i][j] - U[i][j])\n\n    # identity_goal = SO4 @ np.linalg.inv(U)\n    # for i in range(4):\n    #     for j in range(4):\n    #         cost += abs(identity_goal[i][j] - I4[i][j])\n\n    return cost", "def processPart2(inputs):\n    total = 0\n    inputs = inputs if type(inputs) is list else [inputs]\n    for input in inputs:\n        [a, b, c] = sorted(map(int, input.split('x')))\n        total = total + (a * 2 + b * 2 + (a * b * c))\n    return total", "def test_part_2(arguments, distance, output):\n    assert part_2.solution(arguments, distance) == output", "def map(initial_aerosol_pops, params):\n    from parcel_model.parcel import ParcelModelError\n    import time\n\n    ## Pull model settings from params\n    T0, S0, P0 = [params[s] for s in ('T0', 'S0', 'P0')]\n    z_top, dt, max_steps = params['z_top'], params['dt'], params['max_steps']\n\n    ## Helper method for re-submitting jobs which fail.\n    def resubmit(ps, initial_aerosols, dt, max_steps):\n        x = time.time()\n\n        alpha, gamma, V = ps\n\n        while dt >= 0.001:\n            ## Try to run the model\n            activation_results = RunParcelModels.simulation_pair(ps, initial_aerosols, V, T0, S0, P0, z_top, dt, max_steps)\n            ## If it didn't work, report this and cut the timestep in half\n            if not activation_results:\n                print(\"resubmitting %r with dt=%1.2e\" % (ps, dt/2.,))\n                dt = dt/2.\n                max_steps = int(max_steps*3.)\n            ## If it did work, we're done\n            else:\n                break\n        ## If we still don't have a good result after cutting dt several times,\n        ## then report this.\n        elapsed = time.time() - x\n        if not activation_results:\n            print(\"FAILED (%1.2e seconds) %r\" % (elapsed, ps))\n        else:\n            print(\"SUCCESS (%1.2e seconds) %r\" % (elapsed, ps))\n        return activation_results\n\n    results = []\n    n, initial_aerosol_pops = initial_aerosol_pops\n    n_runs = len(initial_aerosol_pops)\n    for i, (initial_aerosols, ps) in enumerate(initial_aerosol_pops):\n        print(\"EXECUTING RUN %d/%d\" % (i+1, n_runs))\n        component_results = {}\n\n        ## INDIVIDUAL SPECIES\n        param_fail = False\n        for aerosol in initial_aerosols:\n            species = aerosol.species\n\n            #activation_results = RunParcelModels.simulation_pair(ps, [aerosol, ], V, T0, S0, P0, z_top, dt, max_steps)\n            activation_results = resubmit(ps, [aerosol, ], dt, max_steps)\n\n            if not 
activation_results:\n results.append((ps, None))\n param_fail = True\n break\n else:\n component_results[species] = activation_results\n\n if not param_fail:\n ## FULL MIXTURE\n #activation_results = RunParcelModels.simulation_pair(ps, initial_aerosols, V, T0, S0, P0, z_top, dt, max_steps)\n activation_results = resubmit(ps, initial_aerosols, dt, max_steps)\n\n if not activation_results:\n results.append((ps, None))\n continue\n\n component_results['mixture'] = activation_results\n results.append((ps, component_results))\n yield (n, results)", "def work(params) -> Union[None, float]:\n try:\n # either HTTP or bindings\n if host:\n path = action if action == \"route\" else \"sources_to_targets\"\n params_str = delimit_tuple(\n tuple((delimit_tuple(x) for x in params)), delimiter=\";\"\n )\n route = requests.get(f\"{host}/{path}/v1/driving/{params_str}\")\n else:\n route = router.route(params) if action == \"route\" else None\n except (RuntimeError, requests.exceptions.BaseHTTPError):\n return None\n\n if (\n random() > 0.95\n ): # assume that large number of routes will be tested, only print sample in debug mode\n LOGGER.debug(f\"Calculated route between {params[0]} and {params[1]}\")\n\n if report:\n result = route.json()\n if action == \"route\":\n try:\n dist = sum([x[\"distance\"] for x in result[\"routes\"]])\n except KeyError:\n LOGGER.critical(\n f\"No route was found from {params[0]} to {params[1]}. \"\n f\"Try regenerating the locations or specify a more narrow bounding box.\"\n )\n return None\n else:\n dists = [\n inner[\"distance\"]\n for outer in route[\"sources_to_targets\"]\n for inner in outer\n ]\n dist: float = mean(filter(lambda x: x is not None, dists))\n\n return dist", "def solve(self):\n pass", "def solve(self):\n pass", "def process(date, lat_oi, lon_oi, shared_args, verbose=False):\n \n filename = download(date, shared_args)\n\n atmo_data = data.open_netcdf4(filename)\n\n # choose points\n lat = atmo_data.variables['lat'][:]\n lon = atmo_data.variables['lon'][:]\n lat = numpy.stack([lat]*lon.shape[0], axis=0)\n lon = numpy.stack([lon]*lat.shape[1], axis=1)\n chosen_idxs, data_coor = funcs.choose_points(lat, lon, lat_oi, lon_oi)\n\n latidx = tuple(chosen_idxs[0])\n lonidx = tuple(chosen_idxs[1])\n \n t1, t2 = data.closest_hours(atmo_data.variables['time'][:].data,\n atmo_data.variables['time'].units, date)\n t1_dt = num2date(atmo_data.variables['time'][t1], atmo_data.variables['time'].units)\n t2_dt = num2date(atmo_data.variables['time'][t2], atmo_data.variables['time'].units)\n\n index1 = (t1, slice(None), latidx, lonidx)\n index2 = (t2, slice(None), latidx, lonidx)\n\n press = numpy.array(atmo_data.variables['lev'][:])\n\n temp1 = numpy.empty\n temp2 = numpy.empty\n \n temp1 = numpy.diagonal(atmo_data.variables['T'][index1], axis1=1, axis2=2).T\n temp2 = numpy.diagonal(atmo_data.variables['T'][index2], axis1=1, axis2=2).T\n\n rhum1 = numpy.diagonal(atmo_data.variables['RH'][index1], axis1=1, axis2=2).T # relative humidity\n rhum2 = numpy.diagonal(atmo_data.variables['RH'][index2], axis1=1, axis2=2).T\n\n height1 = numpy.diagonal(atmo_data.variables['H'][index1], axis1=1, axis2=2).T / 1000.0 # height\n height2 = numpy.diagonal(atmo_data.variables['H'][index2], axis1=1, axis2=2).T / 1000.0\n\n # interpolate in time, now they are shape (4, N)\n t = interp.interp_time(date, temp1, temp2, t1_dt, t2_dt)\n h = interp.interp_time(date, height1, height2, t1_dt, t2_dt)\n rh = interp.interp_time(date, rhum1, rhum2, t1_dt, t2_dt)\n \n # interpolate in space, now they are shape 
(1, N)\n height = interp.idw(h, data_coor, [lat_oi, lon_oi])\n temp = interp.idw(t, data_coor, [lat_oi, lon_oi])\n relhum = interp.idw(rh, data_coor, [lat_oi, lon_oi])\n \n # calculate the number of nan and zero values in the array and remove them, reducing the size of the array accordingly\n nr_of_nans1 = numpy.sum(temp1[0].mask)\n nr_of_nans2 = numpy.sum(temp2[0].mask)\n nr_of_nans = max([nr_of_nans1,nr_of_nans2])\n \n height = height[nr_of_nans:]\n temp = temp[nr_of_nans:]\n relhum = relhum[nr_of_nans:]\n press = press[nr_of_nans:]\n\n # load standard atmosphere for mid-lat summer\n # TODO evaluate standard atmo validity, add different ones for different TOY?\n stan_atmo = numpy.loadtxt(settings.STAN_ATMO, unpack=True)\n stan_height, stan_press, stan_temp, stan_relhum = stan_atmo\n # add standard atmo above cutoff index\n \n cutoff_idx = numpy.abs(stan_press - press[-1]).argmin()\n height = numpy.append(height, stan_height[cutoff_idx:])\n press = numpy.append(press, stan_press[cutoff_idx:])\n temp = numpy.append(temp, stan_temp[cutoff_idx:])\n relhum = numpy.append(relhum, stan_relhum[cutoff_idx:])\n \n # Convert relative humidity to percentage for modtran\n relhum = relhum * 100\n\n # TODO add buoy stuff to bottom of atmosphere\n\n if verbose:\n # send out plots and stuff\n stuff = numpy.asarray([height, press, temp, relhum]).T\n h = 'Height [km], Pressure[kPa], Temperature[k], Relative_Humidity[0-100]' + '\\nCoordinates: {0} Buoy:{1}'.format(data_coor, buoy)\n \n numpy.savetxt('atmosphere_{0}_{1}_{2}.txt'.format('merra', date.strftime('%Y%m%d'), buoy.id), stuff, fmt='%7.2f, %7.2f, %7.2f, %7.2f', header=h)\n\n return height, press, temp, relhum", "def solve(self):\n while self.counter[-1] != len(self.sequences[-1]) + 1:\n basepair = self.generatebasepairs(self.counter) # Get the combination for the current coordination\n moves = self.generatemoves(basepair) # Get all possible ways to get to this current coordination\n\n maxscore = -100000000 # set the maxscore to a value which is always lower than possible scores\n bestmove = None\n\n # FOr each move calculate score\n for move in moves:\n coordinates = self.generatecoordinates(move, self.counter) # generate the origin coordinate for the current move\n score = self.retrievematrixelement(coordinates).score # Get the score at the origin coordinate\n pairs = self.getallpairs(move) # Get all pairs possible for the current move\n scores = [self.scorePair(u) for u in pairs] # Generate scores for all pairs\n newscore = score + sum(scores) # Add generated scores to origin score\n if newscore > maxscore:\n maxscore = newscore\n bestmove = coordinates\n\n self.enterelement(self.counter, Score(bestmove, maxscore))\n self.increase()", "def actualSolve(self, lp):\n\t\traise NotImplementedError", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def calculate_absolute(self, inputs):\n\n # x, y, [error term for each camera]\n minimizer = [0, 0] + [1]*len(inputs)\n lhs_eq = []\n rhs_eq = []\n\n for idx, input in enumerate(inputs):\n lhs_eq_for_input = [-1*input[\"a\"], 1] + [0]*len(inputs)\n lhs_eq_for_input[idx + 2] = -1\n lhs_eq.append(lhs_eq_for_input)\n\n rhs_eq.append(\n -1 * input[\"a\"] * input[\"cx\"] + input[\"cy\"]\n )\n\n opt = linprog(\n c=minimizer,\n A_ub=None,\n b_ub=None,\n A_eq=lhs_eq,\n 
b_eq=rhs_eq,\n bounds=[],\n method=\"simplex\"\n )\n\n # Solution array is [x, y, ..error components..]\n solution = opt.x\n error = sum([x for idx, x in enumerate(solution) if idx > 1])\n return solution[1], solution[0], error", "def solve_step(self,h,dstep):\n pass", "def handle_request(self, request: RPMRequest) -> Union[RPMRequest, RPMStopRequest]:\n\n resp: dict = request.response\n\n # end solution finding process\n if \"satisfied\" in resp and resp[\"satisfied\"]:\n if resp[\"solution_index\"] == 0: # \"original\" solution\n return RPMStopRequest(self._xs[self._h], self._fs[self._h])\n else: # additional solution\n return RPMStopRequest(\n self._axs[self._h][resp[\"solution_index\"] - 1], self._afs[self._h][resp[\"solution_index\"] - 1]\n )\n\n # continue with new reference point given by the DM\n else:\n self._h += 1\n\n if len(self._qs) - self._h <= 2:\n # \"expand\" space on arrays\n extra_space = [None] * 10\n self._qs = np.array(np.concatenate((self._qs, extra_space), axis=None), dtype=object)\n self._xs = np.array(np.concatenate((self._xs, extra_space), axis=None), dtype=object)\n self._fs = np.array(np.concatenate((self._fs, extra_space), axis=None), dtype=object)\n self._pqs = np.array(np.concatenate((self._pqs, extra_space), axis=None), dtype=object)\n self._axs = np.array(np.concatenate((self._axs, extra_space), axis=None), dtype=object)\n self._afs = np.array(np.concatenate((self._afs, extra_space), axis=None), dtype=object)\n\n # set new reference point\n self._qs[self._h] = resp[\"reference_point\"]\n self._q = self._qs[self._h]\n\n # set weighting vector\n self._w = self._q / (self._utopian - self._nadir)\n\n # set initial values for decision variables\n if isinstance(self._problem, MOProblem):\n x0 = self._problem.get_variable_upper_bounds() / 2\n else:\n # discrete case\n x0 = self._variable_vectors[0] # this is ignored in the discrete case\n\n # solve the ASF-problem\n result = self.solve_asf(\n self._q,\n x0,\n self._w,\n self._nadir,\n self._utopian,\n self._objectives,\n self._variable_vectors,\n self._variable_bounds,\n method=self._method_de,\n )\n\n # update current solution and objective function values\n if isinstance(self._problem, MOProblem):\n self._xs[self._h] = result[\"x\"]\n self._fs[self._h] = self._objectives(self._xs[self._h])[0]\n else:\n # discrete case\n self._xs[self._h] = self._variable_vectors[result[\"x\"]]\n self._fs[self._h] = self._objectives[result[\"x\"]]\n\n # calculate perturbed reference points\n self._pqs[self._h] = self.calculate_prp(self._q, self._fs[self._h])\n\n # calculate n other solutions with perturbed reference points\n results_additional = [\n self.solve_asf(\n pqi,\n x0,\n self._w,\n self._nadir,\n self._utopian,\n self._objectives,\n self._variable_vectors,\n self._variable_bounds,\n self._method_de,\n )\n for pqi in self._pqs[self._h]\n ]\n\n # store results into arrays\n if isinstance(self._problem, MOProblem):\n self._axs[self._h] = [result[\"x\"] for result in results_additional]\n self._afs[self._h] = [self._objectives(xs_i)[0] for xs_i in self._axs[self._h]]\n else:\n self._axs[self._h] = [self._variable_vectors[result[\"x\"]] for result in results_additional]\n self._afs[self._h] = [self._objectives[result[\"x\"]] for result in results_additional]\n\n # return the information from iteration round to be shown to the DM.\n return RPMRequest(self._fs[self._h], self._afs[self._h], self._ideal, self._nadir)", "def solve(args):\n\n global a\n global b\n global c\n\n a, b, c = args\n mem = {}\n\n # a tree of 30 levels 
should be enough (all values are < 2^30)\n sol = count_pairs(30, 1, 1, 1, mem)\n\n return sol", "def housepredict(intent_request):\r\n \r\n \r\n location_zip = get_slots(intent_request)[\"location\"]\r\n housetype_zip = get_slots(intent_request)[\"housetype\"]\r\n source = intent_request['invocationSource']\r\n \r\n print('received request: ' + str(intent_request))\r\n print (\"housetype\",housetype_zip)\r\n print (\"location1\",location_zip)\r\n\r\n if source == 'DialogCodeHook':\r\n # Perform basic validation on the supplied input slots.\r\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\r\n slots = get_slots(intent_request)\r\n print('slots are' ,str(slots)) \r\n validation_result = validate_housepred(location_zip)\r\n if not validation_result['isValid']:\r\n slots[validation_result['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result['violatedSlot'],\r\n validation_result['message'])\r\n\t\t\r\n validation_result2 = validate_housepred_hstyp(housetype_zip)\r\n if not validation_result2['isValid']:\r\n slots[validation_result2['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result2['violatedSlot'],\r\n validation_result2['message'])\r\n\r\n # Pass the price of the flowers back through session attributes to be used in various prompts defined\r\n # on the bot model.\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n if location_zip is not None and housetype_zip is not None:\r\n output_session_attributes['Price'] = house_price_pred(location_zip,housetype_zip)#len(location_zip)*5#house_price_pred(location_zip,housetype_zip) \r\n #price = house_price_pred(location_zip,housetype_zip)# Elegant pricing model\r\n\t\t\t\r\n return delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n # Order the flowers, and rely on the goodbye message of the bot to define the message to the end user.\r\n # In a real bot, this would likely involve a call to a backend service.\r\n print(intent_request['sessionAttributes']['Price']) \r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Approx. 
next year growth prediction for {hstyp} in {loc} is {prc}%'.format(hstyp=housetype_zip,loc=location_zip,prc=intent_request['sessionAttributes']['Price'])})", "def main(input_file):\n # Read the map and split it into lines.\n # Make sure to access it like this `mapData[y][x]`\n mapData = input_file.read().strip().splitlines()\n mapWidth = len(mapData[0])\n mapHeight = len(mapData)\n\n # Iterate through each point on the map\n slopeCounts = []\n for y in range(mapHeight):\n for x in range(mapWidth):\n obj = mapData[y][x]\n\n # If the current position is empty, skip it\n if obj == \".\":\n continue\n\n # Iterate through each OTHER point on the map and calculate its\n # slope from the station point\n slopes = set()\n for y2 in range(mapHeight):\n for x2 in range(mapWidth):\n # Make sure to skip the station point and the empty points\n if x2 == x and y2 == y or mapData[y2][x2] == \".\":\n continue\n slopes.add(\n (\n (y2 - y) / (x2 - x) if x2 != x else math.inf,\n math.copysign(1.0, x2 - x),\n math.copysign(1.0, y2 - y),\n )\n )\n\n slopeCounts.append((len(slopes), x, y))\n\n # We need the results of part 1 to solve this\n resultP1 = max(slopeCounts, key=lambda s: s[0])\n\n # Condense the asteroids down to a list and sort it based on distance\n # to the station\n station = (resultP1[1], resultP1[2])\n print(\"STATION AT\", station)\n asteroids = []\n for y in range(mapHeight):\n for x in range(mapWidth):\n obj = mapData[y][x]\n if obj == \"#\" and (x, y) != station:\n asteroids.append((x, y))\n asteroids.sort(\n key=lambda coord: math.hypot(\n coord[0] - station[0], coord[1] - station[1]\n )\n )\n\n # Group the asteroids by cardinality and slope\n groups = dict()\n for target in asteroids:\n slope = (\n (target[1] - station[1]) / (target[0] - station[0])\n if target[0] != station[0]\n else math.inf\n )\n cardinalX = math.copysign(1.0, target[0] - station[0])\n cardinalY = math.copysign(1.0, target[1] - station[1])\n\n if (cardinalX, cardinalY) not in groups:\n groups[(cardinalX, cardinalY)] = collections.defaultdict(list)\n\n # Vertical or horizontal points are subgrouped together\n if slope == math.inf or slope == 0:\n subgroup = slope\n else:\n subgroup = abs(slope) ** (cardinalX * cardinalY)\n # subgroup = math.copysign(slope, cardinalX * cardinalY)\n\n # Add the asteroid to its group and subgroup\n groups[(cardinalX, cardinalY)][subgroup].append(target)\n\n # Iterate through the groups in clockwise order, popping off\n # asteroids to find the 200th\n count = 0\n found = None\n while found is None:\n for direction in [\n (0, -1),\n (1, -1),\n (1, 0),\n (1, 1),\n (0, 1),\n (-1, 1),\n (-1, 0),\n (-1, -1),\n ]:\n if direction in groups:\n for slope, targets in sorted(\n groups[direction].items(), key=lambda item: item[0]\n ):\n if len(targets):\n target = targets.pop(0)\n count += 1\n if count == 200:\n found = target\n\n print(\"RESULT:\", found[0] * 100 + found[1])", "def google_vrp(data):\n # Instantiate the data problem.\n #data = flora_data_model() #TODOcreate_data_model()\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = 
manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Add Distance constraint.\n dimension_name = 'Distance'\n routing.AddDimension(\n transit_callback_index,\n 0, # no slack\n 300000, # vehicle maximum travel distance\n True, # start cumul to zero\n dimension_name)\n distance_dimension = routing.GetDimensionOrDie(dimension_name)\n distance_dimension.SetGlobalSpanCostCoefficient(100)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n if solution:\n print_solution(data, manager, routing, solution)\n # get solution and return it\n sol = get_solution(data, manager, routing, solution)\n return sol\n else:\n print('No solution found !')", "def solve(self):\n \n raise NotImplementedError(\"not implemented!\")", "def setup(self):\n igd = self.options['input_grid_data']\n ogd = self.options['output_grid_data']\n output_subset = self.options['output_subset']\n\n if ogd is None:\n ogd = igd\n\n # Build the interpolation matrix which maps from the input grid to the output grid.\n # Rather than a single phase-wide interpolating polynomial, map each segment.\n # To do this, find the nodes in the output grid which fall in each segment of the input\n # grid. Then build a Lagrange interpolating polynomial for that segment\n L_blocks = []\n output_nodes_ptau = list(ogd.node_ptau[ogd.subset_node_indices[output_subset]])\n\n for iseg in range(igd.num_segments):\n i1, i2 = igd.segment_indices[iseg]\n iptau_segi = np.take(igd.node_ptau, (i1, i2-1))\n istau_segi = np.take(igd.node_stau, (i1, i2-1))\n\n # The indices of the output grid that fall within this segment of the input grid\n if ogd is igd:\n optau_segi = iptau_segi\n else:\n ptau_hi = igd.segment_ends[iseg+1]\n if iseg < igd.num_segments - 1:\n idxs_in_iseg = np.where(output_nodes_ptau <= ptau_hi)[0]\n else:\n idxs_in_iseg = np.arange(len(output_nodes_ptau))\n optau_segi = np.asarray(output_nodes_ptau)[idxs_in_iseg]\n # Remove the captured nodes so we don't accidentally include them again\n output_nodes_ptau = output_nodes_ptau[len(idxs_in_iseg):]\n\n # Now get the output nodes which fall in iseg in iseg's segment tau space.\n ostau_segi = 2.0 * (optau_segi - iptau_segi[0]) / (iptau_segi[-1] - iptau_segi[0]) - 1\n\n # Create the interpolation matrix and add it to the blocks\n L, _ = lagrange_matrices(istau_segi, ostau_segi)\n L_blocks.append(L)\n\n self.interpolation_matrix = block_diag(*L_blocks)\n r, c = np.nonzero(self.interpolation_matrix)\n\n output_num_nodes, input_num_nodes = self.interpolation_matrix.shape\n\n for (name, kwargs) in self._timeseries_outputs:\n\n input_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n input_name = 'input_values:{0}'.format(name)\n self.add_input(input_name,\n shape=(input_num_nodes,) + kwargs['shape'],\n **input_kwargs)\n\n output_name = name\n output_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n output_kwargs['shape'] = (output_num_nodes,) + kwargs['shape']\n self.add_output(output_name, **output_kwargs)\n\n self._vars.append((input_name, output_name, kwargs['shape']))\n\n size = np.prod(kwargs['shape'])\n val_jac = 
np.zeros((output_num_nodes, size, input_num_nodes, size))\n\n for i in range(size):\n val_jac[:, i, :, i] = self.interpolation_matrix\n\n val_jac = val_jac.reshape((output_num_nodes * size, input_num_nodes * size),\n order='C')\n\n val_jac_rows, val_jac_cols = np.where(val_jac != 0)\n\n rs, cs = val_jac_rows, val_jac_cols\n self.declare_partials(of=output_name,\n wrt=input_name,\n rows=rs, cols=cs, val=val_jac[rs, cs])", "def main():\n dirname = os.path.dirname(__file__)\n input_source = os.path.join(dirname, '..', 'input1.txt')\n # Make list, since the generator has to be used multiple times\n d = data_parser(input_source)\n return (solver_1star(d), solver_2star(d))", "def solve(ctx):\n my_solver(ctx.obj['filename'])", "def run(self):\n best_score = float('inf')\n best_route = None\n best_nr_iterations = None\n best_tabu_list_size = None\n for i in range(self.range_iterations_start, self.range_iterations_end, 10):\n for j in range(self.range_tabu_list_start, self.range_tabu_list_end):\n print('testing for nr_iterations', i, ' and tabu list size', j)\n self.hc.generate_initial_solution(use_seed=True)\n score, route, iteration = self.hc.solve(tabu=self.tabu, with_time_windows=self.with_time_windows,\n nr_iterations=i, tabu_size=j,\n allow_infeasibilites=self.allow_infeasibilites)\n\n if score < best_score:\n best_score = score\n best_route = route\n best_nr_iterations = i\n best_tabu_list_size = j\n\n print('best results with sore', best_score, best_nr_iterations, best_tabu_list_size )\n return best_score, best_route, best_tabu_list_size", "def solution(data):\n instructions = preprocess(data)\n solver = Ferry(instructions)\n return solver.solve()", "def solve_za(engine, pt, aend, dlinear_k, s, v, s1):\n code = CodeSegment(engine)\n code.solve_linear_displacement(source_k='dlinear_k', s=s1)\n code.multiply(x1='s1', x2=Literal(pt.D1(aend)), y='s')\n code.multiply(x1='s1', x2=Literal(pt.f1(aend) * aend ** 2 * pt.E(aend) * pt.D1(aend)), y='v')\n return code", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n 
distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def compute_controller(self):\n # region Input Info\n\n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n # print(messages)\n # endregion\n if messages:\n # similar to laplacian but for each robot\n # for m in messages:\n # dx += m[1][0] - pos[0]\n # dy += m[1][1] - pos[1]\n\n # position of All robots\n Apos = np.zeros([6,2])\n Apos[self.id,:]=pos[0:2]\n for m in messages:\n Apos[m[0],:]=m[1][0:2]\n\n TarM = np.zeros([6,2])\n TarM[self.id, :] = self.TargetP[self.Tid,:]-pos[0:2]\n Cdiff = Apos-pos[0:2]\n Cdiff = np.sqrt(np.square(Cdiff[:,0])+np.square(Cdiff[:,1]))\n Cdiff = np.sum(Cdiff)\n Ddiff = self.P_Des-self.P_Des[self.id]\n Ddiff = np.sqrt(np.square(Ddiff[:, 0]) + np.square(Ddiff[:, 1]))\n Ddiff = np.sum(Ddiff)\n Tdiff = np.abs(Ddiff - Cdiff)\n\n\n\n\n # region Obstacles\n Obc = Apos\n # Obc = self.Obstacles\n # Obc = np.vstack([Obs,pos[0:2]])\n Diff = pos[0:2] - Obc\n for m in range(0, Diff.shape[0]):\n if (np.sqrt(np.square(Diff[m, 0]) + np.square(Diff[m, 1]))) > 0.35:\n Diff[m, :] = np.array([0, 0])\n\n DiffY = Diff[:, 1].reshape([1, -1])\n DiffX = Diff[:, 0].reshape([1, -1])\n x_odot = np.sum(np.exp(-np.square(DiffX) / self.Var) * DiffX)\n y_odot = np.sum(np.exp(-np.square(DiffY) / self.Var) * DiffY)\n\n ObsAv = np.array([x_odot, y_odot])\n # endregion\n\n\n NewGd = np.square(np.transpose(self.E) @ Apos)\n NewGd = (NewGd[:, 0] + NewGd[:, 1]).reshape([-1, 1])\n G = self.Gdsq - NewGd\n Rg = self.DistJacobian(Apos, self.Edges)\n p_ddot = np.zeros(([6, 2]))\n\n if (Tdiff < self.Thr):\n self.StartTimer = True\n\n\n if(self.StartTimer):\n self.Timer += 1\n if (self.Timer > 500+self.OffTimer):\n self.FormStable = True\n self.StartTimer = False\n self.Timer = 0\n\n if(self.Tid > 3 and np.sum(TarM[self.id, 0])<5):\n TarM[self.id, 0] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) < 5):\n TarM[self.id, 1] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n\n if (self.Tid > 3 and np.sum(TarM[self.id, :]) < 0.01):\n self.Tid +=1\n\n if (self.FormStable):\n # Formation Done\n if self.Tid == 0 and self.Formation == \"square\":\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n # self.Tid += 1\n # self.FormStable = False\n print(self.P_Des, self.Formation, \" \", self.Tid)\n # self.K1 = 5\n # self.K2 = 50\n if (self.Tid < self.TargetP.shape[0]-1 and self.FormStable):\n self.Tid += 1\n if(self.Tid == 1):\n self.K1 = 2\n self.K3 = 10\n self.Thr = 0.001\n if (self.Tid == 2):\n self.K1 = 20\n self.K3 = 1\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n self.FormStable = False\n # 
Linear Control Law\n p_dot = np.zeros([6,2])\n p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # Non - linear Control Law\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_dot += p_ddot*self.dt\n if(self.id == 0):\n # print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n p_dot = self.K3 * TarM\n if (self.id == 0):\n print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n # if(self.Tid == 1):\n # p_dot += -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n\n dx = p_dot[self.id, 0]\n dy = p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n #else:\n # TarM[self.id, :] = Tdiff\n # # Linear Control\n # p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n # p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # dx = p_dot[self.id, 0]\n # dy = p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += self.K3 * TarM\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n\n # region Robot Wheel Control\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n\n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])\n # endregion", "def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if 
is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True", "def _route_chunk(data, host_url, annotations='duration', retries=10, extra_params=None):\n\t# offsets are used to make correct indice of the result dataframe\n\tsources, destinations, sources_offset, destinations_offset = data\n\tsources_count = len(sources)\n\tdestinations_count = len(destinations)\n\n\t# OSRM takes all points as one list, and then numbers of sources & dests in it\n\tall_points = sources + destinations\n\tencoded = encode_poly([(p.y, p.x) for p in all_points])\n\n\t# numerate sources & dests. sources come first\n\tsource_numbers = ';'.join(map(str, range(sources_count)))\n\tdestination_numbers = ';'.join(map(str,\n\t\trange(sources_count, sources_count + destinations_count)))\n\n\n\textra_params = extra_params or {}\n\tparams = {\n\t\t'sources': source_numbers,\n\t\t'destinations': destination_numbers,\n\t\t'generate_hints': 'false',\n\t\t'annotations': annotations,\n\t\t**extra_params\n\t}\n\n\tencoded_params = urllib.parse.quote_plus(urllib.parse.urlencode(params))\n\t# if we pass url and params separately to requests.get, it will make a malformed URL\n\tencoded_url = f'{host_url}/table/v1/driving/polyline({encoded})?{encoded_params}'\n\tresp = get_retry(encoded_url, {}, retries)\n\n\tif resp.status_code != 200:\n\t\traise RuntimeError(f'OSRM server responded with {resp.status_code} code. Content: {resp.content}')\n\n\tresp_data = resp.json()\n\tif resp_data.get('code', 'Ok') != 'Ok':\n\t\traise RuntimeError(f'OSRM server responded with error message: {resp_data[\"message\"]}')\n\n\t# if 'duration' is requested, then take resp_data['durations'], or resp_data['distances'] if distances.\n\t# also, 'duration,distance' might be requested, then take both and concatenate results (= join columns)\n\tresults = []\n\t\n\tfor key in annotations.split(','):\n\t\tdf = pd.DataFrame(resp_data[f'{key}s']).reset_index().rename(columns={'index': 'source'}).melt(id_vars='source', var_name='destination', value_name=key)\n\t\tdf[key] = df[key].astype(float)\n\t\tif len(results) > 0:\n\t\t\t# only append the data column\n\t\t\tresults.append(df[[key]])\n\t\telse:\n\t\t\tresults.append(df)\n\n\tresult_df = pd.concat(results, axis=1)\n\n\t# snapping distances\n\tresult_df['source_snap'] = result_df.source.map(pd.DataFrame(resp_data['sources'])['distance'])\n\tresult_df['destination_snap'] = result_df.destination.map(pd.DataFrame(resp_data['destinations'])['distance'])\n\n\t# instead of join/merge lookup\n\tresult_df['geometry'] = result_df['source'].map({i: g for i, g in enumerate(sources)})\n\tresult_df['geometry_dest'] = result_df['destination'].map({i: g for i, g in enumerate(destinations)})\n\n\t# shift back by the given offset\n\tresult_df['destination'] = result_df['destination'].astype(int) + destinations_offset\n\tresult_df['source'] = result_df['source'].astype(int) + sources_offset\n\treturn result_df", "def optimizeposition(areas, omegas, x0, x1, z0, z1):\n\n # initial position of each quadpoint is at the center\n # of the edge connecting the midpoint and a corner point\n rhos = 0.5 * ones(4)\n a = 1 / sqrt(3)\n deltarhos = 0.25 * ones(4) # delta for finite differences\n\n while True: # while method has not converged\n # print(\"################## new iteration #############\")\n rhs = f(rhos, omegas, a, x0, x1, z0, z1, areas)\n print(\"##\")\n print(rhs)\n print(rhos)\n if norm(rhs) < 1e-5:\n break\n mat = df(rhos, omegas, 
a, x0, x1, z0, z1, areas, deltarhos)\n update = solve(mat, rhs)\n\n rhos += update\n # for i in range(4):\n # rhos[i] = max(0,min(1,rhos[i]))\n \"\"\"\n print(\"the norm of the rhs is \")\n print(norm(rhs))\n print(mat)\n print(\"rhs\")\n print(rhs)\n print(update)\n print(\"rhos\")\n print(rhos)\n \"\"\"\n # print(alpha)\n return rhos", "def solution(data):\n lines = preprocess(data)\n solver = Code(lines)\n return solver.solve()", "def part2():\n\n program = IntCodeProcessor.load_program('day13input.txt')\n program[0] = 2\n cpu = IntCodeProcessor(program)\n result = None\n next_input = None\n ball_pos = None\n paddle_pos = None\n score = None\n while result is None:\n try:\n result = cpu.execute_program(next_input, reset=False)\n except ExecutionError as err:\n assert err.reason == ExecutionCode.NEED_INPUT\n\n ball_pos, paddle_pos, score = process_output(cpu.outputs, ball_pos, paddle_pos, score)\n cpu.outputs = []\n next_input = next_input_for(ball_pos, paddle_pos)\n print(f'Part 2 answer: {score}')" ]
[ "0.7265158", "0.6186221", "0.5823401", "0.56546044", "0.5640756", "0.55801356", "0.54897994", "0.5467185", "0.5436492", "0.537317", "0.5360428", "0.5298746", "0.5258818", "0.523264", "0.5204271", "0.51753855", "0.5158027", "0.5145004", "0.5122745", "0.5106338", "0.50887746", "0.5086126", "0.5028262", "0.50223607", "0.50223607", "0.5015033", "0.5000663", "0.49936974", "0.49924922", "0.49876985", "0.4978531", "0.49778208", "0.4955575", "0.4949454", "0.49475566", "0.49407578", "0.489641", "0.48950675", "0.48949298", "0.48943576", "0.48728454", "0.48628545", "0.48536828", "0.4847366", "0.48406202", "0.48366123", "0.48366123", "0.48366123", "0.48269933", "0.4825836", "0.48226792", "0.48007634", "0.4799716", "0.47985423", "0.47939017", "0.47928914", "0.47804764", "0.4773595", "0.4769156", "0.47418958", "0.47384506", "0.4737848", "0.47351247", "0.4733323", "0.47278", "0.47221124", "0.47215816", "0.4712797", "0.47101107", "0.47042894", "0.47032204", "0.47020468", "0.46978462", "0.4696341", "0.4696341", "0.46919292", "0.46902114", "0.46849862", "0.46844447", "0.4683843", "0.46776125", "0.46770123", "0.46761692", "0.4673967", "0.4671911", "0.4660883", "0.46589082", "0.46560758", "0.46431592", "0.46408254", "0.4638439", "0.46374813", "0.4637324", "0.46369064", "0.46348712", "0.46309116", "0.46290958", "0.46286565", "0.46170574", "0.46109384" ]
0.73194927
0
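The three score fields of this record fit together in one way worth spelling out: negative_scores holds the model's score for each negative snippet, document_score is the score of the positive document, and document_rank is, by all appearances (this interpretation is an assumption, not stated in the dump), the number of negatives that outscore the positive. A minimal sketch of that bookkeeping, using values from the record above:

def rank_document(document_score, negative_scores):
    # Rank 0 means the positive document outscored every negative
    # (assumed interpretation of the document_rank field).
    return sum(1 for s in negative_scores if s > document_score)

# First few negative scores from this record (the dump stores them as strings)
negative_scores = [0.7265158, 0.6186221, 0.5823401]
print(rank_document(0.73194927, negative_scores))  # -> 0, matching document_rank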
Compute Routes between origins and their assigned destinations in parallel and combine the results. This class assumes that the inputs have already been preprocessed and validated.
def __init__(  # pylint: disable=too-many-locals, too-many-arguments
    self, pair_type_str, origins, origin_id_field, destinations, dest_id_field,
    network_data_source, travel_mode, time_units, distance_units, max_routes,
    max_processes, out_routes, scratch_folder, reverse_direction=False,
    assigned_dest_field=None, od_pair_table=None, time_of_day=None, barriers=None
):
    pair_type = helpers.PreassignedODPairType[pair_type_str]
    self.origins = origins
    self.destinations = destinations
    self.out_routes = out_routes
    self.scratch_folder = scratch_folder
    time_units = helpers.convert_time_units_str_to_enum(time_units)
    distance_units = helpers.convert_distance_units_str_to_enum(distance_units)
    if not barriers:
        barriers = []
    self.max_processes = max_processes
    if not time_of_day:
        time_of_day = None
    else:
        time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT)

    # Initialize the dictionary of inputs to send to each OD solve
    self.rt_inputs = {
        "pair_type": pair_type,
        "origins": self.origins,
        "origin_id_field": origin_id_field,
        "destinations": self.destinations,
        "dest_id_field": dest_id_field,
        "network_data_source": network_data_source,
        "travel_mode": travel_mode,
        "time_units": time_units,
        "distance_units": distance_units,
        "time_of_day": time_of_day,
        "reverse_direction": reverse_direction,
        "scratch_folder": self.scratch_folder,
        "assigned_dest_field": assigned_dest_field,
        "od_pair_table": od_pair_table,
        "barriers": barriers,
        "origin_transfer_fields": [],  # Populate later
        "destination_transfer_fields": []  # Populate later
    }

    # List of intermediate output OD Line files created by each process
    self.route_fcs = []

    # Construct OID ranges for chunks of origins and destinations
    if pair_type is helpers.PreassignedODPairType.one_to_one:
        # Chunks are of the format [first origin ID, second origin ID]
        self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes)
    elif pair_type is helpers.PreassignedODPairType.many_to_many:
        # Chunks are of the format [chunk_num, chunk_size]
        num_od_pairs = 0
        with open(od_pair_table, "r", encoding="utf-8") as f:
            for _ in f:
                num_od_pairs += 1
        num_chunks = ceil(num_od_pairs / max_routes)
        self.chunks = [[i, max_routes] for i in range(num_chunks)]

    # Calculate the total number of jobs to use in logging
    self.total_jobs = len(self.chunks)

    self.optimized_cost_field = None
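The two chunk formats constructed above are what the parallel solve consumes later: each entry in self.chunks becomes one job handed to a pool of worker processes (compare the solve_route_in_parallel snippet among the negatives below). A minimal, self-contained sketch of that pattern follows; solve_chunk is a hypothetical stand-in for the real solve_route worker, and plain integer ranges stand in for the OID ranges that helpers.get_oid_ranges_for_input reads from the geodatabase.

from concurrent import futures
from math import ceil

def solve_chunk(inputs, chunk):
    # Hypothetical stand-in worker: the real tool builds and solves a
    # Route analysis for the OD pairs described by this chunk.
    return {"chunk": chunk, "solveSucceeded": True,
            "outputRoutes": f"Routes_{chunk[0]}_{chunk[1]}"}

def make_chunks(pair_type, max_routes, num_origins=0, num_od_pairs=0):
    if pair_type == "one_to_one":
        # Chunks of the format [first origin ID, last origin ID]
        return [[i, min(i + max_routes - 1, num_origins)]
                for i in range(1, num_origins + 1, max_routes)]
    # many_to_many: chunks of the format [chunk_num, chunk_size]
    return [[i, max_routes] for i in range(ceil(num_od_pairs / max_routes))]

def solve_in_parallel(chunks, max_processes, inputs):
    route_fcs = []
    with futures.ProcessPoolExecutor(max_workers=max_processes) as executor:
        jobs = {executor.submit(solve_chunk, inputs, chunk): chunk for chunk in chunks}
        for future in futures.as_completed(jobs):
            result = future.result()  # the real tool retries failed chunks here
            if result["solveSucceeded"]:
                route_fcs.append(result["outputRoutes"])
    return sorted(route_fcs)

if __name__ == "__main__":
    chunks = make_chunks("one_to_one", max_routes=10, num_origins=25)
    print(solve_in_parallel(chunks, max_processes=4, inputs={}))

Each chunk stays a small picklable list while the rt_inputs dictionary carries only paths and settings, so every worker process can open its own copies of the feature data independently.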
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_route_in_parallel(self):\r\n # Validate Route settings. Essentially, create a dummy Route class instance and set up the\r\n # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel processes\r\n # that are guaranteed to all fail.\r\n self._validate_route_settings()\r\n\r\n # Check if the input origins and destinations have any fields we should use in the route analysis\r\n self._populate_input_data_transfer_fields()\r\n\r\n # Compute Route in parallel\r\n LOGGER.info(f\"Beginning parallelized Route solves ({self.total_jobs} chunks)\")\r\n completed_jobs = 0 # Track the number of jobs completed so far to use in logging\r\n # Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes that solve the routes\r\n with futures.ProcessPoolExecutor(max_workers=self.max_processes) as executor:\r\n # Each parallel process calls the solve_route() function with the rt_inputs dictionary for the\r\n # given origin ranges and their assigned destinations.\r\n jobs = {executor.submit(solve_route, self.rt_inputs, range): range for range in self.chunks}\r\n # As each job is completed, add some logging information and store the results to post-process later\r\n for future in futures.as_completed(jobs):\r\n try:\r\n # The Route job returns a results dictionary. Retrieve it.\r\n result = future.result()\r\n except Exception: # pylint: disable=broad-except\r\n # If we couldn't retrieve the result, some terrible error happened and the job errored.\r\n # Note: This does not mean solve failed. It means some unexpected error was thrown. The most likely\r\n # causes are:\r\n # a) If you're calling a service, the service was temporarily down.\r\n # b) You had a temporary file read/write or resource issue on your machine.\r\n # c) If you're actively updating the code, you introduced an error.\r\n # To make the tool more robust against temporary glitches, retry submitting the job up to the number\r\n # of times designated in helpers.MAX_RETRIES. If the job is still erroring after that many retries,\r\n # fail the entire tool run.\r\n errs = traceback.format_exc().splitlines()\r\n failed_range = jobs[future]\r\n LOGGER.debug((\r\n f\"Failed to get results for Route chunk {failed_range} from the parallel process. Will retry \"\r\n f\"up to {helpers.MAX_RETRIES} times. Errors: {errs}\"\r\n ))\r\n job_failed = True\r\n num_retries = 0\r\n while job_failed and num_retries < helpers.MAX_RETRIES:\r\n num_retries += 1\r\n try:\r\n future = executor.submit(solve_route, self.rt_inputs, failed_range)\r\n result = future.result()\r\n job_failed = False\r\n LOGGER.debug(f\"Route chunk {failed_range} succeeded after {num_retries} retries.\")\r\n except Exception: # pylint: disable=broad-except\r\n # Update exception info to the latest error\r\n errs = traceback.format_exc().splitlines()\r\n if job_failed:\r\n # The job errored and did not succeed after retries. 
Fail the tool run because something\r\n # terrible is happening.\r\n LOGGER.debug(f\"Route chunk {failed_range} continued to error after {num_retries} retries.\")\r\n LOGGER.error(\"Failed to get Route result from parallel processing.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise\r\n\r\n # If we got this far, the job completed successfully and we retrieved results.\r\n completed_jobs += 1\r\n LOGGER.info(\r\n f\"Finished Route calculation {completed_jobs} of {self.total_jobs}.\")\r\n\r\n # Parse the results dictionary and store components for post-processing.\r\n if result[\"solveSucceeded\"]:\r\n self.route_fcs.append(result[\"outputRoutes\"])\r\n else:\r\n # Typically, a solve fails because no destinations were found for any of the origins in the chunk,\r\n # and this is a perfectly legitimate failure. It is not an error. However, they may be other, less\r\n # likely, reasons for solve failure. Write solve messages to the main GP message thread in debug\r\n # mode only in case the user is having problems. The user can also check the individual OD log\r\n # files.\r\n LOGGER.debug(f\"Solve failed for job id {result['jobId']}.\")\r\n LOGGER.debug(result[\"solveMessages\"])\r\n\r\n # Post-process outputs\r\n if self.route_fcs:\r\n LOGGER.info(\"Post-processing Route results...\")\r\n self.route_fcs = sorted(self.route_fcs)\r\n self._post_process_route_fcs()\r\n else:\r\n LOGGER.warning(\"All Route solves failed, so no output was produced.\")\r\n\r\n # Clean up\r\n # Delete the job folders if the job succeeded\r\n if DELETE_INTERMEDIATE_OUTPUTS:\r\n LOGGER.info(\"Deleting intermediate outputs...\")\r\n try:\r\n shutil.rmtree(self.scratch_folder, ignore_errors=True)\r\n except Exception: # pylint: disable=broad-except\r\n # If deletion doesn't work, just throw a warning and move on. 
This does not need to kill the tool.\r\n LOGGER.warning(f\"Unable to delete intermediate Route output folder {self.scratch_folder}.\")\r\n\r\n LOGGER.info(\"Finished calculating Routes.\")", "def compute_waypoints(self, source_loc, destination_loc):\n start_waypoint = self._map.get_waypoint(\n source_loc,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n end_waypoint = self._map.get_waypoint(\n destination_loc,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n assert start_waypoint and end_waypoint, 'Map could not find waypoints'\n route = self._grp.trace_route(\n start_waypoint.transform.location,\n end_waypoint.transform.location)\n # TODO(ionel): The planner returns several options in intersections.\n # We always take the first one, but this is not correct.\n return deque([to_pylot_transform(waypoint[0].transform)\n for waypoint in route])", "def compute_travel_cost_adjlist(\n origins, destinations, network, index_orig=None, index_dest=None\n):\n\n # NOTE: need to add an option/check for symmetric networks so we only need half the routing calls\n\n origins = origins.copy()\n destinations = destinations.copy()\n\n origins[\"osm_ids\"] = network.get_node_ids(\n origins.centroid.x, origins.centroid.y\n ).astype(int)\n destinations[\"osm_ids\"] = network.get_node_ids(\n destinations.centroid.x, destinations.centroid.y\n ).astype(int)\n\n ods = []\n\n if not index_orig:\n origins[\"idx\"] = origins.index.values\n index_orig = \"idx\"\n if not index_dest:\n destinations[\"idx\"] = destinations.index.values\n index_dest = \"idx\"\n\n # I dont think there's a way to do this in parallel, so we can at least show a progress bar\n with tqdm(total=len(origins[\"osm_ids\"])) as pbar:\n for origin in origins[\"osm_ids\"]:\n df = pd.DataFrame()\n df[\"cost\"] = network.shortest_path_lengths(\n [origin for d in destinations[\"osm_ids\"]],\n [d for d in destinations[\"osm_ids\"]],\n )\n df[\"destination\"] = destinations[index_dest].values\n df[\"origin\"] = origins[origins.osm_ids == origin][index_orig].values[0]\n\n ods.append(df)\n pbar.update(1)\n\n combined = pd.concat(ods)\n # reorder the columns\n return combined[['origin', 'destination', 'cost']]", "def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals\r\n # Use an insertCursor to insert Stops into the Route analysis\r\n destinations = {}\r\n destination_rows = []\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.origin_transfer_fields\r\n ) as icur:\r\n # Loop through origins and insert them into Stops along with their assigned destinations\r\n for origin in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [\"SHAPE@\", self.origin_id_field, self.assigned_dest_field] + self.origin_transfer_fields\r\n ):\r\n dest_id = origin[2]\r\n if dest_id is None:\r\n continue\r\n if dest_id not in destinations:\r\n dest_val = f\"'{dest_id}'\" if isinstance(dest_id, str) else dest_id\r\n with arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [\"SHAPE@\", self.dest_id_field] + self.destination_transfer_fields,\r\n where_clause=f\"{self.dest_id_field} = {dest_val}\"\r\n ) as cur:\r\n try:\r\n destinations[dest_id] = next(cur)\r\n except StopIteration:\r\n # The origin's destination is not present in the destinations table. 
Just skip the origin.\r\n continue\r\n # Insert origin and destination\r\n destination = destinations[dest_id]\r\n if self.reverse_direction:\r\n route_name = f\"{dest_id} - {origin[1]}\"\r\n origin_sequence = 2\r\n destination_sequence = 1\r\n else:\r\n route_name = f\"{origin[1]} - {dest_id}\"\r\n origin_sequence = 1\r\n destination_sequence = 2\r\n # Define the final origin and destination rows for the input Stops\r\n origin_row = [route_name, origin_sequence, origin[1], origin[0], None] + list(origin)[3:]\r\n destination_row = [route_name, destination_sequence, None, destination[0], destination[1]] + \\\r\n list(destination)[2:]\r\n icur.insertRow(origin_row)\r\n destination_rows.append(destination_row)\r\n\r\n # Insert destinations\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.destination_transfer_fields\r\n ) as dcur:\r\n for row in destination_rows:\r\n dcur.insertRow(row)", "def solve_route(inputs, chunk):\r\n rt = Route(**inputs)\r\n if inputs[\"pair_type\"] is helpers.PreassignedODPairType.one_to_one:\r\n rt.logger.info(f\"Processing origins OID {chunk[0]} to {chunk[1]} as job id {rt.job_id}\")\r\n elif inputs[\"pair_type\"] is helpers.PreassignedODPairType.many_to_many:\r\n rt.logger.info(f\"Processing chunk {chunk[0]} as job id {rt.job_id}\")\r\n rt.solve(chunk)\r\n rt.teardown_logger()\r\n return rt.job_result", "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n# distance_matrix = compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n# if assignment:\n# print_solution(manager, routing, assignment)\n if assignment:\n address1,address2,address3,address4,address5,address6,address7,address8,address9,address10=\\\n set_address_path(manager, routing, assignment,destinations_1)\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def route_matrix(client, origins, destinations, **kwargs):\n\n sep_pattern = re.compile(r'[,;|]')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n\n is_origins_str = isinstance(origins, str)\n is_origins_list = isinstance(origins, list)\n if not any([is_origins_str, 
is_origins_list]):\n raise ValueError('\"origins\" must be str or list!')\n elif is_origins_str:\n sep_origins = sep_pattern.split(origins)\n u_origins = origins.decode('utf-8')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n match = CN_pattern.search(u_origins)\n if match:\n if len(sep_origins) > 5:\n raise ValueError('\"origins\" incorrect! upper limits is 5.')\n else:\n origins = '|'.join(sep_origins)\n else:\n if len(sep_origins) > 10:\n raise ValueError('\"origins\"incorrect! upper limits is 5.')\n else:\n temp = [','.join(sep_origins[(2*x):(2*x+2)][::-1])\n for x in range(0, len(sep_origins)/2)]\n origins = '|'.join(temp)\n\n else:\n # element in list is CN_pattern characters.\n if len(origins[0]) == 1:\n origins = '|'.join(origins)\n # element in list is list/tuple of lng,lat.\n else:\n origins = [map(str, l) for l in origins]\n origins = '|'.join([','.join(l[::-1]) for l in origins])\n\n is_destinations_str = isinstance(destinations, str)\n is_destinations_list = isinstance(destinations, list)\n\n if not any([is_destinations_str, is_destinations_list]):\n raise ValueError('\"destinations\" must be str or list!')\n elif is_destinations_str:\n sep_destinations = sep_pattern.split(destinations)\n u_destinations = destinations.decode('utf-8')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n match = CN_pattern.search(u_destinations)\n if match:\n if len(sep_destinations) > 5:\n raise ValueError('\"destinations\" incorrect! upper limits is \\\n 5.')\n else:\n destinations = '|'.join(sep_destinations)\n else:\n if len(sep_destinations) > 10:\n raise ValueError('\"destinations\"incorrect! upper limits is 5.')\n else:\n temp = [','.join(sep_destinations[(2*x):(2*x+2)][::-1])\n for x in range(0, len(sep_destinations)/2)]\n destinations = '|'.join(temp)\n\n else:\n if len(destinations[0]) == 1:\n destinations = '|'.join(destinations)\n else:\n # first, map to str.\n destinations = [map(str, l) for l in destinations]\n destinations = '|'.join([','.join(l[::-1]) for l in destinations])\n\n kwargs.update({'server_name': 'direction', 'version': 'v1',\n 'subserver_name': 'routematrix', 'origins': origins,\n 'destinations': destinations})\n\n return client.get(kwargs)", "def directions_calc(self):\n \n # create route_dict, {'radio_button_name': {'geometries': list of coords,\n # 'values': list of values}}\n route_dict = self._selectInput()\n \n # generate lists with locations and values\n (start_layer_name,\n end_layer_name) = [x.objectName() for x in self.radio_buttons]\n \n locations_list = list(product(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n values_list = list(product(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n \n # If row-by-row in two-layer mode, then only zip the locations\n if all([button.isChecked() for button in self.radio_buttons]) and self.dlg.routing_twolayer_rowbyrow.isChecked():\n locations_list = list(zip(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n\n values_list = list(zip(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n\n # Add via point if specified\n route_via = None\n if self.dlg.routing_via_label.text() != 'Long,Lat':\n route_via = [float(x) for x in self.dlg.routing_via_label.text().split(\",\")]\n \n message_bar, progress_widget = progressbar.pushProgressBar(self.iface)\n \n responses = []\n delete_values = []\n for i, coords_tuple in enumerate(locations_list):\n if coords_tuple[0] == coords_tuple[-1]:\n 
# Skip when same location\n delete_values.append(i)\n continue\n if route_via:\n # add via coords\n coords_tuple = list(coords_tuple)\n coords_tuple.insert(1, route_via)\n \n # Update progress bar\n percent = (i/len(locations_list)) * 100\n message_bar.setValue(percent)\n \n # Make the request\n self.params['coordinates'] = convert.build_coords(coords_tuple)\n responses.append(self.client.request(self.url, self.params))\n \n # Delete entries in values_list where coords where the same\n values_list = [value for idx, value in enumerate(values_list) if idx not in delete_values]\n \n # Only proceed when there actual responses\n if responses: \n layer_out = self._addLine(responses, values_list)\n layer_out.updateExtents()\n \n QgsProject.instance().addMapLayer(layer_out)\n \n self.iface.messageBar().popWidget(progress_widget)", "def main(sources: gpd.GeoDataFrame, destinations: gpd.GeoDataFrame, router, annotations='duration', threads: int = 10, mts: int = 2000, keep_columns=None) -> write_stream:\n\tt = table_route(sources['geometry'], destinations['geometry'], router, annotations=annotations, max_table_size=mts, threads=threads)\n\n\tif keep_columns is not None:\n\t\tkeep_columns = keep_columns.split(',')\n\t\tfor k in keep_columns:\n\t\t\tif k not in sources and k not in destinations:\n\t\t\t\traise CommandError(f'column {k} not present in sources, nor in destinations. Available columns are: sources: {\", \".join(list(sources))}, destinations: {\", \".join(list(sources))}')\n\t\tsub_sources = sources[[k for k in keep_columns if k in sources]]\n\t\tsub_destinations = destinations[[k for k in keep_columns if k in destinations]]\n\n\tfor df in t:\n\t\tdf['geometry'] = utils.linestring_between(df.geometry, df.geometry_dest)\n\t\tdf.drop('geometry_dest', axis=1, inplace=True)\n\t\tif keep_columns is not None:\n\t\t\tdf = df.merge(sub_sources, left_on='source', right_index=True, suffixes=('', '_source'))\n\t\t\tdf = df.merge(sub_destinations, left_on='destination', right_index=True, suffixes=('', '_dest'))\n\n\t\tyield gpd.GeoDataFrame(df, crs=4326)", "def _route_chunk(data, host_url, annotations='duration', retries=10, extra_params=None):\n\t# offsets are used to make correct indice of the result dataframe\n\tsources, destinations, sources_offset, destinations_offset = data\n\tsources_count = len(sources)\n\tdestinations_count = len(destinations)\n\n\t# OSRM takes all points as one list, and then numbers of sources & dests in it\n\tall_points = sources + destinations\n\tencoded = encode_poly([(p.y, p.x) for p in all_points])\n\n\t# numerate sources & dests. sources come first\n\tsource_numbers = ';'.join(map(str, range(sources_count)))\n\tdestination_numbers = ';'.join(map(str,\n\t\trange(sources_count, sources_count + destinations_count)))\n\n\n\textra_params = extra_params or {}\n\tparams = {\n\t\t'sources': source_numbers,\n\t\t'destinations': destination_numbers,\n\t\t'generate_hints': 'false',\n\t\t'annotations': annotations,\n\t\t**extra_params\n\t}\n\n\tencoded_params = urllib.parse.quote_plus(urllib.parse.urlencode(params))\n\t# if we pass url and params separately to requests.get, it will make a malformed URL\n\tencoded_url = f'{host_url}/table/v1/driving/polyline({encoded})?{encoded_params}'\n\tresp = get_retry(encoded_url, {}, retries)\n\n\tif resp.status_code != 200:\n\t\traise RuntimeError(f'OSRM server responded with {resp.status_code} code. 
Content: {resp.content}')\n\n\tresp_data = resp.json()\n\tif resp_data.get('code', 'Ok') != 'Ok':\n\t\traise RuntimeError(f'OSRM server responded with error message: {resp_data[\"message\"]}')\n\n\t# if 'duration' is requested, then take resp_data['durations'], or resp_data['distances'] if distances.\n\t# also, 'duration,distance' might be requested, then take both and concatenate results (= join columns)\n\tresults = []\n\t\n\tfor key in annotations.split(','):\n\t\tdf = pd.DataFrame(resp_data[f'{key}s']).reset_index().rename(columns={'index': 'source'}).melt(id_vars='source', var_name='destination', value_name=key)\n\t\tdf[key] = df[key].astype(float)\n\t\tif len(results) > 0:\n\t\t\t# only append the data column\n\t\t\tresults.append(df[[key]])\n\t\telse:\n\t\t\tresults.append(df)\n\n\tresult_df = pd.concat(results, axis=1)\n\n\t# snapping distances\n\tresult_df['source_snap'] = result_df.source.map(pd.DataFrame(resp_data['sources'])['distance'])\n\tresult_df['destination_snap'] = result_df.destination.map(pd.DataFrame(resp_data['destinations'])['distance'])\n\n\t# instead of join/merge lookup\n\tresult_df['geometry'] = result_df['source'].map({i: g for i, g in enumerate(sources)})\n\tresult_df['geometry_dest'] = result_df['destination'].map({i: g for i, g in enumerate(destinations)})\n\n\t# shift back by the given offset\n\tresult_df['destination'] = result_df['destination'].astype(int) + destinations_offset\n\tresult_df['source'] = result_df['source'].astype(int) + sources_offset\n\treturn result_df", "def possible_routes(srcLat, srcLon, destLat, destLon, searchPreference, dateTime):\n\n dateTime = dateTime.split(\",\")\n\n routes = Db().get_best_route(srcLat, srcLon, destLat, destLon)\n try:\n best_routes = get_three_best_routes(routes, searchPreference, dateTime)\n except IndexError:\n best_routes = \"No Journey Found\"\n\n # Get the address for map display purposes\n try:\n for i in range(len(best_routes)):\n #address is a dataframe, hency the use of .loc\n address = Db().get_single_address(best_routes[i][2]).loc[0,\"Address\"]\n best_routes[i].append(address)\n except IndexError:\n # In case the source is outside Dublin\n best_routes = \"No Journey Found\"\n\n return json.dumps(best_routes, ensure_ascii=False)", "def route(vertices_resources, nets, machine, constraints,\n placements, allocations, algorithm, core_resource):\n if algorithm == \"default\":\n module = \"rig.place_and_route\"\n algorithm = \"default\"\n else:\n module = \"rig.place_and_route.route.{}\".format(algorithm)\n \n try:\n router = getattr(import_module(module), \"route\")\n except (ImportError, AttributeError):\n sys.stderr.write(\n \"Routing algorithm {} does not exist\\n\".format(algorithm))\n sys.exit(1)\n \n logger.info(\"Routing netlist using '{}'...\".format(algorithm))\n \n before = time.time()\n routes = router(vertices_resources, nets, machine, constraints,\n placements, allocations, core_resource)\n after = time.time()\n \n logger.info(\"Routed netlist in {:.2f}s\".format(after - before))\n \n return routes", "def roadSegments(locations, API_key=\"Avah46_M-gfFeQ3P1w09Qq1ElAV9ZEHFDm9b8JRCRa8qPP5uVn21hDqAPVJgV4i_\"): \n \n # Base URL\n uri = 'http://dev.virtualearth.net/' # Resource URL \n path = 'REST/v1/Routes?'\n \n \n # URL Parameters\n params = { 'wayPoint.0' : locations[0]+',Singapore',\n 'wayPoint.1' : locations[1]+',Singapore',\n 'routeAttributes':'routePath',\n 'key' : API_Key} # by default 'optimize' : 'time'} # this is by default\n \n url = uri+path\n\n results = requests.get(\n url,\n 
params = params\n ).json()# ['resourceSets']\n\n # Retrieving values\n statusCode = results['statusCode']\n if statusCode == 200:\n # print(statusCode)\n\n # TODO review the exceptions and modify these basic exception handlings\n try:\n travelDistance = results['resourceSets'][0]['resources'][0]['travelDistance']\n except:\n travelDistance = 0\n try:\n travelDuration = results['resourceSets'][0]['resources'][0]['travelDuration']\n except:\n travelDuration = 0\n try:\n travelDurationTraffic = results['resourceSets'][0]['resources'][0]['travelDurationTraffic']\n except:\n travelDurationTraffic = 0\n\n try:\n numberSegments = len(results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems'])\n except:\n numberSegments = 0\n try:\n itineraryItems = results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems']\n except:\n itineraryItems = 'No items'\n\n pathCoord = results['resourceSets'][0]['resources'][0]['routePath']['line']['coordinates']\n\n roadName = []\n travelDistances = []\n travelDurations = []\n maneuverType = []\n\n for seg in itineraryItems:\n for i in range(len(seg['details'])):\n # print(i)\n try:\n roadName.append(seg['details'][i]['names'])\n except:\n roadName.append(0)\n try:\n travelDistances.append(seg['travelDistance'])\n except:\n travelDistances.append(0)\n\n try:\n travelDurations.append(seg['travelDuration'])\n except:\n travelDurations.append(0)\n try:\n maneuverType.append(seg['details'][i]['maneuverType'])\n except:\n maneuverType.append(0)\n\n\n return statusCode,travelDistance,travelDuration,travelDurationTraffic,numberSegments,roadName, \\\n travelDistances, travelDurations, maneuverType, pathCoord\n\n else:\n print(\"Unsuccessful route calculation.\")", "def _get_route(self, inp):\n inp = self.connector_by_label(inp)\n inp_routes = []\n # get the routes starting at the input and the maximum route length\n max_length = 0\n for routes in self.routes[inp.label].values():\n for route in routes:\n inp_routes.append(route)\n if len(route) > max_length:\n max_length = len(route)\n\n # to find the outputs, the switches on possible routes are\n # successively measured. 
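The measure-and-eliminate loop that follows can be boiled down to a small standalone sketch. Everything below (the `Hop` record, `read_state`, the shape of `routes`) is a hypothetical simplification of the box/switch objects used in this class, not its real API; the point is only the caching so that each switch is read at most once.

```python
from dataclasses import dataclass
from typing import Callable

@dataclass
class Hop:
    switch: str                  # label of the switch this hop passes through
    expected: int                # switch state required for the route to stay live
    read_state: Callable[[], int]  # measures the physical switch once

def eliminate_routes(routes, k, measured):
    """Drop routes whose k-th hop disagrees with the measured switch state."""
    remaining = []
    for route in routes:
        hop = route[k]
        if hop.switch not in measured:
            # Measure lazily and cache, so each switch is read at most once
            measured[hop.switch] = hop.read_state()
        if hop.expected == measured[hop.switch]:
            remaining.append(route)
    return remaining
```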
Routes that do not fit the measured switch\n # states are eliminated, such that following switches do not have\n # to be read.\n outputs = [] # list for the outputs\n routes = inp_routes\n measured_switch_states = {} # store already measured switches\n for k in range(max_length):\n routes_left = [] # list to store remaining routes\n if len(routes) == 0:\n break\n for route in routes:\n # action only required if start or end of the\n # connection route[k] is a switch output\n if route[k].start.is_switch_output():\n # check if the switch is already measured\n if route[k].start.switch.label in measured_switch_states:\n state = measured_switch_states[\n route[k].start.switch.label]\n else:\n # measure switch and store result\n state = route[k].start.switch.mode()\n measured_switch_states[\n route[k].start.switch.label] = state\n # got to next route (such that this one is not added to\n # the remaining routes)\n if route[k].start.output_nr != state:\n continue\n # if a output is reached, add it to outputs\n if route[k].end.is_box_output():\n outputs.append(route[k].end.label)\n continue\n # continue analogously to above\n elif route[k].end.is_switch_output():\n if route[k].end.switch.label in measured_switch_states:\n state = measured_switch_states[\n route[k].end.switch.label]\n else:\n state = route[k].end.switch.mode()\n measured_switch_states[\n route[k].end.switch.label] = state\n if route[k].end.output_nr != state:\n continue\n # the route has not been eliminated,\n # add it to the remaining routes\n routes_left.append(route)\n # set routes to routes_left before starting the next iteration\n routes = routes_left\n\n # returns\n if len(outputs) == 0:\n return None\n elif len(outputs) == 1:\n return outputs[0]\n else:\n outputs = [out for out in self.outputs if out in outputs]\n return outputs", "def coalesce_helper(self, netmask, routes):\n # decrement netmask by 1\n netmask = netmask - 1\n routes_checked = []\n # iterate through every route combination\n for r1 in routes:\n for r2 in routes:\n # skip if routes are same, already checked, or source IPs arent matching\n if r1 == r2 or r2 in routes_checked or r1[SRC_IF] != r2[SRC_IF]:\n continue\n # calculate route 1's network prefix and pad until 32 bits\n r1_network = self.ip_to_binary(r1[MESG][NTWK])\n r1_netmask = self.ip_to_binary(r1[MESG][NMSK]).count('1')\n r1_prefix = self.pad_end(r1_network[:r1_netmask], '0', IP4_BIT_MAX)\n # calculate route 2's network prefix and pad until 32 bits\n r2_network = self.ip_to_binary(r2[MESG][NTWK])\n r2_netmask = self.ip_to_binary(r2[MESG][NMSK]).count('1')\n r2_prefix = self.pad_end(r2_network[:r2_netmask], '0', IP4_BIT_MAX)\n # check if same next-hop router\n if r1_prefix[:netmask] == r2_prefix[:netmask]:\n # check if other attributes are matching\n if (r1[MESG][LPRF] == r2[MESG][LPRF] and r1[MESG][ORIG] == r2[MESG][ORIG]\n and r1[MESG][APTH] == r2[MESG][APTH] and r1[MESG][SORG] == r2[MESG][SORG]\n and r1[MESG][SORG] == r2[MESG][SORG]):\n # remove unnecessary route\n self.routes.remove(r2)\n # update netmask for keeper route\n base_bits = self.pad_end('', '1', netmask)\n final_bits = self.pad_end(base_bits, '0', IP4_BIT_MAX)\n r1[MESG][NMSK] = self.binary_to_ip(final_bits)\n #Check if the new netmask is able to aggregate with another\n recheck_routes = []\n r1_nmsk = final_bits.count('1')\n for r in self.routes:\n r_nmsk = self.ip_to_binary(r[MESG][NMSK]).count('1')\n if r1_nmsk == r_nmsk:\n recheck_routes.append(r)\n recheck_routes.append(r1)\n self.coalesce_helper(r_nmsk, recheck_routes)\n # done 
checking, add to routes checked list\n routes_checked.append(r1)", "def processTradeRoutes(self):\n try:\n nextRound = self.currentRound+1\n resultslist = []\n for trID in self.tradeRoutes.keys():\n myTradeRoute = self.tradeRoutes[trID]\n (systemFromID, systemToID, tradeRouteType) = string.split(trID, '-')\n systemFrom = self.systems[systemFromID]\n systemTo = self.systems[systemToID]\n cancel = 0\n warpReq = 0\n # choose trade route type\n if tradeRouteType == 'GEN':\n # update what system sends based on what it makes\n myTradeRoute.AL = systemFrom.prodAL\n myTradeRoute.EC = systemFrom.prodEC\n myTradeRoute.IA = systemFrom.prodIA\n \n # check if trade route is adjacent or requires warp gate capacity\n if systemTo.id in systemFrom.warpGateSystems:\n warpReq = myTradeRoute.getWarpRequired()\n if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n cancel = 1\n elif systemTo.id not in systemFrom.connectedSystems:\n cancel = 1\n \n if (systemFrom.AL >= myTradeRoute.AL and\n systemFrom.EC >= myTradeRoute.EC and\n systemFrom.IA >= myTradeRoute.IA and \n cancel == 0):\n # process trade route\n systemFrom.AL -= myTradeRoute.AL\n systemFrom.EC -= myTradeRoute.EC\n systemFrom.IA -= myTradeRoute.IA\n systemTo.AL += myTradeRoute.AL\n systemTo.EC += myTradeRoute.EC\n systemTo.IA += myTradeRoute.IA\n # deduct properly if empires are different\n empireFrom = self.empires[systemFrom.myEmpireID]\n empireTo = self.empires[systemTo.myEmpireID]\n if empireFrom <> empireTo:\n empireFrom.AL -= myTradeRoute.AL\n empireFrom.EC -= myTradeRoute.EC\n empireFrom.IA -= myTradeRoute.IA\n empireTo.AL += myTradeRoute.AL\n empireTo.EC += myTradeRoute.EC\n empireTo.IA += myTradeRoute.IA\n \n if warpReq > 0:\n systemFrom.usedWGC += warpReq\n systemTo.usedWGC += warpReq\n \n # mail trade route completion\n resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n self.mailTradeInfo('completed', myTradeRoute, nextRound)\n else:\n cancel = 1\n \n # check if route should be cancelled\n if cancel == 1:\n resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n elif myTradeRoute.oneTime == 1:\n resultslist.append('one time trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n \n return str(resultslist)\n except:\n return 'galaxy->processTradeRoutes error'", "def make_connections(self):\n return\n destinations={}\n sources={}\n for gsq in self.gatesqs:\n destinations[self.local2global(gsq)]=set()\n sources[self.local2global(gsq)]=set()\n if rm.all_sols=='timeout':\n return\n for sol in self.all_sols:\n for sa in sol:\n start,indv,path,covered,end=sa\n destinations[self.local2global(start)].add((self.local2global(end),tuple(path)))\n sources[self.local2global(end)].add((self.local2global(start),tuple(path)))\n self.sources=sources\n self.destinations=destinations", "def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = 
routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def __init__(self, **kwargs):\r\n self.pair_type = kwargs[\"pair_type\"]\r\n self.origins = kwargs[\"origins\"]\r\n self.origin_id_field = kwargs[\"origin_id_field\"]\r\n self.destinations = kwargs[\"destinations\"]\r\n self.dest_id_field = kwargs[\"dest_id_field\"]\r\n self.network_data_source = kwargs[\"network_data_source\"]\r\n self.travel_mode = kwargs[\"travel_mode\"]\r\n self.time_units = kwargs[\"time_units\"]\r\n self.distance_units = kwargs[\"distance_units\"]\r\n self.time_of_day = kwargs[\"time_of_day\"]\r\n self.reverse_direction = kwargs[\"reverse_direction\"]\r\n self.scratch_folder = kwargs[\"scratch_folder\"]\r\n self.assigned_dest_field = kwargs[\"assigned_dest_field\"]\r\n self.od_pair_table = kwargs[\"od_pair_table\"]\r\n self.origin_transfer_fields = kwargs[\"origin_transfer_fields\"]\r\n self.destination_transfer_fields = kwargs[\"destination_transfer_fields\"]\r\n self.barriers = []\r\n if \"barriers\" in kwargs:\r\n self.barriers = kwargs[\"barriers\"]\r\n\r\n # Create a job ID and a folder for this job\r\n self._create_job_folder()\r\n\r\n # Setup the class logger. 
Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"RoutePairs\")\r\n\r\n # Get field objects for the origin and destination ID fields since we need this in multiple places\r\n self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]\r\n self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]\r\n\r\n # Set up other instance attributes\r\n self.is_service = helpers.is_nds_service(self.network_data_source)\r\n self.rt_solver = None\r\n self.solve_result = None\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.input_origins_layer_obj = None\r\n self.input_dests_layer_obj = None\r\n self.origin_unique_id_field_name = \"OriginUniqueID\"\r\n self.dest_unique_id_field_name = \"DestinationUniqueID\"\r\n self.od_pairs = None\r\n\r\n # Create a network dataset layer if needed\r\n if not self.is_service:\r\n self._make_nds_layer()\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputRoutes\": \"\",\r\n \"logFile\": self.log_file\r\n }", "def _build_routes(self, routes, allow_redundant_targets=True):\n routes = routes or ()\n joins = {}\n targets_seen = set()\n\n for route in routes:\n if isinstance(route, dict):\n source_label = route.get('source')\n target_label = route.get('target')\n field_label = route.get('field')\n symmetrical = route.get('symmetrical')\n else:\n warnings.warn('Routes are now defined as dicts',\n DeprecationWarning)\n source_label, target_label, field_label, symmetrical = route\n\n # get models\n source = self.get_model(source_label, local=False)\n target = self.get_model(target_label, local=False)\n\n field = None\n\n # get field\n if field_label:\n model_name, field_name = field_label.split('.', 1)\n model_name = model_name.lower()\n\n # determine which model the join field specified exists on\n if model_name == source.__name__.lower():\n field = self.get_field(field_name, source)\n elif model_name == target.__name__.lower():\n field = self.get_field(field_name, target)\n else:\n raise TypeError('model for join field, \"{0}\", '\n 'does not exist'.format(field_name))\n\n if isinstance(field, RelatedObject):\n field = field.field\n\n if not allow_redundant_targets:\n if target in targets_seen:\n raise ValueError('Model {0} cannot be the target of '\n 'more than one route in this list'\n .format(target_label))\n else:\n targets_seen.add(target)\n\n # The `joins` hash defines pairs which are explicitly joined\n # via the specified field. 
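For reference, a route entry in the dict form this method expects might look like the following; the model and field names are invented for illustration.

```python
# Hypothetical route definition: join Author to Book through Book.author,
# and register the reverse (Book -> Author) pair as well.
route = {
    "source": "app.Author",
    "target": "app.Book",
    "field": "book.author",
    "symmetrical": True,
}
# Conceptually, _build_routes turns this into:
# joins[(Author, Book)] = author_field
# joins[(Book, Author)] = author_field   # added because symmetrical is set
```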
If no field is defined, then the\n # join field is implied or does not matter; the route is reduced\n # to a straight lookup.\n joins[(source, target)] = field\n\n if symmetrical:\n if not allow_redundant_targets:\n if source in targets_seen:\n raise ValueError('Model {0} cannot be the target of '\n 'more than one route in this list'\n .format(source_label))\n else:\n targets_seen.add(source)\n\n joins[(target, source)] = field\n\n return joins", "def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. 
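The `shortest_route_to_node` bookkeeping here is a plain relaxation step: a candidate route replaces the stored one only if its weight is lower. A self-contained sketch, with `BIGVAL` standing in for the module's sentinel:

```python
BIGVAL = float("inf")

def record_route(shortest, node_index, step_name, steps, weight):
    # Keep only the cheapest route seen so far to each node index,
    # storing the aliased first step for quick display later.
    _, _, best_weight = shortest.get(node_index, ("", [], BIGVAL))
    if weight < best_weight:
        shortest[node_index] = (step_name, steps, weight)
```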
Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)", "def connect(src, *destinations, exclude=set(), fit=False):\n assignemnts = []\n for dst in destinations:\n assignemnts.extend(_connect(src, dst, exclude, fit))\n \n return assignemnts", "def compute_shortest_routes(start_ids, dest_ids, G):\n return ox.distance.shortest_path(G, start_ids, dest_ids, weight='travel_time', cpus=6)", "def naiveGlobalRouting(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path.append(src_slot)\n\n curr = src_slot\n len_x = src_slot.getLenX()\n len_y = src_slot.getLenY()\n\n # first go in X direction\n x_diff = curr.getPositionX() - dst_slot.getPositionX()\n if x_diff:\n dir = 'LEFT' if x_diff > 0 else 'RIGHT'\n for i in range(int(abs(x_diff/len_x))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n\n y_diff = curr.getPositionY() - dst_slot.getPositionY()\n if y_diff:\n dir = 'DOWN' if y_diff > 0 else 'UP'\n for i in range(int(abs(y_diff/len_y))):\n curr = self.slot_manager.createSlotForRouting(curr.getNeighborSlotName(dir))\n slot_path.append(curr)\n \n assert curr == dst_slot\n \n slot_path = slot_path[1:-1] # exclude the src and the dst\n logging.info(f'{e.name}: {self.v2s[e.src].getName()} -> {self.v2s[e.dst].getName()} : ' + ' '.join(s.getName() for s in slot_path))\n self.e_name2path[e.name] = slot_path", "def _find_routes(self, start_node, previous_nodes=None):\n if previous_nodes is None:\n previous_nodes = []\n\n routes = []\n for con in self.connections:\n if start_node == con.end:\n con.flip()\n if start_node == con.start:\n # if the connection ends in a box output,\n # add the connection (as a route of length 1)\n if con.end.is_box_output():\n routes.append([con])\n elif con.end.is_box_input():\n raise Exception(\"Route in connections detected, \"\n \"that ends at an input.\")\n elif con.end.is_switch_output():\n # check if there is conflict with previous nodes\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n # check orientation\n if con.end.switch.orientation == 1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n # Set orientation of the switch\n con.end.switch.orientation = -1\n # Add the node to the previous nodes and call the method\n # for the next node\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n next_step = self._find_routes(\n con.end.switch.input,\n previous_nodes=previous_nodes\n )\n # Merge the current connection with the resulting routes\n for route in next_step:\n routes.append([con] + route)\n # proceed the analogously for a switch input\n elif con.end.is_switch_input():\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n if con.end.switch.orientation == -1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n con.end.switch.orientation = 1\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n\n # 
continue with both outputs\n next_step0 = self._find_routes(\n con.end.switch.output[0],\n previous_nodes=previous_nodes\n )\n\n next_step1 = self._find_routes(\n con.end.switch.output[1],\n previous_nodes=previous_nodes\n )\n\n for route in next_step0:\n routes.append([con] + route)\n for route in next_step1:\n routes.append([con] + route)\n\n else:\n raise TypeError(f\"Node {con.end} not recognised\")\n\n return routes", "def work(params) -> Union[None, float]:\n try:\n # either HTTP or bindings\n if host:\n path = action if action == \"route\" else \"sources_to_targets\"\n params_str = delimit_tuple(\n tuple((delimit_tuple(x) for x in params)), delimiter=\";\"\n )\n route = requests.get(f\"{host}/{path}/v1/driving/{params_str}\")\n else:\n route = router.route(params) if action == \"route\" else None\n except (RuntimeError, requests.exceptions.BaseHTTPError):\n return None\n\n if (\n random() > 0.95\n ): # assume that large number of routes will be tested, only print sample in debug mode\n LOGGER.debug(f\"Calculated route between {params[0]} and {params[1]}\")\n\n if report:\n result = route.json()\n if action == \"route\":\n try:\n dist = sum([x[\"distance\"] for x in result[\"routes\"]])\n except KeyError:\n LOGGER.critical(\n f\"No route was found from {params[0]} to {params[1]}. \"\n f\"Try regenerating the locations or specify a more narrow bounding box.\"\n )\n return None\n else:\n dists = [\n inner[\"distance\"]\n for outer in route[\"sources_to_targets\"]\n for inner in outer\n ]\n dist: float = mean(filter(lambda x: x is not None, dists))\n\n return dist", "def bicycle_route(\n self,\n origin: List,\n destination: List,\n via: Optional[List[Tuple]] = None,\n origin_place_options: Optional[PlaceOptions] = None,\n destination_place_options: Optional[PlaceOptions] = None,\n via_place_options: Optional[PlaceOptions] = None,\n destination_waypoint_options: Optional[WayPointOptions] = None,\n via_waypoint_options: Optional[WayPointOptions] = None,\n departure_time: Optional[datetime] = None,\n routing_mode: str = \"fast\",\n alternatives: int = 0,\n units: str = \"metric\",\n lang: str = \"en-US\",\n return_results: Optional[List] = None,\n spans: Optional[List] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n exclude: Optional[List[str]] = None,\n ) -> RoutingResponse: # noqa E501\n resp = self.routing_api.route(\n transport_mode=\"bicycle\",\n origin=origin,\n destination=destination,\n via=via,\n origin_place_options=origin_place_options,\n destination_place_options=destination_place_options,\n via_place_options=via_place_options,\n destination_waypoint_options=destination_waypoint_options,\n via_waypoint_options=via_waypoint_options,\n departure_time=departure_time,\n routing_mode=routing_mode,\n alternatives=alternatives,\n units=units,\n lang=lang,\n return_results=return_results,\n spans=spans,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n exclude=exclude,\n )\n return RoutingResponse.new(resp.json())", "def _insert_stops_many_to_many(self):\r\n # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse\r\n o_data = {} # {Origin ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [self.origin_id_field, \"SHAPE@\"] + self.origin_transfer_fields\r\n ):\r\n o_data[row[0]] = row[1:]\r\n d_data = {} # {Destination ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # 
pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [self.dest_id_field, \"SHAPE@\"] + self.destination_transfer_fields\r\n ):\r\n d_data[row[0]] = row[1:]\r\n\r\n # Insert origins from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\"] + self.origin_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n origin_data = o_data[origin_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Origin from OD Pairs not found in inputs. Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 1, origin_id) + origin_data)\r\n\r\n # Insert destinations from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.dest_unique_id_field_name, \"SHAPE@\"] + self.destination_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n dest_data = d_data[dest_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Destination from OD Pairs not found in inputs. Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 2, dest_id) + dest_data)", "def get_routing_solution(self):\n G = self.base_digraph\n s1 = self.sources[0]\n s2 = self.sources[1]\n t1 = self.destinations[0]\n t2 = self.destinations[1]\n\n try:\n m = Model('routing')\n m.setParam('OutputFlag', False)\n\n # variables,\n # We have one variable per edge per session\n # e is the dict of dict for the variables\n e = {}\n r = {}\n for i in [1,2]:\n e[i] = {}\n r[i] = m.addVar()\n for u,v in G.edges():\n e[i][u,v] = m.addVar(lb=0)\n\n m.update()\n\n obj = quicksum(r.values())\n m.setObjective(obj, GRB.MAXIMIZE)\n\n # constraints\n # 1. conservations of flow at all intermediate nodes\n # 2. 
capacity constraints for each edge\n\n for u,v in G.edges():\n m.addConstr(e[1][u,v] + e[2][u,v] <= G[u][v]['capacity'])\n\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s2)) == r[2])\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s2)) == 0)\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s1)) == 0)\n m.addConstr(quicksum(e[1][u,v] for u,v in G.in_edges(t1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.in_edges(t2)) == r[2])\n\n for n in G.nodes():\n if n not in [s1, s2, t1, t2]:\n for i in [1, 2]:\n inflow = quicksum(e[i][u,v] for u,v in G.in_edges(n))\n outflow = quicksum(e[i][u,v] for u,v in G.out_edges(n))\n m.addConstr(inflow == outflow)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n for u, v in G.edges():\n G[u][v]['Routing'] = {}\n G[u][v]['Routing'][1] = e[1][u,v].x\n G[u][v]['Routing'][2] = e[2][u,v].x\n return (m.objVal, r[1].x, r[2].x)\n else:\n # no optimal solution was found\n print \"Something went wrong, no optimal solution obtained\"\n return None, None, None\n\n except GurobiError:\n print 'Error Report from Gurobi'", "def _calculate_emissions(self):\n parameters = self._get_pollutants_for_vehicle()\n\n self.routes = RouteSet()\n\n if \"routes\" not in self._json_data:\n log.debug(\"Error in returned JSON data from web service.\")\n log.debug(\"data: {}\".format(self._json_data))\n return\n\n # Create a \"set\" of Routes. The planner web service will\n # return 2-4 routes with different paths.\n for idx, r in enumerate(self._json_data[\"routes\"][\"features\"]):\n attributes = r.get(\"attributes\")\n route = Route(distance=attributes.get(\"Total_Meters\"),\n minutes=attributes.get(\"Total_Minutes\"),\n path=r.get(\"geometry\").get(\"paths\")[0], id=idx)\n self.routes.add(route)\n\n log.debug(\"Nr of routes: {}\".format(len(self.routes)))\n for i, route in enumerate(self.routes):\n # A list of x,y,z points that together represent the route\n path_coordinates = route.path\n distances = []\n\n # Nifty little trick to loop over 'path_coordinates',\n # but keep a reference to the 'prev' item to calculate the\n # distance between them\n iter_points = iter(path_coordinates)\n prev = next(iter_points)\n for point in path_coordinates:\n if not distances:\n # first point\n distances.append(Planner._get_distance_3d(prev, point) / 1000)\n else:\n distances.append(distances[-1] + Planner._get_distance_3d(prev, point) / 1000)\n\n point_slope = Planner._get_slope(prev, point)\n\n # Calculate emission for each pollutant the user has asked for\n for p in self._pollutants:\n parms = [x for x in parameters if x.pollutant.name.startswith(p)]\n calc_emission = self.get_emission(parms, point_slope)\n route.add_pollutant(p, calc_emission)\n\n prev = point\n\n route.add_distances(distances)", "def optimizedRoutePossibilities2(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tpath = find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tif couple[0] in graph[path[-1]]:\n\t\t\t\tyield path", "def main():\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = 
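The `compute_euclidean_distance_matrix` helper called above is not shown in this snippet; a sketch in the spirit of the OR-Tools routing examples would look like this. Distances are rounded to integers because OR-Tools expects integer arc costs, and `locations` is assumed to be a list of (x, y) pairs.

```python
import math

def compute_euclidean_distance_matrix(locations):
    """Pairwise integer Euclidean distances between (x, y) locations."""
    distances = {}
    for i, a in enumerate(locations):
        distances[i] = {}
        for j, b in enumerate(locations):
            # Zero on the diagonal; rounded Euclidean distance elsewhere
            distances[i][j] = 0 if i == j else int(math.hypot(a[0] - b[0], a[1] - b[1]))
    return distances
```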
pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)", "def get_origin_routes(self, routes):\n outroutes = []\n current_best = \"UNK\"\n # iterate through routes in given list updating the current best if a better\n # option is discovered\n for route in routes:\n if route[ORIG] == current_best:\n outroutes.append(route)\n elif (route[ORIG] == \"EGP\" and current_best != \"IGP\") or route[ORIG] == \"IGP\":\n # if the current best is worse than EGP and the current is EGP,\n # update best and start a new list\n # if the current best is worse than IGP and the current is IGP,\n # update best and start a new list\n current_best = route[ORIG]\n outroutes = [route]\n\n return outroutes", "def route(self, is_check_lanes=True):\n print 'route'\n # TODO: if too mant vtypes, better go through id_modes\n exectime_start = time.clock()\n\n net = self.get_scenario().net\n edges = net.edges\n vtypes = self.parent.vtypes\n\n ids_edges = []\n ids_trip = []\n costs = []\n for id_vtype in self.get_vtypes():\n id_mode = vtypes.ids_mode[id_vtype]\n\n # no routing for pedestrians\n if id_mode != net.modes.get_id_mode('pedestrian'):\n weights = edges.get_times(id_mode=id_mode,\n speed_max=vtypes.speeds_max[id_vtype],\n is_check_lanes=is_check_lanes)\n\n ids_trip_vtype = self.get_trips_for_vtype(id_vtype)\n # print ' id_vtype,id_mode',id_vtype,id_mode#,ids_trip_vtype\n # print ' weights',weights\n ids_edge_depart = self.ids_edge_depart[ids_trip_vtype]\n ids_edge_arrival = self.ids_edge_arrival[ids_trip_vtype]\n\n for id_trip, id_edge_depart, id_edge_arrival in zip(ids_trip_vtype, ids_edge_depart, ids_edge_arrival):\n cost, route = routing.get_mincostroute_edge2edge(id_edge_depart,\n id_edge_arrival,\n edges=edges,\n weights=weights)\n if len(route) > 0:\n ids_edges.append(route)\n ids_trip.append(id_trip)\n costs.append(cost)\n\n ids_route = self.routes.get_value().add_rows(ids_trip=ids_trip,\n ids_edges=ids_edges,\n costs=costs,\n )\n self.add_routes(ids_trip, ids_route)\n print ' exectime', time.clock()-exectime_start\n return ids_trip, ids_route", "def airline_connections(region):\n\n outgoing_routes = region.airlines\n ingoing_routes = []\n\n for other_region in regions.values():\n if other_region == region: continue\n\n for airline in other_region.airlines:\n if airline.destination == region:\n ingoing_routes.append(airline)\n\n return (outgoing_routes, ingoing_routes)", "def filter_relationships(self, srcip, routes):\n outroutes = []\n return outroutes", "def run_destination_reduction(self):\n s = self.sources\n t = self.destinations\n\n # get the union set of source edges\n s_edges = [self.out_edges(x) for x in s]\n s_edges_idx = [set(self.get_edges_indices(src)) for src in s_edges]\n S = set.union(*s_edges_idx)\n\n # now starting from the sources\n T_curr = [set(self.in_edges(x)) for x in t]\n T_curr_idx = [set(self.get_edges_indices(x)) for x in T_curr]\n T_union = set.union(*[set(x) for x in T_curr])\n T_union_idx = set.union(*T_curr_idx)\n self.dst_evolution_rec = [T_curr_idx]\n\n while not T_union_idx.issubset(S):\n # get the edges out of the max-ordered node\n tail_nodes = set([x[0] for x in T_union])\n tail_nodes = tail_nodes - set(s)\n v = max(tail_nodes, key=lambda p: self.node[p]['index'])\n\n E = set([edge for edge in T_union if edge[0] == v])\n E_curr = [x.intersection(E) for x in T_curr]\n\n T_next = []\n Uv = self.in_edges(v)\n for (Ex, Tx) in zip(E_curr, T_curr):\n if Ex:\n 
T_next.append((Tx - Ex).union(Uv))\n else:\n T_next.append(Tx)\n\n T_curr = T_next\n T_curr_idx = [set(self.get_edges_indices(x)) for x in T_curr]\n self.dst_evolution_rec.append(T_curr_idx)\n\n T_union = set.union(*[set(x) for x in T_curr])\n T_union_idx = set.union(*T_curr_idx)", "def aggregate_routes(self, route1, route2):\n # get current prefix length and subtract 1\n if route1[NMSK] != route2[NMSK] or route1[NTWK] != route2[NTWK]:\n p_len = self.get_prefix(route1) - 1\n else:\n p_len = self.get_prefix(route1)\n # convert new prefix length into netmask ip format (255.255.255.0, e.g.)\n # append (32 - (prefix length)) many 0's -> bin to ip that\n bin_nmsk = (\"1\" * p_len).ljust(32, \"0\")\n nmsk_ip = self.binary_to_ipv4(bin_nmsk)\n # make a new entry with ^^^^ NMSK and lower of the two IPs (compare_ip())\n lower_ip = None\n if self.compare_ip(route1[NTWK], route2[NTWK]) == 1:\n lower_ip = route2[NTWK]\n else:\n lower_ip = route1[NTWK]\n # create new route with original SRCE, DEST and new msg -> NTWK and NMSK\n copy_of_route = copy.deepcopy(route1)\n copy_of_route[NTWK] = lower_ip\n copy_of_route[NMSK] = nmsk_ip\n copy_of_route[\"CIDR\"] = p_len\n return copy_of_route", "def global_plan(\n world: carla.World, # pylint: disable=no-member\n origin: carla.Location, # pylint: disable=no-member\n destination: carla.Location, # pylint: disable=no-member\n) -> Tuple[Sequence[carla.Waypoint], Sequence[Any], float]: # pylint: disable=no-member\n try:\n from agents.navigation.global_route_planner import GlobalRoutePlanner # pylint: disable=import-error\n from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO # pylint: disable=import-error\n except ImportError:\n raise ImportError(\n \"Missing CARLA installation, \"\n \"make sure the environment variable CARLA_ROOT is provided \"\n \"and that the PythonAPI is `easy_install`ed\")\n\n # Setup global planner.\n grp_dao = GlobalRoutePlannerDAO(wmap=world.get_map(), sampling_resolution=1)\n grp = GlobalRoutePlanner(grp_dao)\n grp.setup()\n # Generate plan.\n waypoints, roadoptions = zip(*grp.trace_route(origin, destination))\n # Accummulate pairwise distance.\n distances = [0.0]\n for i in range(1, len(waypoints)):\n loc_tm1 = waypoints[i - 1].transform.location\n loc_tm1 = np.asarray([loc_tm1.x, loc_tm1.y, loc_tm1.z])\n loc_t = waypoints[i].transform.location\n loc_t = np.asarray([loc_t.x, loc_t.y, loc_t.z])\n distances.append(np.linalg.norm(loc_tm1 - loc_t))\n\n return waypoints, roadoptions, distances", "def car_route(\n self,\n origin: List,\n destination: List,\n via: Optional[List[Tuple]] = None,\n origin_place_options: Optional[PlaceOptions] = None,\n destination_place_options: Optional[PlaceOptions] = None,\n via_place_options: Optional[PlaceOptions] = None,\n destination_waypoint_options: Optional[WayPointOptions] = None,\n via_waypoint_options: Optional[WayPointOptions] = None,\n departure_time: Optional[datetime] = None,\n routing_mode: str = \"fast\",\n alternatives: int = 0,\n units: str = \"metric\",\n lang: str = \"en-US\",\n return_results: Optional[List] = None,\n spans: Optional[List] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n exclude: Optional[List[str]] = None,\n ) -> RoutingResponse: # noqa: E501\n resp = self.routing_api.route(\n transport_mode=\"car\",\n origin=origin,\n destination=destination,\n via=via,\n origin_place_options=origin_place_options,\n destination_place_options=destination_place_options,\n 
via_place_options=via_place_options,\n destination_waypoint_options=destination_waypoint_options,\n via_waypoint_options=via_waypoint_options,\n departure_time=departure_time,\n routing_mode=routing_mode,\n alternatives=alternatives,\n units=units,\n lang=lang,\n return_results=return_results,\n spans=spans,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n exclude=exclude,\n )\n return RoutingResponse.new(resp.json())", "def route_layout(self):\n self.route_pins()\n self.route_internal()\n self.route_supplies()", "def coalesce(self):\n routes = []\n # loop through every possible subnet mask\n for netmask in range(0, IP4_BIT_MAX):\n routes.clear() # clear and restart each time\n for r in self.routes:\n r_nmsk = self.ip_to_binary(r[MESG][NMSK]).count('1')\n if netmask == r_nmsk:\n routes.append(r)\n # attempt to coalesce final route choices\n self.coalesce_helper(netmask, routes)", "def table_route(sources, destinations, router, max_table_size=2_000, threads=10, annotations='duration', pbar=True, cache_name=None, executor='process', extra_params=None):\n\timport re\n\tif router not in CONFIG['routers'] and not re.match(r'^https?\\://.*', router):\n\t\traise ValueError(f'router must be a key in erde config routers section, or a URL. got: \\'{router}\\'')\n\n\tsources_indices = {i: v for i, v in enumerate(_index(sources))}\n\tdestinations_indices = {i: v for i, v in enumerate(_index(destinations))}\n\tsources = _tolist(sources, 'sources')\n\tdestinations = _tolist(destinations, 'destinations')\n\n\tann_set = set(annotations.split(','))\n\tif ann_set & {'duration', 'distance'} != ann_set:\n\t\traise ValueError(\"annotations must be one of these: 'duration', 'distance', or 'duration,distance' (order does not matter)\")\n\n\tmts = max_table_size\n\thost_url = CONFIG['routers'].get(router, router)\n\n\ttotal_rows, total_cols = rows, cols = len(sources), len(destinations)\n\tif cols * rows > mts:\n\t\tif rows < cols:\n\t\t\t# split by sources\n\t\t\trows = max(mts // cols, 1) # max(,1) beacuse if 1 row does not fit, then at least split by 1 row\n\t\t\tcols = min(mts, cols)\n\t\telse:\n\t\t\tcols = max(mts // rows, 1)\n\t\t\trows = min(mts, rows)\n\n\t_route_partial = partial(_route_chunk, host_url=host_url, annotations=annotations, extra_params=extra_params)\n\n\twith tqdm(total=total_rows * total_cols, desc='Table routing', disable=(not pbar)) as t, ThreadPoolExecutor(max_workers=threads) as tpe:\n\t\tcombos = list(product(range(0, total_rows, rows), range(0, total_cols, cols)))\n\t\tslices = ((sources[s:s + rows], destinations[d:d + cols], s, d) for s, d in combos)\n\n\t\t# process/thread/an instance of executor\n\t\tfor df in tpe.map(_route_partial, slices):\n\t\t\tdf['source'] = df['source'].map(sources_indices)\n\t\t\tdf['destination'] = df['destination'].map(destinations_indices)\n\t\t\tyield df\n\t\t\tt.update(len(df))", "def routes_with_criteria(self, src, target, criteria):\n\n # BFS\n routes = []\n q = deque() # <- [ ... 
] <-\n stops = 0\n distance = 0 # not true for this app, but it works out in the conditional check\n q.append((src, stops, distance, [src]))\n\n while q:\n # this city, stops to this city, distance to this city, route to this city\n city, stops, distance, route = q.popleft()\n if target == city and distance: # no self-loops!\n r = list(route)\n routes.append(r)\n for dest, cost in self.G[city].items():\n if criteria(stops + 1, distance + cost):\n new_route = list(route)\n new_route.append(dest)\n q.append((dest, stops + 1, distance + cost, new_route))\n return routes", "def run_router(self, detour_scale):\n\n # Double check source and taget are not same node, if so, we are done!\n for k,v in self.rg.map.items():\n if v.source and v.target:\n debug.error(\"Grid cell is source and target! {}\".format(k))\n return False\n \n # returns the path in tracks\n (path,cost) = self.rg.route(detour_scale)\n if path:\n debug.info(1,\"Found path: cost={0} \".format(cost))\n debug.info(1,str(path))\n\n self.paths.append(path)\n self.add_route(path)\n \n path_set = grid_utils.flatten_set(path)\n self.path_blockages.append(path_set)\n else:\n self.write_debug_gds(\"failed_route.gds\")\n # clean up so we can try a reroute\n self.rg.reinit()\n return False\n return True", "def read_routes(routes_source: TextIO, airports: AirportDict) -> RouteDict:\n #RouteDict = Dict[str, Set[str]]\n routes_list = routes_source.readlines()\n d = {}\n src_index = ROUTE_DATA_INDEXES['Source airport']\n dst_index = ROUTE_DATA_INDEXES['Destination airport']\n \n for i in range(len(routes_list)):\n source_airport = get_routes_information(routes_list[i], src_index)\n destination_airport = get_routes_information(routes_list[i], dst_index)\n \n if source_airport in airports and destination_airport in airports\\\n and source_airport not in d:\n \n routes = set() # it's a set\n routes.add(destination_airport)\n d[source_airport] = routes\n \n elif source_airport in airports and destination_airport in \\\n airports and source_airport in d:\n d[source_airport].add(destination_airport)\n return d", "def chunk_user_route(detail_of_trip):\n\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n # since I can't get javascript to load, here's a hacky way of loading json\n # that details the route based on the user's point A and point B\n # detail_of_trip = api.directions(\n # (40.760350, -73.976209),\n # (40.754009, -73.981097),\n # mode=\"walking\"\n # )[0]\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n # now that I have javascript sending over the json, load json that details\n # the route based on the user's point A and point B\n\n # -------------- This section is for interpolation/splitting using shapely\n first = True # to see if this is the start position for the entire route\n line_points = [] # stores all the points to the route based on dict passed\n\n for leg in detail_of_trip['legs']:\n for step in leg['steps']:\n # Create a list of two element lists that represent points along the\n # route. via google. line_points = [ [lat1, lng1], [lat2, lng2],...]\n # Only add the starting point the first time. 
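Once the route is a shapely `LineString`, the interpolation/splitting step the comments mention reduces to linear referencing. A minimal sketch, where the chunk count is an assumed parameter rather than anything fixed by this function:

```python
from shapely.geometry import LineString

def split_route(route_line: LineString, n_chunks: int):
    # Return n_chunks + 1 evenly spaced points along the line,
    # including both endpoints, via shapely's linear referencing.
    step = route_line.length / n_chunks
    return [route_line.interpolate(step * i) for i in range(n_chunks + 1)]
```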
Every other iteration\n # we will just tack on the end points to our line.\n if first:\n line_points.append([step['start_location']['lat'], step['start_location']['lng']])\n first = False\n line_points.append([step['end_location']['lat'], step['end_location']['lng']])\n\n # Now load those points into a geometry, here shapely's LineString type.\n route_line = LineString(line_points)\n return (route_line, line_points)", "def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover", "def savings2routes(self,r1,r2):\n newRoute = VRP_Route(r1.route+r2.route)\n newRoute.update_route(self.vrpdata) # compute distance, quantity for newRoute, check whether valid\n if newRoute.tourValid:\n return r1.distance + r2.distance - newRoute.distance\n return -1", "def buildRouteLinkSequence(self):\n arcpy.env.workspace = PublicTransit.RTD_PATH\n linkSeq = arcpy.SearchCursor(PublicTransit.BUS_ROUTE_TRAVERSAL_EDGES, \"\", \"\", \"\", \"RouteId A; Cumul_Distance A\")\n prevRouteId = -1\n for e in linkSeq:\n if (e.RouteId in self.routeXref):\n routePattern = self.routeXref[e.RouteId]\n if (routePattern in self.transitRoutes): #not all routes are in RTD, so check\n if (prevRouteId != e.RouteId):\n self.transitRoutes[routePattern].linkSequence = []\n self.transitRoutes[routePattern].linkSequence.append(e.SourceOID)\n prevRouteId = e.RouteId\n del e\n del linkSeq", "def set_dests(self, increment=1000000):\n modified = 0\n pb = Progress(len(self.graph.routers), 'Setting destinations', increment=increment, callback=lambda: 'Modified {:,d}'.format(modified))\n for router in pb.iterator(self.graph.routers.values()):\n for interface in router.interfaces:\n # Copy destination ASes to avoid messing up original\n idests: Set[int] = set(interface.dests)\n # If last hop, interface has non-IXP AS mapping, and interface has destination ASes\n if not router.succ and idests and interface.asn > 0:\n origin = interface.asn\n # Interface must have exactly 2 destination ASes and one must be its origin AS\n if len(idests) == 2 and origin in idests:\n other_asn = peek(idests - {origin}) # other AS\n # If other AS is likely customer of interface origin AS, and it's a small AS\n if self.bgp.conesize[origin] > self.bgp.conesize[other_asn] and self.bgp.conesize[other_asn] < 5:\n idests.discard(origin)\n modified += 1\n # Add all remaining destination ASes to the router destination AS set\n router.dests.update(idests)", "def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', 
'41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def matrix(\n self,\n origins: List[Dict],\n region_definition: Union[\n CircleRegion, BoundingBoxRegion, PolygonRegion, AutoCircleRegion, WorldRegion\n ],\n async_req: bool = False,\n destinations: Optional[List[Dict]] = None,\n profile: Optional[str] = None,\n departure_time: Optional[Union[datetime, str]] = None,\n routing_mode: Optional[str] = None,\n transport_mode: Optional[str] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n truck: Optional[Truck] = None,\n matrix_attributes: Optional[List[str]] = None,\n ) -> MatrixRoutingResponse: # noqa E501\n if profile and type(region_definition) != WorldRegion:\n raise ValueError(\"profile must be used with WorldRegion only.\")\n if truck and transport_mode != \"truck\":\n raise ValueError(\"Truck option must be used when transport_mode is truck\")\n if async_req is True:\n resp = self.matrix_routing_api.matrix_route_async(\n origins=origins,\n region_definition=region_definition,\n destinations=destinations,\n profile=profile,\n departure_time=departure_time,\n routing_mode=routing_mode,\n transport_mode=transport_mode,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n truck=truck,\n matrix_attributes=matrix_attributes,\n )\n status_url = resp[\"statusUrl\"]\n while True:\n resp_status = self.matrix_routing_api.get_async_matrix_route_status(status_url)\n if resp_status.status_code == 200 and resp_status.json().get(\"error\"):\n raise ApiError(resp_status)\n elif resp_status.status_code == 303:\n result_url = resp_status.json()[\"resultUrl\"]\n break\n elif resp_status.status_code in (401, 403, 404, 500):\n raise ApiError(resp_status)\n sleep(2)\n result = self.matrix_routing_api.get_async_matrix_route_results(result_url)\n return MatrixRoutingResponse.new(result)\n else:\n resp = self.matrix_routing_api.matrix_route(\n origins=origins,\n region_definition=region_definition,\n destinations=destinations,\n profile=profile,\n departure_time=departure_time,\n routing_mode=routing_mode,\n transport_mode=transport_mode,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n truck=truck,\n matrix_attributes=matrix_attributes,\n )\n return MatrixRoutingResponse.new(resp)", "def _process_connections(self, connections):\n # create connection\n for con in connections:\n self._add_connection(con)\n\n for inp_lab, inp in 
self.inputs.items():\n # use self._find_routes() to find routes from input inp\n routes_inp = self._find_routes(inp)\n # create routes\n for route in routes_inp:\n self._add_route(route)\n # sort the routes dictionary\n self._sort_routes()", "def findRoute(self, x1, y1, x2, y2):\r\n\r\n\t\t# Check to see if the start and end node are the same\r\n\t\tif x1 == x2 and y1 == y2:\r\n\t\t\treturn [(x1, y1)]\r\n\r\n\t\troot_node = DijkstraNode(x1, y1, None, 0)\r\n\t\troot_node.neighbours = self.getNeighbours(x1, y1)\r\n\r\n\t\t# Create a dictionary to store all of the nodes\r\n\t\tall_nodes = {(x1, y1): root_node}\r\n\t\t# If no starting place is found return nothing\r\n\t\tif len(root_node.neighbours) == 0:\r\n\t\t\treturn []\r\n\t\tcurrent_node = root_node\r\n\t\twhile (x2, y2) not in all_nodes:\r\n\r\n\t\t\t# If the algorithm hasn't found the target node and cannot explore further then return empty path\r\n\t\t\tif current_node is None:\r\n\t\t\t\treturn []\r\n\r\n\t\t\tcurrent_node.neighbours = self.getNeighbours(current_node.x, current_node.y)\r\n\r\n\t\t\t# The distance from the root node through the current node to the neighbour\r\n\t\t\tcurrent_neighbour_dist = current_node.dist + 1\r\n\r\n\t\t\tfor neighbour in current_node.neighbours:\r\n\t\t\t\tif neighbour in all_nodes:\r\n\t\t\t\t\tneighbour_node = all_nodes[neighbour]\r\n\t\t\t\t\tif current_neighbour_dist < neighbour_node.dist:\r\n\t\t\t\t\t\t# The new best path is through the current node\r\n\t\t\t\t\t\tneighbour_node.parent = current_node\r\n\t\t\t\t\t\tneighbour_node.dist = current_neighbour_dist\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Add a new node if it doesn't exist within the currently explored nodes\r\n\t\t\t\t\tall_nodes[neighbour] = DijkstraNode(neighbour[0], neighbour[1], current_node, current_neighbour_dist)\r\n\r\n\t\t\t# Mark the current node as being explored as you have checked all the neighbours\r\n\t\t\tcurrent_node.explored = True\r\n\r\n\t\t\t# Gets a list of all of the unexplored nodes to check for the next node to explore\r\n\t\t\tunexplored_nodes = [node for _, node in all_nodes.items() if not node.explored]\r\n\r\n\t\t\tif len(unexplored_nodes) > 0:\r\n\t\t\t\t# Go to the next node with the smallest distance that hasn't been explored\r\n\t\t\t\tcurrent_node = min(unexplored_nodes, key=lambda node: node.dist)\r\n\t\t\telse:\r\n\t\t\t\tcurrent_node = None\r\n\r\n\t\t# Make your way back from the target node\r\n\t\tcurrent_node = all_nodes[(x2, y2)]\r\n\t\t# Initialise a list to hold the path going from the target to the root\r\n\t\treversed_path = []\r\n\t\t# This will end when the root node tries to travel to a None node\r\n\t\twhile current_node is not None:\r\n\t\t\t# Add the current node to the list\r\n\t\t\treversed_path.append((current_node.x, current_node.y))\r\n\t\t\t# Travel to the parent node\r\n\t\t\tcurrent_node = current_node.parent\r\n\t\t\t# current_node will be None at the root because the parent of the root node is 'None'\r\n\r\n\t\t# Return the list in the correct order\r\n\t\treturn list(reversed(reversed_path))", "def __routes(self, with_return):\n nonzeo_pois = list(filter(None, self.pois.keys()))\n\n for path in itertools.permutations(nonzeo_pois):\n steps = self.poi_distance(0, path[0])\n for i, j in zip(path, path[1:]):\n steps += self.poi_distance(i, j)\n if with_return:\n steps += self.poi_distance(path[-1], 0)\n yield steps", "def lookup_routes(self, daddr):\n outroutes = []\n binary_of_dest = self.ip_to_binary(daddr)\n best_cidr = float('-inf')\n\n for r in self.routes:\n # convert network 
and netmask to binary for longest prefix matching\n binary_of_network = self.ip_to_binary(r[MESG][NTWK])\n cidr_of_netmask = self.ip_to_binary(r[MESG][NMSK]).count('1')\n # use subnet mask to get the prefix\n dst = binary_of_dest[:cidr_of_netmask]\n ntk = binary_of_network[:cidr_of_netmask]\n # matching prefixes?\n if dst == ntk:\n # found better match. clear and start over with just this route\n if best_cidr < cidr_of_netmask:\n best_cidr = cidr_of_netmask\n outroutes.clear()\n outroutes.append(r)\n # 1 to 1 match, add route to list\n if best_cidr == cidr_of_netmask:\n outroutes.append(r)\n\n return outroutes", "def compute_distances_for_sources_and_mics(self,\n source_points):\n points = {\"m1\": self.m1, \"m2\": self.m2}\n points.update(dict([(\"s\"+str(i+1), xy)\n for (i, xy) in enumerate(source_points)]))\n distances = {}\n\n for point_1, xy1 in points.items():\n for point_2, xy2 in points.items():\n distances[point_1+point_2] = dst.euclidean(xy1, xy2)\n\n return distances", "def two_user_route_statistics(i,j, source_data, destination_data, source_destination_data, delta=1.2):\n\toccupancy_ratio = 0.0\n\tminimum_distance_so_far = 0.0\n\tcommon_travel_distance = 0.0\n\n\ttry:\n\t\tif source_destination_data[j][i] + source_data[i][j] <= 1.2*source_destination_data[i][i] and source_destination_data[j][i] + destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[j][i] + source_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\tsecond = ((source_destination_data[j][i] + destination_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\toccupancy_ratio = (first+second)/2\n\t\t\tcommon_travel_distance = source_destination_data[j][i]\n\t\t\tminimum_distance_so_far = source_data[i][j] + source_destination_data[j][i] + destination_data[i][j]\n\n\t\tif source_destination_data[i][j] + destination_data[j][i] <= 1.2*source_destination_data[i][i] and source_destination_data[i][j] + source_data[j][i] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[i][j] + destination_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\t\t\n\t\t\tsecond = ((source_destination_data[i][j] + source_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\n\t\t\ttotal_distance = source_data[j][i] + source_destination_data[i][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[i][j]+source_destination_data[j][j]+destination_data[j][i] <= 1.2*source_destination_data[i][i]:\n\t\t\tfirst = (1)\n\t\t\tsecond = (source_destination_data[j][j]/(source_data[i][j]+source_destination_data[j][j]+destination_data[j][i]))\n\n\t\t\ttotal_distance = source_data[i][j] + source_destination_data[j][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[j][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[j][i]+source_destination_data[i][i]+destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = (source_destination_data[i][i]/(source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]))\n\t\t\tsecond = (1)\n\n\t\t\ttotal_distance 
= source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][i]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\texcept Exception as e:\n\t\toccupancy_ratio = 1.0\n\t\tminimum_distance_so_far = 0.0\n\t\tcommon_travel_distance = 0.0\n\n\n\treturn occupancy_ratio, common_travel_distance, minimum_distance_so_far", "def route_output(self):\n self.Z_position = vector(self.width, self.A_position.y)\n # route nmos drain to Z\n nmos_contact = (self.nmos_position1 \n + self.nmos.active_contact_positions[1] \n + self.nmos.active_contact.second_layer_position\n + vector(self.nmos.active_contact.second_layer_width,\n 0).scale(0.5, 0))\n mid = [nmos_contact.x, self.A_position.y]\n self.add_path(\"metal1\", [self.Z_position, mid, nmos_contact])\n\n for i in range(len(self.pmos.poly_positions) + 1):\n if i % 2 == 1:\n # pmos2 drain to Z\n pmos_contact = (self.pmos_position2\n + self.pmos.active_contact_positions[i]\n + self.pmos.active_contact.second_layer_position.scale(1, 0)\n + vector(self.pmos.active_contact.second_layer_width,\n 0).scale(0.5, 0))\n offset = pmos_contact - vector(0.5 * self.m1m2_via.width, 0)\n self.add_via(layers=(\"metal1\", \"via1\", \"metal2\"),\n offset=offset)\n mid = [pmos_contact.x, self.Z_position.y]\n self.add_wire((\"metal1\", \"via1\", \"metal2\"),\n [self.Z_position, mid, pmos_contact])\n\n self.add_layout_pin(text=\"Z\",\n layer=\"metal1\",\n offset=mid - vector(0,0.5*drc[\"minwidth_metal1\"]),\n width=self.Z_position.x-mid[0],\n height=drc[\"minwidth_metal1\"])", "def get_routes(duthost1, duthost2, collect, mg_facts):\n dut1_routes_all = get_dut_routes(duthost1, collect, mg_facts)\n dut2_routes_all = get_dut_routes(duthost2, collect, mg_facts)\n dut_1_diff_routes = list(set(dut1_routes_all).difference(set(dut2_routes_all)))\n dut_2_diff_routes = list(set(dut2_routes_all).difference(set(dut1_routes_all)))\n res1 = natsorted([route for route in dut_1_diff_routes if\n ipaddress.ip_network(route).subnet_of(ipaddress.ip_network(SUBNET_CHECK))])\n res2 = natsorted([route for route in dut_2_diff_routes if\n ipaddress.ip_network(route).subnet_of(ipaddress.ip_network(SUBNET_CHECK))])\n return {duthost1.hostname: res1, duthost2.hostname: res2}", "def pedestrian_route(\n self,\n origin: List,\n destination: List,\n via: Optional[List[Tuple]] = None,\n origin_place_options: Optional[PlaceOptions] = None,\n destination_place_options: Optional[PlaceOptions] = None,\n via_place_options: Optional[PlaceOptions] = None,\n destination_waypoint_options: Optional[WayPointOptions] = None,\n via_waypoint_options: Optional[WayPointOptions] = None,\n departure_time: Optional[datetime] = None,\n routing_mode: str = \"fast\",\n alternatives: int = 0,\n units: str = \"metric\",\n lang: str = \"en-US\",\n return_results: Optional[List] = None,\n spans: Optional[List] = None,\n avoid_features: Optional[List[str]] = None,\n avoid_areas: Optional[List[AvoidBoundingBox]] = None,\n exclude: Optional[List[str]] = None,\n ) -> RoutingResponse: # noqa E501\n resp = self.routing_api.route(\n transport_mode=\"pedestrian\",\n origin=origin,\n destination=destination,\n via=via,\n origin_place_options=origin_place_options,\n destination_place_options=destination_place_options,\n via_place_options=via_place_options,\n destination_waypoint_options=destination_waypoint_options,\n via_waypoint_options=via_waypoint_options,\n 
departure_time=departure_time,\n routing_mode=routing_mode,\n alternatives=alternatives,\n units=units,\n lang=lang,\n return_results=return_results,\n spans=spans,\n avoid_features=avoid_features,\n avoid_areas=avoid_areas,\n exclude=exclude,\n )\n return RoutingResponse.new(resp.json())", "def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches\r\n # Select the inputs to process\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._select_inputs_one_to_one(chunk_definition)\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._get_od_pairs_for_chunk(chunk_definition)\r\n self._select_inputs_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n # Initialize the Route solver object\r\n self.initialize_rt_solver()\r\n self._add_unique_id_fields()\r\n\r\n # Insert the origins and destinations\r\n self.logger.debug(f\"Route solver fields transferred from Origins: {self.origin_transfer_fields}\")\r\n self.logger.debug(f\"Route solver fields transferred from Destinations: {self.destination_transfer_fields}\")\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._insert_stops_one_to_one()\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._insert_stops_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0:\r\n # There were no valid destinations for this set of origins\r\n self.logger.debug(\"No valid destinations for this set of origins. Skipping Route calculation.\")\r\n return\r\n\r\n # Load barriers\r\n # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of\r\n # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the\r\n # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers,\r\n # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal\r\n # solves. 
However, since barriers is likely an unusual case, deal with this only if it becomes a problem.\r\n for barrier_fc in self.barriers:\r\n self.logger.debug(f\"Loading barriers feature class {barrier_fc}...\")\r\n shape_type = arcpy.Describe(barrier_fc).shapeType\r\n if shape_type == \"Polygon\":\r\n class_type = arcpy.nax.RouteInputDataType.PolygonBarriers\r\n elif shape_type == \"Polyline\":\r\n class_type = arcpy.nax.RouteInputDataType.LineBarriers\r\n elif shape_type == \"Point\":\r\n class_type = arcpy.nax.RouteInputDataType.PointBarriers\r\n else:\r\n self.logger.warning(\r\n f\"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored.\"\r\n )\r\n continue\r\n barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True)\r\n self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True)\r\n\r\n # Solve the Route analysis\r\n self.logger.debug(\"Solving Route...\")\r\n solve_start = time.time()\r\n self.solve_result = self.rt_solver.solve()\r\n solve_end = time.time()\r\n self.logger.debug(f\"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.\")\r\n\r\n # Handle solve messages\r\n solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)]\r\n for msg in solve_msgs:\r\n self.logger.debug(msg)\r\n\r\n # Update the result dictionary\r\n self.job_result[\"solveMessages\"] = solve_msgs\r\n if not self.solve_result.solveSucceeded:\r\n self.logger.debug(\"Solve failed.\")\r\n return\r\n self.logger.debug(\"Solve succeeded.\")\r\n self.job_result[\"solveSucceeded\"] = True\r\n\r\n # Save output\r\n self._export_to_feature_class(chunk_definition)\r\n\r\n self.logger.debug(\"Finished calculating Route.\")", "def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n # split netmask and daddr by the IP dots\n netmask_split = entry[NMSK].split('.')\n daddr_split = daddr.split('.')\n\n # bitwise ANd the netmask with the daddr\n result = []\n for i in range(0, len(netmask_split)):\n result.append(str(int(netmask_split[i]) & int(daddr_split[i])))\n \n # compare ANDed result to the network\n is_valid = True\n network_split = entry[NTWK].split('.')\n for i in range(0, len(network_split)):\n if result[i] != network_split[i]:\n is_valid = False\n break\n if is_valid:\n outroutes.append(entry)\n\n if len(outroutes) == 0:\n return outroutes\n\n # reform IP address\n outroutes.sort(key=lambda r: int(r[NMSK].replace('.', '')), reverse=True)\n longest_matching_prefix = int(outroutes[0][NMSK].replace('.', ''))\n outroutes = list(filter(lambda r: int(r[NMSK].replace('.', '')) == longest_matching_prefix, outroutes))\n return outroutes", "def FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, TimeTableList, TimeTableIndex, StationHourIndex, PathInfo=[]):\r\n PathInfo = PathInfo + [ConnectionInfo]\r\n\r\n if Cond.IfTestRouteSearch:\r\n \tStations = GetAllStationsOfRoute(PathInfo)\r\n \tprint \"\\nStations of Path (%s): ++++++++\" % len(Stations)\r\n \tprint Stations\r\n \tprint \"Route Information:\"\r\n \tprint PrettyStringRouteInfo(PathInfo)\r\n\r\n # check successful termination\r\n # if len(PathInfo) > 1 and ConnectionInfo[ConnInfoInd['station_to']] == EndStation: \r\n if CheckIfPathTerminatesSuccessfully(ConnectionInfo, PathInfo, RouteConditions, EndStation):\r\n \tif Cond.IfTestRouteSearch:\r\n \t\tprint \"End Station is reached!\"\t\r\n \treturn [PathInfo]\r\n\r\n # current (this iteration's) path length\r\n CurPathLen = len(PathInfo)\r\n\r\n # get next connections\r\n 
start_station = ConnectionInfo[ConnInfoInd['station_to']]\r\n departure_hour = ConnectionInfo[ConnInfoInd['arrival_hour']] \t\r\n departure_min = ConnectionInfo[ConnInfoInd['arrival_min']]\r\n\r\n # TEST BU2019\r\n if False:\r\n\t print 'ConnInfoInd: ' + str(ConnectionInfo)\r\n\t print 'start_station,departure_hour,departure_min: %s, %s, %s' % (start_station, departure_hour, departure_min)\r\n\t time.sleep(0.1)\r\n \r\n # mandatory conditions\r\n WaitLimit = RouteConditions[Cond.MaxWaitingTimeAtStation][0]\r\n \r\n # get next connections from the station\r\n ConnectionInfoList = GetListOfNextConnections(TimeTableList, TimeTableIndex, StationHourIndex, start_station, departure_hour, departure_min, WaitLimit)\r\n\r\n # insert on-foot connections (Zu Fuss, ZF) to nearby stations into ConnectionInfoList\r\n # cancel (Tunc 4/3/2019)\r\n if False:\r\n\t StationMeasurementTime = ReqStationMeasureTime\r\n\t \r\n\t if Cond.MaxNumberOfSubsequentStationPassagesOnFoot in RouteConditions \\\r\n\t \tand RouteConditions[Cond.MaxNumberOfSubsequentStationPassagesOnFoot][0] > 0:\r\n\r\n\t\t if RouteConditions.has_key(Cond.MeasureStations):\r\n\t\t \tStationMeasurementTime = RouteConditions[Cond.MeasureStations][1]\r\n\t\t Connections = GetOnFootStationChangeConnections(start_station, departure_hour, departure_min, StationMeasurementTime)\r\n\t\t \r\n\t\t if Connections:\t\t# i.e. if Connections is not None\r\n\t\t \t(OnFootConnections1, OnFootConnections2) = Connections \r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections1)\r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections2)\r\n\r\n if Cond.IfTestRouteSearch:\r\n\t\tprint \"Next connections:\"\r\n\t\tfor c in ConnectionInfoList:\r\n\t\t\tprint c\r\n\t\ttime.sleep(Cond.TestWaitingTime)\r\n\r\n if not ConnectionInfoList:\t\t# Endstation: Node w/o successor nodes\r\n \treturn []\r\n\r\n PathInfoList = []\r\n\r\n for ConnectionInfo in ConnectionInfoList:\r\n\t\tres = Cond.CheckIfConnectionShouldBeSelected(ConnectionInfo, PathInfo, EndStation, RouteConditions)\r\n\r\n\t\t# test\r\n\t\tif Cond.IfTestRouteSearch:\r\n\t\t\tif res == None or res == False:\r\n\t\t\t\tprint \"CheckIfConnectionShouldBeSelected: %s\" % res\r\n\r\n\t \tif res == None: return[] \r\n\t \tif res == False: continue\r\n\r\n\t \t# recursive call\r\n\t\textended_paths = FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, \\\r\n\t\t\tTimeTableList, TimeTableIndex, StationHourIndex, PathInfo)\r\n\r\n\t\t# report status\r\n\t\tif Cond.ReportDuringRouteSearch in RouteConditions:\r\n\t\t\tTimeIntv = default_timer() - Cond.SearchStartTime\r\n\t\t\tRouteSearchReportingIntervalInSeconds = RouteConditions[Cond.ReportDuringRouteSearch][0]\r\n\t\t\tif TimeIntv > Cond.RouteSearchReportCounter * RouteSearchReportingIntervalInSeconds:\r\n\t\t\t\tCond.RouteSearchReportCounter += 1 \r\n\t\t\t\tprint \"%s seconds passed... 
\" % \"{:.2f}\".format(TimeIntv)\r\n\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\t\r\n\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\r\n\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t# append to path list\r\n\t\tfor p in extended_paths:\r\n\t\t\t# no need to recheck route unless current connection is the last one \r\n\t\t\t# LastConnection = (ConnectionInfo == p[-1])\r\n\t\t\tLastConnection = (CurPathLen == len(p) -1 and ConnectionInfo == p[-1])\r\n\t\t\t\r\n\t\t\tif LastConnection:\r\n\r\n\t\t\t\tif Cond.CheckIfRouteShouldBeSelected(p, RouteConditions):\r\n\t\t\t\t\tPathInfoList.append(p)\r\n\t\t\t\t\tCond.SelectedRoutes.append(ApplyAllRouteInfoCorrections(p))\r\n\r\n\t\t\t\t\t# evaluate route\r\n\t\t\t\t\t# cancel for BU2019\r\n\r\n\t\t\t\t\tif Cond.IfTestRouteSearch:\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\\n\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\t\r\n\t\t\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t\t\t\t# test\r\n\t\t\t\t\tIncrementDicValue(Cond.RouteCountPerRouteLength, CurPathLen)\r\n\t\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t# not last connection, no need to recheck the route\r\n\t\t\t\t# PathInfoList.append(p)\r\n\t\t\t\t# IncrementDicValue(SelectedRoutesPerLevel, CurPathLen)\r\n\t\t\t\tpass\r\n \r\n return PathInfoList", "def _sort_routes(self):\n sorted_routes = OrderedDict()\n for inp_lab, inp in self.inputs.items():\n if inp_lab not in self.routes:\n continue\n sorted_routes[inp_lab] = OrderedDict()\n for out_lab, out in self.outputs.items():\n if out_lab not in self.routes[inp_lab]:\n continue\n routes = self.routes[inp_lab][out_lab]\n # If multiple routes between a certain input and output exist,\n # order the routes by length\n if len(routes) > 1:\n route_lengths = [len(route) for route in routes]\n sorted_indices = np.argsort(route_lengths)\n routes = [routes[i] for i in sorted_indices]\n sorted_routes[inp_lab][out_lab] = routes\n self.routes = sorted_routes", "def find_routes(\r\n stops,\r\n measurement_units = \"\"\"Minutes\"\"\",\r\n analysis_region = None,\r\n reorder_stops_to_find_optimal_routes = False,\r\n preserve_terminal_stops = \"\"\"Preserve First\"\"\",\r\n return_to_start = False,\r\n use_time_windows = False,\r\n time_of_day = None,\r\n time_zone_for_time_of_day = \"\"\"Geographically Local\"\"\",\r\n uturn_at_junctions = \"\"\"Allowed Only at Intersections and Dead Ends\"\"\",\r\n point_barriers = None,\r\n line_barriers = None,\r\n polygon_barriers = None,\r\n use_hierarchy = True,\r\n restrictions = None,\r\n attribute_parameter_values = None,\r\n route_shape = \"\"\"True Shape\"\"\",\r\n route_line_simplification_tolerance = None,\r\n populate_route_edges = False,\r\n populate_directions = True,\r\n directions_language = \"\"\"en\"\"\",\r\n directions_distance_units = \"\"\"Miles\"\"\",\r\n directions_style_name = \"\"\"NA Desktop\"\"\",\r\n travel_mode = \"\"\"Custom\"\"\",\r\n impedance = \"\"\"Drive Time\"\"\",\r\n gis = None):\r\n kwargs = locals()\r\n\r\n if stops is None:\r\n stops = default_stops\r\n\r\n if 
point_barriers is None:\r\n point_barriers = default_point_barriers\r\n\r\n if line_barriers is None:\r\n line_barriers = default_line_barriers\r\n\r\n if polygon_barriers is None:\r\n polygon_barriers = default_polygon_barriers\r\n\r\n if restrictions is None:\r\n restrictions = default_restrictions\r\n\r\n if attribute_parameter_values is None:\r\n attribute_parameter_values = default_attributes\r\n\r\n if route_line_simplification_tolerance is None:\r\n route_line_simplification_tolerance = default_tolerance\r\n\r\n param_db = {\r\n \"stops\": (FeatureSet, \"Stops\"),\r\n \"measurement_units\": (str, \"Measurement_Units\"),\r\n \"analysis_region\": (str, \"Analysis_Region\"),\r\n \"reorder_stops_to_find_optimal_routes\": (bool, \"Reorder_Stops_to_Find_Optimal_Routes\"),\r\n \"preserve_terminal_stops\": (str, \"Preserve_Terminal_Stops\"),\r\n \"return_to_start\": (bool, \"Return_to_Start\"),\r\n \"use_time_windows\": (bool, \"Use_Time_Windows\"),\r\n \"time_of_day\": (datetime, \"Time_of_Day\"),\r\n \"time_zone_for_time_of_day\": (str, \"Time_Zone_for_Time_of_Day\"),\r\n \"uturn_at_junctions\": (str, \"UTurn_at_Junctions\"),\r\n \"point_barriers\": (FeatureSet, \"Point_Barriers\"),\r\n \"line_barriers\": (FeatureSet, \"Line_Barriers\"),\r\n \"polygon_barriers\": (FeatureSet, \"Polygon_Barriers\"),\r\n \"use_hierarchy\": (bool, \"Use_Hierarchy\"),\r\n \"restrictions\": (str, \"Restrictions\"),\r\n \"attribute_parameter_values\": (FeatureSet, \"Attribute_Parameter_Values\"),\r\n \"route_shape\": (str, \"Route_Shape\"),\r\n \"route_line_simplification_tolerance\": (LinearUnit, \"Route_Line_Simplification_Tolerance\"),\r\n \"populate_route_edges\": (bool, \"Populate_Route_Edges\"),\r\n \"populate_directions\": (bool, \"Populate_Directions\"),\r\n \"directions_language\": (str, \"Directions_Language\"),\r\n \"directions_distance_units\": (str, \"Directions_Distance_Units\"),\r\n \"directions_style_name\": (str, \"Directions_Style_Name\"),\r\n \"travel_mode\": (str, \"Travel_Mode\"),\r\n \"impedance\": (str, \"Impedance\"),\r\n \"solve_succeeded\": (bool, \"Solve Succeeded\"),\r\n \"output_routes\": (FeatureSet, \"Output Routes\"),\r\n \"output_route_edges\": (FeatureSet, \"Output Route Edges\"),\r\n \"output_directions\": (FeatureSet, \"Output Directions\"),\r\n \"output_stops\": (FeatureSet, \"Output Stops\"),\r\n }\r\n return_values = [\r\n {\"name\": \"solve_succeeded\", \"display_name\": \"Solve Succeeded\", \"type\": bool},\r\n {\"name\": \"output_routes\", \"display_name\": \"Output Routes\", \"type\": FeatureSet},\r\n {\"name\": \"output_route_edges\", \"display_name\": \"Output Route Edges\", \"type\": FeatureSet},\r\n {\"name\": \"output_directions\", \"display_name\": \"Output Directions\", \"type\": FeatureSet},\r\n {\"name\": \"output_stops\", \"display_name\": \"Output Stops\", \"type\": FeatureSet},\r\n ]\r\n\r\n if gis is None:\r\n gis = arcgis.env.active_gis\r\n\r\n url = gis.properties.helperServices.asyncRoute.url\r\n return _execute_gp_tool(gis, \"FindRoutes\", kwargs, param_db, return_values, _use_async, url)", "def get_self_origin(self, routes):\n # TODO\n outroutes = {}\n\n for ip in routes.keys():\n if routes[ip][SORG]:\n outroutes[ip] = routes[ip]\n\n return outroutes", "def gtfs_routes(gtfs, output_f):\n\n\t# Load up the stop times so we can find which are the best routes.\n\t#TODO\n\tstop_times_file = [x for x in gtfs.namelist() if 'stop_times' in x][0]\n\n\tstoptimes_c = csv.reader((gtfs.open(stop_times_file, 'r')))\n\theader = stoptimes_c.next()\n\ttrip_id_col = 
header.index('trip_id')\n\tarrtime_col = header.index('arrival_time')\n\tdeptime_col = header.index('departure_time')\n\tstopseq_col = header.index('stop_sequence')\n\ttrip_times = {}\n\tfor row in stoptimes_c:\n\t\tif row[trip_id_col] not in trip_times:\n\t\t\t# earliest seq, latest seq, earliest seq dep time, latest seq dep time\n\t\t\ttrip_times[row[trip_id_col]] = [None, None, None, None]\n\n\t\tarrtime = time_as_timedelta(row[arrtime_col])\n\t\tdeptime = time_as_timedelta(row[deptime_col])\n\t\tif arrtime is None or deptime is None:\n\t\t\t# bad data, skip!\n\t\t\tcontinue\n\t\tseq = int(row[stopseq_col])\n\n\t\t# Find if this is an earlier item in the sequence\n\t\tif trip_times[row[trip_id_col]][0] is None or trip_times[row[trip_id_col]][0] > seq:\n\t\t\ttrip_times[row[trip_id_col]][0] = seq\n\t\t\ttrip_times[row[trip_id_col]][2] = deptime\n\n\t\t# Find if this is an later item in the sequence\n\t\tif trip_times[row[trip_id_col]][1] is None or trip_times[row[trip_id_col]][1] < seq:\n\t\t\ttrip_times[row[trip_id_col]][1] = seq\n\t\t\ttrip_times[row[trip_id_col]][3] = arrtime\n\n\t# Load the shapes into a map that we can lookup.\n\t# We should do all the geometry processing here so that we only have to do\n\t# this once-off.\n\t#TODO\n\tshapes_file = [x for x in gtfs.namelist() if 'shapes' in x][0]\n\tshapes_c = csv.reader(swallow_windows_unicode(gtfs.open(shapes_file, 'r')))\n\n\theader = shapes_c.next()\n\tshape_id_col = header.index('shape_id')\n\tshape_lat_col = header.index('shape_pt_lat')\n\tshape_lng_col = header.index('shape_pt_lon')\n\tshape_seq_col = header.index('shape_pt_sequence')\n\tshape_dist_col = header.index('shape_dist_traveled') if 'shape_dist_traveled' in header else None\n\n\tshapes = {}\n\tshape_lengths = {}\n\tfor row in shapes_c:\n\t\tif row[shape_id_col] not in shapes:\n\t\t\tshapes[row[shape_id_col]] = {}\n\n\t\tshapes[row[shape_id_col]][int(row[shape_seq_col])] = (Decimal(row[shape_lng_col]), Decimal(row[shape_lat_col]))\n\n\t\t# Calculate length according to GTFS\n\t\t# This could also be calculated by the geometry, but we trust GTFS, right...\n\t\tif shape_dist_col is not None and row[shape_dist_col]:\n\t\t\tlength = Decimal(row[shape_dist_col])\n\t\t\tif row[shape_id_col] not in shape_lengths or shape_lengths[row[shape_id_col]] < length:\n\t\t\t\tshape_lengths[row[shape_id_col]] = length\n\n\t# translate the shapes into a LineString for use by the GeoJSON module\n\tfor shape_id in shapes.iterkeys():\n\t\tshape_keys = shapes[shape_id].keys()\n\t\tshape_keys.sort()\n\t\tshape = []\n\t\tfor ordinal in shape_keys:\n\t\t\tshape.append(shapes[shape_id][ordinal])\n\n\t\tshapes[shape_id] = shape\n\n\t# Make a matching dict between routes and shapes\n\ttrips = {}\n\ttrips_ref = {}\n\troute_time = {}\n\n\t#TODO\n\ttrips_file = [x for x in gtfs.namelist() if 'trips' in x][0]\n\n\ttrips_c = csv.reader(swallow_windows_unicode(gtfs.open(trips_file, 'r')))\n\theader = trips_c.next()\n\troute_id_col = header.index('route_id')\n\tshape_id_col = header.index('shape_id')\n\ttrip_id_col = header.index('trip_id')\n\tfor row in trips_c:\n\t\t# reference count the shapes\n\t\tif row[route_id_col] not in trips_ref:\n\t\t\t# route is unknown, create dict\n\t\t\ttrips_ref[row[route_id_col]] = {}\n\t\t\troute_time[row[route_id_col]] = trip_times[row[trip_id_col]]\n\n\t\tif row[shape_id_col] not in trips_ref[row[route_id_col]]:\n\t\t\t# shape is unknown, create counter\n\t\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] = 0\n\n\t\t# increment 
counter\n\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] += 1\n\n\t# now we're done, iterate through the reference-counters and find the best\n\t# shape\n\tfor route_id, candidate_shapes in trips_ref.iteritems():\n\t\tpopular_shape, popular_shape_refs = None, 0\n\t\tfor shape_id, refs in candidate_shapes.iteritems():\n\t\t\tif refs > popular_shape_refs:\n\t\t\t\tpopular_shape, popular_shape_refs = shape_id, refs\n\n\t\t# now we should have the route's shape\n\t\tassert popular_shape is not None, 'Couldn\\'t find a shape for route %r' % route_id\n\t\ttrips[route_id] = popular_shape\n\n\t# Cleanup unused variables\n\tdel trip_times\n\n\t# lets setup our output file\n\toutput_layer = geojson.FeatureCollection([])\n\t# assume WGS84 CRS\n\toutput_layer.crs = geojson.crs.Named('urn:ogc:def:crs:OGC:1.3:CRS84')\n\n\t# now we have all the shapes available, translate the routes\n\t#TODO\n\troutes_file = [x for x in gtfs.namelist() if 'routes' in x][0]\n\n\troutes_c = csv.reader(swallow_windows_unicode(gtfs.open(routes_file, 'r')))\n\theader = routes_c.next()\n\troute_id_col = header.index('route_id')\n\n\tfor row in routes_c:\n\t\t# make dict of other properties\n\t\tprops = dict()\n\t\tfor i, h in enumerate(header):\n\t\t\tif row[i] != '':\n\t\t\t\tprops[h] = row[i]\n\n\t\tif row[route_id_col] not in trips:\n\t\t\t# Route has no trips!\n\t\t\tprint \"Warning: route has no trips, skipping: %r\" % (row,)\n\t\t\tcontinue\n\n\t\tprops['shape_id'] = trips[row[route_id_col]]\n\t\tprops['shape_refs'] = trips_ref[row[route_id_col]][props['shape_id']]\n\t\tif shape_dist_col is not None and len(shape_lengths) > 0:\n\t\t\tprops['shape_length'] = shape_lengths[props['shape_id']]\n\t\tprops['duration_sec'] = (route_time[row[route_id_col]][3] - route_time[row[route_id_col]][2]).total_seconds()\n\n\t\toutput_layer.features.append(geojson.Feature(\n\t\t\tgeometry=geojson.LineString(\n\t\t\t\tcoordinates=shapes[trips[row[route_id_col]]]\n\t\t\t),\n\t\t\tproperties=props,\n\t\t\tid=row[route_id_col]\n\t\t))\n\n\t# now flush the GeoJSON layer to a file.\n\tgeojson.dump(output_layer, output_f, cls=DecimalEncoder)", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def route(self, mn, direction=None, via_tag=None):\n mn = np.asarray(mn)\n _mn = list()\n for i in range(1, mn.shape[0]): \n # when more than two points are given,\n # create a multi-point wire compose of sub-routing wires\n # connecting the points given by mn in sequence.\n _mn.append([mn[i - 1, :], mn[i, :]])\n route = list()\n # via at the starting point\n if via_tag is not None:\n if via_tag[0] is True:\n route.append(self.via(mn=_mn[0][0], params=None))\n # routing wires\n for i, __mn in enumerate(_mn):\n xy0 = self.abs2phy[__mn[0]]\n xy1 = self.abs2phy[__mn[1]]\n _xy = np.array([[xy0[0], xy0[1]], [xy1[0], xy1[1]]])\n if np.all(xy0 == xy1): # if two points are identical, generate a metal stub on the bottom layer.\n if (direction == 'vertical') or ((direction is None) and (self.primary_grid == 'vertical')):\n width = self.vwidth[__mn[0][0]]\n hextension = int(width/2)\n vextension = self.vextension0[__mn[0][0]]\n layer = self.vlayer[__mn[0][0]]\n else:\n width = self.hwidth[__mn[0][1]]\n hextension = self.hextension0[__mn[0][1]]\n vextension = int(width/2)\n layer = self.hlayer[__mn[0][1]]\n else:\n if (xy0[0] == xy1[0]) or (direction == 'vertical'): # vertical routing\n width = self.vwidth[__mn[0][0]]\n hextension = int(width/2)\n vextension = self.vextension[__mn[0][0]]\n layer = 
self.vlayer[__mn[0][0]]\n color = self.xcolor[__mn[0][0]%self.xcolor.shape[0]]\n\n else: # horizontal routing\n width = self.hwidth[__mn[0][1]]\n hextension = self.hextension[__mn[0][1]]\n vextension = int(width/2)\n layer = self.hlayer[__mn[0][1]]\n color = self.ycolor[__mn[0][1]%self.ycolor.shape[0]] # ycolor is determined by its grid layer.\n p = laygo2.object.physical.Rect(xy=_xy, layer=layer, hextension=hextension, vextension=vextension, color=color)\n route.append(p)\n # via placement\n if via_tag is None:\n if (i > 0) and (i < mn.shape[0] - 1):\n route.append(self.via(mn=__mn[0], params=None))\n else:\n if via_tag[i + 1] == True:\n route.append(self.via(mn=__mn[1], params=None))\n if len(route) == 1: # not isinstance(mn[0][0], list):\n return route[0]\n else:\n return route", "def test_parse_routes(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n problem = problem_builder.build(riders, vehicles, depots)\n model = model_builder.build(problem)\n solution = model.solve()\n routes = Router._parse_routes(problem, solution)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def create_url(_origin_details, travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&date={travel_start_date}&time={travel_start_time}'\n\n # Build iteratively with necessary syntax between destinations\n destination_body = ''\n for i, dest in enumerate(destination_list):\n destination_body = f'{destination_body}&to[{i}]={dest}'\n\n return f'{prefix}{origin_body}{destination_body}'", "def main(edges=[(0, 1, 3), (1, 3, 4), (2, 3, 3), (0, 2, 2) ], num=4):\n\n # initialize routers array\n routers = []\n for x in range(num):\n routers.append([1000] * num)\n routers[x][x] = 0\n \n # set distance to all neighbours \n for edge in edges:\n routers[edge[0]][edge[1]] = edge[2]\n routers[edge[1]][edge[0]] = edge[2]\n\n start_table = routers.copy()\n\n flag = True\n while flag:\n upflag = False\n for nbrs in edges:\n routers[nbrs[0]], up_flag1 = update_table(routers[nbrs[0]], routers[nbrs[1]], dist=nbrs[2])\n routers[nbrs[1]], up_flag2 = update_table(routers[nbrs[1]], routers[nbrs[0]], dist=nbrs[2])\n upflag = upflag or up_flag1 or up_flag2\n\n flag = upflag\n\n return start_table, routers", "def evaluateAllRroutes(self):\n isTrain = 1 # 1 for train, 0 for test\n\n performance = 0\n normalizedPerformance = 0\n priceTolerance = 5 # price to be tolerated\n\n normPerforms = []\n for i in range(8):\n print \"Route: {}\".format(i)\n [perfor, normaPerfor] = self.evaluateOneRouteForMultipleTimes(self.routes[i], priceTolerance)\n normPerforms.append(normaPerfor)\n performance += perfor\n normalizedPerformance += normaPerfor\n\n performance = round(performance/8, 2)\n normalizedPerformance = round(normalizedPerformance/8, 2)\n\n if self.isTrain:\n print \"\\nTRAIN:\"\n else:\n print \"\\nTEST:\"\n print \"Average Performance: {}%\".format(performance)\n print \"Average Normalized 
Performance: {}%\".format(normalizedPerformance)\n print \"Normalized Performance Variance: {}\".format(np.var(normPerforms))", "def connection_route_from_hosts(self, ingr, egr):\n assert(ingr is not None)\n assert(egr is not None)\n LOG.info(\"Try to connection-route %s -> %s\", ingr, egr)\n\n call_id = CallID(ingr)\n try:\n cep_src = ConnectionEP(ingr)\n cep_dst = ConnectionEP(egr)\n lsp = LspParams()\n\n (wero, pero) = self.routing.connectionRoute(cep_src.ident,\n cep_dst.ident,\n call_id.ident,\n lsp.ident,\n [])\n # in any case flush the call\n self.routing.callFlush(call_id.ident)\n\n return (wero, pero)\n\n except PCERA.CannotFetchConnEndPoint, exe:\n LOG.error(\"CannotFetchConnEndPoint exception: %s\", str(exe))\n except PCERA.ConnectionParamsMismatch, exe:\n LOG.error(\"ConnectionParamsMismatch exception: %s\", str(exe))\n except PCERA.ConnectionEroMismatch, exe:\n LOG.error(\"ConnectionEroMismatch exception: %s\", str(exe))\n except PCERA.ConnectionEroMismatch, exe:\n LOG.error(\"ConnectionEroMismatch exception: %s\", str(exe))\n except PCERA.NoRoute, exe:\n LOG.error(\"NoRoute exception: %s\", str(exe))\n except PCERA.CannotFetchCall, exe:\n LOG.error(\"CannotFetchCall exception: %s\", str(exe))\n except PCERA.InternalProblems, exe:\n LOG.error(\"InternalProblems exception: %s\", str(exe))\n except Exception, exe:\n LOG.error(\"Generic exception: %s\", str(exe))\n\n return (None, None)", "def get_route(start_id, dest_id, csv):\n\n # route is not computed yet\n if csv[start_id][dest_id] is None:\n return -1\n\n route = [start_id]\n\n while route[-1] != dest_id:\n # append the next node on this route\n route.append(csv[route[-1]][dest_id])\n\n return route", "def writeRouteSequence(self):\n print \"writing route sequence\"\n f = open(PublicTransit.OUTFILE_NAME, 'wb')\n if (PublicTransit.LINE_FILE_TYPE == LineFileType.PTLINE):\n lines = [\";;<<PT>><<LINE>>;;\" + os.linesep]\n elif (PublicTransit.LINE_FILE_TYPE == LineFileType.TRNBUILD):\n lines = [\";;<<Trnbuild>>;;\" + os.linesep]\n\n for t in self.transitRoutes:\n if t in self.stopsByRoute:\n i = 0\n self.transitRoutes[t].nodeSequence = []\n prevLinkId = -1\n # Bus routes have a link sequence from BusRouteTraversalEdges. 
Others just have stops.\n if (len(self.transitRoutes[t].linkSequence) > 0):\n for link in self.transitRoutes[t].linkSequence:\n # make sure this link is within the region (i.e., it is in linksDict)\n if (link in self.linksDict):\n nodeToAppend = -1\n if (i == 0):\n nodeToAppend = self.stopsByRoute[t][0].tanaNode\n if (nodeToAppend == -1):\n if (self.linksDict[link].oneWay == \"FT\"):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (self.linksDict[link].oneWay == \"TF\"):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n else: # open in both directions; determine traversal direction\n nodeToAppend = -self.linksDict[link].fromNode.nodeId \n elif (i == 1):\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n if (self.linksDict[link].oneWay == \"FT\"):\n if (self.stopsByRoute[t][0].tanaNode != self.linksDict[link].fromNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].fromNode.nodeId)\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (self.linksDict[link].oneWay == \"TF\"):\n if (self.stopsByRoute[t][0].tanaNode != self.linksDict[link].toNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].toNode.nodeId)\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n else: # open in both directions\n if (abs(self.transitRoutes[t].nodeSequence[0]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[0]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (self.transitRoutes[t].linkSequence[0] in self.linksDict and \n self.linksDict[self.transitRoutes[t].linkSequence[0]].toNode.nodeId == self.linksDict[link].fromNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].fromNode.nodeId)\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (self.transitRoutes[t].linkSequence[0] in self.linksDict and \n self.linksDict[self.transitRoutes[t].linkSequence[0]].fromNode.nodeId == self.linksDict[link].toNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].toNode.nodeId)\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n elif (prevLinkId != link and prevLinkId != -1): # ensure there are no repeated links\n if (self.linksDict[link].oneWay == \"FT\"):\n if (len(self.transitRoutes[t].nodeSequence) > 0 and \n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (len(self.transitRoutes[t].nodeSequence) > 0):\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n\n elif (self.linksDict[link].oneWay == \"TF\"):\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (len(self.transitRoutes[t].nodeSequence) > 0):\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n else: # open in both directions\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n # determine direction based on the previous node in the sequence. 
If the previous\n # node is the same as this link's from node, append the toNode; otherwise append the fromNode.\n if (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n # previous link doesn't connect to this because the previous link was a duplicate\n else:\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n # remove the last node in the sequence and check if the one before connects to this one\n if (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n # if the node is a stop on this route, set the node ID positive\n if (nodeToAppend != -1):\n if (i > 0 and abs(nodeToAppend) in [st.tanaNode for st in self.stopsByRoute[t]]):\n nodeToAppend = -1 * nodeToAppend\n self.transitRoutes[t].nodeSequence.append(nodeToAppend)\n prevLinkId = link\n \n i += 1\n # if the last node is not a stop, remove it\n if (len(self.transitRoutes[t].nodeSequence) > 0 and self.transitRoutes[t].nodeSequence[-1] < 0):\n del(self.transitRoutes[t].nodeSequence[-1])\n \n # if there are no links for the route, just record the stops as the nodes\n else:\n self.transitRoutes[t].nodeSequence = [n.tanaNode for n in self.stopsByRoute[t] if n.tanaNode != -1]\n \n # Only write routes with a node sequence.\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n lines.append(self.__getPrintString(t, PublicTransit.LINE_FILE_TYPE) + os.linesep)\n else:\n print \"No node sequence for \" + str(t) + \" (\" + self.transitRoutes[t].new_name + \")\"\n f.writelines(lines)\n f.close()", "def pairing(self):\n if len(self._paths) == 0:\n second_values = self.data\n get_flight = lambda x: x\n first = True\n else:\n second_values = self._paths\n get_flight = lambda x: x.get_last_flight()\n first = False\n\n for value in second_values:\n f1 = get_flight(value)\n for f2 in self.data:\n if f1.connects_to(f2):\n if first:\n self._paths.append(FlightPath(f1, f2))\n else:\n path_copy = copy.copy(value)\n added = path_copy.try_add(f2)\n if added:\n self._paths.append(path_copy)", "def plan_trip():\n origins = []\n destinations = []\n\n origin_stop = request.args.get('origin', False)\n destination_stop = request.args.get('destination', False)\n origin_is_suburb = request.args.get('origin_suburb', False)\n dest_is_suburb = request.args.get('dest_suburb', False)\n origin_is_suburb = bool(origin_is_suburb)\n dest_is_suburb = bool(dest_is_suburb)\n if origin_stop and destination_stop:\n client = api.connection()\n origins = client.find_stops_by_name('any', origin_stop, True)\n\n if client.error == 404:\n render_template(\n \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n )\n\n destinations = client.find_stops_by_name('any', destination_stop, True)\n if client.error == 404:\n render_template(\n \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n )\n\n origins = stop_information_generator(\n origins.locations, [], origin_stop, origin_is_suburb\n )\n destinations = stop_information_generator(\n destinations.locations, [], destination_stop, dest_is_suburb\n )\n\n return render_template(\n 
\"trip-planner.jinja2\", origins=origins, destinations=destinations, err=200\n )", "def __compute_unit_routes(self):\n unit_rules_graph = NearCNF.__UnitRulesGraph(self)\n unit_routes_as_trees = {var: self.__dijkstra_max_prob_tree(unit_rules_graph, var)\n for var in unit_rules_graph.vertices}\n\n def tree_to_lists(root, current_route=[], routes=None):\n if routes is None:\n routes = []\n current_route.append(root.key)\n if not root.children:\n routes.append(current_route[:])\n else:\n for child in root.children:\n tree_to_lists(child, current_route, routes)\n current_route.pop()\n return routes\n\n unit_routes_as_lists = {var: tree_to_lists(tree.root)\n for var, tree in unit_routes_as_trees.items()}\n return unit_routes_as_lists", "def prepare_gates(chip, source_gate, target_gate):\n crossroad = []\n travelled_path = []\n\n # Source and target always on z-axis 0\n source_coords = [chip.gates[source_gate][\"x\"], chip.gates[source_gate][\"y\"], 0]\n target_coords = [chip.gates[target_gate][\"x\"], chip.gates[target_gate][\"y\"], 0]\n\n chip = calculate_distance(target_coords, chip)\n\n start = chip.coordinates[0][source_coords[1]][source_coords[0]]\n start_node = nd.Node(source_coords, None, 1, start.cost + start.distance_to_goal)\n goal_node = nd.Node(target_coords, None, 1, 0)\n crossroad.append(start_node)\n\n return run_algorithm(target_coords, start_node, goal_node, chip, crossroad, travelled_path)", "def bus_routes_direction():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file) #useful for monitoring progress of function\n reader = csv.reader(open(file))\n for line in reader:\n route = extract_route_and_direction(line[3]) # Journey ID field\n if route not in route_list and route != \"\": # error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def connect(data, x1, y1, x2, y2):\n flag1, points1 = oneRoadConnect(data, x1, y1, x2, y2)\n if flag1:\n return flag1, points1\n flag2, points2 = twoRoadConnect(data, x1, y1, x2, y2)\n if flag2:\n return flag2, points2\n flag3, points3 = threeRoadConnect(data, x1, y1, x2, y2)\n if flag3:\n return flag3, points3\n return False, []", "def routes(x, y, results_dict):\n if (x, y) in results_dict: # if the value is already in the results cache then we don't need to calculate again\n return results_dict[(x, y)]\n\n # we only look at the top half of the grid\n # (as if you swap moves right/down you get equivalent number of paths)\n elif x > y:\n r = routes(y, x, results_dict)\n\n elif x == 0:\n return 1 # only one path when x coordinate is 0\n\n # from any one point you can either go down or left, then the sum of the positions gives the total for the original\n else:\n r = routes(x - 1, y, results_dict) + routes(x, y - 1, results_dict)\n results_dict[(x, y)] = r\n return r", "def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n for varat in entry[\"varats\"]:\n ip = varat[\"network\"].split(\".\")\n netmask = varat[\"netmask\"].split(\".\")\n\n mask_bit = \"\".join([ format(int(quad), \"08b\") for quad in netmask ])\n num_ones = mask_bit.count(\"1\")\n ip_bin = \"\".join([ format(int(quad), \"08b\") for quad in ip ])\n ip_start = ip_bin[:num_ones]\n daddr_bin = \"\".join([ format(int(quad), \"08b\") for quad in daddr.split(\".\") ])\n if daddr_bin.startswith(ip_start):\n outroutes.append({\"peer\": entry[\"peer\"], \"us\": entry[\"us\"], \"ghoti\": num_ones, \"msg\": varat})\n\n #print(\"outroutessssssssssssssssssssss\", outroutes)\n return 
outroutes", "def run(self):\n source = str(self.form.source_text.toPlainText())\n dest = str(self.form.destination_text.toPlainText())\n \n self.form.source_text.clear()\n self.form.destination_text.clear()\n \n if not source or not dest:\n self.show_dialog(\"Empty argument.\")\n return \n\n \n if self.G.has_node(source) and self.G.has_node(dest):\n if source in nx.algorithms.descendants(self.G, dest):\n graph = nx.to_dict_of_dicts(self.G) # converting to dict based graph.\n dijkstra = Dijkstra(graph)\n parents, visited = dijkstra.find_route(source, dest)\n shortest_path = dijkstra.generate_path(parents, source, dest)\n shortest_path = \" -> \".join(shortest_path)\n result = f\"Distance is {visited[dest]} units.\\nShortest path from {source} to {dest} is {shortest_path}\"\n self.form.result_text.setText(result)\n else:\n self.show_dialog(f\"There is no connection between {source} and {dest}.\")\n else:\n self.show_dialog(f\"Please check source and destination.\")", "def solve_tsp(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n drop_off_dict = {}\n car_path = []\n home_map = {}\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n\n start_in_home = start in home_indexes\n if start in home_indexes:\n home_indexes.remove(start)\n home_indexes.insert(0, start)\n home_count = 0;\n\n for home in home_indexes:\n #print(home, end = \" \")\n home_map[home_count] = home\n home_count += 1\n # Instantiate the data problem.\n #print(len(home_map))\n data = create_data_model(home_indexes, 0)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n #print(manager.NodeToIndex(15))\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n #print(home_map[to_index], end = \" \")\n from_index = manager.IndexToNode(from_index)\n to_index = manager.IndexToNode(to_index)\n dist_to = all_paths.get(home_map[from_index])[0][home_map[to_index]]\n #if from_index >= 25 or to_index >= 25:\n # print(\"from\" if from_index >= 25 else \"to\", end = \" \")\n #dist_to = all_paths[from_index][0][to_index]\n return dist_to\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n \"\"\"\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n \"\"\"\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 3\n #search_parameters.log_search = True\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # if assignment:\n # print_solution(manager, routing, assignment)\n # Print solution on console.\n\n if start in home_indexes:\n drop_off_dict[start] = [start]\n\n\n index = routing.Start(0)\n car_path.append(start)\n\n while not 
routing.IsEnd(index):\n previous_index = manager.IndexToNode(index)\n index = assignment.Value(routing.NextVar(index))\n\n car_path.pop()\n to_index = manager.IndexToNode(index)\n path_to = all_paths.get(home_map[previous_index])[1][home_map[to_index]]\n drop_off_dict[home_map[to_index]] = [home_map[to_index]]\n #print(to_index, end = ' ')\n car_path.extend(path_to)\n #route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n # for i in car_path:\n #     print(i)\n if start in drop_off_dict.keys() and not start_in_home:\n drop_off_dict.pop(start, None)\n\n return car_path, drop_off_dict", "def get_bundle_corner(\n ports1: List[Port],\n ports2: List[Port],\n route_filter: Callable[..., Route] = get_route_from_waypoints,\n separation: float = 5.0,\n path_length_match_loops: int = None,\n path_length_match_extra_length: float = 0.0,\n path_length_match_modify_segment_i: int = -2,\n **kwargs,\n) -> List[Route]:\n if \"straight\" in kwargs.keys():\n _ = kwargs.pop(\"straight\")\n\n routes = _get_bundle_corner_waypoints(\n ports1,\n ports2,\n routing_func=generate_manhattan_waypoints,\n separation=separation,\n **kwargs,\n )\n if path_length_match_loops:\n routes = [np.array(route) for route in routes]\n routes = path_length_matched_points(\n routes,\n extra_length=path_length_match_extra_length,\n nb_loops=path_length_match_loops,\n modify_segment_i=path_length_match_modify_segment_i,\n **kwargs,\n )\n\n return [route_filter(r, **kwargs) for r in routes]", "def skyroute(startroute, endroute, grid):\n # Find the start and end point\n start = startroute[-1]\n end = endroute[-1]\n path = astar(grid, start, end)\n\n # If a path is found, stitch the routes together\n if path is not None: \n for location in reversed(startroute[:-1]):\n path.insert(0, location)\n\n for location in reversed(endroute[:-1]):\n path.insert(len(path), location)\n\n return path", "def __dfs_roads_directions(self, with_prints=False) -> Set[JuncRoadSingleConnection]:\n\n roads: Set[JuncRoadSingleConnection] = set()\n visited_indices: Set[JuncIndices] = set()\n\n def dfs_rec(junc: JuncNode):\n \"\"\"\n recursively run from the input junction\n :param junc: the junction to run from\n \"\"\"\n # add to visited\n visited_indices.add(junc.indices)\n # run on neighbors\n for neighbor in self.get_connected_juncs(junc):\n # go over unvisited juncs and add roads to them from current\n if neighbor.indices not in visited_indices:\n if with_prints:\n print(junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n dfs_rec(neighbor)\n \"\"\"\n there is a case where we are currently at junc 1, which has neighbors 2,3.\n from junc 1 we move to 2, that moves to 3 from it.\n 3 will not go to 1 because 1 is visited, so the road 3->1 will not be created.\n when returning to 1, it will not go to 3, because 3 is visited, so the road 1->3 will not be created.\n so we result in a connection with no road, fix it:\n \"\"\"\n if neighbor.indices in visited_indices \\\n and JuncRoadSingleConnection(junc.indices, neighbor.indices) not in roads \\\n and JuncRoadSingleConnection(neighbor.indices, junc.indices) not in roads:\n if with_prints:\n print(\"special\", junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n # do not call dfs recursively\n\n def first_node(junc: 
JuncNode):\n \"\"\"\n run specifically for a group-start junction\n :param junc: a junction that is the first in a connected group\n \"\"\"\n # add to visited\n visited_indices.add(junc.indices)\n #\n \"\"\"\n first, choose a random road to be in-road. this prevents a first node with only out-roads.\n a problem: if this is not the first first_node, it is possible that the random road will be \n an already existing road, in the other direction. so we need to make sure that the random road we choose\n is not already set on the other side.\n \"\"\"\n neighbors = self.get_connected_juncs(junc).copy()\n in_checked_indices: Set[JuncIndices] = set()\n in_road_junc = choice(neighbors)\n in_checked_indices.add(in_road_junc.indices)\n while JuncRoadSingleConnection(junc.indices, in_road_junc.indices) in roads and len(\n in_checked_indices) != len(neighbors):\n in_road_junc = choice(neighbors)\n in_checked_indices.add(in_road_junc.indices)\n if len(in_checked_indices) != len(neighbors):\n # regular stop, we have found a road to be in_road.\n directions = self.get_connection_directions(in_road_junc, junc)\n roads.add(JuncRoadSingleConnection(in_road_junc.indices, junc.indices, directions[0], directions[1]))\n if with_prints:\n print(\"first in-road\", in_road_junc.indices, junc.indices)\n # run for the rest of the neighbors\n neighbors.remove(in_road_junc)\n # else, this junc has only out-roads and this cannot be fixed.\n for neighbor in neighbors:\n if neighbor.indices not in visited_indices: # the other case is handled through the neighbor in dfs_rec\n if with_prints:\n print(\"first\", junc.indices, neighbor.indices)\n directions = self.get_connection_directions(junc, neighbor)\n roads.add(JuncRoadSingleConnection(junc.indices, neighbor.indices, directions[0], directions[1]))\n dfs_rec(neighbor)\n\n all_juncs_indices: Set[JuncIndices] = {junc.indices for junc in self.get_all_juncs()}\n # the graph may not be connected, should run until all connected parts are visited\n while len(all_juncs_indices) != len(visited_indices):\n # now choose a junc and run on it.\n start_junc = self.get_junc(sample(all_juncs_indices.difference(visited_indices), 1)[0])\n first_node(start_junc)\n return roads", "def calculate_routes(outposts, vehicles, graph, starting_point=0, **kwargs):\n number_of_vehicles = len(vehicles)\n number_of_nodes = len(outposts) - 1\n\n # Source: https://stackoverflow.com/questions/28965734/general-bars-and-stars\n vehicles_partitions = []\n total_load = outposts.load.sum()\n capacities = list(vehicles.capacity)\n for combination in itertools.combinations(range(number_of_nodes+number_of_vehicles-1), number_of_vehicles-1):\n current_partition = [b-a-1 for a, b in zip((-1,) + combination, combination+(number_of_nodes+number_of_vehicles-1,))]\n current_partition = sorted(current_partition)\n if current_partition not in vehicles_partitions:\n vehicle_presence_vector = [0 if number==0 else 1 for number in current_partition]\n total_capacity = np.dot(vehicle_presence_vector, capacities)\n if total_capacity >= total_load:\n vehicles_partitions.append(current_partition)\n\n dwave_solver = DWaveEngine.default()\n if 'use_capacity_constraints' in kwargs:\n use_capacity_constraints = kwargs['use_capacity_constraints']\n del kwargs['use_capacity_constraints']\n else:\n use_capacity_constraints = True\n print(\"All partitions:\", vehicles_partitions)\n best_solution = None\n for current_partition in vehicles_partitions:\n print(\"Current partition: \", current_partition)\n problem = 
Problem(vehicles=vehicles,\n outposts=outposts,\n vehicles_partition=current_partition,\n graph=graph,\n starting_point=starting_point,\n use_capacity_constraints=use_capacity_constraints)\n current_solution = dwave_solver.solve(problem)\n if current_solution is None:\n print(\"No valid solutions found with D-Wave\")\n elif best_solution is None:\n best_solution = current_solution\n else:\n current_cost = sum(sub_solution[2] for sub_solution in current_solution)\n best_cost = best_solution.total_cost\n if current_cost < best_cost:\n best_solution = current_solution\n\n if best_solution is None:\n return None\n return best_solution.to_dataframe()", "def launch_parallel_rt_pairs():\r\n # Create the parser\r\n parser = argparse.ArgumentParser(description=globals().get(\"__doc__\", \"\"), fromfile_prefix_chars='@')\r\n\r\n # Define Arguments supported by the command line utility\r\n\r\n # --pair-type parameter\r\n help_string = \"The type of origin-destination pair assignment to use. Either one_to_one or many_to_many.\"\r\n parser.add_argument(\"-pt\", \"--pair-type\", action=\"store\", dest=\"pair_type_str\", help=help_string, required=True)\r\n\r\n # --origins parameter\r\n help_string = \"The full catalog path to the feature class containing the origins.\"\r\n parser.add_argument(\"-o\", \"--origins\", action=\"store\", dest=\"origins\", help=help_string, required=True)\r\n\r\n # --origins-id-field parameter\r\n help_string = \"The name of the unique ID field in origins.\"\r\n parser.add_argument(\r\n \"-oif\", \"--origins-id-field\", action=\"store\", dest=\"origin_id_field\", help=help_string, required=True)\r\n\r\n # --destinations parameter\r\n help_string = \"The full catalog path to the feature class containing the destinations.\"\r\n parser.add_argument(\"-d\", \"--destinations\", action=\"store\", dest=\"destinations\", help=help_string, required=True)\r\n\r\n # --destinations-id-field parameter\r\n help_string = \"The name of the unique ID field in destinations.\"\r\n parser.add_argument(\r\n \"-dif\", \"--destinations-id-field\", action=\"store\", dest=\"dest_id_field\", help=help_string, required=True)\r\n\r\n # --network-data-source parameter\r\n help_string = \"The full catalog path to the network dataset or a portal url that will be used for the analysis.\"\r\n parser.add_argument(\r\n \"-n\", \"--network-data-source\", action=\"store\", dest=\"network_data_source\", help=help_string, required=True)\r\n\r\n # --travel-mode parameter\r\n help_string = (\r\n \"The name or JSON string representation of the travel mode from the network data source that will be used for \"\r\n \"the analysis.\"\r\n )\r\n parser.add_argument(\"-tm\", \"--travel-mode\", action=\"store\", dest=\"travel_mode\", help=help_string, required=True)\r\n\r\n # --time-units parameter\r\n help_string = \"String name of the time units for the analysis. These units will be used in the output.\"\r\n parser.add_argument(\"-tu\", \"--time-units\", action=\"store\", dest=\"time_units\", help=help_string, required=True)\r\n\r\n # --distance-units parameter\r\n help_string = \"String name of the distance units for the analysis. 
These units will be used in the output.\"\r\n parser.add_argument(\r\n \"-du\", \"--distance-units\", action=\"store\", dest=\"distance_units\", help=help_string, required=True)\r\n\r\n # --max-routes parameter\r\n help_string = \"Maximum number of routes that can be in one chunk for parallel processing of Route solves.\"\r\n parser.add_argument(\r\n \"-mr\", \"--max-routes\", action=\"store\", dest=\"max_routes\", type=int, help=help_string, required=True)\r\n\r\n # --max-processes parameter\r\n help_string = \"Maximum number parallel processes to use for the Route solves.\"\r\n parser.add_argument(\r\n \"-mp\", \"--max-processes\", action=\"store\", dest=\"max_processes\", type=int, help=help_string, required=True)\r\n\r\n # --reverse-direction parameter\r\n help_string = \"Whether to reverse the direction of travel (destination to origin).\"\r\n parser.add_argument(\r\n \"-rd\", \"--reverse-direction\", action=\"store\", type=lambda x: bool(strtobool(x)),\r\n dest=\"reverse_direction\", help=help_string, required=True)\r\n\r\n # --out-routes parameter\r\n help_string = \"The full catalog path to the output routes feature class.\"\r\n parser.add_argument(\"-r\", \"--out-routes\", action=\"store\", dest=\"out_routes\", help=help_string, required=True)\r\n\r\n # --scratch-folder parameter\r\n help_string = \"The full catalog path to the scratch folder where intermediate outputs will be stored.\"\r\n parser.add_argument(\r\n \"-sf\", \"--scratch-folder\", action=\"store\", dest=\"scratch_folder\", help=help_string, required=True)\r\n\r\n # --assigned-dest-field parameter\r\n help_string = (\"The name of the field in origins indicating the assigned destination. \"\r\n \"Required for one_to_one pair-type\")\r\n parser.add_argument(\r\n \"-adf\", \"--assigned-dest-field\", action=\"store\", dest=\"assigned_dest_field\", help=help_string, required=False)\r\n\r\n # --od-pair-table parameter\r\n help_string = \"CSV file holding preassigned OD pairs. Required for many_to_many pair-type.\"\r\n parser.add_argument(\r\n \"-odp\", \"--od-pair-table\", action=\"store\", dest=\"od_pair_table\", help=help_string, required=False)\r\n\r\n # --time-of-day parameter\r\n help_string = (f\"The time of day for the analysis. Must be in {helpers.DATETIME_FORMAT} format. 
Set to None for \"\r\n \"time neutral.\")\r\n parser.add_argument(\"-tod\", \"--time-of-day\", action=\"store\", dest=\"time_of_day\", help=help_string, required=False)\r\n\r\n # --barriers parameter\r\n help_string = \"A list of catalog paths to the feature classes containing barriers to use in the Route.\"\r\n parser.add_argument(\r\n \"-b\", \"--barriers\", action=\"store\", dest=\"barriers\", help=help_string, nargs='*', required=False)\r\n\r\n try:\r\n # Get arguments as dictionary.\r\n args = vars(parser.parse_args())\r\n\r\n # Initialize a parallel Route calculator class\r\n rt_calculator = ParallelRoutePairCalculator(**args)\r\n # Solve the Route in parallel chunks\r\n start_time = time.time()\r\n rt_calculator.solve_route_in_parallel()\r\n LOGGER.info(f\"Parallel Route calculation completed in {round((time.time() - start_time) / 60, 2)} minutes\")\r\n\r\n except Exception: # pylint: disable=broad-except\r\n LOGGER.error(\"Error in parallelization subprocess.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise", "def dist(a, b):\n base_url=\"https://route.api.here.com/routing/7.2/calculateroute.json?\"\n payload = {'app_id':HERE_ID, \n 'app_code':HERE_CODE,\n 'waypoint0':'geo!'+','.join([str(i) for i in a]),\n 'waypoint1':'geo!'+','.join([str(i) for i in b]),\n 'mode':'fastest;car;traffic:disabled',\n }\n resp = requests.get(base_url, params=payload)\n data = json.loads(resp.content)\n #import ipdb; ipdb.set_trace()\n summary = data['response']['route'][0]['summary']\n return {\"distance\" : summary['distance'], \n \"trafficTime\" : summary[\"trafficTime\"],\n \"baseTime\" : summary[\"baseTime\"]}", "def get_self_origin(self, routes):\n outroutes = []\n if len(routes) == 0:\n return routes\n\n for route in routes:\n if route[\"msg\"][\"selfOrigin\"]:\n outroutes.append(route)\n \n if len(outroutes) == 0:\n outroutes = routes\n \n return outroutes", "def Option3_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n return path_cost_with_concave_function, Opt_path\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_concave_function = 0\n return path_cost_with_concave_function, Opt_path", "def _map(inputs, n_units, connection_type):\n\tif len(inputs) == n_units:\n\t\toutput = [[inputs[i-1],inputs[i]] for i in range(n_units)]\n\telif len(inputs) < n_units:\n\t\toutput = []\n\t\tinput_dim = len(inputs)\n\t\t#Units that can connect staight forwards\n\t\toutput = [[inputs[i-1],inputs[i]] for i in range(input_dim)]\n\t\tn_units -= input_dim\n\t\tcurrent_input_index = 0\n\t\t#Begin successively connecting units to the least conneceted input\n\t\twhile n_units > 
0:\n\t\t\toutput.append([inputs[current_input_index], inputs[current_input_index]])\n\t\t\tn_units -= 1\n\t\t\tcurrent_input_index += 1\n\t\t\t#Run to end of list --> All units are equally connected. (Got to beginning)\n\t\t\tif current_input_index == input_dim:\n\t\t\t\tcurrent_input_index = 0\n\telse: #(len(inputs) > n_units)\n\t\tcollpase_units = len(inputs) - n_units\n\t\tif n_units == 1:\n\t\t\tpass\n\t\telif collpase_units % 2 == 1:\n\t\t\traise ValueError('Must be an even difference between previous layer and next layer')\n\t\toutput = [[inputs[2*i],inputs[2*i + 1]] for i in range(collpase_units)]\n\t\tfor i in range(2*collpase_units, n_units):\n\t\t\tif i == 2*collpase_units:\n\t\t\t\toutput.append([inputs[-1],inputs[i]])\n\t\t\telse:\n\t\t\t\toutput.append([inputs[i-1],inputs[i]])\n\tif connection_type == 'shuffle':\n\t\treturn _shuffle(output)\n\telse:\n\t\treturn output", "def routing_step(self, layout: dict, circuit: Circuit):\n\n _, final_mapping = self.routing_algorithm(circuit, layout)\n return final_mapping", "def _route_to_dest(self):\n # Ask the network\n self.route = self.network.determine_route(self.start, self.dest)\n # Set the index to where we are now\n self.route_index = 0" ]
[ "0.6572114", "0.64402664", "0.59652627", "0.5922691", "0.5920702", "0.5907399", "0.59004366", "0.58980155", "0.5884197", "0.58173287", "0.57752156", "0.57466954", "0.57233685", "0.5712399", "0.57113975", "0.57011473", "0.56868786", "0.56685567", "0.5667153", "0.5647773", "0.5605486", "0.5595886", "0.5592085", "0.5582648", "0.5551592", "0.5549158", "0.5522677", "0.54830205", "0.54818755", "0.5395321", "0.53893286", "0.53654003", "0.533934", "0.53393304", "0.53325164", "0.53195435", "0.5306127", "0.526093", "0.52558076", "0.52555287", "0.52490765", "0.52357984", "0.52272403", "0.5222721", "0.52016884", "0.5199761", "0.5163005", "0.5151626", "0.5144685", "0.51322764", "0.51094437", "0.50999993", "0.50946987", "0.50824183", "0.50776005", "0.50619453", "0.50593674", "0.5057641", "0.5046974", "0.50469273", "0.5045797", "0.5043329", "0.5036945", "0.50266194", "0.50208783", "0.5018812", "0.50164", "0.5012349", "0.4997355", "0.49929446", "0.49853575", "0.49609044", "0.4954701", "0.49543405", "0.49542183", "0.49517858", "0.4944907", "0.4937214", "0.4934022", "0.49321496", "0.49303183", "0.4906692", "0.4899488", "0.48869202", "0.4881746", "0.48812813", "0.4879912", "0.48679522", "0.48636717", "0.48589027", "0.48587388", "0.48518768", "0.48467314", "0.4844236", "0.48396248", "0.48392773", "0.48247617", "0.48176882", "0.48157358", "0.4813623" ]
0.48959836
83
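The row above pairs a parallel-routing CLI entry point with its retrieval score (0.48959836, rank 83). The script is normally driven through argparse; a minimal sketch of the equivalent programmatic call follows. The keyword names mirror the parser's dest values visible in the document above, while the concrete values (units, paths, counts) are placeholder assumptions, and any constructor arguments defined earlier in the full script are omitted here.

# Hypothetical programmatic use of the CLI entry point above.
# Keyword names follow the argparse "dest" values shown in the document;
# all concrete values are illustrative placeholders, not from the dataset.
args = {
    "distance_units": "Miles",
    "max_routes": 100,             # routes per parallel chunk
    "max_processes": 4,            # parallel Route solves
    "reverse_direction": False,    # travel origin -> destination
    "out_routes": r"C:\outputs\routes.gdb\out_routes",
    "scratch_folder": r"C:\outputs\scratch",
    "assigned_dest_field": None,   # only needed for one_to_one pair-type
    "od_pair_table": None,         # only needed for many_to_many pair-type
    "time_of_day": None,           # time-neutral analysis
    "barriers": [],
}
rt_calculator = ParallelRoutePairCalculator(**args)
rt_calculator.solve_route_in_parallel()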
Validate Route settings before spinning up a bunch of parallel processes doomed to failure. Also check which field name in the output OD Lines will store the optimized cost values. This depends on the travel mode being used by the analysis, and we capture it here to use in later steps.
def _validate_route_settings(self): # Create a dummy Route object and set properties. This allows us to # detect any errors prior to spinning up a bunch of parallel processes and having them all fail. LOGGER.debug("Validating Route settings...") rt = None try: rt = Route(**self.rt_inputs) rt.initialize_rt_solver() LOGGER.debug("Route settings successfully validated.") except Exception: LOGGER.error("Error initializing Route analysis.") errs = traceback.format_exc().splitlines() for err in errs: LOGGER.error(err) raise finally: if rt: LOGGER.debug("Deleting temporary test Route job folder...") # Close logging rt.teardown_logger() # Delete output folder shutil.rmtree(rt.job_result["jobFolder"], ignore_errors=True) del rt
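The method above follows a fail-fast pattern: build one throwaway Route solver in the parent process, let any configuration error surface immediately, and clean up the test job folder either way. A hedged sketch of how a caller might apply it before dispatching workers follows; the class and attribute names are assumed from the snippets in this row, and the chunking/dispatch details are elided.

# Illustrative only: validate once in the parent process, then fan out.
# ParallelRoutePairCalculator and _validate_route_settings are assumed
# from the surrounding snippets in this row.
calculator = ParallelRoutePairCalculator(**args)
calculator._validate_route_settings()   # raises before any worker starts
calculator.solve_route_in_parallel()    # safe to parallelize after validation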
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f_check_adr_parameters_correctness(dict):\n\n if int(dict[\"operation_mode_num\"]) not in (0, 1, 2, 3, 4, 5, 6):\n print('\\n Error!!! Operation mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"FFT_size_samples\"]) not in (2048, 4096, 8192, 16384, 32768):\n print('\\n Error!!! FFT size is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"spectra_averaging\"]) < 16 or int(dict[\"spectra_averaging\"]) > 32768:\n print('\\n Error!!! Spectra averaging number is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"start_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16): # 0 … (SFFT-1024)/1024\n print('\\n Error!!! Start frequency line is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"width_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16):\n print('\\n Error!!! Frequency width line is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"width_line_freq\"]) > ((int(dict[\"FFT_size_samples\"]) - int(dict[\"start_line_freq\"]) * 1024) / 1024): # 1 … (SFFT-SLINE*1024)/1024\n print('\\n Error!!! Frequency width is bigger than FFT size allows!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"clock_source\"]) not in (0, 1):\n print('\\n Error!!! Clock source is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"sum_diff_mode_num\"]) not in (0, 1):\n print('\\n Error!!! Sum-diff mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"data_file_size\"]) < -1 or int(dict[\"data_file_size\"]) > 4096:\n print('\\n Error!!! File size value is wrong!\\n')\n sys.exit(' Program stopped!')\n\n '''\n if (int(dict[\"chan_diff_delay\"]) < 0 or int(parameters_dict[\"chan_diff_dalay\"]) > 1024):\n print('\\n Error!!! Channel difference delay is wrong!\\n')\n sys.exit(' Program stopped!')\n '''\n\n # print('\\n ADR parameters from file are correct!\\n')\n\n return dict", "def check_violation(route, vehicle_type):\r\n if len(route) == 2: # [0, 0] route\r\n return True, 0, 0, 0\r\n else:\r\n accu_res = [0, 0, 0] # 0-leaving time, 1-accumulated distance, 2-volume\r\n if vehicle_type == 2:\r\n veh_cap = small_veh\r\n elif vehicle_type == 3:\r\n veh_cap = medium_veh\r\n elif vehicle_type == 5:\r\n veh_cap = large_veh\r\n else:\r\n veh_cap = large_veh\r\n print('Input wrong vehicle type!', vehicle_type)\r\n # small_veh = [1, 12, 10, 400000, 0.012, 200]\r\n fixed_cost = veh_cap[5]\r\n trans_cost = 0\r\n # wait_cost = 0\r\n if time_mat[0, route[1]] < num_timez[route[1]][0]:\r\n accu_res[0] = num_timez[route[1]][0] - time_mat[0, route[1]] # vehicle leaving depot time\r\n depart_time = accu_res[0] # departing from depot time\r\n else:\r\n depart_time = 0\r\n for i in range(len(route) - 1):\r\n last_cust = route[i]\r\n curr_cust = route[i+1]\r\n # checking leaving time\r\n arr_time = accu_res[0] + time_mat[last_cust, curr_cust]\r\n if arr_time < num_timez[curr_cust][0]:\r\n accu_res[0] = num_timez[curr_cust][0] + oprt_t\r\n wait_time = num_timez[curr_cust][0] - arr_time\r\n # wait_cost += (wait_time / 60. 
* wait_cost0)\r\n elif arr_time <= num_timez[curr_cust][1]:\r\n accu_res[0] = arr_time + oprt_t\r\n else:\r\n # print('Infeasible route!(Service Time Error.)')\r\n return False, 1000000, 0, 0\r\n\r\n # checking vehicle max distance\r\n trans_cost += (dist_mat[last_cust, curr_cust] * veh_cap[4])\r\n\r\n accu_res[1] += dist_mat[last_cust, curr_cust]\r\n\r\n if accu_res[0] - oprt_t - depart_time > veh_cap[3]:\r\n # print('Infeasible route!(Max Time Error.)')\r\n return False, 1000000, 0, 0\r\n\r\n # checking vehicle max volume\r\n accu_res[2] += (num_demd[curr_cust][0] * bskt_vol + num_demd[curr_cust][1] * trsf_vol + (num_demd[curr_cust][2]\r\n + num_demd[curr_cust][3]) * milk_vol + num_demd[curr_cust][4] * paper_bskt)\r\n\r\n if accu_res[2] > veh_cap[2]:\r\n # print('Infeasible route!(Max Weight/Volume Error.)', accu_res[2])\r\n return False, 1000000, 0, 0\r\n route_cost = fixed_cost + accu_res[1] * veh_cap[4]\r\n route_dist = accu_res[1]\r\n route_time = accu_res[0] - oprt_t - depart_time\r\n # print fixed_cost, trvl_cost, trvl_dist\r\n return True, route_cost, route_time, depart_time + 600", "def validate_params(self) -> None:\n # cap must be given when using logistic growth\n if (self.growth == \"logistic\") and (self.cap is False):\n msg = \"Capacity must be provided for logistic growth\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If custom_seasonalities passed, ensure they contain the required keys.\n reqd_seasonality_keys = [\"name\", \"period\", \"fourier_order\"]\n if not all(\n req_key in seasonality\n for req_key in reqd_seasonality_keys\n for seasonality in self.custom_seasonalities\n ):\n msg = f\"Custom seasonality dicts must contain the following keys:\\n{reqd_seasonality_keys}\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If extra_regressors passed, ensure they contain the required keys.\n all_regressor_keys = {\"name\", \"prior_scale\", \"mode\"}\n for regressor in self.extra_regressors:\n if not isinstance(regressor, dict):\n msg = f\"Elements in `extra_regressor` should be a dictionary but receives {type(regressor)}.\"\n _error_msg(msg)\n if \"name\" not in regressor:\n msg = \"Extra regressor dicts must contain the following keys: 'name'.\"\n _error_msg(msg)\n if not set(regressor.keys()).issubset(all_regressor_keys):\n msg = f\"Elements in `extra_regressor` should only contain keys in {all_regressor_keys} but receives {regressor.keys()}.\"\n _error_msg(msg)\n self._reqd_regressor_names = [\n regressor[\"name\"] for regressor in self.extra_regressors\n ]\n # check floor and cap\n if (self.cap is not False) and (\"cap\" not in self._reqd_cap_floor_names):\n self._reqd_cap_floor_names.append(\"cap\")\n if self.floor is not False and (\"floor\" not in self._reqd_cap_floor_names):\n self._reqd_cap_floor_names.append(\"floor\")", "def routeOptions(ORBITS, SET_OF_VEHICLES, CURRENT_WEATHER):\n OPTION = []\n for eachOrbit in ORBITS:\n if eachOrbit.traffic_speed == 0:\n print('Route {} Blocked'.format(eachOrbit.route_name))\n OPTION = None\n break\n else:\n for eachVehicle in SET_OF_VEHICLES:\n if eachVehicle.weather_suitability(CURRENT_WEATHER):\n eff_speed = min(eachVehicle.max_speed, eachOrbit.traffic_speed)\n time_taken = (eachOrbit.distance/eff_speed)+(eachVehicle.cross_crater*eachOrbit.craters*crater_factor(CURRENT_WEATHER)/60)\n time_taken = round(time_taken, 2)\n OPTION.append((eachVehicle.veh_type, eachOrbit.route_name, time_taken))\n else:\n continue\n return OPTION", "def _validate(self):\n _models = {'hrrr', 'hrrrak', 'rap'}\n _fields = {'prs', 'sfc', 
'nat', 'subh'}\n \n self.date = pd.to_datetime(self.date)\n \n if self.model == 'alaska':\n self.model == 'hrrrak'\n\n assert self.fxx in range(49), \"Forecast lead time `fxx` is too large\"\n assert self.model in _models, f\"`model` must be one of {_models}\"\n if self.model in ['hrrr', 'hrrrak']:\n assert self.field in _fields, f\"`field must be one of {_fields}\"\n else:\n # field is not needed for RAP model.\n self.field = ''\n \n if isinstance(self.priority, str):\n self.priority = [self.priority]\n \n self.priority = [i.lower() for i in self.priority]\n\n # Don't look for data from NOMADS if requested date is earlier\n # than yesterday. NOMADS doesn't keep data that old.\n if 'nomads' in self.priority:\n yesterday = datetime.utcnow() - timedelta(hours=24)\n yesterday = pd.to_datetime(f\"{yesterday:%Y-%m-%d}\")\n if self.date < yesterday:\n self.priority.remove('nomads')", "def validate(self):\n variables = ['bottomDepth', 'layerThickness', 'maxLevelCell',\n 'temperature', 'salinity']\n compare_variables(\n test_case=self, variables=variables,\n filename1='initial_state/initial_state.nc')\n\n variables = ['temperature', 'layerThickness']\n compare_variables(\n test_case=self, variables=variables,\n filename1='forward/output/output.0001-01-01_00.00.00.nc')\n\n if self.with_particles:\n # just do particle validation at coarse res\n variables = [\n 'xParticle', 'yParticle', 'zParticle', 'zLevelParticle',\n 'buoyancyParticle', 'indexToParticleID', 'currentCell',\n 'transfered', 'numTimesReset']\n compare_variables(test_case=self, variables=variables,\n filename1='forward/analysis_members/'\n 'lagrPartTrack.0001-01-01_00.00.00.nc')\n\n timers = ['init_lagrPartTrack', 'compute_lagrPartTrack',\n 'write_lagrPartTrack', 'restart_lagrPartTrack',\n 'finalize_lagrPartTrack']\n compare_timers(self, timers, rundir1='forward')", "def run_parameters_validations(self):\n if self.risk_rule:\n if 'connectApi' not in self.services:\n return_error(\"You entered a risk rule but the 'connectApi' service is not chosen. \"\n \"Add the 'connectApi' service to the list or remove the risk rule.\")\n else:\n for risk_rule in self.risk_rule:\n if not is_valid_risk_rule(self, risk_rule):\n return_error(f\"The given risk rule: {risk_rule} does not exist,\"\n f\"please make sure you entered it correctly. \\n\"\n f\"To see all available risk rules run the '!rf-get-risk-rules' command.\")\n\n if self.fusion_file_path is not None:\n if 'fusion' not in self.services:\n return_error(\"You entered a fusion file path but the 'fusion' service is not chosen. \"\n \"Add the 'fusion' service to the list or remove the fusion file path.\")", "def validate(self):\n self.__log('Validating whether all conditions are met.')\n if not self.config['OUT_FOLDER'] or not self.config['OUTPUT_FOLDER']:\n self.__log('The path to the output folder cannot be found.', 'error')\n raise FileNotFoundError\n\n try:\n if '.' 
in self.output_filename:\n self.__log('The output filename should not contain an extension.', 'error')\n raise ValueError\n except TypeError:\n pass\n\n if not self.output_filename:\n self.__log('The output filename has not been specified.', 'warning')\n self.output_filename = self.hash_time()\n i = 0\n while self.output_file_exists():\n self.__log('Adding a unique identifier to current filename.', 'warning')\n self.output_filename = self.output_filename + '-' + i\n i += 1\n self.__log(f'Continuing with file: \"{self.output_filename}\"', 'success')\n\n # Iterate over options to check for required parameters, as to not waste requests\n self.__log('Starting to check if all required parameters are set')\n for key, value in self.options.items():\n if key in self.config['REQUIRED_PARAMETERS'] and not value:\n self.__log(f'Missing a required parameter: {key}', 'error')\n raise MissingRequiredParameterError(key)\n\n self.__log('All validation successful.', 'success')", "def validateOutputParams(self):\n \n # success\n retval = 1\n if not self.__args.has_key('separationaxis'):\n return retval\n \n else:\n sepaxis = self.__args['separationaxis']\n\n # Task mstransform\n if self.__taskname == \"mstransform\":\n if sepaxis != 'scan' and (self.__args['combinespws'] == True or self.__args['nspw'] > 1):\n casalog.post('Cannot partition MS per spw or auto when combinespws = True or nspw > 1', 'WARN')\n retval = 0\n \n elif sepaxis != 'spw' and self.__args['timespan'] == 'scan':\n casalog.post('Time averaging across scans may lead to wrong results when separation axis is not spw', 'WARN')\n \n return retval", "def validateTradeRoute(self, tradeRouteDict):\n try:\n systemFrom = self.systems[tradeRouteDict['fromSystem']]\n systemTo = self.systems[tradeRouteDict['toSystem']]\n # has a trade route already been setup between these planets?\n (sysFrom, sysTo, type) = string.split(tradeRouteDict['id'], '-')\n # are these systems adjacent, or share a warp gate with a trade pact\n if systemTo.id in systemFrom.connectedSystems:\n pass\n elif systemTo.id in systemFrom.warpGateSystems:\n tempRoute = anwp.aw.traderoute.TradeRoute(tradeRouteDict)\n warpReq = tempRoute.getWarpRequired()\n if warpReq > (systemFrom.usedWGC + systemFrom.availWGC):\n return 'System:%s Requires %d Warp Capactiy to setup this Trade Route' % (systemFrom.name, warpReq)\n if warpReq > (systemTo.usedWGC + systemTo.availWGC):\n return 'System:%s Requires %d Warp Capactiy to setup this Trade Route' % (systemTo.name, warpReq)\n else:\n return 'Systems are not adjacent and have no warp gates between them'\n # do these systems share the same empire owner, or are the two empires in a trade pact?\n if systemFrom.myEmpireID <> systemTo.myEmpireID and anwp.func.globals.diplomacy[self.empires[systemFrom.myEmpireID].diplomacy[systemTo.myEmpireID].diplomacyID]['trade'] == 0:\n return 'System Owners are not the same, or no Trade Pact in Effect'\n # is a negative trade route being sent?\n if (tradeRouteDict['AL'] < 0 or tradeRouteDict['EC'] < 0 or tradeRouteDict['IA'] < 0):\n return 'you cannot send negative values in trade'\n # is something being sent?\n if (tradeRouteDict['AL'] == 0 and tradeRouteDict['EC'] == 0 and tradeRouteDict['IA'] == 0) and tradeRouteDict['type'] <> 'GEN':\n return 'no resources are being sent, trade route invalid'\n # does the system have the resources to setup this trade?\n if (systemFrom.AL < tradeRouteDict['AL'] or systemFrom.EC < tradeRouteDict['EC'] or\n systemFrom.IA < tradeRouteDict['IA']):\n return '%s does not have enough 
resources to setup this trade route' % systemFrom.name\n return 1\n except:\n return 'galaxy->validateTradeRoute error'", "def check_params(self):\r\n \r\n # TODO: More cases?\r\n\r\n if self.N <= 0:\r\n print('Bad Parameter: N')\r\n \r\n if self.Ha_tally <= 0 or self.Ha_tally > self.N:\r\n print('Bad Parameter: Reported winner tally')\r\n \r\n if len(self.round_sched) < 1 or not self.check_inc_sched(self.round_sched):\r\n print('Bad Parameter: Round Schedule')\r\n\r\n if self.alpha <= 0 or self.alpha >= .5:\r\n print('Bad Parameter: Alpha')", "def check_kpts(self):\n if 'fleurinp' in self.ctx.inputs:\n fleurinp = self.ctx.inputs.fleurinp\n else:\n fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.parent_folder)\n\n only_even_MPI = self.inputs.add_comp_para['only_even_MPI']\n forbid_single_mpi = self.inputs.add_comp_para['forbid_single_mpi']\n try:\n machines, mpi_tasks, omp_threads, message = optimize_calc_options(self.ctx.num_machines,\n self.ctx.num_mpiprocs_per_machine,\n self.ctx.num_cores_per_mpiproc,\n self.ctx.use_omp,\n self.ctx.suggest_mpi_omp_ratio,\n fleurinp,\n only_even_MPI=only_even_MPI,\n forbid_single_mpi=forbid_single_mpi)\n except ValueError as exc:\n self.report(exc)\n return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES\n\n self.report(message)\n\n self.ctx.inputs.metadata.options['resources']['num_machines'] = machines\n self.ctx.inputs.metadata.options['resources']['num_mpiprocs_per_machine'] = mpi_tasks\n if self.ctx.use_omp:\n self.ctx.inputs.metadata.options['resources']['num_cores_per_mpiproc'] = omp_threads\n if 'environment_variables' not in self.ctx.inputs.metadata.options:\n self.ctx.inputs.metadata.options['environment_variables'] = {}\n self.ctx.inputs.metadata.options['environment_variables']['OMP_NUM_THREADS'] = str(omp_threads)", "def test_validate(self):\n assert \"skip_validation\" not in self.route.route\n\n route = self.route.validate()\n assert \"skip_validation\" not in route.route\n\n route = self.route.validate(False)\n assert \"skip_validation\" in route.route", "def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):\n \n waypoint_co2 = {}\n waypoint_durations = {}\n\n # get all prefectures referential\n db_connector = Connector()\n with db_connector:\n results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)\n all_waypoints = pd.DataFrame(results.fetchall())\n\n # Vérification si les trajets péfecture à préfecture ont été déjà calculés\n db_connector = Connector()\n with db_connector:\n saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n # Dans le précalcul des trajets optimaux, utilisation de la date courante\n travel_date = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n bad_waypoints = []\n\n if saved_waypoints.rowcount > 0 and not is_force_compute:\n print(\"le référentiel des voyage existe déjà\")\n else:\n try:\n bdd_management.truncate_journey()\n\n for (from_city, to_city) in combinations(all_waypoints[0].values, 2):\n try:\n if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:\n continue\n\n route = requests.get(API_NAVITIA.format(\n int(from_city), int(to_city), travel_date, API_KEY))\n response = json.loads(route.text)\n\n mid_duration = 0\n mid_co2 = 0\n for journey in response[\"journeys\"]:\n mid_duration += journey[\"duration\"]\n mid_co2 += journey[\"co2_emission\"][\"value\"]\n\n waypoint_co2[frozenset([from_city, to_city])\n ] = mid_co2/len(response[\"journeys\"])\n waypoint_durations[frozenset(\n [from_city, to_city])] = 
mid_duration/len(response[\"journeys\"])\n\n except Exception as e:\n print(\"Error with finding the route between %s and %s : %s\" %\n (from_city, to_city, response[\"error\"][\"message\"]))\n if 'no destination point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(to_city))\n\n if 'no origin point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(from_city))\n\n for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response[\"error\"][\"message\"]):\n if not int(bad_insee_code) in bad_waypoints:\n bad_waypoints.append(int(bad_insee_code))\n\n # Enregistrement des trajets point à point (préfecture à préfecture)\n db_connector = Connector()\n with db_connector:\n for (waypoint1, waypoint2) in waypoint_co2.keys():\n waypoint = [waypoint1,\n waypoint2,\n str(waypoint_co2[frozenset([waypoint1, waypoint2])]),\n str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]\n \n db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)\n # commit trajets unitaires dans la bdd\n db_connector.commit()\n\n # enregistrement des préfectures non trouvée (pas de gare)\n print(bad_waypoints)\n db_connector = Connector()\n with db_connector:\n for bad_city in bad_waypoints:\n db_connector.execute_nonquery(\n sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))\n #db_connector.commit()\n except Exception as e:\n print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')\n\n waypoint_co2 = {}\n waypoint_durations = {}\n processed_waypoints = set()\n\n db_connector = Connector()\n with db_connector:\n waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n for row in waypoints:\n waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]\n waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]\n processed_waypoints.update([row[0], row[1]])\n\n travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )\n\n # take most represented trip order\n journey_groups = Counter(chain(*travel_results))\n top_journeys = journey_groups.most_common(1)[0][0]\n\n print('Le voyage le plus représentatif est :')\n print(top_journeys)\n\n # calcul des horaires de voyage réels pour le trajet le plus optimisé\n\n print('Départ du calcul du voyage le %s' %\n (datetime_str_to_datetime_str(trip_start_date)))\n travel_date = trip_start_date\n\n db_connector = Connector()\n with db_connector:\n try:\n #vidage de la table contenant les informations du voyage\n bdd_management.truncate_roadtrip()\n\n for i in range(len(top_journeys)-1):\n try:\n from_city_insee = top_journeys[i]\n to_city_insee = top_journeys[i+1]\n route = requests.get(API_NAVITIA.format(\n int(from_city_insee), int(to_city_insee), travel_date, API_KEY))\n travels = json.loads(route.text)\n\n # Contrôle des voyage reçus pour identifier le plus adapté à recherche\n best_travel = travels[\"journeys\"][0]\n for travel in travels[\"journeys\"]:\n if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):\n best_travel = travel\n if best_travel['arrival_date_time'] > travel['arrival_date_time']:\n best_travel = travel\n\n # sauvegarde du trajet 'i' en base\n save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)\n\n # le prochain trajet devra avoir une date de départ > à la date de ce trajet\n travel_date = best_travel['arrival_date_time']\n\n except 
Exception as e:\n print(\"!! Erreur durant le calcul du trajet entre '%s' et '%s'\" %\n (from_city_insee, to_city_insee))\n\n #Ecriture du résumé du voyage\n resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)\n resume = resume.fetchone()\n\n resume_description = \"\"\"Début du voyage le {} . Arrivée le {}. \n Le voyage à durée {} pour un total de {:d} kgeC\"\"\".format(\n datetime_str_to_datetime_str(trip_start_date),\n datetime_str_to_datetime_str(travel_date),\n str(timedelta(seconds=resume[0])) ,\n trunc( resume[1]/1000))\n\n store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])\n\n db_connector.commit()\n\n except Exception as e:\n db_connector.rollback()\n print('Erreur durant la création du voyage. rollback effectué!!!')\n\n print('print map with road-trip data')\n visualization.generate_visualization()\n\n print('Travel complete. Have nive trip!!!')", "def _check_log_params(self):\n steps_per_stats = self.configs['steps_per_stats']\n if not steps_per_stats or steps_per_stats < 0:\n steps_per_stats = 100\n steps_per_eval = self.configs['steps_per_eval']\n if not steps_per_eval:\n steps_per_eval = 10 * steps_per_stats\n steps_per_external_eval = self.configs['steps_per_external_eval']\n if not steps_per_external_eval:\n steps_per_external_eval = 5 * steps_per_eval\n self.configs['steps_per_stats'] = steps_per_stats\n self.configs['steps_per_eval'] = steps_per_eval\n self.configs['steps_per_external_eval'] = steps_per_external_eval", "def _data_params_validation(self) -> None:\n extra_regressor_names = set(self.params._reqd_regressor_names)\n # univariate case\n if self.data.is_univariate():\n if len(extra_regressor_names) != 0:\n msg = (\n f\"Missing data for extra regressors: {self.params._reqd_regressor_names}! 
\"\n \"Please include the missing regressors in `data`.\"\n )\n raise ValueError(msg)\n # multivariate case\n else:\n value_cols = set(self.data.value.columns)\n if \"y\" not in value_cols:\n msg = \"`data` should contain a column called `y` representing the responsive value.\"\n raise ValueError(msg)\n if not extra_regressor_names.issubset(value_cols):\n msg = f\"`data` should contain all columns listed in {extra_regressor_names}.\"\n raise ValueError(msg)\n # validate cap\n if (self.params.cap is True) and (\"cap\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `cap` representing the cap when `cap = True`.\"\n _error_msg(msg)\n # validate floor\n if (self.params.floor is True) and (\"floor\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `floor` representing the floor when `floor = True`.\"\n _error_msg(msg)", "def check_params():\n print INITIAL_SCHEDULE\n print SCHEDULE_SA[:10]\n print CONTROL_DECISIONS\n\n print \"T_SLOT= %f\" % T_SLOT\n print \"R=%f\" % R\n print \"A=%f\" % A\n print \"A_IO=%f\" % A_IO\n print \"Q_INT=%d\" % Q_INT", "def updateParameters(self):\r\n\r\n\t\tif self.approach.altered:\r\n\t\t\tself.transform.enabled = True\r\n\r\n\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\tself.predefined_pattern.enabled = False\r\n\t\t\t\tself.pattern_workspace.enabled = False\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telif self.approach.value == 'Locations in the DEM versus pre-defined pattern':\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = True\r\n\t\t\t\tself.mapping_field.enabled = True\r\n\t\t\t\tself.move_to_max.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = True\r\n\t\t\t\tself.output_table.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_raster_workspace.enabled = False\r\n\t\t\t\tself.output_raster_workspace.value = ''\r\n\r\n\t\t\telse: # seek pre-defined pattern in DEM\r\n\t\t\t\tself.predefined_pattern.enabled = True\r\n\t\t\t\tself.point_matrix_size.enabled = True\r\n\t\t\t\tself.mh_iteration.enabled = True\r\n\t\t\t\tself.output_raster_workspace.enabled = True\r\n\t\t\t\tself.point_vectors.enabled = False\r\n\t\t\t\tself.point_vectors.value = ''\r\n\t\t\t\tself.mapping_field.enabled = False\r\n\t\t\t\tself.move_to_max.enabled = False\r\n\t\t\t\tself.move_to_max.value = False\r\n\t\t\t\tself.mh_dil_val.enabled = True\r\n\t\t\t\tself.output_sim_matrix.enabled = False\r\n\t\t\t\tself.output_sim_matrix.value = ''\r\n\t\t\t\tself.output_table.enabled = False\r\n\t\t\t\tself.output_table.value = ''\r\n\r\n\t\tif self.mh_iteration.altered:\r\n\r\n\t\t\tif self.mh_iteration.value is True:\r\n\t\t\t\tself.mh_dil_start.enabled = True\r\n\t\t\t\tself.mh_dil_stop.enabled = True\r\n\t\t\t\tself.mh_dil_step.enabled = True\r\n\t\t\t\tself.mh_dil_val.enabled = 
False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\r\n\t\t\telse:\r\n\t\t\t\tif self.approach.value == 'Locations in the DEM generated from field observations':\r\n\t\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.mh_dil_val.enabled = True\r\n\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\r\n\t\tif self.move_to_max.altered:\r\n\t\t\tif self.move_to_max.value is True:\r\n\t\t\t\tself.move_to_max_distance.enabled = True\r\n\t\t\telse:\r\n\t\t\t\tself.move_to_max_distance.enabled = False\r\n\t\t\t\tself.move_to_max_distance.value = 3\r\n\r\n\t\tif self.transform.altered:\r\n\t\t\tif self.transform.value == 'Work directly on the elevation matrix':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value == 'Perform a local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = False\r\n\t\t\telif self.transform.value == 'Compute slopes' or self.transform.value == \\\r\n\t\t\t\t\t'Compute slopes and perform local translation':\r\n\t\t\t\tself.size_of_the_cell.enabled = True\r\n\r\n\t\tif self.predefined_pattern.altered:\r\n\t\t\tif self.predefined_pattern.value == 'Custom pattern':\r\n\t\t\t\tself.pattern_workspace.enabled = True\r\n\r\n\t\t\t\tself.mh_iteration.value = False\r\n\t\t\t\tself.mh_iteration.enabled = False\r\n\t\t\t\tself.mh_dil_start.enabled = False\r\n\t\t\t\tself.mh_dil_stop.enabled = False\r\n\t\t\t\tself.mh_dil_step.enabled = False\r\n\t\t\t\tself.mh_dil_start.value = 0.01\r\n\t\t\t\tself.mh_dil_stop.value = 1\r\n\t\t\t\tself.mh_dil_step.value = 0.1\r\n\t\t\t\tself.mh_dil_val.enabled = False\r\n\t\t\t\tself.mh_dil_val.value = 1\r\n\t\t\telse:\r\n\t\t\t\tself.pattern_workspace.enabled = False", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def update_route(self, vrpdata):\n self.distance = 0\n self.quantity = 0\n self.tourValid = False\n lastc = 0 # first entry is depot\n for c in self.route:\n self.distance += vrpdata.DistMatrix[lastc][c]\n self.quantity += vrpdata.CustDem[c]\n lastc = c\n self.distance += vrpdata.DistMatrix[lastc][0] # last entry is depot\n self.tourValid = (self.quantity <= vrpdata.MaxVehCap)", "def validateInputParams(self): \n # Return 
dictionary\n retval = {}\n retval['status'] = True\n retval['axis'] = ''\n \n # Get the separationaxis of input MMS. \n sepaxis = ph.axisType(self.__args['vis'])\n if sepaxis.isspace() or sepaxis.__len__() == 0:\n sepaxis = 'unknown'\n elif sepaxis == 'scan,spw':\n sepaxis = 'auto'\n \n #Get list of subMSs in MMS\n subMSList = ParallelTaskHelper.getReferencedMSs(self.__args['vis'])\n \n if self.__taskname == \"mstransform\":\n \n if (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == False):\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n # Get dictionary with spwids of all subMS in the MMS\n spwdict = ph.getScanSpwSummary(subMSList) \n # For each subMS, check if it has the spw selection\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine or separate spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'scan'\n break\n \n elif (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan') and \\\n (self.__args['combinespws'] == False and self.__args['nspw'] == 1):\n # Get the value of timebin as a float\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n # For each subms, check if scans length is <= timebin\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when timespan=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'spw'\n break\n \n # Two transformations are requested.\n elif (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan'):\n # Check spws and scans in subMSs\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw'])\n spwdict = ph.getScanSpwSummary(subMSList) \n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n if self.__isSpwContained(spwsel, slist):\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('The subMSs of input MMS do not contain the necessary scans','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break \n else:\n casalog.post('The subMSs of input MMS do not contain the necessary spws','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break\n \n \n elif self.__taskname == \"split2\" or self.__taskname == \"split\": \n if (sepaxis != 'spw' and self.__args['combine'] == 'scan'):\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when combine=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this 
case.\",'ERROR')\n retval['status'] = False\n retval['axis'] = ''\n break\n\n elif self.__taskname == \"cvel2\" and sepaxis != 'scan':\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n spwdict = ph.getScanSpwSummary(subMSList) \n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this case.\",'ERROR')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = ''\n break\n \n\n return retval", "def validate(self):\n variables = ['bottomDepth', 'ssh', 'layerThickness', 'zMid',\n 'maxLevelCell', 'temperature', 'salinity']\n compare_variables(\n test_case=self, variables=variables,\n filename1='initial_state/initial_state.nc')\n\n variables = ['temperature', 'salinity', 'layerThickness',\n 'normalVelocity']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/output.nc',\n filename2='restart_run/output.nc')\n\n variables = ['ssh', 'landIcePressure', 'landIceDraft',\n 'landIceFraction',\n 'landIceMask', 'landIceFrictionVelocity', 'topDrag',\n 'topDragMagnitude', 'landIceFreshwaterFlux',\n 'landIceHeatFlux', 'heatFluxToLandIce',\n 'landIceBoundaryLayerTemperature',\n 'landIceBoundaryLayerSalinity',\n 'landIceHeatTransferVelocity',\n 'landIceSaltTransferVelocity',\n 'landIceInterfaceTemperature',\n 'landIceInterfaceSalinity', 'accumulatedLandIceMass',\n 'accumulatedLandIceHeat']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/land_ice_fluxes.nc',\n filename2='restart_run/land_ice_fluxes.nc')\n\n variables = ['accumulatedFrazilIceMass',\n 'accumulatedFrazilIceSalinity',\n 'seaIceEnergy', 'frazilLayerThicknessTendency',\n 'frazilTemperatureTendency', 'frazilSalinityTendency',\n 'frazilSurfacePressure',\n 'accumulatedLandIceFrazilMass']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/frazil.nc',\n filename2='restart_run/frazil.nc')", "def validate_inputs(self):\n self.ctx.inputs = AttributeDict(self.exposed_inputs(FleurCalculation))\n\n self.ctx.max_queue_nodes = self.inputs.add_comp_para['max_queue_nodes']\n self.ctx.max_queue_wallclock_sec = self.inputs.add_comp_para['max_queue_wallclock_sec']\n\n input_options = self.inputs.options.get_dict()\n self.ctx.optimize_resources = input_options.pop('optimize_resources', True)\n self.ctx.inputs.metadata.options = input_options\n\n if 'description' in self.inputs:\n self.ctx.inputs.metadata.description = self.inputs.description\n else:\n self.ctx.inputs.metadata.description = ''\n if 'label' in self.inputs:\n self.ctx.inputs.metadata.label = self.inputs.label\n else:\n self.ctx.inputs.metadata.label = ''\n\n if not self.ctx.optimize_resources:\n self.ctx.can_be_optimised = False # set this for handlers to not change resources\n return\n\n resources_input = self.ctx.inputs.metadata.options['resources']\n try:\n self.ctx.num_machines = int(resources_input['num_machines'])\n self.ctx.num_mpiprocs_per_machine = int(resources_input['num_mpiprocs_per_machine'])\n except KeyError:\n self.ctx.can_be_optimised = False\n self.report('WARNING: Computation resources were not optimised.')\n else:\n try:\n self.ctx.num_cores_per_mpiproc = int(resources_input['num_cores_per_mpiproc'])\n 
self.ctx.use_omp = True\n self.ctx.suggest_mpi_omp_ratio = self.ctx.num_mpiprocs_per_machine / self.ctx.num_cores_per_mpiproc\n except KeyError:\n self.ctx.num_cores_per_mpiproc = 1\n self.ctx.use_omp = False\n self.ctx.suggest_mpi_omp_ratio = 1\n\n status = self.check_kpts()\n if status is None:\n self.ctx.can_be_optimised = True\n else:\n self.report('ERROR: Not optimal computational resources.')\n return status", "def directions_calc(self):\n \n # create route_dict, {'radio_button_name': {'geometries': list of coords,\n # 'values': list of values}}\n route_dict = self._selectInput()\n \n # generate lists with locations and values\n (start_layer_name,\n end_layer_name) = [x.objectName() for x in self.radio_buttons]\n \n locations_list = list(product(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n values_list = list(product(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n \n # If row-by-row in two-layer mode, then only zip the locations\n if all([button.isChecked() for button in self.radio_buttons]) and self.dlg.routing_twolayer_rowbyrow.isChecked():\n locations_list = list(zip(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n\n values_list = list(zip(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n\n # Add via point if specified\n route_via = None\n if self.dlg.routing_via_label.text() != 'Long,Lat':\n route_via = [float(x) for x in self.dlg.routing_via_label.text().split(\",\")]\n \n message_bar, progress_widget = progressbar.pushProgressBar(self.iface)\n \n responses = []\n delete_values = []\n for i, coords_tuple in enumerate(locations_list):\n if coords_tuple[0] == coords_tuple[-1]:\n # Skip when same location\n delete_values.append(i)\n continue\n if route_via:\n # add via coords\n coords_tuple = list(coords_tuple)\n coords_tuple.insert(1, route_via)\n \n # Update progress bar\n percent = (i/len(locations_list)) * 100\n message_bar.setValue(percent)\n \n # Make the request\n self.params['coordinates'] = convert.build_coords(coords_tuple)\n responses.append(self.client.request(self.url, self.params))\n \n # Delete entries in values_list where coords where the same\n values_list = [value for idx, value in enumerate(values_list) if idx not in delete_values]\n \n # Only proceed when there actual responses\n if responses: \n layer_out = self._addLine(responses, values_list)\n layer_out.updateExtents()\n \n QgsProject.instance().addMapLayer(layer_out)\n \n self.iface.messageBar().popWidget(progress_widget)", "def test_parse_routes(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n problem = problem_builder.build(riders, vehicles, depots)\n model = model_builder.build(problem)\n solution = model.solve()\n routes = Router._parse_routes(problem, solution)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def analyseCoordination(self):\n #create a list of criteria that correspond to maximal path length\n 
#max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))", "def validate_parameters(self):\n\n # env and fixed_env\n self._validate_envs()\n # checking optional data and scripts\n self._validate_download_data()\n self.data_path = self.params[\"data\"][\"location\"]\n self._validate_scripts()\n # checking optional data_ref (if not data_ref provided, path is the same as data path)\n if \"data_ref\" in self.params:\n self._validate_download_data(data_nm=\"data_ref\")\n# self.data_ref_path = self.params[\"data_ref\"][\"location\"]\n# else:\n# self.data_ref_path = self.data_path\n # checking analysis\n self._validate_analysis()\n # checking tests\n self._validate_tests()\n\n self.params.setdefault(\"post_build\", None)\n # if copy in post_build part that I'm changing the build_context\n if self.params[\"post_build\"] and \"copy\" in self.params[\"post_build\"]:\n self.build_context = self.workflow_path\n else:\n self.build_context = self.working_dir\n\n self.params.setdefault(\"plots\", 
[])\n if self.params[\"plots\"]:\n if not isinstance(self.params[\"plots\"], (list, tuple)):\n raise SpecificationError(\n \"Value of key 'plots' must be a list or a tuple\"\n )\n else:\n if any(not isinstance(j, dict) for j in self.params[\"plots\"]):\n raise SpecificationError(\n \"Every item in 'plots' must be a dictionary.\"\n )", "def route_validity_checker(): #below route list was rerturned from bus_routes function above, copy and pasted to eliminate need to re-run\n route_list=['15', '46A', '14', '41B', '39A', '65', '40D', '11', '31', '27', '67', '79', '42', '66A', '33B', '140', '44', '83A', '27B', '38', '16C', '747', '41C', '39', '25', '239', '43', '70', '13', '150', '145', '77A', '184', '84', '61', '83', '40', '66', '15A', '123', '17A', '16', '14C', '9', '4', '37', '32', '33', '49', '56A', '151', '25A', '45A', '54A', '47', '18', '7', '17', '102', '120', '65B', '41', '122', '29A', '76', '68', '59', '25B', '69', '27A', '66B', '38B', '7D', '75', '15B', '84A', '63', '84X', '33X', '68A', '1', '76A', '7B', '270', '236', '130', '238', '220', '44B', '40B', '26', '32B', '8', '41A', '53', '67X', '104', '32A', '79A', '114', '185', '66X', '31B', '32X', '51X', '51D', '41X', '142', '111', '69X', '27X', '116', '46E', '161', '118', '25X', '38A', '33A', 'PP07', '53B', '31A', 'OL84']\n count_dict={}\n for route in route_list: #dictionary with key for every route in the list\n count_dict[route]=0 #used to count number of occurrences in files\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"): #for every file\n print(file)\n reader=csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3])\n if route!=\"\":\n count_dict[extract_bus_route(line[3])]+=1 #incremenent the counter of the route with the associated journey id code\n return count_dict #result is that 3 routes are likely due to strange circumstances or errors in data", "def initialize_rt_solver(self):\r\n # For a local network dataset, we need to checkout the Network Analyst extension license.\r\n if not self.is_service:\r\n arcpy.CheckOutExtension(\"network\")\r\n\r\n # Create a new Route object\r\n self.logger.debug(\"Creating Route object...\")\r\n self.rt_solver = arcpy.nax.Route(self.network_data_source)\r\n\r\n # Set the Route analysis properties.\r\n # Read properties from the rt_config.py config file for all properties not set in the UI as parameters.\r\n # Route properties documentation: https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route.htm\r\n # The properties have been extracted to the config file to make them easier to find and set so users don't have\r\n # to dig through the code to change them.\r\n self.logger.debug(\"Setting Route analysis properties from RT config file...\")\r\n for prop, value in RT_PROPS.items():\r\n if prop in RT_PROPS_SET_BY_TOOL:\r\n self.logger.warning((\r\n f\"Route config file property {prop} is handled explicitly by the tool parameters and will be \"\r\n \"ignored.\"\r\n ))\r\n continue\r\n try:\r\n setattr(self.rt_solver, prop, value)\r\n if hasattr(value, \"name\"):\r\n self.logger.debug(f\"{prop}: {value.name}\")\r\n else:\r\n self.logger.debug(f\"{prop}: {value}\")\r\n except Exception as ex: # pylint: disable=broad-except\r\n # Suppress warnings for older services (pre 11.0) that don't support locate settings and services\r\n # that don't support accumulating attributes because we don't want the tool to always throw a warning.\r\n if not (self.is_service and prop in [\r\n \"searchTolerance\", \"searchToleranceUnits\", \"accumulateAttributeNames\"\r\n 
]):\r\n self.logger.warning(\r\n f\"Failed to set property {prop} from RT config file. Default will be used instead.\")\r\n self.logger.warning(str(ex))\r\n # Set properties explicitly specified in the tool UI as arguments\r\n self.logger.debug(\"Setting Route analysis properties specified tool inputs...\")\r\n self.rt_solver.travelMode = self.travel_mode\r\n self.logger.debug(f\"travelMode: {self.travel_mode}\")\r\n self.rt_solver.timeUnits = self.time_units\r\n self.logger.debug(f\"timeUnits: {self.time_units}\")\r\n self.rt_solver.distanceUnits = self.distance_units\r\n self.logger.debug(f\"distanceUnits: {self.distance_units}\")\r\n self.rt_solver.timeOfDay = self.time_of_day\r\n self.logger.debug(f\"timeOfDay: {self.time_of_day}\")", "def validateProcess(process):\n \n schedule=process.schedule_()\n paths=process.paths_()\n endpaths=process.endpaths_()\n \n # check output mods are in paths and have appropriate settings\n for outputModName in process.outputModules_().keys():\n outputMod = getattr(process, outputModName)\n if not hasattr(outputMod, 'dataset'):\n msg = \"Process contains output module without dataset PSET: %s \\n\" % outputModName\n msg += \" You need to add this PSET to this module to set dataTier and filterName\\n\"\n raise RuntimeError(msg)\n ds=getattr(outputMod,'dataset')\n if not hasattr(ds, \"dataTier\"):\n msg = \"Process contains output module without dataTier parameter: %s \\n\" % outputModName\n msg += \" You need to add an untracked parameter to the dataset PSET of this module to set dataTier\\n\"\n raise RuntimeError(msg)\n\n # check module in path or whatever (not sure of exact syntax for endpath)\n omRun=False\n\n if schedule==None:\n for path in paths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n for path in endpaths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n else:\n for path in schedule:\n if outputModName in path.moduleNames():\n omRun=True\n if omRun==False:\n msg = \"Output Module %s not in endPath\" % outputModName\n raise RuntimeError(msg)", "def __init__(self, runway_type):\n self.primary_surface_length = 200\n self.primary_surface_width = 0\n self.approach_surface_extendedwidth = 0\n self.first_section_length = 0\n self.first_section_slope = 0\n self.second_section_length = 0\n self.second_section_slope = 0\n self.horizontal_surface_height = 150\n self.horizontal_surface_radius = 0\n self.conical_surface_slope = 20\n self.conical_surface_offset = 4000\n self.transitional_surface_slope = 7\n \n # The runway types listed in the documentation for FAA FAR 77 do not \n # match what appears when you actually run the tool in ArcMap.\n # These regular expressions should match either version. 
\n if re.match(\"Visual\\s*(?:Runway)?\\s*Visual\\sApproach\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 1500\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Utility\\s*(?:Runway)?\\s*Visual Approach\", runway_type, re.I):\n self.primary_surface_width = 250\n self.approach_surface_extendedwidth = 1250\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Utility\\s*(?:Runway)?\\s*Non[\\s\\-]*Precision Instrument Approach\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 2000\n self.first_section_length = 5000\n self.first_section_slope = 20\n self.horizontal_surface_radius = 5000\n elif re.match(\"Precision Instrument\\s*(?:Runway)?\", runway_type, re.I):\n self.primary_surface_width = 1000\n self.approach_surface_extendedwidth = 16000\n self.first_section_length = 10000\n self.first_section_slope = 50\n self.second_section_length = 40000\n self.second_section_slope = 40\n self.horizontal_surface_radius = 10000\n elif re.match(\"Non Precision Instrument\\s*(?:Runway)?\\s*(?:(?:High)|(?:Greater)) Visibility\", runway_type, re.I):\n self.primary_surface_width = 500\n self.approach_surface_extendedwidth = 3500\n self.first_section_length = 10000\n self.first_section_slope = 34\n self.horizontal_surface_radius = 10000\n elif re.match(\"Non Precision Instrument\\s*(?:Runway)\\s*Approach Low Visibility\", runway_type, re.I):\n self.primary_surface_width = 1000\n self.approach_surface_extendedwidth = 4000\n self.first_section_length = 10000\n self.first_section_slope = 34\n self.horizontal_surface_radius = 10000", "def testoptdone(self):\r\n assert self.data.optdone\r\n assert numpy.all(numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets)", "def validate(self):\n valid = True\n \n # Check that link information is valid\n for ij in self.link:\n valid = valid and self.link[ij].head in self.node\n valid = valid and self.link[ij].tail in self.node\n if not valid:\n print(\"Error: Link tail/head not found: %s %s\" % (self.link[ij].tail, self.link[ij].head))\n raise utils.BadFileFormatException\n valid = valid and self.link[ij].capacity >= 0\n valid = valid and self.link[ij].length >= 0\n valid = valid and self.link[ij].freeFlowTime >= 0\n valid = valid and self.link[ij].alpha >= 0\n valid = valid and self.link[ij].beta >= 0\n valid = valid and self.link[ij].speedLimit >= 0\n valid = valid and self.link[ij].toll >= 0\n if not valid:\n print(\"Link %s has negative parameters.\" % ij)\n \n # Then check that all OD pairs are in range\n for ODpair in self.ODpair:\n (origin, destination) = (self.ODpair[ODpair].origin, self.ODpair[ODpair].destination)\n valid = valid and origin in self.node\n valid = valid and destination in self.node\n if not valid:\n print(\"Error: Origin/destination %s not found\" % ODpair)\n raise utils.BadFileFormatException\n valid = valid and self.node[origin].isZone == True\n valid = valid and self.node[destination].isZone == True\n if not valid:\n print(\"Error: Origin/destination %s does not connect two zones\" % str(ODpair))\n raise utils.BadFileFormatException\n valid = valid and self.ODpair[ODpair].demand >= 0\n if not valid:\n print(\"Error: OD pair %s has negative demand\" % ODpair)\n raise utils.BadFileFormatException\n \n # Now error-check using metadata\n if self.numNodes != None and len(self.node) != self.numNodes:\n 
print(\"Warning: Number of nodes implied by network file %d different than metadata value %d\" % (len(self.node), self.numNodes))\n self.numNodes = len(self.node)\n if self.numLinks != None and len(self.link) != self.numLinks:\n print(\"Warning: Number of links given in network file %d different than metadata value %d\" % (len(self.link), self.numLinks))\n self.numLinks = len(self.link)\n if self.numZones != None and len([i for i in self.node if self.node[i].isZone == True]) != self.numZones:\n print(\"Warning: Number of zones given in network file %d different than metadata value %d\" % (len([i for i in self.node if self.node[i].isZone == True]), self.numZones))\n self.numLinks = len(self.link)\n if self.totalDemandCheck != None:\n if self.totalDemand != self.totalDemandCheck:\n print(\"Warning: Total demand is %f compared to metadata value %f\" % ( self.totalDemand, self.totalDemandCheck))", "def consolidation_heuristics(to_print = False):\n # Instantiate the data problem.\n data = create_data_model()\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n def pending_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['post'][to_node]\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n pending_callback_index = routing.RegisterTransitCallback(pending_callback)\n # Define cost of each arc.\n for i in range(data['num_vehicles']-1):\n routing.SetArcCostEvaluatorOfVehicle(transit_callback_index, i) #Transit cost\n routing.SetFixedCostOfVehicle(data['fixed_cost'], i) #Fixed cost\n routing.SetArcCostEvaluatorOfVehicle(pending_callback_index, data['num_vehicles']-1) #Postponement and/or NonService cost\n # Add Capacity constraint.\n def demand_callback(from_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index) \n return data['demands'][from_node]\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n # Add time constraint.\n def time_callback(from_index,to_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to NodeIndex in time\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return time_matrix[from_node][to_node] \n time_callback_index = routing.RegisterTransitCallback(time_callback) \n routing.AddDimensionWithVehicleCapacity(\n time_callback_index,\n 0, # null capacity slack\n data['time_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Time')\n # Setting solution heuristic-procedure.\n search_parameters = 
pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 5 #10 # 60 #20 #3000\n search_parameters.log_search = True\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n # Print solution on console.\n if assignment:\n sent, sol_results, routes_results = print_solution(data, manager, routing, assignment) \n return sent, sol_results, routes_results", "def verify_pln(self):\n\n warnings = []\n\n self._populate_uncertainties()\n\n # The transitref and transiturl actually end up stored in the 'transit'\n # ExoParam due to the ref and url splits. Pull these out and set the\n # transit entries to the proper pointers.\n if self.transit.value == 1:\n if is_empty(self.transit.reference):\n self.transit.reference = \"__TRANSITREF\"\n if is_empty(self.transit.url):\n self.transit.url = \"__TRANSITURL\"\n\n # If the transit depth is not provided, but an Rp/R* ratio is,\n # calculate the depth value.\n if is_empty(self.depth.value) and is_valid(self.rr.value):\n self.depth.value = self.rr.value ** 2\n if isinstance(self.rr.uncertainty, Decimal):\n self.depth.uncertainty = self.rr.uncertainty * 2\n if isinstance(self.rr.uncertainty_upper, Decimal):\n self.depth.uncertainty_upper = self.rr.uncertainty_upper * 2\n self.depth.reference = \"Calculated from Rp/R*\"\n self.depth.url = self.rr.reference\n\n # If the orbital eccentricity value is 0 and a TT value is provided,\n # use the same values for T0 as well.\n if self.ecc.value == Decimal(0) and is_empty(self.om.value):\n self.om.value = Decimal(90)\n self.om.reference = \"Set to 90 deg with ecc~0\"\n print(\"set omega to 90\")\n if is_valid(self.tt.value):\n print(\"copying TT to T0\")\n self.t0.copy_values(self.tt)\n # OM may already be set to 90.\n elif self.ecc.value == 0 and self.om.value == 90:\n if str(self.tt.value) != \"NaN\":\n print(\"copying TT to T0\")\n self.t0.copy_values(self.tt)\n\n # Set the FREEZE_ECC flag if ECC=0 and no uncertainty is provided.\n if self.ecc.value == 0 and is_empty(self.ecc.uncertainty):\n self.freeze_ecc.value = 1\n\n # Set the MULT flag if NCOMP is more than 1 planet.\n if self.ncomp.value > 1:\n self.mult.value = 1\n\n # Set the TREND flag if a DVDT value is provided.\n if not is_empty(self.dvdt.value):\n self.trend.value = 1\n\n # Exclude planets with period uncertainty >10%.\n self.per.check_constrained(0.1)\n if not self.per.well_constrained:\n self.exclude()\n warnings.append(\"<uncertain PER>\")\n\n # Warn of planets with K speeds <2 m/s.\n if is_valid(self.k.value):\n if self.k.value < 2:\n # self.exclude()\n warnings.append(\"<low K value>\")\n\n # Make sure RA string uses spaces.\n if not is_empty(self.ra_string.value):\n if \"h\" in self.ra_string.value:\n new_value = self.ra_string.value.replace(\"h\", \" \")\n new_value = new_value.replace(\"m\", \" \")\n new_value = new_value.replace(\"s\", \"\")\n self.ra_string.value = new_value\n\n # Make sure DEC string uses spaces.\n if not is_empty(self.dec_string.value):\n if \"d\" in self.dec_string.value:\n new_value = self.dec_string.value.replace(\"d\", \" \")\n new_value = new_value.replace(\"m\", \" \")\n new_value = new_value.replace(\"s\", \"\")\n self.dec_string.value = new_value\n\n # Display warnings generated by final adjustments.\n if len(warnings) > 0:\n print(\"<<<{0} GOT {1} WARNING(S)>>>\".format(self.name.value,\n len(warnings)\n )\n )\n [print(x) for x in warnings]", "def 
check_required_parameters(required_params_dict=dict()):\r\n print threading.currentThread().getName(), 'Starting'\r\n is_valid = True\r\n required_params_not_set = pythontools.validate_required_parameters(required_params_dict)\r\n if len(required_params_not_set) > 0:\r\n is_valid = False\r\n msg = \"Validate all required input parameters are set failed.\"\r\n for param in required_params_not_set:\r\n steplog.error(\"Required parameter %s is not set.\" % param)\r\n else:\r\n msg = \"Validate all required input parameters are set succeeded.\"\r\n return is_valid, msg", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def route_info(g, journey):\n distance = 0\n cost = 0.00\n time = 0\n check = 0\n \n for i in range(0, len(journey) - 1):\n city_name = journey[i]\n city_next = journey[i + 1]\n code_city = g.convert[city_name] \n code_next = g.convert[city_next]\n \n for flight in g.city_dict[code_city].get_flights_out():\n if(flight[0] == code_next):\n distance = distance + flight[1]\n time = time + route_info_helper(g, code_city, code_next, flight[1])\n if(i < 7):\n cost = cost + (distance * (0.35 - (i * 0.05)))\n \n check = check + 1\n if((check + 1) == len(journey)):\n return distance, cost, time\n else:\n print(\"Invalid Route\")\n return 0, 0, 0", "def print_and_save_solution(data, manager, routing, assignment):\n total_distance = 0\n total_load = 0\n routes = []\n for vehicle_id in range(data['num_vehicles']):\n route = []\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n point = data['all_points'][node_index]\n point['load'] = route_load\n route.append(point)\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n if route_load > 0:\n routes.append({\n 'stops': route,\n 'distance': route_distance,\n 'load': route_load,\n 'number_of_stops': len(route)\n })\n print('Total distance of all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n with open('./results/capacity_{}_{}.json'.format(DRONES_CAPACITY, MAX_POINT_DEMAND), 'w') as f:\n simplejson.dump({\n 'routes': routes,\n 'total_load': total_load,\n 'total_distance': total_distance,\n 'number_of_drones_used': len(routes),\n 'total_numer_of_stops': sum([x['number_of_stops'] for x in routes])\n }, f)", "def print_solution(self):\n print(f'Objective: 
{self.solution.ObjectiveValue()}')\n total_distance = 0\n total_load = 0\n max_route_distance = 0\n for vehicle_id in range(self.data['num_vehicles']):\n index = self.routingManager.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not self.routingManager.IsEnd(index):\n node_index = self.manager.IndexToNode(index)\n route_load += self.data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(self.data['names'][node_index], route_load)\n\n previous_index = index\n index = self.solution.Value(self.routingManager.NextVar(index))\n route_distance += self.routingManager.GetArcCostForVehicle(\n previous_index, index, vehicle_id\n )\n print(route_distance)\n\n plan_output += '{0}, Load({1}) \\n '.format(self.data['names'][self.manager.IndexToNode(index)], route_load)\n\n plan_output += 'Distance of the route: {}\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n\n print('Total distance of all routes: {}km'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))", "def check_error(self):\n refine_results = {}\n for phase_path, phase in self.phases.items():\n refine_results[phase_path] = {}\n\n # Save the original grid to the refine results\n tx = phase.options['transcription']\n gd = tx.grid_data\n num_nodes = gd.subset_num_nodes['all']\n numseg = gd.num_segments\n\n refine_results[phase_path]['num_segments'] = numseg\n refine_results[phase_path]['order'] = gd.transcription_order\n refine_results[phase_path]['segment_ends'] = gd.segment_ends\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n refine_results[phase_path]['error'] = np.zeros(numseg, dtype=float)\n\n if isinstance(tx, dm.RungeKutta):\n continue\n\n outputs = phase.list_outputs(units=False, out_stream=None)\n\n out_values_dict = {k: v['value'] for k, v in outputs}\n\n prom_to_abs_map = phase._var_allprocs_prom2abs_list['output']\n\n num_scalar_states = 0\n for state_name, options in phase.state_options.items():\n shape = options['shape']\n size = np.prod(shape)\n num_scalar_states += size\n\n x = np.zeros([num_nodes, num_scalar_states])\n f = np.zeros([num_nodes, num_scalar_states])\n c = 0\n\n # Obtain the solution on the current grid\n for state_name, options in phase.state_options.items():\n prom_name = f'timeseries.states:{state_name}'\n abs_name = prom_to_abs_map[prom_name][0]\n rate_source_prom_name = f\"timeseries.state_rates:{state_name}\"\n rate_abs_name = prom_to_abs_map[rate_source_prom_name][0]\n x[:, c] = out_values_dict[prom_name].ravel()\n f[:, c] = out_values_dict[rate_source_prom_name].ravel()\n c += 1\n\n # Obtain the solution on the new grid\n # interpolate x at t_hat\n new_order = gd.transcription_order + 1\n # Gauss-Lobatto does not allow even orders so increase order by 2 instead\n if gd.transcription == 'gauss-lobatto':\n new_order += 1\n new_grid = GridData(numseg, gd.transcription, new_order, gd.segment_ends, gd.compressed)\n left_end_idxs = new_grid.subset_node_indices['segment_ends'][0::2]\n left_end_idxs = np.append(left_end_idxs, new_grid.subset_num_nodes['all'] - 1)\n\n L = interpolation_lagrange_matrix(gd, new_grid)\n I = integration_matrix(new_grid)\n\n # Call the ODE at all nodes of the new grid\n x_hat, x_prime = self.eval_ode(phase, new_grid, L, I)\n E = {}\n e = {}\n err_over_states = {}\n for state_name, options in 
phase.state_options.items():\n E[state_name] = np.absolute(x_prime[state_name] - x_hat[state_name])\n for k in range(0, numseg):\n e[state_name] = E[state_name]/(1 + np.max(x_hat[state_name][left_end_idxs[k]:left_end_idxs[k + 1]]))\n err_over_states[state_name] = np.zeros(numseg)\n\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n err_over_states[state_name][k] = np.max(e[state_name][left_end_idxs[k]:left_end_idxs[k + 1]])\n\n self.error[phase_path] = np.zeros(numseg)\n refine_results[phase_path]['error'] = np.zeros(numseg)\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n\n # Assess the errors in each state\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n if err_over_states[state_name][k] > self.error[phase_path][k]:\n self.error[phase_path][k] = err_over_states[state_name][k]\n refine_results[phase_path]['error'][k] = err_over_states[state_name][k]\n if self.error[phase_path][k] > phase.refine_options['tolerance']:\n refine_results[phase_path]['need_refinement'][k] = True\n\n return refine_results", "def evaluateAllRroutes(self):\n isTrain = 1 # 1 for train, 0 for test\n\n performance = 0\n normalizedPerformance = 0\n priceTolerance = 5 # price to be tolerated\n\n normPerforms = []\n for i in range(8):\n print \"Route: {}\".format(i)\n [perfor, normaPerfor] = self.evaluateOneRouteForMultipleTimes(self.routes[i], priceTolerance)\n normPerforms.append(normaPerfor)\n performance += perfor\n normalizedPerformance += normaPerfor\n\n performance = round(performance/8, 2)\n normalizedPerformance = round(normalizedPerformance/8, 2)\n\n if self.isTrain:\n print \"\\nTRAIN:\"\n else:\n print \"\\nTEST:\"\n print \"Average Performance: {}%\".format(performance)\n print \"Average Normalized Performance: {}%\".format(normalizedPerformance)\n print \"Normalized Performance Variance: {}\".format(np.var(normPerforms))", "def __init__( # pylint: disable=too-many-locals, too-many-arguments\r\n self, pair_type_str, origins, origin_id_field, destinations, dest_id_field,\r\n network_data_source, travel_mode, time_units, distance_units,\r\n max_routes, max_processes, out_routes, scratch_folder, reverse_direction=False,\r\n assigned_dest_field=None, od_pair_table=None, time_of_day=None, barriers=None\r\n ):\r\n pair_type = helpers.PreassignedODPairType[pair_type_str]\r\n self.origins = origins\r\n self.destinations = destinations\r\n self.out_routes = out_routes\r\n self.scratch_folder = scratch_folder\r\n time_units = helpers.convert_time_units_str_to_enum(time_units)\r\n distance_units = helpers.convert_distance_units_str_to_enum(distance_units)\r\n if not barriers:\r\n barriers = []\r\n self.max_processes = max_processes\r\n if not time_of_day:\r\n time_of_day = None\r\n else:\r\n time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT)\r\n\r\n # Initialize the dictionary of inputs to send to each OD solve\r\n self.rt_inputs = {\r\n \"pair_type\": pair_type,\r\n \"origins\": self.origins,\r\n \"origin_id_field\": origin_id_field,\r\n \"destinations\": self.destinations,\r\n \"dest_id_field\": dest_id_field,\r\n \"network_data_source\": network_data_source,\r\n \"travel_mode\": travel_mode,\r\n \"time_units\": time_units,\r\n \"distance_units\": distance_units,\r\n \"time_of_day\": time_of_day,\r\n \"reverse_direction\": reverse_direction,\r\n \"scratch_folder\": self.scratch_folder,\r\n \"assigned_dest_field\": assigned_dest_field,\r\n \"od_pair_table\": od_pair_table,\r\n 
\"barriers\": barriers,\r\n \"origin_transfer_fields\": [], # Populate later\r\n \"destination_transfer_fields\": [] # Populate later\r\n }\r\n\r\n # List of intermediate output OD Line files created by each process\r\n self.route_fcs = []\r\n\r\n # Construct OID ranges for chunks of origins and destinations\r\n if pair_type is helpers.PreassignedODPairType.one_to_one:\r\n # Chunks are of the format [first origin ID, second origin ID]\r\n self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes)\r\n elif pair_type is helpers.PreassignedODPairType.many_to_many:\r\n # Chunks are of the format [chunk_num, chunk_size]\r\n num_od_pairs = 0\r\n with open(od_pair_table, \"r\", encoding=\"utf-8\") as f:\r\n for _ in f:\r\n num_od_pairs += 1\r\n num_chunks = ceil(num_od_pairs / max_routes)\r\n self.chunks = [[i, max_routes] for i in range(num_chunks)]\r\n\r\n # Calculate the total number of jobs to use in logging\r\n self.total_jobs = len(self.chunks)\r\n\r\n self.optimized_cost_field = None", "def validate(self, request):\n values = {\n 'robot_match_comments':request.POST['robot_match_comments'],\n 'did_foul':'did_foul' in request.POST,\n 'did_technical_foul':'did_technical_foul' in request.POST,\n 'foul_description':request.POST['foul_description'],\n 'did_shoot':'did_shoot' in request.POST,\n 'auto_1':request.POST['auto_1'],\n 'auto_2':request.POST['auto_2'],\n 'auto_3':request.POST['auto_3'],\n 'auto_miss':request.POST['auto_miss'],\n 'teleop_1':request.POST['teleop_1'],\n 'teleop_2':request.POST['teleop_2'],\n 'teleop_3':request.POST['teleop_3'],\n 'teleop_5':request.POST['teleop_5'],\n 'teleop_miss':request.POST['teleop_miss'],\n 'shooting_description':request.POST['shooting_description'],\n 'did_climb':'did_climb' in request.POST,\n 'climb_start':request.POST['climb_start'],\n 'climb_finish':request.POST['climb_finish'],\n 'level_reached':request.POST.get('level_reached'),\n 'frisbees_dumped':request.POST['frisbees_dumped'],\n 'climbing_description':request.POST['climbing_description'],\n 'did_human_load':'did_human_load' in request.POST,\n 'did_ground_load':'did_ground_load' in request.POST,\n 'auto_frisbees_ground_loaded':\\\n request.POST['auto_frisbees_ground_loaded'],\n 'loading_description':request.POST['loading_description'],\n }\n if ((values['did_foul'] or values['did_technical_foul']) and\n not values['foul_description']):\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'Please enter a description of the foul(s) the robot committed',\n new_values\n )\n if values['did_shoot']:\n try:\n values['auto_1'] = int(values['auto_1'])\n values['auto_2'] = int(values['auto_2'])\n values['auto_3'] = int(values['auto_3'])\n values['auto_miss'] = int(values['auto_miss'])\n values['teleop_1'] = int(values['teleop_1'])\n values['teleop_2'] = int(values['teleop_2'])\n values['teleop_3'] = int(values['teleop_3'])\n values['teleop_5'] = int(values['teleop_5'])\n values['teleop_miss'] = int(values['teleop_miss'])\n except ValueError:\n raise ValidationError(\n 'You must enter a number for all of the shooting numbers',\n self.__dict__.copy().update(values)\n )\n if values['did_climb']:\n try:\n values['climb_start'] = int(values['climb_start'])\n values['climb_finish'] = int(values['climb_finish'])\n try:\n values['level_reached'] = int(values['level_reached'])\n except TypeError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'You must select a level the robot climbed too',\n new_values\n )\n 
values['frisbees_dumped'] = int(values['frisbees_dumped'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All climbing related numbers must be numbers',\n new_values\n )\n if values['did_ground_load']:\n try:\n values['auto_frisbees_ground_loaded'] = int(\n values['auto_frisbees_ground_loaded'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All numbers of frisbees ground loaded must be numbers',\n new_values\n )\n return values", "def readOptimizationResultsFile(self):\n requiredLineNo = 0\n self.createParamters()\n \n self.optimizationResultsFile = open(self.fileName, 'r')\n \n for lineIndex, line in enumerate(self.optimizationResultsFile):\n if lineIndex == 0:\n startingPhase1, startingPhase2 = line.split()\n self.startingPhase1, self.startingPhase2 = int(startingPhase1), int(startingPhase2)\n\n elif lineIndex == 1:\n init1, init2, elapsedGreen1, elapsedGreen2 = line.split()\n self.init1, self.init2 = float(init1), float(init2)\n\n elif lineIndex == 2:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 3:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 4:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 5:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 6:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 7:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 14:\n noOfRequest = int(line)\n requiredLineNo = 15 + noOfRequest\n # break\n \n elif lineIndex >=15 and lineIndex < requiredLineNo:\n self.getPriorityRequests(line)\n \n elif lineIndex >=15 and lineIndex >= requiredLineNo:\n break\n # self.optimizationResultsFile = open(self.fileName, 'r')\n # for i, line in enumerate(self.optimizationResultsFile):\n # if i in range(15, requiredLineNo):\n\n self.optimizationResultsFile.close()\n # self.getPriorityRequests(requiredLineNo)\n \n self.getCummulativeValues()\n self.generateTimePhaseDiagram()", "def check_required_props(self,\n df,\n node,\n dd,\n exclude_props = [ # submitters don't provide these properties, so remove them from QC check\n # case props not provided by submitters\n \"datasets.submitter_id\",\n \"token_record_id\",\n \"linked_external_data\",\n #series_file props not provided by submitters\n \"file_name\",\n \"md5sum\",\n \"file_size\",\n \"object_id\",\n \"storage_urls\",\n \"core_metadata_collections.submitter_id\",\n \"core_metadata_collections\",\n \"associated_ids\",\n #imaging_study props not provided by submitters\n \"loinc_code\",\n \"loinc_system\",\n \"loinc_contrast\",\n \"loinc_long_common_name\",\n \"loinc_method\",\n \"days_from_study_to_neg_covid_test\",\n \"days_from_study_to_pos_covid_test\"\n ]\n ):\n errors = []\n links = self.list_links(node, dd)\n any_na = df.columns[df.isna().any()].tolist()\n required_props = list(set(dd[node]['required']).difference(links).difference(exclude_props))\n for prop in required_props:\n if prop not in df:\n error = \"{} TSV does not have required property header '{}'!\".format(node,prop)\n print(error)\n 
errors.append(error)\n elif prop in any_na:\n error = \"{} TSV does not have complete data for required property '{}'!\".format(node,prop)\n print(error)\n errors.append(error)\n return errors", "def check_data_params(self, out_log, err_log):\n self.io_dict[\"in\"][\"input_dataset_path\"] = check_input_path(self.io_dict[\"in\"][\"input_dataset_path\"], \"input_dataset_path\", out_log, self.__class__.__name__)\n self.io_dict[\"out\"][\"output_results_path\"] = check_output_path(self.io_dict[\"out\"][\"output_results_path\"], \"output_results_path\", False, out_log, self.__class__.__name__)\n if self.io_dict[\"out\"][\"output_plot_path\"]:\n self.io_dict[\"out\"][\"output_plot_path\"] = check_output_path(self.io_dict[\"out\"][\"output_plot_path\"], \"output_plot_path\", True, out_log, self.__class__.__name__)", "def check_if_repeated_route(route, user_lat, user_lon):\n bus_route = route\n repeated_routes = {'101':'','105':'','106':'','107':'','230':'','111':'','113':'','116':'','119':'','240':'','120':'','271':'','70':'','2':'pt','7':'','8':'','18':'','29':'','1':'pt','3':'pt','4':'pt','402':'pt','425':'pt','202':'pt','212':'pt','214':'pt','102':'pt','10':'pt','11':'pt','13':'','28':'pt','41':'','45':'','55':'pt','57':'pt','63':'pt','47':'','48':'','60':'','64':'','67':'','42':'','12':'','21':''}\n intercity_transit = ['47','48','60','64','67','42','12','13','21','41','45']\n king_county_metro = ['917', 'A Line', '225', '231', '239', '230', '250', '37', '910', '628', '372', '373', '630', '218', '631', '63', '4', '36', '43', '986', '823', '44', '987', '212', '45', '988', 'Trailhead Direct Issaquah Alps', '989', '824', '214', '47', '180', '48', '635', '216', '5', '217', '982', '41', '21', '984', 'F Line', 'E Line', '342', '345', '346', '952', '347', '894', '348', '49', '248', '355', '895', '116', '243', '245', '893', '118', '246', '661', '931', '119', '67', '915', '12', '249', '120', '238', '62', '226', '111', '24', '64', '193', '113', '240', '65', '930', '241', '114', '255', '73', '128', '74', '257', '75', '13', '907', '121', '122', '7', '123', '252', '70', '124', '71', '125', '221', '244', 'Trailhead Direct Cougar Mt.', '55', '994', '50', '995', 'Trailhead Direct Mailbox Peak', '219', '981', 'Trailhead Direct Mt. 
Si', '22', '224', '157', '204', '101', '232', '102', '105', '57', '106', '234', '156', '107', '235', '236', '60', '980', '237', 'B Line', '11', '775', '56', '1', '10', '166', '167', '903', '158', '908', '159', '3', '906', '301', '913', '914', '303', '164', '304', '916', '901', '178', '169', '308', '17', '309', '31', '311', '312', '177', '168', '629', 'Duvall-Monroe Shuttle', '268', '14', '76', '77', '131', '26', '773', '29', '132', '78', '40', '8', '887', 'C Line', '277', '9', '153', '28', '154', '269', 'D Line', '27', '143', '271', '886', '148', '888', '889', '15', '150', '891', '892', '208', '200', '181', '32', '182', '33', '183', '330', '331', '186', '187', '316', '179', '18', '192', '197', '2', '19', '190']\n pierce_transit = ['1','2','3','4','402','425','202','212','214','102','10','11','13','28','41','45','48','55','57','63']\n north_routes = ['101','105','106','107','230','111','113','116','119','240','120','271','70','2','3','4','7','8','12','18','29']\n \n if bus_route in repeated_routes:\n if user_lon < -122.5 and user_lat > 47.1:\n # going to be pierce county (or kitsap)\n if bus_route in pierce_transit: \n bus_route += 'pt'\n else:\n bus_route += repeated_routes[bus_route]\n elif user_lat > 47.7:\n # going to be community transit or everett transit (N)\n if bus_route in north_routes: \n bus_route += 'N'\n else:\n bus_route += repeated_routes[bus_route] \n elif user_lat > 47.33:\n # going to be king county metro\n if bus_route in king_county_metro:\n bus_route = bus_route\n else:\n bus_route += repeated_routes[bus_route]\n elif user_lat > 47.08:\n # going to be pierce transit\n if bus_route in pierce_transit: \n bus_route += 'pt'\n else:\n bus_route += repeated_routes[bus_route] \n else:\n # going to be intercity transit\n if bus_route in intercity_transit: \n bus_route += 'it'\n else:\n bus_route += repeated_routes[bus_route] \n\n return bus_route", "def _check_case_parameters_aero(subcase: Subcase, fem: BDF, sol: int,\n ierror: int=0, nerrors: int=100,\n stop_on_failure: bool=True) -> int:\n log = fem.log\n if 'TRIM' in subcase:\n trim_id = subcase.get_parameter('TRIM')[0]\n if trim_id not in fem.trims:\n msg = (\n f'SOL={sol}\\n'\n f'TRIM = {trim_id}\\n'\n f'trims={fem.trims}\\n'\n f'subcase:\\n{subcase}')\n log_error(sol, [144, 200], msg, log)\n else:\n trim = fem.trims[trim_id]\n suport1 = None\n if 'SUPORT1' in subcase:\n suport_id = subcase.get_parameter('SUPORT1')[0]\n suport1 = fem.suport1[suport_id]\n try:\n trim.verify_trim(\n fem.suport, suport1, fem.aestats, fem.aeparams,\n fem.aelinks, fem.aesurf, xref=True)\n except RuntimeError:\n if stop_on_failure or ierror == nerrors:\n raise\n ierror += 1\n exc_info = sys.exc_info()\n traceback.print_exception(*exc_info)\n #traceback.print_stack()\n #fem.log.error(e.msg)\n #raise\n assert 'DIVERG' not in subcase, subcase\n #allowed_sols = [144, 200]\n\n if 'DIVERG' in subcase:\n value = subcase.get_parameter('DIVERG')[0]\n assert value in fem.divergs, 'value=%s\\n divergs=%s\\n subcase:\\n%s' % (value, str(fem.divergs), str(subcase))\n assert 'TRIM' not in subcase, subcase\n #allowed_sols = [144, 200]\n\n if 'FMETHOD' in subcase:\n # FLUTTER\n fmethod_id = subcase.get_parameter('FMETHOD')[0]\n unused_fmethod = fem.flutters[fmethod_id]\n allowed_sols = [145, 200]\n ierror = check_sol(sol, subcase, allowed_sols, 'FMETHOD', log, ierror, nerrors)\n return ierror", "def solve_route_in_parallel(self):\r\n # Validate Route settings. 
Essentially, create a dummy Route class instance and set up the\r\n # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel processes\r\n # that are guaranteed to all fail.\r\n self._validate_route_settings()\r\n\r\n # Check if the input origins and destinations have any fields we should use in the route analysis\r\n self._populate_input_data_transfer_fields()\r\n\r\n # Compute Route in parallel\r\n LOGGER.info(f\"Beginning parallelized Route solves ({self.total_jobs} chunks)\")\r\n completed_jobs = 0 # Track the number of jobs completed so far to use in logging\r\n # Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes that solve the routes\r\n with futures.ProcessPoolExecutor(max_workers=self.max_processes) as executor:\r\n # Each parallel process calls the solve_route() function with the rt_inputs dictionary for the\r\n # given origin ranges and their assigned destinations.\r\n jobs = {executor.submit(solve_route, self.rt_inputs, range): range for range in self.chunks}\r\n # As each job is completed, add some logging information and store the results to post-process later\r\n for future in futures.as_completed(jobs):\r\n try:\r\n # The Route job returns a results dictionary. Retrieve it.\r\n result = future.result()\r\n except Exception: # pylint: disable=broad-except\r\n # If we couldn't retrieve the result, some terrible error happened and the job errored.\r\n # Note: This does not mean solve failed. It means some unexpected error was thrown. The most likely\r\n # causes are:\r\n # a) If you're calling a service, the service was temporarily down.\r\n # b) You had a temporary file read/write or resource issue on your machine.\r\n # c) If you're actively updating the code, you introduced an error.\r\n # To make the tool more robust against temporary glitches, retry submitting the job up to the number\r\n # of times designated in helpers.MAX_RETRIES. If the job is still erroring after that many retries,\r\n # fail the entire tool run.\r\n errs = traceback.format_exc().splitlines()\r\n failed_range = jobs[future]\r\n LOGGER.debug((\r\n f\"Failed to get results for Route chunk {failed_range} from the parallel process. Will retry \"\r\n f\"up to {helpers.MAX_RETRIES} times. Errors: {errs}\"\r\n ))\r\n job_failed = True\r\n num_retries = 0\r\n while job_failed and num_retries < helpers.MAX_RETRIES:\r\n num_retries += 1\r\n try:\r\n future = executor.submit(solve_route, self.rt_inputs, failed_range)\r\n result = future.result()\r\n job_failed = False\r\n LOGGER.debug(f\"Route chunk {failed_range} succeeded after {num_retries} retries.\")\r\n except Exception: # pylint: disable=broad-except\r\n # Update exception info to the latest error\r\n errs = traceback.format_exc().splitlines()\r\n if job_failed:\r\n # The job errored and did not succeed after retries. 
Fail the tool run because something\r\n # terrible is happening.\r\n LOGGER.debug(f\"Route chunk {failed_range} continued to error after {num_retries} retries.\")\r\n LOGGER.error(\"Failed to get Route result from parallel processing.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise\r\n\r\n # If we got this far, the job completed successfully and we retrieved results.\r\n completed_jobs += 1\r\n LOGGER.info(\r\n f\"Finished Route calculation {completed_jobs} of {self.total_jobs}.\")\r\n\r\n # Parse the results dictionary and store components for post-processing.\r\n if result[\"solveSucceeded\"]:\r\n self.route_fcs.append(result[\"outputRoutes\"])\r\n else:\r\n # Typically, a solve fails because no destinations were found for any of the origins in the chunk,\r\n # and this is a perfectly legitimate failure. It is not an error. However, they may be other, less\r\n # likely, reasons for solve failure. Write solve messages to the main GP message thread in debug\r\n # mode only in case the user is having problems. The user can also check the individual OD log\r\n # files.\r\n LOGGER.debug(f\"Solve failed for job id {result['jobId']}.\")\r\n LOGGER.debug(result[\"solveMessages\"])\r\n\r\n # Post-process outputs\r\n if self.route_fcs:\r\n LOGGER.info(\"Post-processing Route results...\")\r\n self.route_fcs = sorted(self.route_fcs)\r\n self._post_process_route_fcs()\r\n else:\r\n LOGGER.warning(\"All Route solves failed, so no output was produced.\")\r\n\r\n # Clean up\r\n # Delete the job folders if the job succeeded\r\n if DELETE_INTERMEDIATE_OUTPUTS:\r\n LOGGER.info(\"Deleting intermediate outputs...\")\r\n try:\r\n shutil.rmtree(self.scratch_folder, ignore_errors=True)\r\n except Exception: # pylint: disable=broad-except\r\n # If deletion doesn't work, just throw a warning and move on. This does not need to kill the tool.\r\n LOGGER.warning(f\"Unable to delete intermediate Route output folder {self.scratch_folder}.\")\r\n\r\n LOGGER.info(\"Finished calculating Routes.\")", "def __init__(self):\n self.parameter = [[(0,1),(1,1),(0,0),(1,0)],\n [(1,0),(1,0),(0,1),(0,1)],\n [(1,0),(0,1),(1,0),(0,1)],\n [(0,0),(0,0),(0,0),(0,0)]]\n \"\"\"Distance is number of whole route from origin to destination\"\"\"\n self.distance = 100\n \"\"\"action sets\"\"\"\n self.action = [0,55,75]\n self.maxSpeed = self.action[-1]\n \"\"\"time period for each stage\"\"\"\n self.time_interval = 0.5\n \"\"\"Number of stages we want to check. Here we can use this to limit the\n travel time, since sometimes we want driver to arrive in a time window.\n For example, if stage is 4, that means we want to driver finish route within \n 2 hours. \"\"\"\n self.stage = 4\n self.time_block = 0.5\n self.distance_block = 25", "def _calculate_emissions(self):\n parameters = self._get_pollutants_for_vehicle()\n\n self.routes = RouteSet()\n\n if \"routes\" not in self._json_data:\n log.debug(\"Error in returned JSON data from web service.\")\n log.debug(\"data: {}\".format(self._json_data))\n return\n\n # Create a \"set\" of Routes. 
The planner web service will\n        # return 2-4 routes with different paths.\n        for idx, r in enumerate(self._json_data[\"routes\"][\"features\"]):\n            attributes = r.get(\"attributes\")\n            route = Route(distance=attributes.get(\"Total_Meters\"),\n                          minutes=attributes.get(\"Total_Minutes\"),\n                          path=r.get(\"geometry\").get(\"paths\")[0], id = idx)\n            self.routes.add(route)\n\n        log.debug(\"Nr of routes: {}\".format(len(self.routes)))\n        for i, route in enumerate(self.routes):\n            # A list of x,y,z points that all together represents the route\n            path_coordinates = route.path\n            distances = []\n\n            # Nifty little trick to loop over 'path_coordinates',\n            # but keep a reference to the 'prev' item to calculate the\n            # distance between them\n            iter_points = iter(path_coordinates)\n            prev = next(iter_points)\n            for point in path_coordinates:\n                if not distances:\n                    # first point\n                    distances.append(Planner._get_distance_3d(prev, point) / 1000)\n                else:\n                    distances.append(distances[-1] + Planner._get_distance_3d(prev, point) / 1000)\n\n                point_slope = Planner._get_slope(prev, point)\n\n                # Calculate emission for each pollutant the user has asked for\n                for p in self._pollutants:\n                    parms = [x for x in parameters if x.pollutant.name.startswith(p)]\n                    calc_emission = self.get_emission(parms, point_slope)\n                    route.add_pollutant(p, calc_emission)\n\n                prev = point\n\n            route.add_distances(distances)", "def validate(self, mode: QueryMode = \"batch\") -> Dict[str, Any]:\n        self._query_by_task_id = {}\n        explore_count = self._count_explores()\n        printer.print_header(\n            f\"Testing {explore_count} \"\n            f\"{'explore' if explore_count == 1 else 'explores'} \"\n            f\"[{mode} mode] \"\n            f\"[concurrency = {self.query_slots}]\"\n        )\n\n        self._create_and_run(mode)\n        if mode == \"hybrid\" and self.project.errored:\n            self._create_and_run(mode)\n\n        for model in sorted(self.project.models, key=lambda x: x.name):\n            for explore in sorted(model.explores, key=lambda x: x.name):\n                message = f\"{model.name}.{explore.name}\"\n                printer.print_validation_result(\n                    passed=not explore.errored, source=message\n                )\n\n        return self.project.get_results(mode)", "def checkParameters(self):\n        self.DEBUG(\"EDPluginExecDatGnomv1_0.checkParameters\")\n        self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n        self.checkMandatoryParameters(self.dataInput.inputCurve, \"No input Curve file provided\")", "def _check_inputvalues(self):\n        # Check x, y and z are int or float dtypes\n        # ie do not contain any unusable values like strings\n        if not (self.x.dtype in [np.int, np.float]):\n            raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n        if not (self.y.dtype in [np.int, np.float]):\n            raise TypeError(\"Route input 'y' must be either int or float dtypes\")\n\n        # Performs checks on z if not empty\n        if self.z is not None:\n            for v in self.z.values():\n                if not (v.dtype in [np.int, np.float]):\n                    raise TypeError(\"Route input 'z' must be either int or float dtypes\")", "def check_is_ready(self):\n        self.logger.info(\"Checking optimization inputs for automatic design.\")\n        if self.optimization_graph is None:\n            raise RuntimeError(\"The optimization graph needs to be defined in order to optimize the network.\")\n        if self.network_objective is None:\n            raise RuntimeError(\"A network objective has to be set (in MW).\")", "def _checkSettings(self):\n        geomsThatNeedMeshSize = (\"1D slab\", \"1D cylinder\")\n        if self.geometry in geomsThatNeedMeshSize:\n            if self.meshSubdivisionsPerCm is None:\n                raise ValueError(\n                    \"{} geometry requires `mesh points per cm` to be 
defined in cross sections.\".format(\n self.geometry\n )\n )\n if self.criticalBuckling != False:\n raise ValueError(\n \"{} geometry cannot model critical buckling. Please disable\".format(\n self.geometry\n )\n )", "def validate(self):\n super(ReferenceMapping, self).validate()\n self.check_observatory()\n self.check_instrument()\n self.check_filekind()\n self.check_schema_uri()\n if \"reference_to_dataset\" in self.header:\n parkeys = self.get_required_parkeys()\n for _reference, dataset in self.reference_to_dataset.items():\n assert dataset.upper() in parkeys, \\\n \"reference_to_dataset dataset keyword not in parkey keywords.\"\n with log.augment_exception(\"Invalid mapping:\", self.instrument, self.filekind):\n self.selector.validate_selector(self.tpn_valid_values)", "def validate(self):\n errors = []\n app = errors.append\n\n if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:\n app(\"self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied\")\n\n if self.omp_threads > self.hw.cores_per_node:\n app(\"omp_threads > hw.cores_per_node\")\n\n if self.mem_per_proc > self.hw.mem_per_node:\n app(\"mem_mb >= self.hw.mem_per_node\")\n\n if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:\n app(\"self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied\")\n\n if self.priority <= 0:\n app(\"priority must be > 0\")\n\n if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):\n app(\"1 <= min_cores <= hardware num_cores >= hint_cores not satisfied\")\n\n if errors:\n raise self.Error(str(self) + \"\\n\".join(errors))", "def validate(self):\r\n for opt in self.required:\r\n if not getattr(self, opt):\r\n print \"Error: %s is not specified.\" % opt\r\n self.optp.print_help()\r\n sys.exit(1)", "def delivery_optimization(trucks_max_loadings,\n drivers_weights,\n items_csv_file,\n trucks_measuring_unit='kg',\n drivers_measuring_unit='kg',\n csv_delimiter=';',\n column_mapping_required=False,\n columns_mapping={}):\n\n if len(trucks_max_loadings) != len(drivers_weights):\n print(\"The count of trucks must be equal to the count of drivers!\")\n exit(1)\n\n\n # Calculate the possible loads for the trucks\n\n # If necessary, converting kg to gram for the trucks max loading and drivers weight.\n if trucks_measuring_unit == 'kg':\n trucks_max_loadings_gram = [max_load*1000 for max_load in trucks_max_loadings]\n else:\n trucks_max_loadings_gram = trucks_max_loadings\n\n if drivers_measuring_unit == 'kg':\n drivers_weights_gram = [weight*1000 for weight in drivers_weights]\n else:\n drivers_weights_gram = drivers_weights\n\n\n # Subtract the drivers weights from trucks max loading weight.\n trucks_possible_loads = [max_load - driver_weight\n for max_load,driver_weight\n in zip(trucks_max_loadings_gram, drivers_weights_gram)]\n\n print(\"\\nTrucks max possible items loadings: \")\n for i, max_load in enumerate(trucks_possible_loads):\n print(\"Truck {}: {} gram\".format(i, max_load))\n\n\n # Read the data from csv file into a dataframe by pandas.\n df_items = pd.read_csv(items_csv_file, delimiter=csv_delimiter, decimal='.')\n\n # Map the column names if required\n if column_mapping_required and columns_mapping is not None:\n df_items.rename(columns = {origin_name : new_name for new_name, origin_name in columns_mapping.items()},\n inplace=True)\n\n\n # Start with pulp action\n\n # Create a new LpProblem from pulp.\n # The LpProblem should maximize a objective function, which will be set later.\n prob = 
pl.LpProblem(\"Delivery_Problem\",pl.LpMaximize)\n\n\n items_names = df_items['name'].tolist()\n\n # Create LpVariables from pulp for optimizing.\n # The values of this variables will be changed by the solver included in pulp to optimize the objective function.\n # Set variables to optimize as items count, lower bound = 0 and as Integer numbers\n # create optimizeable variables for each item for each truck as a list of dicts\n lp_trucks_items = [pl.LpVariable.dicts(\"Truck {}\".format(truck_number), items_names, lowBound=0, cat='Integer')\n for truck_number in range(len(trucks_possible_loads))]\n\n\n # Adding main objective function\n #\n # Construct a objective function and add it to the LpProblem.\n # The objective function will be calculated repeatedly to adjust the LpVariables to maximize itself.\n #\n # For the objective function we calculate the total sum of utility for the items on the trucks.\n # To do this, we multiply the utility value of the item by its count from the LpVariable.\n # We do that with each item on each truck and get the sum of all.\n prob += pl.lpSum([\n item['utility'] * lp_truck_items[item['name']]\n for _, item in df_items.iterrows()\n for lp_truck_items in lp_trucks_items\n ])\n\n\n # Adding constraints\n\n # Adding max capacity constraints\n #\n # We have a limit of load for each truck, what we can implement as constraints.\n # Instead of taking the total sum as in the objective function, here we create a separate constraint for each truck.\n # To do this, we multiply the weight of the item by its count from the LpVariable.\n # We do that with each item on one truck and get the sum of its weight.\n # Then we add a new constraint where the sum of weight must be lower or equal\n # to the truck max possible load (which we got further up).\n # Repeat for all trucks.\n\n # for each truck get the possible load\n for truck_number, truck_possible_load in enumerate(trucks_possible_loads):\n lp_truck_items = lp_trucks_items[truck_number]\n # multiply the weight of the item with its count on the truck\n lp_sum = pl.lpSum([float(item['weight']) * lp_truck_items[item['name']] for _, item in df_items.iterrows()])\n # set the constraint, that the sum of weights on the truck must be lower or equal to his possible load\n prob += lp_sum <= truck_possible_load\n\n\n # Adding item max count constraints\n #\n # Like the max possible load above it exists also a limit by the max count of each item type.\n # This limit applies one item type over all trucks.\n # So we go through the item list and get the count of it from each truck by the LpVariables in `lp_trucks_items`.\n # We sum the count over all trucks and add a new constraint\n # where it must be lower or equal to the defined max count.\n # Repeat for all items.\n for _, item in df_items.iterrows():\n total_item_count = sum([lp_truck_items[item['name']] for lp_truck_items in lp_trucks_items])\n prob += total_item_count <= item['max_count']\n\n\n # Try to solve the defined problem\n #\n # It can be set a certain solver from the pulp library.\n # We set the msg parameter to False, because it disables the solvers output to stdout\n print(\"\\nStart solving...\", end='')\n prob.solve(pl.getSolver('PULP_CBC_CMD', msg=False))\n print(\"done\\n\")\n\n # Show the status of solving.\n print(\"Result status:\", pl.LpStatus[prob.status])\n\n\n # Convert results to own data structure\n #\n # Because of the subdivide by upper name in the dicts has each LpVariable name a string as prefix like \"Truck_1_\".\n # Furthermore the library has 
replaced all whitespaces in the variable names by underscores.\n # So we can not use the original item name as key to get the value of a item.\n #\n # To make easy access we make a own result list for each truck.\n # In this list we add a dictionary for all items on the respective truck.\n # Because the division for each truck, we can remove the prefix on all variables.\n # Just the replaced whitespaces are taken and handled later.\n # The result values keep.\n\n # get optimized variables\n result_variables = prob.variables()\n # create a result list for the trucks\n result_trucks_items = []\n # for each truck add his items\n for truck_number in range(len(trucks_possible_loads)):\n prefix_len = len(\"Truck_{}_\".format(truck_number))\n # create a result item dict for this truck\n truck_items = {var.name[prefix_len:] : var.varValue # remove the prefix of truck in the name\n for var in result_variables\n # get only the items from the current truck\n if var.name.startswith(\"Truck_{}_\".format(truck_number))}\n # add count of items for this truck to result list\n result_trucks_items.insert(truck_number, truck_items)\n\n\n # Show each truck with containing driver weight and item count from results\n print(\"\\nResult count for each item:\")\n\n for truck_number, result_truck_items in enumerate(result_trucks_items):\n # increase the number of truck by 1 to skip the index 0 (only for presentation)\n # also divide the weight of driver by 1000 to get kg from gram\n print(\"\\nTruck {} with driver weight {}kg:\".format(truck_number+1, drivers_weights_gram[truck_number]/1000))\n for item_name, item_count in result_truck_items.items():\n # show only the items which counts are greater 0\n if item_count > 0:\n # replace underscores from pulp with whitespaces for better readability\n print(\"{:<25} = {:>10}\".format(item_name.replace('_', ' '), item_count))\n\n print(\"\")\n\n # Show possible loads of trucks\n for truck_number, truck_possible_load in enumerate(trucks_possible_loads):\n print(\"Allowed weight for truck {} (with included driver): {} gram or {} kg\".format(\n truck_number+1,\n truck_possible_load,\n truck_possible_load / 1000))\n\n print(\"\")\n\n # Show total weights of trucks containing items\n #\n # To get the total sum of weight for each truck and the chosen count of items,\n # we go through each truck from our own result list `result_trucks_items`.\n # We multiply the weight of item with the count from truck results.\n #\n # Because the library has replaced all whitespaces in the variable names by underscores,\n # we access the result value by do this with the origin name too and use it as key.\n #\n # Notice: We can not just replace all underscores with whitespaces,\n # because a origin name could contain a underscore too.\n\n # for each truck\n for truck_number, result_truck_items in enumerate(result_trucks_items):\n # calculate the total sum of weight for each item\n total_truck_weight = sum([\n item_count * item['weight']\n for item_name, item_count in result_truck_items.items()\n for _,item in df_items.iterrows()\n # the item names from pulp have replaced whitespaces ' ' with underscore '_'\n # to compare we must do this too\n if item['name'].replace(' ', '_') == item_name\n ])\n print(\"Total weight of truck {}: {} gram or {} kg\".format(truck_number+1,\n total_truck_weight,\n total_truck_weight/1000))\n\n print(\"\")\n\n # Show the total value of utility over all trucks and items\n\n # print the end value of the objective function, in our case the sum of utility from all picked 
items\n result_utility = pl.value(prob.objective)\n print(\"The total utility value (rounded to 2 decimal places) of picked items in both trucks is: {}\".\n format(round(result_utility, 2)))\n\n print(\"\\nProcess completed.\")", "def testoptdone(self):\r\n assert self.data.optdone\r\n convergence = numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets\r\n assert sum(convergence) >= 2", "def validate_input(request):\n\n # Validate errors inputs\n request_data = JSONParser().parse(request)\n\n if 'action' not in request_data:\n return_data = {'error': 'No action'}\n logger.error('validate_input method: error:{} request:{}'.format(return_data, request))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n if 'east' not in request_data:\n return_data = {'error': 'No east'}\n logger.error('validate_input method: error:{} request:{}'.format(return_data, request))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n if 'west' not in request_data:\n return_data = {'error': 'No west'}\n logger.error('validate_input method: error:{} request:{}'.format(return_data, request))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Debug mode checking condition\n if 'number' in request_data and 'stops' in request_data:\n\n # If current status is break then making debug mode\n if ValidateError.check_current_status() == 'break':\n return CreateDebugMode.validate_input_to_create_debug_mode(request_data)\n\n # If current status is started or pending then return error_robotworking message\n elif ValidateError.check_current_status() in ['started', 'pending']:\n return_data = {'status': 'error', 'error': 'robotworking'}\n logger.error('validation_errors.check_current_status debug mode method: error:robotworking request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Else return error_status message\n else:\n return_data = {'status': 'error', 'error': 'status'}\n logger.error('validation_errors.check_current_status debug mode method: error:status unknown request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Disconnection checking condition\n elif request_data['action'] == 'disconnect':\n\n # If current status is success or revoked or no_uuid then making disconnection\n if ValidateError.check_current_status() in ['success', 'revoked', 'no_uuid', 'failure']:\n return CreateDisconnect.validate_input_to_create_disconnection(request_data)\n\n # If current status is started or pending or break then return error_robotworking message\n elif ValidateError.check_current_status() in ['started', 'pending', 'break']:\n return_data = {'status': 'error', 'error': 'robotworking'}\n logger.error('validation_errors.check_current_status disconnection method: '\n 'error:robotworking request:{}'.format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Else return error_status message\n else:\n return_data = {'status': 'error', 'error': 'status'}\n logger.error('validation_errors.check_current_status disconnection method:'\n ' error:status unknown request:{}'.format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Connection checking condition\n elif request_data['action'] == 'connect':\n\n # If current status is success or revoked or no_uuid then making connection\n if ValidateError.check_current_status() in ['success', 'revoked', 'no_uuid', 'failure']:\n return 
CreateConnection.validate_input_to_create_connection(request_data)\n\n # If current status is started or pending or break then return error_robotworking message\n elif ValidateError.check_current_status() in ['started', 'pending', 'break']:\n return_data = {'status': 'error', 'error': 'robotworking'}\n logger.error('validation_errors.check_current_status connection method: error:robotworking request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Else return error_status message\n else:\n return_data = {'status': 'error', 'error': 'status unknown'}\n logger.error('validation_errors.check_current_status connection method: error:status request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Create connection in connection table\n elif request.data['action'] == 'create_connection':\n return ConnectionUtilities.create_dummy_connection(request_data)\n\n # Else return error_operation message\n else:\n return_data = {'status': 'error', 'error': 'operation'}\n logger.error('validation_errors.check_current_status connection method: error:action unknown request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('r', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Radius r must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Radius r must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n # Check piezometer depth\n elif 'z' in self.parameters:\n z = self.parameters.get('z', -1)\n if type(z) not in [int, float]:\n flag = False\n warnings += \"Depth of piezometer must be a float value\\n\"\n else:\n if z < 0:\n flag = False\n warnings += \"Depth z must be higher than 0\\n\"\n else:\n flag = False\n warnings += \"Well don't contain well depth attributes\\n\"\n return(flag, warnings) # End Function", "def validate(self):\n self.filter_passing_hits()\n\n checks = {\"number of hits\":self.check_hits(),\n \"base pair count\":self.check_bp(),\n \"contig count\":self.check_contigs(),\n \"characters\": self.check_chars(),\n \"checksum\":not check_checksum(self.seqdata.checksum)}\n\n failed_checks = {(k, v) for k, v in checks.iteritems() if v is False}\n\n if failed_checks:\n \"\"\"\n replace this with logger, break would be replaced by a raised\n Exception where the Exception would be caught by the\n Sequence_Upload code\n \"\"\"\n for k, v in failed_checks:\n with open(generate_path(\"outputs/seq_errors.txt\"), \"a\") as file_:\n file_.write(\n '%s failed validation:'\n 'the %s was not valid\\n' %(self.seqdata.accession, k)\n )\n self.seqdata.valid = False\n else:\n self.seqdata.valid = True", "def processTradeRoutes(self):\n try:\n nextRound = self.currentRound+1\n resultslist = 
[]\n for trID in self.tradeRoutes.keys():\n myTradeRoute = self.tradeRoutes[trID]\n (systemFromID, systemToID, tradeRouteType) = string.split(trID, '-')\n systemFrom = self.systems[systemFromID]\n systemTo = self.systems[systemToID]\n cancel = 0\n warpReq = 0\n # choose trade route type\n if tradeRouteType == 'GEN':\n # update what system sends based on what it makes\n myTradeRoute.AL = systemFrom.prodAL\n myTradeRoute.EC = systemFrom.prodEC\n myTradeRoute.IA = systemFrom.prodIA\n \n # check if trade route is adjacent or requires warp gate capacity\n if systemTo.id in systemFrom.warpGateSystems:\n warpReq = myTradeRoute.getWarpRequired()\n if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n cancel = 1\n elif systemTo.id not in systemFrom.connectedSystems:\n cancel = 1\n \n if (systemFrom.AL >= myTradeRoute.AL and\n systemFrom.EC >= myTradeRoute.EC and\n systemFrom.IA >= myTradeRoute.IA and \n cancel == 0):\n # process trade route\n systemFrom.AL -= myTradeRoute.AL\n systemFrom.EC -= myTradeRoute.EC\n systemFrom.IA -= myTradeRoute.IA\n systemTo.AL += myTradeRoute.AL\n systemTo.EC += myTradeRoute.EC\n systemTo.IA += myTradeRoute.IA\n # deduct properly if empires are different\n empireFrom = self.empires[systemFrom.myEmpireID]\n empireTo = self.empires[systemTo.myEmpireID]\n if empireFrom <> empireTo:\n empireFrom.AL -= myTradeRoute.AL\n empireFrom.EC -= myTradeRoute.EC\n empireFrom.IA -= myTradeRoute.IA\n empireTo.AL += myTradeRoute.AL\n empireTo.EC += myTradeRoute.EC\n empireTo.IA += myTradeRoute.IA\n \n if warpReq > 0:\n systemFrom.usedWGC += warpReq\n systemTo.usedWGC += warpReq\n \n # mail trade route completion\n resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n self.mailTradeInfo('completed', myTradeRoute, nextRound)\n else:\n cancel = 1\n \n # check if route should be cancelled\n if cancel == 1:\n resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n elif myTradeRoute.oneTime == 1:\n resultslist.append('one time trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n \n return str(resultslist)\n except:\n return 'galaxy->processTradeRoutes error'", "def validate_router_gw_info(self, context, router_id, gw_info):\n # check if this router has a vpn service\n admin_con = context.elevated()\n # get all relevant services, except those waiting to be deleted or in\n # ERROR state\n filters = {'router_id': [router_id],\n 'status': [constants.ACTIVE, constants.PENDING_CREATE,\n constants.INACTIVE, constants.PENDING_UPDATE]}\n services = self.vpn_plugin.get_vpnservices(admin_con, filters=filters)\n if services:\n # do not allow enable-snat\n if (gw_info and\n gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)):\n raise RouterWithSNAT(router_id=router_id)\n else:\n # if this is a non-vpn router. 
if snat was disabled, should check\n # there is no overlapping with vpn connections\n if (gw_info and\n not gw_info.get('enable_snat',\n cfg.CONF.enable_snat_by_default)):\n # get router subnets\n subnets = self._core_plugin._find_router_subnets_cidrs(\n context, router_id)\n # find all vpn services with connections\n if not self._check_subnets_overlap_with_all_conns(\n admin_con, subnets):\n raise RouterWithOverlapNoSnat(router_id=router_id)", "def _further_validate_and_setup(self) -> None:\n\n # Make sure parameters make sense/are valid\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The lists of of learners and parameter '\n 'grids must be the same size.')\n if (self.validated['hashed_features'] is not None\n and self.validated['hashed_features'] == 0):\n self.validated['hashed_features'] = self._n_features_feature_hashing\n if self.validated['lognormal'] and self.validated['power_transform']:\n raise SchemaError(autos=None,\n errors='Both \"lognormal\" and \"power_transform\" '\n 'were set simultaneously.')\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The \"learners\" and \"param_grids\" '\n 'parameters were both set and the '\n 'lengths of the lists are unequal.')", "def validate_all_fields(self):\n\n if self.validate_byr() and \\\n self.validate_iyr() and \\\n self.validate_eyr() and \\\n self.validate_hgt() and \\\n self.validate_hcl() and \\\n self.validate_ecl() and \\\n self.validate_pid() and \\\n self.validate_cid():\n return True\n return False", "def testoptdone(self):\r\n assert self.data.optdone\r\n target_e, target_g, target_s = self.data.geotargets\r\n value_e, value_g, value_s = self.data.geovalues[-1]\r\n converged = (value_e < target_e and value_g < target_g) or (value_g < target_g and value_s < target_s)\r\n assert converged", "def _validate(self):\n self.params['report date'] = None\n if any(self.params.values()):\n s = self.params['start']\n e = self.params['end']\n cond1 = s is None\n cond2 = e is None\n \n if cond1 and not cond2:\n self.params['report date'] = e\n if not cond1 and cond2:\n self.params['report date'] = s\n if not cond1 and not cond2:\n if s == e:\n self.params['report date'] = s\n else:\n if s > e:\n self.params['start'] = e\n self.params['end'] = s\n else:\n self.params['report date'] = MAX_DATE", "def __init__(self, **kwargs):\r\n self.pair_type = kwargs[\"pair_type\"]\r\n self.origins = kwargs[\"origins\"]\r\n self.origin_id_field = kwargs[\"origin_id_field\"]\r\n self.destinations = kwargs[\"destinations\"]\r\n self.dest_id_field = kwargs[\"dest_id_field\"]\r\n self.network_data_source = kwargs[\"network_data_source\"]\r\n self.travel_mode = kwargs[\"travel_mode\"]\r\n self.time_units = kwargs[\"time_units\"]\r\n self.distance_units = kwargs[\"distance_units\"]\r\n self.time_of_day = kwargs[\"time_of_day\"]\r\n self.reverse_direction = kwargs[\"reverse_direction\"]\r\n self.scratch_folder = kwargs[\"scratch_folder\"]\r\n self.assigned_dest_field = kwargs[\"assigned_dest_field\"]\r\n self.od_pair_table = kwargs[\"od_pair_table\"]\r\n self.origin_transfer_fields = kwargs[\"origin_transfer_fields\"]\r\n self.destination_transfer_fields = kwargs[\"destination_transfer_fields\"]\r\n self.barriers = []\r\n if \"barriers\" in kwargs:\r\n self.barriers = kwargs[\"barriers\"]\r\n\r\n # Create a job ID and a folder for this job\r\n self._create_job_folder()\r\n\r\n # Setup the class logger. 
Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"RoutePairs\")\r\n\r\n # Get field objects for the origin and destination ID fields since we need this in multiple places\r\n self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]\r\n self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]\r\n\r\n # Set up other instance attributes\r\n self.is_service = helpers.is_nds_service(self.network_data_source)\r\n self.rt_solver = None\r\n self.solve_result = None\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.input_origins_layer_obj = None\r\n self.input_dests_layer_obj = None\r\n self.origin_unique_id_field_name = \"OriginUniqueID\"\r\n self.dest_unique_id_field_name = \"DestinationUniqueID\"\r\n self.od_pairs = None\r\n\r\n # Create a network dataset layer if needed\r\n if not self.is_service:\r\n self._make_nds_layer()\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputRoutes\": \"\",\r\n \"logFile\": self.log_file\r\n }", "def testoptdone(self):\r\n\r\n assert self.data.optdone\r\n\r\n targets = self.data.geotargets\r\n values = numpy.abs(self.data.geovalues[-1])\r\n\r\n # Since the other criteria are not used and are not printed in this case, they should\r\n # be parsed as numpy.inf, for which we can check.\r\n assert numpy.isinf(targets[2])\r\n assert numpy.isinf(targets[4])\r\n\r\n conv = values[1] < targets[1] and (values[0] < targets[0] or values[3] < targets[3])\r\n assert conv", "def print_solution(data, manager, routing, assignment):\n total_distance = 0\n total_load = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n # print(plan_output)\n total_distance += route_distance\n total_load += route_load\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/route/route_vehicle{vehicle_id}.txt\", \"w\") as file:\n file.write(plan_output)\n file.close()\n print(\"aaa\")\n print('Total cost for all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/load_dist_{data['num_vehicles']}vehicles.txt\", \"w\") as file:\n out_file = \"\"\n out_file += str(total_load) + \",\" + str(total_distance)\n file.write(out_file)\n file.close() # OPEN AND ANALYZE LATER WITH PANDAS", "def validate(self):\n variables = ['waterThickness', 'waterPressure']\n compare_variables(test_case=self, variables=variables,\n 
filename1='full_run/output.nc',\n filename2='restart_run/output.nc')", "def validate_model_args(self, cgi_map, network):\n model_kwargs = InfoPropModel.validate_model_args(self, cgi_map, network)\n\n dfcrit = model_kwargs.pop('dfcrit')\n if dfcrit == 'df':\n model_kwargs['ap'] = None\n elif dfcrit == 'ap':\n model_kwargs['df'] = None\n\n if ('sink_nodes' not in model_kwargs) or \\\n (len(model_kwargs['sink_nodes']) == 0):\n raise exc.InsufficientSinks()\n if (len(model_kwargs['sink_nodes']) > self.max_sinks):\n raise exc.TooManyBoundaries('sinks', self.max_sinks)\n\n return model_kwargs", "def traffic_concentration(rFile, sheets, x, y, pollutant):\r\n p = Point(x, y)\r\n road, dis = nearest_road(p, rFile)\r\n \r\n # step 1. check whether this location is within the calculation range. If so, go on. Otherwise exit.\r\n if dis > 60: # I think we dont have to consider points that are too far from the streets.\r\n return 'e1' # error 1\r\n \r\n if dis < 3.5:\r\n dis = 3.5 # In the NSL calculation tool, calculation distance smaller than 3.5 meters are limited to 3.5 meters.\r\n \r\n # step 2. determine all the parameters required.\r\n \r\n #calibration factor\r\n Fk = 0.62\r\n \r\n # Emission number. for SO2, NO2, NOx, PM10, PM2.5, lead, and CO\r\n N = int(road['properties']['intensity']) #the traffic intensity, being the number of vehicles per day\r\n Fs = float(road['properties']['f_cong']) #fraction of stagnant traffic, a number between 0 and 1\r\n Fm = float(road['properties']['f_medium']) #fraction of medium-weight motor vehicles\r\n Fz = float(road['properties']['f_heavy']) #fraction of heavy motor vehicles\r\n Fb = float(road['properties']['f_bus']) #fraction of buses\r\n st = str(road['properties']['speed_type']) #intotal 5 types: a:100, b:44, c:19, d:13, e:26 (km/h)\r\n El = emission_factor(sheets, 'p', st, pollutant) #emission factor of light motor vehicles\r\n Em = emission_factor(sheets, 'm', st, pollutant) #emission factor of medium-weight motor vehicles\r\n Ez = emission_factor(sheets, 'v', st, pollutant) #emission factor of heavy motor vehicles\r\n Eb = emission_factor(sheets, 'b', st, pollutant) #emission factor of buses\r\n Eld = emission_factor(sheets, 'p', 'd', pollutant) #emission factor of light motor vehicles (speedType: d)\r\n Emd = emission_factor(sheets, 'm', 'd', pollutant) #emission factor of medium-weight motor vehicles (speedType: d)\r\n Ezd = emission_factor(sheets, 'v', 'd', pollutant) #emission factor of heavy motor vehicles (speedType: d)\r\n Ebd = emission_factor(sheets, 'b', 'd', pollutant) #emission factor of buses (speedType: d)\r\n \r\n E_regular = N * (1 - Fs) * ((1 - Fm - Fz - Fb) * El + Fm * Em + Fz * Ez + Fb * Eb) * 1000 / 24 / 3600\r\n E_cong = N * Fs * ((1 - Fm - Fz - Fb) * Eld + Fm * Emd + Fz * Ezd + Fb * Ebd) * 1000 / 24 / 3600\r\n E = E_regular + E_cong\r\n# print(\"{}: {}, {}\".format(pollutant, E_regular, E_cong))\r\n #dilution factor\r\n roadType = str(road['properties']['class'])\r\n if roadType == '1': # Broad street canyon\r\n a = 0.000325\r\n b = -0.0205\r\n c = 0.39\r\n alpha = 0.856\r\n elif roadType == '2': # Small street canyon\r\n a = 0.000488\r\n b = -0.0308\r\n c = 0.59\r\n alpha = None\r\n elif roadType == '3': # One-sided buildings\r\n a = 0.0005\r\n b = -0.0316\r\n c = 0.57\r\n alpha = None\r\n elif roadType == '4': # General urban\r\n a = 0.000310\r\n b = -0.0182\r\n c = 0.33\r\n alpha = 0.799\r\n \r\n if dis > 30 and (roadType == 1 or roadType == 4):\r\n theta = alpha * pow(dis, -0.747)\r\n else:\r\n theta = a * dis**2 + b * dis + 
c\r\n \r\n #tree factor\r\n Fb = road['properties']['t_factor']\r\n \r\n #wind speed\r\n ws = wind_speed(sheets, x, y) # average speed from CAR VL3.0\r\n \r\n #regional factor related to meteorology\r\n Fregio = 5 / ws\r\n \r\n # step 3. calculate the traffic concentration based on the parameters above.\r\n C_traffic = Fk * E * theta * Fb * Fregio\r\n \r\n # If it is NO2, then NOx has to be considered due to its chemical reaction with O3.\r\n if pollutant == 'NO2':\r\n B = 0.6 # fixed number?\r\n K = 100 # parameter for the conversion from NO to NO2\r\n C_background_O3 = background_concentration(sheets, x, y, 'O3')\r\n C_traffic_NOx = traffic_concentration(rFile, sheets, x, y, 'NOx')\r\n C_traffic = C_traffic + B * C_background_O3 * (C_traffic_NOx - C_traffic) / (C_traffic_NOx - C_traffic + K)\r\n \r\n return C_traffic", "def __check_inputs__(self):\n # | - __check_inputs__\n # #####################################################################\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n # #####################################################################\n\n if stop_mode == \"num_generations\":\n mess_i = \"stop_mode='num_generations', \\\n Must pass int to 'stop_num_generations'\"\n assert type(stop_num_generations) == type(1), mess_i\n #__|", "def check_parameters(self):\n\n if self.process not in [\"Like\", \"Like-and-follow\"]:\n raiser('process')\n\n if \"type\" not in self.duration or \"value\" not in self.duration:\n raiser('duration(type or value)')\n else:\n typ = self.duration['type']\n val = self.duration['value']\n if self.process == \"Like\":\n if typ not in ['by_time', 'by_likes']:\n raiser('type')\n\n if \"like\" not in self.limits_per_hour:\n raiser('limitsPerHour(like)')\n else:\n try:\n self.limits_per_hour['like'] = float(self.limits_per_hour['like'])\n except ValueError:\n raiser('like')\n elif self.process == \"Like-and-follow\":\n if typ not in ['by_time', 'by_users']:\n raiser('type')\n\n if \"like\" not in self.limits_per_hour or \"follow\" not in self.limits_per_hour \\\n or \"unfollow\" not in self.limits_per_hour:\n raiser('limitsPerHour(like or follow or unfollow)')\n else:\n for i in [\"like\", \"follow\", \"unfollow\"]:\n try:\n self.limits_per_hour[i] = float(self.limits_per_hour[i])\n except ValueError:\n raiser(i)\n try:\n self.duration['value'] = float(val)\n except ValueError:\n raiser('value')\n\n if not isinstance(self.search_hashtags, list):\n raiser('hashtags')\n\n if not isinstance(self.white_list, list):\n raiser('whiteList')", "def main():\n\n print('Drones capacity = {}'.format(DRONES_CAPACITY))\n\n # Instantiate the data of the problem\n data = create_data_model(MAX_POINT_DEMAND, USE_CACHE)\n\n # Create the routing index manager\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model\n routing = pywrapcp.RoutingModel(manager)\n\n # Defining weights of the edges\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Addding capacity constraints.\n def demand_callback(from_index):\n \"\"\"Returns the demand for tests 
of the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['demands'][from_node]\n\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n\n def counter_callback(from_index):\n \"\"\"Returns the number of stops done at the node.\"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['counter'][from_node]\n\n counter_callback_index = routing.RegisterUnaryTransitCallback(\n counter_callback)\n\n # Limiting the number of tests each drone can carry\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n\n # Limiting the overall number of nodes a drone can serve in one tour\n routing.AddDimensionWithVehicleCapacity(\n counter_callback_index,\n 0, # null capacity slack\n data['vehicle_max_number_of_stops'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Counter')\n\n # Setting parameters of the solver\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = HEURISTIC_TIME_LIMIT\n search_parameters.log_search = True\n\n\n print('START SOLVING')\n assignment = routing.SolveWithParameters(search_parameters)\n\n if assignment:\n print_and_save_solution(data, manager, routing, assignment)", "def _check_inputlengths(self):\n # Check x and y have more than 1 item, and x and y are equal length\n if not len(self.x) > 1:\n raise ValueError(\"Route input 'x' must contain more than 1 item\")\n\n if not (len(self.y) > 1):\n raise ValueError(\"Route input 'y' must contain more than 1 item\")\n\n if not (len(self.x) == len(self.y)):\n raise ValueError(\"Route inputs 'x' and 'y' must be of equal length\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (len(v) == len(self.x)):\n raise ValueError(\"Route input 'z' must be of equal length to 'x' and 'y'\")", "def test_field_value_exact(self):\n field = 'M30'\n ref_idx = [2]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatcmpv2_0.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().inputCurve, \"No input 1D curves file provided\")", "def test_13(self, test):\r\n globalConfig.test = test\r\n\r\n outputList = getOutputList()\r\n inputList = getInputList()\r\n\r\n if len(inputList) == 0 and len(outputList) == 0:\r\n return test.UNCLEAR(\"Not tested. 
No resources found.\")\r\n\r\n constrainedOutputList = []\r\n for outputInstance in outputList:\r\n outputCaps = outputInstance.getCaps()\r\n try:\r\n routableInputs = outputCaps['routable_inputs']\r\n except KeyError:\r\n return test.FAIL(\"Could not find 'routable_inputs' in /caps \"\r\n \"for Output {}\".format(outputInstance.id))\r\n if routableInputs is not None:\r\n constrainedOutputList.append(\r\n {\r\n \"output\": outputInstance,\r\n \"routableInputs\": routableInputs\r\n }\r\n )\r\n\r\n if len(constrainedOutputList) == 0:\r\n return test.UNCLEAR(\"Could not test - no outputs have routing constraints set.\")\r\n\r\n inputIDList = [None]\r\n for inputInstance in inputList:\r\n inputIDList.append(inputInstance.id)\r\n\r\n for constrainedOutput in constrainedOutputList:\r\n forbiddenRoutes = copy.deepcopy(inputIDList)\r\n\r\n for routableInputID in constrainedOutput['routableInputs']:\r\n forbiddenRoutes.remove(routableInputID)\r\n\r\n if len(forbiddenRoutes) > 0:\r\n\r\n action = Action(forbiddenRoutes[0], constrainedOutput['output'].id)\r\n activation = Activation()\r\n activation.addAction(action)\r\n\r\n try:\r\n activation.checkReject()\r\n return test.PASS()\r\n except NMOSTestException:\r\n msg = (\"Was able to create a forbidden route between Input {} \"\r\n \"and Output {} despite routing constraint.\"\r\n .format(forbiddenRoutes[0], outputInstance.id))\r\n return test.FAIL(msg)\r\n return test.UNCLEAR(\"Could not test - no route is forbidden.\")", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def route(self, is_check_lanes=True):\n print 'route'\n # TODO: if too mant vtypes, better go through id_modes\n exectime_start = time.clock()\n\n net = self.get_scenario().net\n edges = net.edges\n vtypes = self.parent.vtypes\n\n ids_edges = []\n ids_trip = []\n costs = []\n for id_vtype in self.get_vtypes():\n id_mode = vtypes.ids_mode[id_vtype]\n\n # no routing for pedestrians\n if id_mode != net.modes.get_id_mode('pedestrian'):\n weights = edges.get_times(id_mode=id_mode,\n speed_max=vtypes.speeds_max[id_vtype],\n is_check_lanes=is_check_lanes)\n\n ids_trip_vtype = self.get_trips_for_vtype(id_vtype)\n # print ' id_vtype,id_mode',id_vtype,id_mode#,ids_trip_vtype\n # print ' weights',weights\n ids_edge_depart = self.ids_edge_depart[ids_trip_vtype]\n ids_edge_arrival = self.ids_edge_arrival[ids_trip_vtype]\n\n for id_trip, id_edge_depart, id_edge_arrival in zip(ids_trip_vtype, ids_edge_depart, ids_edge_arrival):\n cost, route = routing.get_mincostroute_edge2edge(id_edge_depart,\n id_edge_arrival,\n edges=edges,\n weights=weights)\n if len(route) > 0:\n ids_edges.append(route)\n ids_trip.append(id_trip)\n costs.append(cost)\n\n ids_route = self.routes.get_value().add_rows(ids_trip=ids_trip,\n ids_edges=ids_edges,\n costs=costs,\n )\n self.add_routes(ids_trip, ids_route)\n print ' exectime', time.clock()-exectime_start\n return ids_trip, ids_route", "def _check_params(self):\n pass", "def getPriorityRequests(self, line):\n # self.optimizationResultsFile = open(self.fileName, 'r')\n # for i, line in enumerate(self.optimizationResultsFile):\n # if i in range(15, requiredLineNo):\n reqPhase, earliestArrival, latestArrival, delay, vehicleClass = line.split()\n if int(vehicleClass) == EV:\n self.requestedPhase_EV.append(int(reqPhase))\n self.ETA_EV.append(float(latestArrival) - 4.0)\n self.ETA_EV.append(float(latestArrival) - 4.0)\n self.ETA_Duration_EV.append(4.0)\n self.ETA_Duration_EV.append(4.0)\n 
self.vehicleType_EV.append(int(vehicleClass))\n self.delay_EV.append(float(delay))\n\n elif int(vehicleClass) == TRANSIT:\n self.requestedPhase_Transit.append(int(reqPhase))\n self.ETA_Transit.append(float(latestArrival) - 4.0)\n self.ETA_Transit.append(float(latestArrival) - 4.0)\n self.ETA_Duration_Transit.append(4.0)\n self.ETA_Duration_Transit.append(4.0)\n self.vehicleType_Transit.append(int(vehicleClass))\n if (float(delay) > 0):\n self.delay_Transit.append(\n float(delay) - (float(latestArrival) - float(earliestArrival) - 4.0))\n\n elif int(vehicleClass) == TRUCK:\n self.requestedPhase_Truck.append(int(reqPhase))\n self.ETA_Truck.append(float(latestArrival) - 4.0)\n self.ETA_Truck.append(float(latestArrival) - 4.0)\n self.ETA_Duration_Truck.append(4.0)\n self.ETA_Duration_Truck.append(4.0)\n self.vehicleType_Truck.append(int(vehicleClass))\n if (float(delay) > 0):\n self.delay_Truck.append(\n float(delay) - (float(latestArrival) - float(earliestArrival) - 4.0))\n\n elif int(vehicleClass) == DILEMMAZONE:\n self.requestedPhase_DilemmaZone.append(int(reqPhase))\n self.ETA_DilemmaZone.append(float(latestArrival) - 4.0)\n self.ETA_DilemmaZone.append(float(latestArrival) - 4.0)\n self.ETA_Duration_DilemmaZone.append(4.0)\n self.ETA_Duration_DilemmaZone.append(4.0)\n self.vehicleType_DilemmaZone.append(int(vehicleClass))\n self.delay_DilemmaZone.append(float(delay))\n\n elif int(vehicleClass) == COORDINATION:\n self.requestedPhase_Coordination.append(int(reqPhase))\n self.ETA_Coordination.append(float(earliestArrival))\n self.ETA_Coordination.append(float(earliestArrival))\n self.ETA_Duration_Coordination.append(\n float(latestArrival) - float(earliestArrival))\n self.ETA_Duration_Coordination.append(\n float(latestArrival) - float(earliestArrival))\n self.vehicleType_Coordination.append(int(vehicleClass))\n self.delay_Coordination.append(float(delay))\n\n # self.optimizationResultsFile.close()", "def ValidateInputs(lat_min, lat_max, lon_min, lon_max, lonres, latres, basepath, \\\n GFED_path, EDGAR_path, CAMS_path, behaviour_settings):\n # Assert sure extents fall within boundary\n assert -180 <= lon_min < 180 and -180 < lon_max <= 180, 'Longitude should be within range -180 -- 180!'\n assert -90 <= lat_min < 90 and -90 < lat_max <= 90, 'latitude should be within range -90 -- 90!'\n assert lon_min < lon_max, 'maximum longitude cannot be smaller than or equal to minimum!'\n assert lat_min < lat_max, 'maximum latitude cannot be smaller than or equal to minimum!'\n \n # Assert resolution is larger than TROPOMI minimum:\n assert lonres > 7, 'TROPOMI minimum longitude resolution is 7 km!'\n assert latres > 7, 'TROPOMI minimum latitude resolution is 7 km!'\n \n # Assert if given directories exist\n if behaviour_settings[1] == True:\n assert os.path.isdir(CAMS_path), f'Directory {CAMS_path} was not found!'\n if behaviour_settings[2] == True:\n assert os.path.isdir(GFED_path), f'Directory {GFED_path} was not found!'\n assert os.path.isdir(EDGAR_path), f'Directory {EDGAR_path} was not found!'\n\n \n return", "def Validate(self):\n \n hklmin = self.hklmin_txtCtrl.GetValue()\n hklmax = self.hklmax_txtCtrl.GetValue()\n hklsteps = self.hkl_steps_ctrl.GetValue()\n \n wmin = self.wmin_txtCtrl.GetValue()\n wmax = self.wmax_txtCtrl.GetValue()\n wsteps = self.w_steps_ctrl.GetValue()\n \n kx = self.kx_txtCtrl.GetValue()\n ky = self.ky_txtCtrl.GetValue()\n kz = self.kz_txtCtrl.GetValue()\n \n zmin = self.zmin_ctrl.GetValue()\n zmax = self.zmax_ctrl.GetValue()\n colorbar_bool = self.color_bar_box.GetValue()\n 
\n temp = self.temp_ctrl.GetValue()\n sphavg_bool = self.spherical_avg_box.GetValue()\n \n bgColor = \"pink\"\n failed = False\n \n #Validate hkl values\n num_hklmin = None\n num_hklmax = None\n try:\n num_hklmin = float(hklmin)*np.pi\n self.hklmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_hklmax = float(hklmax)*np.pi\n self.hklmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate w values\n num_wmin = None\n num_wmax = None\n try:\n num_wmin = float(wmin)\n self.wmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.wmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_wmax = float(wmax)\n self.wmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.wmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate kx,ky,kz,temp,zmin,zmax values\n num_kx = None\n num_ky = None\n num_kz = None\n num_temp = None\n num_zmin = None\n num_zmax = None\n try:\n num_kx = float(kx)\n self.kx_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kx_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_ky = float(ky)\n self.ky_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.ky_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n try:\n num_kz = float(kz)\n self.kz_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kz_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_temp = float(temp)\n self.temp_ctrl.SetBackgroundColour(\"white\")\n except:\n self.temp_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmin = float(zmin)\n self.zmin_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmin_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmax = float(zmax)\n self.zmax_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmax_ctrl.SetBackgroundColour(bgColor)\n failed = True\n \n #Validate File Fields\n int_str = self.int_file_txtCtrl.GetValue()\n spin_str = self.spin_file_txtCtrl.GetValue()\n tau_str = self.tau_file_txtCtrl.GetValue()\n out_str = self.output_file_txtCtrl.GetValue()\n if int_str:\n self.int_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.int_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if spin_str:\n self.spin_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.spin_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if tau_str:\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if out_str:\n self.output_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.output_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n \n direction = {}\n direction['kx'] = num_kx\n direction['ky'] = num_ky\n direction['kz'] = num_kz\n hkl_interval = [num_hklmin, num_hklmax, int(self.hkl_steps_ctrl.GetValue())]\n w_interval = [num_wmin, num_wmax, int(self.w_steps_ctrl.GetValue())]\n \n tau_text = ''\n try:\n tau_file = open(tau_str,'r')\n tau_text = tau_file.read()\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n\n items = tau_text.split()\n if len(items)%3 and not len(items):\n failed = True\n\n tau_list = []\n i = 0\n while not failed and i < len(items)-3:\n tau1, tau2, tau3 = None, None, None\n try:\n tau1 = float(items[i])\n tau2 = float(items[i+1])\n tau3 = float(items[i+2])\n 
self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n tau_list.append([tau1,tau2,tau3])\n i+=3\n \n self.Refresh()\n# self.window.Show(True,True)\n \n plotstats = [zmin, zmax, colorbar_bool]\n \n return failed, hkl_interval, w_interval, tau_list, direction, num_temp, sphavg_bool, plotstats", "def solve_model(self,max_wait_time = 0,max_per_veh = 99999, save_output=True):\n self.manager = pywrapcp.RoutingIndexManager(len(self.data['time_matrix']),\n self.data['num_vehicles'], self.data['depot'])\n routing = pywrapcp.RoutingModel(self.manager)\n\n transit_callback_index = routing.RegisterTransitCallback(self.time_callback)\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n time = 'Time'\n routing.AddDimension(\n transit_callback_index,\n max_wait_time, # allow waiting time\n max_per_veh, # maximum time per vehicle\n False, # Don't force start cumul to zero.\n time)\n time_dimension = routing.GetDimensionOrDie(time)\n # Add time window constraints for each location except depot.\n for location_idx, time_window in enumerate(self.data['time_windows']):\n if location_idx == 0:\n continue\n index = self.manager.NodeToIndex(location_idx)\n time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1]-self.VisitTime)\n # Add time window constraints for each vehicle start node.\n for vehicle_id in range(self.data['num_vehicles']):\n index = routing.Start(vehicle_id)\n time_dimension.CumulVar(index).SetRange(self.data['time_windows'][self.data['depot']][0],\n self.data['time_windows'][self.data['depot']][1])\n for i in range(self.data['num_vehicles']):\n routing.AddVariableMinimizedByFinalizer(\n time_dimension.CumulVar(routing.Start(i)))\n routing.AddVariableMinimizedByFinalizer(\n time_dimension.CumulVar(routing.End(i)))\n '''Routing Settings:https://developers.google.com/optimization/routing/routing_options\n '''\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_MOST_CONSTRAINED_ARC)\n search_parameters.time_limit.seconds = 3600\n #search_parameters.log_search = True\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.AUTOMATIC)\n sol_status={0:'ROUTING_NOT_SOLVED: Problem not solved yet.',\n 1:'ROUTING_SUCCESS: Problem solved successfully.',\n 2:'ROUTING_FAIL: No solution found to the problem.',\n 3:'ROUTING_FAIL_TIMEOUT: Time limit reached before finding a solution.',\n 4:'ROUTING_INVALID: Model, model parameters, or flags are not valid.'}\n print('Start Solving the problem....')\n _start_ = systime.time()\n assignment = routing.SolveWithParameters(search_parameters)\n print(\"Solver status: \", sol_status[routing.status()])\n soltime = systime.time()-_start_\n print('Solving takes: '+ str(round(soltime,2))+' Secs')\n if assignment:\n self.print_save_solution(routing, assignment,save_res=save_output)\n else:\n print('Solving Failed')", "def parse_route(self):\n self.error = 0\n for item in self.arg_list:\n if len(item) == 2:\n self.make_log_write(self.fixed_width_parse(item), item)\n elif len(item) == 3:\n self.make_log_write(self.delimited_parse(item), item)\n else:\n raise Exception('Invalid Parameters: %s please correct' % item)", "def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n 
constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def onCalculateParameters(self, evt):\n \n if not self.config.iActivationMode == \"User-defined\":\n if self.onCheckParameters(evt=None) == False: \n print('Please fill in all necessary fields first!')\n return\n divisibleCheck = abs(self.config.iEndVoltage-self.config.iStartVoltage)/self.config.iStepVoltage\n divisibleCheck2 = divisibleCheck%1\n if divisibleCheck2 != 0:\n msg = \"Are you sure your collision voltage range is divisible by your increment?\"\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return\n else:\n if self.config.iScanTime == None or self.config.iScanTime == \"\": \n msg = 'Please make sure you to fill in the scan time input box.'\n dialogs.dlgBox(exceptionTitle='Mistake in the input', \n exceptionMsg= msg,\n type=\"Error\")\n return\n \n if self.config.iActivationMode == \"Linear\":\n self.wrensInput, ColEnergyX, scanPerVoltageList, timeList, totalAcqTime = self.onPrepareLinearMethod()\n elif self.config.iActivationMode == \"Exponential\":\n self.wrensInput, ColEnergyX, scanPerVoltageList, timeList, totalAcqTime = self.onPrepareExponentialMethod()\n elif self.config.iActivationMode == \"Boltzmann\":\n self.wrensInput, ColEnergyX, scanPerVoltageList, timeList, totalAcqTime = self.onPrepareBoltzmannMethod()\n elif self.config.iActivationMode == \"User-defined\":\n self.wrensInput, ColEnergyX, scanPerVoltageList, timeList, totalAcqTime = self.onPrepareListMethod() \n \n self.wrensCMD = self.wrensInput.get('command', None)\n # Setup status:\n self.view.SetStatusText(''.join(['Acq. 
time: ',str(totalAcqTime),' mins']), number=0)\n self.view.SetStatusText(''.join([str(len(scanPerVoltageList)), ' steps']), number=1)\n \n # Add wrensCMD to config file\n self.config.wrensCMD = self.wrensCMD\n \n self.onPlotSPV(ColEnergyX, scanPerVoltageList)\n self.onPlotTime(ColEnergyX, timeList)\n print(''.join([\"Your submission code: \", self.wrensCMD]))", "def test_validate_good_order(self):\n for proj in testorders.good_test_projections:\n valid_order = copy.deepcopy(self.base_order)\n valid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n try:\n good = api.validation(valid_order, self.staffuser.username)\n except ValidationException as e:\n self.fail('Raised ValidationException: {}'.format(e.message))", "def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches\r\n # Select the inputs to process\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._select_inputs_one_to_one(chunk_definition)\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._get_od_pairs_for_chunk(chunk_definition)\r\n self._select_inputs_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n # Initialize the Route solver object\r\n self.initialize_rt_solver()\r\n self._add_unique_id_fields()\r\n\r\n # Insert the origins and destinations\r\n self.logger.debug(f\"Route solver fields transferred from Origins: {self.origin_transfer_fields}\")\r\n self.logger.debug(f\"Route solver fields transferred from Destinations: {self.destination_transfer_fields}\")\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._insert_stops_one_to_one()\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._insert_stops_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0:\r\n # There were no valid destinations for this set of origins\r\n self.logger.debug(\"No valid destinations for this set of origins. Skipping Route calculation.\")\r\n return\r\n\r\n # Load barriers\r\n # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of\r\n # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the\r\n # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers,\r\n # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal\r\n # solves. 
However, since barriers is likely an unusual case, deal with this only if it becomes a problem.\r\n for barrier_fc in self.barriers:\r\n self.logger.debug(f\"Loading barriers feature class {barrier_fc}...\")\r\n shape_type = arcpy.Describe(barrier_fc).shapeType\r\n if shape_type == \"Polygon\":\r\n class_type = arcpy.nax.RouteInputDataType.PolygonBarriers\r\n elif shape_type == \"Polyline\":\r\n class_type = arcpy.nax.RouteInputDataType.LineBarriers\r\n elif shape_type == \"Point\":\r\n class_type = arcpy.nax.RouteInputDataType.PointBarriers\r\n else:\r\n self.logger.warning(\r\n f\"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored.\"\r\n )\r\n continue\r\n barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True)\r\n self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True)\r\n\r\n # Solve the Route analysis\r\n self.logger.debug(\"Solving Route...\")\r\n solve_start = time.time()\r\n self.solve_result = self.rt_solver.solve()\r\n solve_end = time.time()\r\n self.logger.debug(f\"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.\")\r\n\r\n # Handle solve messages\r\n solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)]\r\n for msg in solve_msgs:\r\n self.logger.debug(msg)\r\n\r\n # Update the result dictionary\r\n self.job_result[\"solveMessages\"] = solve_msgs\r\n if not self.solve_result.solveSucceeded:\r\n self.logger.debug(\"Solve failed.\")\r\n return\r\n self.logger.debug(\"Solve succeeded.\")\r\n self.job_result[\"solveSucceeded\"] = True\r\n\r\n # Save output\r\n self._export_to_feature_class(chunk_definition)\r\n\r\n self.logger.debug(\"Finished calculating Route.\")", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('rw', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Well radius rw must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Well radius rw must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n return(flag, warnings) # End Function", "def validate_settings(_cfg, _ctx):\n pass", "def state_processing_validate(cfg, app, win, events):", "def skip_field_info_validation(config):\n\n reformatters = ['PCPCombine', 'RegridDataPlane']\n process_list = [item[0] for item in get_process_list(config)]\n\n # if running MTD in single mode, you don't need matching FCST/OBS\n if 'MTD' in process_list and config.getbool('config', 'MTD_SINGLE_RUN'):\n return True\n\n # if running any app other than the reformatters, you need matching FCST/OBS, so don't skip\n if [item for item in process_list if item not in reformatters]:\n return False\n\n return True" ]
[ "0.58897823", "0.5755311", "0.5660653", "0.557182", "0.5522201", "0.550903", "0.54448694", "0.53637433", "0.5346746", "0.5307992", "0.52819705", "0.52334815", "0.52329284", "0.52285826", "0.5222194", "0.52111304", "0.51922095", "0.5165339", "0.51619154", "0.51523876", "0.5147929", "0.5141172", "0.5106939", "0.51051646", "0.51023626", "0.5094769", "0.50754744", "0.5071408", "0.50690913", "0.5057215", "0.50538415", "0.50496626", "0.504522", "0.5038793", "0.5009345", "0.4992004", "0.49895364", "0.4989308", "0.49881244", "0.49808773", "0.49793383", "0.4976816", "0.4975787", "0.49677554", "0.49487126", "0.49386606", "0.4937263", "0.49306104", "0.4926666", "0.49221393", "0.4917611", "0.4913966", "0.49123615", "0.49107024", "0.49076533", "0.4906574", "0.4905208", "0.48952687", "0.48880726", "0.48879987", "0.488566", "0.48856527", "0.48802283", "0.48778808", "0.4877464", "0.48735306", "0.486137", "0.4852452", "0.4844913", "0.48311692", "0.48305282", "0.48273328", "0.48261476", "0.48245794", "0.4823767", "0.48213306", "0.48172995", "0.48104942", "0.48100328", "0.47984508", "0.47856048", "0.47738904", "0.47718078", "0.47621393", "0.4760542", "0.47475764", "0.4746359", "0.4741863", "0.4737301", "0.47370505", "0.47321674", "0.4731373", "0.47278702", "0.47246718", "0.47225377", "0.47212604", "0.47125098", "0.4703572", "0.47020015", "0.4700683" ]
0.7251164
0
Discover if the origins and destinations include valid fields we can use in the Route analysis. Any fields with the correct names and data types matching valid fields recognized by the Route solver for the Stops input can be used in the analysis. Compare the input origins and destinations fields with the list of supported Route Stops fields and populate the list of fields to transfer in the route inputs dictionary.
def _populate_input_data_transfer_fields(self):
    # Valid fields for the Route Stops input are described here:
    # https://pro.arcgis.com/en/pro-app/latest/arcpy/network-analyst/route-input-data-types.htm
    # Do not transfer RouteName or Sequence as these are explicitly controlled by this tool. Do not transfer
    # LocationType because we want all inputs to be Stops. Waypoints don't make sense for this analysis.
    int_types = ["Integer", "SmallInteger"]
    numerical_types = ["Double", "Single"] + int_types
    rt_stops_input_fields = {
        "Name": ["String"],
        "AdditionalTime": numerical_types,
        "AdditionalDistance": numerical_types,
        "AdditionalCost": numerical_types,
        "TimeWindowStart": ["Date"],
        "TimeWindowEnd": ["Date"],
        "CurbApproach": int_types,
        "Bearing": numerical_types,
        "BearingTol": numerical_types,
        "NavLatency": numerical_types,
        "SourceID": int_types,
        "SourceOID": int_types,
        "PosAlong": numerical_types,
        "SideOfEdge": int_types
    }
    # Preserve origin and destination input fields that match names and types
    origin_transfer_fields = [
        f.name for f in arcpy.ListFields(self.origins)
        if f.name in rt_stops_input_fields and f.type in rt_stops_input_fields[f.name]]
    self.rt_inputs["origin_transfer_fields"] = origin_transfer_fields
    if origin_transfer_fields:
        LOGGER.info((
            "Supported fields in the input Origins table that will be used in the analysis: "
            f"{origin_transfer_fields}"
        ))
    destination_transfer_fields = [
        f.name for f in arcpy.ListFields(self.destinations)
        if f.name in rt_stops_input_fields and f.type in rt_stops_input_fields[f.name]]
    self.rt_inputs["destination_transfer_fields"] = destination_transfer_fields
    if destination_transfer_fields:
        LOGGER.info((
            "Supported fields in the input Destinations table that will be used in the analysis: "
            f"{destination_transfer_fields}"
        ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_stops_one_to_one(self): # pylint: disable=too-many-locals\r\n # Use an insertCursor to insert Stops into the Route analysis\r\n destinations = {}\r\n destination_rows = []\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.origin_transfer_fields\r\n ) as icur:\r\n # Loop through origins and insert them into Stops along with their assigned destinations\r\n for origin in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [\"SHAPE@\", self.origin_id_field, self.assigned_dest_field] + self.origin_transfer_fields\r\n ):\r\n dest_id = origin[2]\r\n if dest_id is None:\r\n continue\r\n if dest_id not in destinations:\r\n dest_val = f\"'{dest_id}'\" if isinstance(dest_id, str) else dest_id\r\n with arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [\"SHAPE@\", self.dest_id_field] + self.destination_transfer_fields,\r\n where_clause=f\"{self.dest_id_field} = {dest_val}\"\r\n ) as cur:\r\n try:\r\n destinations[dest_id] = next(cur)\r\n except StopIteration:\r\n # The origin's destination is not present in the destinations table. Just skip the origin.\r\n continue\r\n # Insert origin and destination\r\n destination = destinations[dest_id]\r\n if self.reverse_direction:\r\n route_name = f\"{dest_id} - {origin[1]}\"\r\n origin_sequence = 2\r\n destination_sequence = 1\r\n else:\r\n route_name = f\"{origin[1]} - {dest_id}\"\r\n origin_sequence = 1\r\n destination_sequence = 2\r\n # Define the final origin and destination rows for the input Stops\r\n origin_row = [route_name, origin_sequence, origin[1], origin[0], None] + list(origin)[3:]\r\n destination_row = [route_name, destination_sequence, None, destination[0], destination[1]] + \\\r\n list(destination)[2:]\r\n icur.insertRow(origin_row)\r\n destination_rows.append(destination_row)\r\n\r\n # Insert destinations\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\", self.dest_unique_id_field_name] +\r\n self.destination_transfer_fields\r\n ) as dcur:\r\n for row in destination_rows:\r\n dcur.insertRow(row)", "def verify_destinations(**kwargs):\n if \"mapd_db\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if kwargs[\"dest_db_server\"] is None:\n # If dest_server is not set for mapd_db, then exit\n logging.error(\n '\"dest_server\" is required when destination = \"mapd_db\"'\n )\n if \"file_json\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if kwargs[\"output_file_json\"] is None:\n # If output_file_json is not set for file_json, then exit\n logging.error(\n '\"output_file_json\" is required when destination = \"file_json\"'\n )\n if \"output\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if \"jenkins_bench\" in kwargs[\"destinations\"]:\n valid_destination_set = True\n if kwargs[\"output_file_jenkins\"] is None:\n # If output_file_jenkins is not set for jenkins_bench, then exit\n logging.error(\n '\"output_file_jenkins\" is required '\n + 'when destination = \"jenkins_bench\"'\n )\n if not valid_destination_set:\n return False\n else:\n return True", "def test_route_init() -> None:\n rschema = RouteSchema(SpecificLocation())\n route = Route(schema=rschema)\n\n assert route.schema is rschema # Cannot use __eq__\n assert isinstance(route.stops, list)", "def 
route_matrix(client, origins, destinations, **kwargs):\n\n sep_pattern = re.compile(r'[,;|]')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n\n is_origins_str = isinstance(origins, str)\n is_origins_list = isinstance(origins, list)\n if not any([is_origins_str, is_origins_list]):\n raise ValueError('\"origins\" must be str or list!')\n elif is_origins_str:\n sep_origins = sep_pattern.split(origins)\n u_origins = origins.decode('utf-8')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n match = CN_pattern.search(u_origins)\n if match:\n if len(sep_origins) > 5:\n raise ValueError('\"origins\" incorrect! upper limits is 5.')\n else:\n origins = '|'.join(sep_origins)\n else:\n if len(sep_origins) > 10:\n raise ValueError('\"origins\"incorrect! upper limits is 5.')\n else:\n temp = [','.join(sep_origins[(2*x):(2*x+2)][::-1])\n for x in range(0, len(sep_origins)/2)]\n origins = '|'.join(temp)\n\n else:\n # element in list is CN_pattern characters.\n if len(origins[0]) == 1:\n origins = '|'.join(origins)\n # element in list is list/tuple of lng,lat.\n else:\n origins = [map(str, l) for l in origins]\n origins = '|'.join([','.join(l[::-1]) for l in origins])\n\n is_destinations_str = isinstance(destinations, str)\n is_destinations_list = isinstance(destinations, list)\n\n if not any([is_destinations_str, is_destinations_list]):\n raise ValueError('\"destinations\" must be str or list!')\n elif is_destinations_str:\n sep_destinations = sep_pattern.split(destinations)\n u_destinations = destinations.decode('utf-8')\n CN_pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n match = CN_pattern.search(u_destinations)\n if match:\n if len(sep_destinations) > 5:\n raise ValueError('\"destinations\" incorrect! upper limits is \\\n 5.')\n else:\n destinations = '|'.join(sep_destinations)\n else:\n if len(sep_destinations) > 10:\n raise ValueError('\"destinations\"incorrect! 
upper limits is 5.')\n else:\n temp = [','.join(sep_destinations[(2*x):(2*x+2)][::-1])\n for x in range(0, len(sep_destinations)/2)]\n destinations = '|'.join(temp)\n\n else:\n if len(destinations[0]) == 1:\n destinations = '|'.join(destinations)\n else:\n # first, map to str.\n destinations = [map(str, l) for l in destinations]\n destinations = '|'.join([','.join(l[::-1]) for l in destinations])\n\n kwargs.update({'server_name': 'direction', 'version': 'v1',\n 'subserver_name': 'routematrix', 'origins': origins,\n 'destinations': destinations})\n\n return client.get(kwargs)", "def _insert_stops_many_to_many(self):\r\n # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse\r\n o_data = {} # {Origin ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_origins_layer,\r\n [self.origin_id_field, \"SHAPE@\"] + self.origin_transfer_fields\r\n ):\r\n o_data[row[0]] = row[1:]\r\n d_data = {} # {Destination ID: [Shape, transferred fields]}\r\n for row in arcpy.da.SearchCursor( # pylint: disable=no-member\r\n self.input_destinations_layer,\r\n [self.dest_id_field, \"SHAPE@\"] + self.destination_transfer_fields\r\n ):\r\n d_data[row[0]] = row[1:]\r\n\r\n # Insert origins from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.origin_unique_id_field_name, \"SHAPE@\"] + self.origin_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n origin_data = o_data[origin_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Origin from OD Pairs not found in inputs. Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 1, origin_id) + origin_data)\r\n\r\n # Insert destinations from each OD pair into the Route analysis\r\n with self.rt_solver.insertCursor(\r\n arcpy.nax.RouteInputDataType.Stops,\r\n [\"RouteName\", \"Sequence\", self.dest_unique_id_field_name, \"SHAPE@\"] + self.destination_transfer_fields\r\n ) as icur:\r\n for od_pair in self.od_pairs:\r\n origin_id, dest_id = od_pair\r\n try:\r\n dest_data = d_data[dest_id]\r\n except KeyError:\r\n # This should never happen because we should have preprocessed this out.\r\n self.logger.debug(\r\n f\"Destination from OD Pairs not found in inputs. 
Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 2, dest_id) + dest_data)", "def _check_inputlengths(self):\n # Check x and y have more than 1 item, and x and y are equal length\n if not len(self.x) > 1:\n raise ValueError(\"Route input 'x' must contain more than 1 item\")\n\n if not (len(self.y) > 1):\n raise ValueError(\"Route input 'y' must contain more than 1 item\")\n\n if not (len(self.x) == len(self.y)):\n raise ValueError(\"Route inputs 'x' and 'y' must be of equal length\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (len(v) == len(self.x)):\n raise ValueError(\"Route input 'z' must be of equal length to 'x' and 'y'\")", "def test_parse_routes(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n problem = problem_builder.build(riders, vehicles, depots)\n model = model_builder.build(problem)\n solution = model.solve()\n routes = Router._parse_routes(problem, solution)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def _check_inputvalues(self):\n # Check x, y and z are int or float dtypes\n # ie do not contain any unusable values like strings\n if not (self.x.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n if not (self.y.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (v.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")", "def read_routes(routes_source: TextIO, airports: AirportDict) -> RouteDict:\n #RouteDict = Dict[str, Set[str]]\n routes_list = routes_source.readlines()\n d = {}\n src_index = ROUTE_DATA_INDEXES['Source airport']\n dst_index = ROUTE_DATA_INDEXES['Destination airport']\n \n for i in range(len(routes_list)):\n source_airport = get_routes_information(routes_list[i], src_index)\n destination_airport = get_routes_information(routes_list[i], dst_index)\n \n if source_airport in airports and destination_airport in airports\\\n and source_airport not in d:\n \n routes = set() # it's a set\n routes.add(destination_airport)\n d[source_airport] = routes\n \n elif source_airport in airports and destination_airport in \\\n airports and source_airport in d:\n d[source_airport].add(destination_airport)\n return d", "def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get 
the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate", "def route_gateway_validator(\n args: StackValidationArgs, report_violation: ReportViolation\n):\n routes = filter(lambda r: r.resource_type == \"aws:ec2/route:Route\", args.resources)\n gateways = filter(\n lambda r: r.resource_type == \"aws:ec2/internetGateway:InternetGateway\",\n args.resources)\n gateways_ids = [gateway.props['id'] for gateway in gateways]\n for route in routes:\n if route.props[\"gatewayId\"] not in gateways_ids:\n report_violation(\n \"You tried to set unexpected Internet Gateway id for Route\")", "def directions_calc(self):\n \n # create route_dict, {'radio_button_name': {'geometries': list of coords,\n # 'values': list of values}}\n route_dict = self._selectInput()\n \n # generate lists with locations and values\n (start_layer_name,\n end_layer_name) = [x.objectName() for x in self.radio_buttons]\n \n locations_list = list(product(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n values_list = list(product(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n \n # If row-by-row in two-layer mode, then only zip the locations\n if all([button.isChecked() for button in self.radio_buttons]) and self.dlg.routing_twolayer_rowbyrow.isChecked():\n locations_list = list(zip(route_dict[start_layer_name]['geometries'],\n route_dict[end_layer_name]['geometries']))\n\n values_list = list(zip(route_dict[start_layer_name]['values'],\n route_dict[end_layer_name]['values']))\n\n # Add via point if specified\n route_via = None\n if self.dlg.routing_via_label.text() != 'Long,Lat':\n route_via = [float(x) for x in self.dlg.routing_via_label.text().split(\",\")]\n \n message_bar, progress_widget = progressbar.pushProgressBar(self.iface)\n \n responses = []\n delete_values = []\n for i, coords_tuple in enumerate(locations_list):\n if coords_tuple[0] == coords_tuple[-1]:\n # Skip when same location\n delete_values.append(i)\n continue\n if route_via:\n # add via coords\n coords_tuple = list(coords_tuple)\n coords_tuple.insert(1, route_via)\n \n # Update progress bar\n percent = (i/len(locations_list)) * 100\n message_bar.setValue(percent)\n \n # Make the request\n self.params['coordinates'] = convert.build_coords(coords_tuple)\n responses.append(self.client.request(self.url, self.params))\n \n # Delete entries in values_list where coords where the same\n values_list = [value for idx, value in enumerate(values_list) if idx not in delete_values]\n \n # Only proceed when there actual responses\n 
if responses: \n layer_out = self._addLine(responses, values_list)\n layer_out.updateExtents()\n \n QgsProject.instance().addMapLayer(layer_out)\n \n self.iface.messageBar().popWidget(progress_widget)", "def buildStopsDict(self):\n \n if len(self.nodesDict) == 0:\n raise Exception('Nodes dictionary is empty!')\n if len(self.linksDict) == 0:\n raise Exception('Links dictionary is empty!')\n \n self.stopsByRoute = dict()\n self.stopsByNode = dict()\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n \n tempStops = \"temp_stops\"\n tempStopsSp = \"temp_stops_sp\"\n \n # Delete temp_stops and temp_stops_sp feature classes if they exist.\n if arcpy.Exists(tempStops):\n arcpy.Delete_management(tempStops)\n if arcpy.Exists(tempStopsSp):\n arcpy.Delete_management(tempStopsSp)\n arcpy.CopyFeatures_management(PublicTransit.RTD_PATH + PublicTransit.RTD_STOPS,\n tempStops)\n \n # Project temp_stops to CA state plane and add XY.\n install_dir = arcpy.GetInstallInfo()['InstallDir']\n out_coordinate_system = os.path.join(install_dir, PublicTransit.NAD_83_DIRECTORY)\n arcpy.Project_management(tempStops, tempStopsSp, out_coordinate_system,\n \"NAD_1983_To_WGS_1984_1\")\n arcpy.AddXY_management(tempStopsSp)\n \n # Create a search cursor to traverse all stops.\n stops = arcpy.SearchCursor(tempStopsSp, \"\", \"\",\n \"CPT_STOPPOINTID; SCH_STOPPOINTSEQNO; \" +\n \"SCH_ROUTEID; SCH_PATTERNID; ROUTE_PATTERN; \" +\n \"SourceOID; POINT_X; POINT_Y\",\n \"ROUTE_PATTERN A; SCH_STOPPOINTSEQNO A\")\n numStops = int(arcpy.GetCount_management(tempStopsSp).getOutput(0))\n print \"Found %d stops\" % numStops\n \n p = index.Property()\n p.overwrite = True\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE,properties=p)\n \n # For each stop determine the nearest network node.\n scount = 0\n icount = 0\n for s in stops:\n # only create stops for routes which exist in RTD\n if not s.ROUTE_PATTERN in self.transitRoutes:\n continue\n scount += 1\n st = TransitStop(s.CPT_STOPPOINTID, s.SCH_ROUTEID, s.SCH_PATTERNID,\n s.ROUTE_PATTERN, s.SourceOID, s.SCH_STOPPOINTSEQNO)\n # If the stop's linkId is in the links dictionary use the link from\n # and to node (these should all be bus routes since MTC's route\n # traversal FC was created for buses only at this time).\n if s.SourceOID in self.linksDict:\n link = self.linksDict[s.SourceOID]\n # Determine which node is nearest and snap to it.\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.fromNode.x,\n link.fromNode.y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n link.toNode.x,\n link.toNode.y):\n st.tanaNode = link.fromNode.nodeId\n else:\n st.tanaNode = link.toNode.nodeId\n st.inRegion = True\n \n # The stop's link is not in linksDict. These are either stops \n # outside the region or non-bus routes for which there are no\n # route traversal edges. 
Do a link lookup from the Roadways\n # feature class.\n else:\n arcpy.env.workspace = PublicTransit.RTD_PATH\n roadwaysSearch = arcpy.SearchCursor(PublicTransit.ROADWAYS_FC,\n \"LinkId = \" + str(s.SourceOID),\n \"\", \"\", \"F_JNCTID; T_JNCTID\", \"\")\n for r in roadwaysSearch:\n fromNode = self.__getIdHash(r.F_JNCTID)\n toNode = self.__getIdHash(r.T_JNCTID)\n if fromNode in self.nodesDict and toNode in self.nodesDict:\n if self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[fromNode].x,\n self.nodesDict[fromNode].y) <= \\\n self.__getDistance(s.POINT_X,\n s.POINT_Y,\n self.nodesDict[toNode].x,\n self.nodesDict[toNode].y):\n st.tanaNode = fromNode\n else:\n st.tanaNode = toNode\n st.inRegion = True\n else:\n st.inRegion = False\n \n # Add the stop to stopsByRoute and stopsByNode dictionaries\n if s.ROUTE_PATTERN in self.stopsByRoute:\n self.stopsByRoute[s.ROUTE_PATTERN].append(st)\n else:\n self.stopsByRoute[s.ROUTE_PATTERN] = [st]\n if (st.tanaNode in self.stopsByNode):\n self.stopsByNode[st.tanaNode].append(st)\n else:\n self.stopsByNode[st.tanaNode] = [st]\n # add the stop node to the spatial index\n if st.tanaNode in self.nodesDict:\n icount += 1\n self.spIndex.insert(st.stopPointId,\n (self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y,\n self.nodesDict[st.tanaNode].x,\n self.nodesDict[st.tanaNode].y))\n del stops", "def transit_to_dict(\n cities: List[City], transfers: List[Set[StopArea]]\n) -> dict:\n data = {\n \"stopareas\": {}, # stoparea id => stoparea data\n \"networks\": {}, # city name => city data\n \"transfers\": {}, # set(tuple(stoparea_id1, stoparea_id2)), id1<id2\n }\n\n for city in (c for c in cities if c.is_good):\n network = {\n \"id\": city.id,\n \"name\": city.name,\n \"routes\": [],\n }\n\n for route_master in city:\n route_data = {\n \"id\": route_master.id,\n \"mode\": route_master.mode,\n \"ref\": route_master.ref,\n \"name\": route_master.name,\n \"colour\": route_master.colour,\n \"infill\": route_master.infill,\n \"itineraries\": [],\n }\n\n for route in route_master:\n variant_data = {\n \"id\": route.id,\n \"tracks\": route.get_tracks_geometry(),\n \"start_time\": route.start_time,\n \"end_time\": route.end_time,\n \"interval\": route.interval,\n \"stops\": [\n {\n \"stoparea_id\": route_stop.stoparea.id,\n \"distance\": route_stop.distance,\n }\n for route_stop in route.stops\n ],\n }\n\n # Store stopareas participating in the route\n # and that have not been stored yet\n for route_stop in route.stops:\n stoparea = route_stop.stoparea\n if stoparea.id in data[\"stopareas\"]:\n continue\n stoparea_data = {\n \"id\": stoparea.id,\n \"center\": stoparea.center,\n \"name\": stoparea.station.name,\n \"entrances\": [\n {\n \"id\": egress_id,\n \"name\": egress[\"tags\"].get(\"name\"),\n \"ref\": egress[\"tags\"].get(\"ref\"),\n \"center\": el_center(egress),\n }\n for (egress_id, egress) in (\n (egress_id, city.elements[egress_id])\n for egress_id in stoparea.entrances\n | stoparea.exits\n )\n ],\n }\n data[\"stopareas\"][stoparea.id] = stoparea_data\n\n route_data[\"itineraries\"].append(variant_data)\n\n network[\"routes\"].append(route_data)\n\n data[\"networks\"][city.name] = network\n\n # transfers\n pairwise_transfers = set()\n for stoparea_set in transfers:\n stoparea_list = list(stoparea_set)\n for first_i in range(len(stoparea_list) - 1):\n for second_i in range(first_i + 1, len(stoparea_list)):\n stoparea1_id = stoparea_list[first_i].id\n stoparea2_id = stoparea_list[second_i].id\n if all(\n st_id in data[\"stopareas\"]\n for 
st_id in (stoparea1_id, stoparea2_id)\n                ):\n                    id1, id2 = sorted([stoparea1_id, stoparea2_id])\n                    pairwise_transfers.add((id1, id2))\n\n    data[\"transfers\"] = pairwise_transfers\n    return data", "def get_a_routes_closest_stop_and_arrival_time(request, lat, lon, bus_route, audio_string='from_input'):\n# 1. Clean the data.\n    clean_data = clean_route_data(lat,lon,bus_route)\n    if not clean_data:\n        return JsonResponse({'status': 'bad', 'error': 'not clean data. determined invalid route query', 'query': bus_route})\n\n    bus_id = clean_data['bus_id']\n    bus_route = clean_data['bus_route']\n    user_lat = clean_data['user_lat']\n    user_lon = clean_data['user_lon']\n    \n# 2. Find closest stops.\n    closest_stops = find_closest_stops(user_lat,user_lon,bus_id)\n\n    if not closest_stops: # zero stops\n        return JsonResponse({'status': 'bad', 'error': 'no stops available nearby', 'lat:': user_lat, 'lon:': user_lon, 'query': bus_route})\n\n# 3. Finding arrival times for: the specific_bus at the nearest_stops\n    closest_arrival = find_estimated_arrival(closest_stops['closest_stop_id'], bus_id)\n    next_closest_arrival = find_estimated_arrival(closest_stops['next_closest_stop_id'], bus_id)\n\n# 4. Check that a valid time was returned from find_estimated_arrival \n    if closest_arrival['estimated'] or next_closest_arrival['estimated']:\n\n        return JsonResponse({\n            'status': 'good',\n            'route': bus_route,\n            'closest_stop': { \n                'closest_name': closest_stops['name_of_closest'],\n                'closest_direction': closest_stops['closest_direction'],\n                'closest_stop_id': closest_stops['closest_stop_id'],\n                'closest_minutes': closest_arrival['estimated'],\n                'closest_lat': closest_stops['closest_stop_lat'],\n                'closest_lon': closest_stops['closest_stop_lon'],\n                'closest_destination': closest_arrival['destination']\n            },\n            'next_closest_stop': {\n                'next_closest_name': closest_stops['name_of_next_closest'],\n                'next_closest_direction': closest_stops['next_closest_direction'],\n                'next_closest_stop_id': closest_stops['next_closest_stop_id'],\n                'next_closest_minutes': next_closest_arrival['estimated'],\n                'next_closest_lat': closest_stops['next_closest_stop_lat'],\n                'next_closest_lon': closest_stops['next_closest_stop_lon'],\n                'next_closest_destination': next_closest_arrival['destination']\n            },\n            'testing': audio_string,\n        })\n    \n    return JsonResponse({'status': 'bad', 'error': 'no arrival time', 'closest_stop': {'id': closest_stops['closest_stop_id'], 'estimated_arrival': closest_arrival['estimated']}, 'next_closest_stop':{'id': closest_stops['next_closest_stop_id'], 'estimated_arrival': next_closest_arrival['estimated']}})", "def possible_routes(srcLat, srcLon, destLat, destLon, searchPreference, dateTime):\n\n    dateTime = dateTime.split(\",\")\n\n    routes = Db().get_best_route(srcLat, srcLon, destLat, destLon)\n    try:\n        best_routes = get_three_best_routes(routes, searchPreference, dateTime)\n    except IndexError:\n        best_routes = \"No Journey Found\"\n\n    # Get the address for map display purposes\n    try:\n        for i in range(len(best_routes)):\n            #address is a dataframe, hence the use of .loc\n            address = Db().get_single_address(best_routes[i][2]).loc[0,\"Address\"]\n            best_routes[i].append(address)\n    except IndexError:\n        # In case the source is outside Dublin\n        best_routes = \"No Journey Found\"\n\n    return json.dumps(best_routes, ensure_ascii=False)", "def _build_routes(self, routes, allow_redundant_targets=True):\n        routes = routes or ()\n        joins = {}\n        targets_seen = set()\n\n        for route in routes:\n            if isinstance(route, dict):\n                source_label = route.get('source')\n                
target_label = route.get('target')\n field_label = route.get('field')\n symmetrical = route.get('symmetrical')\n else:\n warnings.warn('Routes are now defined as dicts',\n DeprecationWarning)\n source_label, target_label, field_label, symmetrical = route\n\n # get models\n source = self.get_model(source_label, local=False)\n target = self.get_model(target_label, local=False)\n\n field = None\n\n # get field\n if field_label:\n model_name, field_name = field_label.split('.', 1)\n model_name = model_name.lower()\n\n # determine which model the join field specified exists on\n if model_name == source.__name__.lower():\n field = self.get_field(field_name, source)\n elif model_name == target.__name__.lower():\n field = self.get_field(field_name, target)\n else:\n raise TypeError('model for join field, \"{0}\", '\n 'does not exist'.format(field_name))\n\n if isinstance(field, RelatedObject):\n field = field.field\n\n if not allow_redundant_targets:\n if target in targets_seen:\n raise ValueError('Model {0} cannot be the target of '\n 'more than one route in this list'\n .format(target_label))\n else:\n targets_seen.add(target)\n\n # The `joins` hash defines pairs which are explicitly joined\n # via the specified field. If no field is defined, then the\n # join field is implied or does not matter; the route is reduced\n # to a straight lookup.\n joins[(source, target)] = field\n\n if symmetrical:\n if not allow_redundant_targets:\n if source in targets_seen:\n raise ValueError('Model {0} cannot be the target of '\n 'more than one route in this list'\n .format(source_label))\n else:\n targets_seen.add(source)\n\n joins[(target, source)] = field\n\n return joins", "def find_routes(\r\n stops,\r\n measurement_units = \"\"\"Minutes\"\"\",\r\n analysis_region = None,\r\n reorder_stops_to_find_optimal_routes = False,\r\n preserve_terminal_stops = \"\"\"Preserve First\"\"\",\r\n return_to_start = False,\r\n use_time_windows = False,\r\n time_of_day = None,\r\n time_zone_for_time_of_day = \"\"\"Geographically Local\"\"\",\r\n uturn_at_junctions = \"\"\"Allowed Only at Intersections and Dead Ends\"\"\",\r\n point_barriers = None,\r\n line_barriers = None,\r\n polygon_barriers = None,\r\n use_hierarchy = True,\r\n restrictions = None,\r\n attribute_parameter_values = None,\r\n route_shape = \"\"\"True Shape\"\"\",\r\n route_line_simplification_tolerance = None,\r\n populate_route_edges = False,\r\n populate_directions = True,\r\n directions_language = \"\"\"en\"\"\",\r\n directions_distance_units = \"\"\"Miles\"\"\",\r\n directions_style_name = \"\"\"NA Desktop\"\"\",\r\n travel_mode = \"\"\"Custom\"\"\",\r\n impedance = \"\"\"Drive Time\"\"\",\r\n gis = None):\r\n kwargs = locals()\r\n\r\n if stops is None:\r\n stops = default_stops\r\n\r\n if point_barriers is None:\r\n point_barriers = default_point_barriers\r\n\r\n if line_barriers is None:\r\n line_barriers = default_line_barriers\r\n\r\n if polygon_barriers is None:\r\n polygon_barriers = default_polygon_barriers\r\n\r\n if restrictions is None:\r\n restrictions = default_restrictions\r\n\r\n if attribute_parameter_values is None:\r\n attribute_parameter_values = default_attributes\r\n\r\n if route_line_simplification_tolerance is None:\r\n route_line_simplification_tolerance = default_tolerance\r\n\r\n param_db = {\r\n \"stops\": (FeatureSet, \"Stops\"),\r\n \"measurement_units\": (str, \"Measurement_Units\"),\r\n \"analysis_region\": (str, \"Analysis_Region\"),\r\n \"reorder_stops_to_find_optimal_routes\": (bool, 
\"Reorder_Stops_to_Find_Optimal_Routes\"),\r\n \"preserve_terminal_stops\": (str, \"Preserve_Terminal_Stops\"),\r\n \"return_to_start\": (bool, \"Return_to_Start\"),\r\n \"use_time_windows\": (bool, \"Use_Time_Windows\"),\r\n \"time_of_day\": (datetime, \"Time_of_Day\"),\r\n \"time_zone_for_time_of_day\": (str, \"Time_Zone_for_Time_of_Day\"),\r\n \"uturn_at_junctions\": (str, \"UTurn_at_Junctions\"),\r\n \"point_barriers\": (FeatureSet, \"Point_Barriers\"),\r\n \"line_barriers\": (FeatureSet, \"Line_Barriers\"),\r\n \"polygon_barriers\": (FeatureSet, \"Polygon_Barriers\"),\r\n \"use_hierarchy\": (bool, \"Use_Hierarchy\"),\r\n \"restrictions\": (str, \"Restrictions\"),\r\n \"attribute_parameter_values\": (FeatureSet, \"Attribute_Parameter_Values\"),\r\n \"route_shape\": (str, \"Route_Shape\"),\r\n \"route_line_simplification_tolerance\": (LinearUnit, \"Route_Line_Simplification_Tolerance\"),\r\n \"populate_route_edges\": (bool, \"Populate_Route_Edges\"),\r\n \"populate_directions\": (bool, \"Populate_Directions\"),\r\n \"directions_language\": (str, \"Directions_Language\"),\r\n \"directions_distance_units\": (str, \"Directions_Distance_Units\"),\r\n \"directions_style_name\": (str, \"Directions_Style_Name\"),\r\n \"travel_mode\": (str, \"Travel_Mode\"),\r\n \"impedance\": (str, \"Impedance\"),\r\n \"solve_succeeded\": (bool, \"Solve Succeeded\"),\r\n \"output_routes\": (FeatureSet, \"Output Routes\"),\r\n \"output_route_edges\": (FeatureSet, \"Output Route Edges\"),\r\n \"output_directions\": (FeatureSet, \"Output Directions\"),\r\n \"output_stops\": (FeatureSet, \"Output Stops\"),\r\n }\r\n return_values = [\r\n {\"name\": \"solve_succeeded\", \"display_name\": \"Solve Succeeded\", \"type\": bool},\r\n {\"name\": \"output_routes\", \"display_name\": \"Output Routes\", \"type\": FeatureSet},\r\n {\"name\": \"output_route_edges\", \"display_name\": \"Output Route Edges\", \"type\": FeatureSet},\r\n {\"name\": \"output_directions\", \"display_name\": \"Output Directions\", \"type\": FeatureSet},\r\n {\"name\": \"output_stops\", \"display_name\": \"Output Stops\", \"type\": FeatureSet},\r\n ]\r\n\r\n if gis is None:\r\n gis = arcgis.env.active_gis\r\n\r\n url = gis.properties.helperServices.asyncRoute.url\r\n return _execute_gp_tool(gis, \"FindRoutes\", kwargs, param_db, return_values, _use_async, url)", "def import_data(self):\n\n # Import ordered names of origins\n origins_file = os.path.join(self.data_directory,'origins.txt')\n self.origins = np.loadtxt(origins_file,dtype=str,ndmin=1)\n\n # Import ordered names of destinations\n destinations_file = os.path.join(self.data_directory,'destinations.txt')\n self.destinations = np.loadtxt(destinations_file,dtype=str,ndmin=1)\n\n # Import origin supply\n originsupply_file = os.path.join(self.data_directory,'origin_supply.txt')\n self.origin_supply = np.loadtxt(originsupply_file,ndmin=1).astype('float64')\n\n # In case origin supply is not a list\n if not isinstance(self.origin_supply,(np.ndarray, np.generic)):\n self.origin_supply = np.array([self.origin_supply])\n\n # Import destination demand\n destinationdemand_file = os.path.join(self.data_directory,'destination_demand.txt')\n self.destination_demand = np.loadtxt(destinationdemand_file,ndmin=1).astype('float64')\n\n # In case destination demand is not a list\n if not isinstance(self.destination_demand,(np.ndarray, np.generic)):\n self.destination_demand = np.array([self.destination_demand])\n\n # Import origin locations\n originlocations_file = 
os.path.join(self.data_directory,'origin_locations.txt')\n self.origin_locations = np.loadtxt(originlocations_file,ndmin=1)\n\n # Import destination locations\n destinationlocations_file = os.path.join(self.data_directory,'destination_locations.txt')\n self.destination_locations = np.loadtxt(destinationlocations_file,ndmin=1)\n\n # Import initial and final destination sizes\n initialdestinationsizes_file = os.path.join(self.data_directory,'initial_destination_sizes.txt')\n self.initial_destination_sizes = np.loadtxt(initialdestinationsizes_file,ndmin=1)\n\n # In case destination sizes are not a list\n if not isinstance(self.initial_destination_sizes,(np.ndarray, np.generic)):\n self.initial_destination_sizes = np.array([self.initial_destination_sizes])\n\n # Import N,M\n self.N = self.origin_supply.shape[0]\n self.M = self.initial_destination_sizes.shape[0]\n\n # Import cost matrix\n costmatrix_file = os.path.join(self.data_directory,'cost_matrix.txt')\n self.cost_matrix = np.loadtxt(costmatrix_file).astype('float64')\n\n # Reshape cost matrix if necessary\n if self.N == 1:\n self.cost_matrix = np.reshape(self.cost_matrix[:,np.newaxis],(self.N,self.M))\n if self.M == 1:\n self.cost_matrix = np.reshape(self.cost_matrix[np.newaxis,:],(self.N,self.M))\n\n # Compute total initial and final destination sizes\n self.total_initial_sizes = np.sum(self.initial_destination_sizes)\n\n # Compute naive total cost\n self.total_cost = 0\n for i in range(self.N):\n for j in range(self.M):\n self.total_cost += self.cost_matrix[i,j]*(self.origin_supply[i]/self.N)", "def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = 
csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def _add_unique_id_fields(self):\r\n field_types = {\"String\": \"TEXT\", \"Single\": \"FLOAT\", \"Double\": \"DOUBLE\", \"SmallInteger\": \"SHORT\",\r\n \"Integer\": \"LONG\", \"OID\": \"LONG\"}\r\n origin_field_def = [self.origin_unique_id_field_name, field_types[self.origin_id_field_obj.type]]\r\n if self.origin_id_field_obj.type == \"String\":\r\n origin_field_def += [self.origin_unique_id_field_name, self.origin_id_field_obj.length]\r\n dest_field_def = [self.dest_unique_id_field_name, field_types[self.dest_id_field_obj.type]]\r\n if self.dest_id_field_obj.type == \"String\":\r\n dest_field_def += [self.dest_unique_id_field_name, self.dest_id_field_obj.length]\r\n self.rt_solver.addFields(arcpy.nax.RouteInputDataType.Stops, [origin_field_def, dest_field_def])", "def findRoute(self, source:str, target:str, option:str = None, startTime:str = None) -> []:\n \n if (source not in self.stationInterchanges or target not in self.stationInterchanges):\n print(\"Invalid inputs\")\n return []\n\n if not option:\n option = 'shortest'\n\n # You can have multiple sources and destinations because the station can be an \n # interchange where you have multiple lines to chose from. \n sources = self.stationInterchanges[source]\n dests = self.stationInterchanges[target]\n\n route = []\n if option == 'shortest':\n route = self.__findRouteShortestPath(sources, dests)\n elif option == 'fastest':\n if not startTime or len(startTime) == 0:\n startTime = StationMap.dateUtils.getTodaysDate()\n route = self.__scheduleRoute(sources, dests, startTime)\n else:\n print('Invalid option: {0}'.format(option))\n return []\n return route", "def __init__( # pylint: disable=too-many-locals, too-many-arguments\r\n self, pair_type_str, origins, origin_id_field, destinations, dest_id_field,\r\n network_data_source, travel_mode, time_units, distance_units,\r\n max_routes, max_processes, out_routes, scratch_folder, reverse_direction=False,\r\n assigned_dest_field=None, od_pair_table=None, time_of_day=None, barriers=None\r\n ):\r\n pair_type = helpers.PreassignedODPairType[pair_type_str]\r\n self.origins = origins\r\n self.destinations = destinations\r\n self.out_routes = out_routes\r\n self.scratch_folder = scratch_folder\r\n time_units = helpers.convert_time_units_str_to_enum(time_units)\r\n distance_units = helpers.convert_distance_units_str_to_enum(distance_units)\r\n if not barriers:\r\n barriers = []\r\n self.max_processes = max_processes\r\n if not time_of_day:\r\n time_of_day = None\r\n else:\r\n time_of_day = datetime.datetime.strptime(time_of_day, helpers.DATETIME_FORMAT)\r\n\r\n # Initialize the dictionary of inputs to send to each OD solve\r\n self.rt_inputs = {\r\n \"pair_type\": pair_type,\r\n \"origins\": self.origins,\r\n \"origin_id_field\": origin_id_field,\r\n \"destinations\": self.destinations,\r\n \"dest_id_field\": dest_id_field,\r\n \"network_data_source\": network_data_source,\r\n \"travel_mode\": travel_mode,\r\n \"time_units\": time_units,\r\n \"distance_units\": distance_units,\r\n \"time_of_day\": time_of_day,\r\n \"reverse_direction\": reverse_direction,\r\n \"scratch_folder\": self.scratch_folder,\r\n \"assigned_dest_field\": assigned_dest_field,\r\n \"od_pair_table\": od_pair_table,\r\n 
\"barriers\": barriers,\r\n \"origin_transfer_fields\": [], # Populate later\r\n \"destination_transfer_fields\": [] # Populate later\r\n }\r\n\r\n # List of intermediate output OD Line files created by each process\r\n self.route_fcs = []\r\n\r\n # Construct OID ranges for chunks of origins and destinations\r\n if pair_type is helpers.PreassignedODPairType.one_to_one:\r\n # Chunks are of the format [first origin ID, second origin ID]\r\n self.chunks = helpers.get_oid_ranges_for_input(origins, max_routes)\r\n elif pair_type is helpers.PreassignedODPairType.many_to_many:\r\n # Chunks are of the format [chunk_num, chunk_size]\r\n num_od_pairs = 0\r\n with open(od_pair_table, \"r\", encoding=\"utf-8\") as f:\r\n for _ in f:\r\n num_od_pairs += 1\r\n num_chunks = ceil(num_od_pairs / max_routes)\r\n self.chunks = [[i, max_routes] for i in range(num_chunks)]\r\n\r\n # Calculate the total number of jobs to use in logging\r\n self.total_jobs = len(self.chunks)\r\n\r\n self.optimized_cost_field = None", "def bus_routes_direction():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file) #useful for monitoring progress of function\n reader = csv.reader(open(file))\n for line in reader:\n route = extract_route_and_direction(line[3]) # Journey ID field\n if route not in route_list and route != \"\": # error handling for extract_bus_routes function\n route_list.append(route)\n return route_list", "def validate(self, params, address_input_data):\n processed_address_list = []\n # check avoids redundancy for combined 'forward geocode and validate' \n # option as API does both by default\n if self.__is_address_list_processed:\n processed_address_list = address_input_data\n else:\n request_list = self.__prepare_smarty_request_list(address_input_data)\n processed_address_list = self.__process_smarty_request_list(request_list,\n address_input_data )\n self.__is_address_list_processed = True\n print(f'< {self.num_addresses_processed} addresses processed >')\n return processed_address_list", "def check_if_repeated_route(route, user_lat, user_lon):\n bus_route = route\n repeated_routes = {'101':'','105':'','106':'','107':'','230':'','111':'','113':'','116':'','119':'','240':'','120':'','271':'','70':'','2':'pt','7':'','8':'','18':'','29':'','1':'pt','3':'pt','4':'pt','402':'pt','425':'pt','202':'pt','212':'pt','214':'pt','102':'pt','10':'pt','11':'pt','13':'','28':'pt','41':'','45':'','55':'pt','57':'pt','63':'pt','47':'','48':'','60':'','64':'','67':'','42':'','12':'','21':''}\n intercity_transit = ['47','48','60','64','67','42','12','13','21','41','45']\n king_county_metro = ['917', 'A Line', '225', '231', '239', '230', '250', '37', '910', '628', '372', '373', '630', '218', '631', '63', '4', '36', '43', '986', '823', '44', '987', '212', '45', '988', 'Trailhead Direct Issaquah Alps', '989', '824', '214', '47', '180', '48', '635', '216', '5', '217', '982', '41', '21', '984', 'F Line', 'E Line', '342', '345', '346', '952', '347', '894', '348', '49', '248', '355', '895', '116', '243', '245', '893', '118', '246', '661', '931', '119', '67', '915', '12', '249', '120', '238', '62', '226', '111', '24', '64', '193', '113', '240', '65', '930', '241', '114', '255', '73', '128', '74', '257', '75', '13', '907', '121', '122', '7', '123', '252', '70', '124', '71', '125', '221', '244', 'Trailhead Direct Cougar Mt.', '55', '994', '50', '995', 'Trailhead Direct Mailbox Peak', '219', '981', 'Trailhead Direct Mt. 
Si', '22', '224', '157', '204', '101', '232', '102', '105', '57', '106', '234', '156', '107', '235', '236', '60', '980', '237', 'B Line', '11', '775', '56', '1', '10', '166', '167', '903', '158', '908', '159', '3', '906', '301', '913', '914', '303', '164', '304', '916', '901', '178', '169', '308', '17', '309', '31', '311', '312', '177', '168', '629', 'Duvall-Monroe Shuttle', '268', '14', '76', '77', '131', '26', '773', '29', '132', '78', '40', '8', '887', 'C Line', '277', '9', '153', '28', '154', '269', 'D Line', '27', '143', '271', '886', '148', '888', '889', '15', '150', '891', '892', '208', '200', '181', '32', '182', '33', '183', '330', '331', '186', '187', '316', '179', '18', '192', '197', '2', '19', '190']\n pierce_transit = ['1','2','3','4','402','425','202','212','214','102','10','11','13','28','41','45','48','55','57','63']\n north_routes = ['101','105','106','107','230','111','113','116','119','240','120','271','70','2','3','4','7','8','12','18','29']\n \n if bus_route in repeated_routes:\n if user_lon < -122.5 and user_lat > 47.1:\n # going to be pierce county (or kitsap)\n if bus_route in pierce_transit: \n bus_route += 'pt'\n else:\n bus_route += repeated_routes[bus_route]\n elif user_lat > 47.7:\n # going to be community transit or everett transit (N)\n if bus_route in north_routes: \n bus_route += 'N'\n else:\n bus_route += repeated_routes[bus_route] \n elif user_lat > 47.33:\n # going to be king county metro\n if bus_route in king_county_metro:\n bus_route = bus_route\n else:\n bus_route += repeated_routes[bus_route]\n elif user_lat > 47.08:\n # going to be pierce transit\n if bus_route in pierce_transit: \n bus_route += 'pt'\n else:\n bus_route += repeated_routes[bus_route] \n else:\n # going to be intercity transit\n if bus_route in intercity_transit: \n bus_route += 'it'\n else:\n bus_route += repeated_routes[bus_route] \n\n return bus_route", "def __init__(__self__, *,\n destination_region_id: pulumi.Input[str],\n destination_zone_id: pulumi.Input[str],\n source_region_id: pulumi.Input[str],\n source_zone_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n group_name: Optional[pulumi.Input[str]] = None,\n rpo: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"destination_region_id\", destination_region_id)\n pulumi.set(__self__, \"destination_zone_id\", destination_zone_id)\n pulumi.set(__self__, \"source_region_id\", source_region_id)\n pulumi.set(__self__, \"source_zone_id\", source_zone_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if group_name is not None:\n pulumi.set(__self__, \"group_name\", group_name)\n if rpo is not None:\n pulumi.set(__self__, \"rpo\", rpo)", "def validateTradeRoute(self, tradeRouteDict):\n try:\n systemFrom = self.systems[tradeRouteDict['fromSystem']]\n systemTo = self.systems[tradeRouteDict['toSystem']]\n # has a trade route already been setup between these planets?\n (sysFrom, sysTo, type) = string.split(tradeRouteDict['id'], '-')\n # are these systems adjacent, or share a warp gate with a trade pact\n if systemTo.id in systemFrom.connectedSystems:\n pass\n elif systemTo.id in systemFrom.warpGateSystems:\n tempRoute = anwp.aw.traderoute.TradeRoute(tradeRouteDict)\n warpReq = tempRoute.getWarpRequired()\n if warpReq > (systemFrom.usedWGC + systemFrom.availWGC):\n return 'System:%s Requires %d Warp Capactiy to setup this Trade Route' % (systemFrom.name, warpReq)\n if warpReq > (systemTo.usedWGC + systemTo.availWGC):\n return 'System:%s Requires %d Warp Capactiy to 
setup this Trade Route' % (systemTo.name, warpReq)\n else:\n return 'Systems are not adjacent and have no warp gates between them'\n # do these systems share the same empire owner, or are the two empires in a trade pact?\n if systemFrom.myEmpireID <> systemTo.myEmpireID and anwp.func.globals.diplomacy[self.empires[systemFrom.myEmpireID].diplomacy[systemTo.myEmpireID].diplomacyID]['trade'] == 0:\n return 'System Owners are not the same, or no Trade Pact in Effect'\n # is a negative trade route being sent?\n if (tradeRouteDict['AL'] < 0 or tradeRouteDict['EC'] < 0 or tradeRouteDict['IA'] < 0):\n return 'you cannot send negative values in trade'\n # is something being sent?\n if (tradeRouteDict['AL'] == 0 and tradeRouteDict['EC'] == 0 and tradeRouteDict['IA'] == 0) and tradeRouteDict['type'] <> 'GEN':\n return 'no resources are being sent, trade route invalid'\n # does the system have the resources to setup this trade?\n if (systemFrom.AL < tradeRouteDict['AL'] or systemFrom.EC < tradeRouteDict['EC'] or\n systemFrom.IA < tradeRouteDict['IA']):\n return '%s does not have enough resources to setup this trade route' % systemFrom.name\n return 1\n except:\n return 'galaxy->validateTradeRoute error'", "def plan_trip():\n origins = []\n destinations = []\n\n origin_stop = request.args.get('origin', False)\n destination_stop = request.args.get('destination', False)\n origin_is_suburb = request.args.get('origin_suburb', False)\n dest_is_suburb = request.args.get('dest_suburb', False)\n origin_is_suburb = bool(origin_is_suburb)\n dest_is_suburb = bool(dest_is_suburb)\n if origin_stop and destination_stop:\n client = api.connection()\n origins = client.find_stops_by_name('any', origin_stop, True)\n\n if client.error == 404:\n render_template(\n \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n )\n\n destinations = client.find_stops_by_name('any', destination_stop, True)\n if client.error == 404:\n render_template(\n \"trip-planner.jinja2\", origins=[], destinations=[], err=404\n )\n\n origins = stop_information_generator(\n origins.locations, [], origin_stop, origin_is_suburb\n )\n destinations = stop_information_generator(\n destinations.locations, [], destination_stop, dest_is_suburb\n )\n\n return render_template(\n \"trip-planner.jinja2\", origins=origins, destinations=destinations, err=200\n )", "def compute_waypoints(self, source_loc, destination_loc):\n start_waypoint = self._map.get_waypoint(\n source_loc,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n end_waypoint = self._map.get_waypoint(\n destination_loc,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n assert start_waypoint and end_waypoint, 'Map could not find waypoints'\n route = self._grp.trace_route(\n start_waypoint.transform.location,\n end_waypoint.transform.location)\n # TODO(ionel): The planner returns several options in intersections.\n # We always take the first one, but this is not correct.\n return deque([to_pylot_transform(waypoint[0].transform)\n for waypoint in route])", "def process_vrps(self):\n self.origins = set()\n for afi in (\"ipv4\", \"ipv6\"):\n self.info(\"Creating prefix-lists for {} address-family\"\n .format(afi))\n self.covered[afi] = [\"seq {seq} permit {prefix} le {maxLength}\"\n .format(seq=seq, **entry)\n for seq, entry\n in enumerate(self.vrps.covered(afi))]\n origins = self.vrps.origins(afi)\n self.for_origin[afi] = {}\n for asn in origins:\n self.for_origin[afi][asn] = [\"seq {seq} permit {prefix} le {maxLength}\" # noqa: E501\n .format(seq=seq, **entry)\n for 
seq, entry\n in enumerate(self.vrps.for_origin(asn, afi))] # noqa: E501\n self.origins.update(origins)", "def validate(self):\n super(ReferenceMapping, self).validate()\n self.check_observatory()\n self.check_instrument()\n self.check_filekind()\n self.check_schema_uri()\n if \"reference_to_dataset\" in self.header:\n parkeys = self.get_required_parkeys()\n for _reference, dataset in self.reference_to_dataset.items():\n assert dataset.upper() in parkeys, \\\n \"reference_to_dataset dataset keyword not in parkey keywords.\"\n with log.augment_exception(\"Invalid mapping:\", self.instrument, self.filekind):\n self.selector.validate_selector(self.tpn_valid_values)", "def test_directions_handles_more_than_max_waypoints(self):\n stops = [\n mommy.make(Stop, trips_year=self.trips_year, lat_lng=coord)\n for coord in (\n '43.705639,-72.297404',\n '43.680288,-72.527876',\n '43.779934,-72.042908',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '44.875039,-71.05471',\n '43.736252,-72.2519',\n '43.788074,-72.099655',\n '44.227489,-71.477737',\n '43.705639,-72.297404',\n '43.680288,-72.527876',\n '43.779934,-72.042908',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '44.875039,-71.05471',\n '43.736252,-72.2519',\n '43.788074,-72.099655',\n '44.227489,-71.477737',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n )\n ]\n directions = maps.get_directions(stops)\n self.assertEqual(len(stops), len(directions.legs) + 1)\n for i, leg in enumerate(directions.legs):\n self.assertEqual(leg.start_stop, stops[i])\n self.assertEqual(leg.end_stop, stops[i + 1])", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_cidr_block: Optional[pulumi.Input[str]] = None,\n destination_port_range: Optional[pulumi.Input['TrafficMirrorFilterRuleDestinationPortRangeArgs']] = None,\n protocol: Optional[pulumi.Input[int]] = None,\n rule_action: Optional[pulumi.Input[str]] = None,\n rule_number: Optional[pulumi.Input[int]] = None,\n source_cidr_block: Optional[pulumi.Input[str]] = None,\n source_port_range: Optional[pulumi.Input['TrafficMirrorFilterRuleSourcePortRangeArgs']] = None,\n traffic_direction: Optional[pulumi.Input[str]] = None,\n traffic_mirror_filter_id: Optional[pulumi.Input[str]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if destination_cidr_block is not None:\n pulumi.set(__self__, \"destination_cidr_block\", destination_cidr_block)\n if destination_port_range is not None:\n pulumi.set(__self__, \"destination_port_range\", destination_port_range)\n if protocol is not None:\n pulumi.set(__self__, \"protocol\", protocol)\n if rule_action is not None:\n pulumi.set(__self__, \"rule_action\", rule_action)\n if rule_number is not None:\n pulumi.set(__self__, \"rule_number\", rule_number)\n if source_cidr_block is not None:\n pulumi.set(__self__, \"source_cidr_block\", source_cidr_block)\n if source_port_range is not None:\n pulumi.set(__self__, \"source_port_range\", source_port_range)\n if traffic_direction is not None:\n pulumi.set(__self__, \"traffic_direction\", traffic_direction)\n if traffic_mirror_filter_id is not None:\n pulumi.set(__self__, \"traffic_mirror_filter_id\", traffic_mirror_filter_id)", "def stops_on_routes():\n routes = ['15', '46A', '14', 
'41B', '39A', '65', '40D', '11', '31', '27', '67', '79', '42', '66A', '33B', '140', '44', '83A', '27B', '38', '16C', '747', '41C', '39', '25', '239', '43', '70', '13', '150', '145', '77A', '184', '84', '61', '83', '40', '66', '15A', '123', '17A', '16', '14C', '9', '4', '37', '32', '33', '49', '56A', '151', '25A', '45A', '54A', '47', '18', '7', '17', '102', '120', '65B', '41', '122', '29A', '76', '68', '59', '25B', '69', '27A', '66B', '38B', '7D', '75', '15B', '84A', '63', '84X', '33X', '68A', '1', '76A', '7B', '270', '236', '130', '238', '220', '44B', '40B', '26', '32B', '8', '41A', '53', '67X', '104', '32A', '79A', '114', '185', '66X', '31B', '32X', '51X', '51D', '41X', '142', '111', '69X', '27X', '116', '46E', '161', '118', '25X', '38A', '33A', '31A']\n routes_and_stops={}\n for route in routes:\n routes_and_stops[route]=[] #new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route=extract_bus_route(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops", "def _validate_route_settings(self):\r\n # Create a dummy Route object and set properties. This allows us to\r\n # detect any errors prior to spinning up a bunch of parallel processes and having them all fail.\r\n LOGGER.debug(\"Validating Route settings...\")\r\n rt = None\r\n try:\r\n rt = Route(**self.rt_inputs)\r\n rt.initialize_rt_solver()\r\n LOGGER.debug(\"Route settings successfully validated.\")\r\n except Exception:\r\n LOGGER.error(\"Error initializing Route analysis.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise\r\n finally:\r\n if rt:\r\n LOGGER.debug(\"Deleting temporary test Route job folder...\")\r\n # Close logging\r\n rt.teardown_logger()\r\n # Delete output folder\r\n shutil.rmtree(rt.job_result[\"jobFolder\"], ignore_errors=True)\r\n del rt", "def FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, TimeTableList, TimeTableIndex, StationHourIndex, PathInfo=[]):\r\n PathInfo = PathInfo + [ConnectionInfo]\r\n\r\n if Cond.IfTestRouteSearch:\r\n \tStations = GetAllStationsOfRoute(PathInfo)\r\n \tprint \"\\nStations of Path (%s): ++++++++\" % len(Stations)\r\n \tprint Stations\r\n \tprint \"Route Information:\"\r\n \tprint PrettyStringRouteInfo(PathInfo)\r\n\r\n # check successful termination\r\n # if len(PathInfo) > 1 and ConnectionInfo[ConnInfoInd['station_to']] == EndStation: \r\n if CheckIfPathTerminatesSuccessfully(ConnectionInfo, PathInfo, RouteConditions, EndStation):\r\n \tif Cond.IfTestRouteSearch:\r\n \t\tprint \"End Station is reached!\"\t\r\n \treturn [PathInfo]\r\n\r\n # current (this iteration's) path length\r\n CurPathLen = len(PathInfo)\r\n\r\n # get next connections\r\n start_station = ConnectionInfo[ConnInfoInd['station_to']]\r\n departure_hour = ConnectionInfo[ConnInfoInd['arrival_hour']] \t\r\n departure_min = ConnectionInfo[ConnInfoInd['arrival_min']]\r\n\r\n # TEST BU2019\r\n if False:\r\n\t print 'ConnInfoInd: ' + str(ConnectionInfo)\r\n\t print 'start_station,departure_hour,departure_min: %s, %s, %s' % (start_station, departure_hour, departure_min)\r\n\t time.sleep(0.1)\r\n \r\n # mandatory conditions\r\n WaitLimit = RouteConditions[Cond.MaxWaitingTimeAtStation][0]\r\n \r\n # get next connections from the station\r\n ConnectionInfoList = GetListOfNextConnections(TimeTableList, TimeTableIndex, StationHourIndex, start_station, 
departure_hour, departure_min, WaitLimit)\r\n\r\n # insert on-foot connections (Zu Fuss, ZF) to nearby stations into ConnectionInfoList\r\n # cancel (Tunc 4/3/2019)\r\n if False:\r\n\t StationMeasurementTime = ReqStationMeasureTime\r\n\t \r\n\t if Cond.MaxNumberOfSubsequentStationPassagesOnFoot in RouteConditions \\\r\n\t \tand RouteConditions[Cond.MaxNumberOfSubsequentStationPassagesOnFoot][0] > 0:\r\n\r\n\t\t if RouteConditions.has_key(Cond.MeasureStations):\r\n\t\t \tStationMeasurementTime = RouteConditions[Cond.MeasureStations][1]\r\n\t\t Connections = GetOnFootStationChangeConnections(start_station, departure_hour, departure_min, StationMeasurementTime)\r\n\t\t \r\n\t\t if Connections:\t\t# i.e. if Connections is not None\r\n\t\t \t(OnFootConnections1, OnFootConnections2) = Connections \r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections1)\r\n\t\t \tConnectionInfoList = AddConnectionsToListAfterDepartureTime(ConnectionInfoList, OnFootConnections2)\r\n\r\n if Cond.IfTestRouteSearch:\r\n\t\tprint \"Next connections:\"\r\n\t\tfor c in ConnectionInfoList:\r\n\t\t\tprint c\r\n\t\ttime.sleep(Cond.TestWaitingTime)\r\n\r\n if not ConnectionInfoList:\t\t# Endstation: Node w/o successor nodes\r\n \treturn []\r\n\r\n PathInfoList = []\r\n\r\n for ConnectionInfo in ConnectionInfoList:\r\n\t\tres = Cond.CheckIfConnectionShouldBeSelected(ConnectionInfo, PathInfo, EndStation, RouteConditions)\r\n\r\n\t\t# test\r\n\t\tif Cond.IfTestRouteSearch:\r\n\t\t\tif res == None or res == False:\r\n\t\t\t\tprint \"CheckIfConnectionShouldBeSelected: %s\" % res\r\n\r\n\t \tif res == None: return[] \r\n\t \tif res == False: continue\r\n\r\n\t \t# recursive call\r\n\t\textended_paths = FindAllRoutesRec(ConnectionInfo, EndStation, RouteConditions, \\\r\n\t\t\tTimeTableList, TimeTableIndex, StationHourIndex, PathInfo)\r\n\r\n\t\t# report status\r\n\t\tif Cond.ReportDuringRouteSearch in RouteConditions:\r\n\t\t\tTimeIntv = default_timer() - Cond.SearchStartTime\r\n\t\t\tRouteSearchReportingIntervalInSeconds = RouteConditions[Cond.ReportDuringRouteSearch][0]\r\n\t\t\tif TimeIntv > Cond.RouteSearchReportCounter * RouteSearchReportingIntervalInSeconds:\r\n\t\t\t\tCond.RouteSearchReportCounter += 1 \r\n\t\t\t\tprint \"%s seconds passed... 
\" % \"{:.2f}\".format(TimeIntv)\r\n\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\t\r\n\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\" \\\r\n\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\r\n\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t# append to path list\r\n\t\tfor p in extended_paths:\r\n\t\t\t# no need to recheck route unless current connection is the last one \r\n\t\t\t# LastConnection = (ConnectionInfo == p[-1])\r\n\t\t\tLastConnection = (CurPathLen == len(p) -1 and ConnectionInfo == p[-1])\r\n\t\t\t\r\n\t\t\tif LastConnection:\r\n\r\n\t\t\t\tif Cond.CheckIfRouteShouldBeSelected(p, RouteConditions):\r\n\t\t\t\t\tPathInfoList.append(p)\r\n\t\t\t\t\tCond.SelectedRoutes.append(ApplyAllRouteInfoCorrections(p))\r\n\r\n\t\t\t\t\t# evaluate route\r\n\t\t\t\t\t# cancel for BU2019\r\n\r\n\t\t\t\t\tif Cond.IfTestRouteSearch:\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all connection selection criteria (before route selection)\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterConnectionSelection\r\n\t\t\t\t\t\tprint \"%s routes found so far, that passed all route selection criteria (before final route filtering)\\n\" \\\r\n\t\t\t\t\t\t\t% Cond.RouteCountAfterRouteSelection\t\t\r\n\t\t\t\t\t\tprint \"----------------------\"\t\r\n\r\n\t\t\t\t\t# test\r\n\t\t\t\t\tIncrementDicValue(Cond.RouteCountPerRouteLength, CurPathLen)\r\n\t\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t# not last connection, no need to recheck the route\r\n\t\t\t\t# PathInfoList.append(p)\r\n\t\t\t\t# IncrementDicValue(SelectedRoutesPerLevel, CurPathLen)\r\n\t\t\t\tpass\r\n \r\n return PathInfoList", "def lookup_routes(self, daddr):\n outroutes = []\n for entry in self.routes:\n # split netmask and daddr by the IP dots\n netmask_split = entry[NMSK].split('.')\n daddr_split = daddr.split('.')\n\n # bitwise ANd the netmask with the daddr\n result = []\n for i in range(0, len(netmask_split)):\n result.append(str(int(netmask_split[i]) & int(daddr_split[i])))\n \n # compare ANDed result to the network\n is_valid = True\n network_split = entry[NTWK].split('.')\n for i in range(0, len(network_split)):\n if result[i] != network_split[i]:\n is_valid = False\n break\n if is_valid:\n outroutes.append(entry)\n\n if len(outroutes) == 0:\n return outroutes\n\n # reform IP address\n outroutes.sort(key=lambda r: int(r[NMSK].replace('.', '')), reverse=True)\n longest_matching_prefix = int(outroutes[0][NMSK].replace('.', ''))\n outroutes = list(filter(lambda r: int(r[NMSK].replace('.', '')) == longest_matching_prefix, outroutes))\n return outroutes", "def f_check_adr_parameters_correctness(dict):\n\n if int(dict[\"operation_mode_num\"]) not in (0, 1, 2, 3, 4, 5, 6):\n print('\\n Error!!! Operation mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"FFT_size_samples\"]) not in (2048, 4096, 8192, 16384, 32768):\n print('\\n Error!!! FFT size is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"spectra_averaging\"]) < 16 or int(dict[\"spectra_averaging\"]) > 32768:\n print('\\n Error!!! Spectra averaging number is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"start_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16): # 0 … (SFFT-1024)/1024\n print('\\n Error!!! 
Start frequency line is wrong!\\n')\n        sys.exit(' Program stopped!')\n\n    if int(dict[\"width_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16):\n        print('\\n Error!!! Frequency width line is wrong!\\n')\n        sys.exit(' Program stopped!')\n\n    if int(dict[\"width_line_freq\"]) > ((int(dict[\"FFT_size_samples\"]) - int(dict[\"start_line_freq\"]) * 1024) / 1024): # 1 … (SFFT-SLINE*1024)/1024\n        print('\\n Error!!! Frequency width is bigger than FFT size allows!\\n')\n        sys.exit(' Program stopped!')\n\n    if int(dict[\"clock_source\"]) not in (0, 1):\n        print('\\n Error!!! Clock source is wrong!\\n')\n        sys.exit(' Program stopped!')\n\n    if int(dict[\"sum_diff_mode_num\"]) not in (0, 1):\n        print('\\n Error!!! Sum-diff mode is wrong!\\n')\n        sys.exit(' Program stopped!')\n\n    if int(dict[\"data_file_size\"]) < -1 or int(dict[\"data_file_size\"]) > 4096:\n        print('\\n Error!!! File size value is wrong!\\n')\n        sys.exit(' Program stopped!')\n\n    '''\n    if (int(dict[\"chan_diff_delay\"]) < 0 or int(dict[\"chan_diff_delay\"]) > 1024):\n        print('\\n Error!!! Channel difference delay is wrong!\\n')\n        sys.exit(' Program stopped!')\n    '''\n\n    # print('\\n ADR parameters from file are correct!\\n')\n\n    return dict", "def lookup_routes(self, daddr):\n        outroutes = []\n        for entry in self.routes:\n            for varat in entry[\"varats\"]:\n                ip = varat[\"network\"].split(\".\")\n                netmask = varat[\"netmask\"].split(\".\")\n\n                mask_bit = \"\".join([ format(int(quad), \"08b\") for quad in netmask ])\n                num_ones = mask_bit.count(\"1\")\n                ip_bin = \"\".join([ format(int(quad), \"08b\") for quad in ip ])\n                ip_start = ip_bin[:num_ones]\n                daddr_bin = \"\".join([ format(int(quad), \"08b\") for quad in daddr.split(\".\") ])\n                if daddr_bin.startswith(ip_start):\n                    outroutes.append({\"peer\": entry[\"peer\"], \"us\": entry[\"us\"], \"ghoti\": num_ones, \"msg\": varat})\n\n        #print(\"outroutessssssssssssssssssssss\", outroutes)\n        return outroutes", "def __init__(self, **kwargs):\r\n        self.pair_type = kwargs[\"pair_type\"]\r\n        self.origins = kwargs[\"origins\"]\r\n        self.origin_id_field = kwargs[\"origin_id_field\"]\r\n        self.destinations = kwargs[\"destinations\"]\r\n        self.dest_id_field = kwargs[\"dest_id_field\"]\r\n        self.network_data_source = kwargs[\"network_data_source\"]\r\n        self.travel_mode = kwargs[\"travel_mode\"]\r\n        self.time_units = kwargs[\"time_units\"]\r\n        self.distance_units = kwargs[\"distance_units\"]\r\n        self.time_of_day = kwargs[\"time_of_day\"]\r\n        self.reverse_direction = kwargs[\"reverse_direction\"]\r\n        self.scratch_folder = kwargs[\"scratch_folder\"]\r\n        self.assigned_dest_field = kwargs[\"assigned_dest_field\"]\r\n        self.od_pair_table = kwargs[\"od_pair_table\"]\r\n        self.origin_transfer_fields = kwargs[\"origin_transfer_fields\"]\r\n        self.destination_transfer_fields = kwargs[\"destination_transfer_fields\"]\r\n        self.barriers = []\r\n        if \"barriers\" in kwargs:\r\n            self.barriers = kwargs[\"barriers\"]\r\n\r\n        # Create a job ID and a folder for this job\r\n        self._create_job_folder()\r\n\r\n        # Setup the class logger. 
Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"RoutePairs\")\r\n\r\n # Get field objects for the origin and destination ID fields since we need this in multiple places\r\n self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]\r\n self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]\r\n\r\n # Set up other instance attributes\r\n self.is_service = helpers.is_nds_service(self.network_data_source)\r\n self.rt_solver = None\r\n self.solve_result = None\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.input_origins_layer_obj = None\r\n self.input_dests_layer_obj = None\r\n self.origin_unique_id_field_name = \"OriginUniqueID\"\r\n self.dest_unique_id_field_name = \"DestinationUniqueID\"\r\n self.od_pairs = None\r\n\r\n # Create a network dataset layer if needed\r\n if not self.is_service:\r\n self._make_nds_layer()\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputRoutes\": \"\",\r\n \"logFile\": self.log_file\r\n }", "def _process_departure_fix_list(fix_list, runway, airport, fixes, tagged_routes, top_level=True, altitude=None):\r\n if fix_list:\r\n try:\r\n if top_level:\r\n yield fix_list[0]\r\n fix_list = fix_list[1:]\r\n # climb altitude\r\n if fix_list[0].strip().isdigit():\r\n if altitude is None:\r\n altitude = fix_list[0].strip()\r\n fix_list = fix_list[1:]\r\n if fix_list[0].startswith('@'):\r\n tag = fix_list[0].lstrip('@')\r\n if tag.startswith('!') and tag in tagged_routes[None]:\r\n tagged_route = tagged_routes[None][tag]\r\n elif tag in tagged_routes[runway]:\r\n tagged_route = tagged_routes[runway][tag]\r\n elif tag in tagged_routes[airport]:\r\n tagged_route = tagged_routes[airport][tag]\r\n else:\r\n raise KeyError(f'''Unable to find route tagged @{tag}.\r\ntags for runway {runway}: {tagged_routes[runway]}\r\ntags for airport {airport}: {tagged_routes[airport]}''')\r\n yield from _process_departure_fix_list(tagged_route,\r\n runway, airport, fixes, tagged_routes, top_level=False, altitude=altitude)\r\n yield from _process_departure_fix_list(fix_list[1:],\r\n runway, airport, fixes, tagged_routes, top_level=False)\r\n\r\n elif fix_list[-1].startswith('@'):\r\n tag = fix_list[-1].lstrip('@')\r\n if tag.startswith('!') and tag in tagged_routes[None]:\r\n tagged_route = tagged_routes[None][tag]\r\n elif tag in tagged_routes[runway]:\r\n tagged_route = tagged_routes[runway][tag]\r\n elif tag in tagged_routes[airport]:\r\n tagged_route = tagged_routes[airport][tag]\r\n else:\r\n raise KeyError(f'''Unable to find route tagged @{tag}.\r\ntags for runway {runway}: {tagged_routes[runway]}\r\ntags for airport {airport}: {tagged_routes[airport]}''')\r\n yield from _process_departure_fix_list(fix_list[:-1],\r\n runway, airport, fixes, tagged_routes, top_level=False, altitude=altitude)\r\n yield from _process_departure_fix_list(tagged_route,\r\n runway, airport, fixes, tagged_routes, top_level=False)\r\n\r\n else:\r\n if altitude is not None:\r\n fix_list = fix_list_with_altitude(fix_list, altitude)\r\n yield from process_fix_list(fix_list, fixes)\r\n\r\n except Exception as e:\r\n raise RuntimeError(\r\n f\"Could not process departure route {fix_list} for 
runway {runway}\"\r\n ) from e", "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n# distance_matrix = compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n# if assignment:\n# print_solution(manager, routing, assignment)\n if assignment:\n address1,address2,address3,address4,address5,address6,address7,address8,address9,address10=\\\n set_address_path(manager, routing, assignment,destinations_1)\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def _validate_router_tz(self, context, tier0_uuid, subnets):\n pass", "def lookup_routes(self, daddr):\n # TODO\n outroutes = []\n\n net_pre = daddr[0 : daddr.index('.')] + '.0.0.0'\n\n #print(self.routes)\n\n for ip in self.routes.keys():\n network = self.routes[ip][NTWK]\n net_pre_2 = network[0:network.index('.')] + '.0.0.0'\n if net_pre_2 == net_pre:\n outroutes.append(ip)\n return outroutes", "def count_routes_exact_stops(self, src, dest, num_stops):\n\n count = 0\n criteria = lambda stops, distance: stops <= num_stops # inconsistent max, per test cases\n for route in self.routes_with_criteria(src, dest, criteria):\n if num_stops == len(route) - 1:\n count += 1\n return count", "def CheckIfRouteIncludesAllStationsInList(PathInfo, IncludedStations):\r\n\tif not IncludedStations:\r\n\t\treturn True \r\n\r\n\tpath = set(GetAllStationsOfRoute(PathInfo))\r\n\tstations = set(IncludedStations)\r\n\r\n\tif path.issuperset(stations):\r\n\t\treturn True \r\n\telse:\r\n\t\treturn False", "def get_origin_routes(self, routes):\n outroutes = []\n current_best = \"UNK\"\n # iterate through routes in given list updating the current best if a better\n # option is discovered\n for route in routes:\n if route[ORIG] == current_best:\n outroutes.append(route)\n elif (route[ORIG] == \"EGP\" and current_best != \"IGP\") or route[ORIG] == \"IGP\":\n # if the current best is worse than EGP and the current is EGP,\n # update best and start a new list\n # if the current best is worse than IGP and the current is IGP,\n # update best and start a new list\n current_best = route[ORIG]\n outroutes = [route]\n\n return outroutes", "def route_info(g, journey):\n distance = 0\n cost = 0.00\n time = 0\n check = 0\n \n for i in range(0, 
len(journey) - 1):\n city_name = journey[i]\n city_next = journey[i + 1]\n code_city = g.convert[city_name] \n code_next = g.convert[city_next]\n \n for flight in g.city_dict[code_city].get_flights_out():\n if(flight[0] == code_next):\n distance = distance + flight[1]\n time = time + route_info_helper(g, code_city, code_next, flight[1])\n if(i < 7):\n cost = cost + (distance * (0.35 - (i * 0.05)))\n \n check = check + 1\n if((check + 1) == len(journey)):\n return distance, cost, time\n else:\n print(\"Invalid Route\")\n return 0, 0, 0", "def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )", "def validate_incoming_request(route_mapper, request, schema_map, resolver):\n if schema_map.request_query_schema:\n # You'll notice we use Draft3 some places and Draft4 in others.\n # Unfortunately this is just Swagger's inconsistency showing. It\n # may be nice in the future to do the necessary munging to make\n # everything Draft4 compatible, although the Swagger UI will\n # probably never truly support Draft4.\n request_query_params = dict(\n (k, cast_request_param(schema_map.request_query_schema, k, v))\n for k, v\n in request.params.items()\n )\n Draft3Validator(\n schema_map.request_query_schema,\n resolver=resolver,\n types=EXTENDED_TYPES,\n ).validate(request_query_params)\n\n if schema_map.request_path_schema:\n # We don't have access to this yet but let's go ahead and build the\n # matchdict so we can validate it.\n info = route_mapper(request)\n matchdict = dict(\n (k, cast_request_param(schema_map.request_path_schema, k, v))\n for k, v\n in info.get('match', {}).items()\n )\n Draft3Validator(\n schema_map.request_path_schema,\n resolver=resolver,\n types=EXTENDED_TYPES,\n ).validate(matchdict)\n\n # Body validation\n if schema_map.request_body_schema:\n body = getattr(request, 'json_body', {})\n Draft4Validator(\n schema_map.request_body_schema,\n resolver=resolver,\n types=EXTENDED_TYPES,\n ).validate(body)", "def test_route_schema_init() -> None:\n destination = SpecificLocation()\n rschema = RouteSchema(destination)\n\n assert rschema.destination is not None\n assert rschema.destination._id == destination._id", "def _validate(self):\n REQUIRED_KEYS = [ 'name', 'year', 'artist_id', 'genre_ids', 'sources' ]\n\n missing_keys = get_missing_keys(self.request.data, REQUIRED_KEYS)\n if len(missing_keys) > 0:\n return f\"Request body is missing the following required properties: {', '.join(missing_keys)}.\"\n\n artist_id = self.request.data['artist_id']\n\n try:\n Artist.objects.get(pk=artist_id)\n except Artist.DoesNotExist:\n return \"`artistId` supplied does not match an existing artist.\" \n\n genre_ids = self.request.data['genre_ids']\n if len(genre_ids) == 0:\n return \"You must specify at least one genre id in `genreIds` array.\"\n\n for genre_id 
in genre_ids:\n            try:\n                Genre.objects.get(pk=genre_id)\n            except Genre.DoesNotExist:\n                return f\"The genre id {genre_id} does not match an existing genre.\"\n\n        sources = self.request.data['sources']\n        if len(sources) == 0:\n            return \"You must specify at least one source in `sources` array.\"\n\n        for source in sources:\n            if 'service' not in source or 'url' not in source or 'is_primary' not in source:\n                return \"All sources must contain `service`, `url`, and `is_primary` properties.\"\n\n        primary_sources = [ source for source in sources if source['is_primary'] == True ]\n        if len(primary_sources) != 1:\n            return \"There must be one and only one primary source.\"\n\n        return False

def validate_routes(route):\n    if ROUTE_PATTERN.match(route):\n        if route[0] == route[1]:\n            raise argparse.ArgumentTypeError('Invalid route format, cannot have same city: %s' % route)\n        return route\n    else:\n        raise argparse.ArgumentTypeError('Invalid route format for: %s. Should be {A-Z}{A-Z}{0-9}+' % route)

def test_fields(self):\n        expected = (\n            'id',\n            # Incoming foreign keys from subclasses\n            'routeredirect',  # conman.redirects.models.RouteRedirect\n            'routesubclass',  # tests.models.RouteSubclass\n            'templateroute',  # tests.models.TemplateRoute\n            'urlconfroute',  # tests.models.URLConfRoute\n            'urlredirect',  # conman.redirects.models.URLRedirect\n            'viewroute',  # tests.models.ViewRoute\n        ) + NODE_BASE_FIELDS\n        fields = field_names(Route)\n        self.assertCountEqual(fields, expected)

def test_different_routes_from_c_to_c_and_distance_less_than_30(self):\n        railroad = trains.Railroad()\n        routes = railroad.find_routes('C', 'C', 9)\n        routes = railroad.filter_routes_by_distance(routes, 0, 30)\n        self.assertEqual(len(routes), 7)

def valid_trip(self):\n        if self.pickupcoords is None or self.dropoffcoords is None:\n            return False\n        valid = lambda x, y: 41 < x < 43.5 and -72.5 < y < -70.5\n        return valid(self.pickupcoords[0], self.pickupcoords[1]) and valid(self.dropoffcoords[0], self.dropoffcoords[1])

def CheckIfRouteExludesAllStationInList(PathInfo, ExcludedStations):\r\n\tif ExcludedStations is None or len(ExcludedStations) == 0:\r\n\t\treturn True \r\n\r\n\tpath = set(GetAllStationsOfRoute(PathInfo))\r\n\tstations = set(ExcludedStations)\r\n\r\n\tif len(path.intersection(stations)) == 0:\r\n\t\treturn True \r\n\telse:\r\n\t\treturn False

def allowed_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n        return pulumi.get(self, \"allowed_origins\")

def optimizedRoutePossibilities(routes,cities):\n\tgraph = createOptimizedGraph(routes)\n\tfor couple in permutationsFromOrigin(cities):\n\t\tif couple is not None:\n\t\t\t#yield find_all_paths2(graph,couple[0],couple[1])[0]\n\t\t\tprint(find_all_paths2(graph,couple[0],couple[1])[0])

def find_directions(start, end, transit_mode=None):\n    dir_url = build_url(start, end, transit_mode)[0]\n    json_response = json.loads(urllib.request.urlopen(dir_url).read().decode('utf-8'))\n    route_legs = json_response[routes_key][0][legs_key]\n    directions = []\n    for leg in route_legs:\n        for step in leg[steps_key]:\n            directions.append(strip_tags(step[instr_key]))\n\n    return directions

def __validate_input(self, request_data):\n        call_id = request_data.get(strings.CALL_ID_KEY)\n        request_timestamp = request_data.get(strings.TIMESTAMP_KEY)\n        request_start = request_data.get(strings.START_KEY)\n        validation = None\n        if call_id and request_timestamp and request_start is not None:\n            call_detail_query = CallDetail.objects.filter(call_id=call_id)\n            if call_detail_query:\n                if 
len(call_detail_query) < CALL_DETAILS_LIMIT:\n stored_call_detail = call_detail_query[0]\n if isinstance(request_start, str):\n if request_start in strings.TRUE_VALUES:\n request_start = True\n else:\n request_start = False\n if stored_call_detail.start == request_start:\n validation = {strings.INPUT_ERROR_KEY:\n strings.START_END_ERROR}\n stored_timestamp = standardize_date(\n stored_call_detail.timestamp,\n strings.COMPLETE_DATE_PATTERN)\n request_timestamp = standardize_date(request_timestamp,\n strings.\n COMPLETE_DATE_PATTERN)\n if stored_timestamp == request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.EQUAL_TIMESTAMPS_ERROR}\n if stored_call_detail.start and not request_start:\n if stored_timestamp > request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n elif not stored_call_detail.start and request_start:\n if stored_timestamp < request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n else:\n validation = {strings.INPUT_ERROR_KEY:\n strings.CALL_LIMIT_ERROR}\n\n return validation", "def validate(self):\n valid = True\n \n # Check that link information is valid\n for ij in self.link:\n valid = valid and self.link[ij].head in self.node\n valid = valid and self.link[ij].tail in self.node\n if not valid:\n print(\"Error: Link tail/head not found: %s %s\" % (self.link[ij].tail, self.link[ij].head))\n raise utils.BadFileFormatException\n valid = valid and self.link[ij].capacity >= 0\n valid = valid and self.link[ij].length >= 0\n valid = valid and self.link[ij].freeFlowTime >= 0\n valid = valid and self.link[ij].alpha >= 0\n valid = valid and self.link[ij].beta >= 0\n valid = valid and self.link[ij].speedLimit >= 0\n valid = valid and self.link[ij].toll >= 0\n if not valid:\n print(\"Link %s has negative parameters.\" % ij)\n \n # Then check that all OD pairs are in range\n for ODpair in self.ODpair:\n (origin, destination) = (self.ODpair[ODpair].origin, self.ODpair[ODpair].destination)\n valid = valid and origin in self.node\n valid = valid and destination in self.node\n if not valid:\n print(\"Error: Origin/destination %s not found\" % ODpair)\n raise utils.BadFileFormatException\n valid = valid and self.node[origin].isZone == True\n valid = valid and self.node[destination].isZone == True\n if not valid:\n print(\"Error: Origin/destination %s does not connect two zones\" % str(ODpair))\n raise utils.BadFileFormatException\n valid = valid and self.ODpair[ODpair].demand >= 0\n if not valid:\n print(\"Error: OD pair %s has negative demand\" % ODpair)\n raise utils.BadFileFormatException\n \n # Now error-check using metadata\n if self.numNodes != None and len(self.node) != self.numNodes:\n print(\"Warning: Number of nodes implied by network file %d different than metadata value %d\" % (len(self.node), self.numNodes))\n self.numNodes = len(self.node)\n if self.numLinks != None and len(self.link) != self.numLinks:\n print(\"Warning: Number of links given in network file %d different than metadata value %d\" % (len(self.link), self.numLinks))\n self.numLinks = len(self.link)\n if self.numZones != None and len([i for i in self.node if self.node[i].isZone == True]) != self.numZones:\n print(\"Warning: Number of zones given in network file %d different than metadata value %d\" % (len([i for i in self.node if self.node[i].isZone == True]), self.numZones))\n self.numLinks = len(self.link)\n if self.totalDemandCheck != None:\n if self.totalDemand != self.totalDemandCheck:\n print(\"Warning: Total demand is %f 
compared to metadata value %f\" % ( self.totalDemand, self.totalDemandCheck))", "def __init__(self, trip_update, stops, position_in_list):\n self.trip_update = trip_update\n self.stops = stops\n self.routeID = str(self.trip_update.trip.route_id)\n # A minor quirk in the MTA's data is fixed here. S trains were listed as GS for some reason\n if self.routeID == \"GS\":\n self.routeID = \"S\"\n self.index = position_in_list", "def filter_relationships(self, srcip, routes):\n outroutes = []\n return outroutes", "def create_url(_origin_details, travel_start_date, travel_start_time, destination_list):\n prefix = 'https://timetable.search.ch/api/route.json?one_to_many=1'\n\n origin_body = f'&from={_origin_details}&date={travel_start_date}&time={travel_start_time}'\n\n # Build iteratively with necessary syntax between destinations\n destination_body = ''\n for i, dest in enumerate(destination_list):\n destination_body = f'{destination_body}&to[{i}]={dest}'\n\n return f'{prefix}{origin_body}{destination_body}'", "def compute_travel_cost_adjlist(\n origins, destinations, network, index_orig=None, index_dest=None\n):\n\n # NOTE: need to add an option/check for symmetric networks so we only need half the routing calls\n\n origins = origins.copy()\n destinations = destinations.copy()\n\n origins[\"osm_ids\"] = network.get_node_ids(\n origins.centroid.x, origins.centroid.y\n ).astype(int)\n destinations[\"osm_ids\"] = network.get_node_ids(\n destinations.centroid.x, destinations.centroid.y\n ).astype(int)\n\n ods = []\n\n if not index_orig:\n origins[\"idx\"] = origins.index.values\n index_orig = \"idx\"\n if not index_dest:\n destinations[\"idx\"] = destinations.index.values\n index_dest = \"idx\"\n\n # I dont think there's a way to do this in parallel, so we can at least show a progress bar\n with tqdm(total=len(origins[\"osm_ids\"])) as pbar:\n for origin in origins[\"osm_ids\"]:\n df = pd.DataFrame()\n df[\"cost\"] = network.shortest_path_lengths(\n [origin for d in destinations[\"osm_ids\"]],\n [d for d in destinations[\"osm_ids\"]],\n )\n df[\"destination\"] = destinations[index_dest].values\n df[\"origin\"] = origins[origins.osm_ids == origin][index_orig].values[0]\n\n ods.append(df)\n pbar.update(1)\n\n combined = pd.concat(ods)\n # reorder the columns\n return combined[['origin', 'destination', 'cost']]", "def allowed_origin_patterns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_origin_patterns\")", "def route_validity_checker(): #below route list was rerturned from bus_routes function above, copy and pasted to eliminate need to re-run\n route_list=['15', '46A', '14', '41B', '39A', '65', '40D', '11', '31', '27', '67', '79', '42', '66A', '33B', '140', '44', '83A', '27B', '38', '16C', '747', '41C', '39', '25', '239', '43', '70', '13', '150', '145', '77A', '184', '84', '61', '83', '40', '66', '15A', '123', '17A', '16', '14C', '9', '4', '37', '32', '33', '49', '56A', '151', '25A', '45A', '54A', '47', '18', '7', '17', '102', '120', '65B', '41', '122', '29A', '76', '68', '59', '25B', '69', '27A', '66B', '38B', '7D', '75', '15B', '84A', '63', '84X', '33X', '68A', '1', '76A', '7B', '270', '236', '130', '238', '220', '44B', '40B', '26', '32B', '8', '41A', '53', '67X', '104', '32A', '79A', '114', '185', '66X', '31B', '32X', '51X', '51D', '41X', '142', '111', '69X', '27X', '116', '46E', '161', '118', '25X', '38A', '33A', 'PP07', '53B', '31A', 'OL84']\n count_dict={}\n for route in route_list: #dictionary with key for every route in the list\n 
count_dict[route]=0 #used to count number of occurrences in files\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"): #for every file\n print(file)\n reader=csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3])\n if route!=\"\":\n count_dict[extract_bus_route(line[3])]+=1 #incremenent the counter of the route with the associated journey id code\n return count_dict #result is that 3 routes are likely due to strange circumstances or errors in data", "def test_addr_city_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_city(input_val)\n self.assertEqual(output_val, self.line.addr_city)", "def combineGPSandPhoneStops(arg):\r\n\r\n # unpack parameters\r\n user_gps, user_cell, dur_constr, spat_constr_gps, spat_cell_split = arg\r\n\r\n # combine cellular stay if it is close to a gps stay\r\n cell_stays = list(set([(trace[6],trace[7]) for d in user_cell for trace in user_cell[d] if int(trace[9]) >= dur_constr]))\r\n gps_stays = list(set([(trace[6],trace[7]) for d in user_gps for trace in user_gps[d] if int(trace[9]) >= dur_constr]))\r\n pairs_close = set()\r\n for cell_stay in cell_stays:\r\n for gps_stay in gps_stays:\r\n if distance(cell_stay[0],cell_stay[1],gps_stay[0],gps_stay[1]) <= spat_constr_gps:\r\n pairs_close.add((gps_stay[0],gps_stay[1],cell_stay[0],cell_stay[1]))\r\n break\r\n # find all pair[1]s in list, and replace it with pair[0]\r\n for pair in list(pairs_close):\r\n for d in user_cell.keys():\r\n for trace in user_cell[d]:\r\n if trace[6] == pair[2] and trace[7] == pair[3]:\r\n trace[5], trace[6], trace[7] = 99, pair[0], pair[1] #pretend as gps\r\n\r\n user = user_gps\r\n for d in user.keys():\r\n if len(user_cell[d]):\r\n user[d].extend(user_cell[d])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr) #OscillationPairList = oscillation_h1_oscill(user, dur_constr)\r\n # ## when replaced, can only replaced with a gps stay; so let modify exchange ping-pong pair in the pairList\r\n # gpslist_temp = {(trace[6], trace[7]):int(trace[5]) for d in user.keys() for trace in user[d]}\r\n # for pair_i in range(len(OscillationPairList)):\r\n # if gpslist_temp[(OscillationPairList[pair_i][0],OscillationPairList[pair_i][1])] <= spat_constr_gps:# wrong(2,3)\r\n # OscillationPairList[pair_i] = [OscillationPairList[pair_i][2],OscillationPairList[pair_i][3],\r\n # OscillationPairList[pair_i][0],OscillationPairList[pair_i][1]]\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n for d in user:\r\n phone_index = [k for k in range(len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) == 0: # if no phone trace\r\n continue\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr: # passing phone observ\r\n user[d][i].append('checked')\r\n # combine consecutive obsv on a phone stay into two observ\r\n i = min(phone_index) # i has to be a phone index\r\n j = i + 1\r\n while i < len(user[d]) - 1:\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last 
observation\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n break\r\n if int(user[d][j][5]) > spat_cell_split and user[d][j][6] == user[d][i][6] \\\r\n and user[d][j][7] == user[d][i][7] and j < len(user[d]):\r\n j += 1\r\n else:\r\n for k in range(i + 1, j - 1, 1):\r\n user[d][k] = []\r\n phone_index = [k for k in range(j, len(user[d])) if int(user[d][k][5]) > spat_cell_split]\r\n if len(phone_index) < 3: # if no phone trace\r\n break\r\n i = min(phone_index) ##i has to be a phone index\r\n j = i + 1\r\n i = 0 # remove []\r\n while i < len(user[d]):\r\n if len(user[d][i]) == 0:\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # adress phone stay one by one\r\n flag_changed = True\r\n phone_list_check = []\r\n while (flag_changed):\r\n # print('while........')\r\n flag_changed = False\r\n gps_list = []\r\n phone_list = []\r\n for i in range(len(user[d])):\r\n if int(user[d][i][5]) <= spat_cell_split:#or user[d][i][2] == 'addedphonestay': #changed on 0428\r\n gps_list.append(user[d][i])\r\n else:\r\n phone_list.append(user[d][i])\r\n\r\n phone_list.extend(phone_list_check)\r\n # when updating duration for phone stay, we have to put back passing obs\r\n phone_list = sorted(phone_list, key=itemgetter(0))\r\n # update phone stay\r\n i = 0\r\n j = i\r\n while i < len(phone_list):\r\n if j >= len(phone_list): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n # we don't want to change a pssing into a stay; as we have not process the combine this stay\r\n # this is possible when a stay that prevents two passing is mergeed into gps as gps points\r\n phone_list[k][9] = dur\r\n break\r\n if phone_list[j][6] == phone_list[i][6] and phone_list[j][7] == phone_list[i][7] and j < len(\r\n phone_list):\r\n j += 1\r\n else:\r\n dur = str(int(phone_list[j - 1][0]) - int(phone_list[i][0]))\r\n for k in range(i, j, 1):\r\n if int(phone_list[k][9]) >= dur_constr:\r\n phone_list[k][9] = dur\r\n i = j\r\n for trace in phone_list: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] = -1\r\n if len(phone_list) == 1: phone_list[0][9] = -1\r\n\r\n # update check lable\r\n for i in range(len(phone_list)):\r\n if int(phone_list[i][5]) > spat_cell_split and int(phone_list[i][9]) < dur_constr \\\r\n and phone_list[i][-1] != 'checked':\r\n # passing phone observ\r\n phone_list[i].append('checked')\r\n\r\n # put those not checked together with gps\r\n user[d] = gps_list\r\n phone_list_check = []\r\n for i in range(len(phone_list)):\r\n if phone_list[i][-1] == 'checked':\r\n phone_list_check.append(phone_list[i])\r\n else:\r\n user[d].append(phone_list[i])\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # find a stay which is not checked\r\n flag_phonestay_notchecked = False\r\n phonestay_left, phonestay_right = -1, -1\r\n for i in range(max(0, phonestay_right+1), len(user[d])):\r\n phonestay_left, phonestay_right = -1, -1\r\n if int(user[d][i][5]) > spat_cell_split \\\r\n and int(user[d][i][9]) >= dur_constr and user[d][i][-1] != 'checked':\r\n phonestay_left = phonestay_right\r\n phonestay_right = i\r\n if phonestay_left != -1 and phonestay_right != -1 \\\r\n and user[d][phonestay_left][9] == user[d][phonestay_right][9]:\r\n flag_phonestay_notchecked = True\r\n\r\n ## modified on 04152019\r\n if flag_phonestay_notchecked == False or len(phone_list) == 0: # if all phone observation are 
checked, end\r\n break\r\n # if they are not two consecutive observation\r\n if phonestay_right != phonestay_left + 1: # attention: only phonestay_left is addressed\r\n # not consecutive two observations\r\n if any([int(user[d][j][9]) >= dur_constr for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # found a gps stay in betw\r\n # print('23: found a gps stay in betw, just use one gps stay trade one phone stay')\r\n temp = user[d][phonestay_left][6:]\r\n user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # find close gps\r\n # print('24: do not found a gps stay in betw')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n if all([(phone_uncernt + int(user[d][j][5])) > 1000 * distance(user[d][j][3], user[d][j][4],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n for j in range(phonestay_left + 1, phonestay_right, 1)]):\r\n # total uncerty larger than distance\r\n # this case should be rare, as those close gps may be clustered\r\n # print('241: all gps falling betw are close with phone stay')\r\n temp = user[d][phonestay_left][3:] # copy neighbor gps\r\n user[d][phonestay_left][3:] = user[d][phonestay_left + 1][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n flag_changed = True\r\n else:\r\n # print('242: find a gps in betw,\r\n # which is far away with phone stay, contradic with a stay (with phone obsv)')\r\n temp = user[d][phonestay_left][6:]\r\n user[d][phonestay_left][6:] = [-1, -1, -1, -1, -1, -1] # phone disappear\r\n # user[d][phonestay_left].extend(temp)\r\n user[d][phonestay_left].append('checked')\r\n # del user[d][phonestay_left] # phone disappear\r\n flag_changed = True\r\n else: # if they are two consecutive traces\r\n # two consecutive observation\r\n # if phonestay_left != 0 and phonestay_right < len(user[d]) - 1:\r\n # ignore if they are at the beginning or the end of traj\r\n prev_gps = next_gps = 0 # find prevous and next gps\r\n found_prev_gps = False\r\n found_next_gps = False\r\n for prev in range(phonestay_left - 1, -1, -1):\r\n # if int(user[d][prev][5]) <= spat_cell_split: ########## changed on 04282018\r\n if int(user[d][prev][5]) <= spat_cell_split and int(user[d][prev][9]) >= dur_constr:\r\n prev_gps = prev\r\n found_prev_gps = True\r\n break\r\n for nxt in range(phonestay_right + 1, len(user[d])):\r\n if int(user[d][nxt][5]) <= spat_cell_split and int(user[d][nxt][9]) >= dur_constr:\r\n next_gps = nxt\r\n found_next_gps = True\r\n break\r\n\r\n if found_prev_gps and found_next_gps and user[d][prev_gps][6] == user[d][next_gps][6]:\r\n # this is a phone stay within a gps stay\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 \\\r\n and dist > 
1000*spat_constr_gps and speed_dep < 200 and speed_retn < 200:\r\n # print('1111: distance larger than acc, and can travel, add phone stay, shorten gps stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else: # merge into gps stay\r\n # print('1112: distance less than acc, or cannot travel, merge into gps stay')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_prev_gps and found_next_gps and user[d][prev_gps][6] != user[d][next_gps][6]:\r\n phone_uncernt_l = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_l = int(user[d][prev_gps][8])\r\n dist_l = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist_l - phone_uncernt_l - gps_uncernt_l) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n phone_uncernt_r = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt_r = int(user[d][next_gps][8])\r\n dist_r = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist_r - phone_uncernt_r - gps_uncernt_r) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n comb_l = 0 #revised on 03202019 to pick up one gps stay to combine with; if spatial conti with multi\r\n comb_r = 0\r\n if (dist_l - phone_uncernt_l - gps_uncernt_l) < 0 \\\r\n or dist_l < 1000*spat_constr_gps or speed_dep > 200:\r\n comb_l = 1\r\n if (dist_r - phone_uncernt_r - gps_uncernt_r) < 0 \\\r\n or dist_r < 1000 * spat_constr_gps or speed_retn > 200:\r\n comb_r = 1\r\n if comb_l*comb_r == 1:\r\n if dist_l < dist_r:\r\n comb_r = 0\r\n else:\r\n comb_l = 0\r\n if comb_l:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif comb_r:\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else:\r\n user[d][phonestay_left].append('checked')\r\n # those phone stay not removed have to be marked with 'checked'!\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n 
user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n elif found_prev_gps: # a gps stay #right# before\r\n # print('113: before phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][prev_gps][8])\r\n dist = 1000 * distance(user[d][prev_gps][6],\r\n user[d][prev_gps][7],\r\n user[d][phonestay_left][6],\r\n user[d][phonestay_left][7])\r\n speed_dep = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][phonestay_left][0]) - int(user[d][prev_gps][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_dep < 200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1132: dist>low_acc, add phone stay')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:\r\n # print('1131: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][prev_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n elif found_next_gps: # a gps stay #right# after\r\n # print('112: after phone stay, we have gps stay')\r\n phone_uncernt = max([int(user[d][phonestay_left][8]), int(user[d][phonestay_left][5]),\r\n int(user[d][phonestay_right][5])])\r\n gps_uncernt = int(user[d][next_gps][8])\r\n dist = 1000 * distance(user[d][next_gps][6],\r\n user[d][next_gps][7],\r\n user[d][phonestay_right][6],\r\n user[d][phonestay_right][7])\r\n speed_retn = (dist - phone_uncernt - gps_uncernt) / \\\r\n (int(user[d][next_gps][0]) - int(user[d][phonestay_right][0])) * 3.6\r\n if (dist - phone_uncernt - gps_uncernt) > 0 and dist > 1000*spat_constr_gps and speed_retn<200:\r\n # spatially seperate enough and can travel, add in gps\r\n # print('1122: dist>low_acc, add phone stay')\r\n # leave phone stay there, we later update duration for the gps stay\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n else:# remain phone observ, but use gps location\r\n # print('1121: low_acc > dist, merge with gps stay, meaning extend gps dur')\r\n temp = user[d][phonestay_left][3:]\r\n user[d][phonestay_left][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_left][11] = temp[8]\r\n # user[d][phonestay_left].extend(temp)\r\n temp = user[d][phonestay_right][3:]\r\n user[d][phonestay_right][3:] = user[d][next_gps][3:]\r\n user[d][phonestay_right][11] = temp[8]\r\n # user[d][phonestay_right].extend(temp)\r\n flag_changed = True\r\n else: # if don't match any case, just add it\r\n # print('donot match any case, just add it (e.g., consecutive two phone stays)')\r\n # leave phone stay there\r\n user[d][phonestay_left].append('checked')\r\n user[d][phonestay_right].append('checked')\r\n user[d][phonestay_left][2] = 'addedphonestay'\r\n user[d][phonestay_right][2] = 'addedphonestay'\r\n flag_changed = True\r\n\r\n\r\n # 
user[d].extend(phone_list_check)\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace[:])\r\n # remove passingby cellular traces\r\n i = 0\r\n while i<len(user[d]):\r\n if user[d][i][5] == 99 and float(user[d][i][9]) < dur_constr:\r\n del user[d][i]\r\n else:\r\n i+=1\r\n # remove passing traces\r\n ## Flag_changed = True\r\n ## while (Flag_changed):\r\n ## Flag_changed = False\r\n # i = 0\r\n # while i < len(user[d]):\r\n # if int(user[d][i][5]) > spat_cell_split and int(user[d][i][9]) < dur_constr:\r\n # # Flag_changed = True\r\n # del user[d][i]\r\n # else:\r\n # i += 1\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n # update duration\r\n i = 0\r\n j = i\r\n while i < len(user[d]):\r\n if j >= len(user[d]): # a day ending with a stay, j goes beyond the last observation\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n break\r\n if user[d][j][6] == user[d][i][6] and user[d][j][7] == user[d][i][7] and j < len(\r\n user[d]):\r\n j += 1\r\n else:\r\n dur = str(int(user[d][j - 1][0]) - int(user[d][i][0]))\r\n for k in range(i, j, 1):\r\n user[d][k][9] = dur\r\n i = j\r\n for trace in user[d]: # those trace with gps as -1,-1 (not clustered) should not assign a duration\r\n if float(trace[6]) == -1: trace[9] = -1\r\n if len(user[d]) == 1: user[d][0][9] = -1\r\n # remove and add back; because phone stays are distroyed as multiple, should be combined as one\r\n i = 0\r\n while i < len(user[d]):\r\n if user[d][i][2] == 'addedphonestay':\r\n del user[d][i]\r\n else:\r\n i += 1\r\n # add back and sort\r\n for trace in phone_list_check:\r\n if trace[2] == 'addedphonestay':\r\n user[d].append(trace)\r\n\r\n user[d] = sorted(user[d], key=itemgetter(0))\r\n\r\n # remove temp marks\r\n user[d]=[trace[:12] for trace in user[d]]\r\n\r\n # oscillation\r\n # modify grid\r\n for day in user.keys():\r\n for trace in user[day]:\r\n if float(trace[6]) == -1:\r\n found_stay = False\r\n if found_stay == False:\r\n trace[6] = trace[3] + '000' # in case do not have enough digits\r\n trace[7] = trace[4] + '000'\r\n digits = (trace[6].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2]) / 2)\r\n trace[6] = (trace[6].split('.'))[0] + '.' + digits\r\n # trace[6] = trace[6][:5] + str(int(trace[6][5]) / 2) # 49.950 to 49.952 220 meters\r\n digits = (trace[7].split('.'))[1]\r\n digits = digits[:2] + str(int(digits[2:4]) / 25)\r\n trace[7] = (trace[7].split('.'))[0] + '.' 
+ digits\r\n # trace[7] = trace[7][:7] + str(int(trace[7][7:9]) / 25) # -122.3400 to -122.3425 180 meters\r\n\r\n # added to address oscillation\r\n user = oscillation_h1_oscill(user, dur_constr)\r\n ## find pong in trajactory, and replace it with ping\r\n ## this part is now integreted into the function itself\r\n ## OscillationPairList is in format: {, (ping[0], ping[1]): (pong[0], pong[1])}\r\n # for d in user.keys():\r\n # for trace in user[d]:\r\n # if (trace[6], trace[7]) in OscillationPairList:\r\n # trace[6], trace[7] = OscillationPairList[(trace[6], trace[7])]\r\n\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n\r\n # end addressing oscillation\r\n # those newly added stays should be combined with close stays\r\n user = cluster_incremental(user, spat_constr_gps, dur_constr=dur_constr)\r\n # update duration\r\n user = update_duration(user, dur_constr)\r\n # use only one record for one stay\r\n for d in user:\r\n i = 0\r\n while i < len(user[d]) - 1:\r\n if user[d][i + 1][6] == user[d][i][6] and user[d][i + 1][7] == user[d][i][7] \\\r\n and user[d][i + 1][9] == user[d][i][9] and int(user[d][i][9]) >= dur_constr:\r\n del user[d][i + 1]\r\n else:\r\n i += 1\r\n # mark stay\r\n staylist = set() # get unique staylist\r\n for d in user.keys():\r\n for trace in user[d]:\r\n if float(trace[9]) >= dur_constr:\r\n staylist.add((trace[6], trace[7]))\r\n else: # change back keep full trajectory: do not use center for those are not stays\r\n trace[6], trace[7], trace[8], trace[9] = -1, -1, -1, -1 # for non stay, do not give center\r\n staylist = list(staylist)\r\n for d in user.keys():\r\n for trace in user[d]:\r\n for i in range(len(staylist)):\r\n if trace[6] == staylist[i][0] and trace[7] == staylist[i][1]:\r\n trace[10] = 'stay' + str(i)\r\n break\r\n\r\n return user", "def destination_route_table_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"destination_route_table_ids\")", "def destination_route_table_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"destination_route_table_ids\")", "def __init__(__self__, *,\n as_path_match_mode: Optional[pulumi.Input[str]] = None,\n cen_id: Optional[pulumi.Input[str]] = None,\n cen_region_id: Optional[pulumi.Input[str]] = None,\n cidr_match_mode: Optional[pulumi.Input[str]] = None,\n community_match_mode: Optional[pulumi.Input[str]] = None,\n community_operate_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n destination_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n map_result: Optional[pulumi.Input[str]] = None,\n match_asns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n match_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n next_priority: Optional[pulumi.Input[int]] = None,\n operate_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n preference: Optional[pulumi.Input[int]] = None,\n prepend_as_paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n route_map_id: Optional[pulumi.Input[str]] 
= None,\n route_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n source_region_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n transit_router_route_table_id: Optional[pulumi.Input[str]] = None,\n transmit_direction: Optional[pulumi.Input[str]] = None):\n if as_path_match_mode is not None:\n pulumi.set(__self__, \"as_path_match_mode\", as_path_match_mode)\n if cen_id is not None:\n pulumi.set(__self__, \"cen_id\", cen_id)\n if cen_region_id is not None:\n pulumi.set(__self__, \"cen_region_id\", cen_region_id)\n if cidr_match_mode is not None:\n pulumi.set(__self__, \"cidr_match_mode\", cidr_match_mode)\n if community_match_mode is not None:\n pulumi.set(__self__, \"community_match_mode\", community_match_mode)\n if community_operate_mode is not None:\n pulumi.set(__self__, \"community_operate_mode\", community_operate_mode)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if destination_child_instance_types is not None:\n pulumi.set(__self__, \"destination_child_instance_types\", destination_child_instance_types)\n if destination_cidr_blocks is not None:\n pulumi.set(__self__, \"destination_cidr_blocks\", destination_cidr_blocks)\n if destination_instance_ids is not None:\n pulumi.set(__self__, \"destination_instance_ids\", destination_instance_ids)\n if destination_instance_ids_reverse_match is not None:\n pulumi.set(__self__, \"destination_instance_ids_reverse_match\", destination_instance_ids_reverse_match)\n if destination_route_table_ids is not None:\n pulumi.set(__self__, \"destination_route_table_ids\", destination_route_table_ids)\n if map_result is not None:\n pulumi.set(__self__, \"map_result\", map_result)\n if match_asns is not None:\n pulumi.set(__self__, \"match_asns\", match_asns)\n if match_community_sets is not None:\n pulumi.set(__self__, \"match_community_sets\", match_community_sets)\n if next_priority is not None:\n pulumi.set(__self__, \"next_priority\", next_priority)\n if operate_community_sets is not None:\n pulumi.set(__self__, \"operate_community_sets\", operate_community_sets)\n if preference is not None:\n pulumi.set(__self__, \"preference\", preference)\n if prepend_as_paths is not None:\n pulumi.set(__self__, \"prepend_as_paths\", prepend_as_paths)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if route_map_id is not None:\n pulumi.set(__self__, \"route_map_id\", route_map_id)\n if route_types is not None:\n pulumi.set(__self__, \"route_types\", route_types)\n if source_child_instance_types is not None:\n pulumi.set(__self__, \"source_child_instance_types\", source_child_instance_types)\n if source_instance_ids is not None:\n pulumi.set(__self__, \"source_instance_ids\", source_instance_ids)\n if source_instance_ids_reverse_match is not None:\n pulumi.set(__self__, \"source_instance_ids_reverse_match\", source_instance_ids_reverse_match)\n if source_region_ids is not None:\n pulumi.set(__self__, \"source_region_ids\", source_region_ids)\n if source_route_table_ids is not None:\n pulumi.set(__self__, \"source_route_table_ids\", source_route_table_ids)\n if status is not None:\n 
pulumi.set(__self__, \"status\", status)\n if transit_router_route_table_id is not None:\n pulumi.set(__self__, \"transit_router_route_table_id\", transit_router_route_table_id)\n if transmit_direction is not None:\n pulumi.set(__self__, \"transmit_direction\", transmit_direction)", "def validate(self, allow_missing=False, allow_extra=False, cls=None, **kwargs):\n # pop any strand name:data pairs out of kwargs and into their own dict\n source_kwargs = tuple(name for name in kwargs.keys() if name in ALL_STRANDS)\n sources = dict((name, kwargs.pop(name)) for name in source_kwargs)\n for strand_name, strand_data in sources.items():\n\n if not allow_extra:\n if (strand_data is not None) and (strand_name not in self.available_strands):\n raise exceptions.StrandNotFound(\n f\"Source data is provided for '{strand_name}' but no such strand is defined in the twine\"\n )\n\n if not allow_missing:\n if (strand_name in self.available_strands) and (strand_data is None):\n raise exceptions.TwineValueException(\n f\"The '{strand_name}' strand is defined in the twine, but no data is provided in sources\"\n )\n\n if strand_data is not None:\n # TODO Consider reintroducing a skip based on whether cls is already instantiated. For now, leave it the\n # responsibility of the caller to determine what has already been validated and what hasn't.\n # # Use the twine to validate and instantiate as the desired class\n # if not isinstance(value, type(cls)):\n # self.logger.debug(\n # \"Instantiating %s as %s and validating against twine\", name, cls.__name__ if cls else \"default_class\"\n # )\n # return self.twine.validate(name, source=value, cls=cls)\n method = getattr(self, f\"validate_{strand_name}\")\n klass = self._get_cls(strand_name, cls)\n sources[strand_name] = method(strand_data, cls=klass, **kwargs)\n else:\n sources[strand_name] = None\n\n return sources", "def test_get_routes(self):\n routes = self.stop.routes\n self.assertEqual(type(routes), type([]))\n [self.assertEqual(type(i), BusRoute) for i in routes]\n routes[0].__repr__()\n routes[0].__str__()\n routes[0].__unicode__()", "def _make_reverse_relations_valid(self, data):\n for field_name, (field, related_field) in self._get_reverse_fields().items():\n if data.get(field.source) is None:\n continue\n if isinstance(field, serializers.ListSerializer):\n field = field.child\n if isinstance(field, serializers.ModelSerializer):\n # find the serializer field matching the reverse model relation\n for sub_field in field.fields.values():\n if sub_field.source == related_field.name:\n sub_field.required = False\n # found the matching field, move on\n break", "def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n # if destination within the board\n if to_col in range(9) and to_row in range(10):\n # if destination is 1 up and diagonally to the left\n if to_col - from_col == -2 and to_row - from_row == -3 and board[from_col][from_row - 1] == '' and board[from_col - 1][from_row - 2] == '':\n return True\n # if destination is 1 up and diagonally to the right\n if to_col - from_col == 2 and to_row - from_row == -3 and board[from_col][from_row - 1] == '' and board[from_col + 1][from_row - 2] == '':\n return True\n # if destination is 1 down and diagonally to the left\n if to_col - from_col == -2 and to_row - from_row == 3 and 
board[from_col][from_row + 1] == '' and board[from_col - 1][from_row + 2] == '':\n                return True\n            # if destination is 1 down and diagonally to the right\n            if to_col - from_col == 2 and to_row - from_row == 3 and board[from_col][from_row + 1] == '' and board[from_col + 1][from_row + 2] == '':\n                return True\n            # if destination is 1 left and diagonally up\n            if to_col - from_col == -3 and to_row - from_row == -2 and board[from_col - 1][from_row] == '' and board[from_col - 2][from_row - 1] == '':\n                return True\n            # if destination is 1 left and diagonally down\n            if to_col - from_col == -3 and to_row - from_row == 2 and board[from_col - 1][from_row] == '' and board[from_col - 2][from_row + 1] == '':\n                return True\n            # if destination is 1 right and diagonally up\n            if to_col - from_col == 3 and to_row - from_row == -2 and board[from_col + 1][from_row] == '' and board[from_col + 2][from_row - 1] == '':\n                return True\n            # if destination is 1 right and diagonally down\n            if to_col - from_col == 3 and to_row - from_row == 2 and board[from_col + 1][from_row] == '' and board[from_col + 2][from_row + 1] == '':\n                return True\n        return False

def processTradeRoutes(self):\n        try:\n            nextRound = self.currentRound+1\n            resultslist = []\n            for trID in self.tradeRoutes.keys():\n                myTradeRoute = self.tradeRoutes[trID]\n                (systemFromID, systemToID, tradeRouteType) = trID.split('-')\n                systemFrom = self.systems[systemFromID]\n                systemTo = self.systems[systemToID]\n                cancel = 0\n                warpReq = 0\n                # choose trade route type\n                if tradeRouteType == 'GEN':\n                    # update what system sends based on what it makes\n                    myTradeRoute.AL = systemFrom.prodAL\n                    myTradeRoute.EC = systemFrom.prodEC\n                    myTradeRoute.IA = systemFrom.prodIA\n                \n                # check if trade route is adjacent or requires warp gate capacity\n                if systemTo.id in systemFrom.warpGateSystems:\n                    warpReq = myTradeRoute.getWarpRequired()\n                    if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n                        cancel = 1\n                elif systemTo.id not in systemFrom.connectedSystems:\n                    cancel = 1\n                \n                if (systemFrom.AL >= myTradeRoute.AL and\n                    systemFrom.EC >= myTradeRoute.EC and\n                    systemFrom.IA >= myTradeRoute.IA and \n                    cancel == 0):\n                    # process trade route\n                    systemFrom.AL -= myTradeRoute.AL\n                    systemFrom.EC -= myTradeRoute.EC\n                    systemFrom.IA -= myTradeRoute.IA\n                    systemTo.AL += myTradeRoute.AL\n                    systemTo.EC += myTradeRoute.EC\n                    systemTo.IA += myTradeRoute.IA\n                    # deduct properly if empires are different\n                    empireFrom = self.empires[systemFrom.myEmpireID]\n                    empireTo = self.empires[systemTo.myEmpireID]\n                    if empireFrom != empireTo:\n                        empireFrom.AL -= myTradeRoute.AL\n                        empireFrom.EC -= myTradeRoute.EC\n                        empireFrom.IA -= myTradeRoute.IA\n                        empireTo.AL += myTradeRoute.AL\n                        empireTo.EC += myTradeRoute.EC\n                        empireTo.IA += myTradeRoute.IA\n                    \n                    if warpReq > 0:\n                        systemFrom.usedWGC += warpReq\n                        systemTo.usedWGC += warpReq\n                    \n                    # mail trade route completion\n                    resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n                    self.mailTradeInfo('completed', myTradeRoute, nextRound)\n                else:\n                    cancel = 1\n                \n                # check if route should be cancelled\n                if cancel == 1:\n                    resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n                    self.cancelTradeRoute(myTradeRoute.id, nextRound)\n                elif myTradeRoute.oneTime == 1:\n                    resultslist.append('one time trade route=%s' % myTradeRoute.id)\n                    self.cancelTradeRoute(myTradeRoute.id, nextRound)\n            \n            return str(resultslist)\n        except:\n            return 'galaxy->processTradeRoutes error'

def 
test_trips_starting_at_a_and_ending_at_c_by_4_stops(self):\n        railroad = trains.Railroad()\n        self.assertEqual(sorted(railroad.find_routes('A', 'C', 4, 4)), sorted(['ABCDC', 'ADCDC', 'ADEBC']))

def __init__(__self__, *,\n                 description: Optional[pulumi.Input[str]] = None,\n                 destination_region_id: Optional[pulumi.Input[str]] = None,\n                 destination_zone_id: Optional[pulumi.Input[str]] = None,\n                 group_name: Optional[pulumi.Input[str]] = None,\n                 rpo: Optional[pulumi.Input[int]] = None,\n                 source_region_id: Optional[pulumi.Input[str]] = None,\n                 source_zone_id: Optional[pulumi.Input[str]] = None,\n                 status: Optional[pulumi.Input[str]] = None):\n        if description is not None:\n            pulumi.set(__self__, \"description\", description)\n        if destination_region_id is not None:\n            pulumi.set(__self__, \"destination_region_id\", destination_region_id)\n        if destination_zone_id is not None:\n            pulumi.set(__self__, \"destination_zone_id\", destination_zone_id)\n        if group_name is not None:\n            pulumi.set(__self__, \"group_name\", group_name)\n        if rpo is not None:\n            pulumi.set(__self__, \"rpo\", rpo)\n        if source_region_id is not None:\n            pulumi.set(__self__, \"source_region_id\", source_region_id)\n        if source_zone_id is not None:\n            pulumi.set(__self__, \"source_zone_id\", source_zone_id)\n        if status is not None:\n            pulumi.set(__self__, \"status\", status)

def validate(self, *, destination: FieldModel, source: list) -> bool:\n        if not isinstance(source, list):\n            raise ValueError(f\"Action source script does not conform to required structure. ({source})\")\n        for chunk in self.core.chunks(lst=source, n=len(self.structure)):\n            # Loops through the phrasing of the structure, and checks that each term is as expected\n            # e.g. [ModifierModel, FieldModel] for [modifier1, field1, modifier2, field2]\n            # does not check that the actual terms match, though\n            for i, term in enumerate(self.structure):\n                if isinstance(chunk[i], term):\n                    continue\n                elif isinstance(chunk[i], dict):\n                    # Nested source\n                    if not chunk[i].get(\"action\") or not chunk[i].get(\"source\"):\n                        raise ValueError(f\"Nested script does not conform to required structure. 
({chunk[i]})\")\n chunk[i][\"action\"].validate(source=chunk[i][\"source\"])\n else:\n raise ValueError(\n f\"Source structure ({chunk}) doesn't conform to ACTION structure requirements ({self.structure}).\"\n )\n return True", "def check_paths(self):\n for path in self.paths:\n # check that arc starts at s\n arc = path[0]\n arc_start = self.arc_info[arc][\"start\"]\n assert(arc_start == self.source()), \"Path does not start at s\"\n # check that internal arcs are valid\n for (i, arc) in enumerate(path[:-1]):\n next_arc = path[i + 1]\n arc_destin = self.arc_info[arc][\"destin\"]\n next_arc_start = self.arc_info[next_arc][\"start\"]\n assert (arc_destin == next_arc_start), \"Invalid path\"\n arc = path[-1]\n arc_end = self.arc_info[arc][\"destin\"]\n assert(arc_end == self.sink()), \"Path does not end at t\"", "def process_departure_fix_list(fix_list, runway, airport, fixes, tagged_routes, base_runway=None):\r\n if fix_list:\r\n if runway not in tagged_routes:\r\n tagged_routes[runway] = {}\r\n if airport not in tagged_routes:\r\n tagged_routes[airport] = {}\r\n if fix_list[0].startswith('@'):\r\n tag = fix_list[0].lstrip('@')\r\n if tag.startswith('!!'):\r\n tag_namespace = None\r\n elif tag.startswith('!'):\r\n tag_namespace = airport\r\n elif runway:\r\n tag_namespace = runway\r\n else:\r\n raise RuntimeError(f\"departure tagged {tag} as a runway-specific route, but no runway was specified\")\r\n tagged_routes[tag_namespace][tag] = fix_list[1:]\r\n return None\r\n else:\r\n if base_runway is not None:\r\n runway = base_runway\r\n return _process_departure_fix_list(fix_list, runway, airport, fixes, tagged_routes)", "def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches\r\n # Select the inputs to process\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._select_inputs_one_to_one(chunk_definition)\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._get_od_pairs_for_chunk(chunk_definition)\r\n self._select_inputs_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n # Initialize the Route solver object\r\n self.initialize_rt_solver()\r\n self._add_unique_id_fields()\r\n\r\n # Insert the origins and destinations\r\n self.logger.debug(f\"Route solver fields transferred from Origins: {self.origin_transfer_fields}\")\r\n self.logger.debug(f\"Route solver fields transferred from Destinations: {self.destination_transfer_fields}\")\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._insert_stops_one_to_one()\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._insert_stops_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0:\r\n # There were no valid destinations for this set of origins\r\n self.logger.debug(\"No valid destinations for this set of origins. Skipping Route calculation.\")\r\n return\r\n\r\n # Load barriers\r\n # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of\r\n # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the\r\n # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers,\r\n # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal\r\n # solves. 
However, since barriers is likely an unusual case, deal with this only if it becomes a problem.\r\n for barrier_fc in self.barriers:\r\n self.logger.debug(f\"Loading barriers feature class {barrier_fc}...\")\r\n shape_type = arcpy.Describe(barrier_fc).shapeType\r\n if shape_type == \"Polygon\":\r\n class_type = arcpy.nax.RouteInputDataType.PolygonBarriers\r\n elif shape_type == \"Polyline\":\r\n class_type = arcpy.nax.RouteInputDataType.LineBarriers\r\n elif shape_type == \"Point\":\r\n class_type = arcpy.nax.RouteInputDataType.PointBarriers\r\n else:\r\n self.logger.warning(\r\n f\"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored.\"\r\n )\r\n continue\r\n barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True)\r\n self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True)\r\n\r\n # Solve the Route analysis\r\n self.logger.debug(\"Solving Route...\")\r\n solve_start = time.time()\r\n self.solve_result = self.rt_solver.solve()\r\n solve_end = time.time()\r\n self.logger.debug(f\"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.\")\r\n\r\n # Handle solve messages\r\n solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)]\r\n for msg in solve_msgs:\r\n self.logger.debug(msg)\r\n\r\n # Update the result dictionary\r\n self.job_result[\"solveMessages\"] = solve_msgs\r\n if not self.solve_result.solveSucceeded:\r\n self.logger.debug(\"Solve failed.\")\r\n return\r\n self.logger.debug(\"Solve succeeded.\")\r\n self.job_result[\"solveSucceeded\"] = True\r\n\r\n # Save output\r\n self._export_to_feature_class(chunk_definition)\r\n\r\n self.logger.debug(\"Finished calculating Route.\")", "def savings2routes(self,r1,r2):\n newRoute = VRP_Route(r1.route+r2.route)\n newRoute.update_route(self.vrpdata) # compute distance, quantity for newRoute, check whether valid\n if newRoute.tourValid:\n return r1.distance + r2.distance - newRoute.distance\n return -1", "def get_valid_locations(location_list, grid, shape):", "def __init__(self):\n\n self._utils = utils.SearchUtils()\n self._logger = self._utils.logger\n\n self._dms_pattern = (\n r\"\\s*(\\d+)\\s*°\\s*(\\d+)\\s*(?:ʹ|′|')\\s*(\\d+\\.?\\d*)\\s*(?:ʺ|\\\"|″)\"\n r\"\\s*([EWNS])\\s*\")\n self._ddm_pattern = (\n r\"\\s*(\\d+)\\s*°\\s*(\\d+\\.?\\d*)\\s*(?:ʹ|′|')\\s*([EWNS])\\s*\")\n self._dd_pattern = r\"\\s*(-?\\d+\\.?\\d*)\\s*(?:°)?\\s*[EWNS]?\"\n self._mgrs_pattern = r\"\\s*\\d?\\d[A-Z]([A-Z][A-Z])?(\\d\\d){0,5}\\s*\"\n self._utm_pattern = (\n r\"(\\d?\\d)\\s*([A-Z])\\s*(\\d+\\.?\\d*)\\s*M\\s*E\\s*\"\n r\"\\s*(\\d+\\.?\\d*)\\s*M\\s*N\\s*\")\n\n # List of lists for storing the input pattern and types\n self._input_pattern_types = [\n [self._dms_pattern, \"DMS\"],\n [self._ddm_pattern, \"DDM\"],\n [self._utm_pattern, \"UTM\"],\n [self._mgrs_pattern, \"MGRS\"],\n [self._dd_pattern, \"DD\"]]\n\n self._valid_latitude_directions = [\"S\", \"N\"]\n self._valid_longitude_directions = [\"E\", \"W\"]\n self._dms_negative_directions = [\"S\", \"W\"]\n\n self._e1 = ((1 - math.sqrt(1- CoordinateTransform.ECC_SQUARED))/\n (1 + math.sqrt(1- CoordinateTransform.ECC_SQUARED)))\n self._e1_prime = (\n CoordinateTransform.ECC_SQUARED/(1 - CoordinateTransform.ECC_SQUARED))", "def info_for_model(stop_list, stops, route):\n\n # Need to know where the bus number 1 and 2 are\n # This if and elif were put in due to an error where the bus list for bus 1 would come up empty, but not sure if necessary\n if len(stops[0]) == 0:\n bus_1 = 
stops[1][len(stops[1]) - 1]\n elif len(stops[0]) != 0:\n bus_1 = stops[0][0]\n bus_2 = stops[1][0]\n\n # Create empty lists to hold the information for each bus\n stops_bus_1 = []\n stops_bus_2 = []\n stops_bus_3 = []\n\n # Ste bus_number to 3, we will start filling the buses from the end, the last bus first\n bus_number = 3\n\n # Populate our lists\n for i in stops[len(stops) - 1]:\n # Get the times for the buses at the given stop\n first_3_buses = get_due_time(str(i), route)\n\n if len(first_3_buses) == 0:\n # print('Something went wrong!')\n continue\n # Add in the delay\n get_delay(first_3_buses)\n\n # Have to check if the bus it at the first stop, in which case, we just say 'Starting stop' for previous_stop\n if i == stop_list[0]:\n previous_stop = 'Starting stop'\n # Else, we get the previous stop\n else:\n previous_stop = stop_list[stop_list.index(i) - 1]\n\n # If the bus is the last one, we will only append to bus_number_3\n if bus_number == 3:\n # If we reach the stop where bus number 2 is, we must append this stop to both bus_number_3 and bus_number2 and\n # decrease the bus_number counter\n if i == bus_2:\n bus_number -= 1\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n else:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n\n # Now, we keep adding bus 2 and bus 3\n elif bus_number == 2:\n # If we reach the stop where bus number 1 is, we must append this stop to both bus_number_3 and bus_number2 and\n # bus_number1 and decrease the bus_number counter\n if i == bus_1:\n bus_number -= 1\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[2]['delay'], 'arrival_hour':first_3_buses[2]['arrivaldatetime'][11:13], 'datetime':first_3_buses[2]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_1.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n else:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n\n # Here, we are now appending all the buses, until we finally reach the source stop\n elif bus_number == 1:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[2]['delay'], 'arrival_hour':first_3_buses[2]['arrivaldatetime'][11:13], 'datetime':first_3_buses[2]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 
'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_1.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n joined = [stops_bus_1, stops_bus_2, stops_bus_3]\n return joined", "def _validate_columns(data, ip_column, lat_column, long_column, other_columns):\n if not ip_column and not (lat_column and long_column):\n raise ValueError(\n \"Data must have either an IpAddress ('ip_column')\",\n \"or latitude ('lat_column') and longitude ('long_column')\",\n )\n param_cols: List[str] = []\n for param in other_columns:\n if not param:\n continue\n if isinstance(param, list):\n param_cols.extend(param)\n else:\n param_cols.append(param)\n missing_columns = {col for col in param_cols if col not in data.columns}\n if missing_columns:\n raise LookupError(\n \"The following columns are not in the supplied DataFrame\",\n \",\".join(f\"'{col}'\" for col in missing_columns),\n )", "def _validate_trajectory_transition(subgaits, from_subgait_names, to_subgait_names):\n for from_subgait_name, to_subgait_name in zip(from_subgait_names, to_subgait_names):\n\n if not all(name not in ('start', 'end', None) for name in (from_subgait_name, to_subgait_name)):\n continue # a start or end point can not be compared to a subgait\n\n from_subgait = next((subgait for subgait in subgaits if subgait.subgait_name == from_subgait_name), None)\n to_subgait = next((subgait for subgait in subgaits if subgait.subgait_name == to_subgait_name), None)\n\n if not from_subgait.validate_subgait_transition(to_subgait):\n raise NonValidGaitContent(msg='End setpoint of subgait {sn} to subgait {ns} does not match'\n .format(sn=from_subgait.subgait_name, ns=to_subgait.subgait_name))", "def validate(self) -> bool:\n required = self.crud.validate(required=True)\n if required:\n raise ValueError(\n f\"Validation error. 
Required destination fields are not present in the crosswalk: {required}\"\n )", "def testPossibleSitesLocationFlags(self):\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and no location, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": []}\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but input flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_IT_CNAF\", \"T2_CH_CERN\"]}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and one match, but pu flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [\"T1_IT_CNAF\"])\n # test element with InputDataset and one match, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n\n # test element with InputDataset and ParentData and no location, but both flags on\n ele['ParentFlag'] = True\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": []}\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset and ParentData and no location, but pileup flag on\n ele['NoInputUpdate'] = False\n ele['NoPileupUpdate'] = True\n self.assertEqual(possibleSites(ele), [])\n\n # test element with InputDataset, PileupData and ParentData with no location, but pileup flag on\n ele['Inputs'] = {\"/MY/BLOCK/NAME#73e99a52\": [\"T1_US_FNAL\", \"T2_CH_CERN\", \"T2_DE_DESY\"]}\n ele['ParentData'] = {\"/MY/BLOCK2/NAME#002590494c06\": [\"T2_DE_DESY\"]}\n ele['PileupData'] = {\"/MY/DATASET/NAME\": []}\n self.assertEqual(possibleSites(ele), [\"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but both flags on\n ele['NoInputUpdate'] = True\n self.assertItemsEqual(possibleSites(ele), [\"T1_IT_CNAF\", \"T2_DE_DESY\"])\n # test element with InputDataset, PileupData and ParentData with no location, but input flag on\n ele['NoPileupUpdate'] = False\n self.assertEqual(possibleSites(ele), [])", "def roadSegments(locations, API_key=\"Avah46_M-gfFeQ3P1w09Qq1ElAV9ZEHFDm9b8JRCRa8qPP5uVn21hDqAPVJgV4i_\"): \n \n # Base URL\n uri = 'http://dev.virtualearth.net/' # Resource URL \n path = 'REST/v1/Routes?'\n \n \n # URL Parameters\n params = { 'wayPoint.0' : locations[0]+',Singapore',\n 'wayPoint.1' : locations[1]+',Singapore',\n 'routeAttributes':'routePath',\n 'key' : API_Key} # by default 'optimize' : 'time'} # this is by default\n \n url = uri+path\n\n results = requests.get(\n url,\n params = params\n ).json()# ['resourceSets']\n\n # Retrieving values\n statusCode = results['statusCode']\n if statusCode == 200:\n # print(statusCode)\n\n # TODO review the exceptions and modify these basic exception handlings\n try:\n travelDistance = results['resourceSets'][0]['resources'][0]['travelDistance']\n except:\n travelDistance = 0\n try:\n travelDuration = results['resourceSets'][0]['resources'][0]['travelDuration']\n except:\n travelDuration = 0\n try:\n travelDurationTraffic = results['resourceSets'][0]['resources'][0]['travelDurationTraffic']\n except:\n travelDurationTraffic = 0\n\n try:\n numberSegments = 
len(results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems'])\n except:\n numberSegments = 0\n try:\n itineraryItems = results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems']\n except:\n itineraryItems = 'No items'\n\n pathCoord = results['resourceSets'][0]['resources'][0]['routePath']['line']['coordinates']\n\n roadName = []\n travelDistances = []\n travelDurations = []\n maneuverType = []\n\n for seg in itineraryItems:\n for i in range(len(seg['details'])):\n # print(i)\n try:\n roadName.append(seg['details'][i]['names'])\n except:\n roadName.append(0)\n try:\n travelDistances.append(seg['travelDistance'])\n except:\n travelDistances.append(0)\n\n try:\n travelDurations.append(seg['travelDuration'])\n except:\n travelDurations.append(0)\n try:\n maneuverType.append(seg['details'][i]['maneuverType'])\n except:\n maneuverType.append(0)\n\n\n return statusCode,travelDistance,travelDuration,travelDurationTraffic,numberSegments,roadName, \\\n travelDistances, travelDurations, maneuverType, pathCoord\n\n else:\n print(\"Unsuccessful route calculation.\")", "def __init__(self, route_idx=None, arr_date=None, dep_time=None, lon=None, dep_date=None, track=None, rt_track=None, id=None, rt_dep_time=None, rt_arr_time=None, name=None, arr_time=None, lat=None, rt_dep_date=None, rt_arr_date=None):\n\n self._route_idx = None\n self._arr_date = None\n self._dep_time = None\n self._lon = None\n self._dep_date = None\n self._track = None\n self._rt_track = None\n self._id = None\n self._rt_dep_time = None\n self._rt_arr_time = None\n self._name = None\n self._arr_time = None\n self._lat = None\n self._rt_dep_date = None\n self._rt_arr_date = None\n\n self.route_idx = route_idx\n if arr_date is not None:\n self.arr_date = arr_date\n if dep_time is not None:\n self.dep_time = dep_time\n self.lon = lon\n if dep_date is not None:\n self.dep_date = dep_date\n self.track = track\n if rt_track is not None:\n self.rt_track = rt_track\n self.id = id\n if rt_dep_time is not None:\n self.rt_dep_time = rt_dep_time\n if rt_arr_time is not None:\n self.rt_arr_time = rt_arr_time\n self.name = name\n if arr_time is not None:\n self.arr_time = arr_time\n self.lat = lat\n if rt_dep_date is not None:\n self.rt_dep_date = rt_dep_date\n if rt_arr_date is not None:\n self.rt_arr_date = rt_arr_date", "def _check_polyline(x_coords_metres, y_coords_metres):\n\n error_checking.assert_is_numpy_array_without_nan(x_coords_metres)\n error_checking.assert_is_numpy_array(x_coords_metres, num_dimensions=1)\n num_vertices = len(x_coords_metres)\n\n error_checking.assert_is_numpy_array_without_nan(y_coords_metres)\n error_checking.assert_is_numpy_array(\n y_coords_metres, exact_dimensions=numpy.array([num_vertices]))", "def coalesce_helper(self, netmask, routes):\n # decrement netmask by 1\n netmask = netmask - 1\n routes_checked = []\n # iterate through every route combination\n for r1 in routes:\n for r2 in routes:\n # skip if routes are same, already checked, or source IPs arent matching\n if r1 == r2 or r2 in routes_checked or r1[SRC_IF] != r2[SRC_IF]:\n continue\n # calculate route 1's network prefix and pad until 32 bits\n r1_network = self.ip_to_binary(r1[MESG][NTWK])\n r1_netmask = self.ip_to_binary(r1[MESG][NMSK]).count('1')\n r1_prefix = self.pad_end(r1_network[:r1_netmask], '0', IP4_BIT_MAX)\n # calculate route 2's network prefix and pad until 32 bits\n r2_network = self.ip_to_binary(r2[MESG][NTWK])\n r2_netmask = self.ip_to_binary(r2[MESG][NMSK]).count('1')\n r2_prefix = 
self.pad_end(r2_network[:r2_netmask], '0', IP4_BIT_MAX)\n # check if same next-hop router\n if r1_prefix[:netmask] == r2_prefix[:netmask]:\n # check if other attributes are matching\n if (r1[MESG][LPRF] == r2[MESG][LPRF] and r1[MESG][ORIG] == r2[MESG][ORIG]\n and r1[MESG][APTH] == r2[MESG][APTH] and r1[MESG][SORG] == r2[MESG][SORG]\n and r1[MESG][SORG] == r2[MESG][SORG]):\n # remove unnecessary route\n self.routes.remove(r2)\n # update netmask for keeper route\n base_bits = self.pad_end('', '1', netmask)\n final_bits = self.pad_end(base_bits, '0', IP4_BIT_MAX)\n r1[MESG][NMSK] = self.binary_to_ip(final_bits)\n #Check if the new netmask is able to aggregate with another\n recheck_routes = []\n r1_nmsk = final_bits.count('1')\n for r in self.routes:\n r_nmsk = self.ip_to_binary(r[MESG][NMSK]).count('1')\n if r1_nmsk == r_nmsk:\n recheck_routes.append(r)\n recheck_routes.append(r1)\n self.coalesce_helper(r_nmsk, recheck_routes)\n # done checking, add to routes checked list\n routes_checked.append(r1)", "def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):\n \n waypoint_co2 = {}\n waypoint_durations = {}\n\n # get all prefectures referential\n db_connector = Connector()\n with db_connector:\n results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)\n all_waypoints = pd.DataFrame(results.fetchall())\n\n # Vérification si les trajets péfecture à préfecture ont été déjà calculés\n db_connector = Connector()\n with db_connector:\n saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n # Dans le précalcul des trajets optimaux, utilisation de la date courante\n travel_date = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n bad_waypoints = []\n\n if saved_waypoints.rowcount > 0 and not is_force_compute:\n print(\"le référentiel des voyage existe déjà\")\n else:\n try:\n bdd_management.truncate_journey()\n\n for (from_city, to_city) in combinations(all_waypoints[0].values, 2):\n try:\n if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:\n continue\n\n route = requests.get(API_NAVITIA.format(\n int(from_city), int(to_city), travel_date, API_KEY))\n response = json.loads(route.text)\n\n mid_duration = 0\n mid_co2 = 0\n for journey in response[\"journeys\"]:\n mid_duration += journey[\"duration\"]\n mid_co2 += journey[\"co2_emission\"][\"value\"]\n\n waypoint_co2[frozenset([from_city, to_city])\n ] = mid_co2/len(response[\"journeys\"])\n waypoint_durations[frozenset(\n [from_city, to_city])] = mid_duration/len(response[\"journeys\"])\n\n except Exception as e:\n print(\"Error with finding the route between %s and %s : %s\" %\n (from_city, to_city, response[\"error\"][\"message\"]))\n if 'no destination point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(to_city))\n\n if 'no origin point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(from_city))\n\n for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response[\"error\"][\"message\"]):\n if not int(bad_insee_code) in bad_waypoints:\n bad_waypoints.append(int(bad_insee_code))\n\n # Enregistrement des trajets point à point (préfecture à préfecture)\n db_connector = Connector()\n with db_connector:\n for (waypoint1, waypoint2) in waypoint_co2.keys():\n waypoint = [waypoint1,\n waypoint2,\n str(waypoint_co2[frozenset([waypoint1, waypoint2])]),\n str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]\n \n db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)\n # commit trajets unitaires 
dans la bdd\n db_connector.commit()\n\n # enregistrement des préfectures non trouvée (pas de gare)\n print(bad_waypoints)\n db_connector = Connector()\n with db_connector:\n for bad_city in bad_waypoints:\n db_connector.execute_nonquery(\n sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))\n #db_connector.commit()\n except Exception as e:\n print('Erreur durant la génération des trajets de préfecture en préfecture. Rollback effectué')\n\n waypoint_co2 = {}\n waypoint_durations = {}\n processed_waypoints = set()\n\n db_connector = Connector()\n with db_connector:\n waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n for row in waypoints:\n waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]\n waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]\n processed_waypoints.update([row[0], row[1]])\n\n travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )\n\n # take most represented trip order\n journey_groups = Counter(chain(*travel_results))\n top_journeys = journey_groups.most_common(1)[0][0]\n\n print('Le voyage le plus représentatif est :')\n print(top_journeys)\n\n # calcul des horaires de voyage réels pour le trajet le plus optimisé\n\n print('Départ du calcul du voyage le %s' %\n (datetime_str_to_datetime_str(trip_start_date)))\n travel_date = trip_start_date\n\n db_connector = Connector()\n with db_connector:\n try:\n #vidage de la table contenant les informations du voyage\n bdd_management.truncate_roadtrip()\n\n for i in range(len(top_journeys)-1):\n try:\n from_city_insee = top_journeys[i]\n to_city_insee = top_journeys[i+1]\n route = requests.get(API_NAVITIA.format(\n int(from_city_insee), int(to_city_insee), travel_date, API_KEY))\n travels = json.loads(route.text)\n\n # Contrôle des voyage reçus pour identifier le plus adapté à recherche\n best_travel = travels[\"journeys\"][0]\n for travel in travels[\"journeys\"]:\n if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):\n best_travel = travel\n if best_travel['arrival_date_time'] > travel['arrival_date_time']:\n best_travel = travel\n\n # sauvegarde du trajet 'i' en base\n save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)\n\n # le prochain trajet devra avoir une date de départ > à la date de ce trajet\n travel_date = best_travel['arrival_date_time']\n\n except Exception as e:\n print(\"!! Erreur durant le calcul du trajet entre '%s' et '%s'\" %\n (from_city_insee, to_city_insee))\n\n #Ecriture du résumé du voyage\n resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)\n resume = resume.fetchone()\n\n resume_description = \"\"\"Début du voyage le {} . Arrivée le {}. \n Le voyage à durée {} pour un total de {:d} kgeC\"\"\".format(\n datetime_str_to_datetime_str(trip_start_date),\n datetime_str_to_datetime_str(travel_date),\n str(timedelta(seconds=resume[0])) ,\n trunc( resume[1]/1000))\n\n store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])\n\n db_connector.commit()\n\n except Exception as e:\n db_connector.rollback()\n print('Erreur durant la création du voyage. rollback effectué!!!')\n\n print('print map with road-trip data')\n visualization.generate_visualization()\n\n print('Travel complete. 
Have nive trip!!!')", "def analyseCoordination(self):\n #create a list of criteria that correspond to maximal path length\n #max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))", "def _parse_departures(self, data, stop, servernow):\n servernow.replace(second=0, microsecond=0)\n results = []\n departures = data.findall('./itdDeparture')\n for departure in departures:\n # Get Line Information\n origin, destination, line, ridenum, ridedir, canceled = self._parse_mot(departure.find('./itdServingLine'))\n\n if departure.find('./genAttrList/genAttrElem[value=\"HIGHSPEEDTRAIN\"]') is not None:\n line.linetype = LineType('train.longdistance.highspeed')\n elif departure.find('./genAttrList/genAttrElem[value=\"LONG_DISTANCE_TRAINS\"]') is not None:\n line.linetype = LineType('train.longdistance')\n\n # if ridenum is None:\n # ridedata = departure.find('./itdServingTrip')\n # if ridedata is not None:\n # ridenum = ridedata.attrib.get('tripCode', None)\n # if ridenum is not None:\n # 
ridenum = ridenum.strip()\n\n # Build Ride Objekt with known stops\n ride = Ride(line, ridenum)\n ride.direction = ridedir\n ride.canceled = canceled\n\n train_line = line.linetype in self.train_station_lines\n\n # todo: take delay and add it to next stops\n mypoint = self._parse_trip_point(departure, train_line=train_line)\n\n before_delay = None\n if mypoint.arrival:\n before_delay = mypoint.arrival.delay\n after_delay = None\n if mypoint.departure:\n after_delay = mypoint.departure.delay\n\n delay = None\n if departure.find('./itdServingLine/itdNoTrain'):\n delay = departure.find('./itdServingLine/itdNoTrain').attrib.get('delay', None)\n if delay is not None:\n delay = timedelta(minutes=delay)\n\n if delay is not None:\n if (mypoint.arrival and servernow < mypoint.arrival.livetime) or (mypoint.departure and servernow < mypoint.departure.livetime):\n before_delay = delay\n else:\n after_delay = delay\n\n prevs = False\n for pointdata in departure.findall('./itdPrevStopSeq/itdPoint'):\n point = self._parse_trip_point(pointdata, train_line=train_line)\n if point is not None:\n if before_delay is not None:\n if point.arrival is not None and point.arrival.delay is None and point.arrival.time + before_delay >= servernow:\n point.arrival.delay = before_delay\n if point.departure is not None and point.departure.delay is None and point.departure.time + before_delay >= servernow:\n point.departure.delay = before_delay\n prevs = True\n ride.append(point)\n\n pointer = ride.append(mypoint)\n\n onwards = False\n for pointdata in departure.findall('./itdOnwardStopSeq/itdPoint'):\n point = self._parse_trip_point(pointdata, train_line=train_line)\n if point is not None:\n if after_delay is not None:\n if point.arrival is not None and point.arrival.delay is None and point.arrival.time + after_delay >= servernow:\n point.arrival.delay = after_delay\n if point.departure is not None and point.departure.delay is None and point.departure.time + after_delay >= servernow:\n point.departure.delay = after_delay\n onwards = True\n ride.append(point)\n\n if not prevs and not onwards:\n ride.prepend(None)\n if origin is not None:\n ride.prepend(TimeAndPlace(Platform(origin)))\n\n ride.append(None)\n if destination is not None:\n ride.append(TimeAndPlace(Platform(destination)))\n\n # Return RideSegment from the Station we depart from on\n results.append(ride[pointer:])\n return Ride.Results(results)" ]
[ "0.6113399", "0.5643113", "0.54353887", "0.5379565", "0.5359705", "0.53044266", "0.5218753", "0.5165528", "0.51130545", "0.5098658", "0.5056272", "0.5042018", "0.50327575", "0.5018349", "0.50022423", "0.49981675", "0.49830243", "0.49737117", "0.49564588", "0.4953842", "0.4924345", "0.4917949", "0.49014866", "0.48645994", "0.4860535", "0.4841463", "0.48041278", "0.47955987", "0.47941554", "0.47916156", "0.47796628", "0.4764645", "0.47489432", "0.4743729", "0.47377113", "0.4730246", "0.47172463", "0.47106752", "0.47071373", "0.47024453", "0.4700851", "0.46986845", "0.46893647", "0.4660571", "0.46493804", "0.46454594", "0.46424076", "0.46366444", "0.4631169", "0.46273604", "0.46260473", "0.4621468", "0.4608468", "0.4596233", "0.45948556", "0.45942777", "0.45890737", "0.4582906", "0.45815352", "0.4581336", "0.45637", "0.4558393", "0.455638", "0.45503503", "0.4546487", "0.4541943", "0.4533546", "0.45312437", "0.4530331", "0.4529895", "0.45248944", "0.45241934", "0.45241934", "0.45241714", "0.4521176", "0.4518772", "0.4517988", "0.45164606", "0.45139936", "0.4509129", "0.45084268", "0.45070317", "0.45033896", "0.45005926", "0.4498953", "0.4497077", "0.4496484", "0.44942656", "0.4486236", "0.44772097", "0.44752535", "0.4473414", "0.4470767", "0.44618616", "0.4460617", "0.445731", "0.44548145", "0.44524792", "0.44514057", "0.44464836" ]
0.69479537
0
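A record's document_score and negative_scores make the intended ranking directly checkable: document_rank 0 means the positive document outscored every mined negative. The following minimal Python sketch of that check is illustrative only (the optional margin is an assumption; the dataset itself imposes none):

def positive_outranks_negatives(document_score, negative_scores, margin=0.0):
    # document_rank 0 holds when the positive document scores above every
    # negative; a positive margin additionally enforces a separation gap.
    return all(float(document_score) - float(s) > margin for s in negative_scores)

# For the record above: 0.69479537 against a best negative of 0.6113399 -> True.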
Solve the Route in chunks and postprocess the results.
def solve_route_in_parallel(self):\r\n        # Validate Route settings. Essentially, create a dummy Route class instance and set up the\r\n        # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel\r\n        # processes that are guaranteed to all fail.\r\n        self._validate_route_settings()\r\n\r\n        # Check if the input origins and destinations have any fields we should use in the route analysis\r\n        self._populate_input_data_transfer_fields()\r\n\r\n        # Compute Route in parallel\r\n        LOGGER.info(f"Beginning parallelized Route solves ({self.total_jobs} chunks)")\r\n        completed_jobs = 0  # Track the number of jobs completed so far to use in logging\r\n        # Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes that solve the routes\r\n        with futures.ProcessPoolExecutor(max_workers=self.max_processes) as executor:\r\n            # Each parallel process calls the solve_route() function with the rt_inputs dictionary for the\r\n            # given origin ranges and their assigned destinations.\r\n            jobs = {executor.submit(solve_route, self.rt_inputs, range): range for range in self.chunks}\r\n            # As each job is completed, add some logging information and store the results to post-process later\r\n            for future in futures.as_completed(jobs):\r\n                try:\r\n                    # The Route job returns a results dictionary. Retrieve it.\r\n                    result = future.result()\r\n                except Exception:  # pylint: disable=broad-except\r\n                    # If we couldn't retrieve the result, some terrible error happened and the job errored.\r\n                    # Note: This does not mean solve failed. It means some unexpected error was thrown. The most likely\r\n                    # causes are:\r\n                    # a) If you're calling a service, the service was temporarily down.\r\n                    # b) You had a temporary file read/write or resource issue on your machine.\r\n                    # c) If you're actively updating the code, you introduced an error.\r\n                    # To make the tool more robust against temporary glitches, retry submitting the job up to the number\r\n                    # of times designated in helpers.MAX_RETRIES. If the job is still erroring after that many retries,\r\n                    # fail the entire tool run.\r\n                    errs = traceback.format_exc().splitlines()\r\n                    failed_range = jobs[future]\r\n                    LOGGER.debug((\r\n                        f"Failed to get results for Route chunk {failed_range} from the parallel process. Will retry "\r\n                        f"up to {helpers.MAX_RETRIES} times. Errors: {errs}"\r\n                    ))\r\n                    job_failed = True\r\n                    num_retries = 0\r\n                    while job_failed and num_retries < helpers.MAX_RETRIES:\r\n                        num_retries += 1\r\n                        try:\r\n                            future = executor.submit(solve_route, self.rt_inputs, failed_range)\r\n                            result = future.result()\r\n                            job_failed = False\r\n                            LOGGER.debug(f"Route chunk {failed_range} succeeded after {num_retries} retries.")\r\n                        except Exception:  # pylint: disable=broad-except\r\n                            # Update exception info to the latest error\r\n                            errs = traceback.format_exc().splitlines()\r\n                    if job_failed:\r\n                        # The job errored and did not succeed after retries. Fail the tool run because something\r\n                        # terrible is happening.\r\n                        LOGGER.debug(f"Route chunk {failed_range} continued to error after {num_retries} retries.")\r\n                        LOGGER.error("Failed to get Route result from parallel processing.")\r\n                        errs = traceback.format_exc().splitlines()\r\n                        for err in errs:\r\n                            LOGGER.error(err)\r\n                        raise\r\n\r\n                # If we got this far, the job completed successfully and we retrieved results.\r\n                completed_jobs += 1\r\n                LOGGER.info(\r\n                    f"Finished Route calculation {completed_jobs} of {self.total_jobs}.")\r\n\r\n                # Parse the results dictionary and store components for post-processing.\r\n                if result["solveSucceeded"]:\r\n                    self.route_fcs.append(result["outputRoutes"])\r\n                else:\r\n                    # Typically, a solve fails because no destinations were found for any of the origins in the chunk,\r\n                    # and this is a perfectly legitimate failure. It is not an error. However, there may be other, less\r\n                    # likely, reasons for solve failure. Write solve messages to the main GP message thread in debug\r\n                    # mode only in case the user is having problems. The user can also check the individual OD log\r\n                    # files.\r\n                    LOGGER.debug(f"Solve failed for job id {result['jobId']}.")\r\n                    LOGGER.debug(result["solveMessages"])\r\n\r\n        # Post-process outputs\r\n        if self.route_fcs:\r\n            LOGGER.info("Post-processing Route results...")\r\n            self.route_fcs = sorted(self.route_fcs)\r\n            self._post_process_route_fcs()\r\n        else:\r\n            LOGGER.warning("All Route solves failed, so no output was produced.")\r\n\r\n        # Clean up\r\n        # Delete the job folders if the job succeeded\r\n        if DELETE_INTERMEDIATE_OUTPUTS:\r\n            LOGGER.info("Deleting intermediate outputs...")\r\n            try:\r\n                shutil.rmtree(self.scratch_folder, ignore_errors=True)\r\n            except Exception:  # pylint: disable=broad-except\r\n                # If deletion doesn't work, just throw a warning and move on. This does not need to kill the tool.\r\n                LOGGER.warning(f"Unable to delete intermediate Route output folder {self.scratch_folder}.")\r\n\r\n        LOGGER.info("Finished calculating Routes.")
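The document above drives its chunked solve-and-retry loop entirely through the standard-library concurrent.futures API. The following self-contained Python sketch isolates that same submit / as_completed / resubmit-on-failure pattern; solve_chunk, the retry budget, and the dummy chunk payloads are illustrative assumptions, not part of this record:

from concurrent import futures

MAX_RETRIES = 3  # assumed retry budget, standing in for helpers.MAX_RETRIES above

def solve_chunk(chunk):
    # Stand-in for the real per-chunk solver; returns a results dictionary
    # shaped like the one consumed above.
    return {"solveSucceeded": True, "outputRoutes": f"routes_{chunk[0]}_{chunk[1]}"}

def solve_in_parallel(chunks, max_processes=4):
    route_fcs = []
    with futures.ProcessPoolExecutor(max_workers=max_processes) as executor:
        jobs = {executor.submit(solve_chunk, chunk): chunk for chunk in chunks}
        for future in futures.as_completed(jobs):
            chunk = jobs[future]
            for attempt in range(MAX_RETRIES + 1):
                try:
                    result = future.result()
                    break  # got a result; stop retrying
                except Exception:
                    if attempt == MAX_RETRIES:
                        raise  # still erroring after all retries: fail the run
                    future = executor.submit(solve_chunk, chunk)  # resubmit the failed chunk
            if result["solveSucceeded"]:
                route_fcs.append(result["outputRoutes"])
    return sorted(route_fcs)  # post-process: make the output order deterministic

if __name__ == "__main__":
    print(solve_in_parallel([(1, 100), (101, 200), (201, 300)]))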
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_route(inputs, chunk):\r\n rt = Route(**inputs)\r\n if inputs[\"pair_type\"] is helpers.PreassignedODPairType.one_to_one:\r\n rt.logger.info(f\"Processing origins OID {chunk[0]} to {chunk[1]} as job id {rt.job_id}\")\r\n elif inputs[\"pair_type\"] is helpers.PreassignedODPairType.many_to_many:\r\n rt.logger.info(f\"Processing chunk {chunk[0]} as job id {rt.job_id}\")\r\n rt.solve(chunk)\r\n rt.teardown_logger()\r\n return rt.job_result", "def solve(self, chunk_definition): # pylint: disable=too-many-locals, too-many-statements, too-many-branches\r\n # Select the inputs to process\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._select_inputs_one_to_one(chunk_definition)\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._get_od_pairs_for_chunk(chunk_definition)\r\n self._select_inputs_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n # Initialize the Route solver object\r\n self.initialize_rt_solver()\r\n self._add_unique_id_fields()\r\n\r\n # Insert the origins and destinations\r\n self.logger.debug(f\"Route solver fields transferred from Origins: {self.origin_transfer_fields}\")\r\n self.logger.debug(f\"Route solver fields transferred from Destinations: {self.destination_transfer_fields}\")\r\n if self.pair_type is helpers.PreassignedODPairType.one_to_one:\r\n self._insert_stops_one_to_one()\r\n elif self.pair_type is helpers.PreassignedODPairType.many_to_many:\r\n self._insert_stops_many_to_many()\r\n else:\r\n raise NotImplementedError(f\"Invalid PreassignedODPairType: {self.pair_type}\")\r\n\r\n if self.rt_solver.count(arcpy.nax.RouteInputDataType.Stops) == 0:\r\n # There were no valid destinations for this set of origins\r\n self.logger.debug(\"No valid destinations for this set of origins. Skipping Route calculation.\")\r\n return\r\n\r\n # Load barriers\r\n # Note: This loads ALL barrier features for every analysis, even if they are very far away from any of\r\n # the inputs in the current chunk. You may want to select only barriers within a reasonable distance of the\r\n # inputs, particularly if you run into the maximumFeaturesAffectedByLineBarriers,\r\n # maximumFeaturesAffectedByPointBarriers, and maximumFeaturesAffectedByPolygonBarriers tool limits for portal\r\n # solves. 
However, since barriers is likely an unusual case, deal with this only if it becomes a problem.\r\n for barrier_fc in self.barriers:\r\n self.logger.debug(f\"Loading barriers feature class {barrier_fc}...\")\r\n shape_type = arcpy.Describe(barrier_fc).shapeType\r\n if shape_type == \"Polygon\":\r\n class_type = arcpy.nax.RouteInputDataType.PolygonBarriers\r\n elif shape_type == \"Polyline\":\r\n class_type = arcpy.nax.RouteInputDataType.LineBarriers\r\n elif shape_type == \"Point\":\r\n class_type = arcpy.nax.RouteInputDataType.PointBarriers\r\n else:\r\n self.logger.warning(\r\n f\"Barrier feature class {barrier_fc} has an invalid shape type and will be ignored.\"\r\n )\r\n continue\r\n barriers_field_mappings = self.rt_solver.fieldMappings(class_type, True)\r\n self.rt_solver.load(class_type, barrier_fc, barriers_field_mappings, True)\r\n\r\n # Solve the Route analysis\r\n self.logger.debug(\"Solving Route...\")\r\n solve_start = time.time()\r\n self.solve_result = self.rt_solver.solve()\r\n solve_end = time.time()\r\n self.logger.debug(f\"Solving Route completed in {round(solve_end - solve_start, 3)} seconds.\")\r\n\r\n # Handle solve messages\r\n solve_msgs = [msg[-1] for msg in self.solve_result.solverMessages(arcpy.nax.MessageSeverity.All)]\r\n for msg in solve_msgs:\r\n self.logger.debug(msg)\r\n\r\n # Update the result dictionary\r\n self.job_result[\"solveMessages\"] = solve_msgs\r\n if not self.solve_result.solveSucceeded:\r\n self.logger.debug(\"Solve failed.\")\r\n return\r\n self.logger.debug(\"Solve succeeded.\")\r\n self.job_result[\"solveSucceeded\"] = True\r\n\r\n # Save output\r\n self._export_to_feature_class(chunk_definition)\r\n\r\n self.logger.debug(\"Finished calculating Route.\")", "def run(self):\n\n self._get_routes()\n self._calculate_emissions()", "def _route_chunk(data, host_url, annotations='duration', retries=10, extra_params=None):\n\t# offsets are used to make correct indice of the result dataframe\n\tsources, destinations, sources_offset, destinations_offset = data\n\tsources_count = len(sources)\n\tdestinations_count = len(destinations)\n\n\t# OSRM takes all points as one list, and then numbers of sources & dests in it\n\tall_points = sources + destinations\n\tencoded = encode_poly([(p.y, p.x) for p in all_points])\n\n\t# numerate sources & dests. sources come first\n\tsource_numbers = ';'.join(map(str, range(sources_count)))\n\tdestination_numbers = ';'.join(map(str,\n\t\trange(sources_count, sources_count + destinations_count)))\n\n\n\textra_params = extra_params or {}\n\tparams = {\n\t\t'sources': source_numbers,\n\t\t'destinations': destination_numbers,\n\t\t'generate_hints': 'false',\n\t\t'annotations': annotations,\n\t\t**extra_params\n\t}\n\n\tencoded_params = urllib.parse.quote_plus(urllib.parse.urlencode(params))\n\t# if we pass url and params separately to requests.get, it will make a malformed URL\n\tencoded_url = f'{host_url}/table/v1/driving/polyline({encoded})?{encoded_params}'\n\tresp = get_retry(encoded_url, {}, retries)\n\n\tif resp.status_code != 200:\n\t\traise RuntimeError(f'OSRM server responded with {resp.status_code} code. 
Content: {resp.content}')\n\n\tresp_data = resp.json()\n\tif resp_data.get('code', 'Ok') != 'Ok':\n\t\traise RuntimeError(f'OSRM server responded with error message: {resp_data[\"message\"]}')\n\n\t# if 'duration' is requested, then take resp_data['durations'], or resp_data['distances'] if distances.\n\t# also, 'duration,distance' might be requested, then take both and concatenate results (= join columns)\n\tresults = []\n\t\n\tfor key in annotations.split(','):\n\t\tdf = pd.DataFrame(resp_data[f'{key}s']).reset_index().rename(columns={'index': 'source'}).melt(id_vars='source', var_name='destination', value_name=key)\n\t\tdf[key] = df[key].astype(float)\n\t\tif len(results) > 0:\n\t\t\t# only append the data column\n\t\t\tresults.append(df[[key]])\n\t\telse:\n\t\t\tresults.append(df)\n\n\tresult_df = pd.concat(results, axis=1)\n\n\t# snapping distances\n\tresult_df['source_snap'] = result_df.source.map(pd.DataFrame(resp_data['sources'])['distance'])\n\tresult_df['destination_snap'] = result_df.destination.map(pd.DataFrame(resp_data['destinations'])['distance'])\n\n\t# instead of join/merge lookup\n\tresult_df['geometry'] = result_df['source'].map({i: g for i, g in enumerate(sources)})\n\tresult_df['geometry_dest'] = result_df['destination'].map({i: g for i, g in enumerate(destinations)})\n\n\t# shift back by the given offset\n\tresult_df['destination'] = result_df['destination'].astype(int) + destinations_offset\n\tresult_df['source'] = result_df['source'].astype(int) + sources_offset\n\treturn result_df", "def completion_processor(self):\n while True:\n _ = (yield)\n self.solve_completed = True", "def __routes(self, with_return):\n nonzeo_pois = list(filter(None, self.pois.keys()))\n\n for path in itertools.permutations(nonzeo_pois):\n steps = self.poi_distance(0, path[0])\n for i, j in zip(path, path[1:]):\n steps += self.poi_distance(i, j)\n if with_return:\n steps += self.poi_distance(path[-1], 0)\n yield steps", "def process(self):\n try:\n if not self._successor:\n return self.loading_strategy()\n else:\n return self._successor.process_next(self.loading_strategy())\n except Exception as e:\n Oprint.err(e, 'lmdo')", "def dochunk( request, chunk=None, trailers=None ):", "def execute_waypoint_sequence(detail_of_trip):\n\n # rets (route_line, line_points)\n sliced_route_and_line_points = chunk_user_route(detail_of_trip)\n\n sliced_route = sliced_route_and_line_points[0]\n line_points = sliced_route_and_line_points[1]\n\n # Interpolate/Break into 1/10 segments\n segmented_points = interpolate_points(sliced_route, line_points)\n waypoints = find_crime_areas(segmented_points)\n\n # print \"segmented_points\", json.dumps(segmented_points, indent=2)\n print \"\\n\\n\\n\\n\" # compensating for the giant GET request\n return waypoints", "def process(self, results):\n raise NotImplementedError", "def request_chunk(self, x, z):\n\n if (x, z) in self.chunk_cache:\n returnValue(self.chunk_cache[x, z])\n elif (x, z) in self.dirty_chunk_cache:\n returnValue(self.dirty_chunk_cache[x, z])\n elif (x, z) in self._pending_chunks:\n # Rig up another Deferred and wrap it up in a to-go box.\n retval = yield self._pending_chunks[x, z].deferred()\n returnValue(retval)\n\n chunk = Chunk(x, z)\n yield maybeDeferred(self.serializer.load_chunk, chunk)\n\n if chunk.populated:\n self.chunk_cache[x, z] = chunk\n self.postprocess_chunk(chunk)\n #self.factory.scan_chunk(chunk)\n returnValue(chunk)\n\n if self.async:\n from ampoule import deferToAMPProcess\n from bravo.remote import MakeChunk\n\n d = 
deferToAMPProcess(MakeChunk,\n x=x,\n z=z,\n seed=self.seed,\n generators=configuration.getlist(self.config_name, \"generators\")\n )\n\n # Get chunk data into our chunk object.\n def fill_chunk(kwargs):\n chunk.blocks = fromstring(kwargs[\"blocks\"],\n dtype=uint8).reshape(chunk.blocks.shape)\n chunk.heightmap = fromstring(kwargs[\"heightmap\"],\n dtype=uint8).reshape(chunk.heightmap.shape)\n chunk.metadata = fromstring(kwargs[\"metadata\"],\n dtype=uint8).reshape(chunk.metadata.shape)\n chunk.skylight = fromstring(kwargs[\"skylight\"],\n dtype=uint8).reshape(chunk.skylight.shape)\n chunk.blocklight = fromstring(kwargs[\"blocklight\"],\n dtype=uint8).reshape(chunk.blocklight.shape)\n\n return chunk\n d.addCallback(fill_chunk)\n else:\n # Populate the chunk the slow way. :c\n for stage in self.pipeline:\n stage.populate(chunk, self.seed)\n\n chunk.regenerate()\n d = succeed(chunk)\n\n # Set up our event and generate our return-value Deferred. It has to\n # be done early becaues PendingEvents only fire exactly once and it\n # might fire immediately in certain cases.\n pe = PendingEvent()\n # This one is for our return value.\n retval = pe.deferred()\n # This one is for scanning the chunk for automatons.\n #pe.deferred().addCallback(self.factory.scan_chunk)\n self._pending_chunks[x, z] = pe\n\n def pp(chunk):\n chunk.populated = True\n chunk.dirty = True\n\n self.postprocess_chunk(chunk)\n\n self.dirty_chunk_cache[x, z] = chunk\n del self._pending_chunks[x, z]\n\n return chunk\n\n # Set up callbacks.\n d.addCallback(pp)\n d.chainDeferred(pe)\n\n # Because multiple people might be attached to this callback, we're\n # going to do something magical here. We will yield a forked version\n # of our Deferred. This means that we will wait right here, for a\n # long, long time, before actually returning with the chunk, *but*,\n # when we actually finish, we'll be ready to return the chunk\n # immediately. 
Our caller cannot possibly care because they only see a\n # Deferred either way.\n retval = yield retval\n returnValue(retval)", "def run(self):\n for direction in self.directions:\n rotation = direction[0]\n steps = direction[1]\n\n self.make_rotation(rotation)\n hq_found = self.travel(steps)\n\n if hq_found:\n return (abs(self.new_loc[0] + self.new_loc[1]))", "def run(self):\n while True:\n path, params = self.path_queue.get()\n errors = check_path(path, **params)\n self.result_queue.put(errors)\n self.path_queue.task_done()", "def __call__(self):\n dv = None\n #Push as many queued calls as the self.max_batch_size and the max number of paralel HTTPS sessions allow for.\n while self.active_call_count < self.parallel and self.queue:\n #Get a chunk of entries from the command queue so we can make a batch.\n subqueue = self.queue[:self.max_batch_size]\n self.queue = self.queue[self.max_batch_size:]\n #Send a single batch to the currently selected RPC node.\n dv = self._process_batch(subqueue)\n #If there is nothing left to do, there is nothing left to do\n if not self.queue and self.active_call_count == 0:\n self.log.error(\"Queue is empty and no active HTTPS-POSTs remaining.\")\n if self.stop_when_empty:\n #On request, stop reactor when queue empty while no active queries remain.\n self.reactor.stop() \n return dv", "def run(self) -> Any:\n self.prepare()\n for step in self.stream:\n self.output = step\n return self.output", "def _calculate_emissions(self):\n parameters = self._get_pollutants_for_vehicle()\n\n self.routes = RouteSet()\n\n if \"routes\" not in self._json_data:\n log.debug(\"Error in returned JSON data from web service.\")\n log.debug(\"data: {}\".format(self._json_data))\n return\n\n # Create a \"set\" of Routes. The planner web service will\n # return 2-4 routes with different paths.\n for idx, r in enumerate(self._json_data[\"routes\"][\"features\"]):\n attributes = r.get(\"attributes\")\n route = Route(distance=attributes.get(\"Total_Meters\"),\n minutes=attributes.get(\"Total_Minutes\"),\n path=r.get(\"geometry\").get(\"paths\")[0], id = idx)\n self.routes.add(route)\n\n log.debug(\"Nr of routes: {}\".format(len(self.routes)))\n for i, route in enumerate(self.routes):\n # A list of x,y,z points that all together represents the route\n path_coordinates = route.path\n distances = []\n\n # Nifty little trick to loop over 'path_coordinates',\n # but keep a reference to the 'prev' item to calculate the\n # distance between them\n iter_points = iter(path_coordinates)\n prev = next(iter_points)\n for point in path_coordinates:\n if not distances:\n # first point\n distances.append(Planner._get_distance_3d(prev, point) / 1000)\n else:\n distances.append(distances[-1] + Planner._get_distance_3d(prev, point) / 1000)\n\n point_slope = Planner._get_slope(prev, point)\n\n # Calculate emission for each pollutants the user has asked for\n for p in self._pollutants:\n parms = [x for x in parameters if x.pollutant.name.startswith(p)]\n calc_emission = self.get_emission(parms, point_slope)\n route.add_pollutant(p, calc_emission)\n\n prev = point\n\n route.add_distances(distances)", "def processTradeRoutes(self):\n try:\n nextRound = self.currentRound+1\n resultslist = []\n for trID in self.tradeRoutes.keys():\n myTradeRoute = self.tradeRoutes[trID]\n (systemFromID, systemToID, tradeRouteType) = string.split(trID, '-')\n systemFrom = self.systems[systemFromID]\n systemTo = self.systems[systemToID]\n cancel = 0\n warpReq = 0\n # choose trade route type\n if tradeRouteType == 'GEN':\n # 
update what system sends based on what it makes\n myTradeRoute.AL = systemFrom.prodAL\n myTradeRoute.EC = systemFrom.prodEC\n myTradeRoute.IA = systemFrom.prodIA\n \n # check if trade route is adjacent or requires warp gate capacity\n if systemTo.id in systemFrom.warpGateSystems:\n warpReq = myTradeRoute.getWarpRequired()\n if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n cancel = 1\n elif systemTo.id not in systemFrom.connectedSystems:\n cancel = 1\n \n if (systemFrom.AL >= myTradeRoute.AL and\n systemFrom.EC >= myTradeRoute.EC and\n systemFrom.IA >= myTradeRoute.IA and \n cancel == 0):\n # process trade route\n systemFrom.AL -= myTradeRoute.AL\n systemFrom.EC -= myTradeRoute.EC\n systemFrom.IA -= myTradeRoute.IA\n systemTo.AL += myTradeRoute.AL\n systemTo.EC += myTradeRoute.EC\n systemTo.IA += myTradeRoute.IA\n # deduct properly if empires are different\n empireFrom = self.empires[systemFrom.myEmpireID]\n empireTo = self.empires[systemTo.myEmpireID]\n if empireFrom <> empireTo:\n empireFrom.AL -= myTradeRoute.AL\n empireFrom.EC -= myTradeRoute.EC\n empireFrom.IA -= myTradeRoute.IA\n empireTo.AL += myTradeRoute.AL\n empireTo.EC += myTradeRoute.EC\n empireTo.IA += myTradeRoute.IA\n \n if warpReq > 0:\n systemFrom.usedWGC += warpReq\n systemTo.usedWGC += warpReq\n \n # mail trade route completion\n resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n self.mailTradeInfo('completed', myTradeRoute, nextRound)\n else:\n cancel = 1\n \n # check if route should be cancelled\n if cancel == 1:\n resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n elif myTradeRoute.oneTime == 1:\n resultslist.append('one time trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n \n return str(resultslist)\n except:\n return 'galaxy->processTradeRoutes error'", "def _run(self) -> None:\n while True:\n args: MigrationArgs = self._queue.get(block=True)\n with self._lock:\n if args.collection in self._chunks:\n if args.shard_key not in self._chunks[args.collection]:\n self._split_chunk(args.collection, args.shard_key)\n self._move_chunk(args)", "def run(self):\n while True:\n try:\n processor, iprot, oprot, otrans, callback = self.queue.get()\n if processor is None:\n break\n callback.getContext().setProtocols(iprot, oprot)\n processor.process(iprot, oprot, callback.getContext())\n callback.success(reply=otrans.getvalue())\n except Exception:\n logging.exception(\"Exception while processing request\")\n callback.failure()", "def handle_chunk( chunk, trailers=None ):", "def applyMethod(self, neighbours ):\n \n nid = neighbours.mQueryToken\n\n if self.mContinueAt:\n if nid == self.mContinueAt:\n self.info(\"continuing processing at %s\" % str(self.mContinueAt ) )\n self.mContinueAt = None\n return\n\n length = self.mFasta.getLength( nid )\n \n if length > self.mConfig.get(\"segments\", \"max_sequence_length\"):\n self.warn( \"skipped: nid=%s, length=%i -> too long\" % (nid, length) )\n return False\n \n self.debug( \"starting nid=%s, length=%i\" % (nid, length) )\n\n tree = self.getTree( nid, length, neighbours ) \n\n if self.covering_trees:\n tree = self.convertTreeToCoveringTree( tree )\n \n max_depth = 0\n if tree:\n for node in range(len(tree)):\n (level, parent, left_child, right_child, ranges) = tree[node]\n max_depth = max(max_depth, level)\n for xfrom, xto in ranges:\n self.mOutfile.write(\"\\t\".join( 
map(str,(nid,node,parent,level,xfrom,xto)))+ \"\\n\")\n self.mOutfile.flush()\n\n self.debug( \"finished: nid=%s, length=%i, size=%i, depth=%i\" % (nid, length,\n len(tree), max_depth ) )\n return True\n else:\n self.warn( \"failed: nid=%s, length=%i, time=%i\" % (job_id, nid, length,\n len(tree), max_depth ) )\n \n return False", "def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()", "def handle_result(self) -> Callable[['Request'], 'Request']:\n\n def _handle_result(result: 'Request'):\n \"\"\"\n Function that handles the result when extracted from the request future\n\n :param result: The result returned to the gateway. It extracts the request to be returned to the client\n :return: Returns a request to be returned to the client\n \"\"\"\n for route in result.routes:\n if route.executor == GATEWAY_NAME:\n route.end_time.GetCurrentTime()\n\n self._update_end_request_metrics(result)\n\n return result\n\n return _handle_result", "def process():", "def run_chunk(self, part_suffix, remote_home_dir, remote_index_dir, remote_work_dir,\n remote_username, input_files, key_path, service, lazy_run):\n assert service in (\"gsnap\", \"rapsearch2\")\n\n chunk_id = input_files[0].split(part_suffix)[-1]\n multihit_basename = f\"multihit-{service}-out{part_suffix}{chunk_id}.m8\"\n multihit_local_outfile = os.path.join(self.chunks_result_dir_local, multihit_basename)\n multihit_remote_outfile = os.path.join(remote_work_dir, multihit_basename)\n multihit_s3_outfile = os.path.join(self.chunks_result_dir_s3, multihit_basename)\n\n base_str = \"aws s3 cp --only-show-errors {s3_path}/{input_fa} {remote_work_dir}/{input_fa} \"\n download_input_from_s3 = \" ; \".join(\n base_str.format(\n s3_path=self.chunks_result_dir_s3,\n input_fa=input_fa,\n remote_work_dir=remote_work_dir) for input_fa in input_files)\n\n base_str = \"mkdir -p {remote_work_dir} ; {download_input_from_s3} ; \"\n environment = self.additional_attributes[\"environment\"]\n if service == \"gsnap\":\n commands = base_str + \"{remote_home_dir}/bin/gsnapl -A m8 --batch=0 --use-shared-memory=0 --gmap-mode=none --npaths=100 --ordered -t 36 --max-mismatches=40 -D {remote_index_dir} -d nt_k16 {remote_input_files} > {multihit_remote_outfile}\"\n else:\n commands = base_str + \"/usr/local/bin/rapsearch -d {remote_index_dir}/nr_rapsearch -e -6 -l 10 -a T -b 0 -v 50 -z 24 -q {remote_input_files} -o {multihit_remote_outfile}\"\n\n commands = commands.format(\n remote_work_dir=remote_work_dir,\n download_input_from_s3=download_input_from_s3,\n remote_home_dir=remote_home_dir,\n remote_index_dir=remote_index_dir,\n remote_input_files=\" \".join(\n remote_work_dir + \"/\" + input_fa for input_fa in input_files),\n multihit_remote_outfile=multihit_remote_outfile\n if service == \"gsnap\" else multihit_remote_outfile[:-3]\n # Strip the .m8 for RAPSearch as it adds that\n )\n\n if not lazy_run or not fetch_from_s3(multihit_s3_outfile,\n multihit_local_outfile):\n correct_number_of_output_columns = 12\n min_column_number = 0\n max_tries = 2\n try_number = 1\n instance_ip = \"\"\n\n\n def interpret_min_column_number_string(min_column_number_string,\n correct_number_of_output_columns,\n try_number):\n if min_column_number_string:\n min_column_number = float(min_column_number_string)\n log.write(\n \"Try no. 
%d: Smallest number of columns observed in any line was %d\"\n % (try_number, min_column_number))\n else:\n log.write(\"Try no. %d: No hits\" % try_number)\n min_column_number = correct_number_of_output_columns\n return min_column_number\n\n # Check if every row has correct number of columns (12) in the output\n # file on the remote machine\n while min_column_number != correct_number_of_output_columns \\\n and try_number <= max_tries:\n log.write(\"waiting for {} server for chunk {}\".format(\n service, chunk_id))\n max_concurrent = self.additional_attributes[\"max_concurrent\"]\n\n with server.ASGInstance(service, key_path,\n remote_username, environment,\n max_concurrent, chunk_id,\n self.additional_attributes.get(\"max_interval_between_describe_instances\") or 900,\n self.additional_attributes.get(\"job_tag_prefix\") or \"RunningIDseqBatchJob_\",\n self.additional_attributes.get(\"job_tag_refresh_seconds\") or 600,\n self.additional_attributes.get(\"draining_tag\") or \"draining\") as instance_ip:\n command.execute(command.remote(commands, key_path, remote_username, instance_ip))\n\n if service == \"gsnap\":\n verification_command = \"cat %s\" % multihit_remote_outfile\n else:\n # For rapsearch, first remove header lines starting with '#'\n verification_command = \"grep -v '^#' %s\" % multihit_remote_outfile\n verification_command += \" | awk '{print NF}' | sort -nu | head -n 1\"\n min_column_number_string = command.execute_with_output(\n command.remote(verification_command, key_path, remote_username, instance_ip))\n min_column_number = interpret_min_column_number_string(\n min_column_number_string, correct_number_of_output_columns,\n try_number)\n\n try_number += 1\n\n # Move output from remote machine to local machine\n msg = \"Chunk %s output corrupt; not copying to S3. Re-start pipeline \" \\\n \"to try again.\" % chunk_id\n assert min_column_number == correct_number_of_output_columns, msg\n\n with self.iostream_upload: # Limit concurrent uploads so as not to stall the pipeline.\n command.execute(\n command.scp(key_path, remote_username, instance_ip,\n multihit_remote_outfile, multihit_local_outfile))\n command.execute(\"aws s3 cp --only-show-errors %s %s/\" %\n (multihit_local_outfile,\n self.chunks_result_dir_s3))\n log.write(\"finished alignment for chunk %s on %s server %s\" % (chunk_id, service, instance_ip))\n return multihit_local_outfile", "def solve(self):\n for step in self.run.values():\n step.solve()", "def chunk_user_route(detail_of_trip):\n\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n # since I can't get javascript to load, here's a hacky way of loading json\n # that details the route based on the user's point A and point B\n # detail_of_trip = api.directions(\n # (40.760350, -73.976209),\n # (40.754009, -73.981097),\n # mode=\"walking\"\n # )[0]\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n # now that I have javascript sending over the json, load json that details\n # the route based on the user's point A and point B\n\n # -------------- This section is for interpolation/splitting using shapely\n first = True # to see if this is the start position for the entire route\n line_points = [] # stores all the points to the route based on dict passed\n\n for leg in detail_of_trip['legs']:\n for step in leg['steps']:\n # Create a list of two element lists that represent points along the\n # route. via google. line_points = [ [lat1, lng1], [lat2, lng2],...]\n # Only add the starting point the first time. 
Every other iteration\n # we will just tack on the end points to our line.\n if first:\n line_points.append([step['start_location']['lat'], step['start_location']['lng']])\n first = False\n line_points.append([step['end_location']['lat'], step['end_location']['lng']])\n\n # Now load those points into a geometry, here shapely's LineString type.\n route_line = LineString(line_points)\n return (route_line, line_points)", "def _process_batch(self, subqueue):\n try:\n timeoutCall = None\n jo = None\n if self.max_batch_size == 1:\n #At time of writing, the regular nodes have broken JSON-RPC batch handling.\n #So when max_batch_size is set to one, we assume we need to work around this fact.\n jo = json.dumps(self.entries[subqueue[0]]._get_rpc_call_object())\n else:\n #The api.steemitstage.com node properly supports JSON-RPC batches, and so, hopefully soon, will the other nodes.\n qarr = list()\n for num in subqueue:\n qarr.append(self.entries[num]._get_rpc_call_object())\n jo = json.dumps(qarr)\n url = \"https://\" + self.nodes[self.node_index] + \"/\"\n url = str.encode(url)\n deferred = self.agent.request('POST',\n url,\n Headers({\"User-Agent\" : ['Async Steem for Python v0.6.1'],\n \"Content-Type\": [\"application/json\"]}),\n _StringProducer(jo))\n def process_one_result(reply):\n \"\"\"Process a single response from an JSON-RPC command.\"\"\"\n try:\n if \"id\" in reply:\n reply_id = reply[\"id\"]\n if reply_id in self.entries:\n match = self.entries[reply_id]\n if \"result\" in reply:\n #Call the proper result handler for the request that this response belongs to.\n match._handle_result(reply[\"result\"])\n else:\n if \"error\" in reply and \"code\" in reply[\"error\"]:\n msg = \"No message included with error\"\n if \"message\" in reply[\"error\"]:\n msg = reply[\"error\"][\"message\"]\n #Call the proper error handler for the request that this response belongs to.\n match._handle_error(reply[\"error\"][\"code\"], msg)\n else:\n self.log.error(\"Error: Invalid JSON-RPC response entry. {node!r}.\",node = self.nodes[self.node_index])\n #del self.entries[reply_id]\n else:\n self.log.error(\"Error: Invalid JSON-RPC id in entry {rid!r}. 
{node!r}\",rid=reply_id, node = self.nodes[self.node_index])\n else:\n self.log.error(\"Error: Invalid JSON-RPC response without id in entry: {reply!r}: {node!r}\",reply=reply, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in _process_one_result {err!r}, {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n def handle_response(response):\n \"\"\"Handle response for JSON-RPC batch query invocation.\"\"\"\n try:\n #Cancel any active timeout for this HTTPS call.\n if timeoutCall.active():\n timeoutCall.cancel()\n def cbBody(bodystring):\n \"\"\"Process response body for JSON-RPC batch query invocation.\"\"\"\n try:\n results = None\n #The bosy SHOULD be JSON, it not always is.\n try:\n results = json.loads(bodystring)\n except Exception as ex:\n #If the result is NON-JSON, may want to move to the next node in the node list\n self.log.error(\"Non-JSON response from server {node!r}\", node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if results != None:\n ok = False\n if isinstance(results, dict):\n #Running in legacy single JSON-RPC call mode (no batches), process the result of the single call.\n process_one_result(results)\n ok = True\n else:\n if isinstance(results, list):\n #Running in batch mode, process the batch result, one response at a time\n for reply in results:\n process_one_result(reply)\n ok = True\n else:\n #Completely unexpected result type, may want to move to the next node in the node list.\n self.log.error(\"Error: Invalid JSON-RPC response, expecting list as response on batch. {node!r}\",node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if ok == True:\n #Clean up the entries dict by removing all fully processed commands that now are no longer in the queu.\n for request_id in subqueue:\n if request_id in self.entries:\n del self.entries[request_id]\n else:\n self.log.error(\"Error: No response entry for request entry in result: {rid!r}. {node!r}\",rid=request_id, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in cbBody {err!r}. {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n #This HTTPS POST is now fully processed.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred2 = readBody(response)\n deferred2.addCallback(cbBody)\n return deferred2\n except Exception as ex:\n self.log.failure(\"Error in handle_response {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addCallback(handle_response)\n def _handle_error(error):\n \"\"\"Handle network level error for JSON-RPC request.\"\"\"\n try:\n #Abandon any active timeout triggers\n if timeoutCall.active():\n timeoutCall.cancel()\n #Unexpected error on HTTPS POST, we may want to move to the next node.\n self.log.error(\"Error on HTTPS POST : {cls!r} : {err!r}. {node!r}\",cls=error.type.__name__,err=error.getErrorMessage(),node = self.nodes[self.node_index])\n self._next_node()\n except Exception as ex:\n self.log.failure(\"Error in _handle_error {err!r}. 
{node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n ##If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addErrback(_handle_error)\n timeoutCall = self.reactor.callLater(self.rpc_timeout, deferred.cancel)\n #Keep track of the number of active parallel HTTPS posts.\n self.active_call_count = self.active_call_count + 1\n return deferred\n except Exception as ex:\n self.log.failure(\"Error in _process_batch {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])", "def run(self):\r\n while True:\r\n try:\r\n processor, iprot, oprot, otrans, callback = self.queue.get()\r\n if processor is None:\r\n break\r\n processor.process(iprot, oprot)\r\n callback(True, otrans.getvalue())\r\n except Exception:\r\n logging.exception(\"Exception while processing request\")\r\n callback(False, '')", "def process(self):\n while not self.halted:\n self.step()\n return self.outputs", "def chunk_generator( callback, request, c ):", "def handle_rpc(self):\n while True: # loop handling\n self.rbuf.seek(0)\n length_prefix = self.rbuf.read(4)\n if len(length_prefix) < 4: # half-package\n break\n\n try:\n length, = struct.unpack(\"I\", length_prefix.encode(\"utf-8\"))\n except Exception as e:\n print(e.__traceback__)\n body = self.rbuf.read(length)\n if len(body) < length: # half-package\n break\n\n request = json.loads(body)\n input = request[\"in\"]\n params = request[\"params\"]\n handler = self.handlers[input]\n handler(params)\n # cut read buffer\n left = self.rbuf.getvalue()[length + 4:]\n self.rbuf = StringIO()\n self.rbuf.write(left)\n # move position to EOF\n self.rbuf.seek(0, 2)", "def inference(self):\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].send(\"inference\")\n \n ## wait for the finalization to be completed\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].recv()", "def finish(self) -> Iterable[Chunk]:", "def evaluate_batch(self, pipelines):", "def get_next_inner_computation(self, arg: str):\n print(\"[get_next - inner computation] starts here.\")\n # Start of transformation and component encoding\n name_str = self.transform_inner(arg)\n # print(\"[get_next - inner computation] after transform:\", arg)\n name_after_transform = Name(name_str)\n name = self.encode_name_components(name_after_transform)\n # End of transformation and component encoding\n print(\"[get_next - inner computation] after encoding:\", name)\n self.queue_to_lower.put((self.packetid, Interest(name)))\n inner_result = self.get_content(name)\n print(\"[get_next - inner computation] ends here with result:\", inner_result)\n return inner_result", "async def solve(self):\n\n \"\"\"TODO:\n Handle guess and checking:\n 1) Make guess (Make logical guess based on what could be most impactful...i.e. 
if two spots can have either number in a row)\n 2) Fork based on guess\n 3) Check if one raises from impossible square (delete this fork)\n 4) Check if one completes (will simply return from .gather)\n 5) Each board can recurse through this guess and checking, just in case\n \"\"\"\n tasks = [square.check() for row in self for square in row]\n\n return await asyncio.gather(*tasks, return_exceptions=False)", "def run(self, data, rewrap=False, prefetch=0):\n if rewrap:\n data = [data]\n\n for pipe in self._pipes:\n pipe.feed(data)\n data = pipe\n else:\n iterable = self._prefetch_callable(data, prefetch) if prefetch else data\n for out_data in iterable:\n yield out_data", "def stream(self, batch):\n response = self.post(batch)\n try:\n for i, result_data in grouped(response):\n result = JobResult.hydrate(assembled(result_data), batch)\n log.info(\"< %s\", result)\n yield result\n finally:\n response.close()", "def __call__(self, frame, tlbrs):\n self.extract_async(frame, tlbrs)\n return self.postprocess()", "def transform( request, data, finishing=False ):", "def transform( request, data, finishing=False ):", "def routing_step(self, layout: dict, circuit: Circuit):\n\n _, final_mapping = self.routing_algorithm(circuit, layout)\n return final_mapping", "def run(self):\n while self._num_workers > 0:\n self.server.handle_request()\n self._graph = None", "def num_step_reach(rr, a, b, k, num_steps = 5):\n\n # Time this function\n start_time = time.time()\n\n # Help us divide and conquer with multiple threads\n pool = ThreadPool(processes = 2)\n\n # Create our BDD variables, matching those exactly in rr (except zz's)\n xx_list = [bddvar(\"xx{}\".format(i)) for i in range(k)]\n yy_list = [bddvar(\"yy{}\".format(i)) for i in range(k)]\n zz_list = [bddvar(\"zz{}\".format(i)) for i in range(k)]\n\n # Compose for each step\n # NOTE: Very slow for many nodes and edges; may want divide and conquer here\n hh = rr\n yyzz_compose_dict = {a:b for a, b in zip(yy_list, zz_list)}\n xxzz_compose_dict = {a:b for a, b in zip(xx_list, zz_list)}\n for i in range(0, num_steps - 1):\n\n # Kickoff threads on async composition\n yyzz_async_result = pool.apply_async(hh.compose, [yyzz_compose_dict])\n xxzz_async_result = pool.apply_async(rr.compose, [xxzz_compose_dict])\n\n # Block: get results from threads\n yyzz_compose = yyzz_async_result.get()\n xxzz_compose = xxzz_async_result.get()\n\n # Conjunct them and do smoothing\n hh = (yyzz_compose & xxzz_compose).smoothing(set(zz_list))\n\n print(\"\\tComposed for step\", i)\n\n print(\"Completed in {} seconds\".format(round(time.time() - start_time)))\n print(\"Number of satisfiable variables\", len(list(hh.satisfy_all())))\n\n # See if a and b can reach eachother in the given number of steps\n restrict_dict = {c:d for c, d in zip(set(xx_list), to_bin(a, k))}\n restrict_dict.update({c:d for c, d in zip(set(yy_list), to_bin(b, k))})\n return hh.restrict(restrict_dict)", "def send_and_receive_many(world: CueBeamWorld):\n elements_vectorized1 = []\n for idxElement in range(0, len(world.elements) - 1):\n elements_vectorized1.extend(\n [world.elements[idxElement].x, world.elements[idxElement].y, world.elements[idxElement].z,\n world.elements[idxElement].amplitude, world.elements[idxElement].phase, 0.0])\n time_start = time.clock()\n current_ray_count = world.get_ray_count()\n estimated_worker_performance = 300000.0\n need_workers = math.ceil(current_ray_count / estimated_worker_performance)\n each_worker_does_ylines = math.ceil(world.rxPlane.ny / need_workers )\n # update\n 
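    # (added annotation, assumed reading of the code above) The receive plane is
    # split into horizontal bands of `each_worker_does_ylines` consecutive y-lines;
    # worker i therefore starts at y-line i * each_worker_does_ylines, and its band
    # origin, computed below as yline_y, is rxPlane.y0 + rxPlane.dy * yline0.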
handles = []\n for idx_worker in range(need_workers):\n yline0 = idx_worker*each_worker_does_ylines # starts at zero\n yline_y = world.rxPlane.y0 + world.rxPlane.dy * yline0\n handles.append({\n 'yline_y': yline_y,\n 'async_handle': beamsim_instant.delay(\n k=world.wavenumber,\n x0=world.rxPlane.x0,\n y0=yline_y,\n z0=world.rxPlane.z0,\n nx=world.rxPlane.nx,\n ny=each_worker_does_ylines,\n nz=world.rxPlane.nz,\n dx=world.rxPlane.dx,\n dy=world.rxPlane.dy,\n dz=world.rxPlane.dz,\n elements_vectorized=elements_vectorized1)\n })\n # TODO: FRONTIER HERE ===================\n\n # TODO: Wait for first worker, and load the result,\n #while not (async_handle.ready()):\n # time.sleep(0.02)\n\n world.rxPlane.pressurefield = pickle.loads(async_handle.result)\n time_end = time.clock()\n world.last_performance_rays_per_second = world.get_ray_count() / (time_end - time_start)\n print('performance = {} kRays/sec'.format(world.last_performance_rays_per_second / 1e3))\n return world", "def __transform(self):\n try:\n self.tokenized_document, self.stack = None, []\n\n InlineProcessor.initialize()\n LinkHelper.initialize()\n\n POGGER.debug(\"\\n\\n>>>>>>>parse_blocks_pass>>>>>>\")\n first_pass_results = self.__parse_blocks_pass()\n\n POGGER.debug(\"\\n\\n>>>>>>>coalesce_text_blocks>>>>>>\")\n coalesced_results = CoalesceProcessor.coalesce_text_blocks(\n first_pass_results\n )\n\n POGGER.debug(\"\\n\\n>>>>>>>parse_inline>>>>>>\")\n final_pass_results = InlineProcessor.parse_inline(coalesced_results)\n\n POGGER.debug(\"\\n\\n>>>>>>>final_pass_results>>>>>>\")\n return final_pass_results\n except Exception as this_exception:\n raise BadTokenizationError(\n \"An unhandled error occurred processing the document.\"\n ) from this_exception", "def async_fetch(self):\n args = (async_get_pipe, self.zargs, self.connections)\n mapped = yield ait.async_map(*args)\n return_value(multiplex(mapped))", "def parse_chunks(self):\n logger.info('parse_chunks()')\n\n while (self.replay.pos < len(self.replay)):\n chunk_type = self.replay.read_uint32()\n chunk_size = self.replay.read_int32()\n offset = self.replay.bytepos\n\n if chunk_type == ChunkTypes.CHECKPOINT.value:\n self.parse_checkpoint()\n\n elif chunk_type == ChunkTypes.EVENT.value:\n self.parse_event()\n\n elif chunk_type == ChunkTypes.REPLAYDATA.value:\n self.parse_replaydata()\n\n elif chunk_type == ChunkTypes.HEADER.value:\n self.parse_header(chunk_size)\n\n self.replay.bytepos = offset + chunk_size", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def run_router(self, detour_scale):\n\n # Double check source and taget are not same node, if so, we are done!\n for k,v in self.rg.map.items():\n if v.source and v.target:\n debug.error(\"Grid cell is source and target! 
{}\".format(k))\n return False\n \n # returns the path in tracks\n (path,cost) = self.rg.route(detour_scale)\n if path:\n debug.info(1,\"Found path: cost={0} \".format(cost))\n debug.info(1,str(path))\n\n self.paths.append(path)\n self.add_route(path)\n \n path_set = grid_utils.flatten_set(path)\n self.path_blockages.append(path_set)\n else:\n self.write_debug_gds(\"failed_route.gds\")\n # clean up so we can try a reroute\n self.rg.reinit()\n return False\n return True", "def _process_requests_in_background(self):\n while True:\n readable, writable, exceptional = self._bg_select_peers()\n\n for peer in readable:\n data = peer.socket.recv(RECV_BYTES)\n if data:\n peer.incoming_buffer.feed(data)\n try:\n response = peer.incoming_buffer.unpack()\n except msgpack.OutOfData:\n continue\n peer.handle_response(response)\n else:\n self._bg_clean_up_peer(peer)\n if peer in writable:\n writable.remove(peer)\n if peer in exceptional:\n exceptional.remove(peer)\n\n for peer in writable:\n # single-reader configuration means we can safely unlock between\n # peeking and committing.\n with peer.lock:\n next_bytes = peer.outgoing_buffer.peek(SEND_BYTES)\n if not next_bytes:\n continue\n\n sent_bytes = peer.socket.send(next_bytes)\n if sent_bytes == 0:\n self._bg_clean_up_peer(peer)\n if peer in exceptional:\n exceptional.remove(peer)\n continue\n\n with peer.lock:\n peer.outgoing_buffer.commit_read(sent_bytes)\n\n for peer in exceptional:\n self._bg_clean_up_peer(peer)", "def compute_response(self, items_to_process):\n pass", "def _map_to_workers(self, iterable, result_getter):\n if not self.is_started:\n raise RuntimeError(\"Cannot process inputs: must call start() first.\")\n\n tasks = TaskIterator(iterable)\n task = next(tasks)\n\n while True:\n try:\n self._send_task(task)\n task = next(tasks)\n except Queue.Full:\n for result in result_getter(): # I wish I had `yield from` :(\n yield result\n except StopIteration:\n break\n\n while not self.is_completed:\n for result in result_getter():\n yield result", "def routes(self, body):\n pass", "async def fetch_posts(self) -> None:\n\n async def fetch_posts_for_offset(offset) -> list:\n logger.info(\n \"(offset %i) Start fetching posts from vk.com/%s...\",\n offset,\n self.vk_domain,\n )\n\n # VK Script code for /execute method.\n vks_code = get_wall_post_template.substitute(\n {\n \"domain\": self.vk_domain,\n \"offset\": offset,\n \"posts_per_portion\": self._posts_per_portion,\n \"execution_times\": self._execution_times,\n }\n )\n params = {\n \"v\": settings.VKAPI_VERSION,\n \"access_token\": settings.VKAPI_TOKEN,\n \"code\": vks_code,\n }\n url = self._url_execute\n\n # Posts fetching.\n resp_json = await vk_asynchronous_request(\n url,\n params,\n domain=self.vk_domain,\n offset=offset,\n )\n\n logger.info(\n \"(offset %i) End fetching posts from vk.com/%s...\",\n offset,\n self.vk_domain,\n )\n\n # Gathered posts handling.\n posts_from_vk = resp_json[\"response\"][\"items\"]\n posts = posts_as_schemas(posts_from_vk)\n del posts_from_vk\n return posts\n\n # Checks and preparations.\n await self._set_total_posts_in_domain()\n if not self._total_posts_in_domain:\n return\n\n # Creating tasks for fetching.\n tasks = []\n posts_per_task = self._posts_per_portion * self._execution_times\n offsets = list(range(0, self._total_posts_in_domain, posts_per_task))\n for offset in offsets:\n tasks.append(asyncio.create_task(fetch_posts_for_offset(offset)))\n\n # Running tasks.\n logger.info(\"Start fetching posts from vk.com/%s...\", self.vk_domain)\n results = 
await asyncio.gather(*tasks)\n logger.info(\"End fetching posts from vk.com/%s...\", self.vk_domain)\n\n # Flatting results from many tasks into one list.\n self._posts = [post for result in results for post in result]\n\n # Final actions.\n if self.sort_by_likes:\n self._posts = list(sorted(self.posts, key=lambda p: p.likes, reverse=True))\n if self.amount_to_fetch:\n self._posts = self._posts[: self.amount_to_fetch]", "def run(self):\n self.debug('Starting new thread')\n while True:\n try:\n i, pset = self._readq.get(block=False)\n except Empty:\n break\n\n result = self.solve(pset)\n self._solveq.put((i, result))\n self.debug('Finishing thread')", "def run_traverse(fln, fln_chunk):\n paths_chunk = {}\n clicks_chunk = {} #page: clicks\n path_length_chunk = {} #page : path length\n feed_count_chunk = {} #page : feed clicks\n i = 0\n for page in fln_chunk.iterkeys():\n i +=1\n if i % 10000 == 0: \n print i\n gc.collect()\n #traverse path\n page_path = traverse(fln, page)\n #add to dictionary\n paths_chunk[page] = page_path\n #update clicks \n for pt in page_path:\n clicks_chunk[pt] = clicks_chunk.get(pt, 0) + 1\n #update len(path) \n path_length_chunk[page] = len(page_path)\n #update feeder clicks\n feeder = return_feeder_page(fln, page, page_path)\n if feeder:\n feed_count_chunk[feeder] = feed_count_chunk.get(feeder, 0) + 1\n return paths_chunk, clicks_chunk, path_length_chunk, feed_count_chunk", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r", "def run(self):\n _threadpool_limits = _no_threadpool_limits\n if USE_THREADPOOL_LIMITS:\n _threadpool_limits = threadpool_limits\n\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n with _threadpool_limits(limits=1):\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)", "def process_results(self):\n\n while not self.results.empty():\n mvt = self.results.get()\n\n for peer in self.peers_list:\n peer.check_mvt(mvt)\n\n self.results.task_done()", "async def _load_next_chunk(self):\n raise NotImplementedError", "def run(self):\n\n # keep track of counter\n counter = 0\n\n while self.queue:\n\n # print depth of tree every 10000 steps\n if counter % 10000 == 0:\n print(len(self.queue[0]))\n\n # get first moves set from queue\n moves_set = self.get_moves_set()\n\n # move all moves from set\n self.try_moves(moves_set)\n\n # continue branch (add to queue) if layout is not in archive\n if self.not_in_archive():\n self.add_to_queue(moves_set)\n \n # check for win\n if self.won_game():\n\n # return winning set of moves\n return moves_set\n \n # reverse moves to original layout\n self.reverse_moves(moves_set)\n \n # add to counter\n counter += 1", "def computeRrup(self, lon, lat, depth):\n\n mesh_dx = self._mesh_dx\n\n #----------------------------------------------------------------------\n # Sort out sites\n #----------------------------------------------------------------------\n oldshape = lon.shape\n\n if len(oldshape) == 2:\n newshape = (oldshape[0] * oldshape[1], 1)\n else:\n newshape = (oldshape[0], 1)\n\n x, y, z = latlon2ecef(lat, lon, depth)\n x.shape = newshape\n y.shape = newshape\n z.shape = newshape\n sites_ecef = np.hstack((x, y, z))\n\n #----------------------------------------------------------------------\n # Get mesh\n #----------------------------------------------------------------------\n mx = []\n my = []\n mz = []\n u_groups = np.unique(self._group_index)\n n_groups = len(u_groups)\n for 
j in range(n_groups):\n g_ind = np.where(u_groups[j] == self._group_index)[0]\n nq = len(self._toplats[g_ind]) - 1\n for i in range(nq):\n q = [Point(self._toplons[g_ind[i]],\n self._toplats[g_ind[i]],\n self._topdeps[g_ind[i]]),\n Point(self._toplons[g_ind[i + 1]],\n self._toplats[g_ind[i + 1]],\n self._topdeps[g_ind[i + 1]]),\n Point(self._botlons[g_ind[i + 1]],\n self._botlats[g_ind[i + 1]],\n self._botdeps[g_ind[i + 1]]),\n Point(self._botlons[g_ind[i]],\n self._botlats[g_ind[i]],\n self._botdeps[g_ind[i]])\n ]\n mesh = get_quad_mesh(q, dx=mesh_dx)\n mx.extend(list(np.reshape(mesh['x'], (-1,))))\n my.extend(list(np.reshape(mesh['y'], (-1,))))\n mz.extend(list(np.reshape(mesh['z'], (-1,))))\n mesh_mat = np.array([np.array(mx), np.array(my), np.array(mz)])\n\n #----------------------------------------------------------------------\n # Compute distance\n #----------------------------------------------------------------------\n dist = np.zeros_like(x)\n for i in range(len(x)):\n sitecol = sites_ecef[i, :].reshape([3, 1])\n dif = sitecol - mesh_mat\n distarray = np.sqrt(np.sum(dif * dif, axis=0))\n dist[i] = np.min(distarray) / 1000.0 # convert to km\n\n dist = np.reshape(dist, oldshape)\n\n return dist", "def process10(res):", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)", "def walk_bus_algor(start,end):\n #---CLASSES---#\n class my_dictionary(dict):\n \"\"\"\n Creates a dictionary\n \"\"\"\n def __init__(self):\n self = dict()\n def add(self, key, value):\n self[key] = value\n\n #---FUNCTIONS---#\n def bus_layer(start,end, results, case):\n \"\"\"\n It generates a bus route with the bus numbers via greedy algorithm\n\n Parameters\n ----------\n start : node id\n end : node id\n results : dict (From lta datamall)\n case : int\n Returns\n -------\n final_route_list : list\n \"\"\"\n def overpass_request(data, pause_duration=None, timeout=180, error_pause_duration=None):\n \"\"\"\n Send a request to the Overpass API via HTTP POST and return the JSON\n response.\n Parameters\n ----------\n data : dict or OrderedDict\n key-value pairs of parameters to post to the API\n pause_duration : int\n how long to pause in seconds before requests, if None, will query API\n status endpoint to find when next slot is available\n timeout : int\n the timeout interval for the requests library\n error_pause_duration : int\n how long to pause in seconds before re-trying requests if error\n Returns\n -------\n dict\n \"\"\"\n\n # define the Overpass API URL, then construct a GET-style URL as a string to\n # hash to look up/save to cache\n url = settings.overpass_endpoint.rstrip('/') + '/interpreter'\n prepared_url = requests.Request('GET', url, params=data).prepare().url\n cached_response_json = get_from_cache(prepared_url)\n\n if cached_response_json is not None:\n # found this request in the cache, just return it instead of making a\n # new HTTP call\n return cached_response_json\n\n else:\n # if this URL is not already in the cache, pause, then request it\n if pause_duration is None:\n this_pause_duration = get_pause_duration()\n log('Pausing {:,.2f} seconds before making API POST request'.format(this_pause_duration))\n time.sleep(this_pause_duration)\n start_time = time.time()\n log('Posting to {} with timeout={}, \"{}\"'.format(url, timeout, data))\n response = requests.post(url, data=data, 
timeout=timeout, headers=get_http_headers())\n\n # get the response size and the domain, log result\n size_kb = len(response.content) / 1000.\n domain = re.findall(r'(?s)//(.*?)/', url)[0]\n log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'.format(size_kb, domain, time.time() - start_time))\n\n try:\n response_json = response.json()\n if 'remark' in response_json:\n log('Server remark: \"{}\"'.format(response_json['remark'], level=lg.WARNING))\n save_to_cache(prepared_url, response_json)\n except Exception:\n # 429 is 'too many requests' and 504 is 'gateway timeout' from server\n # overload - handle these errors by recursively calling\n # overpass_request until we get a valid response\n if response.status_code in [429, 504]:\n # pause for error_pause_duration seconds before re-trying request\n if error_pause_duration is None:\n error_pause_duration = get_pause_duration()\n log(\n 'Server at {} returned status code {} and no JSON data. Re-trying request in {:.2f} seconds.'.format(\n domain,\n response.status_code,\n error_pause_duration),\n level=lg.WARNING)\n time.sleep(error_pause_duration)\n response_json = overpass_request(data=data, pause_duration=pause_duration, timeout=timeout)\n\n # else, this was an unhandled status_code, throw an exception\n else:\n log('Server at {} returned status code {} and no JSON data'.format(domain, response.status_code),\n level=lg.ERROR)\n raise Exception(\n 'Server returned no JSON data.\\n{} {}\\n{}'.format(response, response.reason, response.text))\n\n return response_json\n def get_node(element):\n \"\"\"\n Convert an OSM node element into the format for a networkx node.\n\n Parameters\n ----------\n element : dict\n an OSM node element\n\n Returns\n -------\n dict\n \"\"\"\n useful_tags_node = ['ref', 'highway', 'route_ref', 'asset_ref']\n\n node = {}\n node['y'] = element['lat']\n node['x'] = element['lon']\n node['osmid'] = element['id']\n\n\n if 'tags' in element:\n for useful_tag in useful_tags_node:\n if useful_tag in element['tags']:\n node[useful_tag] = element['tags'][useful_tag]\n return node\n def get_path(element,element_r):\n \"\"\"\n Convert an OSM way element into the format for a networkx graph path.\n\n Parameters\n ----------\n element : dict\n an OSM way element\n element_r : dict\n an OSM way element\n\n Returns\n -------\n dict\n \"\"\"\n useful_tags_path_e = ['bridge', 'tunnel', 'oneway', 'lanes', 'name',\n 'highway', 'maxspeed', 'service', 'access', 'area',\n 'landuse', 'width', 'est_width', 'junction']\n\n useful_tags_path_r = ['bridge', 'tunnel', 'oneway', 'lanes', 'ref', 'direction', 'from', 'to', 'name',\n 'highway', 'maxspeed', 'service', 'access', 'area',\n 'landuse', 'width', 'est_width', 'junction']\n\n\n\n path = {}\n path['osmid'] = element['id']\n\n # remove any consecutive duplicate elements in the list of nodes\n grouped_list = groupby(element['nodes'])\n path['nodes'] = [group[0] for group in grouped_list]\n\n\n\n if 'tags' in element:\n # for relation in element_r['elements']:\n # if relation['type'] == 'relation':\n # for members in relation['members']:\n # if members['ref'] == element['id']:\n for useful_tag in useful_tags_path_e:\n if useful_tag in element['tags']:\n path[useful_tag] = element['tags'][useful_tag]\n # for useful_tag in useful_tags_path_r:\n # if useful_tag in relation['tags']:\n # try:\n # path[useful_tag] = path[useful_tag] + \";\" + relation['tags'][useful_tag]\n # except KeyError:\n # path[useful_tag] = relation['tags'][useful_tag]\n # pass\n\n return path\n def 
parse_osm_nodes_paths(osm_data):\n \"\"\"\n Construct dicts of nodes and paths with key=osmid and value=dict of\n attributes.\n\n Parameters\n ----------\n osm_data : dict\n JSON response from from the Overpass API\n\n Returns\n -------\n nodes, paths : tuple\n \"\"\"\n\n nodes = {}\n paths = {}\n relation = {}\n\n # for element in osm_data['elements']:\n # if element['type'] == 'relation':\n\n\n for element in osm_data['elements']:\n if element['type'] == 'node':\n key = element['id']\n nodes[key] = get_node(element)\n\n elif element['type'] == 'way': #osm calls network paths 'ways'\n key = element['id']\n # pp.pprint(element)\n paths[key] = get_path(element,osm_data)\n\n return nodes, paths\n def create_graph(response_jsons, name='unnamed', retain_all=True, bidirectional=False):\n \"\"\"\n Create a networkx graph from Overpass API HTTP response objects.\n\n Parameters\n ----------\n response_jsons : list\n list of dicts of JSON responses from from the Overpass API\n name : string\n the name of the graph\n retain_all : bool\n if True, return the entire graph even if it is not connected\n bidirectional : bool\n if True, create bidirectional edges for one-way streets\n\n Returns\n -------\n networkx multidigraph\n \"\"\"\n\n log('Creating networkx graph from downloaded OSM data...')\n start_time = time.time()\n\n # make sure we got data back from the server requests\n elements = []\n # for response_json in response_jsons:\n elements.extend(response_json['elements'])\n if len(elements) < 1:\n raise EmptyOverpassResponse('There are no data elements in the response JSON objects')\n\n # create the graph as a MultiDiGraph and set the original CRS to default_crs\n G = nx.MultiDiGraph(name=name, crs=settings.default_crs)\n\n # extract nodes and paths from the downloaded osm data\n nodes = {}\n paths = {}\n # for osm_data in response_jsons:\n nodes_temp, paths_temp = parse_osm_nodes_paths(response_jsons)\n for key, value in nodes_temp.items():\n nodes[key] = value\n for key, value in paths_temp.items():\n paths[key] = value\n\n # add each osm node to the graph\n for node, data in nodes.items():\n G.add_node(node, **data)\n\n # add each osm way (aka, path) to the graph\n G = ox.add_paths(G, paths, bidirectional=bidirectional)\n\n # retain only the largest connected component, if caller did not\n # set retain_all=True\n if not retain_all:\n G = get_largest_component(G)\n\n log('Created graph with {:,} nodes and {:,} edges in {:,.2f} seconds'.format(len(list(G.nodes())), len(list(G.edges())), time.time()-start_time))\n\n # add length (great circle distance between nodes) attribute to each edge to\n # use as weight\n if len(G.edges) > 0:\n G = ox.add_edge_lengths(G)\n\n return G\n def calculate_H(s_lat,s_lon,e_lat,e_lon):\n \"\"\"\n Calculate a distance with x,y coordinates with\n\n Parameters\n ----------\n s_lat : float (starting lat)\n s_lon : float (starting lon)\n e_lat : float (ending lat)\n e_lon : float (ending lon)\n\n Returns\n -------\n distance\n \"\"\"\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist\n def bus_details_SD(adjacent_list):\n \"\"\"\n store all details from LTA data mall into dictionary\n\n Parameters\n ----------\n adjacent_list : dict\n\n Returns\n -------\n adjacent_list : dict\n \"\"\"\n\n temp = 0\n for x in results:\n if temp != x.get('ServiceNo'):\n temp = 
x.get('ServiceNo')\n count = 0\n adja_bus_stop = my_dictionary()\n adjacent_list.add(temp, adja_bus_stop)\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n else:\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n return adjacent_list\n def get_nearestedge_node(osm_id, a, G):\n \"\"\"\n Find the nearest node available in Open street map\n\n Parameters\n ----------\n osm_id : node ID\n a : plotting graph\n g : bus graph\n\n Returns\n -------\n temp_nearest_edge[1]/temp_nearest_edge[2] : nearest node to a way ID\n \"\"\"\n temp_y = G.nodes.get(osm_id).get('y')\n temp_x = G.nodes.get(osm_id).get('x')\n temp_nearest_edge = ox.get_nearest_edge(a, (temp_y, temp_x))\n temp_1 = temp_nearest_edge[0].coords[0]\n temp_2 = temp_nearest_edge[0].coords[1]\n temp1_x = temp_1[0]\n temp1_y = temp_1[1]\n temp_1_distance = calculate_H(temp1_y,temp1_x,temp_y,temp_x)\n\n temp2_x = temp_2[0]\n temp2_y = temp_2[1]\n temp_2_distance = calculate_H(temp2_y,temp2_x,temp_y,temp_x)\n if temp_1_distance < temp_2_distance:\n return temp_nearest_edge[1]\n else:\n return temp_nearest_edge[2]\n def delete_duplicate(x):\n \"\"\"\n Delete duplicate within a list\n\n Parameters\n ----------\n x : list\n\n Returns\n -------\n list\n \"\"\"\n return list(dict.fromkeys(x))\n def request_busG():\n \"\"\"\n Find all nodes that is a bus stop\n\n Returns\n -------\n busG : dict\n \"\"\"\n busG = {}\n for x in G.nodes.items():\n if x[1].get('highway') == 'bus_stop':\n xy = []\n xy.append(x[1].get('osmid'))\n xy.append(x[1].get('x'))\n xy.append(x[1].get('y'))\n busG[x[1].get('osmid')] = xy\n\n return busG\n\n # ---MAIN---#\n\n query_str = '[out:json][timeout:180];node[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);way[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);(relation[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);>;);out;'\n response_json = overpass_request(data={'data': query_str}, timeout=180)\n pp = pprint.PrettyPrinter(indent=4)\n # start = 1847853709\n # end = 410472575\n # end = 3737148763\n # bus transit\n # start = 2110621974\n # end = 2085845884\n\n adjacent_list = my_dictionary()\n\n G = ox.load_graphml('Bus_Overpass.graphml')\n\n if case == 1:\n return request_busG()\n n, e = ox.graph_to_gdfs(G)\n # e.to_csv(\"Edge_test_busstop.csv\")\n if len(results) == 0:\n\n results = bus_details_all(results) # Details from LTA Datamall, extracting all details such as service no, bus stop number\n\n adjacent_list = bus_details_SD(adjacent_list) # From results, it extracts bus stop number and distance\n start_busstop = (G.nodes.get(start)).get('asset_ref')\n end_busstop = (G.nodes.get(end)).get('asset_ref')\n\n #Start finding common bus service within the start bus stop and end bus stop\n try:\n if \";\" in (G.nodes.get(start).get('route_ref')):\n start_rr = (G.nodes.get(start).get('route_ref')).split(\";\")\n else:\n start_rr = []\n start_rr.append((G.nodes.get(start).get('route_ref')))\n print(\"TEST - G.nodes.get(end): \", G.nodes.get(end))\n if \";\" in (G.nodes.get(end).get('route_ref')):\n end_rr = (G.nodes.get(end).get('route_ref')).split(\";\")\n else:\n end_rr = []\n end_rr.append((G.nodes.get(end).get('route_ref')))\n common = list(set(start_rr) & set(end_rr))\n except:\n return -1\n\n \"\"\"\n This method strictly emphasis on greedy algorithm. 
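(As a standalone illustration of the set-intersection idea this docstring describes, a stripped-down sketch might look as follows. All names here are hypothetical; `stops_of` maps a bus service number to its ordered stop codes, which the real code derives from the LTA DataMall routes.)

def pick_route(services_at_start, services_at_end, stops_of):
    # Zero transfers: some service calls at both the start and the end stop.
    direct = set(services_at_start) & set(services_at_end)
    if direct:
        return [sorted(direct)[0]]
    # One transfer: a service from the start shares a stop with a service
    # that reaches the end; any shared stop can act as the mid stop.
    for s in services_at_start:
        for e in services_at_end:
            if set(stops_of[s]) & set(stops_of[e]):
                return [s, e]
    return None  # no route with at most one transfer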
Thus it will prioritze the numbers of transit rather than distance\n Check if any common bus service within start and end bus stop.\n If found, route_list will capture the entire route of the common bus service \n No transit will occuer as it is a straight path, start busstop -> end busstop\n If not found, the program will proceed to find a common bus stop within the start and end bus services. \n Thus a transit will occur, start busstop -> mid busstop -> end busstop\n \"\"\"\n route_list = {}\n mid_route_list = {}\n # print(\"TEST - Start: \", start_busstop)\n # print(\"TEST - End: \", end_busstop)\n # print(\"TEST - start_rr: \", start_rr)\n # print(\"TEST - end_rr: \", end_rr)\n # print(\"TEST - Common: \", common)\n common_mid = []\n if len(common) == 0: #No common bus service found\n while(len(common_mid) == 0): #Start finding a common mid busstop\n rona_one = []\n rona_two = []\n for start_to_mid in start_rr: #Capture all common mid busstop\n print(\"TEST - start_to_mid: \", start_to_mid)\n for bus_sequence in adjacent_list.get(start_to_mid):\n rona_one.append(str(adjacent_list.get(start_to_mid).get(bus_sequence)[0]))\n for mid_to_end in end_rr:\n print(\"TEST - mid_to_end: \", mid_to_end)\n for bus_sequence in adjacent_list.get(mid_to_end):\n rona_two.append(str(adjacent_list.get(mid_to_end).get(bus_sequence)[0]))\n found_br = []\n print(\"TEST rona 1:\", rona_one)\n print (\"TEST rona 2:\", rona_two)\n found_br.append(start_to_mid+\";\"+mid_to_end)\n found_br.extend(list(set(rona_one)&set(rona_two)))\n common_mid.append(found_br)\n\n print(\"TEST - common_mid: \",common_mid)\n\n bus_service = start_to_mid\n temp_bus = []\n mid_busstop = 0\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Finding bus service for start busstop -> mid busstop\n for x in range (0, len(common_mid)):\n for i in common_mid[x]:\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop):\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(i) and approved == 1:\n mid_busstop = str(i)\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if mid_busstop != 0:\n break\n if str(start_busstop) not in temp_bus or str(mid_busstop) not in temp_bus: #If not found, continue to next loop\n continue\n temp_bus = delete_duplicate(temp_bus)\n mid_route_list[bus_service] = temp_bus\n\n for x in G.nodes: #After finding bus service to mid busstop, start finding path mid busstop to end busstop\n if G.nodes.get(x).get('asset_ref') == mid_busstop:\n if \";\" in (G.nodes.get(x).get('route_ref')):\n start_rr = (G.nodes.get(x).get('route_ref')).split(\";\")\n else:\n start_rr = []\n start_rr.append((G.nodes.get(start).get('route_ref')))\n\n common = list(set(start_rr) & set(end_rr))\n start_busstop = mid_busstop\n if start == 1847853709: #If bus service started from punggol interchange\n for bus_service in common:\n temp_bus = []\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Capture bus route\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop) and adjacent_list.get(bus_service).get(bus_sequence)[1] == 0:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(end_busstop) and approved == 1:\n 
temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if str(start_busstop) not in temp_bus or str(end_busstop) not in temp_bus:\n continue\n route_list[bus_service] = temp_bus\n else:\n for bus_service in common: #If bus service does not start from punggol interchange\n temp_bus = []\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Capture bus route\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop):\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(end_busstop) and approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if str(start_busstop) not in temp_bus or str(end_busstop) not in temp_bus:\n continue\n route_list[bus_service] = temp_bus\n\n \"\"\"\n After capturing all the bus serivce. A comparison is made in favor for the number of bus stops\n It will choose the least amount of bus stops and store in post_compare\n \"\"\"\n compare = [0, 100]\n if len(route_list.keys()) > 1:\n for i in route_list:\n if len(route_list.get(i)) < compare[1]:\n compare[0] = i\n compare[1] = len(route_list.get(i))\n else:\n for i in route_list:\n compare[0] = i\n compare[1] = len(route_list.get(i))\n post_compare = []\n print(\"TEST - Mid route list: \", mid_route_list)\n if len(mid_route_list) != 0:\n for i in mid_route_list:\n post_compare.append(i)\n route_list[i] = mid_route_list.get(i)\n post_compare.append(compare[0])\n else:\n post_compare.append(compare[0])\n\n\n\n \"\"\"\n Upon comparison, it will start capturing the nodes within the bus path and store in plot_list\n \"\"\"\n plot_list = []\n try:\n print(\"TEST - post_Compare: \", post_compare)\n print(\"TEST - Route list: \", route_list)\n for count in range (0, len(post_compare)):\n for x in route_list.get(str(post_compare[count])):\n for i in G.nodes:\n if str(G.nodes.get(i).get('asset_ref')) == str(x):\n plot_list.append(G.nodes.get(i).get('osmid'))\n break\n except:\n return -1\n edge_list = []\n punggol = (1.403948, 103.909048)\n \"\"\"\n It will generate out the list of node ID for the UI to plot\n \"\"\"\n a = ox.load_graphml('Bus_graph.graphml')\n for x in plot_list:\n edge_list.append(get_nearestedge_node(x,a,G))\n\n print(\"TEST - Plot list: \", plot_list)\n print(\"TEST - Edge list: \", edge_list)\n final_route_list = []\n count_stops = len(plot_list)\n for x in range (0, len(edge_list)-1):\n final_route_list.append(nx.shortest_path(a, edge_list[x], edge_list[x+1]))\n print(final_route_list)\n return final_route_list\n\n def bus_details_all():\n headers = {\n 'AccountKey': '84lbH3B/QeOkRK/CHm3c2w==',\n 'UniqueUserID': '8ecabd56-08a2-e843-0a7a-9944dccf124a',\n 'accept': 'application/json'\n }\n global new_results\n if __name__ == \"__main__\":\n results = []\n bus_stop_url = \"http://datamall2.mytransport.sg/ltaodataservice/BusRoutes\"\n\n while True:\n new_results = requests.get(bus_stop_url,headers=headers,params={'$skip': len(results)}).json()['value']\n if new_results == []:\n return results\n else:\n results += new_results\n def calculate_H(s_lat,s_lon,e_lat,e_lon): #y,x y,x\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 
6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist\n def walk_pathfinder(start_osmid, end_osmid):\n priority_Q = []\n heap_Q = []\n closed_routes = {}\n start_node = (0, None, start_osmid, 0)\n heapq.heappush(heap_Q, (start_node))\n closed_routes[start_osmid] = None\n while(True):\n temp = heapq.heappop(heap_Q)\n if temp[2] == end_osmid:\n temp_end = end_osmid\n path = []\n path.append(end_osmid)\n while (temp_end is not None):\n temp_list = closed_routes.get(temp_end)\n if temp_list is not None:\n temp_end = temp_list[0]\n path.append(temp_end)\n else:\n final_path = path[::-1]\n return final_path\n\n for counter, x in enumerate(list(G.edges())[0:]):\n if x[0] == temp[2]:\n if x[1] in closed_routes:\n continue\n else:\n length = list(G.edges.values())[counter].get(\"length\", None)\n current_length = length + temp[3]\n slat = radians(G.nodes.get(x[1]).get('y'))\n slon = radians(G.nodes.get(x[1]).get('x'))\n dist = 6371.01 * acos(sin(slat) * sin(elat) + cos(slat) * cos(elat) * cos(slon - elon))\n H = dist*1000\n if H < actual_dist + 100:\n F = current_length + H\n heapq.heappush(heap_Q, (F, x[0], x[1], current_length))\n closed_routes[x[1]] = [x[0], length]\n def delete_duplicate(x):\n return list(dict.fromkeys(x))\n def get_nearestedge_node(osm_id, y , x):\n temp_nearest_edge = ox.get_nearest_edge(G, (y, x))\n temp_1 = temp_nearest_edge[0].coords[0]\n temp_2 = temp_nearest_edge[0].coords[1]\n temp1_x = temp_1[0]\n temp1_y = temp_1[1]\n temp_1_distance = calculate_H(temp1_y, temp1_x, y, x)\n temp2_x = temp_2[0]\n temp2_y = temp_2[1]\n temp_2_distance = calculate_H(temp2_y, temp2_x, y, x)\n if temp_1_distance < temp_2_distance:\n return [temp_nearest_edge[1],temp_1_distance,temp1_x,temp1_y]\n else:\n return [temp_nearest_edge[2],temp_2_distance,temp2_x,temp2_y]\n def find_XY(node, graph):\n for x in graph.nodes.items():\n if x[1].get('osmid') == node:\n node_x = x[1].get('x')\n node_y = x[1].get('y')\n node_list = (node_y, node_x)\n return node_list\n\n start_time = time.time()\n\n # start = (103.9028788, 1.4044948)\n # end = (103.8999124, 1.4035004)\n # start = (103.9073345, 1.4060506)\n # end = (103.9172982, 1.3956014)\n #\n # start = (103.9073345, 1.4060506)\n # end = (103.9172982, 1.3956014)\n\n # start = (103.910650, 1.400818)\n # end = (103.910296, 1.399252)\n\n # start =(103.9024 , 1.4052)\n # end = (103.897332 , 1.402272)\n\n # start = (103.91256451606752, 1.402580108598971)\n # end = (103.91270935535432, 1.401523634635178)\n\n start_osmid = 0\n end_osmid = 0\n punggol = (1.403948, 103.909048)\n # G = ox.graph_from_point(punggol, distance=3500, truncate_by_edge=True, network_type=\"walk\")\n G = ox.load_graphml('AStar_walk.graphml')\n nodelist_G = list(G.nodes.values())\n\n \"\"\"\n Start finding start and end Node ID.\n If not found, find nearest node from the given coordinates by the user\n \"\"\"\n for i in range (0, len(nodelist_G)):\n if nodelist_G[i].get('y') == start[1] and nodelist_G[i].get('x') == start[0]:\n start_osmid = nodelist_G[i].get('osmid')\n if nodelist_G[i].get('y') == end[1] and nodelist_G[i].get('x') == end[0]:\n end_osmid = nodelist_G[i].get('osmid')\n\n if start_osmid == 0 or end_osmid == 0:\n start_osmid = ox.get_nearest_node(G, (start[1], start[0]))\n end_osmid = ox.get_nearest_node(G, (end[1], end[0]))\n\n \"\"\"\n To calculate distance from 2 x,y axis\n \"\"\"\n R = 6371.0\n snlat = radians(start[1])\n snlon = radians(start[0])\n elat = radians(end[1])\n elon = 
radians(end[0])\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist*1000\n edgelist_G = list(G.edges.values())\n\n\n \"\"\"\n After having start and end nodes.\n The program will set a radius of 200 meters from start and end nodes\n Every nodes within 200 meters and is a bus stop node will be captured and stored in end1 and end2\n If within 200meters no bus stop is found, it will have a constant increment of 200meters until bus stop if found on both sides\n \"\"\"\n bus_G = bus_layer(start_osmid,end_osmid, None, 1)\n start1 = start\n start2 = end\n\n for i in bus_G:\n temp_dis = calculate_H(bus_G.get(i)[2],bus_G.get(i)[1],start1[1], start1[0])\n bus_G.get(i).append(temp_dis)\n temp_dis = calculate_H(bus_G.get(i)[2], bus_G.get(i)[1], start2[1], start2[0])\n bus_G.get(i).append(temp_dis)\n end1 = []\n end2 = []\n limit = 0\n while (len(end1) == 0):\n limit += 200\n for i in bus_G:\n if bus_G.get(i)[3] < limit:\n temp = []\n temp.append(bus_G.get(i)[3])\n temp.append(bus_G.get(i)[0])\n temp.append(bus_G.get(i)[1])\n temp.append(bus_G.get(i)[2])\n hq.heappush(end1, temp)\n limit = 0\n while (len(end2) == 0):\n limit += 200\n for i in bus_G:\n if bus_G.get(i)[4] < limit:\n temp = []\n temp.append(bus_G.get(i)[4])\n temp.append(bus_G.get(i)[0])\n temp.append(bus_G.get(i)[1])\n temp.append(bus_G.get(i)[2])\n hq.heappush(end2, temp)\n\n \"\"\"\n The following codes will capture all nodes on the road that is closest to the bus stop\n It will be stored in path1 and path2.\n \"\"\"\n path1 = []\n for i in range (0, len(end1)):\n if 1847853709 == end1[i][1]:\n path1 = []\n path1.append([2019165453, 0, 0, 0, 0])\n break\n else:\n path1.append(get_nearestedge_node(end1[i][1], end1[i][3], end1[i][2]))\n\n for x in range (0, len(path1)):\n path1[x].append(calculate_H(path1[x][3],path1[x][2], start1[1], start1[0]))\n\n path2 = []\n for i in range (0, len(end2)):\n path2.append(get_nearestedge_node(end2[i][1], end2[i][3], end2[i][2]))\n for x in range (0, len(path2)):\n path2[x].append(calculate_H(path2[x][3],path2[x][2], start2[1], start2[0]))\n\n \"\"\"\n Bus results will store all data obtained from lta datamall\n It will start calculating all possibilities from all bus stop captured in end1 and end2\n Example, end1 contains [1,2,3], end2 contains [4,5,6]\n The following code will start to find a route from [1,4] , [1,5] , [1,6] then [2,4] , [2,5] , [2,6] then [3,4] , [3,5] , [3,6]\n Once all these route is found, it will proceed to compare the derived routes and capture the least amount of bus stop\n Example, [1,4] is the shortest route found\n Upon capturing the route with the least amount of bus stop, it will start to plot the walking A* algorithm from start point to bus stop\n Example, [Start point, 1] then [End point, 4]\n In this case, it will return [[Start point,1] , [1,4] , [End point,4]]\n \"\"\"\n # bus_results = bus_details_all()\n # with open(\"data\\ltadatamall.txt\",\"w+\") as filehandler:\n # json.dump(bus_results,filehandler)\n with open(\"data\\ltadatamall.txt\", \"r\") as filehandler:\n bus_results=json.load(filehandler)\n approved = 0\n path1_end_count = 0\n path2_end_count = 0\n for i in range (0, len(end1)):\n if 1847853709 == end1[i][1]:\n approved = 1\n final_route_list = []\n if approved == 1:\n count = 99\n for x in range (0, len(end2)):\n final_route_list = bus_layer(1847853709, end2[x][1], bus_results, None)\n try:\n if len(final_route_list) < count:\n path1[path1_end_count][0] = 4598672210\n 
path2_end_count = x\n temp_route_list = final_route_list.copy()\n count = len(temp_route_list)\n except:\n continue\n else:\n count = 99\n if len(final_route_list) == 0:\n for i in range (0, len(end1)):\n for x in range (0, len(end2)):\n final_route_list = bus_layer(end1[i][1], end2[x][1], bus_results, None)\n if final_route_list == -1:\n continue\n if len(final_route_list) < count:\n path1_end_count = i\n path2_end_count = x\n temp_route_list = final_route_list\n count = len(temp_route_list)\n\n path1 = walk_pathfinder(start_osmid, path1[path1_end_count][0])\n path2 = walk_pathfinder(end_osmid, path2[path2_end_count][0])\n walking_Path1 = []\n walking_Path2 = []\n bus_path = []\n walking_Path2.append((end[1], end[0]))\n for x in path1:\n walking_Path1.append(find_XY(x, G))\n for x in path2:\n walking_Path2.append(find_XY(x, G))\n\n #ox.plot_graph_routes(G, [path1, path2])\n plotting_route = []\n \"\"\"\n Upon capturing all the bus routes and walking routes, it will proceed to return the route for further processing\n \"\"\"\n\n a = ox.load_graphml('WalkBus_end_graph.graphml')\n try:\n for x in temp_route_list:\n plotting_route.extend(x)\n plotting_route = delete_duplicate(plotting_route)\n Tried = True\n except:\n return [[0], [0], [0]]\n try:\n #ox.plot_graph_route(a, plotting_route)\n for x in plotting_route:\n bus_path.append(find_XY(x, a))\n except:\n #ox.plot_graph_routes(a, temp_route_list)\n Tried = False\n for x in plotting_route:\n for i in x:\n bus_path.append(find_XY(i, a))\n\n # print(\"TEST - Start OSMID: \", start_osmid)\n # print(\"TEST - End OSMID: \", end_osmid)\n # print(\"TEST - Path 1: \" ,path1)\n # print(\"TEST - Path 1 (X,Y): \", walking_Path1)\n # print(\"TEST - Path 2: \" ,path2)\n # print(\"TEST - Path 2 (X,Y): \", walking_Path2)\n # print(\"TEST - BusRoute: \", plotting_route)\n # print(\"TEST - Bus Path (X,Y): \", bus_path)\n # ox.plot_graph_route(G, final_path, fig_height=10, fig_width=10)\n if Tried == True:\n return [walking_Path1, bus_path, walking_Path2]\n else:\n return [walking_Path1, bus_path, walking_Path2]\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "def _run(self):\n self._algorithm(self._list, self)", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def route_layout(self):\n self.route_pins()\n self.route_internal()\n self.route_supplies()", "def post_process(self, xout, params_out):\n # Should be used by all methods matching \"solve_*\"\n for post_processor in self.post_processors:\n xout, params_out = post_processor(xout, params_out)\n return xout, params_out", "def extractor_multiprocess(self):\n pool = 
multiprocessing.Pool()\n queue = multiprocessing.Queue()\n queue.put(\"safe\")\n end = len(next(os.walk(self.datadir))[2])\n error = 0\n\n extractor_iterator = ((directory)\n for directory in os.listdir(self.datadir))\n with jsonlines.open(self.output, \"w\") as f:\n for x in tqdm.tqdm(\n pool.imap_unordered(self.extract_unpack, extractor_iterator), total=end\n ):\n if not x:\n \"\"\"\n To input error class or function\n \"\"\"\n error += 1\n continue\n msg = queue.get()\n if msg == \"safe\":\n f.write(x)\n queue.put(\"safe\")\n\n pool.close()", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def apply(self, callback, route):", "def partition_processor(partitionlinechunks):\n\n model_pipe_object = joblib.load(SparkFiles.get(\"mmp_phase1_D2.clf\"))\n\n def set_predictions(x):\n segment = model_pipe_object.predict_proba(x)\n return segment\n\n df_with_nan = build_dataframe(partitionlinechunks)\n df_with_newline = df_with_nan.replace(u\"NULL\", pd.np.nan)\n behaviour_df = df_with_newline.replace(u\"\\\\N\", pd.np.nan)\n predictions_ser = set_predictions(behaviour_df)\n\n predictions_list = [value for value in [zip(predictions_ser.index, predictions_ser.loc[:,'A'], predictions_ser.loc[:,'Y'], predictions_ser.loc[:,'segment'], predictions_ser.loc[:,'model_version'])]]\n return iter(predictions_list)", "def compute(self):\n try:\n self.set_trackline()\n except:\n app.logger.warning(\"Could not process trackline results. URL may be invalid?\")\n\n if Job.exists(self.task_id, connection=redis_connection):\n job = Job.fetch(self.task_id, connection=redis_connection)\n self.task_result = unicode(job.meta.get(\"outcome\", \"\"))\n\n self.save()", "def split(self):\n st = time()\n tokens = self._build_args.tokens\n\n for token_split in IStorage._tokens_partitions(tokens, config.min_number_of_tokens,\n config.number_of_partitions):\n storage_id = uuid.uuid4()\n log.debug('assigning to %s %d tokens', str(storage_id), len(token_split))\n new_args = self._build_args._replace(tokens=token_split, storage_id=storage_id)\n self.__class__._store_meta(new_args)\n\n yield self.__class__.build_remotely(new_args)\n log.debug('completed split of %s in %f', self.__class__.__name__, time() - st)", "def _run_parallel(\n self, processes: int = 2, build_results: bool = True\n ) -> bool:\n # At first sight, it might seem simpler to use the multiprocessing Pool\n # Class rather than Processes and Queues. 
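(The comment above contrasts multiprocessing.Pool with explicit Processes and Queues; reduced to a minimal self-contained sketch, the latter pattern looks roughly like this. The names are illustrative and this is not the class's actual worker code.)

from multiprocessing import Process, Queue

def _work(chunk):
    return chunk * 2  # stand-in for the real per-chunk match computation

def worker(work_queue, done_queue):
    while True:
        chunk = work_queue.get()
        if chunk is None:  # poison pill means shutdown
            break
        done_queue.put(_work(chunk))

if __name__ == "__main__":
    work_queue, done_queue = Queue(), Queue()
    workers = [Process(target=worker, args=(work_queue, done_queue))
               for _ in range(2)]
    for w in workers:
        w.start()
    for chunk in range(8):
        work_queue.put(chunk)
    for _ in workers:
        work_queue.put(None)  # one pill per worker
    results = [done_queue.get() for _ in range(8)]
    for w in workers:
        w.join()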
However, this way is faster.\n work_queue = Queue() # type: Queue\n done_queue = Queue() # type: Queue\n workers = self._n_workers(processes=processes)\n\n chunks = self.match_generator.build_match_chunks()\n for chunk in chunks:\n work_queue.put(chunk)\n\n self._start_workers(workers, work_queue, done_queue, build_results)\n self._process_done_queue(workers, done_queue, build_results)\n\n return True", "def refine(self, refine_results):\n for phase_path, phase_refinement_results in refine_results.items():\n phase = self.phases[phase_path]\n tx = phase.options['transcription']\n gd = tx.grid_data\n\n need_refine = phase_refinement_results['need_refinement']\n if not phase.refine_options['refine'] or not np.any(need_refine):\n refine_results[phase_path]['new_order'] = gd.transcription_order\n refine_results[phase_path]['new_num_segments'] = gd.num_segments\n refine_results[phase_path]['new_segment_ends'] = gd.segment_ends\n continue\n\n # Refinement is needed\n gd = phase.options['transcription'].grid_data\n numseg = gd.num_segments\n\n refine_seg_idxs = np.where(need_refine)\n P = np.zeros(numseg)\n P[refine_seg_idxs] = np.log(self.error[phase_path][refine_seg_idxs] /\n phase.refine_options['tolerance']) / np.log(\n gd.transcription_order[refine_seg_idxs])\n P = np.ceil(P).astype(int)\n\n if gd.transcription == 'gauss-lobatto':\n odd_idxs = np.where(P % 2 != 0)\n P[odd_idxs] += 1\n\n new_order = gd.transcription_order + P\n B = np.ones(numseg, dtype=int)\n\n raise_order_idxs = np.where(gd.transcription_order + P <= phase.refine_options['max_order'])\n split_seg_idxs = np.where(gd.transcription_order + P > phase.refine_options['max_order'])\n\n new_order[raise_order_idxs] = gd.transcription_order[raise_order_idxs] + P[raise_order_idxs]\n new_order[split_seg_idxs] = phase.refine_options['min_order']\n\n B[split_seg_idxs] = np.around((gd.transcription_order[split_seg_idxs] +\n P[split_seg_idxs]) / phase.refine_options['min_order']).astype(int)\n\n new_order = np.repeat(new_order, repeats=B)\n new_num_segments = int(np.sum(B))\n new_segment_ends = split_segments(gd.segment_ends, B)\n\n refine_results[phase_path]['new_order'] = new_order\n refine_results[phase_path]['new_num_segments'] = new_num_segments\n refine_results[phase_path]['new_segment_ends'] = new_segment_ends\n\n tx.options['order'] = new_order\n tx.options['num_segments'] = new_num_segments\n tx.options['segment_ends'] = new_segment_ends\n tx.init_grid()", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n # Fetch answer from task\n answer = next_task()\n self.task_queue.task_done()\n # Put into result queue\n self.result_queue.put(answer)\n return", "def online_server_garbler_phase(env, pipe, storage):\n\n global clienf_inf_times, num_infs_completed, request_times, waiting_times, offline_times\n while True:\n request = yield pipe.get()\n start_time = env.now\n waiting_time = start_time - request['request_time'] \n waiting_times.append(waiting_time)\n \n before_gc = env.now\n yield storage.get(NUM_RELU)\n offline_times.append(env.now - before_gc)\n\n\n for i in range(len(utils.on_client_compute_relu)): # for i in range(nonlinear layers)...\n yield env.timeout(utils.on_client_write_linear[i] / bandwidth) # client sends x-r to server\n yield env.timeout(utils.on_server_compute_linear[i]) # server performs linear eval (conv)\n yield env.timeout(utils.on_server_write_relu[i] / bandwidth) # server sends encoded labels to 
client\n yield env.timeout(utils.on_client_compute_relu[i]) # client evaluates garbled circuit\n\n \n # send prediction to client\n yield env.timeout(utils.on_server_write_pred / bandwidth) # server sends prediction to client\n\n num_infs_completed +=1\n client_inf_times.append(env.now-start_time)", "def build_chunks(results, metadata):\n\n for result in results:\n chunk = connector_pb2.DataChunk()\n for field in metadata.fieldInfo:\n set_value(result, field.name, chunk)\n yield chunk", "def run(self):\n # For each microenvironment that the person visits\n while self.routing_node_id != 'end':\n # Get the next node, and this activity class and arguments.\n self.routing_node_id, activity_class, kwargs = self.routing.get_next_activity(self.routing_node_id)\n\n # Add this instance to the arguments list\n kwargs['person'] = self\n\n # Create a parametrised instance of the activity\n this_activity_class = activity_class(self.simulation_params, **kwargs)\n \n # set an event flag to mark end of activity and call the activity class\n finished_activity = self.env.event() \n self.env.process(this_activity_class.start(finished_activity))\n yield finished_activity", "def finalize(self):\n routes = self.routes.copy()\n for child in self._children:\n routes.extend(list(child.tree_routes))\n\n # Make a new Map() out of all of the routes.\n rule_map = Map([route.create_rule() for route in routes])\n self._route_map = rule_map\n\n self.finalized = True", "def recv_messages(self):\n while True:\n b = unwrap_read(self.sock.recv(4096))\n msgs = self.parser.feed(b)\n if msgs:\n for msg in msgs:\n self.router.incoming(msg)\n return", "def __call__(self, ctx: ResolutionContext) -> Coroutine[None, None, RT]:", "def run(self):\n import sacc\n import healpy\n import treecorr\n # Load the different pieces of data we need into\n # one large dictionary which we accumulate\n data = {}\n self.load_tomography(data)\n self.load_shear_catalog(data)\n self.load_random_catalog(data)\n # This one is optional - this class does nothing with it\n self.load_lens_catalog(data)\n # Binning information\n self.read_nbin(data)\n\n # Calculate metadata like the area and related\n # quantities\n meta = self.calculate_metadata(data)\n\n # Choose which pairs of bins to calculate\n calcs = self.select_calculations(data)\n\n sys.stdout.flush()\n \n # This splits the calculations among the parallel bins\n # It's not necessarily the most optimal way of doing it\n # as it's not dynamic, just a round-robin assignment,\n # but for this case I would expect it to be mostly fine\n results = []\n for i,j,k in self.split_tasks_by_rank(calcs):\n results += self.call_treecorr(data, i, j, k)\n\n # If we are running in parallel this collects the results together\n results = self.collect_results(results)\n\n # Save the results\n if self.rank==0:\n self.write_output(data, meta, results)", "def _worker(pipelines: List[Pipeline], source: Queue, sink: Queue):\n pipelines = list(pipelines)\n for i, p in enumerate(pipelines):\n if isinstance(p, ConvertT2S):\n pipelines[i] = ConvertT2S()\n\n def processor(article):\n for p in pipelines:\n article = p(article)\n return article\n\n while True:\n article = source.get()\n if article == 'EXIT':\n return\n article = list(processor(article))\n sink.put(article)", "def process_solution(self, solution: dict) -> (list, float):\n start_time = time() * 1000\n nodes = self.application.nodes()\n start = np.min(nodes)\n # fill route with None values\n route = [None] * len(self.application)\n # get nodes from sample\n # NOTE: 
Prevent duplicate node entries by enforcing only one occurrence per node along route\n logging.info(str(solution.items()))\n\n for (node, timestep), val in solution.items():\n if val:\n logging.info((node, timestep))\n if val and (node not in route):\n route[timestep] = node\n\n # check whether every timestep has only 1 node flagged\n for i in nodes:\n relevant_nodes = []\n relevant_timesteps = []\n for (node, timestep) in solution.keys():\n if node == i:\n relevant_nodes.append(solution[(node, timestep)])\n if timestep == i:\n relevant_timesteps.append(solution[(node, timestep)])\n if sum(relevant_nodes) != 1 or sum(relevant_timesteps) != 1:\n # timestep or nodes have more than 1 or 0 flags\n return None, round(time() * 1000 - start_time, 3)\n\n # check validity of solution\n if sum(value == 1 for value in solution.values()) > len(route):\n logging.warning(\"Result is longer than route! This might be problematic!\")\n return None, round(time() * 1000 - start_time, 3)\n\n # run heuristic replacing None values\n if None in route:\n # get not assigned nodes\n nodes_unassigned = [node for node in list(nodes) if node not in route]\n nodes_unassigned = list(np.random.permutation(nodes_unassigned))\n for idx, node in enumerate(route):\n if node is None:\n route[idx] = nodes_unassigned[0]\n nodes_unassigned.remove(route[idx])\n\n # cycle solution to start at provided start location\n if start is not None and route[0] != start:\n # rotate to put the start in front\n idx = route.index(start)\n route = route[idx:] + route[:idx]\n\n # print route\n parsed_route = ' ->\\n'.join([f' Node {visit}' for visit in route])\n logging.info(f\"Route found:\\n{parsed_route}\")\n return route, round(time() * 1000 - start_time, 3)", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def mirror_batch(self, representations):\n filehandles = []\n requests = []\n representations_by_response_url = dict()\n \n for representation in representations:\n if not representation.mirror_url:\n representation.mirror_url = representation.url\n # Turn the mirror URL into an s3.amazonaws.com URL.\n bucket, filename = self.bucket_and_filename(\n representation.mirror_url\n )\n response_url = self.url(bucket, filename)\n representations_by_response_url[response_url] = (\n representation)\n bucket, remote_filename = self.bucket_and_filename(\n representation.mirror_url)\n fh = representation.content_fh()\n filehandles.append(fh)\n request = self.pool.upload(remote_filename, fh, bucket=bucket,\n content_type=representation.media_type)\n requests.append(request)\n # Do the upload.\n\n def process_response(response):\n representation = representations_by_response_url[response.url]\n if response.status_code == 200:\n source = representation.local_content_path\n if representation.url != representation.mirror_url:\n source = representation.url\n if source:\n print \"MIRRORED %s => %s\" % (\n source, representation.mirror_url)\n else:\n print \"MIRRORED %s\" % representation.mirror_url\n representation.set_as_mirrored()\n else:\n representation.mirrored_at = None\n representation.mirror_exception = \"Status code %d: %s\" % (\n response.status_code, response.content)\n\n try:\n for response in self.pool.as_completed(requests):\n process_response(response)\n except ConnectionError, e:\n # This is a transient error; we can just try again.\n print e\n pass\n except HTTPError, e:\n # Probably also a transient error. 
In any case\n # there's nothing we can do about it but try again.\n print e\n pass\n\n # Close the filehandles\n for fh in filehandles:\n fh.close()", "def routes(self) -> List[Tuple[int, bytes]]:\n raise NotImplementedError() # pragma: no cover", "def process_response(self, response):\n json = response.json()\n for resp in json[\"responses\"]:\n sub_qry = self._current_query.get(int(resp[\"id\"]))\n self.context.pending_request().map_json(resp[\"body\"], sub_qry.return_type)", "def chunk(self, count):\n page = 1\n results = self.for_page(page, count).get()\n\n while len(results) > 0:\n yield results\n\n page += 1\n\n results = self.for_page(page, count).get()", "def run(self):\n super().run()\n echo = self.echo\n local = self.local\n remote = self.remote\n transport = Transceiver(local)\n transport.set_timeout(0.5)\n self.__result: list[Entry] = []\n\n while True:\n try:\n packet = transport.recv(None)\n params = frame.deserialize(packet)\n seq = params[\"seq\"]\n total = params[\"total\"]\n t_master = params[\"t_master\"]\n infinite = params[\"infinite\"]\n payload = params[\"payload\"]\n\n t_slave = time.time()\n if echo:\n data_send = frame.serialize(infinite, seq, total, t_master, t_slave, payload)\n transport.send(remote, data_send)\n t_ul = (t_slave - t_master) * 1000\n self.add_result(Entry(seq, total, t_ul, 0))\n print(f\"seq = {seq}, ul = {t_ul:.2f} ms, payload: {hex_str(payload)}\")\n if frame.is_end(params):\n print(f\"receive last packet!\")\n break\n except socket.timeout:\n continue\n except KeyboardInterrupt:\n break", "def dispatch(self, queue):\n context = zmq.Context()\n socket = noBlockREQ(context)\n \n seedsQ1 = Queue()\n seedsQ2 = Queue()\n for address in self.seeds:\n seedsQ1.put(address)\n\n connectT = Thread(target=connectToSeeds, name=\"Connect to Seeds\", args=(socket, seedsQ1))\n connectT.start()\n\n toDisconnectQ = Queue()\n disconnectT = Thread(target=disconnectToSeeds, name=\"Disconnect to Seeds\", args=(socket, toDisconnectQ))\n disconnectT.start()\n\n pFindSeeds = Process(target=findSeeds, name=\"Find Seeds\", args=(set(self.seeds), [seedsQ1], [toDisconnectQ], log, 2000, 10, seedsQ2))\n pFindSeeds.start()\n\n pInput = Process(target=getSeedFromFile, name=\"Get seed from file\", args=(seedsQ1, seedsQ2))\n pInput.start()\n\n graph = {}\n depth = 1\n data = {}\n url_mapper = {url:f\"url_{i}\" for i, url in enumerate(self.urls)}\n \n src = set()\n while True: \n new_data = {}\n while len(self.urls):\n try:\n url = self.urls[0]\n self.urls.pop(0)\n self.urls.append(url)\n with counterSocketReq:\n socket.send_json((\"URL\", self.uuid, url))\n log.debug(f\"Send {url}\", \"dispatch\")\n response = socket.recv_pyobj()\n assert isinstance(response, tuple), f\"Bad response, expected <tuple> find {type(response)}\"\n assert len(response) == 2, \"bad response size\"\n assert response[0] == 'RESPONSE', \"Unexpected response format\"\n _, package = response\n log.debug(f\"Received a package with size: {len(package)}\", \"dispatch\")\n for recv_url, html in package.items():\n try:\n idx = self.urls.index(recv_url)\n log.info(f\"{recv_url} {GREEN}OK{RESET}\", \"dispatch\")\n new_data[recv_url] = html\n self.urls.pop(idx)\n except ValueError:\n log.debug(f'Unnecesary {recv_url}', 'dispatch')\n except AssertionError as e:\n log.error(e, \"dispatch\")\n except zmq.error.Again as e:\n log.debug(e, \"dispatch\")\n except Exception as e:\n log.error(e, \"dispatch\")\n time.sleep(0.8)\n \n log.info(f'Depth {depth} done', 'dispatch')\n for url, html in new_data.items():\n 
graph[url] = set()\n try:\n text = html.decode()\n soup = BeautifulSoup(html, 'html.parser')\n tags = soup.find_all(valid_tags)\n new_urls = [['src', 'href'][tag.has_attr('href')] for tag in tags]\n changes = []\n for i, attr in enumerate(new_urls):\n url_dir = urljoin(url, tags[i][attr])\n graph[url].add(url_dir)\n if url_dir not in url_mapper:\n url_mapper[url_dir] = f'url_{len(url_mapper)}'\n changes.append((tags[i][attr], url_mapper[url_dir]))\n if attr == 'src' or tags[i].name == 'link':\n src.add(url_dir)\n continue\n self.urls.append(url_dir)\n html = change_html(text, changes).encode()\n except UnicodeDecodeError:\n log.debug(f'{url} is not decodeable', 'dispatch')\n except: # BeautifulSoup strange exceptions related to its logger\n pass\n new_data[url] = html\n data.update(new_data)\n self.urls = set(self.urls)\n self.urls.difference_update(self.old)\n self.old.update(self.urls)\n self.urls = list(self.urls)\n \n if depth > self.depth:\n break\n if depth == self.depth:\n src.difference_update(self.old)\n self.old.update(src)\n self.urls = list(src)\n depth += 1\n log.info(f\"Number of URLs to be requested for download: {RED}{len(self.urls)}{RESET}\", \"dispatch\")\n \n log.info(f\"Starting to write data\", \"dispatch\")\n for i, url in enumerate(self.originals):\n try:\n res = HtmlResponse(url=url, body=data[url], encoding='utf8')\n base = res.css('title::text')[0].get()\n except:\n base = f\"web_page_{i}\"\n try:\n os.makedirs(f'downloads/{base}-data')\n except:\n pass\n writer(f'downloads/{base}-data', url, set(), data, url_mapper, graph) \n \n html = data[url]\n if len(graph[url]) > 0:\n text = data[url].decode()\n changes = []\n for dep in graph[url]:\n name = url_mapper[dep]\n changes.append((name, f'{base}-data/{name}'))\n html = change_html(text, changes).encode()\n with open(f'downloads/{base}', 'wb') as fd:\n fd.write(html)\n \n log.info(f\"Dispatcher:{self.uuid} has completed its URLs successfully\", \"dispatch\")\n log.debug(f\"Dispatcher:{self.uuid} disconnecting from system\", \"dispatch\")\n #disconnect\n\n queue.put(True)\n pFindSeeds.terminate()\n pInput.terminate()", "def do_process_user_file_chunks(\n page_size: int, error_handler: ErrorHandler, position: int, participant: Participant\n):\n \n # FIXME: this is a gross hack to force some time related safety, which is only ever used deep\n # inside of data processing.\n common_constants.LATEST_POSSIBLE_DATA_TIMESTAMP = \\\n int(time.mktime((timezone.now() + timedelta(days=90)).timetuple()))\n \n # Declare a defaultdict of a tuple of 2 lists\n all_binified_data = defaultdict(lambda: ([], []))\n ftps_to_remove = set()\n # The ThreadPool enables downloading multiple files simultaneously from the network, and continuing\n # to download files as other files are being processed, making the code as a whole run faster.\n # In principle we could make a global pool that is free-memory aware.\n pool = ThreadPool(CONCURRENT_NETWORK_OPS)\n survey_id_dict = {}\n \n # A Django query with a slice (e.g. .all()[x:y]) makes a LIMIT query, so it\n # only gets from the database those FTPs that are in the slice.\n # print(participant.as_dict())\n print(\"Number Files To Process:\", participant.files_to_process.exclude(deleted=True).count())\n print(f\"will process {page_size} files.\")\n print(\"current count processing within this run:\", position)\n \n # TODO: investigate, comment. ordering by path results in files grouped by type and\n # chronological order, which is perfect for download efficiency... right?
would it break anything?\n files_to_process = participant.files_to_process \\\n .exclude(deleted=True) #.order_by(\"s3_file_path\", \"created_on\")\n \n # This pool pulls in data for each FileForProcessing on a background thread and instantiates it.\n # Instantiating a FileForProcessing object queries S3 for the File's data. (network request)\n files_for_processing = pool.map(\n FileForProcessing, files_to_process[position: position + page_size], chunksize=1\n )\n \n for file_for_processing in files_for_processing:\n with error_handler:\n process_one_file(\n file_for_processing, survey_id_dict, all_binified_data, ftps_to_remove\n )\n pool.close()\n pool.terminate()\n \n # there are several failure modes and success modes, information for what to do with different\n # files percolates back to here. Delete various database objects accordingly.\n more_ftps_to_remove, number_bad_files, earliest_time_bin, latest_time_bin = upload_binified_data(\n all_binified_data, error_handler, survey_id_dict, participant\n )\n ftps_to_remove.update(more_ftps_to_remove)\n \n # Update the data quantity stats, if it actually processed any files\n if len(files_to_process) > 0:\n calculate_data_quantity_stats(participant,\n earliest_time_bin_number=earliest_time_bin,\n latest_time_bin_number=latest_time_bin)\n \n # Actually delete the processed FTPs from the database\n FileToProcess.objects.filter(pk__in=ftps_to_remove).delete()\n return number_bad_files", "def post(self, location_name):\n # get grass and actinia module lists\n module_list = createModuleList(self)\n pc_list = createProcessChainTemplateList()\n # TODO: find out size before ?\n grass_module_list = []\n actinia_module_list = []\n\n for module in module_list:\n grass_module_list.append(module['id'])\n\n for module in pc_list:\n actinia_module_list.append(module['id'])\n\n rdc = self.preprocess(has_json=True, location_name=location_name)\n\n if rdc:\n rdc.set_storage_model_to_file()\n\n new_pc = []\n for module in rdc.request_data['list']:\n if \"module\" in module:\n name = module[\"module\"]\n if name == \"importer\" or name == \"exporter\":\n new_pc.append(module)\n elif name in grass_module_list:\n new_pc.append(module)\n elif name in actinia_module_list:\n module_pc = fillTemplateFromProcessChain(module)\n new_pc.extend(module_pc)\n else:\n msg = \"Module %s is not of type importer, exporter, grass-module or an actinia-module.\" % name\n return make_response(jsonify(SimpleResponseModel(\n status=\"error\",\n message=msg\n )), 409)\n else:\n new_pc.append(module)\n\n rdc.request_data['list'] = new_pc\n\n enqueue_job(self.job_timeout, start_job, rdc)\n\n html_code, response_model = pickle.loads(self.response_data)\n return make_response(jsonify(response_model), html_code)", "def process_deferred_queue(self):\n\n self.process_queue(self.deferred_queue)\n\n if self.depth_counter == 0:\n self.process_queue(self.complex_deferred_queue)", "def process(self):" ]
[ "0.6792837", "0.5855412", "0.5633914", "0.548248", "0.54527795", "0.532048", "0.53184384", "0.52948433", "0.52264017", "0.5213624", "0.51902413", "0.51709", "0.51507914", "0.5139252", "0.51372004", "0.5116195", "0.5072712", "0.507154", "0.50373584", "0.50307095", "0.5008679", "0.4995035", "0.49627793", "0.4961484", "0.49454665", "0.49254796", "0.49208376", "0.49043077", "0.48994264", "0.48747247", "0.48695818", "0.48561996", "0.48322028", "0.4831495", "0.47910652", "0.4788954", "0.47760087", "0.47542816", "0.4743844", "0.4735418", "0.47332266", "0.47332266", "0.47313505", "0.47216228", "0.47176906", "0.4701077", "0.46839064", "0.468193", "0.4679314", "0.46724427", "0.4670058", "0.4669401", "0.4667635", "0.46644023", "0.4661991", "0.4657582", "0.46568626", "0.46545163", "0.46531245", "0.46483", "0.46472332", "0.46397546", "0.46365276", "0.46354595", "0.46320483", "0.4623566", "0.46185514", "0.46181092", "0.46129256", "0.46072066", "0.45992795", "0.45967567", "0.459483", "0.45931423", "0.4591657", "0.45873547", "0.45872748", "0.4579626", "0.45761064", "0.4575869", "0.4569023", "0.45686823", "0.4567389", "0.45665282", "0.454292", "0.45427376", "0.45395207", "0.45364398", "0.4532831", "0.45317727", "0.4529151", "0.45252255", "0.45230108", "0.45227224", "0.45127425", "0.45070723", "0.45027938", "0.45024824", "0.4502193", "0.44999668" ]
0.6839286
0
Merge the routes calculated in each separate process into a single feature class. Create an empty final output feature class and populate it using InsertCursor, as this tends to be faster than using the Merge geoprocessing tool.
def _post_process_route_fcs(self): # Create the final output feature class desc = arcpy.Describe(self.route_fcs[0]) helpers.run_gp_tool( LOGGER, arcpy.management.CreateFeatureclass, [ os.path.dirname(self.out_routes), os.path.basename(self.out_routes), "POLYLINE", self.route_fcs[0], # template feature class to transfer full schema "SAME_AS_TEMPLATE", "SAME_AS_TEMPLATE", desc.spatialReference ] ) # Insert the rows from all the individual output feature classes into the final output fields = ["SHAPE@"] + [f.name for f in desc.fields] with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member for fc in self.route_fcs: for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member cur.insertRow(row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _export_to_feature_class(self, chunk_definition):\r\n # Make output gdb\r\n rt_workspace = self._create_output_gdb()\r\n\r\n # Export routes\r\n output_routes = os.path.join(rt_workspace, f\"Routes_{chunk_definition[0]}_{chunk_definition[1]}\")\r\n self.logger.debug(f\"Exporting Route Routes output to {output_routes}...\")\r\n self.solve_result.export(arcpy.nax.RouteOutputDataType.Routes, output_routes)\r\n\r\n # Export stops\r\n output_stops = os.path.join(rt_workspace, f\"Stops_{chunk_definition[0]}_{chunk_definition[1]}\")\r\n self.logger.debug(f\"Exporting Route Stops output to {output_stops}...\")\r\n self.solve_result.export(arcpy.nax.RouteOutputDataType.Stops, output_stops)\r\n\r\n # Join the input ID fields to Routes\r\n # The new FirstStopID and LastStopID fields were added at Pro 3.1 / Enterprise 11.1 to make relationships\r\n # between IDs/OIDs in output classes are more reliable. Use these fields if they exist in the output.\r\n # Otherwise, use FirstStopOID and LastStopOID, which are mostly reliable but not perfect. For best results, use\r\n # the most recent ArcGIS software.\r\n if \"FirstStopID\" in self.solve_result.fieldNames(arcpy.nax.RouteOutputDataType.Routes):\r\n id_field_prefix = \"ID\"\r\n else:\r\n id_field_prefix = \"OID\"\r\n if self.reverse_direction:\r\n first_stop_field = self.dest_unique_id_field_name\r\n second_stop_field = self.origin_unique_id_field_name\r\n else:\r\n first_stop_field = self.origin_unique_id_field_name\r\n second_stop_field = self.dest_unique_id_field_name\r\n with arcpy.EnvManager(overwriteOutput=True):\r\n helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.JoinField,\r\n [output_routes, f\"FirstStop{id_field_prefix}\", output_stops, \"ObjectID\", [first_stop_field]]\r\n )\r\n helpers.run_gp_tool(\r\n self.logger,\r\n arcpy.management.JoinField,\r\n [output_routes, f\"LastStop{id_field_prefix}\", output_stops, \"ObjectID\", [second_stop_field]]\r\n )\r\n\r\n self.job_result[\"outputRoutes\"] = output_routes", "def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. 
Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. 
Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)", "def post_process():\n for route in os.listdir(GFR_ROUTES_LOCATION):\n if os.path.isfile(MISSING_LOCATION + route):\n # If the route is missing, output the reference data with correct OSM tags.\n\n copyfile(MISSING_LOCATION + route, OUTPUT_LOCATION + route)\n add_property(OUTPUT_LOCATION + route, 'error_type', 'missing')\n elif os.path.isfile(DIFF_MISSING_LOCATION + route) and os.path.isfile(DIFF_WRONG_LOCATION + route) \\\n and merge_differences(route, DIFF_MISSING_LOCATION + route, DIFF_WRONG_LOCATION + route,\n OUTPUT_LOCATION + route):\n # If there's a geometrical difference, combine the two difference files and output it.\n\n add_property(OUTPUT_LOCATION + route, 'error_type', 'difference')\n elif os.path.isfile(TAGS_LOCATION + route):\n # When there's no geometrical difference, output the OSM data possibly containing missing tags.\n\n copyfile(TAGS_LOCATION + route, OUTPUT_LOCATION + route)\n else:\n raise Exception(\"No output file could be generated for route: \" + route)\n\n copy_to_site()\n\n # Export a last updated timestamp\n with open('last_updated', 'w') as fp:\n fp.write(str(int(time.time() * 1000)))", "def _merge_image_features(feature_class_type: Type[Union[kapture.Keypoints,\n kapture.Descriptors,\n kapture.GlobalFeatures]],\n feature_type: str,\n features_list: Union[List[Optional[kapture.Keypoints]],\n List[Optional[kapture.Descriptors]],\n List[Optional[kapture.GlobalFeatures]]],\n features_paths: List[str],\n output_path: str,\n tar_handlers: List[TarCollection]\n ) -> Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]:\n assert len(features_list) > 0\n assert len(features_paths) == len(features_list)\n\n # find no none value\n val = [(i, d) for i, d in enumerate(features_list) if d is not None]\n assert len(val) > 0\n\n merged_features = val[0][1]\n for j, (i, features) in enumerate(val):\n assert isinstance(features, feature_class_type)\n assert features.type_name == merged_features.type_name\n assert features.dtype == merged_features.dtype\n assert features.dsize == 
merged_features.dsize\n if feature_class_type == kapture.Descriptors or feature_class_type == kapture.GlobalFeatures:\n assert not isinstance(features, kapture.Keypoints) # IDE type check help\n assert not isinstance(merged_features, kapture.Keypoints) # IDE type check help\n assert features.metric_type == merged_features.metric_type\n if feature_class_type == kapture.Descriptors:\n assert isinstance(features, kapture.Descriptors) # IDE type check help\n assert isinstance(merged_features, kapture.Descriptors) # IDE type check help\n assert features.keypoints_type == merged_features.keypoints_type\n for name in features:\n if j > 0 and name in merged_features:\n getLogger().warning(f'{name} was found multiple times.')\n else:\n merged_features.add(name)\n if output_path:\n # TODO: uses kapture.io.features_list.get_image_features_dirpath()\n in_path = kapture.io.features.get_features_fullpath(feature_class_type,\n feature_type,\n features_paths[i],\n name,\n tar_handlers[i])\n out_path = kapture.io.features.get_features_fullpath(feature_class_type,\n feature_type,\n output_path,\n name)\n if in_path != out_path:\n # skip actual copy if file does not actually move.\n os.makedirs(os.path.dirname(out_path), exist_ok=True)\n if isinstance(in_path, str):\n shutil.copy(in_path, out_path)\n else:\n # in_path is a tuple [str, TarHandler]\n # keypoints are not stored in a file, have to read them to be able to copy them\n array = in_path[1].get_array_from_tar(in_path[0], features.dtype, features.dsize)\n array_to_file(out_path, array)\n return merged_features", "def gtfs_routes(gtfs, output_f):\n\n\t# Load up the stop times so we can find which are the best routes.\n\t#TODO\n\tstop_times_file = [x for x in gtfs.namelist() if 'stop_times' in x][0]\n\n\tstoptimes_c = csv.reader((gtfs.open(stop_times_file, 'r')))\n\theader = stoptimes_c.next()\n\ttrip_id_col = header.index('trip_id')\n\tarrtime_col = header.index('arrival_time')\n\tdeptime_col = header.index('departure_time')\n\tstopseq_col = header.index('stop_sequence')\n\ttrip_times = {}\n\tfor row in stoptimes_c:\n\t\tif row[trip_id_col] not in trip_times:\n\t\t\t# earliest seq, latest seq, earliest seq dep time, latest seq dep time\n\t\t\ttrip_times[row[trip_id_col]] = [None, None, None, None]\n\n\t\tarrtime = time_as_timedelta(row[arrtime_col])\n\t\tdeptime = time_as_timedelta(row[deptime_col])\n\t\tif arrtime is None or deptime is None:\n\t\t\t# bad data, skip!\n\t\t\tcontinue\n\t\tseq = int(row[stopseq_col])\n\n\t\t# Find if this is an earlier item in the sequence\n\t\tif trip_times[row[trip_id_col]][0] is None or trip_times[row[trip_id_col]][0] > seq:\n\t\t\ttrip_times[row[trip_id_col]][0] = seq\n\t\t\ttrip_times[row[trip_id_col]][2] = deptime\n\n\t\t# Find if this is an later item in the sequence\n\t\tif trip_times[row[trip_id_col]][1] is None or trip_times[row[trip_id_col]][1] < seq:\n\t\t\ttrip_times[row[trip_id_col]][1] = seq\n\t\t\ttrip_times[row[trip_id_col]][3] = arrtime\n\n\t# Load the shapes into a map that we can lookup.\n\t# We should do all the geometry processing here so that we only have to do\n\t# this once-off.\n\t#TODO\n\tshapes_file = [x for x in gtfs.namelist() if 'shapes' in x][0]\n\tshapes_c = csv.reader(swallow_windows_unicode(gtfs.open(shapes_file, 'r')))\n\n\theader = shapes_c.next()\n\tshape_id_col = header.index('shape_id')\n\tshape_lat_col = header.index('shape_pt_lat')\n\tshape_lng_col = header.index('shape_pt_lon')\n\tshape_seq_col = header.index('shape_pt_sequence')\n\tshape_dist_col = 
header.index('shape_dist_traveled') if 'shape_dist_traveled' in header else None\n\n\tshapes = {}\n\tshape_lengths = {}\n\tfor row in shapes_c:\n\t\tif row[shape_id_col] not in shapes:\n\t\t\tshapes[row[shape_id_col]] = {}\n\n\t\tshapes[row[shape_id_col]][int(row[shape_seq_col])] = (Decimal(row[shape_lng_col]), Decimal(row[shape_lat_col]))\n\n\t\t# Calculate length according to GTFS\n\t\t# This could also be calculated by the geometry, but we trust GTFS, right...\n\t\tif shape_dist_col is not None and row[shape_dist_col]:\n\t\t\tlength = Decimal(row[shape_dist_col])\n\t\t\tif row[shape_id_col] not in shape_lengths or shape_lengths[row[shape_id_col]] < length:\n\t\t\t\tshape_lengths[row[shape_id_col]] = length\n\n\t# translate the shapes into a LineString for use by the GeoJSON module\n\tfor shape_id in shapes.iterkeys():\n\t\tshape_keys = shapes[shape_id].keys()\n\t\tshape_keys.sort()\n\t\tshape = []\n\t\tfor ordinal in shape_keys:\n\t\t\tshape.append(shapes[shape_id][ordinal])\n\n\t\tshapes[shape_id] = shape\n\n\t# Make a matching dict between routes and shapes\n\ttrips = {}\n\ttrips_ref = {}\n\troute_time = {}\n\n\t#TODO\n\ttrips_file = [x for x in gtfs.namelist() if 'trips' in x][0]\n\n\ttrips_c = csv.reader(swallow_windows_unicode(gtfs.open(trips_file, 'r')))\n\theader = trips_c.next()\n\troute_id_col = header.index('route_id')\n\tshape_id_col = header.index('shape_id')\n\ttrip_id_col = header.index('trip_id')\n\tfor row in trips_c:\n\t\t# reference count the shapes\n\t\tif row[route_id_col] not in trips_ref:\n\t\t\t# route is unknown, create dict\n\t\t\ttrips_ref[row[route_id_col]] = {}\n\t\t\troute_time[row[route_id_col]] = trip_times[row[trip_id_col]]\n\n\t\tif row[shape_id_col] not in trips_ref[row[route_id_col]]:\n\t\t\t# shape is unknown, create counter\n\t\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] = 0\n\n\t\t# increment counter\n\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] += 1\n\n\t# now we're done, iterate through the reference-counters and find the best\n\t# shape\n\tfor route_id, candidate_shapes in trips_ref.iteritems():\n\t\tpopular_shape, popular_shape_refs = None, 0\n\t\tfor shape_id, refs in candidate_shapes.iteritems():\n\t\t\tif refs > popular_shape_refs:\n\t\t\t\tpopular_shape, popular_shape_refs = shape_id, refs\n\n\t\t# now we should have the route's shape\n\t\tassert popular_shape is not None, 'Couldn\\'t find a shape for route %r' % route_id\n\t\ttrips[route_id] = popular_shape\n\n\t# Cleanup unused variables\n\tdel trip_times\n\n\t# lets setup our output file\n\toutput_layer = geojson.FeatureCollection([])\n\t# assume WGS84 CRS\n\toutput_layer.crs = geojson.crs.Named('urn:ogc:def:crs:OGC:1.3:CRS84')\n\n\t# now we have all the shapes available, translate the routes\n\t#TODO\n\troutes_file = [x for x in gtfs.namelist() if 'routes' in x][0]\n\n\troutes_c = csv.reader(swallow_windows_unicode(gtfs.open(routes_file, 'r')))\n\theader = routes_c.next()\n\troute_id_col = header.index('route_id')\n\n\tfor row in routes_c:\n\t\t# make dict of other properties\n\t\tprops = dict()\n\t\tfor i, h in enumerate(header):\n\t\t\tif row[i] != '':\n\t\t\t\tprops[h] = row[i]\n\n\t\tif row[route_id_col] not in trips:\n\t\t\t# Route has no trips!\n\t\t\tprint \"Warning: route has no trips, skipping: %r\" % (row,)\n\t\t\tcontinue\n\n\t\tprops['shape_id'] = trips[row[route_id_col]]\n\t\tprops['shape_refs'] = trips_ref[row[route_id_col]][props['shape_id']]\n\t\tif shape_dist_col is not None and len(shape_lengths) > 0:\n\t\t\tprops['shape_length'] = 
shape_lengths[props['shape_id']]\n\t\tprops['duration_sec'] = (route_time[row[route_id_col]][3] - route_time[row[route_id_col]][2]).total_seconds()\n\n\t\toutput_layer.features.append(geojson.Feature(\n\t\t\tgeometry=geojson.LineString(\n\t\t\t\tcoordinates=shapes[trips[row[route_id_col]]]\n\t\t\t),\n\t\t\tproperties=props,\n\t\t\tid=row[route_id_col]\n\t\t))\n\n\t# now flush the GeoJSON layer to a file.\n\tgeojson.dump(output_layer, output_f, cls=DecimalEncoder)", "def mergeGeometries(self):\n self.geometry = reduce(lambda p1,p2 : p1.union(p2) ,map(lambda tax : tax.biomeGeometry,self.taxonomies))\n return self.geometry", "def process_detail_pages(self):\r\n n_pools = os.cpu_count() // 2\r\n with Pool(n_pools) as pool:\r\n result = pool.map(scrape_detail_page, self.base_features_list)\r\n pool.close()\r\n pool.join()\r\n\r\n self.all_features_list = result", "def add_building_output_locations2(self,areasList,start,end,step): \n print \"Getting buildings locations...\"\n \n dictionaries = []\n dictionary = {}\n \n for a in areasList:\n \n dictionaries.append(self.grid.get_building_output_locations(a[0],a[1]))\n \n for dict in dictionaries:\n for row in dict.iteritems(): \n dictionary[row[0]] = row[1] \n\n print \"Number of buildings = %s\" % (len(dictionary))\n\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def __feature_set__(self):\r\n import numpy as np\r\n import datetime\r\n import time\r\n cols_norm = [col for col in self.columns]\r\n cols_lower = [col.lower() for col in self.columns]\r\n fields = []\r\n features = []\r\n date_fields = []\r\n _geom_types = {\r\n arcgis.geometry._types.Point : \"esriGeometryPoint\",\r\n arcgis.geometry._types.Polyline : \"esriGeometryPolyline\",\r\n arcgis.geometry._types.MultiPoint : \"esriGeometryMultipoint\",\r\n arcgis.geometry._types.Polygon : \"esriGeometryPolygon\"\r\n }\r\n if self.sr is None:\r\n sr = {'wkid' : 4326}\r\n else:\r\n sr = self.sr\r\n fs = {\r\n \"objectIdFieldName\" : \"\",\r\n \"globalIdFieldName\" : \"\",\r\n \"displayFieldName\" : \"\",\r\n \"geometryType\" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],\r\n \"spatialReference\" : sr,\r\n \"fields\" : [],\r\n \"features\" : []\r\n }\r\n if 'objectid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n elif 'fid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]\r\n elif 'oid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]\r\n else:\r\n self['OBJECTID'] = list(range(1, self.shape[0] + 1))\r\n res = self.__feature_set__\r\n del self['OBJECTID']\r\n return res\r\n if 'objectIdFieldName' in fs:\r\n fields.append({\r\n \"name\" : fs['objectIdFieldName'],\r\n \"type\" : \"esriFieldTypeOID\",\r\n \"alias\" : fs['objectIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))\r\n if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:\r\n fields.append({\r\n \"name\" : fs['globalIdFieldName'],\r\n \"type\" : \"esriFieldTypeGlobalID\",\r\n \"alias\" : fs['globalIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))\r\n elif 'globalIdFieldName' in fs and \\\r\n len(fs['globalIdFieldName']) == 0:\r\n del fs['globalIdFieldName']\r\n if self._geometry_column_name 
in cols_norm:\r\n cols_norm.pop(cols_norm.index(self._geometry_column_name))\r\n for col in cols_norm:\r\n try:\r\n idx = self[col].first_valid_index()\r\n col_val = self[col].loc[idx]\r\n except:\r\n col_val = \"\"\r\n if isinstance(col_val, (str, np.str)):\r\n l = self[col].str.len().max()\r\n if str(l) == 'nan':\r\n l = 255\r\n\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeString\",\r\n \"length\" : int(l),\r\n \"alias\" : col\r\n })\r\n if fs['displayFieldName'] == \"\":\r\n fs['displayFieldName'] = col\r\n elif isinstance(col_val, (datetime.datetime,\r\n pd.Timestamp,\r\n np.datetime64,\r\n pd.datetime)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDate\",\r\n \"alias\" : col\r\n })\r\n date_fields.append(col)\r\n elif isinstance(col_val, (np.int32, np.int16, np.int8)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSmallInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (int, np.int, np.int64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (float, np.float64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDouble\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (np.float32)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSingle\",\r\n \"alias\" : col\r\n })\r\n fs['fields'] = fields\r\n for row in self.to_dict('records'):\r\n geom = {}\r\n if self._geometry_column_name in row:\r\n geom = row[self._geometry_column_name]\r\n del row[self._geometry_column_name]\r\n for f in date_fields:\r\n try:\r\n row[f] = int(row[f].to_pydatetime().timestamp() * 1000)\r\n except:\r\n row[f] = None\r\n features.append(\r\n {\r\n \"geometry\" : dict(geom),\r\n \"attributes\" : row\r\n }\r\n )\r\n del row\r\n del geom\r\n fs['features'] = features\r\n return fs", "def add_route_to_map(gdf_best_route: gpd.GeoDataFrame, basemap):\n #create a list of colors\n colors = ['orange', 'darkred', 'darkblue', 'purple', 'darkgreen', '#364e4a', 'cadetblues']\n \n # make a feature group for every route\n # merge them to a feature group\n for i, row in gdf_best_route.iterrows():\n fg = folium.FeatureGroup(f\"Route {row['order']} from {row['start_city']} to {row['end_city']}\")\n # add the simple route\n fg.add_child(folium.PolyLine(\n locations=row[\"folium_geom\"], \n popup=f\"From {row['start_city']} to {row['end_city']}\",\n tooltip=f\"Route {row['order']}\",\n color=colors[i], \n dash_array='10',\n weight=4))\n basemap.add_child(fg)\n \n return None", "def close(self): \n \n# web_pdb.set_trace()\n # Process all the features\n for feature in self.meta_features:\n self.mapping_errors = [] # Reset the list that contains the errors\n # Extract attribute value to process\n att_2_map=feature.getAttribute('att_2_map')\n \n # Extract the status of the published parameters (passed through attributes)\n bool_english_refresh = FME_utils.test_attribute_value(feature, ENGLISH_REFRESH, YES, False)\n bool_french_refresh = FME_utils.test_attribute_value(feature, FRENCH_REFRESH, YES, False)\n bool_code_refresh = FME_utils.test_attribute_value(feature, CODE_REFRESH, YES, False)\n bool_error_not_mapped = FME_utils.test_attribute_value(feature, ERROR_NOT_MAPPED, YES, False)\n \n # Extract the attributes name to process. 
Can be a list or a single attribute\n index_attributes = FME_utils.extract_attribute_list(feature, att_2_map)\n \n if index_attributes:\n # Loop over the attributes\n for index, att_name in index_attributes:\n # Extract the attribute to process\n att_value = FME_utils.feature_get_attribute(feature, att_name, False)\n att_value = att_value.lower().rstrip(\" \").lstrip(\" \")\n \n # Extract the row from CSV dictionary\n try:\n csv_row = self.csv_features[att_value]\n # Map the value according to the requirements\n if bool_english_refresh:\n feature.setAttribute(att_name, csv_row.value_english)\n if bool_french_refresh:\n feature.setAttribute(att_name+\"_fr\", csv_row.value_french)\n if bool_code_refresh:\n feature.setAttribute(att_name+\"_code\", csv_row.code_value)\n except KeyError:\n # Value not found in the dictionary. Add the error in the list\n str_error = (\"Unable to map value: {0} for attribute: {1}.\".format(att_value,att_name))\n self.mapping_errors.append(str_error)\n else:\n # The requested attribute is not found\n str_error = ('The requested attribute to map (ATT_TO_MAP): {} is not present on the FME feature.'.format(att_2_map))\n self.mapping_errors.append(str_error)\n \n # Append the errors in the FME error attribute\n if bool_error_not_mapped:\n # Mapping errors are registered in FME attribute\n if self.mapping_errors:\n # Extract the number of existing error\n err_count = FME_utils.max_index_attribute_list(feature, \"mapping_errors{}.error\")\n err_count += 1\n for i, mapping_error in enumerate(self.mapping_errors):\n # Append the error to the existing list of error\n feature.setAttribute(\"mapping_errors{%d}.error\"%(i+err_count), mapping_error)\n else:\n # Mapping error list empty ===> Nothing to do\n pass\n else:\n # Mapping error are not registered in FME attribute\n pass\n \n # Output the feature to FME after processing all the attributes\n self.pyoutput(feature)\n \n return", "def processAlgorithm(self, parameters, context, feedback):\r\n\r\n # Retrieve the feature source and sink. 
The 'dest_id' variable is used\r\n # to uniquely identify the feature sink, and must be included in the\r\n # dictionary returned by the processAlgorithm function.\r\n source = self.parameterAsSource(\r\n parameters,\r\n self.INPUT,\r\n context\r\n )\r\n source = self.parameterAsSource(\r\n parameters,\r\n self.INPUTBUFFER,\r\n context\r\n )\r\n source = self.parameterAsSource(\r\n parameters,\r\n self.INPUT2,\r\n context\r\n )\r\n source = self.parameterAsSource(\r\n parameters,\r\n self.INPUT3,\r\n context\r\n )\r\n source = self.parameterAsSource(\r\n parameters,\r\n self.INPUT4,\r\n context\r\n )\r\n source = self.parameterAsSource(\r\n parameters,\r\n self.INPUTSIZE,\r\n context\r\n )\r\n \r\n \r\n \r\n \r\n \r\n ## Obtain filepath from selected address layer, could not extend this functionality to work for actual layer inputs,\r\n ## resulted in instant crash if using the filePathFile, or 'INPUT1-4' as input for processing tools\r\n filePathFile = self.parameterDefinition('INPUT').valueAsPythonString(parameters['INPUT'], context)\r\n in_path = os.path.dirname(filePathFile[1:]) + '/'\r\n out_path = in_path \r\n \r\n \r\n bufferdist = self.parameterAsDouble(parameters, self.INPUTBUFFER,\r\n context)\r\n \r\n sigsize = self.parameterAsDouble(parameters, self.INPUTSIZE,\r\n context)\r\n \r\n\r\n ## Defining filepaths and filenames\r\n #############################################################################\r\n ## NEED TO BE MANUALLY DEFINED BY USER IF USING FILES WITH DIFFERENT NAMES ##\r\n #############################################################################\r\n ##Script set up for test 20 address file\r\n \r\n addressFile = \"AddressesTest.shp\"\r\n\r\n ## Uncomment below line for use with full address file (NOTE: SCRIPT WILL NOT COMPLETE DUE TO DATASET SIZE)\r\n ## addressFile = \"Addresses.shp\"\r\n\r\n ## Uses original parkland file as no geometry issues using zonal method\r\n parklandFile = \"Parkland2.shp\"\r\n \r\n hospitalFile = \"Hospitals.shp\"\r\n groceryFile = \"Grocery.shp\"\r\n \r\n \r\n ##############################################################################\r\n ##############################################################################\r\n #############################################################################\r\n\r\n ## Add address layer to view\r\n addressLayer = iface.addVectorLayer(in_path + addressFile, addressFile[:-4], 'ogr')\r\n\r\n ## Define parameter dictionary, and run buffer tool\r\n ## Output result to /Output/ folder\r\n bufferDict = {\r\n 'INPUT': addressLayer, \r\n 'DISTANCE': bufferdist, \r\n 'SEGMENTS': 10, \r\n 'ENDCAPSTYLE': 0, \r\n 'JOIN_STYLE':0,\r\n 'MITER_LIMIT':2,\r\n 'DISSOLVE_RESULT': False, \r\n 'OUTPUT': (out_path + 'buffer_' + addressFile)\r\n }\r\n processing.run('native:buffer', bufferDict)\r\n \r\n ## Define filename of buffered file\r\n bufferedFile = ('buffer_' + addressFile) \r\n\r\n ## Load buffered layer and hopsital layer to view\r\n bufferedLayer = iface.addVectorLayer(out_path + bufferedFile, bufferedFile[:-4], 'ogr') \r\n hospitalLayer = iface.addVectorLayer(in_path + hospitalFile, hospitalFile[:-4], 'ogr')\r\n\r\n ## Define parameter dictionary, and run count points in polygons tool\r\n ## Output result to /Output/ folder\r\n HcountDict = {\r\n 'CLASSFIELD' : '', \r\n 'FIELD' : 'Hcount', \r\n 'OUTPUT' : out_path + 'bufferHcount.shp', \r\n 'POINTS' : hospitalLayer, \r\n 'POLYGONS' : bufferedLayer, \r\n 'WEIGHT' : ''\r\n } \r\n processing.run('native:countpointsinpolygon', HcountDict)\r\n\r\n\r\n ## 
Define filename of buffers w/ hospital count, add layer to view\r\n HcountFile = 'bufferHcount.shp'\r\n HcountLayer = iface.addVectorLayer(out_path + HcountFile, HcountFile[:-4], 'ogr')\r\n\r\n ## Add grocery store layer to view\r\n groceryLayer = iface.addVectorLayer(in_path + groceryFile, groceryFile[:-4], 'ogr')\r\n\r\n ## Define parameter dictionary, and run points in polygons tool\r\n ## Output result to /Output/ folder\r\n GcountDict = {\r\n 'CLASSFIELD' : '', \r\n 'FIELD' : 'Gcount', \r\n 'OUTPUT' : out_path + 'bufferHGcount.shp', \r\n 'POINTS' : groceryLayer, \r\n 'POLYGONS' : HcountLayer, \r\n 'WEIGHT' : ''\r\n } \r\n processing.run('native:countpointsinpolygon', GcountDict)\r\n\r\n ## Define filename of buffers with hospital and store counts, and add layer to view \r\n HGcountFile = 'bufferHGcount.shp'\r\n HGcountLayer = iface.addVectorLayer(out_path + HGcountFile, HGcountFile[:-4], 'ogr')\r\n\r\n ## Add parkland layer to view\r\n parklandLayer = iface.addVectorLayer(in_path + parklandFile, parklandFile[:-4], 'ogr')\r\n\r\n ## Define parameter dictionary, and run buffer tool\r\n ## Output result to /Output/ folder\r\n parkbufferDict = {\r\n 'INPUT': parklandLayer, \r\n 'DISTANCE': 20, \r\n 'SEGMENTS': 10, \r\n 'ENDCAPSTYLE': 0, \r\n 'JOIN_STYLE':0,\r\n 'MITER_LIMIT':2,\r\n 'DISSOLVE_RESULT': True, \r\n 'OUTPUT': (out_path + 'buffer_' + parklandFile)\r\n }\r\n processing.run('native:buffer', parkbufferDict)\r\n\r\n ## Define filename of buffered dissolved parkland, add layer to view\r\n parkBufferFile = 'buffer_' + parklandFile\r\n parkBufferLayer = iface.addVectorLayer(out_path + parkBufferFile, parkBufferFile[:-4], 'ogr')\r\n\r\n ## Define parameter dictionary, and run negative buffer tool\r\n ## Output result to /Output/ folder \r\n parkNegbufferDict = {\r\n 'INPUT': parkBufferLayer, \r\n 'DISTANCE': -20, \r\n 'SEGMENTS': 10, \r\n 'ENDCAPSTYLE': 0, \r\n 'JOIN_STYLE':0,\r\n 'MITER_LIMIT':2,\r\n 'DISSOLVE_RESULT': False, \r\n 'OUTPUT': (out_path + 'dissolve_' + parklandFile)\r\n }\r\n processing.run('native:buffer', parkNegbufferDict)\r\n\r\n ## Define filename of de-buffered dissolved parkland, add layer to view\r\n parkDissolveFile = 'dissolve_' + parklandFile\r\n parkDissolveLayer = iface.addVectorLayer(out_path + parkDissolveFile, parkDissolveFile[:-4], 'ogr')\r\n\r\n ## Loop through the features in the de-buffered parkland layer, define parameter dictionary, and run multipart to singlepart tool\r\n ## Output result to /Output/ folder\r\n multiDict = { \r\n 'INPUT' : parkDissolveLayer, \r\n 'OUTPUT' : out_path + 'final_' + parklandFile\r\n }\r\n processing.run('native:multiparttosingleparts', multiDict)\r\n \r\n ## Define filename of de-buffered dissolved parkland, add layer to view\r\n finalParkFile = 'final_' + parklandFile\r\n finalParkLayer = iface.addVectorLayer(out_path + finalParkFile, finalParkFile[:-4], 'ogr')\r\n\r\n ## Define parameter dictionary, and run add geometry columns tool\r\n ## Output result to /Output/ folder\r\n geomDict = { \r\n 'CALC_METHOD' : 0, \r\n 'INPUT' : finalParkLayer, \r\n 'OUTPUT' : out_path + 'geom_'+ parklandFile\r\n }\r\n processing.run('qgis:exportaddgeometrycolumns', geomDict)\r\n\r\n ## Define filename of parkland w/ geometry, add layer to view\r\n parkGeomFile = 'geom_'+ parklandFile\r\n parkGeomLayer = iface.addVectorLayer(out_path + parkGeomFile, parkGeomFile[:-4], 'ogr')\r\n\r\n ## Define parameter dictionary, and run extract by attribute tool\r\n ## Output result to /Output/ folder\r\n extractDict = { \r\n 'FIELD' : 'area', \r\n 
'INPUT' : out_path + parkGeomFile, \r\n 'OPERATOR' : 3, \r\n 'OUTPUT' : out_path + 'extract_'+ parklandFile, \r\n 'VALUE' : sigsize \r\n }\r\n processing.run('native:extractbyattribute', extractDict)\r\n \r\n ## Define extracted \"significant\" parkland filename, and add layer to view\r\n parkExtractFile = 'extract_'+ parklandFile\r\n parkExtractLayer = iface.addVectorLayer(out_path + parkExtractFile, parkExtractFile[:-4], 'ogr')\r\n\r\n extentDict = { \r\n 'INPUT' : out_path+parkExtractFile, \r\n 'OUTPUT' : out_path+'extent.shp', \r\n 'ROUND_TO' : 0 \r\n }\r\n processing.run('native:polygonfromlayerextent', extentDict)\r\n\r\n extentFile = out_path+'extent.shp'\r\n extentLayer = iface.addVectorLayer(extentFile, extentFile[:-4], 'ogr')\r\n extent = extentLayer.getFeatures()\r\n for feature in extent:\r\n extentAttributes = feature.attributes()\r\n extentString = str(str(extentAttributes[0])+','+str(extentAttributes[2])+','+str(extentAttributes[1])+','+str(extentAttributes[3])+'['+str(extentLayer.crs().authid())+']') \r\n print(extentString)\r\n\r\n rasterizeDict = {\r\n 'BURN' : 1, \r\n 'DATA_TYPE' : 5, \r\n 'EXTENT' : extentString, \r\n 'EXTRA' : '', \r\n 'FIELD' : '', \r\n 'HEIGHT' : 5000, \r\n 'INIT' : None, \r\n 'INPUT' : parkExtractLayer, \r\n 'INVERT' : False, \r\n 'NODATA' : 0, \r\n 'OPTIONS' : '', \r\n 'OUTPUT' : out_path+'Raster.tif', \r\n 'UNITS' : 0, \r\n 'WIDTH' : 5000\r\n }\r\n processing.run('gdal:rasterize', rasterizeDict)\r\n\r\n rasterFile = out_path+'Raster.tif'\r\n\r\n ## Define parameter dictionary, and run clip tool with extracted parkland layer as overlay on address buffers\r\n ## Output result to /Output/ folder\r\n zonalDict = { \r\n 'COLUMN_PREFIX' : 'Z_',\r\n 'INPUT_VECTOR' : out_path + HGcountFile,\r\n 'INPUT_RASTER' : rasterFile, \r\n 'OUTPUT' : out_path+'zonal_'+addressFile, \r\n 'RASTER_BAND' : 1 \r\n }\r\n processing.run('native:zonalhistogram', zonalDict)\r\n\r\n ## Define filename of clipped buffer layer, add layer to view\r\n zonalFile = 'zonal_'+ addressFile\r\n zonalLayer = iface.addVectorLayer(out_path + zonalFile, zonalFile[:-4], 'ogr')\r\n\r\n\r\n ## Create QgsdataProvider object, used for updating data fields in clipped buffers w/ geometry layer\r\n zonal_provider = zonalLayer.dataProvider()\r\n\r\n ## Use QgsdataProvider.addAttributes method to add empty QgsField objects corresponding to the normalised scores, and liveability score\r\n ## to the geomLayer file, QVariant.Double method used to create fields of data type \"double\"\r\n zonal_provider.addAttributes([QgsField('HospNorm',QVariant.Double)])\r\n zonal_provider.addAttributes([QgsField('GrocNorm',QVariant.Double)])\r\n zonal_provider.addAttributes([QgsField('ParkNorm',QVariant.Double)])\r\n zonal_provider.addAttributes([QgsField('LiveScore',QVariant.Double)])\r\n\r\n ## Update these new data fields and commit changes\r\n zonalLayer.updateFields()\r\n zonalLayer.commitChanges\r\n\r\n ## Define indexes for each count and area value in the geomLayer. 
This is to be able to easily obtain max value for each field, for normalisation equation\r\n hospIndex = zonalLayer.fields().indexFromName('Hcount')\r\n grocIndex = zonalLayer.fields().indexFromName('Gcount')\r\n parkIndex = zonalLayer.fields().indexFromName('Z_1')\r\n\r\n ## Get features of geomLayer for subsequent loop\r\n zonalAddresses = zonalLayer.getFeatures()\r\n\r\n ## Using a for loop, update the previously created fields with normalised scores, and lockdown liveability score\r\n ## This is done mathematically using each features value and the max value for that field\r\n for address in zonalAddresses:\r\n \r\n zonalLayer.startEditing()\r\n try: \r\n address['HospNorm'] = address['Hcount']/(zonalLayer.maximumValue(hospIndex))\r\n except:\r\n address['HospNorm'] = 0\r\n try:\r\n address['GrocNorm'] = address['Gcount']/(zonalLayer.maximumValue(grocIndex))\r\n except:\r\n address['GrocNorm'] = 0\r\n try:\r\n address['ParkNorm'] = address['Z_1']/(zonalLayer.maximumValue(parkIndex))\r\n except:\r\n address['ParkNorm'] = 0\r\n address['LiveScore'] = (address['HospNorm']+address['GrocNorm']+address['ParkNorm'])/3\r\n \r\n zonalLayer.updateFeature(address)\r\n\r\n ## Commit changes to data fields\r\n zonalLayer.commitChanges\r\n\r\n ## Define parameter dictionary, and run join tool to join final attributes back to original layer\r\n ## Output result to /Output/ folder\r\n joinDict = { \r\n 'INPUT' : in_path + addressFile,\r\n 'FIELD' : 'PFI',\r\n 'INPUT_2' : out_path + zonalFile,\r\n 'FIELD_2' : 'PFI', \r\n 'FIELDS_TO_COPY' : ['Hcount','Gcount','area','HospNorm','GrocNorm','ParkNorm','LiveScore'],\r\n 'METHOD' : 1, \r\n 'DISCARD_NONMATCHING' : False,\r\n 'PREFIX' : '',\r\n 'OUTPUT' : out_path+'final_'+ addressFile, \r\n 'NON_MATCHING': out_path+'nonmatching.shp'\r\n }\r\n processing.run('native:joinattributestable', joinDict)\r\n \r\n ## Define filename of final layer, and add final layer to view \r\n finalFile = 'final_'+ addressFile\r\n finalLayer = iface.addVectorLayer(out_path + finalFile, finalFile[:-4], 'ogr')\r\n\r\n\r\n\r\n return {}", "def combine_features(feature1, feature2):\n new_feature = Feature()\n new_feature.coord = feature1.coord.copy()\n for x in feature2.coord:\n new_feature.coord.append(x)\n return new_feature", "def execute(self, parameters, messages):\r\n arcpy.AddMessage(\"\\nPerforming overall merge...\")\r\n logging.info(\"Starting mergeAreas.py script...\\n\")\r\n # Define variables from parameters\r\n overlapWorkspace = parameters[0].valueAsText\r\n gdbWorkspace = parameters[1].valueAsText\r\n featWorkspace = parameters[2].valueAsText\r\n\r\n # Determine list of total overlap, no overlap and to merge feature classes in overlap feature dataset workspace to process.\r\n arcpy.env.workspace = overlapWorkspace\r\n mergeList = arcpy.ListFeatureClasses(\"*_toMerge\")\r\n totalOverlapList = arcpy.ListFeatureClasses(\"*_TotalOverlap\")\r\n noOverlapList = arcpy.ListFeatureClasses(\"*_noOverlap\")\r\n if len(mergeList) > 0:\r\n arcpy.AddMessage(\"Workspace contains the following \" + str(len(mergeList)) + \" feature classes to merge: \" + str(mergeList))\r\n\r\n # Organize toMerge feature classes by date\r\n mergeDictbyDate = {}\r\n for fc in mergeList:\r\n fcPath = os.path.join(overlapWorkspace, fc)\r\n fcDate = fc.split(\"_\")[1]\r\n mergeDictbyDate[fcDate] = [fcPath]\r\n\r\n # Append no overlap feature classes toMerge feature classes by date\r\n for noOverlapFc in noOverlapList:\r\n noOverlapPath = os.path.join(overlapWorkspace, noOverlapFc)\r\n noOverlapDate = 
            mergeDictbyDate[noOverlapDate].append(noOverlapPath)

    # Organize dark targets feature classes by date
    arcpy.env.workspace = featWorkspace
    fcList = arcpy.ListFeatureClasses()
    fcDictByDate = {}
    for fc in fcList:
        fcPath = os.path.join(featWorkspace, fc)
        fcSplit = fc.split("_")
        if fcSplit[1] in fcDictByDate:
            fcDictByDate[fcSplit[1]].append(fcPath)
        else:
            fcDictByDate[fcSplit[1]] = [fcPath]

    # Iterate through dark targets acquisition dates and check for acquisition dates with more than a single feature class (for merging)
    for key in fcDictByDate:
        if len(fcDictByDate[key]) > 1:

            # Iterate through feature classes within acquisition date
            for fc in fcDictByDate[key]:
                fcSplit = fc.split("_")

                # Check for and add acquisition date toMerge feature classes if not already present
                if fcSplit[len(fcSplit)-2] not in mergeDictbyDate:
                    mergeDictbyDate[fcSplit[len(fcSplit)-2]] = [fc]

                # Check for and add feature class toMerge feature classes if not already present within acquisition date
                else:
                    fcValue = fc.split("\\")[len(fc.split("\\"))-1] + "_noOverlap"
                    fcValuePath = os.path.join(overlapWorkspace, fcValue)
                    if fcValuePath not in mergeDictbyDate[key]:
                        mergeDictbyDate[key].append(fc)

    # Iterate through dark targets acquisition dates to compile lists of feature classes to merge
    for key in mergeDictbyDate:
        arcpy.AddMessage("\nMerging feature classes in " + key + "...")
        logging.info("Processing merges for acquisition date '%s'", key)
        mergeList = []

        # Iterate through feature classes within acquisition date and append them to merge list
        for item in mergeDictbyDate[key]:
            mergeList.append(item)

        # Merge feature classes in merge list into single feature class for the acquisition date
        outputDissolveString = "RS2_" + key + "_toDissolve"
        outputDissolve = os.path.join(overlapWorkspace, outputDissolveString)
        arcpy.Merge_management(mergeList, outputDissolve)
        logging.info("Merge: '%s' created from merging the following feature classes: '%s'", outputDissolve, str(mergeList))

        # Dissolve attribute duplicates and rename fields
        arcpy.AddMessage("Dissolving...")
        dissolveLyr = "dissolveLyr"
        outputMergeString = "RS2_" + key + "_merged"
        outputMerge = os.path.join(gdbWorkspace, outputMergeString)
        dissolveFields = ["Pid", "RsatID"]
        fieldList = arcpy.ListFields(outputDissolve)
        statsFields = []
        for field in fieldList:
            if "OBJECTID" in field.name or "FID" in field.name or "Shape" in field.name or field.name in dissolveFields or field.name == "ID":
                continue
            statsField = [field.name, "FIRST"]
            statsFields.append(statsField)
        arcpy.MakeFeatureLayer_management(outputDissolve, dissolveLyr)
        logging.info("Make Feature Layer: '%s' layer created from '%s' feature class", dissolveLyr, outputDissolve)
        arcpy.Dissolve_management(dissolveLyr, outputMerge, dissolveFields, statsFields)
        logging.info("Dissolve: '%s' feature class created from '%s' layer dissolve", outputMerge, dissolveLyr)
        fieldList = arcpy.ListFields(outputMerge)
        for field in fieldList:
            if field.name.startswith("FIRST_"):
                newName = field.name[6:]
                arcpy.AlterField_management(outputMerge, field.name, newName)
        # Update targetID with combined target ID for overlapping features
        arcpy.AddMessage("Updating targetID...")
        finalOutputString = "RS2_" + key
        overlapBool = False

        # Iterate through total overlap feature classes
        for fc in totalOverlapList:

            # Check for merged acquisition date feature class containing overlapping features
            # (by finding the equivalent total overlap feature class); slice off the
            # "_TotalOverlap" suffix rather than using fc.strip(), which removes characters
            if finalOutputString == fc[:-len("_TotalOverlap")]:
                overlapBool = True

                # Perform spatial join to access targetID field from total overlap feature class
                totalOverlapFc = os.path.join(overlapWorkspace, fc)
                finalOutput = os.path.join(gdbWorkspace, finalOutputString)
                fieldmappings = arcpy.FieldMappings()
                fieldmappings.addTable(outputMerge)
                fldmap_TARGETID = arcpy.FieldMap()
                fldmap_TARGETID.addInputField(totalOverlapFc, "targetID")
                fld_TARGETID = fldmap_TARGETID.outputField
                fld_TARGETID.name = "targetID_1"
                fldmap_TARGETID.outputField = fld_TARGETID
                fieldmappings.addFieldMap(fldmap_TARGETID)
                arcpy.SpatialJoin_analysis(outputMerge, totalOverlapFc, finalOutput, "#", "#", fieldmappings)
                logging.info("Spatial Join: '%s' feature class created by joining '%s' with '%s'", finalOutput, outputMerge, totalOverlapFc)

                # Update targetID with combined targetID determined from total overlap feature class
                expression = "copyTargetID(!targetID!, !targetID_1!)"
                codeblock = """def copyTargetID(targetID, comb_targetID):
    if comb_targetID is None:
        return targetID
    else:
        return comb_targetID"""
                arcpy.CalculateField_management(finalOutput, "targetID", expression, "PYTHON_9.3", codeblock)
                logging.info("Calculate Field: 'targetID' field value calculated for '%s' feature class", finalOutput)

                # Delete extraneous fields
                arcpy.DeleteField_management(finalOutput, "targetID_1")
                arcpy.DeleteField_management(finalOutput, "Join_Count")
                arcpy.DeleteField_management(finalOutput, "TARGET_FID")

        # Rename merged acquisition date feature class to appropriate name if it does not contain overlapping targets
        if overlapBool is False:
            arcpy.Rename_management(outputMerge, finalOutputString)
            logging.info("Rename: '%s' feature class renamed to '%s'", outputMerge, finalOutputString)

        # Delete unneeded process outputs (dissolve and merge outputs)
        arcpy.Delete_management(outputDissolve)
        logging.info("Delete: '%s' feature class deleted", outputDissolve)
        if arcpy.Exists(outputMerge):
            arcpy.Delete_management(outputMerge)
            logging.info("Delete: '%s' feature class deleted", outputMerge)

        logging.info("Processing for merges for acquisition date '%s' complete\n", key)
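    # Why slicing instead of str.strip("_TotalOverlap"): strip() removes any of the listed
    # *characters* from both ends of the string, not the literal suffix, e.g.
    #   "RS2_alpha_TotalOverlap".strip("_TotalOverlap")  ->  "RS2_alph"   (extra chars eaten)
    #   "RS2_20100403_TotalOverlap".strip("_TotalOverlap") -> "RS2_20100403"  (correct only by luck,
    #                                                          since digits are not in the char set)
    # so suffix comparisons built on strip() can silently mismatch; slicing off
    # len("_TotalOverlap") characters (or str.removesuffix on Python 3.9+) is exact.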
    # Iterate through dark targets acquisition dates to export single feature classes
    arcpy.AddMessage("\nExporting single feature classes...")
    logging.info("Processing single feature classes to export")
    for key in fcDictByDate:
        if len(fcDictByDate[key]) == 1:
            for fc in fcList:
                fcSplit = fc.split("_")
                if fcSplit[1] in mergeDictbyDate:
                    continue
                else:
                    outputFeatureName = "RS2_" + fcSplit[1]
                    arcpy.FeatureClassToFeatureClass_conversion(fc, gdbWorkspace, outputFeatureName, "#", "#")
                    logging.info("Feature Class to Feature Class: '%s' feature class converted to '%s'", fc, outputFeatureName)
                    outputFeatPath = os.path.join(gdbWorkspace, outputFeatureName)
                    arcpy.DeleteField_management(outputFeatPath, "FID")
    logging.info("Processing of single feature classes to export complete")

    logging.info("mergeAreas.py script finished\n\n")

    return

def fdr_control(self, output_features):
    # Map output feature names
    output_feature_map = {output_feature.name: output_feature for output_feature in output_features}
    # Update saved hierarchy with FDR-controlled results
    queue = deque()
    root = self.features[0].root
    queue.append(root)
    while queue:
        parent = queue.popleft()
        if not parent.children:
            continue
        pvalues = np.ones(len(parent.children))
        for idx, child in enumerate(parent.children):
            child.copy_attributes(output_feature_map[child.name])
            pvalues[idx] = child.overall_pvalue
        adjusted_pvalues, rejected_hypotheses = bh_procedure(pvalues, self.args.importance_significance_level)
        for idx, child in enumerate(parent.children):
            child.overall_pvalue = adjusted_pvalues[idx]
            child.important = rejected_hypotheses[idx]
            if child.important:
                queue.append(child)
            else:
                child.window_important = False
                child.ordering_important = False
                child.window_ordering_important = False

def derive_features(self):

    temp = int(self.stop_id)

    while temp not in self.stops_latlon.keys():
        if temp < 7692:
            temp += 1
        else:
            while temp not in self.stops_latlon.keys():
                temp -= 1

    self.latitude = self.stops_latlon[temp][0]
    self.longitude = self.stops_latlon[temp][1]

    self.distance_centre = FormatInput.haversine(self.latitude, self.longitude)

    self.cluster = FormatInput.map_stop_to_cluster(self.cluster_map, self.stop_id)

    self.holiday = FormatInput.add_holiday(self.date)

def _insert_stops_many_to_many(self):
    # Store data of the relevant origins and destinations in dictionaries for quick lookups and reuse
    o_data = {}  # {Origin ID: [Shape, transferred fields]}
    for row in arcpy.da.SearchCursor(  # pylint: disable=no-member
        self.input_origins_layer,
        [self.origin_id_field, "SHAPE@"] + self.origin_transfer_fields
    ):
        o_data[row[0]] = row[1:]
    d_data = {}  # {Destination ID: [Shape, transferred fields]}
    for row in arcpy.da.SearchCursor(  # pylint: disable=no-member
        self.input_destinations_layer,
        [self.dest_id_field, "SHAPE@"] + self.destination_transfer_fields
    ):
        d_data[row[0]] = row[1:]

    # Insert origins from each OD pair into the Route analysis
    with self.rt_solver.insertCursor(
        arcpy.nax.RouteInputDataType.Stops,
        ["RouteName", "Sequence", self.origin_unique_id_field_name, "SHAPE@"] + self.origin_transfer_fields
    ) as icur:
        for od_pair in self.od_pairs:
            origin_id, dest_id = od_pair
            try:
                origin_data = o_data[origin_id]
            except KeyError:
                # This should never happen because we should have preprocessed this out.
                self.logger.debug(
                    f"Origin from OD Pairs not found in inputs. Skipped pair {od_pair}.")
                continue
            route_name = f"{origin_id} - {dest_id}"
            icur.insertRow((route_name, 1, origin_id) + origin_data)

    # Insert destinations from each OD pair into the Route analysis
    with self.rt_solver.insertCursor(
        arcpy.nax.RouteInputDataType.Stops,
        ["RouteName", "Sequence", self.dest_unique_id_field_name, "SHAPE@"] + self.destination_transfer_fields
    ) as icur:
        for od_pair in self.od_pairs:
            origin_id, dest_id = od_pair
            try:
                dest_data = d_data[dest_id]
            except KeyError:
                # This should never happen because we should have preprocessed this out.
                self.logger.debug(
                    f"Destination from OD Pairs not found in inputs. 
Skipped pair {od_pair}.\")\r\n continue\r\n route_name = f\"{origin_id} - {dest_id}\"\r\n icur.insertRow((route_name, 2, dest_id) + dest_data)", "def main():\n # IMPORTANT: Specify a path to the new shapefile!\n data_dir = os.path.join(\"C:\\\\\",\"Users\",\"janni\",\"OneDrive\",\"Desktop\",\"data\")\n\n #Store route identification codes in to a list\n L_tracks=['\"tag_ident\"=72413','\"tag_ident\"=72417','\"tag_ident\"=73053','\"tag_ident\"=72364',\\\n '\"tag_ident\"=73054','\"tag_ident\"=79694','\"tag_ident\"=79698']\n\n if(os.path.isdir(data_dir)):\n print(\"Very good! You have chosen a valid directory!\")\n # load the point shapefile of the white-fronted goose manually!\n # access the active layer\n point_layer = iface.activeLayer()\n if not point_layer:\n print(\"Shape file failed to load!\")\n else:\n # 1\n addTimeAndDateObs(point_layer)\n print(\"-----------Created Date and Time objects-------------\")\n # 2\n addDistance(point_layer, L_tracks)\n print(\"-----------Distances calculation finished-------------\")\n # 3\n extractPoints(point_layer,Statistics(point_layer),data_dir)\n print(\"-----------Low distance points extracted and save to a new shapefile-------------\")\n print('Done')\n\n raster_fn = os.path.join(data_dir,\"Eurasia_Landcover.tif\")\n landuse_legend_fn = os.path.join(data_dir,'Eurasia_Landcover_Legend.csv')\n in_shape_fn = os.path.join(data_dir,\"lowDistance.shp\")\n out_shape_fn = os.path.join(data_dir,\"lowDistanceLanduseID.shp\")\n\n\n if(QgsProject.instance().mapLayersByName('lowDistanceLanduseID')==[]):\n processing.run(\"qgis:rastersampling\",\n {'COLUMN_PREFIX' : 'LanduseNr_',\n 'INPUT' : in_shape_fn,\n 'OUTPUT' : out_shape_fn,\n 'RASTERCOPY' : raster_fn})\n updated_shapefile = iface.addVectorLayer(out_shape_fn, '', 'ogr')\n else:\n updated_shapefile = QgsProject.instance().mapLayersByName('lowDistanceLanduseID')[0]\n #2\n convertIdFloatToInt(updated_shapefile)\n #3\n legend = preProcessLegend(landuse_legend_fn)\n #4\n convertIdToName(legend,updated_shapefile)\n #5\n plotLandUse(updated_shapefile,\"Pie\")\n print(\"-----------finished!-------------\")\n print(\"DONE! :)\")\n else:\n iface.messageBar().pushMessage(\"Error\", \"The directory does not exist. 
Please change data_dir in the code\",level = 1)\n print(\"Please specify a valid directory in the main function of Code_Distance.py!\")", "def merge_energy_datatypes(osm_path): \n #extract line data\n df_line = powerline_limited(osm_path) #extract required data\n if 'asset' in df_line.columns:\n df_line['asset'] = list(map(lambda x: x.lower(), df_line['asset'])) #make sure that asset column is in lowercase characters\n #reclassify assets \n mapping_dict = {\n \"cable\" : \"cable\", #underground\n \"minor_cable\" : \"cable\", \n #\"generator\" : \"generator\", #device used to convert power from one form to another\n \"line\" : \"line\", #overground\n \"minor_line\" : \"minor_line\", #overground\n #\"plant\" : \"plant\", #place where power is generated\n #\"substation\" : \"substation\"\n }\n df_line['asset'] = df_line.asset.apply(lambda x : mapping_dict[x]) #reclassification \n\n if 'voltage' in df_line.columns:\n df_line = df_line.drop(['voltage'], axis=1) \n \n #extract polygon data\n df_poly = power_polygon(osm_path) #extract required data\n df_poly['geometry'] =pygeos.buffer(df_poly.geometry,0) #avoid intersection\n \n #extract point data\n df_point = power_point(osm_path) #extract required data\n \n return pandas.concat([df_line, df_poly, df_point], ignore_index=True)", "def _update(self, features: DataFrameLike) -> None:\n # add features\n self._features = (\n pd.concat([self._features, features], axis=1, sort=True)\n # fill nans resulting from concatenation where features does not\n # contain neighborless nodes (out-degree=0) on its axis\n .fillna(0)\n )\n # prune redundant features\n pruner = FeaturePruner(self._final_features, self._feature_group_thresh)\n features_to_drop = pruner.prune_features(self._features)\n self._features = self._features.drop(features_to_drop, axis=1)\n # save features that remain after pruning and that\n # have not previously been saved as final features\n retained = features.columns.difference(features_to_drop)\n feature_dict = as_frame(self._features[retained]).to_dict()\n self._final_features[self.generation_count] = feature_dict", "def r_to_ws(road_fn, ws, paths, ta):\n\n # Open the road data source and find distinct road classes from 'tag' field\n source_ds = ogr.Open(road_fn)\n source_layer = source_ds.GetLayer()\n feature = source_layer.GetNextFeature()\n field_vals = set([feature.GetFieldAsString('tag') for feature in source_layer])\n\n # Read excel file with walking speeds into pandas dataframe and sort\n costs = pd.read_csv(ws)\n costs = costs.sort_values(by=['Walking_Speed'], ascending=False)\n # TODO: Consider whether we should take into account congestion, crossings etc\n if paths is False:\n print()\n # TODO: if False remove paths from pandas dataframe.... 
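    # A hedged sketch of what that TODO might look like (untested; the column holding the
    # road class is assumed to be the first one, matching the row[0] usage below):
    #     if paths is False:
    #         class_col = costs.columns[0]   # hypothetical: first column = road class
    #         costs = costs[~costs[class_col].str.contains('path', case=False, na=False)]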
\n\n # blank array to use in first back fill\n arr1 = ta.dst_ds.GetRasterBand(1).ReadAsArray()\n\n # Loop through road costs dataframe\n for index, row in costs.iterrows():\n\n # use fuzzy match spreadsheet road classes to polyline road classes\n match = process.extractOne(row[0], field_vals)\n\n # create SQL string to select one polyline road class at a time\n # formatted string of SQL statement where match[0] = road feature class\n sql_str = f\"SELECT * FROM AllRoads WHERE tag='{match[0]}'\" \n\n # rasterize one road class at a time using value from input csv\n # to in-memory array sized based on landcover input\n opt = gdal.RasterizeOptions(burnValues=row[1], # walking speed value to assign to pixel from input csv\n allTouched=True, # value assigned to all pixels touched by line\n SQLStatement=sql_str, # use above SQL string\n SQLDialect='SQLITE')\n gdal.Rasterize(ta.dst_ds, road_fn, options=opt) #(in-memory array, road shapefile, above options)\n\n # FIXME: \n # Warning 1: The input vector layer has a SRS, but the output raster dataset SRS is unknown.\n # Ensure output raster dataset has the same SRS, otherwise results might be incorrect.\n\n # get array from in-memory rasterized layer\n arr = ta.dst_ds.GetRasterBand(1).ReadAsArray()\n\n # create one layer of all roads prioritising based on rasterizing order\n arr1 = backfill(arr, arr1)\n\n return arr1", "def to_segmentation_task(\n self, keep_geometries: Optional[List] = [Polygon, Bitmap], target_classes=None\n ) -> Tuple[ProjectMeta, Dict[ObjClass, ObjClass]]:\n mapping = {}\n res_classes = []\n for obj_class in self.obj_classes:\n obj_class: ObjClass\n\n if target_classes is None or obj_class.name in target_classes:\n if obj_class.geometry_type in keep_geometries:\n if obj_class.geometry_type == Bitmap:\n mapping[obj_class] = obj_class\n res_classes.append(obj_class)\n else:\n new_class = obj_class.clone(geometry_type=Bitmap)\n mapping[obj_class] = new_class\n res_classes.append(new_class)\n else:\n mapping[obj_class] = None\n else:\n mapping[obj_class] = None\n\n res_meta = self.clone(obj_classes=ObjClassCollection(res_classes))\n return res_meta, mapping", "def process(self, data):\n allocating = (self._output is None)\n ind = 0\n for i, (name, feature) in enumerate(self.features):\n if allocating:\n x = feature.compute(data)\n self.feature_indices[name] = (ind, ind+x.size)\n ind += x.size\n\n if self._output is None:\n self._output = x\n else:\n self._output = np.hstack([self._output, x])\n else:\n self._output[self.feature_indices[name][0]:\n self.feature_indices[name][1]] = \\\n feature.compute(data)\n\n return self._output", "def add_building_output_locations(self,area_id,start,end,step,type='BUILDINGS_AS_HOLES'): \n print \"Getting buildings locations...\"\n \n dictionary = self.grid.get_building_output_locations(area_id,type)\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def execute(self, parameters, messages):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_location_property = parameters[1]\r\n\t\tin_relation_degree = parameters[2]\r\n\t\tout_location = parameters[3]\r\n\t\tout_points_name = parameters[4]\r\n\r\n\t\t\r\n\t\tif in_wikiplace_IRI.value:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\tlocationCommonPropertyNameCount = in_location_property.valueAsText\r\n\t\t\trelationDegree = int(in_relation_degree.valueAsText)\r\n\t\t\toutLocation = out_location.valueAsText\r\n\t\t\toutFeatureClassName = 
out_points_name.valueAsText\r\n\t\t\t\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\toriginFeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\r\n\t\t\tif outLocation.endswith(\".gdb\") == False:\r\n\t\t\t\tmessages.addErrorMessage(\"Please enter a file geodatabase as the file location for output feature class.\")\r\n\t\t\t\traise arcpy.ExecuteError\r\n\t\t\telse:\r\n\t\t\t\tarcpy.env.workspace = outLocation\r\n\r\n\t\t\t\tendFeatureClassName = outLocation + \"\\\\\" + outFeatureClassName\r\n\t\t\t\tif arcpy.Exists(endFeatureClassName):\r\n\t\t\t\t\tmessages.addErrorMessage(\"The output feature class name already exists in current workspace!\")\r\n\t\t\t\t\traise arcpy.ExecuteError\r\n\t\t\t\telse:\r\n\r\n\t\t\t\t\t# get all the IRI from input point feature class of wikidata places\r\n\t\t\t\t\tinplaceIRIList = []\r\n\t\t\t\t\tcursor = arcpy.SearchCursor(inputFeatureClassName)\r\n\t\t\t\t\tfor row in cursor:\r\n\t\t\t\t\t\tinplaceIRIList.append(row.getValue(\"URL\"))\r\n\r\n\t\t\t\t\tif relationDegree > 4:\r\n\t\t\t\t\t\trelationDegree = 4\r\n\t\t\t\t\t\tin_relation_degree.value = 4\r\n\t\t\t\t\t\r\n\t\t\t\t\tlocationCommonPropertyURL = LocationPropertyPath.locationCommonPropertyDict[locationCommonPropertyNameCount]\r\n\t\t\t\t\tlocationLinkageRelationJSONObj = SPARQLQuery.locationLinkageRelationQuery(inplaceIRIList, locationCommonPropertyURL, relationDegree)\r\n\t\t\t\t\tlocationLinkageRelationJSON = locationLinkageRelationJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t\t\tendPlaceIRISet = Set()\r\n\t\t\t\t\tfor jsonItem in locationLinkageRelationJSON:\r\n\t\t\t\t\t\tendPlaceIRISet.add(jsonItem[\"end\"][\"value\"])\r\n\r\n\t\t\t\t\tendPlaceIRIList = list(endPlaceIRISet)\r\n\r\n\t\t\t\t\t# endPlaceJSONObj = SPARQLQuery.endPlaceInformationQuery(endPlaceIRIList)\r\n\t\t\t\t\t\r\n\t\t\t\t\tendPlaceJSON = SPARQLQuery.endPlaceInformationQuery(endPlaceIRIList)\r\n\r\n\t\t\t\t\tJson2Field.creatPlaceFeatureClassFromJSON(endPlaceJSON, endFeatureClassName, None, \"\")\r\n\r\n\r\n\t\t\t\t\tlastIndex = locationCommonPropertyNameCount.rfind(\"(\")\r\n\t\t\t\t\tlocationCommonPropertyName = locationCommonPropertyNameCount[:lastIndex]\r\n\t\t\t\t\tlocationLinkageTableName = Json2Field.createLocationLinkageMappingTableFromJSON(locationLinkageRelationJSON, \"origin\", \"end\", inputFeatureClassName, endFeatureClassName, locationCommonPropertyURL, locationCommonPropertyName, relationDegree)\r\n\r\n\t\t\t\t\tendFeatureRelationshipClassName = outFeatureClassName + \"_\" + locationLinkageTableName + \"_RelClass\"\r\n\t\t\t\t\tarcpy.CreateRelationshipClass_management(outFeatureClassName, locationLinkageTableName, endFeatureRelationshipClassName, \"SIMPLE\",\r\n\t\t\t\t\t\t\"is \"+ locationCommonPropertyName + \"of\", locationCommonPropertyName,\r\n\t\t\t\t\t\t\t\t\t\t \"FORWARD\", \"ONE_TO_MANY\", \"NONE\", \"URL\", \"end\")\r\n\r\n\t\t\t\t\toriginFeatureRelationshipClassName = originFeatureClassName + \"_\" + locationLinkageTableName + \"_RelClass\"\r\n\t\t\t\t\tarcpy.CreateRelationshipClass_management(originFeatureClassName, locationLinkageTableName, originFeatureRelationshipClassName, \"SIMPLE\",\r\n\t\t\t\t\t\tlocationCommonPropertyName, \"is \"+ locationCommonPropertyName + \"of\",\r\n\t\t\t\t\t\t\t\t\t\t \"FORWARD\", \"ONE_TO_MANY\", \"NONE\", \"URL\", \"origin\")\r\n\t\t\t\t\r\n\t\t\r\n\r\n\t\treturn", "def PrepareWorkspace():\n \n # define expected file paths for file gdb folder, fgdb, taxi feature class \n fgdb_folder = constants.fgdb_folder\n fgdb_name = 
constants.taxi_fgdb_name\n file_gdb = os.path.join(fgdb_folder, fgdb_name)\n taxi_feature_class_name = \"TaxiLocations\"\n taxi_feature_class = os.path.join(file_gdb, taxi_feature_class_name)\n \n out_coordinate_system = arcpy.SpatialReference('WGS 1984') # define output spatial reference\n \n if not os.path.exists(fgdb_folder): # if file gdb folder has not been created\n os.mkdir(fgdb_folder) # create the folder\n if not arcpy.Exists(file_gdb): # if file gdb has not been created\n arcpy.CreateFileGDB_management(fgdb_folder, fgdb_name) # create the file gdb\n \n if not arcpy.Exists(taxi_feature_class): # if the taxi feature class does not exist\n # create the point feature class in WGS84 spatial reference\n arcpy.CreateFeatureclass_management(file_gdb, \n taxi_feature_class_name, \n \"Point\", \n spatial_reference=out_coordinate_system) # create a point feature class with defined coordinate system\n \n arcpy.TruncateTable_management(taxi_feature_class) # delete existing features in the feature class\n \n return file_gdb, taxi_feature_class # return fgdb and feature class path to main\n \n \n # %%", "def _link_single_worker(self):\n self.det_link_map = OrderedDict()\n self.id_link_map = OrderedDict()\n\n # Single-process implementation\n # For each UID, cur_anno and cur_decl are relevant\n for uid in np.unique(self.annotations_table.uid.values):\n cur_anno = self.annotations_table[\n self.annotations_table.uid.values == uid\n ]\n cur_decl = self.declarations_table[\n self.declarations_table.uid.values == uid\n ]\n\n anno_frames = cur_anno.frame.values\n decl_frames = cur_decl.frame.values\n\n # For each frame, frame_anno and frame_decl are relevant. We don't\n # have to loop over decl_frames, since any decl with decl_frame not\n # present in anno_frames can't be linked to any annots\n for frame in np.unique(anno_frames):\n frame_anno = cur_anno[anno_frames == frame]\n frame_decl = cur_decl[decl_frames == frame]\n\n # Note that due to PANDAS index magic, the resulting\n # links and id_links map key values are the global indices, and\n # not simply [1 ... 
len(frame_anno)]\n links, id_links = self._link_frame(frame_anno, frame_decl)\n\n self.det_link_map.update(links)\n self.id_link_map.update(id_links)", "def _merge_boundaries(self):\n \n optical_seg = self._amalgamated_optical_segments\n if bool(optical_seg):\n optical_seg[\"catagory\"] = OPTICAL * tf.ones_like(\n optical_seg[\"x_start\"],\n dtype=tf.int64\n )\n self._optical_seg_count = tf.shape(\n optical_seg[\"x_start\"],\n out_type=tf.int64\n )[0]\n else:\n self._optical_seg_count = 0\n \n stop_seg = self._amalgamated_stop_segments\n if bool(stop_seg):\n stop_seg[\"catagory\"] = STOP * tf.ones_like(\n stop_seg[\"x_start\"],\n dtype=tf.int64\n )\n self._stop_seg_count = tf.shape(\n stop_seg[\"x_start\"],\n out_type=tf.int64\n )[0]\n else:\n self._stop_seg_count = 0\n \n target_seg = self._amalgamated_target_segments\n if bool(target_seg):\n target_seg[\"catagory\"] = TARGET * tf.ones_like(\n target_seg[\"x_start\"],\n dtype=tf.int64\n )\n self._target_seg_count = tf.shape(\n target_seg[\"x_start\"],\n out_type=tf.int64\n )[0]\n else:\n self._target_seg_count = 0\n \n self._merged_segments = amalgamate(\n [optical_seg, stop_seg, target_seg], \n SEGMENT_GEO_SIG | {\"catagory\"}\n )\n \n optical_arc = self._amalgamated_optical_arcs\n if bool(optical_arc):\n optical_arc[\"catagory\"] = OPTICAL * tf.ones_like(\n optical_arc[\"x_center\"],\n dtype=tf.int64\n )\n self._optical_arc_count = tf.shape(\n optical_arc[\"x_center\"],\n out_type=tf.int64\n )[0]\n else:\n self._optical_arc_count = 0\n \n stop_arc = self._amalgamated_stop_arcs\n if bool(stop_arc):\n stop_arc[\"catagory\"] = STOP * tf.ones_like(\n stop_arc[\"x_center\"],\n dtype=tf.int64\n )\n self._stop_arc_count = tf.shape(\n stop_arc[\"x_center\"],\n out_type=tf.int64\n )[0]\n else:\n self._stop_arc_count = 0\n \n target_arc = self._amalgamated_target_arcs\n if bool(target_arc):\n target_arc[\"catagory\"] = TARGET * tf.ones_like(\n target_arc[\"x_center\"],\n dtype=tf.int64\n )\n self._target_arc_count = tf.shape(\n target_arc[\"x_center\"],\n out_type=tf.int64\n )[0]\n else:\n self._target_arc_count = 0\n \n self._merged_arcs = amalgamate(\n [optical_arc, stop_arc, target_arc], \n ARC_GEO_SIG | {\"catagory\"}\n )", "def _series_merging_map(self, map_list, feature_option=\"sift\"):\n print(\" --- Start ---\")\n # Transform state into 3 specified values\n for i in range(len(map_list)):\n map_list[i] = cv2.cvtColor(map_list[i], cv2.COLOR_RGB2GRAY)\n map_list[i] = MF._transform_state(map_list[i])\n \n\n map_ref = map_list[0]\n for i in range(len(map_list)-1):\n map_align = map_list[i+1]\n\n \n if feature_option == \"orb\":\n orb = cv2.ORB_create()\n key_points_1, descriptor_1 = orb.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = orb.detectAndCompute(map_align, None)\n \n elif feature_option == \"surf\":\n surf = cv2.xfeatures2d.SURF_create(400)\n key_points_1, descriptor_1 = surf.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = surf.detectAndCompute(map_align, None)\n else:\n siftDetector = cv2.xfeatures2d.SIFT_create()\n key_points_1, descriptor_1 = siftDetector.detectAndCompute(map_ref, None)\n key_points_2, descriptor_2 = siftDetector.detectAndCompute(map_align, None)\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(descriptor_1, descriptor_2, k=2)\n\n good = []\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good.append(m)\n \n pts_1, pts_2 = [], []\n for i in good:\n query_idx = i.queryIdx\n train_idx = i.trainIdx\n\n pts_1.append([\n key_points_1[query_idx].pt[0],\n 
key_points_1[query_idx].pt[1],\n ])\n pts_2.append([\n key_points_2[train_idx].pt[0],\n key_points_2[train_idx].pt[1],\n ])\n \n pts1 = np.array(pts_1)\n pts2 = np.array(pts_2)\n\n # relation, value, _ = RMM._ransac_find_rotation_translation(pts_set_1=pts2, pts_set_2=pts1, sigma=0.5, max_iter=5000)\n # print(\"- Inlier Percent: %f\"%value)\n # # Because the coordinates between the maps and the SIFT features are different:\n # # SIFT Features: Right: +x, Down: +y\n # # Maps: Down: +x, Right: +y\n # # Hence the dx and dy should be changed.\n # dx = relation[1]\n # dy = relation[0]\n # dyaw = relation[2]\n # print(\"- (x, y, t): (%f, %f, %f)\"%(dx,dy,dyaw))\n\n # # index, agr, dis = RMM._similarity_index(x=[dy, dx, dyaw], map1=map_ref, map2=map_align)\n # # print(\"Similarity Index: %f\\nAgree Number: %f\\nDisargee Number: %f\"%(index, agr, dis))\n # index, agr, dis, _ = RMM._similarity_index_2(x=[dx, dy, dyaw], map1=map_ref, map2=map_align)\n # print(\"- Similarity Index: %f\\n- Agree Number: %f\\n- Disargee Number: %f\"%(index, agr, dis))\n \n # map_merged = MF._merging_map(dx=dx, dy=dy, dtheta=dyaw, map1=map_ref, map2=map_align)\n # map_ref = map_merged.astype(np.uint8)\n # map_ref = MF._modify_map_size(merged_map=map_ref)\n\n relation, value, _ = RANSAC_Map_Merging()._ransac_find_all(pts_set_1=pts2, pts_set_2=pts1, sigma=5, max_iter=2000)\n dx = relation[1]\n dy = relation[0]\n dyaw = relation[2]\n dr = relation[3]\n print(\"- Inlier Percent: %f\"%value)\n print(\"- (dx, dy, dyaw, dr) = %f, %f, %f, %f\"%(dx,dy,dyaw, dr))\n map_merged = MAP_Function()._merging_map_ratio(dx=dx, dy=dy, dtheta=dyaw, dr=dr, map1=map_ref, map2=map_align)\n map_ref = map_merged.astype(np.uint8)\n map_ref = MF._modify_map_size(merged_map=map_ref)\n\n # return map_ref, (dx, dy, dyaw)\n return map_ref, (dx, dy, dyaw, dr)", "def process_search_pages(self):\r\n features_list = []\r\n for page in self.url_list:\r\n listings = extract_listings(page)\r\n for listing in listings:\r\n features = extract_listing_features(listing, RULES_SEARCH_PAGE)\r\n features['sp_url'] = page\r\n features_list.append(features)\r\n\r\n self.base_features_list = features_list", "def finalizeFeatures(self):\n pass", "def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec", "def add_feature(layer, branchID, segs, lines, lon, lat, Ttime, density, Initial_loc, solubility, flows, concentration, water_level, dist): \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n point = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)\r\n # Add points individually to the line\r\n #xy = lines[i]\r\n \r\n #line.AddPoint_2D(xy[0][0],xy[0][1])\r\n #line.AddPoint_2D(xy[1][0],xy[1][1])\r\n point.AddPoint(lon[i], lat[i])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n 
#feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(point)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(point)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID[i])) \r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Lon', \"{:.3f}\".format(lon[i]))\r\n feature.SetField('Lat', \"{:.3f}\".format(lat[i]))\r\n #feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n #feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n feature.SetField('T (day)', int(Ttime[i]))\r\n feature.SetField('Density', density[i])\r\n feature.SetField('Initial', Initial_loc[i])\r\n feature.SetField('Solubility', solubility[i])\r\n feature.SetField('Flow', flows[i])\r\n feature.SetField('C (mg/L)', concentration[i])\r\n feature.SetField('WSE (ft)', water_level[i])\r\n feature.SetField('D (ft)', dist[i])\r\n \r\n layer.CreateFeature(feature)", "def updateParameters(self, parameters):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_stat_fields = parameters[1]\r\n\t\t# out_location = parameters[2]\r\n\t\t# out_points_name = parameters[3]\r\n\t\t\r\n\t\tif in_wikiplace_IRI.altered and not in_stat_fields.altered:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\tfeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\tif currentWorkspace.endswith(\".gdb\") == False:\r\n\t\t\t\tmessages.addErrorMessage(\"Please enter a feature class in file geodatabase for the input feature class.\")\r\n\t\t\t\traise arcpy.ExecuteError\r\n\t\t\telse:\r\n\t\t\t\t# if in_related_table.value:\r\n\t\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\t\t# out_location.value = currentWorkspace\r\n\t\t\t\t# out_points_name.value = featureClassName + \"_noFunc_merge\"\r\n\t\t\t\t# # check whether the input table are in the same file geodatabase as the input feature class\r\n\t\t\t\t# inputTableName = in_related_table.valueAsText\r\n\t\t\t\t# lastIndexOFTable = inputTableName.rfind(\"\\\\\")\r\n\t\t\t\t# currentWorkspaceTable = inputTableName[:lastIndexOFTable]\r\n\t\t\t\t# if currentWorkspaceTable != currentWorkspace:\r\n\t\t\t\t# \tmessages.addErrorMessage(\"Please enter a table in the same file geodatabase as the input feature class.\")\r\n\t\t\t\t# \traise arcpy.ExecuteError\r\n\t\t\t\t# else:\r\n\t\t\t\t# \tif UTIL.detectRelationship(inputFeatureClassName, inputTableName):\r\n\t\t\t\t# \t\tarcpy.AddMessage(\"The feature class and table are related!\")\r\n\t\t\t\trelatedTableList = UTIL.getRelatedTableFromFeatureClass(inputFeatureClassName)\r\n\t\t\t\t# fieldmappings = arcpy.FieldMappings()\r\n\t\t\t\t# fieldmappings.addTable(inputFeatureClassName)\r\n\t\t\t\t\r\n\t\t\t\tnoFunctionalPropertyTable = []\r\n\r\n\t\t\t\tfor relatedTable in relatedTableList:\r\n\t\t\t\t\tfieldList = arcpy.ListFields(relatedTable)\r\n\t\t\t\t\tif \"origin\" not in fieldList and \"end\" not in fieldList:\r\n\t\t\t\t\t\tnoFunctionalFieldName = fieldList[2].name\r\n\t\t\t\t\t\tarcpy.AddMessage(\"noFunctionalFieldName: {0}\".format(noFunctionalFieldName))\r\n\t\t\t\t\t\tnoFunctionalPropertyTable.append([noFunctionalFieldName, 'COUNT', relatedTable])\r\n\t\t\t\t\t\t# MergeNoFunctionalProperty.relatedTableFieldList.append([noFunctionalFieldName, relatedTable, 'COUNT'])\r\n\t\t\t\t\t# 
fieldmappings.addTable(relatedTable)\r\n\t\t\t\t\t# fieldList = arcpy.ListFields(relatedTable)\r\n\t\t\t\t\t# noFunctionalFieldName = fieldList[len(fieldList)-1].name\r\n\t\t\t\t\t# arcpy.AddMessage(\"noFunctionalFieldName: {0}\".format(noFunctionalFieldName))\r\n\t\t\t\t\t# fieldmap = fieldmappings.getFieldMap(fieldmappings.findFieldMapIndex(noFunctionalFieldName))\r\n\t\t\t\t\t# fieldmap.addInputField(relatedTable, \"wikiURL\")\r\n\t\t\t\t\t# fieldmap.addInputField(inputFeatureClassName, \"URL\")\r\n\t\t\t\t\t# fieldmappings.replaceFieldMap(fieldmappings.findFieldMapIndex(noFunctionalFieldName), fieldmap)\r\n\r\n\t\t\t\tin_stat_fields.values = noFunctionalPropertyTable\r\n\r\n\r\n\r\n\t\t\t\t# fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(\"wikiURL\"))\r\n\r\n\r\n\r\n\t\t\t\t# in_field_mapping.value = fieldmappings.exportToString()\r\n\r\n\t\t\t# if in_stat_fields.altered:\r\n\t\t\t# \tfieldMergeRuleTest = in_stat_fields.valueAsText\r\n\t\t\t# \tif fieldMergeRuleTest:\r\n\t\t\t# \tfieldSplitList = fieldMergeRuleTest.split(\";\")\r\n\t\t\t# \tfor fieldSplitItem in fieldSplitList:\r\n\t\t\t# \t\tfieldMergeList = fieldSplitList.split(\"\\t\")\r\n\t\t\t# \t\tfor item in MergeNoFunctionalProperty.relatedTableFieldList:\r\n\t\t\t# \t\t\tif item[]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\r\n\r\n\t\treturn", "def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)", "def updateParameters(self, parameters):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_no_functional_property_list = parameters[1]\r\n\t\tin_related_table_list = parameters[2]\r\n\t\tin_merge_rule = parameters[3]\r\n\t\tin_cancatenate_delimiter = parameters[4]\r\n\t\t\r\n\t\tif in_wikiplace_IRI.altered:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\tfeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\tif currentWorkspace.endswith(\".gdb\") == False:\r\n\t\t\t\tmessages.addErrorMessage(\"Please enter a feature class in file geodatabase for the input feature class.\")\r\n\t\t\t\traise arcpy.ExecuteError\r\n\t\t\telse:\r\n\t\t\t\t# if in_related_table.value:\r\n\t\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\t\t# out_location.value = currentWorkspace\r\n\t\t\t\t# out_points_name.value = featureClassName + \"_noFunc_merge\"\r\n\t\t\t\t# # check whether the input table are in the same file geodatabase as the input feature class\r\n\t\t\t\t# inputTableName = in_related_table.valueAsText\r\n\t\t\t\t# lastIndexOFTable = inputTableName.rfind(\"\\\\\")\r\n\t\t\t\t# currentWorkspaceTable = inputTableName[:lastIndexOFTable]\r\n\t\t\t\t# if currentWorkspaceTable != currentWorkspace:\r\n\t\t\t\t# \tmessages.addErrorMessage(\"Please enter a table in the same file geodatabase as the input feature class.\")\r\n\t\t\t\t# \traise arcpy.ExecuteError\r\n\t\t\t\t# else:\r\n\t\t\t\t# \tif UTIL.detectRelationship(inputFeatureClassName, inputTableName):\r\n\t\t\t\t# \t\tarcpy.AddMessage(\"The feature class and table are 
related!\")\r\n\t\t\t\tMergeSingleNoFunctionalProperty.relatedTableFieldList = []\r\n\t\t\t\tMergeSingleNoFunctionalProperty.relatedTableList = []\r\n\t\t\t\tMergeSingleNoFunctionalProperty.relatedNoFunctionalPropertyURLList = []\r\n\r\n\t\t\t\tMergeSingleNoFunctionalProperty.relatedTableList = UTIL.getRelatedTableFromFeatureClass(inputFeatureClassName)\r\n\t\t\t\tin_related_table_list.filter.list = MergeSingleNoFunctionalProperty.relatedTableList\r\n\t\t\t\t\r\n\t\t\t\t# noFunctionalPropertyTable = []\r\n\r\n\t\t\t\tfor relatedTable in MergeSingleNoFunctionalProperty.relatedTableList:\r\n\t\t\t\t\tfieldList = arcpy.ListFields(relatedTable)\r\n\t\t\t\t\tif \"origin\" not in fieldList and \"end\" not in fieldList:\r\n\t\t\t\t\t\tnoFunctionalFieldName = fieldList[2].name\r\n\t\t\t\t\t\tarcpy.AddMessage(\"noFunctionalFieldName: {0}\".format(noFunctionalFieldName))\r\n\t\t\t\t\t\tMergeSingleNoFunctionalProperty.relatedTableFieldList.append(noFunctionalFieldName)\r\n\t\t\t\t\t\t# get the no functioal property URL from the firt row of this table field \"propURL\"\r\n\t\t\t\t\t\t# propURL = arcpy.da.SearchCursor(relatedTable, (\"propURL\")).next()[0]\r\n\r\n\t\t\t\t\t\tTableRelationshipClassList = UTIL.getRelationshipClassFromTable(relatedTable)\r\n\t\t\t\t\t\tpropURL = arcpy.Describe(TableRelationshipClassList[0]).forwardPathLabel\r\n\r\n\t\t\t\t\t\tMergeSingleNoFunctionalProperty.relatedNoFunctionalPropertyURLList.append(propURL)\r\n\r\n\t\t\t\tin_no_functional_property_list.filter.list = MergeSingleNoFunctionalProperty.relatedNoFunctionalPropertyURLList\r\n\t\t\t\t\t\t# noFunctionalPropertyTable.append([noFunctionalFieldName, 'COUNT', relatedTable])\r\n\t\t\t\t\t\t# MergeNoFunctionalProperty.relatedTableFieldList.append([noFunctionalFieldName, relatedTable, 'COUNT'])\r\n\t\t\t\t\t# fieldmappings.addTable(relatedTable)\r\n\t\t\t\t\t# fieldList = arcpy.ListFields(relatedTable)\r\n\t\t\t\t\t# noFunctionalFieldName = fieldList[len(fieldList)-1].name\r\n\t\t\t\t\t# arcpy.AddMessage(\"noFunctionalFieldName: {0}\".format(noFunctionalFieldName))\r\n\r\n\t\t\t\t# in_stat_fields.values = noFunctionalPropertyTable\r\n\r\n\t\tif in_no_functional_property_list.altered:\r\n\t\t\tselectPropURL = in_no_functional_property_list.valueAsText\r\n\t\t\tselectIndex = MergeSingleNoFunctionalProperty.relatedNoFunctionalPropertyURLList.index(selectPropURL)\r\n\t\t\tselectFieldName = MergeSingleNoFunctionalProperty.relatedTableFieldList[selectIndex]\r\n\t\t\tselectTableName = MergeSingleNoFunctionalProperty.relatedTableList[selectIndex]\r\n\r\n\t\t\tin_related_table_list.value = selectTableName\r\n\r\n\t\t\tcurrentDataType = UTIL.getFieldDataTypeInTable(selectFieldName, selectTableName)\r\n\t\t\tif currentDataType in ['Single', 'Double', 'SmallInteger', 'Integer']:\r\n\t\t\t\tin_merge_rule.filter.list = ['SUM', 'MIN', 'MAX', 'STDEV', 'MEAN', 'COUNT', 'FIRST', 'LAST', 'CONCATENATE']\r\n\t\t\t# elif currentDataType in ['SmallInteger', 'Integer']:\r\n\t\t\t# \tin_merge_rule.filter.list = ['SUM', 'MIN', 'MAX', 'COUNT', 'FIRST', 'LAST']\r\n\t\t\telse:\r\n\t\t\t\tin_merge_rule.filter.list = ['COUNT', 'FIRST', 'LAST', 'CONCATENATE']\r\n\r\n\t\tif in_related_table_list.altered:\r\n\t\t\tselectTableName = in_related_table_list.valueAsText\r\n\t\t\tselectIndex = MergeSingleNoFunctionalProperty.relatedTableList.index(selectTableName)\r\n\t\t\tselectFieldName = MergeSingleNoFunctionalProperty.relatedTableFieldList[selectIndex]\r\n\t\t\tselectPropURL = 
MergeSingleNoFunctionalProperty.relatedNoFunctionalPropertyURLList[selectIndex]\r\n\r\n\t\t\tin_no_functional_property_list.value = selectPropURL\r\n\r\n\t\t\tcurrentDataType = UTIL.getFieldDataTypeInTable(selectFieldName, selectTableName)\r\n\t\t\tif currentDataType in ['Single', 'Double', 'SmallInteger', 'Integer']:\r\n\t\t\t\tin_merge_rule.filter.list = ['SUM', 'MIN', 'MAX', 'STDEV', 'MEAN', 'COUNT', 'FIRST', 'LAST', 'CONCATENATE']\r\n\t\t\t# elif currentDataType in ['SmallInteger', 'Integer']:\r\n\t\t\t# \tin_merge_rule.filter.list = ['SUM', 'MIN', 'MAX', 'COUNT', 'FIRST', 'LAST']\r\n\t\t\telse:\r\n\t\t\t\tin_merge_rule.filter.list = ['COUNT', 'FIRST', 'LAST', 'CONCATENATE']\r\n\t\t\t\r\n\r\n\t\tif in_merge_rule.valueAsText == \"CONCATENATE\":\r\n\t\t\tin_cancatenate_delimiter.enabled = True\r\n\r\n\r\n\r\n\r\n\t\treturn", "async def wrap_up_processing_reports(self):\n if hasattr(Config(), 'results'):\n new_row = []\n for item in self.recorded_items:\n item_value = {\n 'global_round':\n self.current_global_round,\n 'round':\n self.current_round,\n 'accuracy':\n self.accuracy * 100,\n 'average_accuracy':\n self.average_accuracy * 100,\n 'edge_agg_num':\n Config().algorithm.local_rounds,\n 'local_epoch_num':\n Config().trainer.epochs,\n 'training_time':\n max([\n report.training_time for (report, __) in self.updates\n ]),\n 'round_time':\n time.perf_counter() - self.round_start_time\n }[item]\n new_row.append(item_value)\n\n if Config().is_edge_server():\n result_csv_file = f'{Config().result_dir}result_{Config().args.id}.csv'\n else:\n result_csv_file = f'{Config().result_dir}result.csv'\n\n csv_processor.write_csv(result_csv_file, new_row)\n\n if Config().is_edge_server():\n # When a certain number of aggregations are completed, an edge client\n # needs to be signaled to send a report to the central server\n if self.current_round == Config().algorithm.local_rounds:\n logging.info(\n '[Server #%d] Completed %s rounds of local aggregation.',\n os.getpid(),\n Config().algorithm.local_rounds)\n self.model_aggregated.set()\n\n self.current_round = 0\n self.new_global_round_begins.clear()\n # Wait until a new global round begins\n # to avoid selecting clients before a new global round begins\n await self.new_global_round_begins.wait()", "def finalize(self):\n routes = self.routes.copy()\n for child in self._children:\n routes.extend(list(child.tree_routes))\n\n # Make a new Map() out of all of the routes.\n rule_map = Map([route.create_rule() for route in routes])\n self._route_map = rule_map\n\n self.finalized = True", "def _generate_feature_tree(self, features):\n # build a set of all features, including top-level features and\n # dependencies.\n self.top_level_features = defaultdict(list)\n\n # find top-level features and index them by entity id.\n for f in self.all_features:\n _, num_forward = self.entityset.find_path(self.target_eid, f.entity.id,\n include_num_forward=True)\n if num_forward or f.entity.id == self.target_eid:\n self.top_level_features[f.entity.id].append(f)", "def postprocess_spatial(self):\n if self.spatial_finalization is not None:\n for func in self.spatial_finalization:\n self.spatial_data = func(self.spatial_data)", "def _merge_groups(self):\n fof_rdd = self.fof_rdd\n nPartitions = self.nPartitions\n \n def remap_local_groups(iterator): \n gmap = iterator.next() \n for p_arr in iterator:\n remap_gid_partition_cython(p_arr, gmap)\n yield p_arr\n\n mapping = self._get_level_map()\n\n group_merge_map = (mapping.flatMap(lambda (g,g_p):\n [(gid, (g,g_p)) for gid in 
[decode_partition(g), decode_partition(g_p)]])\n .partitionBy(nPartitions)\n .map(lambda (k,v): v, preservesPartitioning=True)\n .mapPartitions(create_map_dict, True)).cache() \n\n merged_rdd = (group_merge_map + fof_rdd).mapPartitions(remap_local_groups, preservesPartitioning=True)\n merged_rdd.setName('merged_rdd')\n\n self.group_merge_map = group_merge_map\n\n return merged_rdd", "def _concatenate_features(features):\n pass", "def create_from_feature_list(self, features): \n for f in features:\n featuretype = f.pop('featuretype', None)\n if featuretype is None:\n raise LoopException\n if featuretype == 'strati':\n self.create_and_add_foliation(f)\n # if featuretype == 'fault':\n # self.create_and_add_fault(f)\n if featuretype == 'folded_strati':\n self.create_and_add_folded_foliation(f)", "def collect_best_features(self):\n bincsp = self.binary_csp # just to make code shorter\n n_folds = len(self.binary_csp.folds)\n n_class_pairs = len(self.binary_csp.class_pairs)\n result_shape = (n_folds, n_class_pairs)\n self.train_feature = np.empty(result_shape, dtype=object)\n self.train_feature_full_fold = np.empty(result_shape, dtype=object)\n self.test_feature = np.empty(result_shape, dtype=object)\n self.test_feature_full_fold = np.empty(result_shape, dtype=object)\n self.selected_filters_per_filterband = np.empty(result_shape, dtype=object)\n for fold_i in range(n_folds):\n for class_pair_i in range(n_class_pairs):\n bin_csp_train_features = deepcopy(bincsp.train_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_train_features_full_fold = deepcopy(\n bincsp.train_feature_full_fold[\n self.selected_filter_inds,\n fold_i, class_pair_i])\n bin_csp_test_features = deepcopy(bincsp.test_feature[\n self.selected_filter_inds, fold_i, class_pair_i])\n bin_csp_test_features_full_fold = deepcopy(\n bincsp.test_feature_full_fold[\n self.selected_filter_inds,fold_i, class_pair_i])\n selected_filters_per_filt = self.select_best_filters_best_filterbands(\n bin_csp_train_features, max_features=self.n_features,\n forward_steps=self.forward_steps, \n backward_steps=self.backward_steps,\n stop_when_no_improvement=self.stop_when_no_improvement)\n self.train_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features, selected_filters_per_filt)\n self.train_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_train_features_full_fold, selected_filters_per_filt)\n \n self.test_feature[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features, selected_filters_per_filt)\n self.test_feature_full_fold[fold_i, class_pair_i] = \\\n self.collect_features_for_filter_selection(\n bin_csp_test_features_full_fold, selected_filters_per_filt)\n \n self.selected_filters_per_filterband[fold_i, class_pair_i] = \\\n selected_filters_per_filt", "def buildRoutesDict(self):\n \n # create route number and name xref dictionary\n arcpy.env.workspace = PublicTransit.RTD_PATH\n routes = arcpy.SearchCursor(PublicTransit.BUS_ROUTES, \"\", \"\", \"RouteID; Name\", \"\")\n self.routeXref = dict()\n for route in routes:\n self.routeXref[route.RouteID] = route.Name\n self.routeXref[route.Name] = route.RouteID\n del routes\n \n #get mode lookup table\n mode_table = self.getModeLookupTable()\n \n # Query the RTD database for the route name, operator, mode, and headways.\n # We are querying for weekday routes (DAYTYPE_CLASS Weekday field = 'Y')\n conn = 
pyodbc.connect(PublicTransit.DB_CONN_STRING)\n cursor = conn.cursor()\n self.transitRoutes = dict()\n qry = \"\"\"\n WITH t AS\n (\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR,\n CASE\n WHEN HOUR_CLASS >= 3 and HOUR_CLASS < 6 THEN 'EA'\n WHEN HOUR_CLASS >= 6 and HOUR_CLASS < 10 THEN 'AM'\n WHEN HOUR_CLASS >= 10 and HOUR_CLASS < 15 THEN 'MD'\n WHEN HOUR_CLASS >= 15 and HOUR_CLASS < 19 THEN 'PM'\n WHEN (HOUR_CLASS BETWEEN 19 AND 24) OR HOUR_CLASS < 3 THEN 'EV'\n END AS tod,\n [HOURLY_FREQUENCY(Daily until HOUR_CLASS update)], HOUR_CLASS\n FROM dbo.[ROUTE HEADWAY AND FREQUENCY]\n WHERE DAYTYPE_CLASS IN\n (SELECT dc.CLASS FROM dbo.DAYTYPE_CLASS dc WHERE WEEKDAY = 'Y')\n )\n SELECT CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod,\n 60.0 / ROUND(AVG(CAST([HOURLY_FREQUENCY(Daily until HOUR_CLASS update)] AS FLOAT)), 0) as headway\n FROM t\n GROUP BY CPT_AGENCYID, AGENCYNAME, SCH_ROUTEID, SCH_PATTERNID, CPT_MODE, SCH_ROUTEDESIGNATOR, tod\n ORDER BY SCH_ROUTEID, SCH_PATTERNID, tod\"\"\"\n \n used_route_names = []\n # Iterate through result set and apply attributes.\n for row in cursor.execute(qry):\n routePattern = str(row.SCH_ROUTEID) + \"_\" + str(row.SCH_PATTERNID)\n if routePattern not in self.transitRoutes:\n self.transitRoutes[routePattern] = TransitRoute(routePattern,\n routeId = row.SCH_ROUTEID,\n patternId = row.SCH_PATTERNID)\n self.transitRoutes[routePattern].new_name = self.__cleanRouteName(row.CPT_AGENCYID + \"_\" + row.SCH_ROUTEDESIGNATOR[:(11 - 1 - len(row.CPT_AGENCYID))],used_route_names) #12 is the maximum name length\n self.transitRoutes[routePattern].agency = row.AGENCYNAME\n mode = -1\n for mode_row in mode_table:\n if row.CPT_AGENCYID == mode_row[\"CPT_AGENCYID\"] and row.CPT_MODE == mode_row[\"CPT_MODE\"]:\n if mode_row[\"SCH_ROUTEDESIGNATOR\"] != \"NA\":\n if row.SCH_ROUTEDESIGNATOR == mode_row[\"SCH_ROUTEDESIGNATOR\"]:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n break #this is as detailed as we can get\n else:\n mode = mode_row[\"MODECODE\"]\n mode_group = Mode.getModeFromLookupTable(mode_row[\"MODEGROUP\"])\n self.transitRoutes[routePattern].mode = mode\n self.transitRoutes[routePattern].mode_group = Mode.getModeName(mode_group)\n # set headways\n if row.tod == 'EA':\n self.transitRoutes[routePattern].eaHeadway = row.headway\n elif row.tod == 'AM':\n self.transitRoutes[routePattern].amHeadway = row.headway\n elif row.tod == 'MD':\n self.transitRoutes[routePattern].mdHeadway = row.headway\n elif row.tod == 'PM':\n self.transitRoutes[routePattern].pmHeadway = row.headway\n elif row.tod == 'EV':\n self.transitRoutes[routePattern].evHeadway = row.headway\n conn.close()", "def main(*argv):\n try:\n attr_features = argv[0]\n sql_clause = argv[1]\n polygon_grid = argv[2]\n error_field_count = str(argv[3]) #'NULL_COUNT'#\n error_field_def = str(argv[4]) #'NULL_COLUMNS'#\n output_fc = argv[5]\n out_fc_exists = arcpy.Exists(output_fc)\n\n # Local Variable\n #\n scratchFolder = env.scratchFolder\n scratchGDB = env.scratchGDB\n results = []\n # Logic\n #\n if not out_fc_exists:\n output_gdb = validate_workspace(os.path.dirname(output_fc))\n # Create the grid\n #\n out_grid = arcpy.CopyFeatures_management(polygon_grid, output_fc)[0]\n out_grid = extend_table(out_grid)\n where_clause=None\n else:\n arcpy.MakeFeatureLayer_management(output_fc, \"lyr\")\n arcpy.SelectLayerByLocation_management(\"lyr\", \"HAVE_THEIR_CENTER_IN\", polygon_grid)\n 
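            # The selection above narrows "lyr" to grid cells whose centres fall inside
            # polygon_grid; the cursor below collects their OBJECTIDs so the grid can later
            # be loaded as a SpatialDataFrame with a WHERE clause instead of reading every row.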
oids = [row[0] for row in arcpy.da.SearchCursor(\"lyr\", \"OID@\")]\n if len(oids) >1:\n oids_string = str(tuple(oids))\n else:\n oids_string = str('('+ str(oids[0]) + ')')\n\n where_clause = 'OBJECTID IN ' + oids_string\n\n error_field = (error_field_def, error_field_count)\n\n # Process the Data\n #\n\n poly_desc = arcpy.Describe(output_fc)\n fc_desc = arcpy.Describe(attr_features)\n if poly_desc.extent.within(fc_desc.extent):\n\n temp_fc = 'in_memory/clip'\n arcpy.AddMessage('Clipping features to polygon')\n arcpy.Clip_analysis(attr_features, output_fc, temp_fc)\n arcpy.AddMessage('Created in_memory fc')\n #data_sdf = geomotion.SpatialDataFrame.from_featureclass(temp_fc,\n # fields=[value_field])\n if sql_clause:\n attr_sdf = SpatialDataFrame.from_featureclass(temp_fc,\n fields=error_field,\n where_clause=sql_clause)\n else:\n attr_sdf = SpatialDataFrame.from_featureclass(temp_fc,\n fields=error_field)\n arcpy.AddMessage('features read into spatial dataframe after clipping')\n else:\n #data_sdf = geomotion.SpatialDataFrame.from_featureclass(, fields=[value_field])\n arcpy.AddMessage('features read into spatial dataframe without clipping')\n if sql_clause:\n attr_sdf = SpatialDataFrame.from_featureclass(attr_features,\n fields=error_field,\n where_clause=sql_clause)\n else:\n attr_sdf = SpatialDataFrame.from_featureclass(attr_features,\n fields=error_field)\n\n grid_sdf = SpatialDataFrame.from_featureclass(filename=output_fc,\n where_clause=where_clause)\n\n index = attr_sdf.sindex\n for idx, row in enumerate(grid_sdf.iterrows()):\n errors = []\n attrs = []\n geom = row[1].SHAPE\n oid = row[1].OBJECTID\n print(str(oid))\n ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y,\n geom.extent.upperRight.X, geom.extent.upperRight.Y]\n row_oids = list(index.intersect(ext))\n df_current = attr_sdf.loc[row_oids]#.copy()\n sq = df_current.geometry.disjoint(geom) == False\n fcount = len(df_current[sq]) # Total Count\n q2 = df_current[error_field_count] > 0\n #& q2\n df_current = df_current[sq].copy() # Get the # of features with deficiency_cnt > 0\n #print(\"here\")\n if fcount>0: #len(df_current) > 0:\n errors += df_current[error_field_count].tolist()\n arcpy.AddMessage(str(errors))\n def process(x):\n print(x)\n return [va for va in x.replace(' ', '').split('|')[-1].split(',') if len(va) > 1]\n for e in df_current[error_field_def].apply(process).tolist():\n attrs += e\n del e\n row = get_answers(oid=oid,\n err=errors,\n attr=attrs,\n feature_count=fcount)\n results.append(row)\n if len(results) > 250:\n extend_table(table=output_fc, rows=results)\n results = []\n del idx\n del row\n del errors\n del attrs\n del geom\n del oid\n del ext\n del row_oids\n del df_current\n del sq\n del q2\n if len(results) > 0:\n extend_table(table=output_fc, rows=results)\n del index\n del results\n del grid_sdf\n del attr_sdf\n except arcpy.ExecuteError:\n line, filename, synerror = trace()\n arcpy.AddError(\"error on line: %s\" % line)\n arcpy.AddError(\"error in file name: %s\" % filename)\n arcpy.AddError(\"with error message: %s\" % synerror)\n arcpy.AddError(\"ArcPy Error Message: %s\" % arcpy.GetMessages(2))\n except FunctionError as f_e:\n messages = f_e.args[0]\n arcpy.AddError(\"error in function: %s\" % messages[\"function\"])\n arcpy.AddError(\"error on line: %s\" % messages[\"line\"])\n arcpy.AddError(\"error in file name: %s\" % messages[\"filename\"])\n arcpy.AddError(\"with error message: %s\" % messages[\"synerror\"])\n arcpy.AddError(\"ArcPy Error Message: %s\" % messages[\"arc\"])\n except:\n 
line, filename, synerror = trace()\n arcpy.AddError(\"error on line: %s\" % line)\n arcpy.AddError(\"error in file name: %s\" % filename)\n arcpy.AddError(\"with error message: %s\" % synerror)", "def _extract_features(self, times):\n times[1] = time()\n data = {n:self._extract_feature(f) for (n,f) in self.features.items()} \n times[2] = time()\n return (data, times, os.getpid())", "def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r", "def __merge_processes_data(self, manager_data, tracker=None):\n\n if manager_data is not None:\n if (\n not self.autosave.authorized\n and PyFunceble.CONFIGURATION.multiprocess_merging_mode != \"live\"\n and not PyFunceble.CONFIGURATION.quiet\n ):\n print(\n Fore.MAGENTA\n + Style.BRIGHT\n + \"\\nMerging cross processes data... This process may take some time.\"\n )\n\n for test_output in manager_data:\n if self.autosave.authorized:\n print(Fore.MAGENTA + Style.BRIGHT + \"Merging process data ...\")\n\n self.post_test_treatment(\n test_output,\n self.file_type,\n complements_test_started=self.complements_test_started,\n auto_continue_db=self.autocontinue,\n inactive_db=self.inactive_db,\n mining=self.mining,\n whois_db=self.whois_db,\n )\n\n if tracker:\n tracker.add_position(len(test_output[\"given\"]))\n\n manager_data[:] = []\n\n self.autocontinue.save()\n self.inactive_db.save()\n self.mining.save()\n\n self.cleanup(self.autocontinue, self.autosave, test_completed=False)", "def _merge_boundaries(self):\n optical = self._amalgamated_optical\n if bool(optical):\n optical[\"catagory\"] = OPTICAL * tf.ones_like(\n optical[\"xp\"],\n dtype=tf.int64\n )\n self._optical_count = tf.shape(\n optical[\"xp\"],\n out_type=tf.int64\n )[0]\n else:\n self._optical_count = 0\n \n stop = self._amalgamated_stop\n if bool(stop):\n stop[\"catagory\"] = STOP * tf.ones_like(\n stop[\"xp\"],\n dtype=tf.int64\n )\n self._stop_count = tf.shape(\n stop[\"xp\"],\n out_type=tf.int64\n )[0]\n else:\n self._stop_count = 0\n \n target = self._amalgamated_target\n if bool(target):\n target[\"catagory\"] = TARGET * tf.ones_like(\n target[\"xp\"],\n dtype=tf.int64\n )\n self._target_count = tf.shape(\n target[\"xp\"],\n out_type=tf.int64\n )[0]\n else:\n self._target_count = 0\n \n self._merged = amalgamate(\n [optical, stop, target], \n TRIANGLE_GEO_SIG | {\"catagory\"}\n )", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def _get_features_geo(self, id):\n #creates featues/geo tensors for all atoms in protein\n if self.type_feature == \"hot_simple\":\n features = self.hot_enc(id)\n elif self.type_feature == \"mass_charges\":\n features = self.mass_charges(id)\n elif self.type_feature == \"bio_properties\":\n features = self.bio_prop(id)\n elif self.type_feature == \"bio_all_properties\":\n features_1 = self.mass_charges(id)\n features_2 = self.bio_prop(id)\n features = np.concatenate((features_1, features_2), axis=1)\n 
geometry = self._get_geometry_protein(id)\n return features, geometry", "def execute(self, parameters, messages):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_stat_fields = parameters[1]\r\n\t\t# out_location = parameters[2]\r\n\t\t# out_points_name = parameters[3]\r\n\r\n\t\t\r\n\t\tif in_wikiplace_IRI.value:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\t# outLocation = out_location.valueAsText\r\n\t\t\t# outFeatureClassName = out_points_name.valueAsText\r\n\t\t\tfieldMergeRuleTest = in_stat_fields.valueAsText\r\n\r\n\t\t\t# messages.addErrorMessage(\"in_stat_fields.values: {0}\".format(in_stat_fields.values))\r\n\t\t\t# messages.addErrorMessage(\"MergeNoFunctionalProperty.relatedTableFieldList: {0}\".format(MergeNoFunctionalProperty.relatedTableFieldList))\r\n\r\n\t\t\t\r\n\r\n\t\t\t\r\n\t\t\t# fieldmappings = in_field_mapping.valueAsText\r\n\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\tif currentWorkspace.endswith(\".gdb\") == False:\r\n\t\t\t\tmessages.addErrorMessage(\"Please enter a feature class in file geodatabase for the input feature class.\")\r\n\t\t\t\traise arcpy.ExecuteError\r\n\t\t\telse:\r\n\t\t\t\t# if in_related_table.value:\r\n\t\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\t\t# relatedTableList = UTIL.getRelatedTableFromFeatureClass(inputFeatureClassName)\r\n\t\t\t\t# fieldmappings = arcpy.FieldMappings()\r\n\t\t\t\t# fieldmappings.addTable(inputFeatureClassName)\r\n\t\t\t\t# for relatedTable in relatedTableList:\r\n\t\t\t\t# \tfieldmappings.addTable(relatedTable)\r\n\t\t\t\t# \tfieldList = arcpy.ListFields(relatedTable)\r\n\t\t\t\t# \tfieldName = fieldList[len(fieldList)-1].name\r\n\t\t\t\t# \tarcpy.AddMessage(\"fieldName: {0}\".format(fieldName))\r\n\r\n\r\n\t\t\t\t# fieldmappings.removeFieldMap(fieldmappings.findFieldMapIndex(\"wikiURL\"))\r\n\r\n\t\t\t\t# arcpy.AddMessage(\"fieldmappings: {0}\".format(fieldmappings))\r\n\t\t\t\t# if out_location.value and out_points_name.value:\r\n\t\t\t\t# \tarcpy.FeatureClassToFeatureClass_conversion(inputFeatureClassName, outLocation, outFeatureClassName, \"\", fieldmappings)\r\n\r\n\t\t\t\t# get the ValueTable(fieldName, merge rule, related table full path) \r\n\t\t\t\tfieldMergeRuleFileNameList = []\r\n\r\n\t\t\t\tif fieldMergeRuleTest:\r\n\t\t\t\t\tfieldSplitList = fieldMergeRuleTest.split(\";\")\r\n\t\t\t\t\tfor fieldSplitItem in fieldSplitList:\r\n\t\t\t\t\t\tfieldMergeList = fieldSplitItem.split(\" \", 2)\r\n\t\t\t\t\t\tfieldMergeRuleFileNameList.append(fieldMergeList)\r\n\r\n\t\t\t\tarcpy.AddMessage(\"fieldMergeRuleFileNameList: {0}\".format(fieldMergeRuleFileNameList))\r\n\r\n\t\t\t\tfor fieldMergeRuleFileNameItem in fieldMergeRuleFileNameList:\r\n\t\t\t\t\tappendFieldName = fieldMergeRuleFileNameItem[0]\r\n\t\t\t\t\tmergeRule = fieldMergeRuleFileNameItem[1]\r\n\t\t\t\t\trelatedTableName = fieldMergeRuleFileNameItem[2].replace(\"'\", \"\")\r\n\r\n\t\t\t\t\tnoFunctionalPropertyDict = UTIL.buildMultiValueDictFromNoFunctionalProperty(appendFieldName, relatedTableName)\r\n\t\t\t\t\tif noFunctionalPropertyDict != -1:\r\n\t\t\t\t\t\tUTIL.appendFieldInFeatureClassByMergeRule(inputFeatureClassName, noFunctionalPropertyDict, appendFieldName, relatedTableName, mergeRule)\r\n\r\n\t\t\t\t# UTIL.buildMultiValueDictFromNoFunctionalProperty(fieldName, tableName)\r\n\t\t\t\t# UTIL.appendFieldInFeatureClassByMergeRule(inputFeatureClassName, noFunctionalPropertyDict, appendFieldName, relatedTableName, 
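In the `bio_all_properties` branch above, the two per-atom feature blocks are stacked column-wise, so they must agree on the first (per-atom) dimension. A shape sketch (sizes invented):

    import numpy as np

    features_1 = np.zeros((120, 2))   # e.g. per-atom masses and charges
    features_2 = np.zeros((120, 7))   # e.g. per-atom biophysical properties
    features = np.concatenate((features_1, features_2), axis=1)
    assert features.shape == (120, 9)  # atoms preserved, feature columns stacked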
mergeRule)\r\n\r\n\t\treturn", "def FeaturesGen(ChopChopresults, outputDir, sgRNA_type):\n \n #make output Directory if it does not already exist\n if not os.path.isdir(outputDir):\n os.makedirs(outputDir)\n \n #list the directory contents \n for i,j,k in os.walk(ChopChopresults): #use walk to go through and find all directories\n \n if j == []: #no subdirectories\n saveDF = pd.DataFrame() #initiate dataframe\n for target in k: #loop through to find the sgRNA sequences\n if target.endswith('.offtargets'):\n with open(os.path.join(i,target), 'r+') as f:\n guide = f.readlines()\n #add them to a dataframe\n temp = pd.Series()\n temp['guideNo'] = target.split('.')[0] + sgRNA_type\n temp['guideSeq'] = guide.pop(0).rstrip()\n \n saveDF = saveDF.append(temp.to_frame().transpose())\n saveDF['type'] = 'sgRNA'\n \n if sgRNA_type == 'General' or sgRNA_type == None:\n saveDF['fwd'] = 'pink'\n saveDF['rev'] = 'green'\n elif sgRNA_type == 'GG':\n saveDF['fwd'] = 'yellow'\n saveDF['rev'] = 'plum'\n elif sgRNA_type == 'GA':\n saveDF['fwd'] = 'cyan'\n saveDF['rev'] = 'cornflower blue'\n \n \n #save to txt file with tab delimiter\n saveDF.to_csv(os.path.join(outputDir, os.path.basename(i) + '_features.txt'),\\\n index = False, header = False, sep = '\\t')\n \n del saveDF", "def _build_legs(self):\n if self._primary_mode == 'transit':\n for transit_leg in self._best_trip.get_transit_legs():\n self._legs.append(transit_leg.get_directions())\n else:\n self._legs.append(self._best_trip.get_directions())", "def Point_to_FeatureClass(self, fc):\n\n\n feature_class = []\n for index, traectory in enumerate(self.__traectory_list):\n point_row = arcpy.Point(X=traectory[0], Y=traectory[1], Z=traectory[2], ID=index)\n feature_class.append(arcpy.PointGeometry(point_row, arcpy.SpatialReference(2436)))\n arcpy.CopyFeatures_management(feature_class, (self.workspace + '\\\\' + fc))\n print 'Complete Creating a Point Feature Class'\n\n return None", "def roadSegments(locations, API_key=\"Avah46_M-gfFeQ3P1w09Qq1ElAV9ZEHFDm9b8JRCRa8qPP5uVn21hDqAPVJgV4i_\"): \n \n # Base URL\n uri = 'http://dev.virtualearth.net/' # Resource URL \n path = 'REST/v1/Routes?'\n \n \n # URL Parameters\n params = { 'wayPoint.0' : locations[0]+',Singapore',\n 'wayPoint.1' : locations[1]+',Singapore',\n 'routeAttributes':'routePath',\n 'key' : API_Key} # by default 'optimize' : 'time'} # this is by default\n \n url = uri+path\n\n results = requests.get(\n url,\n params = params\n ).json()# ['resourceSets']\n\n # Retrieving values\n statusCode = results['statusCode']\n if statusCode == 200:\n # print(statusCode)\n\n # TODO review the exceptions and modify these basic exception handlings\n try:\n travelDistance = results['resourceSets'][0]['resources'][0]['travelDistance']\n except:\n travelDistance = 0\n try:\n travelDuration = results['resourceSets'][0]['resources'][0]['travelDuration']\n except:\n travelDuration = 0\n try:\n travelDurationTraffic = results['resourceSets'][0]['resources'][0]['travelDurationTraffic']\n except:\n travelDurationTraffic = 0\n\n try:\n numberSegments = len(results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems'])\n except:\n numberSegments = 0\n try:\n itineraryItems = results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems']\n except:\n itineraryItems = 'No items'\n\n pathCoord = results['resourceSets'][0]['resources'][0]['routePath']['line']['coordinates']\n\n roadName = []\n travelDistances = []\n travelDurations = []\n maneuverType = []\n\n for seg in itineraryItems:\n 
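The `in_stat_fields` value table arrives as a single `;`-separated string, and each entry is split at most twice (`split(" ", 2)`) so the quoted table path survives even if it contains spaces. A small illustration of that parsing (paths invented):

    field_merge_rule_test = "label SUM 'C:/Data/places.gdb/relatedTable';elev MEAN 'C:/Data/places.gdb/demTable'"
    entries = [item.split(" ", 2) for item in field_merge_rule_test.split(";")]
    # [['label', 'SUM', "'C:/Data/places.gdb/relatedTable'"],
    #  ['elev', 'MEAN', "'C:/Data/places.gdb/demTable'"]]
    # the surrounding single quotes are stripped afterwards with .replace("'", "")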
for i in range(len(seg['details'])):\n try:\n roadName.append(seg['details'][i]['names'])\n except (KeyError, IndexError):\n roadName.append(0)\n try:\n travelDistances.append(seg['travelDistance'])\n except (KeyError, IndexError):\n travelDistances.append(0)\n\n try:\n travelDurations.append(seg['travelDuration'])\n except (KeyError, IndexError):\n travelDurations.append(0)\n try:\n maneuverType.append(seg['details'][i]['maneuverType'])\n except (KeyError, IndexError):\n maneuverType.append(0)\n\n\n return statusCode, travelDistance, travelDuration, travelDurationTraffic, numberSegments, roadName, \\\n travelDistances, travelDurations, maneuverType, pathCoord\n\n else:\n print(\"Unsuccessful route calculation.\")
# (40.754009, -73.981097),\n # mode=\"walking\"\n # )[0]\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n # now that I have javascript sending over the json, load json that details\n # the route based on the user's point A and point B\n\n # -------------- This section is for interpolation/splitting using shapely\n first = True # to see if this is the start position for the entire route\n line_points = [] # stores all the points to the route based on dict passed\n\n for leg in detail_of_trip['legs']:\n for step in leg['steps']:\n # Create a list of two element lists that represent points along the\n # route. via google. line_points = [ [lat1, lng1], [lat2, lng2],...]\n # Only add the starting point the first time. Every other iteration\n # we will just tack on the end points to our line.\n if first:\n line_points.append([step['start_location']['lat'], step['start_location']['lng']])\n first = False\n line_points.append([step['end_location']['lat'], step['end_location']['lng']])\n\n # Now load those points into a geometry, here shapely's LineString type.\n route_line = LineString(line_points)\n return (route_line, line_points)", "def wfs_common(request, response, mode, spatial_mode='wfs'):\n\n outputpath = configuration.get_config_value('server', 'outputpath')\n outputurl = configuration.get_config_value('server', 'outputurl')\n\n list_of_files = []\n for one_resource in request.inputs['resource']:\n # Download if not opendap\n # Adding a maximum file size from a server config file would\n # be possible here...\n try:\n nc_file = opendap_or_download(\n one_resource.data,\n auth_tkt_cookie=request.http_request.cookies,\n output_path='/tmp')\n except:\n raise Exception(traceback.format_exc())\n list_of_files.append(nc_file)\n\n if ('typename' in request.inputs) and ('featureids' in request.inputs):\n typename = request.inputs['typename'][0].data\n features = [f.data for f in request.inputs['featureids']]\n if 'geoserver' in request.inputs:\n geoserver = request.inputs['geoserver'][0].data\n else:\n geoserver = configuration.get_config_value('extra', 'geoserver')\n if 'mosaic' in request.inputs:\n mosaic = request.inputs['mosaic'][0].data\n else:\n mosaic = False\n try:\n conn = WebFeatureService(url=geoserver, version='2.0.0')\n resp = conn.getfeature([typename], featureid=features,\n outputFormat='application/json')\n feature = json.loads(resp.read())\n crs_code = owslib.crs.Crs(\n feature['crs']['properties']['name']).code\n crs = ocgis.CoordinateReferenceSystem(epsg=crs_code)\n geom = [\n {'geom': shape(f['geometry']), 'crs': crs,\n 'properties': f['properties']}\n for f in feature['features']]\n except Exception as e:\n msg = ('Failed to fetch features.\\ngeoserver: {0} \\n'\n 'typename: {1}\\nfeatures {2}\\n{3}').format(\n geoserver, typename, features, e)\n raise Exception(msg)\n if mosaic:\n new_geom = geom[0]\n for merge_geom in geom[1:]:\n new_geom['geom'] = new_geom['geom'].union(merge_geom['geom'])\n new_geom['properties'] = {'bbox': feature['bbox']}\n geom = new_geom\n elif spatial_mode == 'bbox':\n geom = [[request.inputs['lon0'][0].data,\n request.inputs['lat0'][0].data,\n request.inputs['lon1'][0].data,\n request.inputs['lat1'][0].data]]\n else:\n geom = [None]\n\n if ('initial_datetime' in request.inputs) and \\\n ('final_datetime' in request.inputs):\n tr = [request.inputs['initial_datetime'][0].data,\n request.inputs['final_datetime'][0].data]\n else:\n tr = None\n\n try:\n output_files = []\n output_urls = []\n mv_dir = 
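Because the function returns a shapely `LineString`, chunking the route afterwards reduces to `interpolate` calls. A sketch (note the coordinates are lat/lng degrees, so equal spacing along the line is not equal spacing in metres):

    def points_along(route_line, n=10):
        # n + 1 evenly spaced points from start to end of the route
        return [route_line.interpolate(i / float(n), normalized=True)
                for i in range(n + 1)]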
tempfile.mkdtemp(dir=outputpath)\n os.chmod(mv_dir, 0o755)\n\n for one_file in list_of_files:\n file_name = os.path.basename(one_file)\n if file_name[-3:] == '.nc':\n file_prefix = file_name[:-3]\n else:\n file_prefix = file_name\n ocgis.env.DIR_OUTPUT = tempfile.mkdtemp(dir=os.getcwd())\n ocgis.env.OVERWRITE = True\n nc = netCDF4.Dataset(one_file, 'r')\n var_names = guess_main_variables(nc)\n nc.close()\n rd = ocgis.RequestDataset(one_file, var_names)\n for i, one_geom in enumerate(geom):\n if one_geom is None:\n ocgis_geom = None\n elif spatial_mode == 'bbox':\n ocgis_geom = one_geom\n else:\n ocgis_geom = one_geom['geom']\n if mode == 'averager':\n # Extent errors are ignored\n try:\n # Here with aggregate=True, can't pass the whole\n # one_geom dictionary, is this a sign that this does\n # not support multipolygon?\n ops = ocgis.OcgOperations(\n dataset=rd, geom=ocgis_geom,\n spatial_operation='clip', aggregate=True,\n time_range=tr, output_format='nc',\n interpolate_spatial_bounds=True,\n prefix=file_prefix).execute()\n except ExtentError:\n continue\n elif mode == 'subsetter':\n # Extent errors are ignored\n try:\n # Still having problem with the geometry, previously\n # was passing geom=[one_geom]\n ops = ocgis.OcgOperations(\n dataset=rd, geom=ocgis_geom, time_range=tr,\n output_format='nc',\n interpolate_spatial_bounds=True,\n prefix=file_prefix).execute()\n except ExtentError:\n continue\n # Here, the global attribute 'subset_typename' and\n # 'subset_featureid' are added to the NetCDF file to keep\n # track of the feature used.\n if (geom != [None]) and (spatial_mode == 'wfs'):\n with netCDF4.Dataset(ops, 'a') as nc:\n nc.subset_typename = typename\n nc.subset_featureid = features[i]\n\n if (spatial_mode == 'wfs') and \\\n ('featureids' in request.inputs):\n mv_name = '{0}_{1}.nc'.format(\n os.path.basename(ops)[:-3], features[i])\n else:\n mv_name = '{0}_{1}.nc'.format(\n os.path.basename(ops)[:-3], 'subset')\n\n mv_file = os.path.join(mv_dir, mv_name)\n shutil.move(ops, mv_file)\n output_files.append(mv_file)\n shutil.rmtree(ocgis.env.DIR_OUTPUT)\n\n # Cover the case of an online wps server and the offline\n # mode for tests.\n if outputurl == 'file:///tmp':\n disk_file = 'file:///' + mv_file.lstrip('/')\n output_urls.append(disk_file)\n else:\n url_file = os.path.join(\n outputurl, os.path.basename(mv_dir), mv_name)\n output_urls.append(url_file)\n except:\n raise Exception(traceback.format_exc())\n\n # If only ExtentError occurred, the output_urls will be empty...\n if not output_urls:\n raise ExtentError(message=\"All ocgis calls returned ExtentError.\")\n\n time_str = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime())\n output_file_name = \"result_%s_.json\" % (time_str,)\n output_file = os.path.join('/tmp', output_file_name)\n with open(output_file, 'w') as f1:\n f1.write(json.dumps(output_urls))\n response.outputs['output'].file = output_file\n response.outputs['output'].output_format = json_format\n response.update_status(\"done\", 100)\n return response
self.collect_subfolders(self.input_dir)\n logger.info(\"relations - {}\".format(relations))\n\n execution_times = []\n\n for rel, rel_path in tqdm(relations.items(), desc=\"relations\"):\n logger.info(\"collecting training files from {}\".format(rel_path))\n tr_files = self.collect_files(rel_path, self.regexp_train)\n hyper_params = self.get_hyperparams()\n hyper_params['graph'] = tr_files\n\n output_folder = os.path.join(self.output_dir, rel)\n if not os.path.exists(output_folder):\n logger.info(\"creating {} (did not exist)\".format(output_folder))\n os.makedirs(output_folder)\n\n for params in tqdm(ParameterGrid(hyper_params), desc=\"training embedding\"):\n logger.info(\"hyperparams: {}\".format(params))\n train_file = params['graph']\n model_name = self.compute_model_name(params, output_folder)\n logger.info('training starspace model \"{}\" from file \"{}\"'.format(\n model_name, train_file))\n external_output, delta = self.call_starspace(params, train_file, model_name)\n logger.info(\"executed in {:0.2f}s\".format(delta))\n\n logger.info(\"external command output logged in {}\".format(self.external_log))\n if not os.path.exists(self.output_dir):\n logger.info(\"creating {} (did not exist)\".format(self.output_dir))\n os.makedirs(self.output_dir)\n\n with open(self.external_log, 'a') as f:\n f.write(external_output)\n\n execution_times.append(dict({ 'time': delta }, **params))\n \n return execution_times", "def execute(self, parameters, messages):\r\n\t\tin_buf_query_center = parameters[0]\r\n\t\tin_place_type = parameters[1]\r\n\t\tin_is_directed_instance = parameters[2]\r\n\t\tin_radius = parameters[3]\r\n\t\tout_location = parameters[4]\r\n\t\tout_points_name = parameters[5]\r\n\t\tout_place_type_url = parameters[6]\r\n\r\n\t\tinBufCenter = in_buf_query_center.valueAsText\r\n\t\tinPlaceType = in_place_type.valueAsText\r\n\t\tsearchRadius = in_radius.valueAsText\r\n\t\toutLocation = out_location.valueAsText\r\n\t\toutFeatureClassName = out_points_name.valueAsText\r\n\t\t\r\n\t\tisDirectInstance = False\r\n\t\t\r\n\t\tif in_is_directed_instance.valueAsText == 'true':\r\n\t\t\tisDirectInstance = True\r\n\t\telif in_is_directed_instance.valueAsText == 'false':\r\n\t\t\tisDirectInstance = False\r\n\r\n\t\t# arcpy.AddMessage((\"in_is_directed_instance.valueAsText: {0}\").format(in_is_directed_instance.valueAsText))\r\n\r\n\t\r\n\t\tif \".gdb\" in outLocation:\r\n\t\t\t# if the outputLocation is a file geodatabase, cancatnate the outputlocation with outFeatureClassName to create a feature class in current geodatabase\r\n\t\t\tout_path = os.path.join(outLocation,outFeatureClassName)\r\n\t\telse:\r\n\t\t\t# if the outputLocation is a folder, creats a shapefile in this folder\r\n\t\t\tout_path = os.path.join(outLocation,outFeatureClassName) + \".shp\"\r\n\t\t\t# however, Relationship Class must be created in a geodatabase, so we forbid to create a shapfile\r\n\t\t\t# messages.addErrorMessage(\"Please enter a file geodatabase as output location in order to create a relation class\")\r\n\t\t\t# raise arcpy.ExecuteError\r\n\t\t\t\r\n\r\n\r\n\t\tmessages.addMessage(\"outpath: {0}\".format(out_path))\r\n\r\n\t\tselectedURL = out_place_type_url.valueAsText\r\n\r\n\t\t# messages.addMessage(\"len(self.entityTypeLabel): {0}\".format(len(self.entityTypeLabel)))\r\n\r\n\t\t# for i in range(len(self.entityTypeLabel)):\r\n\t\t# \tmessages.addMessage(\"Label: {0}\".format(self.entityTypeLabel[i]))\r\n\t\t# \tif inPlaceType == self.entityTypeLabel[i]:\r\n\t\t# \t\tselectedURL = 
self.entityTypeURLList[i]\r\n\r\n\t\tmessages.addMessage(\"selectedURL: {0}\".format(selectedURL))\r\n\r\n\t\t# Create a FeatureSet object and load in_memory feature class\r\n\t\tin_feature_set = arcpy.FeatureSet()\r\n\t\tin_feature_set.load(inBufCenter)\r\n\t\tin_feature_set_json = json.loads(in_feature_set.JSON)\r\n\r\n\t\t# messages.addMessage(\"Points: {0}\".format(json.loads(in_feature_set.JSON)))\r\n\r\n\t\t# messages.addMessage(\"Point: {0}\".format(json.loads(in_feature_set.JSON)['spatialReference']['wkid']))\r\n\r\n\t\tWGS84Reference = arcpy.SpatialReference(4326)\r\n\t\tcurrentSpatialReference = arcpy.SpatialReference(in_feature_set_json['spatialReference']['latestWkid'])\r\n\r\n\t\t# a set of unique Coordinates for each input points\r\n\t\t# searchCoordsSet = Set()\r\n\t\tsearchCoordsSet = []\r\n\t\r\n\t\tfor i in range(len(in_feature_set_json['features'])):\r\n\t\t\tlat = in_feature_set_json['features'][i]['geometry']['y']\r\n\t\t\tlng = in_feature_set_json['features'][i]['geometry']['x']\r\n\t\t\tcoords = [lng, lat]\r\n\t\t\tsearchCoordsSet.append(coords)\r\n\t\t# \tif i == 0:\r\n\t\t# \t\tsearchCoordsSet.append(coords)\r\n\t\t# \telse:\r\n\t\t# \t\tif coords not in searchCoordsSet:\r\n\t\t# \t\t\tsearchCoordsSet.add(coords)\r\n\r\n\r\n\t\t# searchCoordsSet = List(searchCoordsSet)\r\n\r\n\t\t# a set of unique Coordinates for each found places\r\n\t\tplaceIRISet = Set()\r\n\t\tplaceList = []\r\n\r\n\t\tfor coord in searchCoordsSet:\r\n\t\t\tlat = coord[1]\r\n\t\t\tlng = coord[0]\r\n\r\n\t\t# lat = in_feature_set_json['features'][0]['geometry']['y']\r\n\t\t# lng = in_feature_set_json['features'][0]['geometry']['x']\r\n\r\n\t\t\tif in_feature_set_json['spatialReference']['wkid'] != '4326' or in_feature_set_json['spatialReference']['latestWkid'] != '4326':\r\n\t\t\t\tWGS84PtGeometry = arcpy.PointGeometry(arcpy.Point(lng, lat), currentSpatialReference).projectAs(WGS84Reference)\r\n\t\t\t\t# messages.addMessage(\"My Coordinates: {0}\".format(WGS84PtGeometry.WKT))\r\n\t\t\t\tcoordList = re.split(\"[( )]\", WGS84PtGeometry.WKT)\r\n\t\t\t\tlat = coordList[3]\r\n\t\t\t\tlng = coordList[2]\r\n\r\n\t\t\tqueryPrefix = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n\t\t\t\t\t\t\t\t\tPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\r\n\t\t\t\t\t\t\t\t\tPREFIX owl: <http://www.w3.org/2002/07/owl#>\r\n\t\t\t\t\t\t\t\t\tPREFIX geo-pos: <http://www.w3.org/2003/01/geo/wgs84_pos#>\r\n\t\t\t\t\t\t\t\t\tPREFIX omgeo: <http://www.ontotext.com/owlim/geo#>\r\n\t\t\t\t\t\t\t\t\tPREFIX dbpedia: <http://dbpedia.org/resource/>\r\n\t\t\t\t\t\t\t\t\tPREFIX dbp-ont: <http://dbpedia.org/ontology/>\r\n\t\t\t\t\t\t\t\t\tPREFIX ff: <http://factforge.net/>\r\n\t\t\t\t\t\t\t\t\tPREFIX om: <http://www.ontotext.com/owlim/>\r\n\t\t\t\t\t\t\t\t\tPREFIX wikibase: <http://wikiba.se/ontology#>\r\n\t\t\t\t\t\t\t\t\tPREFIX bd: <http://www.bigdata.com/rdf#>\r\n\t\t\t\t\t\t\t\t\tPREFIX wdt: <http://www.wikidata.org/prop/direct/>\r\n\t\t\t\t\t\t\t\t\tPREFIX geo: <http://www.opengis.net/ont/geosparql#>\"\"\"\r\n\r\n\t\t\tif selectedURL != None:\r\n\t\t\t\tquery = queryPrefix + \"\"\"SELECT distinct ?place ?placeLabel ?distance ?location\r\n\t\t\t\t\t\t\t\t\t\tWHERE {\r\n\t\t\t\t\t\t\t\t\t\t# geospatial queries\r\n\t\t\t\t\t\t\t\t\t\tSERVICE wikibase:around {\r\n\t\t\t\t\t\t\t\t\t\t# get the coordinates of a place\r\n\t\t\t\t\t\t\t\t\t\t?place wdt:P625 ?location .\r\n\t\t\t\t\t\t\t\t\t\t# create a buffer around (-122.4784360859997 37.81826788900048)\r\n\t\t\t\t\t\t\t\t\t\tbd:serviceParam wikibase:center 
\"Point(\"\"\" + str(lng) + \"\"\" \"\"\" + str(lat) + \"\"\")\"^^geo:wktLiteral .\r\n\t\t\t\t\t\t\t\t\t\t# buffer radius 2km\r\n\t\t\t\t\t\t\t\t\t\tbd:serviceParam wikibase:radius '\"\"\"+searchRadius+\"\"\"' .\r\n\t\t\t\t\t\t\t\t\t\tbd:serviceParam wikibase:distance ?distance .\r\n\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t# retrieve the English label\r\n\t\t\t\t\t\t\t\t\t\tSERVICE wikibase:label {bd:serviceParam wikibase:language \"en\". ?place rdfs:label ?placeLabel .}\"\"\"\r\n\t\t\t\tif isDirectInstance == False:\r\n\t\t\t\t\tquery +=\t\t\t\"\"\"?place wdt:P31 ?placeFlatType.\r\n\t\t\t\t\t\t\t\t\t\t?placeFlatType wdt:P279* <\"\"\" + selectedURL + \"\"\">.\"\"\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tquery +=\t\t\t\"\"\"?place wdt:P31 <\"\"\" + selectedURL + \"\"\">.\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t# show results ordered by distance\r\n\t\t\t\tquery +=\t\t\t\t\"\"\"} ORDER BY ?distance\"\"\"\r\n\t\t\telse:\r\n\t\t\t\tquery = queryPrefix + \"\"\"SELECT distinct ?place ?placeLabel ?distance ?location \r\n\t\t\t\t\t\t\t\t\t\tWHERE {\r\n\t\t\t\t\t\t\t\t\t\t# geospatial queries\r\n\t\t\t\t\t\t\t\t\t\tSERVICE wikibase:around {\r\n\t\t\t\t\t\t\t\t\t\t# get the coordinates of a place\r\n\t\t\t\t\t\t\t\t\t\t?place wdt:P625 ?location .\r\n\t\t\t\t\t\t\t\t\t\t# create a buffer around (-122.4784360859997 37.81826788900048)\r\n\t\t\t\t\t\t\t\t\t\tbd:serviceParam wikibase:center \"Point(\"\"\" + str(lng) + \"\"\" \"\"\" + str(lat) + \"\"\")\"^^geo:wktLiteral .\r\n\t\t\t\t\t\t\t\t\t\t# buffer radius 2km\r\n\t\t\t\t\t\t\t\t\t\tbd:serviceParam wikibase:radius '\"\"\"+searchRadius+\"\"\"' .\r\n\t\t\t\t\t\t\t\t\t\tbd:serviceParam wikibase:distance ?distance .\r\n\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t# retrieve the English label\r\n\t\t\t\t\t\t\t\t\t\tSERVICE wikibase:label {bd:serviceParam wikibase:language \"en\". 
?place rdfs:label ?placeLabel .}\r\n\t\t\t\t\t\t\t\t\t\t?place wdt:P31 ?placeFlatType.\r\n\t\t\t\t\t\t\t\t\t\t?placeFlatType wdt:P279* wd:Q2221906.\r\n\r\n\t\t\t\t\t\t\t\t\t\t# show results ordered by distance\r\n\t\t\t\t\t\t\t\t\t\t} ORDER BY ?distance\"\"\"\r\n\r\n\t\t\tsparqlParam = {'query': query, 'format': 'json'}\r\n\t\t\t\r\n\t\t\tsparqlRequest = requests.get('https://query.wikidata.org/sparql', params=sparqlParam)\r\n\r\n\t\t\tprint(sparqlRequest.url)\r\n\t\t\tmessages.addMessage(\"SPARQL: {0}\".format(sparqlRequest.url))\r\n\t\t \r\n\t\t\t\r\n\t\t\tbufferQueryResult = sparqlRequest.json()[\"results\"][\"bindings\"]\r\n\r\n\t\t\t# if len(bufferQueryResult) == 0:\r\n\t\t\t# \tmessages.addMessage(\"No {0} nearby the clicked place can be finded!\".format(inPlaceType))\r\n\t\t\t# \t# pythonaddins.MessageBox(\"No \" + inPlaceType + \" nearby the clicked place can be finded!\",\r\n\t\t\t# \t# \"Warning Message\", 0)\r\n\t\t\t# else:\r\n\r\n\t\t\tfor item in bufferQueryResult:\r\n\t\t\t\tprint \"%s\\t%s\\t%s\\t%s\" % (\r\n\t\t\t\t\titem[\"place\"][\"value\"], item[\"placeLabel\"][\"value\"], item[\"distance\"][\"value\"],\r\n\t\t\t\t\titem[\"location\"][\"value\"])\r\n\t\t\t\tif len(placeIRISet) == 0 or item[\"place\"][\"value\"] not in placeIRISet:\r\n\t\t\t\t\tplaceIRISet.add(item[\"place\"][\"value\"])\r\n\t\t\t\t\tcoordItem = item[\"location\"][\"value\"]\r\n\t\t\t\t\tcoordList = re.split(\"[( )]\", coordItem)\r\n\t\t\t\t\titemlat = coordList[2]\r\n\t\t\t\t\titemlng = coordList[1]\r\n\t\t\t\t\tplaceList.append(\r\n\t\t\t\t\t\t[item[\"place\"][\"value\"], item[\"placeLabel\"][\"value\"], item[\"distance\"][\"value\"],\r\n\t\t\t\t\t\t itemlat,itemlng])\r\n\r\n\t\tif len(placeList) == 0:\r\n\t\t\tmessages.addMessage(\"No {0} nearby the input point(s) can be finded!\".format(inPlaceType))\r\n\t\telse:\r\n\t\t\t# Spatial reference set to GCS_WGS_1984\r\n\t\t\tspatial_reference = arcpy.SpatialReference(4326)\r\n\t\t\t# creat a Point feature class in arcpy\r\n\t\t\tpt = arcpy.Point()\r\n\t\t\tptGeoms = []\r\n\t\t\tfor p in placeList:\r\n\t\t\t\tpt.X = float(p[4])\r\n\t\t\t\tpt.Y = float(p[3])\r\n\t\t\t\tpointGeometry = arcpy.PointGeometry(pt, spatial_reference)\r\n\t\t\t\tptGeoms.append(pointGeometry)\r\n\r\n\t\t\t# out_path = pythonaddins.SaveDialog(\"Save Nearby Places\", \"placeNear\",\r\n\t\t\t# os.path.dirname(arcpy.mapping.MapDocument(\"current\").filePath),\r\n\t\t\t# FileGDBSave())\r\n\r\n\t\t\tif out_path == None:\r\n\t\t\t\tmessages.addMessage(\"No data will be added to the map document.\")\r\n\t\t\t\t# pythonaddins.MessageBox(\"No data will be added to the map document.\", \"Warning Message\", 0)\r\n\t\t\telse:\r\n\t\t\t\t# create a geometry Feature class to represent \r\n\t\t\t\tplaceNearFeatureClass = arcpy.CopyFeatures_management(ptGeoms, out_path)\r\n\r\n\t\t\t\tlabelFieldLength = Json2Field.fieldLengthDecide(bufferQueryResult, \"placeLabel\")\r\n\t\t\t\tarcpy.AddMessage(\"labelFieldLength: {0}\".format(labelFieldLength))\r\n\t\t\t\t# add field to this point feature class\r\n\t\t\t\tarcpy.AddField_management(placeNearFeatureClass, \"Label\", \"TEXT\", field_length=labelFieldLength)\r\n\t\t\t\tarcpy.AddField_management(placeNearFeatureClass, \"URL\", \"TEXT\", field_length=100)\r\n\t\t\t\t# arcpy.AddField_management(placeNearFeatureClass, \"TypeURL\", \"TEXT\", field_length=50)\r\n\t\t\t\t# arcpy.AddField_management(placeNearFeatureClass, \"TypeName\", \"TEXT\", field_length=50)\r\n\t\t\t\t# if selectedURL != None:\r\n\t\t\t\t# 
\tarcpy.AddField_management(placeNearFeatureClass, \"BTypeURL\", \"TEXT\", field_length=50)\r\n\t\t\t\t# \tarcpy.AddField_management(placeNearFeatureClass, \"BTypeName\", \"TEXT\", field_length=50)\r\n\t\t\t\t# arcpy.AddField_management(placeNearFeatureClass, \"Latitude\", \"TEXT\", 10, 10)\r\n\t\t\t\t# arcpy.AddField_management(placeNearFeatureClass, \"Longitude\", \"TEXT\", 10, 10)\r\n\r\n\t\t\t\tarcpy.AddXY_management(placeNearFeatureClass)\r\n\t\t\t\t# add label, latitude, longitude value to this point feature class\r\n\r\n\t\t\t\ti = 0\r\n\t\t\t\tcursor = arcpy.UpdateCursor(out_path)\r\n\t\t\t\trow = cursor.next()\r\n\t\t\t\twhile row:\r\n\t\t\t\t\trow.setValue(\"Label\", placeList[i][1])\r\n\t\t\t\t\trow.setValue(\"URL\", placeList[i][0])\r\n\t\t\t\t\t# row.setValue(\"TypeURL\", placeList[i][5])\r\n\t\t\t\t\t# row.setValue(\"TypeName\", placeList[i][6])\r\n\t\t\t\t\tcursor.updateRow(row)\r\n\t\t\t\t\ti = i + 1\r\n\t\t\t\t\trow = cursor.next()\r\n\r\n\t\t\t\t# if selectedURL != None:\r\n\t\t\t\t# \ti = 0\r\n\t\t\t\t# \tcursor = arcpy.UpdateCursor(out_path)\r\n\t\t\t\t# \trow = cursor.next()\r\n\t\t\t\t# \twhile row:\r\n\t\t\t\t# \t\trow.setValue(\"BTypeURL\", selectedURL)\r\n\t\t\t\t# \t\trow.setValue(\"BTypeName\", inPlaceType)\r\n\t\t\t\t# \t\tcursor.updateRow(row)\r\n\t\t\t\t# \t\ti = i + 1\r\n\t\t\t\t# \t\trow = cursor.next()\r\n\r\n\t\t\t\t# get the map document\r\n\t\t\t\t# mxd = arcpy.mapping.MapDocument(\r\n\t\t\t\t# r\"D:\\UCSB_STKO_Lab\\STKO Research\\research\\DBpedia-Search-plugin\\wiki1.mxd\")\r\n\r\n\r\n\r\n\t\t\t\tmxd = arcpy.mapping.MapDocument(\"CURRENT\")\r\n\r\n\t\t\t\t# get the data frame\r\n\t\t\t\tdf = arcpy.mapping.ListDataFrames(mxd)[0]\r\n\r\n\t\t\t\t# create a new layer\r\n\t\t\t\tplaceNearLayer = arcpy.mapping.Layer(out_path)\r\n\r\n\t\t\t\t# add the layer to the map at the bottom of the TOC in data frame 0\r\n\t\t\t\tarcpy.mapping.AddLayer(df, placeNearLayer, \"BOTTOM\")\r\n\r\n\t\treturn", "def merge_global_features_collections(global_features_collections_list: List[Optional[Dict[str,\n kapture.GlobalFeatures]]],\n global_features_paths: List[str],\n output_path: str,\n tar_handlers: List[TarCollection]) -> Dict[str, kapture.GlobalFeatures]:\n return _merge_image_features_collection(kapture.GlobalFeatures, global_features_collections_list,\n global_features_paths, output_path, tar_handlers)", "def speed_map_segs_to_geojson(seg_list):\n # Initialize a new GeoJSON object\n new_geojson = {\n 'type': 'FeatureCollection',\n 'features': []\n }\n\n # Dont work on the input list\n seg_list_copy = copy.deepcopy(seg_list)\n\n # Iterativley build the features of the new GeoJSON object\n for i, seg in enumerate(seg_list_copy):\n # Prepare the feature properties\n del seg['fromStop']\n del seg['toStop']\n\n # New attribute, can be used to identify segments\n seg['order'] = i\n\n # Prepare the feature geometry coordinates\n pathLocs = seg.pop('pathLocs')\n coords = [[p['lon'], p['lat']] for p in pathLocs]\n\n # Construct feature\n new_feature = {\n 'type': 'Feature',\n 'geometry': {'type': 'LineString', 'coordinates': coords},\n 'properties': seg\n }\n\n # Append feature to the list of features in GeoJSON object\n new_geojson['features'].append(new_feature)\n\n return new_geojson", "def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def run(self):\n lineage_csv_gz = self.input_files_local[0][0]\n output_db = self.output_files_local()[0]\n log.write(f\"input: 
{lineage_csv_gz} output: {output_db}\")\n\n with IdSeqDictForUpdate(output_db, IdSeqDictValue.VALUE_TYPE_ARRAY) as lineage_dict:\n batch_list = {}\n with gzip.open(lineage_csv_gz, \"rt\") as gzf:\n for line in gzf:\n fields = line.rstrip().split(\",\")\n taxid = fields[0]\n species, genus, family = fields[-1:-4:-1]\n batch_list[taxid] = [species, genus, family]\n if len(batch_list) >= BATCH_INSERT_SIZE:\n lineage_dict.batch_inserts(batch_list.items())\n batch_list = {}\n lineage_dict.batch_inserts(batch_list.items())", "def finalize(feature, features, obj, source, aipname, cta_aip, restrict_aip, aip_sup, tia_aip):\n global completed\n global country\n global end_notam\n\n feature['properties']['source_href']=source\n feature['properties']['country']=country\n feature['geometry'] = obj\n aipname = wstrip(str(aipname))\n if aipname == 'EN D476':\n aipname = 'EN D476 R og B 1'\n if aipname == 'EN D477':\n aipname = 'EN D477 R og B 2'\n\n if 'ACC' in aipname and country==\"ES\":\n return {\"properties\":{}}, []\n for ignore in ['ADS','AOR','FAB',' FIR','HTZ']:\n if ignore in aipname:\n logger.debug(\"Ignoring: %s\", aipname)\n return {\"properties\":{}}, []\n feature['properties']['name']=aipname\n if cta_aip or aip_sup or tia_aip or 'ACC' in aipname:\n recount = len([f for f in features if aipname in f['properties']['name']])\n recount = recount or len([f for f in accsectors if aipname in f['properties']['name']])\n if recount>0:\n separator = \" \"\n if re.search('\\d$', aipname):\n separator=\"-\"\n # special handling Farris TMA skipping counters\n if \"Farris\" in aipname:\n if recount > 4:\n recount += 2\n else:\n recount += 1\n logger.debug(\"RECOUNT renamed \" + aipname + \" INTO \" + aipname + separator + str(recount+1))\n feature['properties']['name']=aipname + separator + str(recount+1)\n if 'TIZ' in aipname or 'TIA' in aipname:\n feature['properties']['class']='G'\n elif 'CTR' in aipname:\n feature['properties']['class']='D'\n elif 'TRIDENT' in aipname \\\n or 'EN D' in aipname or 'END' in aipname \\\n or 'ES D' in aipname:\n feature['properties']['class']='D'\n elif 'EN R' in aipname \\\n or 'ES R' in aipname or 'ESTRA' in aipname \\\n or 'EUCBA' in aipname or 'RPAS' in aipname:\n feature['properties']['class']='R'\n elif 'TMA' in aipname or 'CTA' in aipname or 'FIR' in aipname \\\n or 'ACC' in aipname or 'ATZ' in aipname or 'FAB' in aipname \\\n or 'Sector' in aipname:\n feature['properties']['class']='C'\n elif '5.5' in source or \"Hareid\" in aipname:\n if \"Nidaros\" in aipname:\n #skip old Nidaros airspace\n return {\"properties\":{}}, []\n feature['properties']['class']='Luftsport'\n index = len(collection)+len(features)\n\n if names.get(aipname):\n logger.debug(\"DUPLICATE NAME: %s\", aipname)\n\n if len(obj)>100:\n logger.debug(\"COMPLEX POLYGON %s with %i points\", feature['properties'].get('name'), len(obj))\n obj=simplify_poly(obj, 100)\n feature['geometry'] = obj\n\n if len(obj)>3:\n logger.debug(\"Finalizing polygon #%i %s with %i points.\", index, feature['properties'].get('name'), len(obj))\n\n name = feature['properties'].get('name')\n source = feature['properties'].get('source_href')\n from_ = feature['properties'].get('from (ft amsl)')\n to_ = feature['properties'].get('to (ft amsl)')\n class_ = feature['properties'].get('class')\n\n\n if name in completed:\n logger.info(\"ERROR Duplicate feature name: #%i %s\", index, name)\n return {\"properties\":{}}, []\n #sys.exit(1)\n else:\n if 'ACC' in aipname:\n logger.debug(\"Writing ACC sector to separate file: %s\", 
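The accumulate-and-flush pattern above (buffer rows, write once the buffer reaches `BATCH_INSERT_SIZE`, then flush the remaining partial batch after the loop) generalizes to any sink with a batch-write method. A minimal standalone sketch, assuming only that the sink exposes a `batch_inserts` method like the dictionary wrapper here:

    def flush_in_batches(pairs, sink, batch_size=1000):
        batch = {}
        for key, value in pairs:
            batch[key] = value
            if len(batch) >= batch_size:
                sink.batch_inserts(batch.items())
                batch = {}
        if batch:  # final partial batch
            sink.batch_inserts(batch.items())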
aipname)\n accsectors.append(feature)\n else:\n features.append(feature)\n\n # SANITY CHECK\n if name is None:\n logger.error(\"Feature without name: #%i\", index)\n sys.exit(1)\n if \"None\" in name:\n logger.error(\"Feature without name: #%i\", index)\n sys.exit(1)\n completed[name]=True\n if source is None:\n logger.error(\"Feature without source: #%i\", index)\n sys.exit(1)\n if feature['properties'].get('name') is None:\n logger.error(\"Feature without name: #%i (%s)\", index, source)\n sys.exit(1)\n if class_ is None:\n logger.error(\"Feature without class (boo): #%i (%s)\", index, source)\n sys.exit(1)\n # SPECIAL CASE NOTAM reserved ENR in Oslo area\n if \"EN R\" in aipname and \"Kongsvinger\" in aipname:\n feature['properties']['notam_only'] = 'true'\n if \"EN R\" in aipname and (\"Romerike\" in aipname or (\"Oslo\" in aipname and not \"102\" in aipname)):\n feature['properties']['notam_only'] = 'true'\n feature['properties']['from (ft amsl)'] = '0'\n feature['properties']['to (ft amsl)'] = '99999' # unspecified\n feature['properties']['from (m amsl)'] = '0'\n feature['properties']['to (m amsl)'] = '99999'\n from_ = '0'\n to_ = '0'\n if (\"EN D\" in aipname or \"END\" in aipname) and end_notam:\n feature['properties']['notam_only'] = 'true'\n if from_ is None:\n if \"en_sup_a_2018_015_en\" in source:\n feature['properties']['from (ft amsl)']='0'\n feature['properties']['from (m amsl)']='0'\n from_ = '0'\n else:\n logger.error(\"Feature without lower limit: #%i (%s)\", index, source)\n sys.exit(1)\n if to_ is None:\n if \"en_sup_a_2018_015_en\" in source:\n feature['properties']['to (ft amsl)']='99999'\n feature['properties']['to (m amsl)']='9999'\n to_ = '99999'\n else:\n logger.error(\"Feature without upper limit: #%i (%s)\", index, source)\n sys.exit(1)\n if int(from_) >= int(to_):\n # SPECIAL CASE NOTAM reserved ENR in Oslo area\n if \"en_sup_a_2018_015_en\" in source or \"Romerike\" in aipname or \"Oslo\" in aipname:\n feature['properties']['from (ft amsl)']=to_\n feature['properties']['to (ft amsl)']=from_\n else:\n logger.error(\"Lower limit %s > upper limit %s: #%i (%s)\", from_, to_, index, source)\n sys.exit(1)\n elif len(obj)>0:\n logger.error(\"ERROR Finalizing incomplete polygon #%i (%i points)\", index, len(obj))\n\n names[aipname]=True\n logger.debug(\"OK polygon #%i %s with %i points (%s-%s).\", index, feature['properties'].get('name'),\n len(obj),\n feature['properties'].get('from (ft amsl)'),\n feature['properties'].get('to (ft amsl)'))\n return {\"properties\":{}}, []", "def cal_topology_feature(self):\n self.NPL()\n self.topo_efficiency_cal()\n self.efficiency_cal()\n self.cluster_cal()\n self.topo_diameter()\n self.spatial_diameter()", "def extract_features(self):\n\n self.feature_paths = dict()\n for feat_type in self.outlier_feat_types:\n try:\n print('Extracting feature type: {}'.format(feat_type))\n self.feature_paths[feat_type] = self.feature_extractor(self, feat_type)\n except:\n traceback.print_exc()\n print('Unable to extract {} features! 
skipping...'.format(feat_type))
feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })", "def _process_data(self):\n assert not hasattr(self, 'changes'), '_process_data called twice.'\n assert hasattr(self, 'errors'), (\n '_process_data not called by is_valid().')\n r_by_t = Collection.resource_by_type\n\n # Create and load collection of new data\n new_collection = Collection()\n for rtype, items in self.data.items():\n resource_cls = r_by_t.get(rtype)\n if resource_cls:\n for seq, json_api_item in enumerate(items):\n item = json_api_item.copy()\n links = item.pop('links', {})\n item.update(links)\n resource = self.load_resource(resource_cls, item)\n resource._seq = seq\n new_collection.add(resource)\n\n # Create native representation of current feature data\n current_collection = Collection(DjangoResourceClient())\n feature_serializer = ViewFeatureSerializer(context=self.context)\n current_feature = feature_serializer.to_representation(self.feature)\n current_extra = current_feature.pop('_view_extra')\n del current_extra['meta']\n\n # Load feature into new and current collection\n current_feature_resource = self.load_resource(\n r_by_t['features'], current_feature)\n current_collection.add(current_feature_resource)\n current_feature.update(self.feature._in_extra)\n current_feature['id'] = str(current_feature['id'])\n resource_feature = self.load_resource(\n r_by_t['features'], current_feature)\n resource_feature._seq = None\n new_collection.add(resource_feature)\n\n # Populate collection of current data\n for rtype, items in current_extra.items():\n resource_cls = r_by_t[rtype]\n for item in items:\n resource = self.load_resource(resource_cls, item)\n current_collection.add(resource)\n\n # Add existing items not explicit in PUT content\n # This avoids 'delete' changes\n new_items = new_collection.get_all_by_data_id()\n for data_id, item in current_collection.get_all_by_data_id().items():\n if data_id not in new_items:\n rtype = item._resource_type\n resource = r_by_t[rtype]()\n json_api_rep = item.to_json_api()\n json_api_rep[rtype]['id'] = item.id.id\n resource.from_json_api(json_api_rep)\n resource._seq = None\n new_collection.add(resource)\n\n # Add existing items used in new collection to current collection\n # This avoids incorrect 'new' changes\n existing_items = current_collection.get_all_by_data_id()\n for data_id, item in new_collection.get_all_by_data_id().items():\n if item.id:\n item_id = item.id.id\n int_id = None\n existing_item = existing_items.get(data_id)\n try:\n int_id = int(item_id)\n except ValueError:\n pass\n if int_id and (existing_item is None):\n rtype = item._resource_type\n resource_cls = r_by_t[rtype]\n model_cls, serializer_cls = view_cls_by_name[rtype]\n obj = model_cls.objects.get(id=int_id)\n serializer = serializer_cls()\n data = serializer.to_representation(obj)\n resource = self.load_resource(resource_cls, data)\n current_collection.add(resource)\n\n # Load the diff\n self.changeset = CollectionChangeset(\n current_collection, new_collection)\n assert not self.changeset.changes.get('deleted'), (\n 'Existing items were not added, so deletions found:\\n%s'\n % self.changes['deleted'])", "def _update_from_exons(self, feature):\n # note that start and end here are in direction of translation\n def start(loc):\n return loc[0][1]\n\n def end(loc):\n if loc[-1][2] == \"+\":\n return loc[-1][1] + loc[-1][3] + 1\n else:\n return loc[-1][1] - loc[-1][3] - 1\n\n if 'exon' in feature:\n # update the feature with the exon locations and sequences\n feature['location'] = 
[x['location'][0] for x in feature['exon']]\n feature['dna_sequence'] = \"\".join(\n x['dna_sequence'] for x in feature['exon'])\n feature['dna_sequence_length'] = len(feature['dna_sequence'])\n\n # construct feature location from utrs and cdss if present\n elif 'cds' in feature:\n cds = [copy.deepcopy(self.feature_dict[feature['cds']])]\n locs = [] # type: list\n seq = \"\"\n for frag in feature.get('five_prime_UTR', []) + cds + \\\n feature.get('three_prime_UTR', []):\n\n # merge into last location if adjacent\n if locs and abs(end(locs) - start(frag['location'])) == 1:\n # extend the location length by the length of the first\n # location in the fragment\n first = frag['location'].pop(0)\n locs[-1][3] += first[3]\n\n locs.extend(frag['location'])\n seq += frag['dna_sequence']\n\n feature['location'] = locs\n feature['dna_sequence'] = seq\n feature['dna_sequence_length'] = len(seq)\n\n # remove these properties as they are no longer needed\n for x in ['five_prime_UTR', 'three_prime_UTR', 'exon']:\n feature.pop(x, None)\n\n else:\n # previously the ValueError was constructed but never raised\n raise ValueError(f'Feature {feature[\"id\"]} must contain either exon or cds data to '\n 'construct an accurate location and sequence')
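For intuition on the adjacency merge above: each location is a `[contig, start, strand, length]` list, and on the `+` strand `end()` returns `start + length + 1`, one base past the previous fragment, so two fragments are contiguous exactly when `abs(end(locs) - start(frag['location'])) == 1`. A worked case (coordinates invented):

    locs = [['chr1', 100, '+', 50]]                # covers bases 100..149; end() -> 151
    frag = {'location': [['chr1', 150, '+', 30]]}  # starts immediately after
    # abs(151 - 150) == 1, so the fragment's first location is folded in:
    # locs becomes [['chr1', 100, '+', 80]]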
word:[[segments],[labels]]\n dictionaries[counter][segments] = labels # segments : labels\n input_file.close()\n counter += 1\n\n toc = time.perf_counter()\n print(\"Data Collected in \" + str(round(toc - tic, 2)) + \"s\")\n\n # Evaluate Model On the Test Set Using Optimised Model\n #######################################################\n\n print(\"Beginning Feature Computation and Model Optimisation\")\n tic = time.perf_counter()\n\n X_training, Y_training, words_training = surface_labelled_data_preparation(training_data)\n X_dev, Y_dev, words_dev = surface_labelled_data_preparation(dev_data)\n X_test, Y_test, words_test = surface_labelled_data_preparation(test_data)\n print(\"Data Processed\")\n\n best_epsilon = 1e-07\n best_max_iteration = 280\n best_algo = 'ap'\n\n '''crf = sklearn_crfsuite.CRF(\n algorithm='lbfgs',\n c1=0.1,\n c2=0.1,\n max_iterations=100,\n all_possible_transitions=True\n )'''\n crf = sklearn_crfsuite.CRF(algorithm=best_algo, epsilon=best_epsilon, max_iterations=best_max_iteration)\n print(\"CRF Initialized\")\n # crf.fit(X_training, Y_training, X_dev=X_dev, y_dev=Y_dev)\n crf.fit(X_training, Y_training)\n print(\"Data Fitted\")\n Y_predict = crf.predict(features)\n return Y_predict, Y_test
np.zeros(features.shape[0], dtype=bool)\n test_mask[role[\"te\"]] = True\n\n g.ndata[\"feat\"] = features\n g.ndata[\"label\"] = labels\n g.ndata[\"train_mask\"] = generate_mask_tensor(train_mask)\n g.ndata[\"val_mask\"] = generate_mask_tensor(val_mask)\n g.ndata[\"test_mask\"] = generate_mask_tensor(test_mask)\n\n if self._reorder:\n self._graph = reorder_graph(\n g,\n node_permute_algo=\"rcmk\",\n edge_permute_algo=\"dst\",\n store_ids=False,\n )\n else:\n self._graph = g", "def add_features(self, fbids):\n if not fbids:\n warnings.warn(\"No fbids provided.\")\n return False\n feats = self.name_synonym_lookup(fbids)\n proc_names = [f._asdict() for f in feats.values()]\n for d in proc_names:\n d['synonyms'] = '|'.join(d['synonyms'])\n statement = \"MERGE (n:Feature:Class { short_form : line.fbid } ) \" \\\n \"SET n.label = line.symbol SET n.synonyms = split(line.synonyms, '|') \" \\\n \"SET n.iri = 'http://flybase.org/reports/' + line.fbid\" # Why not using ni? Can kbw have switch to work via csv?\n self.commit_via_csv(statement, proc_names)\n self.addTypes2Neo(fbids)\n return feats", "def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)", "def add_features_to_output(m: onnx.ModelProto) -> None:\n del m.graph.output[:]\n m.graph.output.extend(m.graph.value_info)", "def _extract_features(self, a_rel, a_parses):\n feats = {}\n doc_id = a_rel[DOC_ID]\n toks_pos1 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG1)\n toks_pos2 = self._get_toks_pos(a_parses[doc_id][SENTENCES],\n a_rel, ARG2)\n self._get_product_rules(feats, doc_id, a_rel, a_parses)\n self._get_dep_rules(feats, doc_id, a_rel, a_parses)\n self._get_first_last_toks(feats, toks_pos1, toks_pos2)\n self._get_modality(feats, toks_pos1, toks_pos2)\n self._get_vb_class(feats, toks_pos1, toks_pos2)\n self._get_brown_clusters(feats, toks_pos1, toks_pos2)\n self._get_inquirer(feats, toks_pos1, toks_pos2)\n self._get_MPQA(feats, toks_pos1, toks_pos2)\n return feats", "def target_mapping(self):\n\n map_list = []\n self.bin_tracking_array = self.seg_analyzer.bin_tracking_array\n self.log.info(\"Spawning {0} jobs to begin building Target_Bed_Map_Array for permutation analysis.\"\n .format(self.args.Spawn))\n\n p = pathos.multiprocessing.Pool(int(self.args.Spawn))\n for lst in p.starmap(self.sub_target_mapping,\n zip(itertools.repeat(self.bin_tracking_array), itertools.repeat(self.target_bed_array),\n itertools.repeat(self.args), self.seg_analyzer.chrom_list)):\n\n map_list.extend(lst)\n\n map_list.sort(key=lambda x: x[0])\n\n if eval(self.args.Map_File):\n self.log.info(\"Writing Map File\")\n file_data = \"\"\n map_file = open(\"{0}{1}_{2}_mapfile.txt\"\n .format(self.args.Working_Folder, self.args.Job_Name, self.args.Cell_Name), 'w')\n map_file.write(\"Chrom\\tstart\\tstop\\trefBinID\\ttargetBinID\\ttargetCount\\n\")\n\n for row in sorted(map_list, key=itemgetter(0)):\n\n coord_start = int(self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 2])\n coord_stop = int(self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 3])\n chrom = self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 
1].decode()\n r_count = len(row[1])\n file_data += (\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n\"\n .format(chrom, coord_start, coord_stop, row[0], row[1], r_count))\n\n map_file.write(file_data)\n map_file.close()\n self.log.info(\"Map File Written\")\n\n self.log.info(\"Target_Bed_Map_Array built.\")\n return numpy.array(map_list, dtype='object')", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def collect_pipeline_runs(self):\n db = self.mongo_client.metalearning\n collection = db.pipeline_runs\n collection_size = collection.count()\n pipeline_cursor = collection.find()\n list_of_experiments = {\"classification\": [], \"regression\": []}\n for index, pipeline_run in enumerate(pipeline_cursor):\n if index % 1000 == 0:\n print(\"At {} out of {} documents\".format(index, collection_size))\n # if index == 2000:\n # # running into memory errors\n # break\n pipeline_run_info = self.get_pipeline_run_info(pipeline_run)\n metafeatures = self.get_metafeature_info(pipeline_run)\n # TODO: get all metafeatures so we don't need this\n if metafeatures != {}:\n experiment_json = dict(pipeline_run_info, **metafeatures)\n list_of_experiments[experiment_json[\"problem_type\"]].append(experiment_json)\n\n for problem_type in list_of_experiments.keys():\n final_data_file = json.dumps(list_of_experiments[problem_type], sort_keys=True, indent=4, default=json_util.default)\n with open(\"data/complete_pipelines_and_metafeatures_test_{}.json\".format(problem_type), \"w\") as file:\n file.write(final_data_file)\n\n return", "def add_mapping(self, protocol: Protocol, features: Set[FeatureName]) -> None:\n instance = cast(interface.Features, self.get(protocol))\n if instance:\n for feature in features:\n # Add feature to map if missing OR replace if this protocol has higher\n # priority than previous mapping\n if feature not in self._feature_map or self._has_higher_priority(\n protocol, self._feature_map[feature][0]\n ):\n self._feature_map[feature] = (protocol, instance)", "def create_new_features(self):\n train = self.train\n \n train['is_context'] = train['context_type'].isin(CONTEXT_TYPE_TEST)\n train['is_context_flow'] = train['listen_type'] * train['is_context']\n \n train['is_listened_context'] = train['is_listened'] * train['is_context']\n train['is_listened_flow'] = train['is_listened'] * train['listen_type']\n train['is_listened_context_flow'] = train['is_listened'] * train['is_context_flow']\n \n for feature in self.categorize_features:\n gby_feat = train.groupby(feature)\n new_features(train, gby_feat, feature, feature in self.listen_type_features, self.context_features, self.flow_features, self.fillna)\n \n # Variable combinations\n for feat1 in self.combo_features1:\n for feat2 in self.combo_features2:\n gby_feat = train.groupby([feat1, feat2])\n name = feat1 + '_' + feat2\n new_features(train, gby_feat, name, feat1 in self.listen_type_features, self.context_features, self.flow_features, self.fillna)", "def generate(self):\n self.generate_points()\n self.generate_edges()", "def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n 
pool.join()", "def social_infrastructure_combined(osm_path): \n df_point = social_infrastructure_point(osm_path)\n df_polygon = social_infrastructure_polygon(osm_path)\n \n df_polygon_filtered = compare_polygon_to_point(df_point, df_polygon) #remove duplicates polygon and point data \n df_polygon_filtered['geometry'] = pygeos.centroid(df_polygon_filtered.geometry) #transform to pointdata\n \n return (df_point.append(df_polygon_filtered)).reset_index(drop=True)", "def output_grid_information():\n # translate = [-74.26, 40.50]\n # scale = [0.02, 0.02]\n # step = 1\n\n translate = [0, 0]\n scale = [1, 1]\n step = 0.02\n\n lon_limits = [(-74.26 - translate[0]) / scale[0], (-73.76 - translate[0]) / scale[0]]\n lat_limits = [(40.48 - translate[1]) / scale[1], (40.94 - translate[1]) / scale[1]]\n\n lons = np.arange(lon_limits[0], lon_limits[1] - step, step)\n lats = np.arange(lat_limits[0], lat_limits[1] - step, step)\n\n all_json = {\n \"type\": \"FeatureCollection\"\n }\n\n gr_id = 0\n grid_df = pd.DataFrame(columns=['gr_id', 'c_lat', 'c_lon', 's_lon', 'w_lat', 'n_lon', 'e_lat'])\n features = []\n\n for lat in lats:\n for lon in lons:\n w_lon = lon\n e_lon = lon + step\n s_lat = lat\n n_lat = lat + step\n\n c_lon = lon + step / 2\n c_lat = lat + step / 2\n\n grid_df = grid_df.append(pd.DataFrame({\"gr_id\": [gr_id],\n \"c_lon\": [c_lon], \"c_lat\": [c_lat],\n \"w_lon\": [w_lon], \"s_lat\": [s_lat],\n \"e_lon\": [e_lon], \"n_lat\": [n_lat]}))\n\n coor = [[[s_lat, w_lon], [n_lat, w_lon], [n_lat, e_lon],\n [s_lat, e_lon], [s_lat, w_lon]]]\n\n feature = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": coor\n },\n \"properties\": {\n \"id\": str(gr_id)\n }\n }\n\n features.append(feature)\n\n gr_id += 1\n\n all_json['features'] = features\n\n with open(BaseDir + '/grid.geojson', 'w') as f:\n json.dump(all_json, f)\n\n grid_df.to_csv(BaseDir + '/grid_locs.csv', index=False)", "def updateFeatureClass(self, featureClass, query=[\"1=1\"], append=False, userFields=[], debug=False):\n if debug:\n debug = Debug()\n debug.log(\"Running %s\" %debug.callingScript)\n #check if user fileds already exist\n if not self.userFields:\n self.userFields = userFields\n #check for errors\n if not validWorkspace(featureClass):\n raise IncorrectWorkspaceType(\"Incorrect workspace - feature class must be created in a local geodatabase\")\n if not self.__matchSchema(featureClass):\n raise SchemaMismatch(\"Schema of input feature class does not match object schema\")\n queries = self.__generateQuery(query)\n cursor = None\n\n #iterate over queries\n for query in queries:\n if debug: debug.log(\"Working on %s\" %query)\n recordsInQuery = self.__getNumRecordsFromQuery(query, debug=debug)\n if recordsInQuery == 0:\n if debug: debug.log(\"Skipping query\")\n continue\n elif self.__numRecordsMoreThanMax(recordsInQuery):\n del cursor\n raise TooManyRecords(\"Query returns more than max allowed. 
Please refine query: \" + query)\n #else do the rest\n rValues = {\"where\":query,\n \"f\":\"json\",\n \"returnCountOnly\":\"false\",\n \"outFields\": \"*\"}\n featureData = self._getEsriRESTJSON(self.url+\"/query\", rValues, useIjson=True, debug=debug)\n if debug: debug.log(\"Successfully returned data\")\n\n #Append or overwrite mode - prevents deletion if service is unavailable\n if all([not append, not cursor]):\n if debug: debug.log(\"Deleting records\")\n arcpy.DeleteFeatures_management(featureClass)\n\n #instantiate cursor\n if not cursor:\n if debug: debug.log(\"Instantiating cursor\")\n updateFields = [f['name'] for f in self.updateFields]\n cursor = arcpy.da.InsertCursor(featureClass, updateFields)\n\n for feature in featureData:\n #if geometry is bad, skip record\n try:\n geom = self.__getGeometry(feature['geometry'])\n except NullGeometryError as e:\n if debug: debug.log(\"Null geometry error\")\n continue\n except:\n if debug: debug.log(\"Some other geometry error - couldn't get geometry\")\n attributes = []\n attributes.append(geom)\n for field in self.updateFields:\n if field['name'] == \"Shape@\":\n continue\n elif 'date' in field['type'].lower():\n attributes.append(self.__handleDateAttribute(feature['attributes'][field['name']]))\n else:\n \"\"\"getting strange OverflowError Python int too large to convert to C long,\n so casting section getting problem with some services where some fields\n aren't returned in results so added try/catch block\"\"\"\n try:\n newAttribute = feature['attributes'][field['name']]\n if type(newAttribute) is long:\n if type(int(newAttribute)) is long:\n attributes.append(float(newAttribute))\n else:\n attributes.append(newAttribute)\n else:\n attributes.append(newAttribute)\n except KeyError, e:\n attributes.append(None)\n cursor.insertRow(attributes)\n #Delete cursor\n del cursor\n if debug: debug.close()", "def __data_generation(self, batch_data):\n X = np.zeros((self.batch_size, self.num_features), dtype=float)\n y = np.zeros((self.batch_size, self.num_outputs), dtype=float)\n\n for i, sample in batch_data.iterrows():\n # Get lat/long of pickup and dropoff locations\n PULocation = self.taxizone_data.loc[sample['PULocationID']].centroids\n PULocationLong, PULocationLat = PULocation.x, PULocation.y\n DOLocation = self.taxizone_data.loc[sample['DOLocationID']].centroids\n DOLocationLong, DOLocationLat = DOLocation.x, DOLocation.y\n\n # Get month date, day of week and hours/mins for pickup\n PUDateTime = datetime.strptime(sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S')\n PUDate = PUDateTime.strftime('%Y-%m-%d')\n PUYear, PUMonth, PUMonthDate = PUDate.split('-')\n # TODO - Add this to pre-processing of trip data! 
Some random months in the data!!\n if PUYear != '2018' or PUMonth != '06':\n continue\n PUDayOfWeek = PUDateTime.weekday()\n PUTimeHour, PUTimeMinute = datetime.strptime(\n sample.tpep_pickup_datetime, '%Y-%m-%d %H:%M:%S'\n ).strftime('%H:%M').split(':')\n\n # Get precipitation for that day\n Precipitation = self.weather_data[self.weather_data['DATE'] == PUDate]['PRCP'].values[0]\n\n X[i] = np.concatenate((np.array([\n\n PULocationLat,\n PULocationLong,\n DOLocationLat,\n DOLocationLong,\n abs((PULocationLat - DOLocationLat) ** 2 + abs(PULocationLong - DOLocationLong) ** 2) ** 0.5,\n Precipitation\n ]),\n to_categorical(PUDayOfWeek, 7),\n to_categorical(PUMonthDate, 31),\n to_categorical(PUTimeHour, 24)\n ))\n\n y[i] = [sample['duration']] if self.generator_type == 'duration' \\\n else [sample['total_amount'] - sample['tip_amount']]\n\n return X, y", "def aggregate_from_base(\n base_dir: str, ref_seq: str, ref_pos: t.Sequence[int],\n pos_parser: t.Callable[[str], t.List[str]] = lambda x: x.split('-'),\n temperature: float = 0.6, count_threshold: int = 100,\n holo: str = 'holo', apo: str = 'apo', mc: str = 'MC',\n bias_name: str = 'ADAPT.inp.dat', seqs_name: str = 'RESULTS.tsv') -> pd.DataFrame:\n\n ref_pos_str = list(map(str, ref_pos))\n ref_pos_mapping = {p: i for i, p in enumerate(ref_pos_str)}\n\n def affinity_df(pair_base):\n pop_apo = pd.read_csv(f'{pair_base}/{apo}/{mc}/{seqs_name}', sep='\\t')\n pop_holo = pd.read_csv(f'{pair_base}/{holo}/{mc}/{seqs_name}', sep='\\t')\n bias_apo = f'{pair_base}/{apo}/{mc}/{bias_name}'\n bias_holo = f'{pair_base}/{holo}/{mc}/{bias_name}'\n stability_apo = stability(pop_apo, bias_apo, ref_seq, temperature, count_threshold, ref_pos_str)\n stability_holo = stability(pop_holo, bias_holo, ref_seq, temperature, count_threshold, ref_pos_str)\n df = pd.merge(stability_apo, stability_holo, on='seq', how='outer', suffixes=['_apo', '_holo'])\n df['affinity'] = df['stability_holo'] - df['stability_apo']\n positions = pos_parser(pair_base)\n df['seq_subset'] = df['seq'].apply(lambda s: ''.join(s[ref_pos_mapping[p]] for p in positions))\n df['pos'] = '-'.join(positions)\n return df\n\n paths = tqdm(glob(f'{base_dir}/*'), desc='Aggregating workers')\n dfs = []\n for p in paths:\n try:\n dfs.append(affinity_df(p))\n except (NoReferenceError, ValueError, KeyError) as e:\n warn(f'Could not aggregate worker {p} due to {e}')\n\n return pd.concat(dfs)", "def add_starters_to_map(gdf_best_route: gpd.GeoDataFrame, basemap):\n #create a list of colors\n colors = ['orange', 'darkred', 'darkblue', 'purple', 'darkgreen', '#364e4a', 'cadetblue']\n\n # make one feature group for the markers\n fg_marker = folium.FeatureGroup(\"Destination Cities\")\n for i, row in gdf_best_route.iterrows():\n fg_marker.add_child(folium.Marker(\n location=row[\"folium_geom\"][0],\n tooltip=f\"{row['start_city']}\",\n icon=folium.Icon(color=colors[i], icon='train', prefix='fa')\n ))\n basemap.add_child(fg_marker)\n\n return None" ]
[ "0.60113615", "0.5859844", "0.5772984", "0.5617712", "0.5504301", "0.5316391", "0.5313953", "0.5273943", "0.5268024", "0.5257252", "0.52431345", "0.52355766", "0.52347904", "0.52220434", "0.52174675", "0.5180655", "0.5177188", "0.5174838", "0.51718193", "0.5159236", "0.51124686", "0.5109292", "0.510242", "0.50930285", "0.5087657", "0.50649846", "0.50555915", "0.50475895", "0.50474733", "0.5042096", "0.50420177", "0.50235105", "0.50206584", "0.5001425", "0.49987257", "0.49842116", "0.49836", "0.49588332", "0.49417564", "0.4941491", "0.49233416", "0.49091232", "0.48984444", "0.48829246", "0.48796776", "0.4875922", "0.48649535", "0.48646176", "0.48635963", "0.48541203", "0.48523244", "0.48502696", "0.48496088", "0.48406643", "0.48399097", "0.48395613", "0.4838682", "0.48379478", "0.48339674", "0.4828731", "0.48277986", "0.48123452", "0.48109213", "0.480335", "0.4798574", "0.4795563", "0.47937748", "0.478979", "0.47871247", "0.47756946", "0.47748452", "0.4773466", "0.47731283", "0.47669122", "0.4764467", "0.4764071", "0.4762774", "0.47588864", "0.47572532", "0.47553825", "0.47551683", "0.4749441", "0.47483194", "0.47347736", "0.47338828", "0.47322264", "0.47315612", "0.4726866", "0.47256452", "0.47250763", "0.4721439", "0.47200787", "0.47161666", "0.4715844", "0.47147956", "0.4710743", "0.47101262", "0.47086066", "0.4706868", "0.470503" ]
0.71977544
0
Read arguments passed in via subprocess and run the parallel Route. This script is intended to be launched as a subprocess by the solve_large_route_pair_analysis.py module, which performs the essential preprocessing and validation. Users should not call this script directly from the command line. Launching it as a subprocess is necessary to support parallel processing from an ArcGIS Pro script tool, which cannot do parallel processing directly.
def launch_parallel_rt_pairs():
    # Create the parser
    parser = argparse.ArgumentParser(description=globals().get("__doc__", ""), fromfile_prefix_chars='@')

    # Define Arguments supported by the command line utility

    # --pair-type parameter
    help_string = "The type of origin-destination pair assignment to use. Either one_to_one or many_to_many."
    parser.add_argument("-pt", "--pair-type", action="store", dest="pair_type_str", help=help_string, required=True)

    # --origins parameter
    help_string = "The full catalog path to the feature class containing the origins."
    parser.add_argument("-o", "--origins", action="store", dest="origins", help=help_string, required=True)

    # --origins-id-field parameter
    help_string = "The name of the unique ID field in origins."
    parser.add_argument(
        "-oif", "--origins-id-field", action="store", dest="origin_id_field", help=help_string, required=True)

    # --destinations parameter
    help_string = "The full catalog path to the feature class containing the destinations."
    parser.add_argument("-d", "--destinations", action="store", dest="destinations", help=help_string, required=True)

    # --destinations-id-field parameter
    help_string = "The name of the unique ID field in destinations."
    parser.add_argument(
        "-dif", "--destinations-id-field", action="store", dest="dest_id_field", help=help_string, required=True)

    # --network-data-source parameter
    help_string = "The full catalog path to the network dataset or a portal url that will be used for the analysis."
    parser.add_argument(
        "-n", "--network-data-source", action="store", dest="network_data_source", help=help_string, required=True)

    # --travel-mode parameter
    help_string = (
        "The name or JSON string representation of the travel mode from the network data source that will be used for "
        "the analysis."
    )
    parser.add_argument("-tm", "--travel-mode", action="store", dest="travel_mode", help=help_string, required=True)

    # --time-units parameter
    help_string = "String name of the time units for the analysis. These units will be used in the output."
    parser.add_argument("-tu", "--time-units", action="store", dest="time_units", help=help_string, required=True)

    # --distance-units parameter
    help_string = "String name of the distance units for the analysis. These units will be used in the output."
    parser.add_argument(
        "-du", "--distance-units", action="store", dest="distance_units", help=help_string, required=True)

    # --max-routes parameter
    help_string = "Maximum number of routes that can be in one chunk for parallel processing of Route solves."
    parser.add_argument(
        "-mr", "--max-routes", action="store", dest="max_routes", type=int, help=help_string, required=True)

    # --max-processes parameter
    help_string = "Maximum number of parallel processes to use for the Route solves."
    parser.add_argument(
        "-mp", "--max-processes", action="store", dest="max_processes", type=int, help=help_string, required=True)

    # --reverse-direction parameter
    help_string = "Whether to reverse the direction of travel (destination to origin)."
    parser.add_argument(
        "-rd", "--reverse-direction", action="store", type=lambda x: bool(strtobool(x)),
        dest="reverse_direction", help=help_string, required=True)

    # --out-routes parameter
    help_string = "The full catalog path to the output routes feature class."
    parser.add_argument("-r", "--out-routes", action="store", dest="out_routes", help=help_string, required=True)

    # --scratch-folder parameter
    help_string = "The full catalog path to the scratch folder where intermediate outputs will be stored."
parser.add_argument( "-sf", "--scratch-folder", action="store", dest="scratch_folder", help=help_string, required=True) # --assigned-dest-field parameter help_string = ("The name of the field in origins indicating the assigned destination. " "Required for one_to_one pair-type") parser.add_argument( "-adf", "--assigned-dest-field", action="store", dest="assigned_dest_field", help=help_string, required=False) # --od-pair-table parameter help_string = "CSV file holding preassigned OD pairs. Required for many_to_many pair-type." parser.add_argument( "-odp", "--od-pair-table", action="store", dest="od_pair_table", help=help_string, required=False) # --time-of-day parameter help_string = (f"The time of day for the analysis. Must be in {helpers.DATETIME_FORMAT} format. Set to None for " "time neutral.") parser.add_argument("-tod", "--time-of-day", action="store", dest="time_of_day", help=help_string, required=False) # --barriers parameter help_string = "A list of catalog paths to the feature classes containing barriers to use in the Route." parser.add_argument( "-b", "--barriers", action="store", dest="barriers", help=help_string, nargs='*', required=False) try: # Get arguments as dictionary. args = vars(parser.parse_args()) # Initialize a parallel Route calculator class rt_calculator = ParallelRoutePairCalculator(**args) # Solve the Route in parallel chunks start_time = time.time() rt_calculator.solve_route_in_parallel() LOGGER.info(f"Parallel Route calculation completed in {round((time.time() - start_time) / 60, 2)} minutes") except Exception: # pylint: disable=broad-except LOGGER.error("Error in parallelization subprocess.") errs = traceback.format_exc().splitlines() for err in errs: LOGGER.error(err) raise
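A minimal sketch of how the parent module might spawn this script via subprocess, consistent with the description above. The script file name (parallel_route_pairs.py) and all argument values below are illustrative assumptions, not taken from the source; only the flag names come from the parser defined above.

import subprocess
import sys

# Hypothetical invocation; every path and value below is a placeholder.
cmd = [
    sys.executable,                     # reuse the current Python interpreter
    "parallel_route_pairs.py",          # assumed file name for this script
    "--pair-type", "one_to_one",
    "--origins", r"C:\data\inputs.gdb\Origins",
    "--origins-id-field", "OriginID",
    "--destinations", r"C:\data\inputs.gdb\Destinations",
    "--destinations-id-field", "DestID",
    "--network-data-source", r"C:\data\network.gdb\Streets_ND",
    "--travel-mode", "Driving Time",
    "--time-units", "Minutes",
    "--distance-units", "Miles",
    "--max-routes", "1000",
    "--max-processes", "4",
    "--reverse-direction", "false",
    "--out-routes", r"C:\data\outputs.gdb\Routes",
    "--scratch-folder", r"C:\data\scratch",
    "--assigned-dest-field", "AssignedDestID",  # required because pair-type is one_to_one
]
# check=True makes the parent fail fast if the worker exits with a nonzero code.
subprocess.run(cmd, check=True)

Using sys.executable ensures the child process runs under the same interpreter as the caller, which matters when the tool is launched from ArcGIS Pro's bundled Python environment.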
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_route_in_parallel(self):\r\n # Validate Route settings. Essentially, create a dummy Route class instance and set up the\r\n # solver object to ensure this at least works. Do this up front before spinning up a bunch of parallel processes\r\n # that are guaranteed to all fail.\r\n self._validate_route_settings()\r\n\r\n # Check if the input origins and destinations have any fields we should use in the route analysis\r\n self._populate_input_data_transfer_fields()\r\n\r\n # Compute Route in parallel\r\n LOGGER.info(f\"Beginning parallelized Route solves ({self.total_jobs} chunks)\")\r\n completed_jobs = 0 # Track the number of jobs completed so far to use in logging\r\n # Use the concurrent.futures ProcessPoolExecutor to spin up parallel processes that solve the routes\r\n with futures.ProcessPoolExecutor(max_workers=self.max_processes) as executor:\r\n # Each parallel process calls the solve_route() function with the rt_inputs dictionary for the\r\n # given origin ranges and their assigned destinations.\r\n jobs = {executor.submit(solve_route, self.rt_inputs, range): range for range in self.chunks}\r\n # As each job is completed, add some logging information and store the results to post-process later\r\n for future in futures.as_completed(jobs):\r\n try:\r\n # The Route job returns a results dictionary. Retrieve it.\r\n result = future.result()\r\n except Exception: # pylint: disable=broad-except\r\n # If we couldn't retrieve the result, some terrible error happened and the job errored.\r\n # Note: This does not mean solve failed. It means some unexpected error was thrown. The most likely\r\n # causes are:\r\n # a) If you're calling a service, the service was temporarily down.\r\n # b) You had a temporary file read/write or resource issue on your machine.\r\n # c) If you're actively updating the code, you introduced an error.\r\n # To make the tool more robust against temporary glitches, retry submitting the job up to the number\r\n # of times designated in helpers.MAX_RETRIES. If the job is still erroring after that many retries,\r\n # fail the entire tool run.\r\n errs = traceback.format_exc().splitlines()\r\n failed_range = jobs[future]\r\n LOGGER.debug((\r\n f\"Failed to get results for Route chunk {failed_range} from the parallel process. Will retry \"\r\n f\"up to {helpers.MAX_RETRIES} times. Errors: {errs}\"\r\n ))\r\n job_failed = True\r\n num_retries = 0\r\n while job_failed and num_retries < helpers.MAX_RETRIES:\r\n num_retries += 1\r\n try:\r\n future = executor.submit(solve_route, self.rt_inputs, failed_range)\r\n result = future.result()\r\n job_failed = False\r\n LOGGER.debug(f\"Route chunk {failed_range} succeeded after {num_retries} retries.\")\r\n except Exception: # pylint: disable=broad-except\r\n # Update exception info to the latest error\r\n errs = traceback.format_exc().splitlines()\r\n if job_failed:\r\n # The job errored and did not succeed after retries. 
Fail the tool run because something\r\n # terrible is happening.\r\n LOGGER.debug(f\"Route chunk {failed_range} continued to error after {num_retries} retries.\")\r\n LOGGER.error(\"Failed to get Route result from parallel processing.\")\r\n errs = traceback.format_exc().splitlines()\r\n for err in errs:\r\n LOGGER.error(err)\r\n raise\r\n\r\n # If we got this far, the job completed successfully and we retrieved results.\r\n completed_jobs += 1\r\n LOGGER.info(\r\n f\"Finished Route calculation {completed_jobs} of {self.total_jobs}.\")\r\n\r\n # Parse the results dictionary and store components for post-processing.\r\n if result[\"solveSucceeded\"]:\r\n self.route_fcs.append(result[\"outputRoutes\"])\r\n else:\r\n # Typically, a solve fails because no destinations were found for any of the origins in the chunk,\r\n # and this is a perfectly legitimate failure. It is not an error. However, they may be other, less\r\n # likely, reasons for solve failure. Write solve messages to the main GP message thread in debug\r\n # mode only in case the user is having problems. The user can also check the individual OD log\r\n # files.\r\n LOGGER.debug(f\"Solve failed for job id {result['jobId']}.\")\r\n LOGGER.debug(result[\"solveMessages\"])\r\n\r\n # Post-process outputs\r\n if self.route_fcs:\r\n LOGGER.info(\"Post-processing Route results...\")\r\n self.route_fcs = sorted(self.route_fcs)\r\n self._post_process_route_fcs()\r\n else:\r\n LOGGER.warning(\"All Route solves failed, so no output was produced.\")\r\n\r\n # Clean up\r\n # Delete the job folders if the job succeeded\r\n if DELETE_INTERMEDIATE_OUTPUTS:\r\n LOGGER.info(\"Deleting intermediate outputs...\")\r\n try:\r\n shutil.rmtree(self.scratch_folder, ignore_errors=True)\r\n except Exception: # pylint: disable=broad-except\r\n # If deletion doesn't work, just throw a warning and move on. This does not need to kill the tool.\r\n LOGGER.warning(f\"Unable to delete intermediate Route output folder {self.scratch_folder}.\")\r\n\r\n LOGGER.info(\"Finished calculating Routes.\")", "def main() -> None:\n args = _get_arguments()\n\n file_level_logging = logging.DEBUG if args.log_to_file else None\n setup_logger(logging.INFO, file_level_logging)\n\n if not os.path.exists(args.smiles):\n mol = Molecule(smiles=args.smiles)\n if mol.rd_mol is None:\n logger().error(\n f\"The --smiles argument ({args.smiles})\"\n \" does not point to an existing file or is a valid RDKit SMILES.\"\n \" Cannot start retrosynthesis planning.\"\n )\n return\n\n if args.nproc:\n _multiprocess_smiles(args)\n return\n\n multi_smiles = os.path.exists(args.smiles)\n\n finder = AiZynthFinder(configfile=args.config)\n _select_stocks(finder, args)\n post_processing = _load_postprocessing_jobs(args.post_processing)\n finder.expansion_policy.select(args.policy or finder.expansion_policy.items[0])\n if args.filter:\n finder.filter_policy.select(args.filter)\n else:\n finder.filter_policy.select_all()\n\n params = [\n args.smiles,\n finder,\n args.output,\n args.cluster,\n args.route_distance_model,\n post_processing,\n args.checkpoint,\n ]\n if multi_smiles:\n _process_multi_smiles(*params)\n else:\n params = params[:-1]\n _process_single_smiles(*params)", "def parseCommandLineArguments():\n parser = argparse.ArgumentParser(prog=\"findPhasiRNAs.py\",description=\"findPhasiRNAs can be used to find genomic locations where phasing occurs. 
\")\n optional_arg = parser.add_argument_group(\"Optional Arguments\")\n required_arg = parser.add_argument_group(\"Required Arguments\")\n genome_mutex = parser.add_mutually_exclusive_group(required = True)\n \n required_arg.add_argument(\"--input_library\",\"-i\",help=\"Specify the name of the file which has the small-RNA reads. This option is mutually exclusive with --consolidated_library\")\n genome_mutex.add_argument(\"--genome\",\"-g\",help=\"Specify the name of the genome fasta file of the organism. Please note that the program will not be able to handle multiple fasta files. \")\n genome_mutex.add_argument(\"--bowtie_index\",\"-bindex\",help=\"Provide the bowtie index. This argument is optional. If no index is provided then the software will generate one.\")\n required_arg.add_argument(\"--output_directory\",\"-out\",help=\"Specify an output directory to which all the generated files will be housed. This includes the log file which can be later checked. Please make sure that there are sufficient permissions to create the output directory. The program will throw an error if creation of the output directory fails. If the directory already exists then its contents will be overwritten without warning. This directory will contain the summary file containing the details of the execution\",required=True)\n optional_arg.add_argument(\"--small_rna_size\",\"-srnasize\",nargs=\"+\",help=\"Specify the size of the small RNA that you wish to analyze. You can enter more than one possible size.\",default=[\"21\"])\n optional_arg.add_argument(\"--number_of_cycles\",\"-numcycles\",nargs=\"+\",help=\"Specify the number of cycles you wish to analyze with. You can enter multiple number of number of cycles. The accepted values are 9, 10, 11, 12 and 13\",default=[\"9\"])\n optional_arg.add_argument(\"--pvalue_cutoff\",\"-p\",help=\"Enter the p-value cut off\",default=0.05)\n optional_arg.add_argument(\"--clean_up\",\"-c\",help=\"Set this to 1 if you wish to clean up all the intermediate files. The program will keep all temporary files by default.\",default=0)\n optional_arg.add_argument(\"--CPU\",\"-n\",help=\"Provide the number of CPUs to be used. Default is 1.\",default=\"1\")\n optional_arg.add_argument(\"--map_limit\",\"-mapl\",help=\"Specify the mapping limit. Only reads which are mapped at most -mapl times will be considered. The default is 1. The maximum number of alignments allowed for a single read is 10. \",default=1)\n optional_arg.add_argument(\"--force\",\"-f\",help=\"Overwrite contents of output directory if it exists.\",default=0)\n \n # Supressed arguments\n parser.add_argument(\"--input_filename\",\"-ifname\",help=argparse.SUPPRESS)\n parser.add_argument(\"--input_path\",\"-ipath\",help=argparse.SUPPRESS)\n parser.add_argument(\"--consolidated_filename\",\"-cfname\",help=argparse.SUPPRESS)\n parser.add_argument(\"--adapter_trimmed_filename\",\"-atfname\",help=argparse.SUPPRESS)\n parser.add_argument(\"--output_directory_per_run\",\"-output_directory_per_run\",help=argparse.SUPPRESS)\n #parser.add_argument(\"--output_directory\",\"-actual_out\",help=argparse.SUPPRESS)\n \n return parser.parse_args()", "def main():\n parser = argparse.ArgumentParser(\n description='Runs test for C++ implementation of M*')\n parser.add_argument('test_file', help='File describing test cases')\n parser.add_argument('output_file', help='Name of output file')\n parser.add_argument('num_processors', type=int, action='store',\n help='Number of processes to run on each node. 
' +\n 'The local host running the primary server will ' +\n 'run one fewer worker processes')\n parser.add_argument('-i', action='store', type=float, default=1.0,\n help='Set inflation factor for the heuristic, ' +\n 'defaults to 1', metavar='INF', dest='inflation')\n parser.add_argument('-t', action='store', type=int, default=120,\n help='Set time limit for planning. Defaults to 2 ' +\n 'minutes', dest='time_limit')\n parser.add_argument('--hosts', action='store',\n default=('python', 'cobra', 'viper', 'anaconda'),\n help='Hostnames/IPs to use as processing nodes.',\n nargs='*', metavar='HOSTNAME')\n\n args = parser.parse_args()\n\n run_cpp_mstar_trial(args.test_file, args.output_file,\n inflation=args.inflation, time_limit=args.time_limit,\n hosts=args.hosts, num_processors=args.num_processors)", "def __init__(self, **kwargs):\r\n self.pair_type = kwargs[\"pair_type\"]\r\n self.origins = kwargs[\"origins\"]\r\n self.origin_id_field = kwargs[\"origin_id_field\"]\r\n self.destinations = kwargs[\"destinations\"]\r\n self.dest_id_field = kwargs[\"dest_id_field\"]\r\n self.network_data_source = kwargs[\"network_data_source\"]\r\n self.travel_mode = kwargs[\"travel_mode\"]\r\n self.time_units = kwargs[\"time_units\"]\r\n self.distance_units = kwargs[\"distance_units\"]\r\n self.time_of_day = kwargs[\"time_of_day\"]\r\n self.reverse_direction = kwargs[\"reverse_direction\"]\r\n self.scratch_folder = kwargs[\"scratch_folder\"]\r\n self.assigned_dest_field = kwargs[\"assigned_dest_field\"]\r\n self.od_pair_table = kwargs[\"od_pair_table\"]\r\n self.origin_transfer_fields = kwargs[\"origin_transfer_fields\"]\r\n self.destination_transfer_fields = kwargs[\"destination_transfer_fields\"]\r\n self.barriers = []\r\n if \"barriers\" in kwargs:\r\n self.barriers = kwargs[\"barriers\"]\r\n\r\n # Create a job ID and a folder for this job\r\n self._create_job_folder()\r\n\r\n # Setup the class logger. 
Logs for each parallel process are not written to the console but instead to a\r\n # process-specific log file.\r\n self.setup_logger(\"RoutePairs\")\r\n\r\n # Get field objects for the origin and destination ID fields since we need this in multiple places\r\n self.origin_id_field_obj = arcpy.ListFields(self.origins, wild_card=self.origin_id_field)[0]\r\n self.dest_id_field_obj = arcpy.ListFields(self.destinations, wild_card=self.dest_id_field)[0]\r\n\r\n # Set up other instance attributes\r\n self.is_service = helpers.is_nds_service(self.network_data_source)\r\n self.rt_solver = None\r\n self.solve_result = None\r\n self.input_origins_layer = \"InputOrigins\" + self.job_id\r\n self.input_destinations_layer = \"InputDestinations\" + self.job_id\r\n self.input_origins_layer_obj = None\r\n self.input_dests_layer_obj = None\r\n self.origin_unique_id_field_name = \"OriginUniqueID\"\r\n self.dest_unique_id_field_name = \"DestinationUniqueID\"\r\n self.od_pairs = None\r\n\r\n # Create a network dataset layer if needed\r\n if not self.is_service:\r\n self._make_nds_layer()\r\n\r\n # Prepare a dictionary to store info about the analysis results\r\n self.job_result = {\r\n \"jobId\": self.job_id,\r\n \"jobFolder\": self.job_folder,\r\n \"solveSucceeded\": False,\r\n \"solveMessages\": \"\",\r\n \"outputRoutes\": \"\",\r\n \"logFile\": self.log_file\r\n }", "def parse_command_line_arguments(command_line_arguments):\n # First determine if a we're loading from a file\n filename = False\n if \"--file\" in command_line_arguments:\n index = command_line_arguments.index(\"--file\")+1\n filename = command_line_arguments[index]\n if output.tracking_suffix in filename:\n # Assumes simulation in progress. So, if final_sweep ==\n # current_sweep, the simulation will load and then\n # immediately end.\n return get_progress_file_info(filename)\n if not (output.output_suffix in filename):\n raise ValueError(\"Can only load from *.boundaryprg2p1 or \"\n +\"*.boundary2p1 files!\")\n # If filename is of type *.boundary2p1, we assume its okay and\n # load from it. 
None of the other command line arguments\n # change.\n\n if \"--select\" in command_line_arguments:\n index = command_line_arguments.index(\"--select\")+1\n if command_line_arguments[index] == \"std\":\n algorithm = monte_carlo.select_for_curvature\n elif command_line_arguments[index] == \"area\":\n algorithm = monte_carlo.select_for_area\n else:\n algorithm = default_algorithm\n else:\n algorithm = default_algorithm \n\n if \"--target-area\" in command_line_arguments:\n index = command_line_arguments.index(\"--target-area\")+1\n target_area = int(eval(command_line_arguments[index]))\n else:\n target_area = int(eval(command_line_arguments[0]))\n\n if \"--target-std\" in command_line_arguments:\n index = command_line_arguments.index(\"--target-std\")+1\n target_std = float(eval(command_line_arguments[index]))\n else:\n target_std = default_target_std\n\n if \"--area-damping\" in command_line_arguments:\n index = command_line_arguments.index(\"--area-damping\")+1\n area_damping_strength = float(eval(command_line_arguments[index]))\n else:\n area_damping_strength = default_area_damping\n if not 0 <= area_damping_strength <= 1:\n raise ValueError(\"Damping must be between 0 and 1.\")\n\n if \"--std-damping\" in command_line_arguments:\n index = command_line_arguments.index(\"--std-damping\")+1\n std_damping_strength = float(eval(command_line_arguments[index]))\n else:\n std_damping_strength = default_std_damping\n if not 0 <= area_damping_strength <= 1:\n raise ValueError(\"Damping must be between 0 and 1.\")\n\n if \"--initial\" in command_line_arguments:\n index = command_line_arguments.index(\"--initial\")+1\n initial_sweep = int(eval(command_line_arguments[index]))\n else:\n initial_sweep = default_initial_sweep\n\n if \"--final\" in command_line_arguments:\n index = command_line_arguments.index(\"--final\")+1\n final_sweep = int(eval(command_line_arguments[index]))\n else:\n final_sweep = default_final_sweep\n\n if \"--save\" in command_line_arguments:\n index = command_line_arguments.index(\"--save\")+1\n save_every_n_sweeps = int(eval(command_line_arguments[index]))\n else:\n save_every_n_sweeps = default_save_every_n_sweeps\n if save_every_n_sweeps < 1:\n raise ValueError(\"You must save at least every 1 sweeps!\")\n\n if \"--v5\" in command_line_arguments:\n index = command_line_arguments.index(\"--v5\")+1\n v5damping = int(eval(command_line_arguments[index]))\n else:\n v5damping = target_area/10\n\n if \"--v6\" in command_line_arguments:\n index = command_line_arguments.index(\"--v6\")+1\n v6damping = int(eval(command_line_arguments[index]))\n else:\n v6damping = target_area/10\n \n if \"--many\" in command_line_arguments:\n if \"--one\" in command_line_arguments or \"--exact\" in command_line_arguments:\n raise ValueError(\"Contradictory input!\")\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.save_many_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_n_files\n elif \"--one\" in command_line_arguments:\n if \"--many\" in command_line_arguments or \"--exact\" in command_line_arguments:\n raise ValueError(\"Condtradictory input!\")\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.stop_at_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_1_file\n elif \"--exact\" in command_line_arguments:\n if \"--many\" in command_line_arguments or \"--one\" in command_line_arguments:\n raise ValueError(\"Contradictory input!\")\n gather_data_function = output.generate_n_exact_spheres\n 
index = command_line_arguments.index(\"--exact\")+1\n # In this case, v5damping is fitness_damping, as defined\n # in generate_n_exact_spheres\n v5damping = int(eval(command_line_arguments[index]))\n else:\n if \"--micro\" in command_line_arguments:\n gather_data_function = output.stop_at_microscopically_optimal\n else:\n gather_data_function = output.gather_data_to_1_file\n\n # return a class with all the info we need\n params = parameters(filename, target_area, area_damping_strength,\n target_std, std_damping_strength,\n initial_sweep, final_sweep,\n save_every_n_sweeps,\n v5damping, v6damping,\n algorithm,\n gather_data_function)\n return params", "def solve_route(inputs, chunk):\r\n rt = Route(**inputs)\r\n if inputs[\"pair_type\"] is helpers.PreassignedODPairType.one_to_one:\r\n rt.logger.info(f\"Processing origins OID {chunk[0]} to {chunk[1]} as job id {rt.job_id}\")\r\n elif inputs[\"pair_type\"] is helpers.PreassignedODPairType.many_to_many:\r\n rt.logger.info(f\"Processing chunk {chunk[0]} as job id {rt.job_id}\")\r\n rt.solve(chunk)\r\n rt.teardown_logger()\r\n return rt.job_result", "def main():\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)", "def run_parallel(pid, call_method_id, run_id='gwas', kinship_method='ibd'):\n job_id = '%s_%s_%d_%d' % (run_id, kinship_method, call_method_id, pid)\n file_prefix = env.env['results_dir'] + job_id\n\n #Cluster specific parameters \n shstr = '#!/bin/bash\\n'\n shstr += '#$ -S /bin/bash\\n'\n shstr += '#$ -N %s\\n' % job_id\n #shstr += '#$ -o %s_job_$JOB_ID.out\\n' % file_prefix\n #shstr += '#$ -e %s_job_$JOB_ID.err\\n' % file_prefix\n shstr += '#$ -o %s_job.out\\n' % file_prefix\n shstr += '#$ -e %s_job.err\\n' % file_prefix\n shstr += 'source /etc/modules-env.sh\\n'\n shstr += 'module load scipy/GotoBLAS2/0.9.0\\n'\n shstr += 'module load matplotlib/1.0.0\\n'\n shstr += 'module load mysqldb/1.2.3\\n'\n\tshstr += 'module load h5py/2.0.0\\n'\n shstr += 'export GOTO_NUM_THREADS=1\\n'\n\n\n shstr += \"python %sfullseq_gwas_project.py %s %s %d %d\" % \\\n (env.env['script_dir'], run_id, kinship_method, call_method_id, pid)\n\n #shstr += \"> \" + file_prefix + \"_job.out) >& \" + file_prefix + \"_job.err\\n\"\n print '\\n', shstr, '\\n'\n script_file_name = run_id + \".sh\"\n f = open(script_file_name, 'w')\n f.write(shstr)\n f.close()\n\n #Execute qsub script\n os.system(\"qsub \" + script_file_name)", "def main():\n (\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n ) = data_sequence_cli_parsing()\n\n if options.verbose:\n log.setLevel(logging.DEBUG)\n else:\n log.setLevel(logging.INFO)\n\n # Run the routine piping all the analysis steps\n rc = data_sequence(\n calibration_file,\n drs4_ped_file,\n time_calibration_file,\n systematic_correction_file,\n drive_log_file,\n run_summary_file,\n pedestal_ids_file,\n run_number,\n )\n sys.exit(rc)", "def _do_run(self, path, args):\n try:\n self.router.route(path, args)\n except TypeError, e:\n # To catch the follow errors\n # TypeError: xxxx got 
an unexpected keyword argument 'k'\n # TypeError: 'print_my_good() takes at least 1 argument (0 given)'\n print \"run job %s with arg < %s > error:\" % (path, \", \".join(args))\n print \"%s\" % e", "def main():\n\t\n\tparser = argparse.ArgumentParser(description=\"This invokes a java program to place and/or route a design using Vivado tcl.\")\n\tparser.add_argument('input_dcp', nargs=1, help=\"Input design checkpoint (.dcp) file.\")\n\tparser.add_argument('output_dcp', nargs=1, help=\"Output dcp file.\")\n\tparser.add_argument('-p', '--place', dest='place', action='store_true', help=\"Place design.\")\n\tparser.add_argument('-r', '--route', dest='route', action='store_true', help=\"Route design.\")\n\tparser.add_argument('-b', '--both', dest='place_and_route', action='store_true', help=\"Place and route design. This option supercedes -p or -r.\")\n\tparser.add_argument('-q', '--quiet', dest='quiet', action='store_true', help=\"Suppress messages.\")\n\tparser.add_argument('-f', '--force', dest='force', action='store_true', help=\"Overwrite file at destination if it exists.\")\n\targs = parser.parse_args()\n\t\n\t\n\tif args.place or args.place_and_route:\n\t\tplace_flag = \"-p true\"\n\telse:\n\t\tplace_flag = \"-p false\"\n\t\t\n\tif args.route or args.place_and_route:\n\t\troute_flag = \"-r true\"\n\telse:\n\t\troute_flag = \"-r false\"\n\t\n\tif args.force:\n\t\tforce_flag = \"-f true\"\n\telse:\n\t\tforce_flag = \"-f false\"\n\tif args.quiet:\n\t\tquiet_flag = \"-q true\"\n\telse:\n\t\tquiet_flag = \"-q false\"\n\t\n\t\n\tplace_and_route_dir = \"~/Documents/2019_summer/AddILA/\"\n\tclass_path = \".:/nfs/ug/thesis/thesis0/pc2019/Software/RapidWright/RapidWright\"\n\t\n\tcd_cmd = \"cd {}\".format(place_and_route_dir)\n\tjava_compile_cmd = \"javac PlaceAndRoute.java\".format()\n\n\ttry:\n\t\tif not args.quiet:\n\t\t\tprint(\"pwd\")\n\t\tpwd = subprocess.check_output(\"pwd\").decode(sys.stdout.encoding).strip()\n\t\tif not args.quiet:\n\t\t\tprint(cd_cmd)\n\t\tos.system(cd_cmd)\n\texcept:\n\t\tprint(\"\\nERROR: Failed to execute '{}'.\".format(cd_cmd))\n\t\treturn 1\n\n\tif args.input_dcp[0][0] in {'~', '/'}:\n\t\tinput_dcp = args.input_dcp[0]\n\telse:\n\t\tinput_dcp = pwd + \"/\" + args.input_dcp[0]\n\tif args.output_dcp[0][0] in {'~', '/'}:\n\t\toutput_dcp = args.output_dcp[0]\n\telse:\n\t\toutput_dcp = pwd + \"/\" + args.output_dcp[0]\n\t\n\tjava_execute_cmd = \"java -cp {0} PlaceAndRoute {1} {2} {3} {4} {5} {6}\".format(class_path, input_dcp, output_dcp, place_flag, route_flag, force_flag, quiet_flag)\n\t\n\ttry:\n\t\tif not args.quiet:\n\t\t\tprint(java_compile_cmd)\n\t\tos.system(java_compile_cmd)\n\texcept:\n\t\tprint(\"\\nERROR: Failed to execute '{}'.\".format(java_compile_cmd))\n\t\treturn 1\n\n\ttry:\n\t\tif not args.quiet:\n\t\t\tprint(java_execute_cmd)\n\t\tos.system(java_execute_cmd)\n\texcept:\n\t\tprint(\"\\nERROR: Failed to execute '{}'.\".format(java_execute_cmd))\n\t\treturn 1", "def main ():\n\n print(\"MapReduce Map Worker program\")\n\n # first parse the command line arguments\n parsed_args = parseCmdLineArgs ()\n \n # instantiate a map object with the parsed args\n mapobj = MR_Map (parsed_args)\n\n # initialize the map worker network connections\n mapobj.init_worker ()\n \n # invoke the map process. 
We run this map process forever\n while True:\n mapobj.do_work ()\n print(\"MapReduce Map Worker done for this iteration\")\n time.sleep (5)", "def main(*args):\n \n # Read input file in as dictionary\n with open(args[0]) as f:\n input_dict = iprPy.input.parse(f, allsingular=True)\n \n # Interpret and process input parameters\n process_input(input_dict, *args[1:])\n \n # Call calculation's function(s)\n results_dict = bondscan(input_dict['lammps_command'],\n input_dict['potential'],\n input_dict['symbols'],\n mpi_command = input_dict['mpi_command'],\n rmin = input_dict['minimum_r'],\n rmax = input_dict['maximum_r'],\n rnum = input_dict['number_of_steps_r'],\n thetamin = input_dict['minimum_theta'],\n thetamax = input_dict['maximum_theta'],\n thetanum = input_dict['number_of_steps_theta'])\n \n # Build and save data model of results\n record = iprPy.load_record(record_style)\n record.buildcontent(input_dict, results_dict)\n with open('results.json', 'w') as f:\n record.content.json(fp=f, indent=4)", "def main():\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n parser.add_argument('-V', '--version', action='version', version=VERSION)\n\n\n file_group = parser.add_argument_group('Input Files')\n file_group.add_argument('-f', dest='traj', required=True, type=str,\n help='trajectory file (XTC/TRR/GRO/PDB ...)')\n file_group.add_argument('-s', dest='tpr', required=True, type=str,\n help='tpr file (TPR)')\n file_group.add_argument('-o', dest='outpath', type=str,\n help='name of the mapped trajectory (XTC/GRO)')\n file_group.add_argument('-m', dest='map_file', type=str,\n help='.mapping file or path to directory of .map files')\n\n mapping_group = parser.add_argument_group('Mapping Options')\n mapping_group.add_argument('-mode', dest='mode', required=False, type=str,\n help='COG or COM mapping', default='COG')\n mapping_group.add_argument('-pbc', action='store_true', required=False, dest='pbc_complete',\n help='complete pbc with MDAnalysis; this is slow!')\n mapping_group.add_argument('-mols', dest='mol_names', required=True, type=str, nargs='+',\n help='names of molecules to consider when mapping as in the [moleculetypes] directive')\n mapping_group.add_argument('-add_H', dest='h_association', nargs='+', type=lambda s: s.split(':'),\n default=[],\n help='atom-types corresponding to CH3, CH2, CH1 for aliphatic groups and CH2d for double bonds.')\n args = parser.parse_args()\n\n print(\"INFO - Loading universe\")\n # load trajectory\n init_universe = UniverseHandler(args.mol_names,\n args.tpr,\n args.traj,\n in_memory=True)\n if args.pbc_complete:\n print(\"INFO - PBC completing trajectory\")\n init_universe.pbc_complete()\n\n if args.h_association:\n print(\"INFO - Adding Hydrogen to united-atoms\")\n treated_atoms = init_universe.shift_united_atom_carbons(dict(args.h_association))\n else:\n treated_atoms = np.array([])\n\n print(\"INFO - Loading mapping files\")\n #determine if we have a single .mapping file or a directory of .map files\n map_path = pathlib.Path(args.map_file)\n if map_path.is_file() == True:\n with open(args.map_file, \"r\") as _file:\n lines = _file.readlines()\n elif map_path.is_dir() == True:\n l = []\n for i in map_path.glob('*.map'):\n with open(i, \"r\") as _file:\n l.append(_file.readlines())\n if len(l) > 0:\n lines = [item for sublist in l for item in sublist]\n else:\n msg = (\"Couldn't find any .map files in the directory given.\"\n \"Please check the -m argument!\")\n raise IOError(msg)\n else:\n msg = 
(\"\\nCannot determine if you have given me a single .mapping file\\n\"\n \"or a directory of .map files. Please check!\\n\")\n raise IOError(msg)\n\n mappings = read_mapping(lines)[0]\n\n print(\"INFO - Mapping universe - indices\")\n # first mapp the atom indices\n mapped_atoms, bead_idxs = forward_map_indices(init_universe,\n mappings)\n n_frames = len(init_universe.trajectory)\n\n print(\"INFO - Mapping universe - positions\")\n mapped_atoms = numba.typed.List(mapped_atoms)\n bead_idxs = numba.typed.List(bead_idxs)\n # extract the position array from universe\n # if it's not a trajectory we have to emulate\n # a single frame\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n if file_extension in [\"xtc\", \"trr\"]:\n positions = init_universe.trajectory.coordinate_array\n else:\n positions = init_universe.atoms.positions\n positions = positions.reshape(1, -1, 3)\n\n mapped_trajectory = forward_map_positions(mapped_atoms,\n bead_idxs,\n positions,\n n_frames,\n args.mode,\n treated_atoms)\n\n print(\"INFO - Mapping universe - building pos-array\")\n cg_universe = create_new_universe(init_universe, mapped_trajectory, mappings)\n\n # write coordinate\n print(\"INFO - Writing CG trajectory\")\n if args.traj:\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n else:\n file_extension = \"xtc\"\n\n if file_extension in [\"xtc\", \"trr\"]:\n cg_beads = cg_universe.atoms\n with mda.Writer(args.outpath,\n multiframe=True,\n n_atoms=len(cg_universe.atoms)) as mapped:\n for time_step in cg_universe.trajectory:\n mapped.write(cg_beads)\n else:\n cg_universe.atoms.positions = cg_universe.trajectory.coordinate_array[0]\n cg_beads = cg_universe.atoms\n cg_universe.atoms.dimensions = init_universe.atoms.dimensions\n with mda.Writer(args.outpath, n_atoms=len(cg_universe.atoms)) as mapped:\n mapped.write(cg_beads)", "def main():\n args = parse_args()\n process_args(args)", "def main() -> None:\n\n data = Ground(sys.argv[1])\n DeliveryMan.show_route(data.coordinates)", "def parse_command_line(args):\r\n\r\n\r\n # Initial validity check of input command: each argument may be\r\n # defined only once\r\n for a in args:\r\n if a.startswith('--'):\r\n if args.count(a) > 1:\r\n raise STAPLERerror.STAPLERerror('Each command line parameter can be '\r\n 'defined only once! The following '\r\n 'parameter was defined multiple times:\\n '\r\n '{0}'.format(a))\r\n\r\n # Initialize a named tuple to store all available command line arguments\r\n Command_line_parameters = namedtuple('Input_file_parameters',\r\n ['all_parameters',\r\n 'staplerfile_path',\r\n 'resource_manager',\r\n 'max_job_count',\r\n 'auto_split_workflows',\r\n 'compress_run',\r\n 'validate_run',\r\n 'fix_run',\r\n 'rm_workflow'])\r\n\r\n # Parse user command line and check sanity of values\r\n\r\n # Parse the resource manager to use\r\n all_parameters = ' '.join(args)\r\n resource_manager = None\r\n if '--lsf' in args:\r\n resource_manager = 'lsf'\r\n args.remove('--lsf')\r\n if '--sge' in args:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. Please, choose only one.')\r\n resource_manager = 'sge'\r\n args.remove('--sge')\r\n if '--slurm' in args:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. 
Please, choose only one.')\r\n resource_manager = 'slurm'\r\n args.remove('--slurm')\r\n if '--torque' in args:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. Please, choose only one.')\r\n resource_manager = 'torque'\r\n args.remove('--torque')\r\n if '--UNIX' in args or resource_manager is None:\r\n if resource_manager is not None:\r\n raise STAPLERerror.STAPLERerror('Multiple resource managers are listed in the '\r\n 'command line. Please, choose only one.')\r\n resource_manager = 'unix'\r\n if '--UNIX' in args: args.remove('--UNIX')\r\n\r\n # Parse the limit for maximum number of jobs to spawn\r\n if '--max_job_count' in args:\r\n if resource_manager is None:\r\n raise STAPLERerror.STAPLERerror('--max_job_count parameter can only be defined '\r\n 'if a resource manager is also defined '\r\n '(e.g. --slurm)!')\r\n try:\r\n max_job_count = int(args[args.index('--max_job_count')+1])\r\n except (TypeError, IndexError):\r\n raise STAPLERerror.STAPLERerror('--max_job_count requires a positive integer '\r\n 'value, e.g. --max_job_count 16')\r\n if max_job_count < 1:\r\n raise STAPLERerror.STAPLERerror('--max_job_count requires a positive integer '\r\n 'value, e.g. --max_job_count 16')\r\n args.pop(args.index('--max_job_count')+1)\r\n args.remove('--max_job_count')\r\n else:\r\n if resource_manager == 'unix':\r\n max_job_count = multiprocessing.cpu_count()\r\n else:\r\n max_job_count = None\r\n\r\n # Parse workflow control parameters\r\n if '--priority' in args:\r\n if resource_manager is None:\r\n raise STAPLERerror.STAPLERerror('--priority parameter can be used only if a '\r\n 'resource manager (e.g. SLURM) is specified!')\r\n if resource_manager == 'unix':\r\n raise STAPLERerror.STAPLERerror('--priority parameter cannot be '\r\n 'used in combination with --UNIX '\r\n 'parameter!')\r\n try:\r\n if (args[args.index('--priority')+1]).lower() in ('continuous', 'c'):\r\n auto_split_workflows = False\r\n elif (args[args.index('--priority')+1]).lower() in ('split', 's'):\r\n auto_split_workflows = True\r\n else:\r\n raise STAPLERerror.STAPLERerror('Allowed values for --priority parameter are '\r\n '\"continuous\", \"c\", \"split\" and \"s\"!')\r\n except (TypeError, IndexError):\r\n raise STAPLERerror('--priority parameter requires a value! Allowed values are '\r\n '\"continuous\", \"c\", \"split\" and \"s\" ')\r\n args.pop(args.index('--priority')+1)\r\n args.remove('--priority')\r\n else:\r\n if resource_manager == 'unix':\r\n auto_split_workflows = True\r\n else:\r\n auto_split_workflows = False\r\n compress_run = None\r\n\r\n # Parse workflow compression/decompression parameters\r\n if '--compress' in args:\r\n compress_run = 'compress'\r\n args.remove('--compress')\r\n if '--decompress' in args:\r\n if '--compress' in args:\r\n raise STAPLERerror.STAPLERerror('--compress and --decompress parameters can '\r\n 'not be used simultaneously!')\r\n compress_run = 'decompress'\r\n args.remove('--decompress')\r\n\r\n # Parse workflow validation/fixing/removing parameters\r\n if '--validate_run' in args:\r\n validate_run = True\r\n args.remove('--validate_run')\r\n else:\r\n validate_run = False\r\n if '--fix_run' in args:\r\n fix_run = True\r\n args.remove('--fix_run')\r\n else:\r\n fix_run = False\r\n if '--remove' in args:\r\n rm_workflow = True\r\n args.remove('--remove')\r\n else:\r\n rm_workflow = False\r\n\r\n # Parse path to staplefile. 
All other valid parameters are now read & removed\r\n # from args.\r\n if len(args) == 1:\r\n if os.path.isfile(args[0]):\r\n staplerfile_path = args[0]\r\n else:\r\n raise STAPLERerror.STAPLERerror('Command line contains an odd value:\\n{0}\\n '\r\n 'This is not an existing path to a staplerfile '\r\n 'or any other recognized parameter!'.format(\r\n args[0]))\r\n elif len(args) == 0:\r\n raise STAPLERerror.STAPLERerror('Command line is missing path to staplerfile!')\r\n elif len(args) > 1:\r\n for a in args:\r\n if os.path.isfile(a):\r\n odd_values = args\r\n args.remove(a)\r\n raise STAPLERerror.STAPLERerror('Command line contains some odd '\r\n 'parameters! The string \"{0}\" is '\r\n 'probably the path to stapler file, '\r\n 'but the following parameters are '\r\n 'unknown:\\n{1}\\nFor more info, '\r\n 'type\\npython STAPLER.py -h'.format(a,\r\n '\\n'.join(odd_values)))\r\n raise STAPLERerror.STAPLERerror('Command line is missing path to staplerfile! '\r\n 'Instead, some odd parameters are present:\\n{0}'.format('\\n'.join(args)))\r\n\r\n # Do further validity checks for different parameter combinations\r\n if validate_run and fix_run:\r\n raise STAPLERerror.STAPLERerror('--validate_run and --fix_run cannot be used '\r\n 'in the same command!')\r\n if validate_run and rm_workflow:\r\n raise STAPLERerror.STAPLERerror('--remove_WORKFLOW and --validate_run cannot be '\r\n 'used in the same command!')\r\n if fix_run and rm_workflow:\r\n raise STAPLERerror.STAPLERerror('--fix_run and REMOVE_WORKFLOW cannot be used in '\r\n 'the same command!')\r\n if compress_run is not None:\r\n if validate_run or rm_workflow or fix_run:\r\n raise STAPLERerror.STAPLERerror('--validate_run, --remove_WORKFLOW or --fix_run '\r\n 'parameters cannot be used in the same command '\r\n 'with --COMRESS_RUN!')\r\n if validate_run or rm_workflow:\r\n if resource_manager is not 'unix':\r\n raise STAPLERerror.STAPLERerror('Resource managers cannot be used when '\r\n 'removing workflows!')\r\n\r\n command_line_parameters = Command_line_parameters(\r\n all_parameters=all_parameters,\r\n staplerfile_path=staplerfile_path,\r\n resource_manager=resource_manager,\r\n max_job_count=max_job_count,\r\n auto_split_workflows=auto_split_workflows,\r\n compress_run=compress_run,\r\n validate_run=validate_run,\r\n fix_run=fix_run,\r\n rm_workflow=rm_workflow)\r\n\r\n return command_line_parameters", "def run_multimapping(SRA):\n\n if not os.path.exists(\"TMP/ambiguous_reads/\"):\n os.mkdir(\"TMP/ambiguous_reads/\")\n\n cmd_STAR = 'STAR --outSAMtype BAM SortedByCoordinate --runThreadN 8 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n \n # Keep only multi-mapping reads:\n cmd_filter = 'python code/sam_STAR_mapq_filtering.py' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_Aligned.sortedByCoord.out.bam' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam' + ' ' + 'all'\n output = subprocess.run(cmd_filter, shell=True)\n\n cmd_samtools2 = 'samtools index' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam'\n output = subprocess.run(cmd_samtools2, shell=True)", "def read_arguments():\n\n parser = argparse.ArgumentParser(\n description='Enter arguments to run the pipeline.')\n\n # arguments for external files 
that might be necessary to run the program\n parser.add_argument(\n '--cost_network', type=str,\n help='file storing the state dictionary of the cost network.'\n )\n\n parser.add_argument(\n '--policy_network', type=str,\n help='File storing the state dictionary of the Policy network.'\n )\n\n parser.add_argument(\n '--state_dictionary', type=str,\n help='Environment on which to run the algo (obstacle/no obstacle)'\n )\n\n parser.add_argument(\n '--expert_trajectory_file', type=str,\n help='Path to file containing the expert trajectories.')\n\n # network hyper parameters\n parser.add_argument(\n '--cost_network_input', type=int, default=29,\n help='Input layer size of cost network. None if you have specified cost \\\n network state dict.')\n\n parser.add_argument(\n '--cost_network_hidden', nargs='+', type=int, default=[256, 256],\n help='Hidden size of cost network. None if you have specified cost \\\n network state dict.')\n\n parser.add_argument(\n '--cost_network_output', type=int, default=1,\n help='Output layer size of cost network. None if you have specified \\\n cost network state dict.')\n\n parser.add_argument(\n '--policy_network_input', type=int, default=29,\n help='Input layer size of policy network. None if you have specified \\\n policy network state dict.')\n\n parser.add_argument(\n '--policy_network_hidden', nargs='+', type=int, default=[256, 256],\n help='Hidden layer size of policy network. None if you have specified \\\n policy network state dict.')\n\n parser.add_argument(\n '--policy_network_output', type=int, default=4,\n help='Output layer size of policy network. None if you have specified \\\n policy network state dict.')\n\n # other run hyper parameters like optimizer and all???\n\n # run hyperparameters\n parser.add_argument('--irl_iterations', type=int,\n help='Number of times to iterate over the IRL part.')\n\n parser.add_argument(\n '--no_of_samples', type=int,\n help='Number of samples to create agent state visitation frequency.')\n\n parser.add_argument(\n '--rl_iterations', type=int,\n help='Number of iterations to be performed in the RL section.')\n\n # arguments for the I/O of the program\n parser.add_argument(\n '--display_board', type=str, default='False',\n help='If True, draw environment.')\n\n parser.add_argument(\n '--on_server', type=str, default='True',\n help='False if program is to run on server.')\n\n parser.add_argument('--store_results', type=str, default='True')\n\n parser.add_argument(\n '--plot_interval', type=int, default=10,\n help='Iterations before loss and reward curve plots are stored.')\n\n parser.add_argument(\n '--savedict_policy_interval', type=int, default=100,\n help='Iterations after which the policy network will be stored.')\n\n parser.add_argument(\n '--savedict_cost_interval', type=int, default=1,\n help='Iterations after which the cost network will be stored.')\n\n # arguments for the broader pipeLine\n parser.add_argument(\n '--rl_method', type=str,\n help='Enter the RL method to be used.')\n\n parser.add_argument(\n '--feature_space', type=str,\n help='Type of features to be used to get the state of the agent.')\n\n parser.add_argument('--irl_method', type=str,\n help='Enter the IRL method to be used.')\n\n parser.add_argument(\n '--run_type', type=str, default='train',\n help='Enter if it is a train run or a test run.(train/test).')\n\n parser.add_argument(\n '--verbose', type=str, default='False',\n help='Set verbose to \"True\" to get a myriad of print statements crowding\\\n your terminal. 
Necessary information should be provided with either\\\n of the modes.')\n\n parser.add_argument(\n '--no_of_testRuns', type=int, default=0,\n help='If --run_type set to test, then this denotes the number of test \\\n runs you want to conduct.')\n\n _args = parser.parse_args()\n\n return _args", "def process_argv(argv):\n # Initial preparation\n import __main__\n for (k,v) in global_constants.items():\n exec '%s = %s' % (k,v) in __main__.__dict__\n\n # Allow param.normalize_path.prefix to be overridden in the\n # startup files, but otherwise force it to exist before doing\n # anything else\n param.normalize_path.prefix = default_output_path()\n exec_startup_files()\n set_output_path(param.normalize_path.prefix)\n\n # Tell the user how many cores are in use, if available\n openmp_main=Parameterized(name=\"OpenMP\") # Dummy object just for messages\n try:\n import os,multiprocessing\n total_cores = multiprocessing.cpu_count()\n num_threads = int(os.environ.get('OMP_NUM_THREADS',total_cores))\n openmp_main.verbose(\"Using %d threads on a machine with %d detected CPUs\",num_threads, total_cores)\n except:\n pass\n\n # Repeatedly process options, if any, followed by filenames, if any, until nothing is left\n topo_parser.disable_interspersed_args()\n args=argv\n option=None\n global something_executed\n while True:\n # Process options up until the first filename\n (option,args) = topo_parser.parse_args(args,option)\n\n # Handle filename\n if args:\n filename=args.pop(0)\n #print \"Executing %s\" % (filename)\n filedir = os.path.dirname(os.path.abspath(filename))\n sys.path.insert(0,filedir) # Allow imports relative to this file's path\n sim_name_from_filename(filename) # Default value of topo.sim.name\n\n execfile(filename,__main__.__dict__)\n something_executed=True\n\n if not args:\n break\n\n global_params.check_for_unused_names()\n\n # If no scripts and no commands were given, pretend -i was given.\n if not something_executed: interactive()\n\n if option.gui: topo.guimain.title(topo.sim.name)\n\n ## INTERACTIVE SESSION BEGINS HERE (i.e. can't have anything but\n ## some kind of cleanup code afterwards)\n if os.environ.get('PYTHONINSPECT'):\n print \"Output path: %s\" % param.normalize_path.prefix\n print BANNER\n # CBALERT: should probably allow a way for users to pass\n # things to IPython? Or at least set up some kind of\n # topographica ipython config file. Right now, a topo_parser\n # option has to be added for every ipython option we want to\n # support (e.g. 
see --pdb)\n\n if ipython_shell_interface == \"IPython.Shell\":\n # IPython 0.10 and earlier\n\n # Stop IPython namespace hack?\n # http://www.nabble.com/__main__-vs-__main__-td14606612.html\n __main__.__name__=\"__mynamespace__\"\n\n ipython_args = ['-noconfirm_exit','-nobanner',\n '-pi1',CommandPrompt.get_format(),\n '-pi2',CommandPrompt2.get_format(),\n '-po',OutputPrompt.get_format()]\n if option.pdb:\n ipython_args.append('-pdb')\n\n ipshell = IPShell(ipython_args,user_ns=__main__.__dict__)\n ipshell.mainloop(sys_exit=1)\n\n elif ipython_shell_interface == \"InteractiveShellEmbed\":\n # IPython 0.11 and later\n\n config = Config()\n\n if ipython_prompt_interface == \"PromptManager\":\n config.PromptManager.in_template = CommandPrompt.get_format()\n config.PromptManager.in2_template = CommandPrompt2.get_format()\n config.PromptManager.out_template = OutputPrompt.get_format()\n else:\n config.InteractiveShell.prompt_in1 = CommandPrompt.get_format()\n config.InteractiveShell.prompt_in2 = CommandPrompt2.get_format()\n config.InteractiveShell.prompt_out = OutputPrompt.get_format()\n config.InteractiveShell.confirm_exit = False\n ipshell = IPShell(config=config,user_ns=__main__.__dict__,\n banner1=\"\",exit_msg=\"\")\n if option.pdb:\n ipshell.call_pdb = True\n\n # Load Topographica IPython extension in embedded shell\n try:\n ipshell.extension_manager.load_extension('topo.misc.ipython')\n except:\n cmdline_main.warning(\n \"Could not load IPython extension 'topo.misc.ipython'; ignored error was:\\n%s\"%traceback.format_exc())\n\n ipshell()\n\n global return_code\n if return_code != 0:\n cmdline_main.warning(\"Errors encountered; exiting with return code %d\" % return_code)\n\n sys.exit(return_code)", "def main():\n arguments = docopt(__doc__, version='cluster_parameter_extractor 1.0 BETA')\n\n input_file = arguments['--input']\n output_file = arguments[\"--output\"]\n process_synthetic = arguments[\"--synthetic_peptides\"]\n\n # make sure the input file exists\n if not os.path.isfile(input_file):\n print(\"Error: Cannot find input file '\" + input_file + \"'\")\n sys.exit(1)\n\n # make sure the output file does not exist\n if os.path.isfile(output_file):\n print(\"Error: Output file exists '\" + output_file + \"'\")\n sys.exit(1)\n\n with open(output_file, \"w\") as OUT:\n # write the header\n OUT.write(\"id\\tprecursor_mz\\tav_charge\\tsize\\tidentified_spec_count\\tunidentified_spec_count\\t\"\n \"max_ratio\\tmax_il_ratio\\tprecursor_mz_range\\tsequences\\t\"\n \"max_sequence\\tmax_sequence_count\\tmax_sequence_mods\\t\"\n \"second_max_sequence\\tsecond_max_sequence_count\\tsecond_max_sequence_mods\\tn_input_files\\t\"\n \"max_consensus_peak_rel_tic\\tmax_consensus_peak_mz\")\n\n if process_synthetic:\n OUT.write(\"\\tsynth_count\\tsynth_ratio\\tsynth_max_sequence\")\n\n OUT.write(\"\\n\")\n\n # process the file\n parser = clustering_parser.ClusteringParser(input_file)\n\n for cluster in parser:\n cluster_line = process_cluster(cluster)\n OUT.write(cluster_line)\n\n # process synthetic peptides\n if process_synthetic:\n synth_line = process_synthetic_peptides(cluster)\n OUT.write(\"\\t\" + synth_line)\n\n OUT.write(\"\\n\")\n\n print(\"Results written to \" + output_file)", "def test_cmdlineproc_test9():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n 
commandlineargs = [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--disconnect\", \"--replicates\", \"1000\", \"test.exe\",\n \"-i\", \"input.file\", \"param1\", \"--someflag\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"test.exe\"\n assert parameters[\"executableargs\"] == \"-i input.file param1 --someflag\"\n assert longbowargs == [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--disconnect\", \"--replicates\", \"1000\"]", "def lammps(parameter, iteration, numberlist, density, chainlength):\n # input parameters\n var = parameter[0]\n x = var * numberlist[0]\n y = var * numberlist[1] * 1.732 / 2\n z = x / 2\n #nodeid = 55 + iteration % 5\n M = int((x * y * z * density - 4 * numberlist[0] * numberlist[1] * int(z / 0.3)) / chainlength) \n os.environ['var'] = str(var)\n os.environ['iteration'] = str(iteration)\n os.environ['M'] = str(M)\n #os.environ['nodeid'] = str(nodeid)\n # copy the files to modify\n os.system(\"cp in.asymmetric in.asymmetric_$iteration\")\n os.system(\"cp run_lammps.sh run_lammps_$iteration.sh\")\n os.system(\"cp MultiBCP_hexagonal_post.py MultiBCP_hexagonal_post_$iteration.py\")\n # modify relevant files\n os.system('sed -i -e \"s/distance = 12/distance = $var/\" MultiBCP_hexagonal_post_$iteration.py')\n os.system(\"python MultiBCP_hexagonal_post_$iteration.py\")\n time.sleep(3)\n os.system('sed -i -e \"s/3072/$M/\" in.asymmetric_$iteration')#modify according to the parameters\n os.system('sed -i -e \"s/XN_100/XN_100_$iteration/\" in.asymmetric_$iteration')\n os.system('sed -i -e \"s/finalasymmetric/finalasymmetric_$iteration/\" in.asymmetric_$iteration')\n os.system('sed -i -e \"s/POSTA2B8/POSTA2B8_$iteration/\" run_lammps_$iteration.sh')\n os.system('sed -i -e \"s/posta2b8.out/posta2b8_$iteration.out/\" run_lammps_$iteration.sh')\n #os.system('sed -i -e \"s/node55/node$nodeid/\" run_lammps_$iteration.sh')\n os.system('sed -i -e \"s/in.asymmetric/in.asymmetric_$iteration/\" run_lammps_$iteration.sh')\n # run the simulation\n os.system(\"sbatch run_lammps_$iteration.sh\")\n return None", "def main(command_line_parameters = None):\n\n global task_count, job_count, job_ids, score_directories\n job_count = 0\n task_count = 0\n job_ids = {}\n score_directories = []\n\n command_line_options(command_line_parameters)\n\n global configuration, place_holder_key\n configuration = utils.read_config_file([args.configuration_file])\n place_holder_key = args.place_holder_key\n\n if args.preprocessor:\n configuration.preprocessor = args.preprocessor\n if args.extractor:\n configuration.extractor = args.extractor\n if args.algorithm:\n configuration.algorithm = args.algorithm\n\n if args.replace_variable is not None:\n exec(\"configuration.replace = configuration.%s\" % args.replace_variable)\n\n for attribute in ('preprocessor', 'extractor', 'algorithm'):\n if not hasattr(configuration, attribute):\n raise ValueError(\"The given configuration file '%s' does not contain the required attribute '%s', and it was not given on command line either\" %(args.configuration_file, attribute))\n\n # extract the dictionary of replacements from the configuration\n if not hasattr(configuration, 'replace'):\n raise ValueError(\"Please define a set of replacements using the 'replace' keyword.\")\n if not hasattr(configuration, 'imports'):\n configuration.imports = ['bob.bio.base']\n logger.info(\"No 'imports' specified in configuration file '%s' -> using default %s\", args.configuration_file, 
configuration.imports)\n\n if not hasattr(configuration, 'requirements'):\n configuration.requirements = []\n\n replace_dict = {}\n for step, replacements in configuration.replace.items():\n for key in replacements.keys():\n if key in replace_dict:\n raise ValueError("The replacement key '%s' was defined multiple times. Please use each key only once." % key)\n # we always start with index 0.\n replace_dict[key] = 0\n\n # now, iterate through the list of replacements and create the according calls\n create_recursive(replace_dict, step_index = 0, directories = {}, dependency_level = 0)\n\n # finally, write some information about the executed tasks\n if args.grid is not None:\n logger.info("The number of executed tasks is: %d, which are split up into %d jobs that are executed in the grid", task_count, job_count)\n\n if args.parallel is not None:\n logger.info("The total amount of finished tasks is: %d", task_count)\n\n return score_directories

def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration options\n parser.add_argument('--do_nearest_neighbor', dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n parser.add_argument('--debug', dest='debug', default=False, action='store_true', help='set to debug mode ')\n parser.add_argument('--num_train_sentence', dest='num_train_sentence', type=int, default=-1,\n help='set number of sentences to train on')\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n parser.add_argument('--attn_model', type=str, default='general', help='Attention model to use: general (default), dot, concat')\n\n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. 
to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args

def main():\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == "-h" or arg == "--help":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == "-v" or arg == "--verbose":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == "-f" or arg == "--file":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + "_results.txt"\r\n\r\n #if arg == "-u" or arg == "--url":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == "nt":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()

def test_cmdlineproc_test11():\n\n parameters = {\n "debug": False,\n "disconnect": False,\n "executable": "",\n "executableargs": "",\n "hosts": "",\n "job": "",\n "jobname": "",\n "log": "",\n "recover": "",\n "resource": "",\n "replicates": "",\n "verbose": False\n }\n\n commandlineargs = ["test.exe", "-i", "input.file", "param1", "--someflag"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters["executable"] == "test.exe"\n assert parameters["executableargs"] == "-i input.file param1 --someflag"\n assert longbowargs == []

def main():\n # Default input parameters\n nelx, nely, volfrac, penalty, rmin, ft = cli.parse_args(\n nelx=120, volfrac=0.2, penalty=6.0, rmin=1.5)\n bc = DistributedMultipleLoadsBoundaryConditions(nelx, nely)\n problem = ComplianceProblem(bc, penalty)\n cli.main(nelx, nely, volfrac, penalty, rmin, ft, bc=bc,\n problem=problem)

def main(options):\n # Check if path to MIRZA is valid\n if not is_executable(options.mirzabin):\n raise Exception("Path to MIRZA is invalid (%s)! Please define it with --mirzabin option." % options.mirzabin)\n if options.verbose:\n syserr("Reading coordinate file\\n")\n coords = read_coordinates(options.coords, True)\n\n if options.verbose:\n syserr("Reading mRNA sequences\\n")\n mRNAseqs = read_fasta_to_dict(options.seq)\n\n if options.verbose:\n syserr("Reading miRNA sequences\\n")\n miRNAseqs = read_fasta_to_dict(options.motifs)\n\n if options.onlymirza != 'yes':\n if options.verbose:\n syserr("Preparing alignments and phylogenetic tree\\n")\n\n phylo_tree = read_phylogenetic_tree(options.tree)\n multiple_alignment_dict = read_multiple_alignments(phylo_tree,\n options.mln_dir,\n coords)\n mirhomologues = make_homologues_mirnas(phylo_tree, miRNAseqs)\n\n with gzip.open(options.out, 'wb') as outfile:\n if options.verbose:\n syserr("Collecting sequences\\n")\n mRNA_sequences = [cor[-1] for cor in coords]\n mRNA_ids = ["%s,%s,%s" % (cor[0], cor[2], cor[3]) for cor in coords]\n number_of_coords = len(set([cor[1] for cor in coords]))\n if number_of_coords > 1:\n raise Exception("More than one miRNA in coordinate file")\n if number_of_coords == 0:\n syserr("There are no coordinates. 
Exit.\")\n sys.exit()\n\n miRNAseq = miRNAseqs[list(set([cor[1] for cor in coords]))[0]][:21]\n miRNAid = list(set([cor[1] for cor in coords]))[0]\n\n if options.verbose:\n syserr(\"Running MIRZA\\n\")\n results = calculate_mirza(mRNA_sequences, mRNA_ids, miRNAseq, miRNAid)\n\n if options.verbose:\n syserr(\"Collecting results\\n\")\n for key, group in itertools.groupby(results.splitlines(), lambda x: x == \"\"):\n if not key:\n proper_group = False\n for line in group:\n if line.startswith(\">\"):\n mRNAid = line.split()[0][1:].split(\",\")[0]\n beg = line.split()[0][1:].split(\",\")[1]\n end = line.split()[0][1:].split(\",\")[2]\n score = float(line.split()[-1])\n proper_group = True\n # elif line.startswith(\"miRNA\"):\n # mirhyb = line.split(\"\\t\")[1].split(\" \")[0]\n # elif line.startswith(\"A L\"):\n # hyb = line.split(\"\\t\")[1].rstrip()\n elif line.startswith(\"mRNA\"):\n mrhyb = line.split(\"\\t\")[1].split(\" \")[0]\n if proper_group:\n if len(miRNAseq) < 21:\n outtext = '%s,%s,%s,%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (mRNAid,\n miRNAid,\n beg,\n end,\n \"NA\",\n \"NA\",\n \"NA\",\n \"NA\",\n \"NA\")\n outfile.write(outtext)\n continue\n\n # hybrids = [mirhyb, hyb, mrhyb]\n # mirseq, hybseq, mrhybseq, mrpos = get_hybrid_vector(hybrids)\n # canonical, type_of_site = is_canonical([mirseq, hybseq, mrhybseq])\n if options.onlymirza != 'yes':\n try:\n mln_frag = multiple_alignment_dict[mRNAid]\n qd = calculate_conservation(phylotree=phylo_tree,\n mrna_frag=mrhyb.replace(\"-\", \"\")[::-1],\n mrnaid=mRNAid,\n mirna=mirhomologues,\n mirname=miRNAid,\n mln_dict=mln_frag,\n ref_org=options.reforg,\n threshold=options.thr,\n mrna_len=options.contextLen)\n qd = str(qd)\n except KeyError, e:\n qd = \"NA\"\n sys.stderr.write(\"KeyError: \" + str(e) + \"\\n\")\n sys.stderr.write(\"Trace: \"\n + traceback.format_exc()\n + \"\\n\")\n # raise KeyError\n else:\n qd = \"NA\"\n outtext = '%s,%s,%s,%s\\t%f\\t%s\\n' % (mRNAid,\n miRNAid,\n beg,\n end,\n score,\n # \":\".join(hybrids),\n qd)\n # \"canonical\" if canonical else \"non-canonical\",\n # type_of_site)\n outfile.write(outtext)\n clean()", "def main(argv=None):\n\n if not argv:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-b\", \"--bam-file\", dest=\"bam\", type=\"string\",\n help=\"BAM file containing iCLIP reads\")\n parser.add_option(\"-s\", \"--spread\", dest=\"spread\", type=\"int\",\n default=15,\n help=\"Number of bases each site of each bases\"\n \"to use when calculating height\")\n parser.add_option(\"-r\", \"--randomisations\", dest=\"rands\", type=\"int\",\n default=100,\n help=\"Number of randomisations to use when\"\n \"calculating FDR\")\n parser.add_option(\"-t\", \"--threshold\", dest=\"threshold\", type=\"float\",\n default=0.05,\n help=\"FDR threshold on which to select bases\")\n parser.add_option(\"-f\", \"--feature\", dest=\"feature\", type=\"choice\",\n choices=[\"transcript\", \"gene\"],\n default=\"gene\",\n help=\"GTF feature to use. Gene or transcript\")\n parser.add_option(\"-p\", \"--processes\", dest=\"proc\", type=\"int\",\n default=None,\n help=\"Number of processes to use for multiprocessing\")\n\n # add common options (-h/--help, ...) 
and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.proc:\n try:\n import multiprocessing\n pool = multiprocessing.Pool(options.proc)\n except ImportError:\n E.warn(\"Failed to setup multiprocessing, using single processor\")\n pool = None\n else:\n pool = None\n\n if options.feature == \"gene\":\n iterator = GTF.flat_gene_iterator(GTF.iterator(options.stdin))\n elif options.feature == \"transcript\":\n iterator = GTF.transcript_iterator(GTF.iterator(options.stdin))\n else:\n raise ValueError(\"Unknown feature type %s\" % options.feature)\n\n bam = pysam.AlignmentFile(options.bam)\n\n results = clusters.get_crosslink_fdr_by_randomisation(\n iterator, bam, options.rands, options.spread, pool)\n\n results = results[results <= options.threshold]\n results = results.sort_index()\n results = results.reset_index()\n results.columns = [\"contig\", \"start\", \"FDR\"]\n results[\"start\"] = results[\"start\"].astype(\"int\")\n results[\"end\"] = results.start + 1\n results = results.loc[:,[\"contig\", \"start\", \"end\", \"FDR\"]]\n results[\"FDR\"] = -numpy.log10(results[\"FDR\"])\n results.to_csv(options.stdout, header=False, index=False, sep=\"\\t\")\n # write footer and output benchmark information.\n E.Stop()", "def main(cliargs=None):\n\n try:\n args = parsecli(cliargs)\n # checkargs(args)\n result = 0 # process(args)\n log.info(\"Done.\")\n return result\n\n except KeyboardInterrupt:\n return 10", "def test_cmdlineproc_test8():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--replicates\", \"1000\", \"--disconnect\", \"test.exe\",\n \"-i\", \"input.file\", \"param1\", \"--someflag\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"test.exe\"\n assert parameters[\"executableargs\"] == \"-i input.file param1 --someflag\"\n assert longbowargs == [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--replicates\", \"1000\", \"--disconnect\"]", "def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration options\n parser.add_argument('--do_nearest_neighbor', dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n \n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n 
parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args", "def main(argv=None):\n\n parser = ArgParser(\n description=\"Extrapolate input data to required lead times.\")\n parser.add_argument(\"input_filepath\", metavar=\"INPUT_FILEPATH\",\n type=str, help=\"Path to input NetCDF file.\")\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"--output_dir\", metavar=\"OUTPUT_DIR\", type=str,\n default=\"\", help=\"Directory to write output files.\")\n group.add_argument(\"--output_filepaths\", nargs=\"+\", type=str,\n help=\"List of full paths to output nowcast files, in \"\n \"order of increasing lead time.\")\n\n optflw = parser.add_argument_group('Advect using files containing the x '\n ' and y components of the velocity')\n optflw.add_argument(\"--eastward_advection_filepath\", type=str, help=\"Path\"\n \" to input file containing Eastward advection \"\n \"velocities.\")\n optflw.add_argument(\"--northward_advection_filepath\", type=str, help=\"Path\"\n \" to input file containing Northward advection \"\n \"velocities.\")\n\n speed = parser.add_argument_group('Advect using files containing speed and'\n ' direction')\n speed.add_argument(\"--advection_speed_filepath\", type=str, help=\"Path\"\n \" to input file containing advection speeds,\"\n \" usually wind speeds, on multiple pressure levels.\")\n speed.add_argument(\"--advection_direction_filepath\", type=str,\n help=\"Path to input file containing the directions from\"\n \" which advection speeds are coming (180 degrees from\"\n \" the direction in which the speed is directed). The\"\n \" directions should be on the same grid as the input\"\n \" speeds, including the same vertical levels.\")\n speed.add_argument(\"--pressure_level\", type=int, default=75000, help=\"The\"\n \" pressure level in Pa to extract from the multi-level\"\n \" advection_speed and advection_direction files. The\"\n \" velocities at this level are used for advection.\")\n parser.add_argument(\"--orographic_enhancement_filepaths\", nargs=\"+\",\n type=str, default=None, help=\"List or wildcarded \"\n \"file specification to the input orographic \"\n \"enhancement files. Orographic enhancement files are \"\n \"compulsory for precipitation fields.\")\n parser.add_argument(\"--json_file\", metavar=\"JSON_FILE\", default=None,\n help=\"Filename for the json file containing \"\n \"required changes to the metadata. Information \"\n \"describing the intended contents of the json file \"\n \"is available in \"\n \"improver.utilities.cube_metadata.amend_metadata.\"\n \"Every output cube will have the metadata_dict \"\n \"applied. Defaults to None.\", type=str)\n parser.add_argument(\"--max_lead_time\", type=int, default=360,\n help=\"Maximum lead time required (mins).\")\n parser.add_argument(\"--lead_time_interval\", type=int, default=15,\n help=\"Interval between required lead times (mins).\")\n\n accumulation_args = parser.add_argument_group(\n 'Calculate accumulations from advected fields')\n accumulation_args.add_argument(\n \"--accumulation_fidelity\", type=int, default=0,\n help=\"If set, this CLI will additionally return accumulations\"\n \" calculated from the advected fields. 
This fidelity specifies the\"\n \" time interval in minutes between advected fields that is used to\"\n \" calculate these accumulations. This interval must be a factor of\"\n \" the lead_time_interval.\")\n accumulation_args.add_argument(\n \"--accumulation_period\", type=int, default=15,\n help=\"The period over which the accumulation is calculated (mins). \"\n \"Only full accumulation periods will be computed. At lead times \"\n \"that are shorter than the accumulation period, no accumulation \"\n \"output will be produced.\")\n accumulation_args.add_argument(\n \"--accumulation_units\", type=str, default='m',\n help=\"Desired units in which the accumulations should be expressed,\"\n \"e.g. mm\")\n\n # Load Cubes\n args = parser.parse_args(args=argv)\n\n metadata_dict = load_json_or_none(args.json_file)\n\n upath, vpath = (args.eastward_advection_filepath,\n args.northward_advection_filepath)\n spath, dpath = (args.advection_speed_filepath,\n args.advection_direction_filepath)\n\n # load files and initialise advection plugin\n input_cube = load_cube(args.input_filepath)\n orographic_enhancement_cube = load_cube(\n args.orographic_enhancement_filepaths, allow_none=True)\n\n speed_cube = direction_cube = ucube = vcube = None\n if (upath and vpath) and not (spath or dpath):\n ucube = load_cube(upath)\n vcube = load_cube(vpath)\n elif (spath and dpath) and not (upath or vpath):\n level_constraint = Constraint(pressure=args.pressure_level)\n try:\n speed_cube = load_cube(spath, constraints=level_constraint)\n direction_cube = load_cube(dpath, constraints=level_constraint)\n except ValueError as err:\n raise ValueError(\n '{} Unable to extract specified pressure level from given '\n 'speed and direction files.'.format(err))\n else:\n raise ValueError('Cannot mix advection component velocities with speed'\n ' and direction')\n\n # Process Cubes\n accumulation_cubes, forecast_to_return = process(\n input_cube, ucube, vcube, speed_cube, direction_cube,\n orographic_enhancement_cube, metadata_dict, args.max_lead_time,\n args.lead_time_interval, args.accumulation_fidelity,\n args.accumulation_period, args.accumulation_units)\n\n # Save Cube\n if args.output_filepaths and \\\n len(args.output_filepaths) != len(forecast_to_return):\n raise ValueError(\"Require exactly one output file name for each \"\n \"forecast lead time\")\n for i, cube in enumerate(forecast_to_return):\n # save to a suitably-named output file\n if args.output_filepaths:\n file_name = args.output_filepaths[i]\n else:\n file_name = os.path.join(\n args.output_dir, generate_file_name(cube))\n save_netcdf(cube, file_name)\n\n if args.accumulation_fidelity > 0:\n # return accumulation cubes\n for i, cube in enumerate(accumulation_cubes):\n file_name = os.path.join(args.output_dir, generate_file_name(cube))\n save_netcdf(cube, file_name)", "def test_cmdlineproc_test5():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--replicates\", \"1000\", \"--disconnect\", \"pmemd.MPI\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"pmemd.MPI\"\n assert parameters[\"executableargs\"] == \"\"\n assert longbowargs == [\"--hosts\", \"hosts.file\", \"--jobname\", 
\"test\",\n \"--replicates\", \"1000\", \"--disconnect\"]", "def run_parallel(heritability, x_start_i, x_stop_i, cluster='usc'):\n\trun_id = 'corr_trait_sim'\n\tjob_id = ' % s_ % d_ % d' % (run_id, x_start_i, x_stop_i)\n\tfile_prefix = env.env['results_dir'] + run_id + '_' + str(x_start_i) + '_' + str(x_stop_i)\n\n\t#Cluster specific parameters\t\n\tif cluster == 'gmi': #GMI cluster.\n\t\tshstr = '#!/bin/sh\\n'\n\t\tshstr += '#$ -N %s\\n' % job_id\n\t\tshstr += \"#$ -q q.norm@blade*\\n\"\n\t\tshstr += '#$ -o %s.log\\n' % job_id\n\t\t#shstr += '#$ -cwd /home/GMI/$HOME\\n'\n\t\t#shstr += '#$ -M bjarni.vilhjalmsson@gmi.oeaw.ac.at\\n\\n'\n\n\telif cluster == 'usc': #USC cluster.\n\t\tshstr = \"#!/bin/csh\\n\"\n\t\tshstr += \"#PBS -l walltime=%s \\n\" % '72:00:00'\n\t\tshstr += \"#PBS -l mem=%s \\n\" % '1950mb'\n\t\tshstr += \"#PBS -q cmb\\n\"\n\t\tshstr += \"#PBS -N p%s \\n\" % job_id\n\n\tshstr += \"(python %scorr_trait_sim.py %s %d %d \" % (env.env['script_dir'], heritability, x_start_i, x_stop_i)\n\n\tshstr += \"> \" + file_prefix + \"_job.out) >& \" + file_prefix + \"_job.err\\n\"\n\tprint '\\n', shstr, '\\n'\n\tscript_file_name = run_id + \".sh\"\n\tf = open(script_file_name, 'w')\n\tf.write(shstr)\n\tf.close()\n\n\t#Execute qsub script\n\tos.system(\"qsub \" + script_file_name)", "def main():\n # Check the user CLI input matches correct syntax\n try:\n # Specify the valid CLI options/arguments\n opts, _ = getopt.getopt(\n sys.argv[1:],\n \"hv\",\n [\n \"help\",\n \"verbose\",\n \"input=\",\n \"output=\",\n \"locations=\",\n \"trips=\",\n \"results=\",\n ],\n )\n except getopt.GetoptError as err:\n print(str(err))\n usage()\n sys.exit(2)\n\n # Define input arguments and initialize default values.\n input_file = \"tests/input/input1.txt\"\n output_file = \"tests/output/output1.txt\"\n locations_file = \"data/locations.csv\"\n trips_file = \"data/trips.csv\"\n results = 3\n\n # Loop through all the User CLI options/arguments\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt in (\"-v\", \"--verbose\"):\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n elif opt == \"--input\":\n input_file = arg\n\n if not os.path.exists(input_file):\n sys.exit(\"Could not find input file\")\n elif opt == \"--output\":\n output_file = arg\n elif opt == \"--locations\":\n locations_file = arg\n\n if not os.path.exists(locations_file):\n sys.exit(\"Could not find locations file\")\n elif opt == \"--trips\":\n trips_file = arg\n\n if not os.path.exists(trips_file):\n sys.exit(\"Could not find trips file\")\n elif opt == \"--results\":\n results = int(arg)\n\n parse(\n locations_file,\n trips_file,\n input_file,\n output_file,\n results,\n )", "def parseCommandLine():\n\n parser = argparse.ArgumentParser(\n description='Determine photometric zeropoint of banzai-reduced LCO imaging data.')\n\n\n parser.add_argument('--log-level', dest='log_level', default='INFO', choices=['DEBUG', 'INFO'],\n help='Set the log level')\n parser.add_argument('--ps1dir', dest='ps1dir', default='~/Catalogs/ps1odi/panstarrs/',\n help='Directory of PS1 catalog')\n parser.add_argument(\"--diagnosticplotsdir\", dest='outputimageRootDir', default=None,\n help='Output directory for diagnostic photometry plots. No plots generated if option is omitted. This is a time consuming task. ')\n parser.add_argument('--photodb', dest='imagedbPrefix', default='~/lcozpplots/lcophotzp.db',\n help='Result output directory. 
.db file is written here')\n parser.add_argument('--imagerootdir', dest='rootdir', default='/archive/engineering',\n help="LCO archive root directory")\n parser.add_argument('--site', dest='site', default=None, help='site code for camera')\n parser.add_argument('--mintexp', dest='mintexp', default=60, type=float, help='Minimum exposure time to accept')\n parser.add_argument('--redo', action='store_true')\n parser.add_argument ('--preview', dest='processstatus', default='processed', action='store_const', const='preview')\n\n\n\n mutex = parser.add_mutually_exclusive_group()\n mutex.add_argument('--date', dest='date', default=[None,], nargs='+', help='Specific date to process.')\n mutex.add_argument('--lastNdays', type=int)\n\n\n cameragroup = parser.add_mutually_exclusive_group()\n\n cameragroup.add_argument('--camera', dest='camera', default=None, help='specific camera to process. ')\n cameragroup.add_argument('--cameratype', dest='cameratype', default=None, choices=['fs', 'fl', 'kb'],\n help='camera type to process at selected sites to process. ')\n cameragroup.add_argument('--crawldirectory', default=None, type=str,\n help="process all reduced images in a specific directory")\n\n args = parser.parse_args()\n\n logging.basicConfig(level=getattr(logging, args.log_level.upper()),\n format='%(asctime)s.%(msecs).03d %(levelname)7s: %(module)20s: %(message)s')\n\n args.imagedbPrefix = os.path.expanduser(args.imagedbPrefix)\n\n if args.outputimageRootDir is not None:\n args.outputimageRootDir = os.path.expanduser(args.outputimageRootDir)\n print ("Writing diagnostic plots to directory: %s" % args.outputimageRootDir)\n\n if args.crawldirectory is not None:\n args.crawldirectory = os.path.expanduser(args.crawldirectory)\n\n\n\n if (args.lastNdays is not None):\n args.date=[]\n today = datetime.datetime.utcnow()\n for ii in range (args.lastNdays):\n day = today - datetime.timedelta(days=ii)\n args.date.append (day.strftime("%Y%m%d"))\n\n args.date = args.date[::-1]\n\n args.ps1dir = os.path.expanduser(args.ps1dir)\n\n print (args.processstatus)\n return args

def main(args):\n options = parseArgs(args)\n dir = ""\n # Check if we're doing a local or global directory. \n if options.outputDir.startswith("/"):\n dir = options.outputDir\n else: dir = os.getcwd() + "/" + options.outputDir\n \n # Check that the directory doesn't exist and make it. \n if not os.path.exists(dir):\n os.makedirs(dir)\n os.chdir(dir)\n else:\n print ("The output dir provided, %s, is already in use. Aborting."%(options.outputDir))\n exit(1)\n\n if options.verbose: print ("Start run kallisto on a batch of fastq files (cdwManiFastqToKallisto).")\n if options.verbose: print ("Making the job list...")\n makeJobList("../"+options.manifestFile, open("jobList","w", 1),options.isMouse, options.bootstrap, options.outputDir, options.verbose)\n \n if options.test:\n print ("You have selected a dry run, the program is stopping now.")\n exit(1)\n\n # Go onto the server and run the joblist. 
\n paraRunKallisto(options.verbose, os.getenv("HOST")) \n if options.verbose: print ("Starting the parasol run...")\n if options.verbose: print ("Completed run kallisto on a batch of fastq files (cdwManiFastqToKallisto).")

def execute(self, parameters, messages):\r\n\r\n # Gather all necessary parameters\r\n in_nc = parameters[0].valueAsText\r\n in_csv = parameters[1].valueAsText\r\n basin_mask = parameters[2].valueAsText\r\n routing = parameters[3].valueAsText\r\n Lake_routing = parameters[4].valueAsText\r\n in_reservoir = parameters[5].valueAsText\r\n in_raster = parameters[6].valueAsText\r\n cellsize = parameters[7].valueAsText\r\n threshold = parameters[8].valueAsText\r\n ovroughrtfac_val = parameters[9].valueAsText\r\n retdeprtfac_val = parameters[10].valueAsText\r\n out_zip = parameters[11].valueAsText\r\n\r\n # Prepare output log file\r\n outtable = open(os.path.join(os.path.dirname(out_zip), os.path.basename(out_zip) + '.log'), "w")\r\n loglines = ['Beginning processing on %s' %time.ctime()]\r\n loglines.append('64-bit background geoprocessing: %s' %bit64)\r\n tic = time.time()\r\n loglines.append('Input parameters:')\r\n for param in parameters:\r\n loglines.append(' Parameter: %s: %s' %(param.displayName, param.valueAsText))\r\n outtable.writelines("\\n".join(loglines) + "\\n")\r\n\r\n # Create scratch directory for temporary outputs\r\n projdir = os.path.dirname(out_zip) + os.sep + 'scratchdir' # This is the only instance where we need the 'os' module!\r\n if not os.path.exists(projdir):\r\n os.makedirs(projdir)\r\n arcpy.env.overwriteOutput = True\r\n arcpy.env.workspace = projdir\r\n arcpy.env.scratchWorkspace = projdir\r\n\r\n # Set the input units to meters\r\n inunits = 'm'\r\n\r\n # Interpret the input for reservoir routing\r\n if Lake_routing == 'false':\r\n in_lakes = ''\r\n else:\r\n in_lakes = in_reservoir\r\n\r\n # Step 1 - Georeference geogrid file\r\n\r\n LU_INDEX, sr2, Projection_String, loglines = wrf_hydro_functions.georeference_geogrid_file(arcpy, in_nc, 'LU_INDEX') # Process: Generate LU Index grid\r\n outtable.writelines("\\n".join(loglines) + "\\n")\r\n \r\n hgt_m_raster, sr2, Projection_String, loglines = wrf_hydro_functions.georeference_geogrid_file(arcpy, in_nc, 'HGT_M')\r\n outtable.writelines("\\n".join(loglines) + "\\n")\r\n\r\n\r\n # Step 2 - Create high resolution topography layers\r\n mosprj, cellsize1, cellsize2, loglines = wrf_hydro_functions.create_high_res_topogaphy(arcpy, in_raster, hgt_m_raster, cellsize, sr2, projdir)\r\n outtable.writelines("\\n".join(loglines) + "\\n")\r\n\r\n # Step 3 - Create latitude and longitude rasters\r\n loglines = wrf_hydro_functions.create_lat_lon_rasters(arcpy, projdir, mosprj)\r\n outtable.writelines("\\n".join(loglines) + "\\n")\r\n\r\n # Step 4 - Hydro processing functions\r\n loglines = wrf_hydro_functions.sa_functions(arcpy, basin_mask, mosprj, ovroughrtfac_val, retdeprtfac_val, projdir, in_csv, out_zip, threshold, inunits, LU_INDEX, cellsize1, cellsize2, routing, in_lakes) # , mosprj2,\r\n outtable.writelines("\\n".join(loglines) + "\\n")\r\n\r\n # Clean up and give finishing message\r\n #del LU_INDEX, hgt_m_raster\r\n #shutil.rmtree(projdir)\r\n loglines = ['Completed without error in %s seconds.\\n' %(time.time()-tic)]\r\n arcpy.AddMessage(loglines[-1])\r\n outtable.write(loglines[-1])\r\n outtable.close()\r\n return

def main():\n parser = argparse.ArgumentParser(description="Renumber atoms and residues from a 3D structure.", formatter_class=lambda prog: 
argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('-c', '--config', required=False, help=\"This file can be a YAML file, JSON file or JSON string\")\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('-i', '--input_gro_path', required=True, help=\"Input GRO file name\")\n required_args.add_argument('-o', '--output_gro_path', required=True, help=\"Output sorted GRO file name\")\n\n args = parser.parse_args()\n config = args.config if args.config else None\n properties = settings.ConfReader(config=config).get_prop_dic()\n\n # Specific call of each building block\n sort_gro_residues(input_gro_path=args.input_gro_path,\n output_gro_path=args.output_gro_path,\n properties=properties)", "def test_cmdlineproc_test6():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--maxtime\", \"01:00\", \"--replicates\", \"1000\",\n \"--disconnect\", \"pmemd.MPI\", \"-O\", \"-i\", \"ex.in\", \"-c\",\n \"ex.min\", \"-p\", \"ex.top\", \"-o\", \"ex.out\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"pmemd.MPI\"\n assert parameters[\"executableargs\"] == \\\n \"-O -i ex.in -c ex.min -p ex.top -o ex.out\"\n assert longbowargs == [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--maxtime\", \"01:00\", \"--replicates\", \"1000\",\n \"--disconnect\"]", "def process_arguments():\n # Create ArgumentParser object. 
Description message will be displayed as part of help message if script is run with -h flag\n parser = argparse.ArgumentParser(description='Prints tier 1 and 2 variant details to stdout for a given 100k case')\n # Define the arguments that will be taken.\n parser.add_argument('-i', '--ir_id', required=True, help='GeL Interpretation Request ID in format 12345-1')\n parser.add_argument('-p', '--proband_id', required=True, help='GeL participant ID for proband')\n # Return the arguments\n return parser.parse_args()", "def run(args):\n # args = args_init(vars(get_args()), align=True)\n\n log.info('running RNAseq pipeline')\n\n ## default arguments, for RNAseq2 only\n args['align_to_rRNA'] = True\n\n ## multireads should not be discarded\n ## https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4728800/\n ## Genome Biology, 2016\n if args['include_multi_reads']:\n args['unique_only'] = False # \n else:\n args['unique_only'] = True # default \n\n # determine gtf file\n # gene gtf\n if args['gtf'] is None:\n args['gtf'] = Genome(**args).gene_gtf('refseq') # ucsc version\n\n # for GRCh38 genome, using GENCODE version \n # if args['genome'] == 'GRCh38':\n if args['genome'] in ['GRCh38', 'GRCm38']:\n args['gtf'] = Genome(**args).gene_gtf('ensembl') # gencode\n\n # te gtf\n if args['te_gtf'] is None:\n args['te_gtf'] = Genome(**args).te_gtf()\n\n # print(args['gtf'])\n\n ## update prefix\n ctl_prefix = str_common([os.path.basename(f) for f in args['c1']])\n ctl_prefix = ctl_prefix.rstrip('r|R|rep|Rep').rstrip('_|.')\n if args['C'] is None:\n args['C'] = ctl_prefix\n tre_prefix = str_common([os.path.basename(f) for f in args['t1']])\n tre_prefix = tre_prefix.rstrip('r|R|rep|Rep').rstrip('_|.')\n if args['T'] is None:\n args['T'] = tre_prefix\n \n ## run pipeline\n if args['extra_index']:\n extra_rnaseq(args, args['extra_gtf'])\n elif args['align_to_te']:\n te_rnaseq(args, args['te_gtf'])\n else:\n gene_rnaseq(args)\n\n log.info('finish')", "def parse_args():\n parser = argparser()\n args = parser.parse_args()\n\n # --------------------------------------------------------- #\n # Parse and check runtime parameters.\n # --------------------------------------------------------- #\n\n # check validity of cores selection.\n max_ppn = max_cpu() + 1\n if args.proc_per_node > max_ppn:\n warnings.warn('{0} is greater than the number of available cores ({1}). Reducing to {2}'\n .format(args.proc_per_node, max_ppn, max_ppn - 1))\n args.proc_per_node = max_ppn - 1\n\n # ensure that user has supplied fresh .bam/.bai files or a directory containing alignment files.\n if not args.bam_files and not args.bam_dir:\n raise ValueError('Must specify either --bam-files or --bam-dir.')\n\n if args.bam_files and args.bam_dir:\n raise ValueError('Both --bam-files and --bam-dir were provided! 
Not sure which data set to use.')\n\n # --------------------------------------------------------- #\n # Gather input RNA-Seq + genome annotation files.\n # --------------------------------------------------------- #\n\n # check validity of gene annotation file selection.\n if not args.genome_annotation:\n raise ValueError('If warm-start directory not specified, gene annotation file must be specified!')\n\n else:\n if not os.path.isfile(args.genome_annotation):\n raise FileNotFoundError('Gene annotation file {0} not found.'.format(args.genome_annotation))\n\n # check validity of file i/o selection.\n bam_files = list()\n bai_files = list()\n create_bai_files = list()\n\n # INPUT OPTION 1: a --bam-dir was specified.\n if args.bam_dir:\n\n # if user used both --bam-dir and --bam-files and/or --bai-files, yell at them. (only use one method).\n if args.bam_files is not None or args.bai_files is not None:\n raise ValueError('Do not specify both a --bam-dir and either --bam-files and/or --bai-files.'\n 'Use one input selection method or the other.')\n\n # check that the dir actually exists.\n if not os.path.isdir(args.bam_dir):\n raise NotADirectoryError('Cannot find --bam-dir {0}'.format(args.bam_dir))\n\n # scan directory for .bam files.\n for f in os.listdir(args.bam_dir):\n if f.endswith('.bam'):\n bam_files.append(os.path.join(args.bam_dir, f))\n\n # search for .bai files in the --bam-dir. If they don't exist, try to make them.\n for bam_file in bam_files:\n bai_file = re.sub('.bam$', '.bai', bam_file)\n\n # if .bai file under same basename as .bam file doesn't exist,\n # add it to list of .bai files that need to be created.\n if not os.path.isfile(bai_file):\n bai_files.append(bai_from_bam_file(bam_file))\n create_bai_files.append(bam_file)\n else:\n bai_files.append(bai_file)\n\n # INPUT OPTION 2: --bam-files and possibly --bai-files were specified.\n else:\n # ensure .bam files are actually .bam files.\n for bam_file in args.bam_files:\n if not bam_file.endswith('.bam'):\n raise ValueError('{0} is not a .bam file.'.format(bam_file))\n elif not os.path.isfile(bam_file):\n raise FileNotFoundError('Could not find .bam file {0}'.format(bam_file))\n else:\n bam_files.append(bam_file)\n\n # case where user has specified .bai files to accompany .bam files.\n if args.bai_files is not None:\n # if user has supplied an incorrect number of bai files, fail out.\n if len(args.bai_files) != len(bam_files):\n raise ValueError('Number of supplied .bai files does not match number of supplied .bam files.')\n\n # ensure .bai files are actually .bai files.\n for bai_file in args.bai_files:\n if not bai_file.endswith('.bai'):\n raise ValueError('{0} is not a .bai file.'.format(bai_file))\n elif not os.path.isfile(bai_file):\n raise FileNotFoundError('Could not find .bai file {0}'.format(bai_file))\n else:\n bai_files.append(bai_file)\n\n # if user has not supplied any bai files: look for them under the same name\n # as each of the .bam files, or create new .bai files with samtools (if possible).\n else:\n for bam_file in bam_files:\n bai_file = re.sub('.bam$', '.bai', bam_file)\n\n # if .bai file under same name as .bam file doesn't exist,\n # add it to list of .bam files for which we need to create a .bai file.\n if not os.path.isfile(bai_file):\n bai_files.append(bai_from_bam_file(bam_file))\n create_bai_files.append(bam_file)\n else:\n bai_files.append(bai_file)\n\n # ensure that input files are uniquely named.\n if len(bam_files) != len(set(bam_files)):\n raise ValueError('Supplied .bam files are not 
uniquely named!')\n\n # create parser attributes for bam/index files.\n args.bam_files = bam_files\n args.bai_files = bai_files\n args.create_bai_files = create_bai_files\n\n return args

def get_options():\n parser = argparse.ArgumentParser(description='ZA Machine learning for full run2 Ulegacy data')\n \n parser.add_argument('-o', '--outputs', action='store', required=True, type=str,\n help='ZA machine learning outputs dir ')\n\n #=========================================================================\n # Local For Test : Scan, deploy and restore arguments #\n #=========================================================================\n a = parser.add_argument_group('Scan, deploy and restore arguments')\n a.add_argument('-s','--scan', action='store', required=False, type=str, default='',\n help='Name of the scan to be used (modify scan parameters in NeuralNet.py)')\n a.add_argument('-task','--task', action='store', required=False, type=str, default='',\n help='Name of dict to be used for scan (Used by function itself when submitting jobs or DEBUG)')\n a.add_argument('--generator', action='store_true', required=False, default=False, \n help='Whether to use a generator for the neural network')\n a.add_argument('--resume', action='store_true', required=False, default=False,\n help='Whether to resume the training of a given model (path in parameters.py)')\n #=========================================================================\n # Slurm Submissions : Splitting and submitting jobs arguments #\n #=========================================================================\n b = parser.add_argument_group('Splitting and submitting jobs arguments')\n b.add_argument('-split','--split', action='store', required=False, type=int, default=0,\n help='Number of parameter sets per job to be used for split training for slurm submission (if -1, will create a single subdict)')\n b.add_argument('-submit','--submit', action='store', required=False, default='', type=str,\n help='Whether to submit jobs to slurm (must have specified --split)')\n b.add_argument('-resubmit','--resubmit', action='store', required=False, default='', type=str,\n help='Whether to resubmit failed jobs given the name you give during submission (must give the same output)')\n b.add_argument('-debug','--debug', action='store_true', required=False, default=False,\n help='Debug mode of the slurm submission, does everything except submit the jobs')\n #=========================================================================\n # Report and Produce Outputs: This does csv concatenation and gets the best model : workdir/model/*.csv\n # Further used for : Analyzing or producing outputs of the given model \n #=========================================================================\n c = parser.add_argument_group('Analyzing or producing outputs for given model (csv or zip file)')\n c.add_argument('-r','--report', action='store_true', required=False, default=False,\n help='report 10 best models (according to the eval_criterion) and plot on the console several histograms and *.png files')\n c.add_argument('-m','--model', action='store', required=False, type=str, default='',\n help='Loads the provided model name (without .zip and type, it will find them)') \n c.add_argument('-k','--key', action='store', required=False, nargs='+', type=str, default=[], \n help='Applies the provided model (do not forget -k) on the list of keys from parameters.TTree') \n #=========================================================================\n # Physics arguments 
#\n #=========================================================================\n e = parser.add_argument_group('Physics arguments')\n e.add_argument('-p','--process', action='store', required=False, nargs='+', default=[],\n help='Which process you want to submit for training ')\n e.add_argument('--resolved', action='store_true', required=False, default=False,\n help='Resolved topology')\n e.add_argument('--boosted', action='store_true', required=False, default=False,\n help='Boosted topology')\n #=========================================================================\n # Additional arguments #\n #=========================================================================\n f = parser.add_argument_group('Additional arguments')\n f.add_argument('-v','--verbose', action='store_true', required=False, default=False,\n help='Show DEBUG logging')\n f.add_argument('--GPU', action='store_true', required=False, default=False,\n help='GPU requires executing some commands before')\n f.add_argument('--cache', action='store_true', required=False, default=False,\n help='Will use the cache')\n f.add_argument('--interactive', action='store_true', required=False, default=False,\n help='Interactive mode to check the dataframe')\n \n opt = parser.parse_args()\n\n if opt.split!=0 or opt.submit!='':\n if opt.scan!='' or opt.report:\n logging.critical('These parameters cannot be used together: ') \n logging.critical('\t--scan --debug --verbose : to debug and check that all is okay locally') \n logging.critical('\t--submit --split 1 : to submit jobs to slurm if the previous step is okay') \n logging.critical('\t--report : should be the last step in order to get the best model, plots, etc...')\n sys.exit(1)\n \n if opt.submit!='': # Need --output or --split arguments\n if opt.split==0 and len(opt.key)==0:\n logging.warning('In case of learning you forgot to specify --split')\n sys.exit(1)\n \n if opt.split!=0 and (opt.report or opt.key!='' or opt.scan!=''):\n logging.warning('Since you have specified a split, all the other arguments will be skipped')\n \n if opt.report and (opt.key!='' or opt.scan!=''):\n logging.warning('Since you have specified a scan report, all the other arguments will be skipped')\n \n if len(opt.key)!=0 and opt.key == '': \n logging.critical(f'--key is missing choices: {parameters.TTree}')\n sys.exit(1)\n \n if opt.generator:\n logging.info("Will use the generator")\n \n if opt.resume:\n logging.info("Will resume the training of the model")\n\n return opt

def RunJobs(self, runfile_mapping, server_run_map):\n if self.workflow is None:\n raise RuntimeError("Tried to create unnamed workflow!")\n\n \n # Generate jobs for the first pass over the data\n for run in sorted(runfile_mapping.keys()):\n if self.VERBOSE>0:\n inputfiles="/%s/rawdata/volatile/%s/rawdata/Run%06d/hd_rawdata_*.evio"%(HDRunFileRAIDList.GetRAIDDirFromRun(run,server_run_map),HDJobUtils.GetRunPeriodFromRun(run),run)\n\n # PASS 0\n print "processing run %d, phase 0 ..."%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd = " %s/scripts/%s %s %s %06d %03d %d"%(self.basedir,"job_wrapper_local.csh","local_calib_pass0.csh",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd = " %s/scripts/%s %s %s %06d %03d"%(self.basedir,"job_wrapper_local.csh","local_calib_pass0.csh",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)\n\n # PASS 1\n print "processing run %d, phase 1 ..."%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd = " %s/scripts/%s %s %s %06d 
%03d %d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd += \" %s/scripts/%s %s %s %06d %03d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)", "def execute_waypoint_sequence(detail_of_trip):\n\n # rets (route_line, line_points)\n sliced_route_and_line_points = chunk_user_route(detail_of_trip)\n\n sliced_route = sliced_route_and_line_points[0]\n line_points = sliced_route_and_line_points[1]\n\n # Interpolate/Break into 1/10 segments\n segmented_points = interpolate_points(sliced_route, line_points)\n waypoints = find_crime_areas(segmented_points)\n\n # print \"segmented_points\", json.dumps(segmented_points, indent=2)\n print \"\\n\\n\\n\\n\" # compensating for the giant GET request\n return waypoints", "def test_cmdlineproc_test4():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--replicates\", \"1000\", \"--disconnect\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"\"\n assert parameters[\"executableargs\"] == \"\"\n assert longbowargs == [\"--hosts\", \"hosts.file\", \"--jobname\", \"test\",\n \"--replicates\", \"1000\", \"--disconnect\"]", "def parse_args(args):\n parser = argparse.ArgumentParser(\n description=\"Download SMAP data. Register at https://urs.earthdata.nasa.gov/ first.\"\n )\n parser.add_argument(\n \"localroot\", help='Root of local filesystem where the data is stored.')\n parser.add_argument(\n \"-s\",\n \"--start\",\n type=mkdate,\n help=(\n \"Startdate. Either in format YYYY-MM-DD or YYYY-MM-DDTHH:MM.\"\n \" If not given then the target folder is scanned for a start date.\"\n \" If no data is found there then the first available date of the product is used.\"\n ))\n parser.add_argument(\n \"-e\",\n \"--end\",\n type=mkdate,\n help=(\"Enddate. Either in format YYYY-MM-DD or YYYY-MM-DDTHH:MM.\"\n \" If not given then the current date is used.\"))\n parser.add_argument(\n \"--product\",\n type=str,\n default=\"SPL3SMP.008\",\n help='SMAP product to download. (default: SPL3SMP.008).'\n ' See also https://n5eil01u.ecs.nsidc.org/SMAP/ ')\n parser.add_argument(\n \"--filetypes\",\n nargs=\"*\",\n default=[\"h5\", \"nc\"],\n help=\"File types (extensions) to download. Files with\"\n \"other extensions are ignored. 
\"\n \"Default is equivalent to --filetypes h5 nc\")\n parser.add_argument(\"--username\", help='Username to use for download.')\n parser.add_argument(\"--password\", help='password to use for download.')\n parser.add_argument(\n \"--n_proc\",\n default=1,\n type=int,\n help='Number of parallel processes to use for downloading.')\n args = parser.parse_args(args)\n # set defaults that can not be handled by argparse\n\n if args.start is None or args.end is None:\n first, last = folder_get_first_last(args.localroot)\n if args.start is None:\n if last is None:\n args.start = get_start_date(args.product)\n else:\n args.start = last\n if args.end is None:\n args.end = datetime.now()\n\n args.urlroot = 'https://n5eil01u.ecs.nsidc.org'\n args.urlsubdirs = ['SMAP', args.product, '%Y.%m.%d']\n args.localsubdirs = ['%Y.%m.%d']\n\n print(\n f\"Downloading SMAP {args.product} data from {args.start.isoformat()} \"\n f\"to {args.end.isoformat()} into folder {args.localroot}.\")\n\n return args", "def main(argv = sys.argv[1:]):\n\n parser = OptionParser(\"\"\"usage: %prog RESULT_DIR RUNNIGN_SCRIPT <optional parameters>\"\"\")\n\n # OPTIONAL\n group_opt = OptionGroup(parser, \"Optional parameters\",\n \"Defaults should normally be fine for these options in order to run batch\")\n group_opt.add_option(\"-r\", \"--repeats\",\n type=\"int\", default=50,\n help=\"\"\"number of repeats of each configuration\"\"\")\n group_opt.add_option(\"-a\", \"--args\",\n type=\"string\", default=\"//home//veredhi//EMIRGE_SMURF//cluster_summary.csv\",\n help=\"arguments csv path\")\n\n parser.add_option_group(group_opt)\n\n # ACTUALLY PARSE ARGS\n (options, args) = parser.parse_args(argv)\n\n # minimal sanity checking of input\n if len(args) != 2:\n parser.error(\n \"RESULT_DIR ans RUNNIGN_SCRIPT are required, and all options except should have a flag associated with them (options without flags: %s)\" % args)\n\n params_dict['HOME_PATH'] = os.path.join(os.path.abspath(args[0]), \"CONFIG_INDEX\")\n params_dict['REPEATS'] = str(options.repeats)\n\n args_path = os.path.abspath(options.args)\n params_dict['SCRIPT_PATH'] = os.path.abspath(args[1])\n\n args_df = pd.read_csv(args_path)\n\n for i in range(args_df.shape[0]):\n params_dict['CONFIG_INDEX'] = str(i)\n\n mkdir_if_not_exists(replace_in_string(params_dict['HOME_PATH'], params_dict))\n mkdir_if_not_exists(replace_in_string(params_dict['SLURM_OUTPUT_FOLDER'], params_dict))\n mkdir_if_not_exists(replace_in_string(params_dict['SBATCH_PATH'], params_dict))\n mkdir_if_not_exists(replace_in_string(params_dict['PYTHON_OUTPUT_FOLDER'], params_dict))\n mkdir_if_not_exists(replace_in_string(params_dict['TMP_FOLDER'], params_dict))\n\n row = args_df.iloc[i]\n params_dict[\"c_bases\"] = str(row['number of changed bases'])\n params_dict[\"c_references\"] = str(row['number of changed bacterias'])\n params_dict[\"mix_size\"] = str(row['mock_mixure_size'])\n params_dict[\"unique\"] = str(row['unique_bacteria_in_mixture'])\n\n for j in range(int(params_dict['REPEATS'])):\n\n index=str(j)\n test_name = \"test_\" + index\n\n params_dict[\"index\"] = index\n\n if os.path.isfile(os.path.join(replace_in_string(params_dict[\"PYTHON_OUTPUT_FOLDER\"], params_dict),\n test_name, \"emirge_smurf_WFalseSTrue.csv\")):\n continue\n\n params_dict[\"out_file\"] = index + '_out'\n params_dict[\"err_file\"] = index + '_err'\n\n sbatch_text = replace_in_string(SBATCH_TEMPLATE, params_dict)\n\n sbatch_file_path = os.path.join(replace_in_string(params_dict['SBATCH_PATH'], params_dict), test_name)\n with 
open(sbatch_file_path, 'w') as f:\n                f.write(sbatch_text)\n\n            command = 'sbatch \"{}\"'.format(sbatch_file_path)\n            process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n            status = process.wait()\n            while status != 0:\n                status = process.wait()\n            print(\"Add {} {}\".format(i, test_name))", "def parse_args():\n    parser = argparse.ArgumentParser(description='use STAR for mapping reads')\n    #input files\n    parser.add_argument('-s', '--sequences',\n                        help='number of sequences to take for testing')\n    parser.add_argument('--tmpdir',\n                        help='tmp directory',default=\"/tmp/\")\n    parser.add_argument('--input_dir',\n                        help='optional: Choose input directory')\n    parser.add_argument('--reads_R1',\n                        help='Forward unmerged reads')\n    parser.add_argument('--reads_R2',\n                        help='Reverse unmerged reads')\n    parser.add_argument('--merged',\n                        help='merged watson and crick fastq')\n    parser.add_argument('--reference',\n                        help='reference clusters')\n    parser.add_argument('--refgenome',\n                        help='reference clusters')\n    parser.add_argument('--barcodes',\n                        help='Barcodes used in output')\n    parser.add_argument('--species',\n                        help='Species: if selected only that species will be put in BAM RG header')\n    parser.add_argument('--threads',\n                        help='Number of threads to use where multithreading is possible')\n    parser.add_argument('--output_dir',\n                        help='Choose output directory')\n    parser.add_argument('--extraflags',\n                        help='extra flags for testing')\n    args = parser.parse_args()\n    if args.input_dir:\n        args.reads_R1 = os.path.join(args.input_dir,'Unassembled.R1.watson.fq.gz')\n        args.reads_R2 = os.path.join(args.input_dir,'Unassembled.R2.crick.fq.gz')\n        args.merged = os.path.join(args.input_dir,'Assembled.fq.gz')\n        args.reference = os.path.join(args.input_dir,'consensus_cluster.renamed.fa')\n    if args.output_dir:\n        if not os.path.exists(args.output_dir):\n            os.mkdir(args.output_dir)\n        if 'log' not in args:\n            args.log = os.path.join(args.output_dir,'mapping_variantcalling.log')\n        args.watson_vcf = os.path.join(args.output_dir,'watson.vcf')\n        args.crick_vcf = os.path.join(args.output_dir,'crick.vcf')\n        args.snp_vcf = os.path.join(args.output_dir,'snp.vcf')\n        args.methylation_vcf = os.path.join(args.output_dir,'methylation.vcf')\n        args.heatmap = os.path.join(args.output_dir,'heatmap.igv')\n        #2 bed files should be made for subsequent analysis using Rnbeads or other software\n        args.mastermeth = os.path.join(args.output_dir,'methylation.bed')\n    args.tmpdir = tempfile.mkdtemp(suffix='STAR', prefix='tmp', dir=args.tmpdir)\n    return args", "def main():\n    args = load_args()\n\n    perturbation_file = args.perturbation_file\n    vm_params = load_yaml(args.vm_params_location)\n    processes = args.n_processes\n    verbose = args.verbose\n\n    if args.perturbation:\n        if args.model:\n            perturbation_model = pd.read_csv(args.model)\n            generate_velocity_model_perturbation_file_from_model(\n                vm_params, perturbation_model, perturbation_file, processes, verbose\n            )\n        elif args.parameter_file:\n            common_params, layer_params = load_parameter_file(args.parameter_file)\n            generate_velocity_model_perturbation_file_from_config(\n                common_params, layer_params, perturbation_file, processes, verbose\n            )\n    else:\n        create_constant_vm_file(\n            perturbation_file, vm_params[\"nx\"] * vm_params[\"ny\"] * vm_params[\"nz\"]\n        )\n\n    if args.fault_damage_zone:\n        apply_fault_damage_zone(\n            srf_location=args.srf_location,\n            vm_params=vm_params,\n            pert_f_location=perturbation_file,\n            depth_km=args.depth_km,\n            max_depth_km=args.max_depth_km,\n            width_km=args.width_km,\n            
max_width_km=args.max_width_km,\n min_damage_velocity=args.max_velocity_drop,\n n_processes=processes,\n )", "def main():\n parser = argparse.ArgumentParser(\n description=\"Returns back the entire solution graph.\")\n parser.add_argument(\"-M\", \"--master\", type=str, default=\"local[8]\",\n help=\"url of the master for this job\")\n parser.add_argument(\"-O\", \"--output\", type=str, default=\"solution-out\",\n help=\"name of the output file\")\n parser.add_argument(\"-H\", \"--height\", type=int, default=2,\n help=\"height of the puzzle\")\n parser.add_argument(\"-W\", \"--width\", type=int, default=2,\n help=\"width of the puzzle\")\n args = parser.parse_args()\n\n\n # open file for writing and create a writer function\n output_file = open(args.output, \"w\")\n writer = lambda line: output_file.write(line + \"\\n\")\n\n # call the puzzle solver\n solve_sliding_puzzle(args.master, writer, args.height, args.width)\n\n # close the output file\n output_file.close()", "def exec_from_args(args):\n outfolder = args.folder + '/normal/'\n check(outfolder, 'm')\n\n makeconfig(str(args.gene_names), str(args.sequences), str(args.org_included),\n len_threshold=args.len_threshold,\n its=str(args.its), query_cover=str(args.query_cover), identity=str(args.identity),\n distance=str(args.string_distance), subsp=str(args.subsp), excluded=str(args.org_excluded),\n remote=str(args.remote_blast), folder=args.folder, date=args.today, blacklist=args.blacklist,\n synonyms=args.synonyms)\n\n r = Run('n', args.folder, args.debug)\n r.start()", "def main(args):\n options = parse_cmd_parameters_(args)\n execute_(options)", "def run_genomemap(args):\n genome_map(args)", "def main(matrix,model,processors,algorithm):\n if algorithm == \"raxml-ng\":\n ab = subprocess.call(['which', 'raxml-ng'])\n if ab == 0:\n pass\n else:\n print(\"RAxML must be in your path as raxml-ng\")\n sys.exit()\n elif algorithm == \"raxml-HPC\":\n ab = subprocess.call(['which', 'raxmlHPC-PTHREADS-SSE3'])\n if ab == 0:\n pass\n else:\n print(\"RAxML must be in your path as raxmlHPC-PTHREADS-SSE3\")\n sys.exit()\n last=get_field_index(matrix)\n matrix_to_fasta(matrix, last)\n #Prep the creation of the FASTA file, removing odd characters\n os.system(\"sed 's/://g' all.fasta | sed 's/,//g' > out.fasta\")\n if model == \"ASC_GTRGAMMA\":\n subprocess.check_call(\"raxmlHPC-SSE3 -f d -p 12345 -m %s -s out.fasta -n nasp --asc-corr=lewis --no-bfgs > /dev/null 2>&1\" % model, stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"raxmlHPC-SSE3 -f e -m %s -s out.fasta -t RAxML_bestTree.nasp -n PARAMS --asc-corr=lewis --no-bfgs > /dev/null 2>&1\" % model, stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n else:\n if algorithm == \"raxml-HPC\":\n subprocess.check_call(\"raxmlHPC-PTHREADS-SSE3 -T %s -f d -p 12345 -m %s -s out.fasta -n nasp --no-bfgs > /dev/null 2>&1\" % (processors,model), stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"raxmlHPC-PTHREADS-SSE3 -T %s -f e -m %s -s out.fasta -t RAxML_bestTree.nasp -n PARAMS --no-bfgs > /dev/null 2>&1\" % (processors,model), stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n elif algorithm == \"raxml-ng\":\n subprocess.check_call(\"raxml-ng --msa out.fasta --model GTR+G --threads %s --prefix nasp\" % processors,stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n if algorithm == \"raxml-HPC\":\n subprocess.check_call(\"mv RAxML_bestTree.nasp 
nasp_raxml.tree\", shell=True)\n subprocess.check_call(\"mv RAxML_binaryModelParameters.PARAMS nasp.PARAMS\", shell=True)\n subprocess.check_call(\"rm RAxML_* out.fasta all.fasta\", shell=True)\n else:\n subprocess.check_call(\"mv nasp.raxml.bestTree nasp_raxml.tree\", stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"rm nasp.raxml.startTree out.fasta all.fasta\", stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n print(\"Model used: %s\" % model)", "def __main__():\r\n\tparser = optparse.OptionParser()\r\n\tparser.add_option(\"-i\", \"--input\", default=None, dest=\"input\",\r\n\t\t\t\t\t help=\"The input bam file\")\r\n\tparser.add_option(\"-c\", \"--chromfile\", default=None, dest=\"chromfile\",\r\n\t\t\t\t\t help=\"The input list of chromosomes \")\r\n\tparser.add_option(\"-d\", \"--directory\", default=None, dest=\"directory\",\r\n\t\t\t\t\t\thelp=\"directory\")\r\n\tparser.add_option(\"-l\", \"--lane\", default=None, dest=\"lane\",\r\n\t\t\t\t\t\thelp=\"Lane\")\t\t\t\t \r\n\tparser.add_option(\"-o\", \"--output\", default=None, dest=\"output\",\r\n\t\t\t\t\t help=\"The output file\")\r\n\t(options, args) = parser.parse_args()\r\n\t\r\n\tif not options.input:\r\n\t\tparser.error(\"Need to specify the input file\")\r\n\tif not options.chromfile:\r\n\t\tparser.error(\"Need to specify the list of chromosomes file\")\r\n\tif not options.output:\r\n\t\tparser.error(\"Need to specify the output file\")\r\n\r\n\twith open(options.output, \"w\") as outfile:\r\n\t\twith open(options.chromfile, \"r\") as handle:\r\n\t\t\tfor line in handle:\r\n\t\t\t\tline = line.rstrip()\r\n\t\t\t\t#p = subprocess.run([\"echo %s\" % line], shell=True, check=True, stdout=subprocess.PIPE)\r\n\t\t\t\tp = subprocess.run([\"samtools depth -r %s %s/%s | sort -n -k 3 | tail -n 1\" % (line, options.directory, options.input)], shell=True, check=True, stdout=subprocess.PIPE)\r\n\t\t\t\toutfile.write(p.stdout.decode('utf-8').rstrip() + \"\\t%s\" % options.input + \"\\t%s\\n\" % options.lane)", "def exec_ripser(data_path,output_path,max_dim,input_file='input.txt',format_file = 'lower-distance',threshold=None):\n ############# RIPSER ####################\n # high dimension\n ## execfile ripser (OUTPUT from ripser)\n start = timeit.default_timer() \n print 'input_file ',input_file\n input_file_full = os.path.join(data_path,input_file)\n output_file_full = os.path.join(output_path,'output_ripser.txt')\n\n if threshold is None:\n ripser_arguments = 'ripser --format %s --dim %i %s'%(format_file,max_dim,input_file_full)\n else:\n ripser_arguments = 'ripser --format %s --dim %i --threshold %f %s'%(format_file,max_dim,threshold,input_file_full)\n \n ripser_call(ripser_arguments.split(' '),output_file_full)\n #os.system(ripser_call) # OLD CALL BASED ON executable\n\n stop = timeit.default_timer()\n print 'Ripser execution time '\n print stop - start \n input_file_path = os.path.join(data_path,'input.txt')\n if(os.path.isfile(input_file_path)):\n os.remove(input_file_path) ## remove auxiliar file with lower matrix used as input for Ripser\n return()", "def main_parse_args():\n parser = ArgumentParser()\n parser.add_argument('infile', help='path to the file to be mapped.It should\\\n contain one identifer on each line.')\n parser.add_argument('-rh', '--redis_host', default=DEFAULT_REDIS_URL,\n help='url of Redis db')\n parser.add_argument('-rp', '--redis_port', default=DEFAULT_REDIS_PORT,\n help='port for Redis db')\n parser.add_argument('-rps', '--redis_pass', 
default=DEFAULT_REDIS_PASS,\n                        help='password for Redis db')\n    parser.add_argument('-of', '--outfile', default=None,\n                        help='path to the output file')\n    parser.add_argument('-sh', '--source_hint', help='suggestion for ID source \\\n                        database used to resolve ambiguities in mapping',\n                        default=DEFAULT_HINT)\n    parser.add_argument('-t', '--taxon', help='taxon id of species of all gene \\\n                        names', default=DEFAULT_TAXON)\n    myargs = parser.parse_args()\n    return myargs", "def parse_arguments_for_single_machine():\n\n    # setup command line parser and parse arguments\n    parser = argparse.ArgumentParser()\n\n    arg_help = 'the path of the point sprite data (positions and intensities)' \n    parser.add_argument('--input-directory',\n                        action='store',\n                        dest='input_directory',\n                        help=arg_help)\n    \n    arg_help = 'the directory where the configuration files will be generated'\n    parser.add_argument('--output-directory',\n                        action='store',\n                        dest='output_directory',\n                        help=arg_help)\n\n    arg_help = 'the point sprite header file (meta data of the sprite)'\n    parser.add_argument('--psh-file',\n                        action='store', default='NO_FILE_PROVIDED',\n                        dest='psh_file',\n                        help=arg_help)\n    \n    arg_help = 'the data configuration file for the circuit and the sensor'\n    parser.add_argument('--data-config-file',\n                        action='store', default='NO_FILE_PROVIDED',\n                        dest='data_config_file',\n                        help=arg_help)\n\n    arg_help = 'simulation method, direct-sprite, linear-sprite, ... '\n    parser.add_argument('--simulation-method',\n                        action='store', default='direct-sprite',\n                        dest='simulation_method',\n                        help=arg_help)\n\n    arg_help = 'the template pbrt sensor configuration file for sprite'\n    parser.add_argument('--pbrt-sprite-sensor-config',\n                        action='store', default='NO_FILE_PROVIDED',\n                        dest='pbrt_sprite_sensor_config',\n                        help=arg_help)\n    \n    arg_help = 'the template pbrt sensor configuration file for volume'\n    parser.add_argument('--pbrt-volume-sensor-config',\n                        action='store', default='NO_FILE_PROVIDED',\n                        dest='pbrt_volume_sensor_config',\n                        help=arg_help)\n\n    arg_help = 'the path of the pbrt executable that will run the simulation'\n    parser.add_argument('--pbrt-executable',\n                        action='store', default='pbrt', # installed\n                        dest='pbrt_executable',\n                        help=arg_help)\n\n    arg_help = 'the path of the sprite volumizer executable that will ' \\\n               'convert the sprite to a volume '\n    parser.add_argument('--volumizer-executable',\n                        action='store', default='volumizesprite', # installed \n                        dest='volumizer_executable',\n                        help=arg_help)\n    \n    arg_help = 'the path of the spritebounds executable that will ' \\\n               'quickly extract the bounds of the sprite to get the sensor data'\n    parser.add_argument('--sprite-bounds-executable',\n                        action='store', default='spritebounds', # installed \n                        dest='spritebounds_executable',\n                        help=arg_help)\n\n    arg_help = 'resolution of the grid converted from the sprite'\n    parser.add_argument('--grid-resolution',\n                        action='store', default='512',\n                        dest='grid_resolution',\n                        help=arg_help)\n\n    arg_help = 'running node, cluster or local, cluster by default'\n    parser.add_argument('--node',\n                        action='store', default='cluster',\n                        dest='node',\n                        help=arg_help)\n    \n    arg_help = 'the base (maximum) resolution of the sensor'\n    parser.add_argument('--sensor-resolution',\n                        action='store', default='512', \n                        dest='sensor_resolution',\n                        help=arg_help)\n\n    # parse the arguments\n    args = parser.parse_args()\n    \n    return args", "def run():\n    import argparse\n    parser = argparse.ArgumentParser(description=\"Create and solve mazes\")\n    parser.add_argument(\"-c\", 
\"--cli\", help=\"Switch to CLI mode\", action='store_true')\n parser.add_argument(\"-f\", \"--file\", help=\"File to import map from\")\n parser.add_argument(\"-s\", \"--start\", help=\"Starting position in the maze\")\n parser.add_argument(\"-e\", \"--end\", help=\"Ending position in the maze\")\n args = parser.parse_args()\n if args.file:\n myfile = args.file\n else:\n myfile = 'map1.txt'\n with open(myfile, 'r') as mapfile:\n maze_str = mapfile.read()\n maze = Maze(maze_str, cli=args.cli, start=parse_seq(args.start), finish=parse_seq(args.end))\n maze.game_loop()", "def _main():\n import argparse\n\n # Get command line arguments\n parser = argparse.ArgumentParser(description=\"This wrapper script will run \\\n a pickled Python function on \\\n some pickled retrieved data \\\n via 0MQ. You almost never \\\n want to run this yourself.\")\n parser.add_argument('home_address',\n help='IP address of submitting host.')\n parser.add_argument('module_dir',\n help='Directory that contains module containing pickled\\\n function. This will get added to PYTHONPATH \\\n temporarily.')\n args = parser.parse_args()\n\n # Make warnings from built-in warnings module get formatted more nicely\n logging.captureWarnings(True)\n logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +\n '%(message)s'), level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n logger.info(\"Appended {0} to PYTHONPATH\".format(args.module_dir))\n sys.path.insert(0, args.module_dir)\n\n logger.debug(\"Job ID: %i\\tHome address: %s\\tModule dir: %s\",\n os.environ['JOB_ID'],\n args.home_address, args.module_dir)\n\n # Process the database and get job started\n _run_job(os.environ['JOB_ID'], args.home_address)", "def test_cmdlineproc_test12():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"--hosts\", \"hosts.file\", \"--bogus\", \"--jobname\", \"test\",\n \"--replicates\", \"1000\", \"--disconnect\", \"pmemd.MPI\",\n \"-O\", \"-i\", \"ex.in\", \"-c\", \"ex.min\", \"-p\", \"ex.top\",\n \"-o\", \"ex.out\"]\n\n with pytest.raises(exceptions.CommandlineargsError):\n\n _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)", "def main(options):\n coords = []\n if options.verbose == True:\n print \"Reading sequences from coordinate file %s\" % (options.coords)\n try:\n corfile = gzip.open(options.coords, 'rb')\n except IOError:\n raise IOError('Cannot read from coordinate file %s' % (options.coords))\n\n for c in corfile:\n try:\n c = c.rstrip().split()\n coords.append([c[0], int(c[2]), int(c[3]), c[1]])\n except ValueError, e:\n raise ValueError(\"Wrong coordinates: %s\" % \" \".join(c))\n\n\n # Read mRNA sequences into hash table: {id:sequence}\n if options.verbose == True:\n print \"Reading sequences from mRNA file %s\" % (options.seq)\n try:\n seq_obj = open(options.seq, 'Ur')\n except IOError:\n raise IOError('Cannot read from mRNA file %s' % (options.seq))\n\n mRNAseqs = {}\n for seq in SeqIO.parse(seq_obj, 'fasta'):\n mRNAseqs[str(seq.id)] = str(seq.seq)\n\n\n # Open output file and write first lines\n try:\n outfile = gzip.open(options.out, 'wb')\n except IOError:\n raise IOError(\"Connot open output file %s\" % (options.out))\n # outfile.write('#siteID\\tdistToBoundary\\n')\n\n # Iterate through the binding coordinates to calculate their 
score\n    if options.verbose == True:\n        print \"Calculating distance to boundary... \"\n    for (mrnaid, lowerix, upperix, mirnas) in coords:\n        if mrnaid in mRNAseqs:\n            mrnasequ = mRNAseqs[mrnaid]\n            score = calculate_distance_to_boundary(len(mrnasequ), lowerix)\n            for mirna in mirnas.split(\",\"):\n                outtext = '%s,%s,%i,%i\\t%s\\n' % (mrnaid,\n                                                 mirna,\n                                                 lowerix,\n                                                 upperix,\n                                                 score)\n                outfile.write(outtext)\n        else:\n            for mirna in mirnas.split(\",\"):\n                outtext = '%s,%s,%i,%i\\t%s\\n' % (mrnaid,\n                                                 mirna,\n                                                 lowerix,\n                                                 upperix,\n                                                 \"NA\")\n                outfile.write(outtext)\n\n\n    outfile.close()", "def main(argv=None):\n\n    if argv is None:\n        argv = sys.argv[1:]\n\n    parser = argparse.ArgumentParser(description=__description__)\n\n    # Positionals\n    parser.add_argument(\"fasta_file\",help=\"fasta file to be turned into kmers\")\n\n    # Options\n    parser.add_argument(\"-o\",\"--outbase\",help=\"base name for output files\",action=\"store\",type=str,default=None)\n    parser.add_argument(\"-k\",\"--kmersize\",help=\"kmer size\",action=\"store\",type=int,default=12)\n    parser.add_argument(\"-s\",\"--seqperfile\",help=\"number of sequences per output file\",action=\"store\",\n                        type=int,default=50000)\n    parser.add_argument(\"-n\",\"--numkmers\",\n                        help=\"Number of kmers to make, starting from most to least common. If -1, make all possible.\",\n                        type=int,default=1000000)\n\n    args = parser.parse_args(argv)\n\n    if args.outbase is None:\n        out_base = args.fasta_file\n    else:\n        out_base = args.outbase\n\n    parse_proteome(args.fasta_file,kmer_size=args.kmersize,out_base=out_base,\n                   seq_per_file=args.seqperfile,num_to_write=args.numkmers)", "def process_command_line():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"srcDir\", type=str, help=\"Directory containing Unit Hydrograph grids to be aggregated\")\n    parser.add_argument(\"gridFile\", type=str, help=\"Input netCDF target grid\")\n    parser.add_argument(\"--remapDir\", type=str, help=\"Directory containing Output Unit Hydrograph grids\")\n    parser.add_argument(\"--aggDir\", type=str, help=\"Directory where to store aggregated files (before remap)\")\n    parser.add_argument(\"--inPrefix\", type=str, help=\"Input Unit Hydrograph File Prefix (default=UH_)\",default='UH_')\n    parser.add_argument(\"--outPrefix\", type=str, help=\"Output Unit Hydrograph File Prefix (default=Agg_UH_)\", default=\"Agg_UH_\")\n    parser.add_argument(\"--time\", type=str, help=\"Input Unit Hydrograph time variable name (default=time)\",default='time')\n    parser.add_argument(\"--lon\", type=str, help=\"Input Unit Hydrograph longitude variable name (default=lon)\",default='lon')\n    parser.add_argument(\"--lat\", type=str, help=\"Input Unit Hydrograph latitude variable name (default=lat)\",default='lat')\n    parser.add_argument(\"--fraction\", type=str, help=\"Input Unit Hydrograph fraction variable name (default=fraction)\",default='fraction')\n    parser.add_argument(\"--unit_hydrograph\",type=str, help=\"Input unit hydrograph variable name (default=unit_hydrograph)\",default='unit_hydrograph')\n    parser.add_argument(\"--xc\", type=str, help=\"Input target grid longitude variable (default=xc)\",default='xc')\n    parser.add_argument(\"--yc\", type=str, help=\"Input target grid latitude variable (default=yc)\",default='yc') \n    parser.add_argument(\"--testAgg\",help=\"Do a test aggregation, where all input points are aggregated into one file, remapping can be done afterwards using the --remap flag\",action=\"store_true\")\n    parser.add_argument(\"--cdoDebug\",help=\"Enable CDO debugging (prints each step to 
screen)\",action=\"store_true\")\n parser.add_argument(\"--cdoForce\",help=\"Enable CDO force output (will overwrite existing files during remap)\",action=\"store_true\")\n parser.add_argument(\"--verbose\",help=\"Make script verbose\",action=\"store_true\")\n parser.add_argument(\"--remap\",help=\"Remap the aggregated Unit Hydrographs to outDir and put the aggregated files in the tempDir\",action='store_true')\n parser.add_argument(\"--agg\",help=\"Aggregate the input files onto the targetGrid (gridFile)\",action='store_true')\n parser.add_argument(\"--fill_value\",type=float,help=\"value to use as masked value (default=9.96920996839e+36)\",default = 9.96920996839e+36)\n parser.add_argument(\"--pad\",type=int,help=\"Set number of empty cells to include around each aggregated basin (default=10)\",default=10)\n parser.add_argument(\"--resolution\",type=float,help=\"Set resolution of input Unit Hydrographs (default=1/16.)\",default=1/16.)\n parser.add_argument(\"--clean\",help=\"Clean up aggregated Unit Hydrograph grids if remapping\", action='store_true')\n parser.add_argument(\"--dryrun\",help=\"Do the mapping between the source and target grid based on the files in the input directory, return the performance stats for the run\", action='store_true')\n args = parser.parse_args()\n\n options = {}\n paths = {}\n # parse the basics\n Rvars = (args.time,args.lon,args.lat,args.fraction,args.unit_hydrograph)\n Cvars = (args.yc,args.xc)\n paths['srcDir'] = args.srcDir\n paths['gridFile'] = args.gridFile\n\n if args.aggDir:\n paths['aggDir'] = args.aggDir\n else:\n paths['aggDir'] = os.path.join(paths['srcDir'],'../aggregated/')\n if not os.path.exists(paths['aggDir']):\n os.makedirs(paths['aggDir'])\n\n options['verbose'] = args.verbose\n options['fill_value'] = args.fill_value\n options['pad'] = args.pad\n options['resolution'] = args.resolution\n options['inPrefix'] = args.inPrefix\n options['outPrefix'] = args.outPrefix\n options['dryrun'] = args.dryrun\n options['testAgg'] = args.testAgg\n options['clean']=args.clean\n options['remap']=args.remap\n options['agg']=args.agg\n \n if options['remap']:\n cdo.debug=args.cdoDebug\n cdo.forceOutput=args.cdoForce\n if args.remapDir:\n paths['remapDir'] = args.remapDir\n else:\n paths['remapDir'] = os.path.join(paths['srcDir'],'../remaped/')\n if not os.path.exists(paths['remapDir']):\n os.makedirs(paths['remapDir'])\n print paths['remapDir'] \n\n return Rvars,Cvars,paths,options", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--ID-file\", dest=\"ID_file\", type=str,\n help=\"Supply txt file with Sample Name assigned to Sequencing Number\")\n\n parser.add_argument(\"--Infile-dir\", dest=\"Infile_Dir\", type=str,\n help=\"Supply path to files to be moved\")\n\n parser.add_argument(\"--Outfile-ID-1\", dest=\"Outfile1_ID\", type=str,\n help=\"Supply identifier that can be used to move file to a particular directory e.g. CMS\")\n\n parser.add_argument(\"--Outfile-ID-2\", dest=\"Outfile2_ID\", type=str,\n help=\"Supply identifier within file name that can be used to move file to a particular directory e.g. 
GFU\")\n\n parser.add_argument(\"--Outfile-dir-1\", dest=\"Outfile1_Dir\", type=str,\n help=\"Supply desired directory for file containing identifier given in --Outfile-ID-1\")\n\n parser.add_argument(\"--Outfile-dir-2\", dest=\"Outfile2_Dir\", type=str,\n help=\"Supply desired directory for file containing identifier given in --Outfile-ID-2\")\n\n \n # add common options (-h/--help, ...) and parse command line\n (args) = E.start(parser, argv=argv)\n\n ###############################################\n ###############################################\n ############## Execute Functions ##############\n ###############################################\n ###############################################\n\n\n IDDict = CreateIDDict(args.ID_file)\n Shuttle(IDDict, args.Infile_Dir, args.Outfile1_ID, args.Outfile2_ID, args.Outfile1_Dir, args.Outfile2_Dir)\n \n \n # write footer and output benchmark information.\n E.stop()", "def main():\n sys.argv.pop(0)\n (cmd, var, args) = process_options(sys.argv[:])\n execute(cmd, var, args)", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('-1',\n dest='r1',\n help='Path to first fasta file.')\n\n parser.add_argument('-2',\n dest='r2',\n help='Path to paired fasta file.')\n\n args = parser.parse_args()\n salmon(args.r1, args.r2)", "def process_command_line(argv):\n\tif argv is None:\n\t\targv = sys.argv[1:]\n\t\t\n\t# initialize the parser object:\n\tparser = optparse.OptionParser(\n\t\t\t formatter=optparse.TitledHelpFormatter(width=78),\n\t\t\t add_help_option=None)\n\n\t# define options here:\n\n\tparser.add_option(\n\t\t'-w', '--workdir',\n\t\thelp='Workdir where temporary and final files will be saved.')\n\n\tparser.add_option(\n\t\t'-a', '--assembly_file',\n\t\thelp='File with a list of assemblies for which a reference genome is to be determined.')\n\n\tparser.add_option(\n \t'--installation',\n \thelp='Pipeline installation.')\n\n parser.add_option(\n '--EDIRECT',\n help='edirect tools installation.')\n\n parser.add_option(\n '--QUAST',\n help='Quast installation.')\n\n parser.add_option(\n '--mail',\n help='Email for edirect.')\n\n\tparser.add_option(\n\t\t'-o', '--organism',\n\t\thelp='Organism to be searched for on NCBI Assembly.')\n\n\tparser.add_option(\n\t\t'--dont_delete', action=\"store_true\",\n\t\thelp='Do not delete temporary files after running.')\n\t\t\n\tparser.add_option(\n\t\t'-s', '--script', default=\"/home/users/yair/Documents/PhD_projects/project_B/bin/downloading_database/determine_best_genome.sh\",\n\t\thelp='Path of determine_best_genome.sh script')\n\n\tparser.add_option( # customized description; put --help last\n\t\t'-h', '--help', action='help',\n\t\thelp='Show this help message and exit.')\n\n\tsettings, args = parser.parse_args(argv)\n\n\treturn settings, args", "def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)", "def main():\n args = _parse_arguments()\n\n util.log_init(\"sitl_A%s_%s.txt\" % (args.id, util.get_latest_log(\"latest_sitl.txt\")), util.log_level[args.level])\n\n shared.AGENT_ID = 'A%s' % args.id\n shared.AGENT_COUNT = args.n\n shared.CURRENT_ALGORITHM = args.algorithm\n shared.AGENT_CHARACTER = args.character\n shared.des_alt = 
args.alt\n    \n    util.log_info(\"AGENT_ID = %s\" % shared.AGENT_ID)\n    util.log_info(\"Algorithm: %s\" % shared.CURRENT_ALGORITHM)\n    util.log_info(\"Agent type: %s\" % shared.AGENT_CHARACTER)\n\n    print \"Start simulator (SITL)\"\n    sitl = SITL(args.pix) # initialize SITL with firmware path\n\n    if shared.AGENT_ID in start_loc:\n        sitl_args = ['--home=%s' % start_loc[shared.AGENT_ID]]\n    else:\n        sitl_args = ['--home=%s' % start_loc['FFF']]\n    \n    # Pre-recorded coordinates.\n    #sitl_args = ['-I0', '--model', 'quad', '--home=31.301201,121.498192,9,353']\t\n    sitl.launch(sitl_args, await_ready=True, restart=True)\n\n    # Connect to the vehicle. (Spawn an instance of Vehicle named \"vehicle\")\n    # connection port is coded in the file name of the firmware like \"ac3.4.5_port5760\"\n    # use regular expression to search the string and extract port number\n    port = re.search(r'port\\d{4}', args.pix)\n    port = re.search(r'\\d{4}', port.group()).group()\n\n    print \"Connecting to copter on: TCP: 127.0.0.1:%s\" % port\n    copter = nav.connect('tcp:127.0.0.1:%s' % port, wait_ready=True, rate=20)\n    util.log_info(\"Copter connected. Firmware: %s\" % copter.version)\n    \n    if not args.xbee: # simulate XBee using ZeroMQ\n        [pub, sub] = comm.zmq_init(comm_port_list[shared.AGENT_ID], comm_port_list)\n        subscriber_thread = comm.Subscriber(shared.AGENT_ID, sub)\n        subscriber_thread.start()\n        xbee = pub # make xbee the publisher\n        util.log_info(\"ZeroMQ initialized.\") \n    \n    else: # use actual xbee ports\n        ser = serial.Serial(args.xbee, 57600)\n        xbee = comm.xbee_init(ser)\n        util.log_info(\"Xbee initialized.\")\n\n    info = \"IFO,%s connected with firmware %s\" % (shared.AGENT_ID, copter.version)\n    comm.xbee_broadcast(xbee, info)\n\n    _add_listeners(copter)\n\n    takeoff_thread = nav.Takeoff(copter, xbee, shared.des_alt, 3)\n    purge_thread = comm.Purge(shared.neighbors)\n    broadcast_thread = comm.Broadcast(shared.AGENT_ID, copter, xbee)\n    flocking_thread = _choose_algorithm(copter, xbee, shared.neighbors)\n\n    takeoff_thread.start()\n    takeoff_thread.join() # wait until takeoff procedure completed\n\n    if shared.status['airborne']: # only execute the threads when airborne\n        util.log_info(\"Copter is airborne, starting threads.\")\n        broadcast_thread.start()\n        purge_thread.start()\n        flocking_thread.start()\n\n    # main loop\n    while True:\n        try: time.sleep(.2)\n        except KeyboardInterrupt: break\n        \n        if shared.status['airborne']:\n            # echo exiting status\n            if shared.status['exiting']:\n                info = \"IFO,%s %s-ing.\" % (shared.AGENT_ID,shared.status['command'])\n                comm.xbee_broadcast(xbee, info)\n                util.log_info(info)\n\n            # if an rtl or land command is received, kill flocking and set the `exiting` flag\n            elif shared.status['command'] == 'RTL' or shared.status['command'] == 'LAND':\n                shared.status['thread_flag'] |= shared.FLOCKING_FLAG\n                nav.set_mode(copter, shared.status['command'])\n                shared.status['exiting'] = True\n\n        if not flocking_thread.is_alive(): # break the loop if finished\n            break\n\n    nav.wait_for_disarm(copter) # wait for disarm\n    comm.xbee_broadcast(xbee, 'IFO,%s terminated.' 
% shared.AGENT_ID)\n\n    # clean up\n    purge_thread.stop()\n    while purge_thread.is_alive(): \n        util.log_info('Waiting for purge to shutdown') \n        purge_thread.join(3)\n    util.log_info('Purge killed.')\n\n    broadcast_thread.stop()\n    while broadcast_thread.is_alive(): \n        util.log_info('Waiting for broadcast to shutdown') \n        broadcast_thread.join(3)\n    util.log_info('Broadcast killed.')\n\n    copter.close()\n    util.log_info(\"Copter shutdown.\")\n\n    if args.xbee:\n        xbee.halt()\n        ser.close()\n        util.log_info(\"Xbee and serial closed.\")\n    else:\n        subscriber_thread.stop()\n        while subscriber_thread.is_alive(): \n            util.log_info('Waiting for Subscriber to shutdown') \n            subscriber_thread.join(3)\n        util.log_info('Subscriber killed.')\n\n    sitl.stop()\n    util.log_info(\"SITL shutdown.\")", "def ParseArguments():\n\t#TODO: check input variable types!\n\t# check for integers and strings\n\t# check for distance and distance cutoff value: ONLY CERTAIN VALUES ALLOWED\n\targ_parser = argparse.ArgumentParser(description=\"Program to get background distribution matching user input SNPs on the following parameters {MAF, distance to nearest gene, gene density}\")\n\tsubparsers = arg_parser.add_subparsers(dest='subcommand',\n\t\t\t\t\t\t\t\t\t   title='subcommands in this script',\n\t\t\t\t\t\t\t\t\t   description='valid subcommands. set subcommand after main program required arguments',\n\t\t\t\t\t\t\t\t\t   help='You can get additional help by writing <program-name> <subcommand> --help')\n\n\t## Subparsers\n\targ_parser_annotate = subparsers.add_parser('annotate')\n\t#arg_parser_annotate.set_defaults(func=run_annotate)\n\targ_parser_match = subparsers.add_parser('match')\n\t#arg_parser_annotate.set_defaults(func=run_match)\n\n\n\targ_parser.add_argument(\"--user_snps_file\", help=\"Path to file with user-defined SNPs\", required=True) # TODO: make the program read from STDIN via '-'\n\targ_parser.add_argument(\"--output_dir\", help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\t#arg_parser.add_argument(\"--output_dir\", type=ArgparseAdditionalUtils.check_if_writable, help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\targ_parser.add_argument(\"--distance_type\", help=\"ld or kb\", required=True)\n\targ_parser.add_argument(\"--distance_cutoff\", help=\"r2, or kb distance\", required=True)\n\t# NEW: options\n\t#arg_parser.add_argument(\"--status_file\", help=\"Bool (switch, takes no value after argument); if set then logging is ENABLED.\", action='store_true')\n\t#arg_parser.add_argument(\"--status_file\", help=\"If set, a json file will be written. Value should be a filepath.\")\n\targ_parser.add_argument(\"--web\", help=\"If set, the program will run in web mode. VALUE should be a filepath to output (temporary) file - usually this will be the session_id. The web mode activates: 1) creating a status_obj and writing it to json file; 2) ENABLE writing a json report file;\")\n\targ_parser.add_argument(\"--NoLogger\", help=\"Bool (switch, takes no value after argument); if set then logging is DISABLED. 
Logfile will be placed in outputdir.\", action='store_true')\n\n\n\t### MATCH arguments\n\targ_parser_match.add_argument(\"--N_sample_sets\", type=int, help=\"Number of matched SNPs to retrieve\", required=True) # 1000 - \"Permutations?\" TODO: change name to --n_random_snp_sets or --N\n\t#TODO: add argument that describes if ABSOLUTE or PERCENTAGE deviation should be used\n\targ_parser_match.add_argument(\"--max_freq_deviation\", type=int,help=\"Maximal deviation of SNP MAF bin [MAF +/- deviation]\", default=5) # 5\n\targ_parser_match.add_argument(\"--max_distance_deviation\", type=int, help=\"Maximal PERCENTAGE POINT deviation of distance to nearest gene [distance +/- %%deviation]\", default=5) # 20000\n\t#TODO: CHECK THAT max_distance_deviation > 1 %\n\t#TODO: WHY IS max_genes_count_deviation type float!!!!????\n\targ_parser_match.add_argument(\"--max_genes_count_deviation\", type=float, help=\"Maximal PERCENTAGE POINT deviation of genes in locus [gene_density +/- %%deviation]\", default=5) # 0.2\n\targ_parser_match.add_argument(\"--set_file\", help=\"Bool (switch, takes no value after argument); if set then write out set files to rand_set..gz. Default is false\", action='store_true')\n\n\targs = arg_parser.parse_args()\n\n\treturn args", "def test_cmdlineproc_test1():\n\n    parameters = {\n        \"debug\": False,\n        \"disconnect\": False,\n        \"executable\": \"\",\n        \"executableargs\": \"\",\n        \"hosts\": \"\",\n        \"job\": \"\",\n        \"jobname\": \"\",\n        \"log\": \"\",\n        \"recover\": \"\",\n        \"resource\": \"\",\n        \"replicates\": \"\",\n        \"verbose\": False\n    }\n\n    commandlineargs = []\n\n    longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n    assert parameters[\"executable\"] == \"\"\n    assert parameters[\"executableargs\"] == \"\"\n    assert longbowargs == []", "def main():\r\n    # Instantiate the data problem.\r\n    data = create_data_model()\r\n\r\n    # Create the routing index manager.\r\n    manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n    # Create Routing Model.\r\n    routing = pywrapcp.RoutingModel(manager)\r\n\r\n\r\n    # Create and register a transit callback.\r\n    def distance_callback(from_index, to_index):\r\n        \"\"\"Returns the distance between the two nodes.\"\"\"\r\n        # Convert from routing variable Index to distance matrix NodeIndex.\r\n        from_node = manager.IndexToNode(from_index)\r\n        to_node = manager.IndexToNode(to_index)\r\n        return data['distance_matrix'][from_node][to_node]\r\n\r\n    transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n    # Define cost of each arc.\r\n    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n\r\n    # Add Capacity constraint.\r\n    def demand_callback(from_index):\r\n        \"\"\"Returns the demand of the node.\"\"\"\r\n        # Convert from routing variable Index to demands NodeIndex.\r\n        from_node = manager.IndexToNode(from_index)\r\n        return data['demands'][from_node]\r\n\r\n    demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n        demand_callback)\r\n    routing.AddDimensionWithVehicleCapacity(\r\n        demand_callback_index,\r\n        0,  # null capacity slack\r\n        data['vehicle_capacities'],  # vehicle maximum capacities\r\n        True,  # start cumul to zero\r\n        'Capacity')\r\n\r\n    # Setting first solution heuristic.\r\n    search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n    search_parameters.first_solution_strategy = (\r\n        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n\r\n    # Solve the problem.\r\n    assignment = 
routing.SolveWithParameters(search_parameters)\r\n\r\n    # Print solution on console.\r\n    if assignment:\r\n        print_solution(data, manager, routing, assignment)", "def runTriplexator(args):\n    global TRIPLEXATOR_LIBRARY_ENV\n    triplex_lib_path = os.environ.get(TRIPLEXATOR_LIBRARY_ENV)\n\n    if os.environ.get(TRIPLEXATOR_LIBRARY_ENV) is None:\n        print \"Please set the environment variable for the Triplexator library (\" + TRIPLEXATOR_LIBRARY_ENV + \").\"\n    else:\n        triplex_lib = cdll.LoadLibrary(triplex_lib_path)\n        arg_strings = args.split(' ')\n        arg_ptr = (c_char_p * (len(arg_strings) + 1))()\n\n        arg_ptr[0] = \"triplexator\"  # to simulate calling from cmd line\n        for i, s in enumerate(arg_strings):\n            arg_ptr[i + 1] = s\n\n        triplex_lib.pyTriplexator(len(arg_strings) + 1, arg_ptr)", "def main():\n    utl.calibrate(False)\n    undistort(False)\n    edge_detect(False)\n    transform(False)\n    identify_line(False)\n    lane_line(True)", "def system_parallel(cmdL, nproc=None, verbose=True):\n    if nproc is None:\n        nproc = multiprocessing.cpu_count()\n    sh_filename = '_run_parallel_' + hashlib.md5('\\n'.join(cmdL).encode('utf-8')).hexdigest()\n    with open(sh_filename, 'wt') as f:\n        f.write('\\n'.join(cmdL))\n    out = subprocess.check_output('parallel -j%d %s--keep-order < %s' % (nproc, '--verbose ' if verbose else '', sh_filename), shell=True)\n    out = out.decode('utf-8')\n    if verbose:\n        print('-'*80)\n        print('system_parallel output:')\n        print('-'*80)\n        print(out)\n    os.remove(sh_filename)\n    return out", "def parse_arguments():\n\n    info = 'Divides pdb info files for parallelization'\n    parser = argparse.ArgumentParser(description=info)\n\n    # program arguments\n    parser.add_argument('-f', '--in-file',\n                        type=str,\n                        required=True,\n                        help='PDB info file to divide')\n    parser.add_argument('-n', '--num-splits',\n                        default=1000,\n                        type=int,\n                        help='Number of splits to perform (Default: 1000)')\n    parser.add_argument('-m', '--mut-file',\n                        type=str,\n                        required=True,\n                        help='File containing mutation information')\n    parser.add_argument('--split-dir',\n                        default = \"../data/split_pdbs/\",\n                        type=str,\n                        help='Output directory for split PDB info files')\n\n    args = parser.parse_args()\n    opts = vars(args)\n    return opts", "def main():\r\n\r\n    option_parser, opts, args = parse_command_line_parameters(**script_info)\r\n\r\n    # additional option checks\r\n    if opts.chimera_detection_method == 'blast_fragments':\r\n        if not (opts.blast_db or opts.reference_seqs_fp):\r\n            option_parser.error('Must provide either --blast_db or' +\r\n                                ' --reference_seqs_fp and --id_to_taxonomy_fp when' +\r\n                                ' method is blast_fragments.')\r\n        if not opts.id_to_taxonomy_fp:\r\n            option_parser.error('Must provide --id_to_taxonomy_fp when method' +\r\n                                ' is blast_fragments.')\r\n        if opts.num_fragments < 2:\r\n            option_parser.error('Invalid number of fragments (-n %d). Must be >= 2.'\r\n                                % opts.num_fragments)\r\n    elif opts.chimera_detection_method == 'ChimeraSlayer':\r\n        if not opts.aligned_reference_seqs_fp:\r\n            option_parser.error(\"Must provide --aligned_reference_seqs_fp \"\r\n                                \"when using method ChimeraSlayer\")\r\n    elif opts.chimera_detection_method == 'usearch61':\r\n        if opts.suppress_usearch61_ref and opts.suppress_usearch61_denovo:\r\n            option_parser.error(\"Suppressing both de novo and reference \"\r\n                                \"chimera detection not allowed.\")\r\n        if not opts.reference_seqs_fp and not opts.suppress_usearch61_ref:\r\n            option_parser.error(\"--reference_seqs_fp required for reference \"\r\n                                \"based chimera detection, suppress reference based chimera \"\r\n                                \"detection with 
--suppress_usearch61_ref\")\r\n if opts.reference_seqs_fp:\r\n try:\r\n temp_f = open(opts.reference_seqs_fp, \"U\")\r\n temp_f.close()\r\n except IOError:\r\n raise IOError(\"Unable to open --reference_seqs_fp, please \"\r\n \"check filepath and permissions.\")\r\n if opts.non_chimeras_retention not in ['intersection', 'union']:\r\n option_parser.error(\"--non_chimeras_retention must be either \"\r\n \"'union' or 'intersection'\")\r\n if opts.usearch61_xn <= 1:\r\n option_parser.error(\"--usearch61_xn must be > 1\")\r\n if opts.usearch61_dn <= 0:\r\n option_parser.error(\"--usearch61_dn must be > 0\")\r\n if opts.usearch61_mindiffs <= 0:\r\n option_parser.error(\"--usearch61_mindiffs must be > 0\")\r\n if opts.usearch61_mindiv <= 0:\r\n option_parser.error(\"--usearch61_mindiv must be > 0\")\r\n if opts.usearch61_abundance_skew <= 0:\r\n option_parser.error(\"--usearch61_abundance_skew must be > 0\")\r\n\r\n verbose = opts.verbose # not used yet ...\r\n input_seqs_fp = opts.input_fasta_fp\r\n id_to_taxonomy_fp = opts.id_to_taxonomy_fp\r\n reference_seqs_fp = opts.reference_seqs_fp\r\n chimera_detection_method = opts.chimera_detection_method\r\n num_fragments = opts.num_fragments\r\n output_fp = opts.output_fp\r\n taxonomy_depth = opts.taxonomy_depth\r\n max_e_value = opts.max_e_value\r\n blast_db = opts.blast_db\r\n keep_intermediates = opts.keep_intermediates\r\n threads = opts.threads\r\n\r\n # calculate threads as 1 per CPU, or use float of input value\r\n if threads == 'one_per_cpu':\r\n threads = float(1 / cpu_count())\r\n else:\r\n # Make sure input is a float\r\n try:\r\n threads = float(threads)\r\n except ValueError:\r\n option_parser.error(\"--threads must be a float value if \"\r\n \"default 'one_per_cpu' value overridden.\")\r\n\r\n if not output_fp:\r\n if chimera_detection_method == \"usearch61\":\r\n output_dir = \"usearch61_chimeras/\"\r\n create_dir(output_dir, fail_on_exist=False)\r\n else:\r\n input_basename = splitext(split(input_seqs_fp)[1])[0]\r\n output_fp = '%s_chimeric.txt' % input_basename\r\n elif chimera_detection_method == \"usearch61\":\r\n output_dir = output_fp\r\n create_dir(output_dir, fail_on_exist=False)\r\n\r\n if chimera_detection_method == 'blast_fragments':\r\n blast_fragments_identify_chimeras(input_seqs_fp,\r\n id_to_taxonomy_fp,\r\n reference_seqs_fp, blast_db=blast_db,\r\n num_fragments=opts.num_fragments,\r\n max_e_value=max_e_value,\r\n output_fp=output_fp,\r\n taxonomy_depth=taxonomy_depth)\r\n elif chimera_detection_method == 'ChimeraSlayer':\r\n chimeraSlayer_identify_chimeras(input_seqs_fp,\r\n output_fp=output_fp,\r\n db_FASTA_fp=opts.reference_seqs_fp,\r\n db_NAST_fp=opts.aligned_reference_seqs_fp,\r\n min_div_ratio=opts.min_div_ratio,\r\n keep_intermediates=keep_intermediates)\r\n elif chimera_detection_method == 'usearch61':\r\n usearch61_chimera_check(input_seqs_fp,\r\n output_dir=output_dir,\r\n reference_seqs_fp=reference_seqs_fp,\r\n suppress_usearch61_intermediates=opts.suppress_usearch61_intermediates,\r\n suppress_usearch61_ref=opts.suppress_usearch61_ref,\r\n suppress_usearch61_denovo=opts.suppress_usearch61_denovo,\r\n split_by_sampleid=opts.split_by_sampleid,\r\n non_chimeras_retention=opts.non_chimeras_retention,\r\n usearch61_minh=opts.usearch61_minh,\r\n usearch61_xn=opts.usearch61_xn,\r\n usearch61_dn=opts.usearch61_dn,\r\n usearch61_mindiffs=opts.usearch61_mindiffs,\r\n usearch61_mindiv=opts.usearch61_mindiv,\r\n usearch61_abundance_skew=opts.usearch61_abundance_skew,\r\n 
percent_id_usearch61=opts.percent_id_usearch61,\r\n                                minlen=opts.minlen,\r\n                                word_length=opts.word_length,\r\n                                max_accepts=opts.max_accepts,\r\n                                max_rejects=opts.max_rejects,\r\n                                verbose=opts.verbose,\r\n                                threads=threads)", "def ProcessCommandLine():\n\tparser = argparse.ArgumentParser(description=__doc__.strip())\n\n\tparser.add_argument('--calibrate', action='store_true', default=False,\n                        help='Create pixel position calibration using camera')\n\tparser.add_argument('--calibrate-rgb', metavar='R,G,B', type=str, default='50,50,50',\n                        help='Calibration RGB value 0-255,0-255,0-255')\n \tparser.add_argument('--calibration-name', metavar='FILENAME', type=str, default=None,\n                        help='Pixel calibration name to create/read')\n\tparser.add_argument('--plot', action='store_true', default=False,\n                        help='Plot the calibration')\n\tparser.add_argument('--clear', action='store_true', default=False,\n                        help='Switch off all pixels')\n\tparser.add_argument('--debug', action='store_true', default=False,\n\t\t\t    help='Print back trace in event of exception')\n\n\n\treturn parser.parse_args()", "def main():\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"-hgt\", \"--imgHeight\", help=\"The height of the images, default=720.\",\n                        type=int, default=720)\n\n    parser.add_argument(\"-wd\", \"--imgWidth\", help=\"The width of the images, default=1280.\",\n                        type=int, default=1280)\n\n    parser.add_argument(\"-r\", \"--chessboardRows\", help=\"The rows of the chessboard calibration images, default=6.\",\n                        type=int, default=6)\n\n    parser.add_argument(\"-c\", \"--chessboardCols\", help=\"The cols of the chessboard calibration images, default=9.\",\n                        type=int, default=9)\n\n    parser.add_argument(\"-cp\", \"--calibrationPath\", help=\"The path to the camera calibration images.\",\n                        type=str, default='')\n\n    parser.add_argument(\"-in\", \"--inputVideoPath\", help=\"The path to the input video to be processed.\",\n                        type=str, default='')\n\n    parser.add_argument(\"-out\", \"--outputVideoPath\", help=\"The path to where to store the output video.\",\n                        type=str, default='')\n\n    args = parser.parse_args()\n\n    print(args)\n\n    assert args.calibrationPath != '', \"The path to calibration images can't be empty\"\n    assert args.inputVideoPath != '', \"The path to input video can't be empty\"\n    assert args.outputVideoPath != '', \"The path to output video can't be empty\"\n\n    camera_mtx, dist_coeff = CameraCalibration((args.imgHeight, args.imgWidth),\n                                               (args.chessboardRows, args.chessboardCols),\n                                               args.calibrationPath).calibrate()\n    print(\"Camera Mtx\", camera_mtx)\n    print(\"Distortion Coefficient\", dist_coeff)\n    # img = cv2.imread('test_images/test5.jpg')\n    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n    AdvancedLaneDetection(args.inputVideoPath, camera_mtx, dist_coeff).process_video(args.outputVideoPath)\n\n    # cv2.imwrite(\"output.jpg\", result)", "def main(target_clusters, clusters_per_lane, project_id, dest_plate_list, allow_non_dupl_struct): \n    couch = connection()\n    structure = proj_struct(couch, project_id, target_clusters)\n    [lane_maps, clusters_rem, clusters_expr] = parse_indata(structure, target_clusters)\n    if allow_non_dupl_struct:\n        aggregator(lane_maps,clusters_rem,clusters_per_lane)\n    else:\n        simple_unique_set(lane_maps)\n    [ideal_ratios, req_lanes, total_lanes] = sample_distributor(lane_maps, clusters_rem, clusters_per_lane)\n    acc_ratios = correct_numbers(lane_maps, clusters_expr, ideal_ratios, req_lanes, total_lanes)\n    generate_output(project_id, dest_plate_list, total_lanes, req_lanes, lane_maps, acc_ratios)",
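Several of the entries above and below (the RAxML driver, `run_lammps`, `analyzeCommandLineArguments`) shell out with `os.system` or `shell=True`. As a minimal sketch of the same pattern using an argv list and explicit error checking (the helper name and the example command are illustrative, not taken from any entry):

```python
# Minimal sketch, not part of the original entries: the os.system / shell=True
# calls used by several snippets here, rewritten with subprocess.run and an
# argv list, which avoids shell quoting issues and surfaces failures as errors.
import subprocess

def run_command(argv, output_path=None):
    """Run a command; optionally redirect stdout to a file; raise on failure."""
    if output_path is not None:
        with open(output_path, "w") as fout:
            # check=True raises CalledProcessError on a non-zero exit code
            subprocess.run(argv, stdout=fout, check=True)
    else:
        subprocess.run(argv, check=True)

# Hypothetical usage mirroring the run_lammps entry that follows:
# run_command(["lmp_serial", "-in", "in.lammps"], output_path="lammps.out")
```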
"def run_lammps(lammps_executable, input_file, output_file):\n # run lammps\n lammps_command = f\"{lammps_executable} -in {input_file} \"\n print(\"run command:\", lammps_command)\n with open(\"tmp2False.out\", \"w+\") as fout:\n subprocess.call(lammps_command.split(), stdout=fout)", "def main():\n # get arguments from command line\n args = parse_arguments()\n\n # checks on the output file\n # if args.stats_only:\n # assert args.output, \"The output file was not provided\"\n if args.output and os.path.exists(args.output):\n warnings.warn(\"Overwriting task file \" + args.output, UserWarning)\n os.remove(args.output)\n\n # initialize the task\n task = Task(\n args.database, args.on,\n across=args.across,\n by=args.by,\n filters=args.filters,\n regressors=args.regressors,\n verbose=args.verbose)\n\n if args.stats_only:\n task.print_stats()\n else:\n if args.tempdir and not os.path.exists(args.tempdir):\n os.makedirs(args.tempdir)\n\n # generate triplets and unique pairs\n task.generate_triplets(\n output=args.output,\n threshold=args.threshold,\n tmpdir=args.tempdir,\n seed=args.seed)", "def main():\n arg_parser = argparse.ArgumentParser(description=\"\"\"\n This utility will take a SAM alignment file from paired end reads \n and filter the original read FASTQ files do those reads without\n high-likelihood alignments to human.\n For gzipped alignments, consider using pipes: \n gunzip -c ref.fna.gz | strip_mt_ebv.py | gzip > ref.nomtebv.fna.gz\n \"\"\")\n\n arg_parser.add_argument(\n '--alnfile', '-A',\n type=argparse.FileType('r'),\n help='Alignment File. Can be stdin. For gzip, consider pipes',\n default=sys.stdin\n )\n arg_parser.add_argument(\n '--r1in', '-1',\n required=True,\n help='Input fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2in', '-2',\n required=True,\n help='Input fastq file for R2'\n )\n arg_parser.add_argument(\n '--r1out', '-o1',\n required=True,\n help='Output fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2out', '-o2',\n required=True,\n help='Output fastq file for R2'\n )\n arg_parser.add_argument(\n '--mapq',\n default=30,\n type=int,\n help='Minimum mapq required to be considered a valid read'\n )\n arg_parser.add_argument(\n '--cov_min',\n type=float,\n default=0.9\n )\n\n args = arg_parser.parse_args()\n\n passed_ids = get_passing_ids(\n args.alnfile,\n args.mapq,\n args.cov_min,\n )\n\n filter_fastq(\n passed_ids,\n args.r1in,\n args.r2in,\n args.r1out,\n args.r2out\n )", "def main():\n parser = argparse.ArgumentParser(description=MAIN_DESCRIPTION)\n parser.add_argument('-a', '--algorithm', help=ALGORITHM_DESCRIPTION)\n parser.add_argument('-n', '--number', type=int, help=NUMBER_DESCRIPTION)\n parser.add_argument('-o', '--order', help=ORDER_DESCRIPTION)\n parser.add_argument('-s', '--size', help=SIZE_DESCRIPTION)\n args = parser.parse_args()\n try:\n if not (args.algorithm and args.number and args.order and args.size):\n raise ValueError\n create_structure()\n try:\n data = get_data(args.number, args.order, args.size)\n except IOError:\n data = generate_in_files(args.number, args.order, args.size)\n finally:\n alg, out, time = sorting_algorithm(data, args.algorithm)\n # generate_out_files(out, args.number)\n generate_log_file(args.algorithm, args.number, args.order,\n args.size, alg.compares, alg.moves, time)\n except (TypeError, UnboundLocalError, ValueError) as e:\n parser.print_help()", "def parseArgs():\n parser = ArgumentParser(description='Main deepSAMIREI script.')\n parser.add_argument('-v', '--verbose',dest='verbose', 
action='store_true', \n default=False, help=\"verbose output [default is quiet running]\")\n \n parser.add_argument('-o','--outDir',dest='directory',type=str,\n action='store',help=\"output directory\", default='')\n parser.add_argument('-m','--mode', dest='mode',type=str,\n action='store',help=\"Mode of operation: train or test.\", default='train') \n parser.add_argument('--deskload', dest='deskLoad',\n action='store_true',default=False,\n help=\"Load dataset from desk. If false, the data is converted into tensors and kept in main memory (not recommended for large datasets).\") \n parser.add_argument('-w','--numworkers',dest='numWorkers',type=int,\n action='store',help=\"Number of workers used in data loader. For loading from the desk, use more than 1 for faster fetching.\", default=1) \n #parser.add_argument('-c','--cvtype',dest='cvtype',type=str,action='store',\n # help=\"Type of cross validation to use. Options are: chrom (leave-one-chrom-out), nfolds (N fold CV), and none (No CV, just regular train test split)\",\n # default='none')\n #parser.add_argument('--numfolds',dest='numfolds',type=int,action='store',\n # help=\"Number of folds for N fold CV\", default=10)\n parser.add_argument('--splittype',dest='splitType',type=str, action='store',\n help=\"Either to use a percantage of data for valid,test or use specific chromosomes. In the later case, provide chrA,chrB for valid,test. Default value is percent and --splitperc value will be used.\", default='percent')\n parser.add_argument('--splitperc',dest='splitperc',type=float, action='store',\n help=\"Pecentages of test, and validation data splits, eg. 10 for 10 percent data used for testing and validation.\", default=10)\n parser.add_argument('--motifanalysis', dest='motifAnalysis',\n action='store_true',default=False,\n help=\"Analyze CNN filters for motifs and search them against known TF database.\")\n parser.add_argument('--scorecutoff',dest='scoreCutoff',type=float,\n action='store',default=0.65,\n help=\"In case of binary labels, the positive probability cutoff to use.\")\n parser.add_argument('--tomtompath',dest='tomtomPath',\n type=str,action='store',default=None,\n help=\"Provide path to where TomTom (from MEME suite) is located.\") \n parser.add_argument('--database',dest='tfDatabase',type=str,action='store',\n help=\"Search CNN motifs against known TF database. Default is Human CISBP TFs.\", default=None)\n parser.add_argument('--annotate',dest='annotateTomTom',type=str,action='store',\n default=None, help=\"Annotate tomtom motifs. The options are: 1. path to annotation file, 2. No (not to annotate the output) 3. None (default where human CISBP annotations are used)\") \n parser.add_argument('-a','--attnfigs', dest='attnFigs',\n action='store_true',default=False,\n help=\"Generate Attention (matrix) figures for every test example.\")\n parser.add_argument('-i','--interactions', dest='featInteractions',\n action='store_true',default=False,\n help=\"Self attention based feature(TF) interactions analysis.\")\n parser.add_argument('-b','--background', dest='intBackground',type=str,\n action='store',default=None,\n help=\"Background used in interaction analysis: shuffle (for di-nucleotide shuffled sequences with embedded motifs.), negative (for negative test set). Default is not to use background (and significance test).\")\n parser.add_argument('--attncutoff', dest='attnCutoff',type=float,\n action='store',default=0.04,\n help=\"Attention (probability) cutoff value to use while searching for maximum interaction. 
A value (say K) greater than 1.0 will mean using top K interaction values.\") #In human promoter DHSs data analysis, lowering the cutoff leads to more TF interactions. \n parser.add_argument('--intseqlimit', dest='intSeqLimit',type=int,\n action='store',default = -1,\n help=\"A limit on number of input sequences to test. Default is -1 (use all input sequences that qualify).\")\n parser.add_argument('-s','--store', dest='storeInterCNN',\n action='store_true',default=False,\n help=\"Store per batch attention and CNN outpout matrices. If false, the are kept in the main memory.\")\n parser.add_argument('--considertophit', dest='considerTopHit',\n action='store_true',default=False,\n help=\"Consider only the top matching TF/regulatory element for a filter (from TomTom results).\") #This is particularly useful when we have lots of TF hits with fewer (1 or two) filters per TF. Using the top TF match will lead to fewer interactions.\n parser.add_argument('--numlabels', dest='numLabels',type=int,\n action='store',default = 2,\n help=\"Number of labels. 2 for binary (default). For multi-class, multi label problem, can be more than 2. \")\n parser.add_argument('--tomtomdist', dest='tomtomDist',type=str,\n action='store',default = 'pearson',\n help=\"TomTom distance parameter (pearson, kullback, ed etc). Default is pearson. See TomTom help from MEME suite.\")\n parser.add_argument('--attrbatchsize', dest='attrBatchSize',type=int,\n action='store',default = 12,\n help=\"Batch size used while calculating attributes. Default is 12.\")\n parser.add_argument('--tomtompval', dest='tomtomPval',type=float,\n action='store',default = 0.05,\n help=\"Adjusted p-value cutoff from TomTom. Default is 0.05.\")\n parser.add_argument('--testall', dest='testAll',\n action='store_true',default=False,\n help=\"Test on the entire dataset (default False). Useful for interaction/motif analysis.\")\n parser.add_argument('--useall', dest='useAll',\n action='store_true',default=False,\n help=\"Use all examples in multi-label problem. Default is False.\")\n parser.add_argument('--precisionlimit', dest='precisionLimit',type=float,\n action='store',default = 0.50,\n help=\"Precision limit to use for selecting examples in case of multi-label problem.\")\t\t\t\t\t\n parser.add_argument('inputprefix', type=str,\n help=\"Input file prefix for the bed/text file and the corresponding fasta file (sequences).\")\n parser.add_argument('hparamfile',type=str,\n help='Name of the hyperparameters file to be used.')\n \n \n \n args = parser.parse_args()\n #if not validateArgs( args ):\n # raise Exception(\"Argument Errors: check arguments and usage!\")\n return args", "def analyzeCommandLineArguments(options):\n print(options)\n flag=0\n if os.path.exists(options.output_directory)==False:\n cmd=\"mkdir \"+options.output_directory\n os.system(cmd)\n else:\n if options.force==0:\n os.system(\"echo \\\"Output directory already exists. Please re-run the program with -f 1 to enforce rewrite of the directory \\\" >> \"+options.output_directory+\"/Log.out\")\n flag=1\n else:\n cmd=\"rm -rf \"+options.output_directory\n os.system(cmd)\n cmd=\"mkdir \"+options.output_directory\n os.system(cmd)\n \n cmd=\"touch \"+options.output_directory+\"/Log.out\"\n os.system(cmd)\n if options.bowtie_index == None:\n os.system(\"echo \\\"No bowtie index provided. 
Proceeding to building index\\\" >> \"+options.output_directory+\"/Log.out\")\n if os.path.exists(options.genome)==False:\n os.system(\"echo \\\"The genome file you provided does not exist\\\" >> \"+options.output_directory+\"/Log.out\")\n flag=1\n if options.input_library==None:\n os.system(\"echo \\\"The input file \"+options.input_library+\" does not exist\\\" >> \"+options.output_directory+\"/Log.out\")\n flag=1\n \n if flag==1:\n print(\"The program had to terminate prematurely....Please check \"+options.output_directory+\"/Log.out file for more details\")\n sys.exit()\n for ele in options.number_of_cycles:\n if ele not in [\"9\",\"10\",\"11\",\"12\",\"13\"]:\n os.system(\"echo \\\"Incorrect number of cycles have been entered. Valid choices are 9, 10, 11, 12 and 13 \\\" >> \"+options.output_directory+\"/Log.out\")\n flag=1\n \n if options.input_library.split(\".\")[-1]==\"fq\" or options.input_library.split(\".\")[-1]==\"fastq\":\n options.input_filename=options.output_directory+\"/\"+options.input_library.split(\"/\")[-1].split(\".\")[0]+\".fa\"\n \n options.input_path=\"/\".join(options.input_library.split(\"/\")[:-1])\n options.input_filename=options.input_library.split(\"/\")[-1].split(\".\")[0]\n options.consolidated_filename=options.output_directory+\"/\"+options.input_filename+\".consolidated.fasta\"\n options.adapter_trimmed_filename=options.output_directory+\"/\"+options.input_filename+\"_adapter_trimmed.fastq\"\n \n options.small_rna_size=list(map(int,options.small_rna_size))\n options.number_of_cycles=list(map(int,options.number_of_cycles))\n options.pvalue_cutoff=float(options.pvalue_cutoff)\n options.map_limit=int(options.map_limit)\n return options", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def prepare_runs(args):\n output_directory = _prepare_output_dir(args.output_directory)\n z_score_dir = args.z_score_dir\n region_list = args.region_list \n if args.region_list is None:\n try:\n flanking_region = int(args.flanking_region)\n except ValueError:\n logging.error('Flanking region argument needs to be an integer')\n sys.exit(COMMAND_LINE_ERROR)\n build = args.build\n bed_directory = args.bed_directory\n # Create the SNPList\n try:\n min_maf = float(args.maf)\n except:\n logging.error(\"Min Maf -m or --min-maf needs to be an floating point number\")\n sys.exit(COMMAND_LINE_ERROR)\n if args.region_list is not None:\n region_list = {}\n snp_list = []\n with open(args.region_list) as input_file:\n # When using no flaking region SNP must be valid, but it doesn't actually matter what it is, need to ensure that is actually the case.\n for i, line in enumerate(input_file):\n rsid = str(i)+ \"_\" + ''.join(line.strip().split(\"\\t\"))\n chromosome = line.strip().split(\":\")[0] \n snp = Snp(chromosome,\"1\",rsid)\n snp_list.append(snp)\n region_list[snp.rsid] = line.strip()\n else:\n snp_list = SnpList(args.snp_list, build)\n logging.info(snp_list)\n # Locus to process\n # population_to_extract_vcf\n if not args.annotation_only:\n no_flanking = args.flanking_units\n if no_flanking:\n raise NotImplementedError(\"Using a number of flanking SNPs instead of a region is not supported\")\n populations= args.populations.split(',')\n logging.info(\"Populations to process: 
{0}\".format(populations))\n loci = []\n gemini_databases = []\n output_vcfs = []\n for snp in snp_list:\n logging.info('Preparing output files for SNP {0}'.format(snp.rsid))\n locus = snp.rsid\n loci.append(locus)\n logging.info(\"Obtaining VCF file from the 1000 genomes project\")\n if region_list is not None:\n vcf = get_vcf_file(snp, string_region=region_list[locus])\n else: \n vcf = get_vcf_file(snp, flanking_region=flanking_region)\n for population in populations:\n tmp_vcf = extract_population_from_1000_genomes(vcf=vcf, super_population=population)\n z_score_file = get_relevant_zscore(snp.chrom, population, z_score_dir)\n pos_list_zscore = create_pos_hash_table(z_score_file)\n output_vcf = generate_zscore_and_vcf_output(output_directory=output_directory, zscore_hash=pos_list_zscore, vcf=tmp_vcf, locus=locus,population=population, multiply_rsquare=args.multiply_rsquare)\n if bed_directory is None:\n logging.info(\"Creating gemini database\")\n # TODO: Fix broxen gemini referenec\n gemini_databases.append(create_gemini_database(vcf=output_vcf))\n vcf_to_plink(locus, output_directory=output_directory, vcf=output_vcf, population=population)\n plink_to_ld_matrix(locus, output_directory=output_directory, population=population)\n logging.info(\"Generate transancestrals matrices\")\n generate_transancestral_output(loci, populations, output_directory)\n if bed_directory is None:\n logging.info(\"Generating annotation matrices to be used with Paintor\")\n logging.info(gemini_databases)\n generate_and_write_encode_annotations(databases=gemini_databases, output_directory=output_directory, loci=snp_list)\n else:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n # So finally we need to fix the LD matrices for inputting into PAINTOR. 
\n\n with open(os.path.join(output_directory, 'input.files'), 'w') as out_f:\n for snp in snp_list:\n out_f.write(snp.rsid +'\\n')\n # Remove .tbi files\n for file in os.listdir('.'):\n if fnmatch.fnmatch(file, '*.tbi'):\n try:\n os.remove(file)\n except OSError:\n logging.warning(\"Could not remove a .tbi file from the 1000 genomes tabix run\")\n else: \n loci = []\n for snp in snp_list:\n loci.append(snp.rsid)\n if bed_directory is not None:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n logging.info(\"Finemapping file preparation complete\")", "def main(args):\n if '-' in args['-p']:\n tmp = args['-p'].split('-')\n tgtPorts = [str(i) for i in xrange(int(tmp[0]), int(tmp[1])+1)]\n else:\n tgtPorts = [args['-p']]\n tgtHost = args['-H']\n for tgtPort in tgtPorts:\n nmapScan(tgtHost, tgtPort)", "def launchJobs(options, cmdargs, errStream=sys.stdin):\n\n if options.queue == LOCAL:\n launchLocalJobs(options,cmdargs,errStream)\n return\n\n logging.debug(\"Launching task array: %r\" % ({'tmpDir':options.tmpDir,'splits':options.splits,'fragName':options.fragBase,'cmd':cmdargs,'sgeOpts':options.sgeOptions,'job':options.jobName,'priority':options.priority,'loglevel':options.verbose,'wait':options.wait, 'type':options.taskType}))\n \n # SGE or SLURM submission prefix\n command = getSubmissionCommandPrefix(options)\n\n # batch_runner command\n command.append(BATCHLAUNCHER)\n command+=[\"--mode\",\"run\",\"--tmp_dir\",options.tmpDir,\"--frag_base\",\n options.fragBase, \"--frag_dir\", options.frag_dir, \"--frag_suffix\", options.fragSuff, \"--loglevel\", str(options.verbose), \"--queue\", options.queue]\n if options.inputFlag is not None:\n command.append('-i=%s' % (options.inputFlag))\n if options.prefixFlag is not None:\n command.append('-p=%s' % (options.prefixFlag))\n if options.threadsFlag is not None:\n command+=['-t',str(options.threadsFlag)]\n if options.outputFlags is not None:\n for flag in options.outputFlags:\n command.append('-o=%s' % (flag))\n if options.taskType is not None:\n command+=['--taskType',options.taskType]\n if options.cwd:\n command.append('--cwd')\n command.append('--')\n command+=cmdargs\n\n # redirect qsub output to std, silence if vebose is 0\n #if options.verbose==0:\n # qsubOuts=open(os.devnull,'w')\n #else:\n # qsubOuts=errStream\n \n # run command\n logging.debug('Launching task array: %s' % (formatCommand(command)))\n try:\n submissionOutput = subprocess.check_output(command)\n try:\n submissionOutput = submissionOutput.decode()\n except:\n pass\n if options.verbose>0:\n errStream.write(\"Submission Output: \" + submissionOutput)\n except subprocess.CalledProcessError as error:\n if options.wait and options.queue != SLURM:\n # when using -sync y, the exit code may come from a task\n # (which cleanup will handle)\n logging.warning(\"qsub returned an error code of: %d\" \n % error.returncode)\n else:\n raise error\n\n # get job id\n try:\n jobid = re.search(r'(\\d+)\\s*$',submissionOutput).group(1)\n options.jobid = jobid\n except:\n if options.queue==SLURM:\n logging.error(\"Cannot parse SLURM job id from '%s'\" % (submissionOutput))\n raise\n\n # SLURM doesn't allow waiting for completion on array jobs, so we hack:\n # use srun to start a dummy job that will wait for our job array\n if options.wait and options.queue==SLURM:\n waitForSlurmArray(options, errStream)", "def parse_args():\n\n parser = argparse.ArgumentParser(\n description='Assignment 1: Map 
directions.',\n epilog = 'If SERIALPORT is not specified, stdin/stdout are used.')\n parser.add_argument('-s', '--serial',\n help='path to serial port',\n dest='serialport',\n default=None)\n parser.add_argument('-v', dest='verbose',\n help='verbose',\n action='store_true')\n parser.add_argument('-g', '--graph',\n help='path to graph (DEFAULT = \" edmonton_roads.txt\")',\n dest='graphname',\n default='edmonton_roads.txt')\n\n return parser.parse_args()", "def _run_job(args):\n # Note that we do not set the seed of the random number generator because\n # we already modified the interaction matrix before calling this function\n # and it does not harm us when all sub processes have the same sequence of\n # random numbers.\n \n # create the object ...\n obj = LibraryBinaryNumeric(**args[0])\n # ... get the method to evaluate ...\n method = getattr(obj, args[1])\n # ... and evaluate it\n if len(args) > 2:\n return method(**args[2])\n else:\n return method()", "def ReadArguments():\n\n args = ParseArguments()\n\n logging.info('Command line arguments...')\n for arg in vars(args):\n logging.info(str(arg) + ': ' + str(getattr(args, arg)))\n logging.info('')\n\n IsTest(args)\n ProcessCacheSize(args)\n ProcessLineSize(args)\n ProcessMulti(args)\n ProcessMemPattern(args)\n ProcessMemFile(args)", "def run_overlay_resources_score_motifs(motif_sites_dir,\n all_chromatin_makrs_all_cells_combined_dir_path,\n motifs_overlapping_tracks_output_dir,\n run_in_parallel_param,\n number_processes_to_run_in_parallel,\n normal_expression_per_tissue_origin_per_TF,\n matching_tissue_to_cell,\n motifTFName_TFNames_matches_dict,\n cells_assays_dict,\n cell_tfs,\n tf_cells,\n assay_cells_datatypes):\n\n # check if input motif_sites_dir is directory and get files from it\n if os.path.exists(motif_sites_dir):\n if not os.path.isdir(motif_sites_dir) and os.path.isfile(motif_sites_dir):\n motif_files = [motif_sites_dir]\n motif_sites_dir = \".\"\n else:\n motif_files = os.listdir(motif_sites_dir)\n else:\n sys.exit(\"Specified path to Motif files does not exist\")\n\n # get list of paths to all motif files\n motif_files_full_path = [motif_sites_dir + '/' + s for s in motif_files]\n\n # get list of all files of combined tracks\n chromatin_tracks_files = os.listdir(all_chromatin_makrs_all_cells_combined_dir_path)\n\n # create output directory if not existing\n if not os.path.exists(motifs_overlapping_tracks_output_dir):\n os.makedirs(motifs_overlapping_tracks_output_dir)\n\n # compute overlay resources score motif to find overlapping structures\n # Debug: print(run_in_parallel_param, motif_files)\n if run_in_parallel_param and len(motif_files) > 1:\n print(\"Run overlay_resources_score_motifs in parallel\")\n p = Pool(int(number_processes_to_run_in_parallel))\n motifs_overlapping_tracks_files = p.starmap(overlay_resources_score_motifs, product(motif_files_full_path,\n [\n motifs_overlapping_tracks_output_dir],\n [\n all_chromatin_makrs_all_cells_combined_dir_path],\n [chromatin_tracks_files]))\n p.close()\n p.join()\n else:\n print(\"Do not run overlay_resources_score_motifs in parallel\")\n motifs_overlapping_tracks_files = []\n for i in motif_files_full_path:\n if os.path.exists(i):\n motifs_overlapping_tracks_file = overlay_resources_score_motifs(i,\n motifs_overlapping_tracks_output_dir,\n all_chromatin_makrs_all_cells_combined_dir_path,\n chromatin_tracks_files)\n motifs_overlapping_tracks_files.append(motifs_overlapping_tracks_file)\n else:\n print(\"Motif file \" + i + \"cannot be found and will be ignored.\")\n\n 
print(\"Finished overlay_resources_score_motifs\")\n scored_motifs_overlapping_tracks_files = []\n for motifs_overlapping_tracks_file in motifs_overlapping_tracks_files:\n if motifs_overlapping_tracks_file is None:\n continue\n scored_motifs_chromatin_tracks_output_file = '.'.join(\n motifs_overlapping_tracks_file.split('.')[0:-1]) + '_scored.bed10'\n # create or overwrite scored motif (chromatin-wise) files\n if not os.path.exists(scored_motifs_chromatin_tracks_output_file): # score each motif-track_overlapping file\n print(\"computing scores to: \" + scored_motifs_chromatin_tracks_output_file)\n # TODO: control change below\n index_track_names = 7\n index_motif_name = 3\n with open(scored_motifs_chromatin_tracks_output_file, 'w') as scored_motifs_writefile:\n header_line = ['posrange', 'chr', 'motifstart', 'motifend', 'name', 'score', 'pval', 'strand']\n for cell in sorted(cells_assays_dict.keys()):\n for assay in sorted(cells_assays_dict[cell].keys()):\n if cell[0].isdigit():\n cell = 'a' + cell\n\n cell_name = '_'.join(((cell + \"___\" + assay).replace('(', '').replace(')', '')\n .replace('-', '__').replace('.', '').replace(\"'\", \"\")).split())\n header_line.append('\"' + cell_name + '\"')\n scored_motifs_writefile.write('\\t'.join(header_line) + '\\n')\n\n # TODO: end previous for loop here, call Rust function, input all files --> minimize switching between them\n # score motifs\n if (run_in_parallel_param):\n print(\"Run score_motifs per cell in parallel\")\n os.system(\"\"\"split -l 200000 {} {}\"\"\".format(motifs_overlapping_tracks_file,\n motifs_overlapping_tracks_file + '_tmp'))\n motifs_overlapping_tracks_file_splitted = glob.glob(motifs_overlapping_tracks_file + '_tmp*')\n p = Pool(int(number_processes_to_run_in_parallel))\n p.starmap(score_motifs_per_cell, product(motifs_overlapping_tracks_file_splitted,\n [normal_expression_per_tissue_origin_per_TF],\n [matching_tissue_to_cell],\n [motifTFName_TFNames_matches_dict],\n [cells_assays_dict],\n [cell_tfs],\n [tf_cells],\n [assay_cells_datatypes],\n [index_track_names],\n [index_motif_name]))\n p.close()\n p.join()\n\n # remove tmp split files\n with open(scored_motifs_chromatin_tracks_output_file, 'a') as scored_motifs_writefile:\n for f in motifs_overlapping_tracks_file_splitted:\n with open(f + '_scored', 'r') as f_score_ifile:\n l = f_score_ifile.readline()\n while l:\n scored_motifs_writefile.write(l)\n l = f_score_ifile.readline()\n\n f_score_ifile.close()\n os.remove(f)\n os.remove(f + '_scored')\n scored_motifs_writefile.close()\n else:\n print(\"Do not run score_motifs per cell in parallel\")\n scored_file_tmp = score_motifs_per_cell(motifs_overlapping_tracks_file,\n normal_expression_per_tissue_origin_per_TF,\n matching_tissue_to_cell,\n motifTFName_TFNames_matches_dict,\n cells_assays_dict,\n cell_tfs,\n tf_cells,\n assay_cells_datatypes,\n index_track_names,\n index_motif_name)\n # write scores into scored file with header\n with open(scored_file_tmp, 'r') as infile, \\\n open(scored_motifs_chromatin_tracks_output_file, 'a') as outfile:\n outfile.write(infile.read())\n\n scored_motifs_overlapping_tracks_files.append(scored_motifs_chromatin_tracks_output_file)\n print(\"Finished run_overlay_resources_score_motifs\")\n return motifs_overlapping_tracks_files, scored_motifs_overlapping_tracks_files" ]
[ "0.64769953", "0.62038994", "0.57405895", "0.554428", "0.55252844", "0.551435", "0.55140823", "0.55098575", "0.54549307", "0.5444846", "0.54388684", "0.54316586", "0.5404963", "0.53763914", "0.5359437", "0.5330171", "0.5305152", "0.53005004", "0.53004366", "0.5288501", "0.5277907", "0.5273782", "0.5257273", "0.5246049", "0.52365404", "0.52344126", "0.5232616", "0.52227193", "0.52207667", "0.52179265", "0.5213977", "0.5201716", "0.51995957", "0.5199537", "0.5193075", "0.51912445", "0.5182842", "0.5182086", "0.51818806", "0.51769966", "0.51638764", "0.51467633", "0.5146361", "0.5132422", "0.51307225", "0.511958", "0.51034445", "0.5095793", "0.50772095", "0.50756836", "0.5072251", "0.5071752", "0.5064928", "0.50619835", "0.50588524", "0.50558925", "0.50369525", "0.5035158", "0.50324917", "0.502804", "0.50243616", "0.5024271", "0.50228816", "0.500136", "0.4992842", "0.49835673", "0.49820736", "0.49819568", "0.49788612", "0.49778017", "0.49711415", "0.49690938", "0.49648342", "0.49579063", "0.49542892", "0.49518484", "0.49478006", "0.4945823", "0.49454084", "0.49359623", "0.49357408", "0.49282688", "0.49263397", "0.4922911", "0.4922229", "0.49207088", "0.4915766", "0.49147552", "0.4914072", "0.4911449", "0.49093324", "0.4907796", "0.4901998", "0.4899626", "0.48919755", "0.48900905", "0.48865324", "0.4875646", "0.48731452", "0.487209" ]
0.68828976
0
Expand various colors to RRGGBB.
def expand_color(color, default=None, passthrough=False, block=None):
    if color:
        if color[0] == "#":
            color = color[1:]
            try:
                int(color, 16)
            except ValueError:
                return block
            length = len(color)
            if length in [3, 4]:
                color = "".join(color[x] * 2 for x in range(length))
            elif length not in [6, 8]:
                return block
            return "#" + color.upper()
        elif block:
            return block
    return COLOR_NAMES.get(color, color if passthrough else default)
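A minimal usage sketch for expand_color, assuming a hypothetical COLOR_NAMES name-to-hex mapping (the real module defines its own table):

COLOR_NAMES = {"red": "#FF0000"}  # hypothetical example mapping

print(expand_color("#fff"))                   # "#FFFFFF": 3-digit shorthand doubled
print(expand_color("#1a2b3c"))                # "#1A2B3C": 6-digit hex passed through, uppercased
print(expand_color("#xyz", block="#000000"))  # "#000000": invalid hex falls back to block
print(expand_color("red"))                    # "#FF0000": named color resolved via COLOR_NAMES
print(expand_color(None, default="#FFFFFF"))  # "#FFFFFF": empty input returns the default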
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_rgb(r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def combine_color(red, green, blue):\r\n\r\n return (red << 16) + (green << 8) + blue", "def rgb(r, g, b):\n return \"\".join([\"%02X\" % max(0, min(x, 255)) for x in [r, g, b]])", "def rgb(r, g, b, alpha=1.0):\n global _cmds\n _cmds = (f\"color([{r/255},{g/255},\"\n f\"{b/255},{alpha}])\\n\") + _cmds", "def setColorRGB(r,g,b):\n r, g, b = r/255., g/255., b/255.\n dislin.setrgb(r,g,b)", "def RGB_to_fill(R, G, B):\n\n # based on https://stackoverflow.com/q/30484220\n\n return PatternFill(\n start_color=RGB_to_HEX(R, G, B),\n end_color=RGB_to_HEX(R, G, B),\n fill_type='solid'\n )", "def get_rgb(self, r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def Color(red, green, blue, white = 0):\n\treturn (white << 24) | (red << 16)| (green << 8) | blue", "def Color(red, green, blue, white = 0):\n return (white << 24) | (red << 16)| (green << 8) | blue", "def _color565(self, r, g, b):\n return (((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3))", "def get_color_in_rgb_decimal():\n\n # Grabbing custom colormap from matplotlib\n a = cm.get_cmap('cool', 32)\n b = cm.get_cmap('spring', 32)\n c = cm.get_cmap('autumn_r', 64)\n d = cm.get_cmap('bwr_r', 192)\n e = cm.get_cmap('Greens', 192)\n\n # Adding the colormaps into one stack to have a more comprehensive color spectrum \n newcolors = np.vstack((a(np.linspace(0, 1, 32)), \n b(np.linspace(0, 1, 32)), \n c(np.linspace(0, 1, 64)),\n d(np.linspace(0, 0.5, 192)),\n e(np.linspace(0, 1, 192)),\n ))\n return newcolors", "def setColors(self):\n #productive\n profprint()\n self.color= [[0,0,0] for i in range(205)]\n self.color255= self.setColors255()\n for i in range(205):\n for j in range(3):\n self.color[i][j] = self.color255[i][j]/float(255)\n\n return self.color", "def set_color_rgb(r, g, b):\r\n global _current_color\r\n _current_color = (r, g, b)", "def fromInts(r, g, b):\n return IColor(r/255.,g/255.,b/255.)", "def setColors(self):\r\n # productive\r\n profprint()\r\n self.color = [[0, 0, 0] for i in range(MAXCOL)]\r\n self.color255 = self.setColors255()\r\n for i in range(MAXCOL):\r\n for j in range(3):\r\n self.color[i][j] = self.color255[i][j] / float(255)\r\n\r\n return self.color", "def multiply_rgb(color, alter):\n rgb = color[1:]\n chunks, chunk_size = len(rgb), len(rgb)/3\n r, g, b = [ int(int('0x%s' % rgb[i:i+chunk_size], 0) * alter) for i in range(0, chunks, chunk_size) ]\n return '#%.2x%.2x%.2x' % (r, g, b)", "def convert_to_RGB_255(colors):\n return (colors[0]*255.0, colors[1]*255.0, colors[2]*255.0)", "def merge_color(rate):\n if rate < 15:\n return 'r'\n if rate < 30:\n return 'y'\n return 'g'", "def _to_color(indx, base):\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127", "def color565(red, green=0, blue=0):\n try:\n red, green, blue = red # see if the first var is a tuple/list\n except TypeError:\n pass\n return (red & 0xf8) << 8 | (green & 0xfc) << 3 | blue >> 3", "def reduceColorRGB(channels,levels):\n data = (levels[2]*levels[1]*reduceColor(channels[0],levels[0])+\n levels[2]*reduceColor(channels[1],levels[1])+\n reduceColor(channels[2],levels[2])).astype(numpy.uint8)\n return data", "def color565(r, g=0, b=0):\n try:\n r, g, b = r # see if the first var is a tuple/list\n except TypeError:\n pass\n return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3", "def 
change_color():\n return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)", "def set_rgb(self, r=0, g=0, b=0, calibrate=True):\n r = self.set_red(r)\n g = self.set_green(g)\n b = self.set_blue(b)\n return (r,g,b)", "def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr", "def get_xterm_color(r, g, b):\n rr = round_tint(r)\n rg = round_tint(g)\n rb = round_tint(b)\n\n return 16 + rr * 36 + rg * 6 + rb", "def Red(*args, **kwargs):\n return _gdi_.Colour_Red(*args, **kwargs)", "def set_rgb_color(r, g, b, a=1.0):\n glColor4fv(get_rgb_normalized(r, g, b, a))", "def colorize(self):\n return", "def _pack_rgb_values(r, g, b):\n if isinstance(r, np.ndarray):\n r = r.astype(np.int)\n rgb = r << 8\n rgb |= g\n rgb = rgb << 8\n rgb |= b\n return rgb", "def rgbString(red,green,blue):\n return chr(red)+chr(green)+chr(blue)", "def ramp_color_rgb(values, feature, parent): \r\n ramp_name = values[0]\r\n ramp_position = values[1]\r\n \r\n ramp = QgsStyleV2.defaultStyle().colorRampRef(ramp_name)\r\n if not ramp:\r\n parent.setEvalErrorString( QObject.tr( '\"{}\" is not a valid color ramp'.format(ramp_name)))\r\n return QColor(0,0,0).name()\r\n \r\n value, error = getFloat(ramp_position)\r\n if error:\r\n parent.setEvalErrorString(error)\r\n \r\n color = ramp.color(value)\r\n return \"{},{},{}\".format(color.red(), color.green(), color.blue())", "def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pg.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + (0,), None, pg.BLEND_RGBA_ADD)\n\n return image", "def update_r(color, new_r):\n\n color.update_r(new_r)", "def rainbow_example(rounds=1, delay_secs=0.01):\n set_color('black') # Start with all LED's \"off\"\n update()\n\n saturation = 100 # 0 (grayer) to 100 (full color)\n brightness = 100 # 0 (darker) to 100 (brighter)\n\n for i in range(0, rounds):\n for hue in tuple(range(0, 360)) + tuple(range(360, -1, -1)): # 0..360..0\n color_str = \"hsb({}, {}%, {}%)\".format(hue, saturation, brightness)\n push_color(color_str)\n update()\n sleep(delay_secs)", "def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]", "def generate_normalized_rgb(self):\n \n r,g,b=(Numeric.zeros(256),Numeric.zeros(256),Numeric.zeros(256))\n for i in Numeric.arange(256):\n r_,g_,b_=self.colfct(i/255.0) # these are from [0,1]\n r[i],g[i],b[i]=int(255*r_),int(255*g_),int(255*b_)\n return r/256.0,g/256.0,b/256.0", "def _style_colours(self):\n\n pass", "def rgb(r, g, b):\n return (r/255, g/255, b/255)", "def get_rgb(self):\n\n return \"#%02X%02X%02X\" % (self.r, self.g, self.b)", "def modify_color(color, sigma):\n\t# get values of individual colors, convert to hex integers, and modify\n\tred, green, blue = (vary(int(color[i:i+2], 16), sigma) for i in (1, 3, 5))\n\t# return combined hex representation of new color values\n\treturn '#%02X%02X%02X' % (red, green, blue)", "def setColors255(self):\r\n # productive\r\n profprint()\r\n self.color255 = [[0, 0, 0] for i in range(MAXCOL)]\r\n self.color255[0] = [221, 108, 158]\r\n self.color255[1] = [128, 174, 128]\r\n self.color255[2] = [241, 214, 145]\r\n self.color255[3] = [177, 122, 101]\r\n self.color255[4] = [111, 184, 210]\r\n self.color255[5] = [216, 101, 79]\r\n self.color255[6] = [221, 130, 101]\r\n self.color255[7] = [144, 238, 144]\r\n self.color255[8] = [192, 104, 88]\r\n self.color255[9] = [220, 245, 20]\r\n self.color255[10] = [78, 63, 0]\r\n self.color255[11] = [255, 250, 220]\r\n self.color255[12] = [230, 220, 70]\r\n 
self.color255[13] = [200, 200, 235]\r\n self.color255[14] = [250, 250, 210]\r\n self.color255[15] = [244, 214, 49]\r\n self.color255[16] = [0, 151, 206]\r\n self.color255[17] = [183, 156, 220]\r\n self.color255[18] = [183, 214, 211]\r\n self.color255[19] = [152, 189, 207]\r\n self.color255[20] = [178, 212, 242]\r\n self.color255[21] = [68, 172, 100]\r\n self.color255[22] = [111, 197, 131]\r\n self.color255[23] = [85, 188, 255]\r\n self.color255[24] = [0, 145, 30]\r\n self.color255[25] = [214, 230, 130]\r\n self.color255[26] = [218, 255, 255]\r\n self.color255[27] = [170, 250, 250]\r\n self.color255[28] = [140, 224, 228]\r\n self.color255[29] = [188, 65, 28]\r\n self.color255[30] = [216, 191, 216]\r\n self.color255[31] = [145, 60, 66]\r\n self.color255[32] = [150, 98, 83]\r\n self.color255[33] = [250, 250, 225]\r\n self.color255[34] = [200, 200, 215]\r\n self.color255[35] = [68, 131, 98]\r\n self.color255[36] = [83, 146, 164]\r\n self.color255[37] = [162, 115, 105]\r\n self.color255[38] = [141, 93, 137]\r\n self.color255[39] = [182, 166, 110]\r\n self.color255[40] = [188, 135, 166]\r\n self.color255[41] = [154, 150, 201]\r\n self.color255[42] = [177, 140, 190]\r\n self.color255[43] = [30, 111, 85]\r\n self.color255[44] = [210, 157, 166]\r\n self.color255[45] = [48, 129, 126]\r\n self.color255[46] = [98, 153, 112]\r\n self.color255[47] = [69, 110, 53]\r\n self.color255[48] = [166, 113, 137]\r\n self.color255[49] = [122, 101, 38]\r\n self.color255[50] = [253, 135, 192]\r\n self.color255[51] = [145, 92, 109]\r\n self.color255[52] = [46, 101, 131]\r\n self.color255[53] = [0, 108, 112]\r\n self.color255[54] = [127, 150, 88]\r\n self.color255[55] = [159, 116, 163]\r\n self.color255[56] = [125, 102, 154]\r\n self.color255[57] = [106, 174, 155]\r\n self.color255[58] = [154, 146, 83]\r\n self.color255[59] = [126, 126, 55]\r\n self.color255[60] = [201, 160, 133]\r\n self.color255[61] = [78, 152, 141]\r\n self.color255[62] = [174, 140, 103]\r\n self.color255[63] = [139, 126, 177]\r\n self.color255[64] = [148, 120, 72]\r\n self.color255[65] = [186, 135, 135]\r\n self.color255[66] = [99, 106, 24]\r\n self.color255[67] = [156, 171, 108]\r\n self.color255[68] = [64, 123, 147]\r\n self.color255[69] = [138, 95, 74]\r\n self.color255[70] = [97, 113, 158]\r\n self.color255[71] = [126, 161, 197]\r\n self.color255[72] = [194, 195, 164]\r\n self.color255[73] = [88, 106, 215]\r\n self.color255[74] = [82, 174, 128]\r\n self.color255[75] = [57, 157, 110]\r\n self.color255[76] = [60, 143, 83]\r\n self.color255[77] = [92, 162, 109]\r\n self.color255[78] = [255, 244, 209]\r\n self.color255[79] = [201, 121, 77]\r\n self.color255[80] = [70, 163, 117]\r\n self.color255[81] = [188, 91, 95]\r\n self.color255[82] = [166, 84, 94]\r\n self.color255[83] = [182, 105, 107]\r\n self.color255[84] = [229, 147, 118]\r\n self.color255[85] = [174, 122, 90]\r\n self.color255[86] = [201, 112, 73]\r\n self.color255[87] = [194, 142, 0]\r\n self.color255[88] = [241, 213, 144]\r\n self.color255[89] = [203, 179, 77]\r\n self.color255[90] = [229, 204, 109]\r\n self.color255[91] = [255, 243, 152]\r\n self.color255[92] = [209, 185, 85]\r\n self.color255[93] = [248, 223, 131]\r\n self.color255[94] = [255, 230, 138]\r\n self.color255[95] = [196, 172, 68]\r\n self.color255[96] = [255, 255, 167]\r\n self.color255[97] = [255, 250, 160]\r\n self.color255[98] = [255, 237, 145]\r\n self.color255[99] = [242, 217, 123]\r\n self.color255[100] = [222, 198, 101]\r\n self.color255[101] = [213, 124, 109]\r\n self.color255[102] = [184, 105, 108]\r\n 
self.color255[103] = [150, 208, 243]\r\n self.color255[104] = [62, 162, 114]\r\n self.color255[105] = [242, 206, 142]\r\n self.color255[106] = [250, 210, 139]\r\n self.color255[107] = [255, 255, 207]\r\n self.color255[108] = [182, 228, 255]\r\n self.color255[109] = [175, 216, 244]\r\n self.color255[110] = [197, 165, 145]\r\n self.color255[111] = [172, 138, 115]\r\n self.color255[112] = [202, 164, 140]\r\n self.color255[113] = [224, 186, 162]\r\n self.color255[114] = [255, 245, 217]\r\n self.color255[115] = [206, 110, 84]\r\n self.color255[116] = [210, 115, 89]\r\n self.color255[117] = [203, 108, 81]\r\n self.color255[118] = [233, 138, 112]\r\n self.color255[119] = [195, 100, 73]\r\n self.color255[120] = [181, 85, 57]\r\n self.color255[121] = [152, 55, 13]\r\n self.color255[122] = [159, 63, 27]\r\n self.color255[123] = [166, 70, 38]\r\n self.color255[124] = [218, 123, 97]\r\n self.color255[125] = [225, 130, 104]\r\n self.color255[126] = [224, 97, 76]\r\n self.color255[127] = [184, 122, 154]\r\n self.color255[128] = [211, 171, 143]\r\n self.color255[129] = [47, 150, 103]\r\n self.color255[130] = [173, 121, 88]\r\n self.color255[131] = [188, 95, 76]\r\n self.color255[132] = [255, 239, 172]\r\n self.color255[133] = [226, 202, 134]\r\n self.color255[134] = [253, 232, 158]\r\n self.color255[135] = [244, 217, 154]\r\n self.color255[136] = [205, 179, 108]\r\n self.color255[137] = [186, 124, 161]\r\n self.color255[138] = [255, 255, 220]\r\n self.color255[139] = [234, 234, 194]\r\n self.color255[140] = [204, 142, 178]\r\n self.color255[141] = [180, 119, 153]\r\n self.color255[142] = [216, 132, 105]\r\n self.color255[143] = [255, 253, 229]\r\n self.color255[144] = [205, 167, 142]\r\n self.color255[145] = [204, 168, 143]\r\n self.color255[146] = [255, 224, 199]\r\n self.color255[147] = [139, 150, 98]\r\n self.color255[148] = [249, 180, 111]\r\n self.color255[149] = [157, 108, 162]\r\n self.color255[150] = [203, 136, 116]\r\n self.color255[151] = [185, 102, 83]\r\n self.color255[152] = [247, 182, 164]\r\n self.color255[153] = [222, 154, 132]\r\n self.color255[154] = [124, 186, 223]\r\n self.color255[155] = [249, 186, 150]\r\n self.color255[156] = [244, 170, 147]\r\n self.color255[157] = [255, 181, 158]\r\n self.color255[158] = [255, 190, 165]\r\n self.color255[159] = [227, 153, 130]\r\n self.color255[160] = [213, 141, 113]\r\n self.color255[161] = [193, 123, 103]\r\n self.color255[162] = [216, 146, 127]\r\n self.color255[163] = [230, 158, 140]\r\n self.color255[164] = [245, 172, 147]\r\n self.color255[165] = [241, 172, 151]\r\n self.color255[166] = [177, 124, 92]\r\n self.color255[167] = [171, 85, 68]\r\n self.color255[168] = [217, 198, 131]\r\n self.color255[169] = [212, 188, 102]\r\n self.color255[170] = [185, 135, 134]\r\n self.color255[171] = [198, 175, 125]\r\n self.color255[172] = [194, 98, 79]\r\n self.color255[173] = [255, 238, 170]\r\n self.color255[174] = [206, 111, 93]\r\n self.color255[175] = [216, 186, 0]\r\n self.color255[176] = [255, 226, 77]\r\n self.color255[177] = [255, 243, 106]\r\n self.color255[178] = [255, 234, 92]\r\n self.color255[179] = [240, 210, 35]\r\n self.color255[180] = [224, 194, 0]\r\n self.color255[181] = [213, 99, 79]\r\n self.color255[182] = [217, 102, 81]\r\n self.color255[183] = [0, 147, 202]\r\n self.color255[184] = [0, 122, 171]\r\n self.color255[185] = [186, 77, 64]\r\n self.color255[186] = [240, 255, 30]\r\n self.color255[187] = [185, 232, 61]\r\n self.color255[188] = [0, 226, 255]\r\n self.color255[189] = [251, 159, 255]\r\n self.color255[190] = [230, 169, 
29]\r\n self.color255[191] = [0, 194, 113]\r\n self.color255[192] = [104, 160, 249]\r\n self.color255[193] = [221, 108, 158]\r\n self.color255[194] = [137, 142, 0]\r\n self.color255[195] = [230, 70, 0]\r\n self.color255[196] = [0, 147, 0]\r\n self.color255[197] = [0, 147, 248]\r\n self.color255[198] = [231, 0, 206]\r\n self.color255[199] = [129, 78, 0]\r\n self.color255[200] = [0, 116, 0]\r\n self.color255[201] = [0, 0, 255]\r\n self.color255[202] = [157, 0, 0]\r\n self.color255[203] = [100, 100, 130]\r\n self.color255[204] = [205, 205, 100]\r\n self.color255[205] = [255, 255, 0]\r\n\r\n return self.color255", "def recolorRC(src,dst):\n b,g,r=cv2.split(src)\n cv2.addWeighted(b,0.5,g,0.5,0,b) #arguements(in order):first src array,a weight applied\n # to array, scnd src array, a weight applied to array\n # a constant added to the result and a destination array\n cv2.merge((b,b,r),dest) #replace b and g with modified b(which has both and g)", "def rgb_to_ansi256(r, g, b):\n if r == g and g == b:\n if r < 8:\n return 16\n if r > 248:\n return 231\n\n return round(((r - 8) / 247.0) * 24) + 232\n\n ansi_r = 36 * round(r / 255.0 * 5.0)\n ansi_g = 6 * round(g / 255.0 * 5.0)\n ansi_b = round(b / 255.0 * 5.0)\n ansi = 16 + ansi_r + ansi_g + ansi_b\n return ansi", "def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))", "def resetColor(self):\n self.setColor(255, 255, 255 ,255)", "def addColors(*colorMultPairs):\n netRGB = [0, 0, 0]\n for color, mult in colorMultPairs:\n colorRGB = _getTkWdg().winfo_rgb(color)\n netRGB = [netRGB[ii] + (mult * colorRGB[ii]) for ii in range(3)]\n truncRGB = [max(min(int(val), 0xFFFF), 0) for val in netRGB]\n retColor = \"#%04x%04x%04x\" % tuple(truncRGB)\n #print \"mixColors(%r); netRGB=%s; truncRGB=%s; retColor=%r\" % (colorMultPairs, netRGB, truncRGB, retColor)\n return retColor", "def fill(self, color):", "def fill(self, colour: int, /) -> None:", "def colors_to_string(colors):\n return ''.join(['%02x%02x%02x' % (r,g,b) for r,g,b in colors])", "def rgbcolor(h, f):\n # q = 1 - f\n # t = f\n if h == 0:\n return v, f, p\n elif h == 1:\n return 1 - f, v, p\n elif h == 2:\n return p, v, f\n elif h == 3:\n return p, 1 - f, v\n elif h == 4:\n return f, p, v\n elif h == 5:\n return v, p, 1 - f", "def packl(self):\n a = 255\n r = IColor.scaleClipl(self.r)\n g = IColor.scaleClipl(self.g)\n b = IColor.scaleClipl(self.b)\n return (a << 24) | (r << 16) | (g << 8) | (b)", "def label_rgb(colors):\n return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))", "def setRgb ( self, r, g = 0.0, b = 0.0 ):\n self.setRgba( r, g, b )", "def IntermediateColor(startcol, targetcol, frac):\n if frac < 0:\n frac = 0\n if frac >= 1.0:\n frac = 1.0\n sc = MakeColorTuple(startcol)\n tc = MakeColorTuple(targetcol)\n dR = tc[0] - sc[0]\n dG = tc[1] - sc[1]\n dB = tc[2] - sc[2]\n R = sc[0] + dR * frac\n G = sc[1] + dG * frac\n B = sc[2] + dB * frac\n return \"#%02x%02x%02x\" % (R, G, B)", "def __fill_color(self, uol_c, uil_c, lol_c, lil_c):\n self.__fill_lip_lines(uol_c, uil_c)\n self.__fill_lip_lines(lol_c, lil_c)\n self.__add_color(1)\n self.__fill_lip_solid(uol_c, uil_c)\n self.__fill_lip_solid(lol_c, lil_c)\n self.__smoothen_color(uol_c, uil_c)\n self.__smoothen_color(lol_c, lil_c)", "def color(step: int=10) -> Tuple[int, int, int]:\n # Randomly seed the r g b values\n r, g, b = (random_uniform(0, 255), random_uniform(0, 255),\n random_uniform(0, 255))\n\n # Randomly determine if each r g and b value is increasing or not\n r_inc = True\n g_inc = True\n b_inc 
= True\n r_step = random_uniform(step)\n g_step = random_uniform(step)\n b_step = random_uniform(step)\n\n # Yield the initial r, g, b values\n yield r, g, b\n\n # Loop and yeild forever\n while True:\n # If r is increasing\n if r_inc:\n # Increment r by the step\n r += r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to decreasing\n r_inc = r < 255 - r_step\n # If r is decreasing\n else:\n # Decrement r by the step\n r -= r_step\n # Ensure that the next step will be within the limits\n # if not then set the flag to increasing\n r_inc = r < r_step\n\n # See above\n if g_inc:\n g += g_step\n g_inc = g < 255 - g_step\n else:\n g -= g_step\n g_inc = g < g_step\n\n # See above\n if b_inc:\n b += b_step\n b_inc = b < 255 - b_step\n else:\n b -= b_step\n b_inc = b < b_step\n\n # Yield the red, green, and blue values\n yield r, g, b", "def unconvert_from_RGB_255(colors):\n un_rgb_color = (colors[0]/(255.0),\n colors[1]/(255.0),\n colors[2]/(255.0))\n\n return un_rgb_color", "def rgb_to_ansi16(r, g, b, use_bright=False):\n ansi_b = round(b / 255.0) << 2\n ansi_g = round(g / 255.0) << 1\n ansi_r = round(r / 255.0)\n ansi = (90 if use_bright else 30) + (ansi_b | ansi_g | ansi_r)\n\n return ansi", "def change_color(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if color_mode == 'RGB':\n color_red = random.randint(-25,25)\n color_green = random.randint(-25,25)\n color_blue = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = (color[0]+color_red,color[1]+color_green,color[2]+color_blue)\n else: #color_mode == 'L':\n color_diff = random.randint(-25,25)\n color = mutated_genome[index][0]\n newcolor = color+color_diff\n mutated_genome[index][0] = newcolor", "def setColors255(self):\n #productive\n profprint()\n self.color255= [[0,0,0] for i in range(205)]\n self.color255[0]=[221,108,158]\n self.color255[1]=[128,174,128]\n self.color255[2]=[241,214,145]\n self.color255[3]=[177,122,101]\n self.color255[4]=[111,184,210]\n self.color255[5]=[216,101,79]\n self.color255[6]=[221,130,101]\n self.color255[7]=[144,238,144]\n self.color255[8]=[192,104,88]\n self.color255[9]=[220,245,20]\n self.color255[10]=[78,63,0]\n self.color255[11]=[255,250,220]\n self.color255[12]=[230,220,70]\n self.color255[13]=[200,200,235]\n self.color255[14]=[250,250,210]\n self.color255[15]=[244,214,49]\n self.color255[16]=[0,151,206]\n self.color255[17]=[183,156,220]\n self.color255[18]=[183,214,211]\n self.color255[19]=[152,189,207]\n self.color255[20]=[178,212,242]\n self.color255[21]=[68,172,100]\n self.color255[22]=[111,197,131]\n self.color255[23]=[85,188,255]\n self.color255[24]=[0,145,30]\n self.color255[25]=[214,230,130]\n self.color255[26]=[218,255,255]\n self.color255[27]=[170,250,250]\n self.color255[28]=[140,224,228]\n self.color255[29]=[188,65,28]\n self.color255[30]=[216,191,216]\n self.color255[31]=[145,60,66]\n self.color255[32]=[150,98,83]\n self.color255[33]=[250,250,225]\n self.color255[34]=[200,200,215]\n self.color255[35]=[68,131,98]\n self.color255[36]=[83,146,164]\n self.color255[37]=[162,115,105]\n self.color255[38]=[141,93,137]\n self.color255[39]=[182,166,110]\n self.color255[40]=[188,135,166]\n self.color255[41]=[154,150,201]\n self.color255[42]=[177,140,190]\n self.color255[43]=[30,111,85]\n self.color255[44]=[210,157,166]\n self.color255[45]=[48,129,126]\n self.color255[46]=[98,153,112]\n self.color255[47]=[69,110,53]\n self.color255[48]=[166,113,137]\n self.color255[49]=[122,101,38]\n self.color255[50]=[253,135,192]\n 
self.color255[51]=[145,92,109]\n self.color255[52]=[46,101,131]\n self.color255[53]=[0,108,112]\n self.color255[54]=[127,150,88]\n self.color255[55]=[159,116,163]\n self.color255[56]=[125,102,154]\n self.color255[57]=[106,174,155]\n self.color255[58]=[154,146,83]\n self.color255[59]=[126,126,55]\n self.color255[60]=[201,160,133]\n self.color255[61]=[78,152,141]\n self.color255[62]=[174,140,103]\n self.color255[63]=[139,126,177]\n self.color255[64]=[148,120,72]\n self.color255[65]=[186,135,135]\n self.color255[66]=[99,106,24]\n self.color255[67]=[156,171,108]\n self.color255[68]=[64,123,147]\n self.color255[69]=[138,95,74]\n self.color255[70]=[97,113,158]\n self.color255[71]=[126,161,197]\n self.color255[72]=[194,195,164]\n self.color255[73]=[88,106,215]\n self.color255[74]=[82,174,128]\n self.color255[75]=[57,157,110]\n self.color255[76]=[60,143,83]\n self.color255[77]=[92,162,109]\n self.color255[78]=[255,244,209]\n self.color255[79]=[201,121,77]\n self.color255[80]=[70,163,117]\n self.color255[81]=[188,91,95]\n self.color255[82]=[166,84,94]\n self.color255[83]=[182,105,107]\n self.color255[84]=[229,147,118]\n self.color255[85]=[174,122,90]\n self.color255[86]=[201,112,73]\n self.color255[87]=[194,142,0]\n self.color255[88]=[241,213,144]\n self.color255[89]=[203,179,77]\n self.color255[90]=[229,204,109]\n self.color255[91]=[255,243,152]\n self.color255[92]=[209,185,85]\n self.color255[93]=[248,223,131]\n self.color255[94]=[255,230,138]\n self.color255[95]=[196,172,68]\n self.color255[96]=[255,255,167]\n self.color255[97]=[255,250,160]\n self.color255[98]=[255,237,145]\n self.color255[99]=[242,217,123]\n self.color255[100]=[222,198,101]\n self.color255[101]=[213,124,109]\n self.color255[102]=[184,105,108]\n self.color255[103]=[150,208,243]\n self.color255[104]=[62,162,114]\n self.color255[105]=[242,206,142]\n self.color255[106]=[250,210,139]\n self.color255[107]=[255,255,207]\n self.color255[108]=[182,228,255]\n self.color255[109]=[175,216,244]\n self.color255[110]=[197,165,145]\n self.color255[111]=[172,138,115]\n self.color255[112]=[202,164,140]\n self.color255[113]=[224,186,162]\n self.color255[114]=[255,245,217]\n self.color255[115]=[206,110,84]\n self.color255[116]=[210,115,89]\n self.color255[117]=[203,108,81]\n self.color255[118]=[233,138,112]\n self.color255[119]=[195,100,73]\n self.color255[120]=[181,85,57]\n self.color255[121]=[152,55,13]\n self.color255[122]=[159,63,27]\n self.color255[123]=[166,70,38]\n self.color255[124]=[218,123,97]\n self.color255[125]=[225,130,104]\n self.color255[126]=[224,97,76]\n self.color255[127]=[184,122,154]\n self.color255[128]=[211,171,143]\n self.color255[129]=[47,150,103]\n self.color255[130]=[173,121,88]\n self.color255[131]=[188,95,76]\n self.color255[132]=[255,239,172]\n self.color255[133]=[226,202,134]\n self.color255[134]=[253,232,158]\n self.color255[135]=[244,217,154]\n self.color255[136]=[205,179,108]\n self.color255[137]=[186,124,161]\n self.color255[138]=[255,255,220]\n self.color255[139]=[234,234,194]\n self.color255[140]=[204,142,178]\n self.color255[141]=[180,119,153]\n self.color255[142]=[216,132,105]\n self.color255[143]=[255,253,229]\n self.color255[144]=[205,167,142]\n self.color255[145]=[204,168,143]\n self.color255[146]=[255,224,199]\n self.color255[147]=[139,150,98]\n self.color255[148]=[249,180,111]\n self.color255[149]=[157,108,162]\n self.color255[150]=[203,136,116]\n self.color255[151]=[185,102,83]\n self.color255[152]=[247,182,164]\n self.color255[153]=[222,154,132]\n self.color255[154]=[124,186,223]\n 
self.color255[155]=[249,186,150]\n self.color255[156]=[244,170,147]\n self.color255[157]=[255,181,158]\n self.color255[158]=[255,190,165]\n self.color255[159]=[227,153,130]\n self.color255[160]=[213,141,113]\n self.color255[161]=[193,123,103]\n self.color255[162]=[216,146,127]\n self.color255[163]=[230,158,140]\n self.color255[164]=[245,172,147]\n self.color255[165]=[241,172,151]\n self.color255[166]=[177,124,92]\n self.color255[167]=[171,85,68]\n self.color255[168]=[217,198,131]\n self.color255[169]=[212,188,102]\n self.color255[170]=[185,135,134]\n self.color255[171]=[198,175,125]\n self.color255[172]=[194,98,79]\n self.color255[173]=[255,238,170]\n self.color255[174]=[206,111,93]\n self.color255[175]=[216,186,0]\n self.color255[176]=[255,226,77]\n self.color255[177]=[255,243,106]\n self.color255[178]=[255,234,92]\n self.color255[179]=[240,210,35]\n self.color255[180]=[224,194,0]\n self.color255[181]=[213,99,79]\n self.color255[182]=[217,102,81]\n self.color255[183]=[0,147,202]\n self.color255[184]=[0,122,171]\n self.color255[185]=[186,77,64]\n self.color255[186]=[240,255,30]\n self.color255[187]=[185,232,61]\n self.color255[188]=[0,226,255]\n self.color255[189]=[251,159,255]\n self.color255[190]=[230,169,29]\n self.color255[191]=[0,194,113]\n self.color255[192]=[104,160,249]\n self.color255[193]=[221,108,158]\n self.color255[194]=[137,142,0]\n self.color255[195]=[230,70,0]\n self.color255[196]=[0,147,0]\n self.color255[197]=[0,147,248]\n self.color255[198]=[231,0,206]\n self.color255[199]=[129,78,0]\n self.color255[200]=[0,116,0]\n self.color255[201]=[0,0,255]\n self.color255[202]=[157,0,0]\n self.color255[203]=[100,100,130]\n self.color255[204]=[205,205,100]\n \n return self.color255", "def color(value):\r\n return 'RGB({}, {}, {})'.format(value.red(), value.blue(), value.green())", "def color(c):\n\n if isinstance(c, tuple) and len(c) == 4:\n return c\n\n if c is None:\n return c\n\n if isinstance(c, basestring):\n if c[0] == '#':\n c = c[1:]\n\n if len(c) == 6:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = int(c[6]+c[7], 16)\n elif len(c) == 3:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = 255\n elif len(c) == 4:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = int(c[3], 16) * 0x11\n else:\n raise Exception(\"Color string must be 3, 4, 6, or 8 hex digits long.\")\n\n return (r, g, b, a)\n\n raise Exception(\"Not a color: %r\" % (c,))", "def RGB(r, g, b):\n rm = min(255, Int(r))\n gm = min(255, Int(g))\n bm = min(255, Int(b))\n #\n if rm < 0 or gm < 0 or bm < 0:\n raise ValueError(\"RGB values must be >= 0, were (%s, %s, %s)\" % (r, g, b))\n #\n return ((bm * 256) + gm) * 256 + rm", "def rainbow_all(self):\n while True:\n for g in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(255, g, 0)\n time.sleep(self.speed)\n\n for r in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(r, 255, 0)\n time.sleep(self.speed)\n\n for b in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(0, 255, b)\n time.sleep(self.speed)\n\n for g in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(0, g, 255)\n time.sleep(self.speed)\n\n for r in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(r, 0, 255)\n time.sleep(self.speed)\n\n for b in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(255, 0, b)\n time.sleep(self.speed)", "def _color(self, args):", "def change(widget, colors): \n\t\n 
new_val = '#'\n for name in ('red', 'green', 'blue'):\n new_val += colors[name].get()\n widget['bg'] = new_val", "def rgb_pack(r: int, g: int, b: int) -> int:\n rgb = (r << 16) | (g << 8) | b\n return rgb", "def normalize_rgb_colors_to_hex(css):\n log.debug(\"Converting all rgba to hexadecimal color values.\")\n regex = re.compile(r\"rgb\\s*\\(\\s*([0-9,\\s]+)\\s*\\)\")\n match = regex.search(css)\n while match:\n colors = map(lambda s: s.strip(), match.group(1).split(\",\"))\n hexcolor = '#%.2x%.2x%.2x' % tuple(map(int, colors))\n css = css.replace(match.group(), hexcolor)\n match = regex.search(css)\n return css", "def combine_colors(c1, c2, factor=0.5):\n c3 = QtGui.QColor()\n c3.setRed(int((factor * c1.red() + (1 - factor) * c2.red())))\n c3.setGreen(int((factor * c1.green() + (1 - factor) * c2.green())))\n c3.setBlue(int((factor * c1.blue() + (1 - factor) * c2.blue())))\n return c3", "def combine_colors(c1, c2, factor=0.5):\n c3 = QtGui.QColor()\n c3.setRed(int((factor * c1.red() + (1 - factor) * c2.red())))\n c3.setGreen(int((factor * c1.green() + (1 - factor) * c2.green())))\n c3.setBlue(int((factor * c1.blue() + (1 - factor) * c2.blue())))\n return c3", "def default_colors():\n # default_colors = [\n # # r, g, b, a\n # [92, 192, 98, 0.5],\n # [90, 155, 212, 0.5],\n # [246, 236, 86, 0.6],\n # [241, 90, 96, 0.4],\n # [255, 117, 0, 0.3],\n # [82, 82, 190, 0.2],\n # ]\n\n default_colors = [\n # r, g, b, a\n [188, 114, 3, 0.5],\n [3, 133, 188, 0.5],\n [155, 9, 118, 0.6],\n [155, 53, 9, 0.4],\n [4, 140, 128, 0.3],\n [140, 8, 8, 0.2],\n ]\n\n default_colors = [\n [i[0] / 255.0, i[1] / 255.0, i[2] / 255.0, i[3]]\n for i in default_colors\n ]\n\n return default_colors", "def reformatColor(self, colorStr):\n if type(colorStr) is str:\n if colorStr.startswith('#'):\n colorStr = colorStr.replace('#', '')\n else:\n raise Exception('color is not hex format')\n r = int(colorStr[:2], 16)\n g = int(colorStr[2:4], 16)\n b = int(colorStr[4:6], 16)\n return r, g, b", "def from_rgb(r, g, b) -> str:\n return \"#{0:02x}{1:02x}{2:02x}\".format(r, g, b)", "def _init_colors(self):\n self.clr_primary = None\n self.clr_secondary = 'green'\n self.clr_tertiary = 'cyan'\n self.clr_quaternary = 'yellow'\n self.clr_bold = 'cyan'\n self.clr_code = 'cyan'\n self.clr_error = 'red'\n self.clr_header = 'yellow'\n self.clr_link = 'green'\n self.clr_list = 'cyan'\n self.clr_message = None\n self.clr_num_comments = 'green'\n self.clr_num_points = 'green'\n self.clr_tag = 'cyan'\n self.clr_time = 'yellow'\n self.clr_title = None\n self.clr_tooltip = None\n self.clr_user = 'cyan'\n self.clr_view_link = 'magenta'\n self.clr_view_index = 'magenta'", "def fill_rgb(self, r, g, b, start=0, end=0):\n if start < 0:\n start = 0\n if end == 0 or end > self.last_index:\n end = self.last_index\n for led in range(start, end + 1): # since 0-index include end in range\n self.__set_internal(led, r, g, b)", "def glColor(self, r, g, b):\n if 0 <= r <= 1 or 0 <= g <= 1 or 0 <= b <= 1:\n self.vr = ceil(r * 255)\n self.vg = ceil(g * 255)\n self.vb = ceil(b * 255)\n else:\n print(\"Please insert numbers between 0 and 1\")\n sys.exit()", "def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'", "def glClearColor(self, r, g, b):\n if 0 <= r <= 1 or 0 
<= g <= 1 or 0 <= b <= 1:\n self.r = ceil(r * 255)\n self.g = ceil(g * 255)\n self.b = ceil(b * 255)\n else:\n print(\"Please insert numbers between 0 and 1\")\n sys.exit()", "def random_color():\n colormode(255)\n return randint(0, 255), randint(0, 255), randint(0, 255)", "def revert_color(cls, colors):\n # 0.5 is to map the color to the center of the range\n return [int((c+0.5) / cls.color_level * 256) for c in colors]", "def recolorRGV(src,dst):\n b,g,r=cv2.split(src)\n cv2.min(b,g,b) # min() function computes the per-element minimum of the first two arguments\n # and writes them to the third argument\n cv2.min(b,r,b)\n cv2.merge((b,g,r),dest) # b is modified to the minimum of b,g,r at every pixel", "def set_pattern(colors=('green', 'blue', 'red')): # (10)\n for i in range(0, int(ceil(float(NUM_LEDS)/float(len(colors))))):\n for color in colors:\n push_color(color)", "def _proc_color(self, tokens):\n\n keys = tokens.keys()\n if \"red\" in keys: # RGB(A)\n rr, gg, bb = tokens[\"red\"], tokens[\"green\"], tokens[\"blue\"]\n hex2int = lambda h: int(h, 16)\n if \"alpha\" in keys:\n a = tokens[\"alpha\"]\n c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))\n else:\n c = str((hex2int(rr), hex2int(gg), hex2int(bb)))\n elif \"hue\" in keys: # HSV\n r, g, b = hsv_to_rgb(tokens[\"hue\"],\n tokens[\"saturation\"],\n tokens[\"value\"])\n c = str((int(r*255), int(g*255), int(b*255)))\n else:\n c = tokens[\"color\"]\n\n return c", "def rgb_to_color(*rgb):\n if(len(rgb)==1):\n r,g,b = rgb[0]\n else:\n r,g,b = rgb\n return \"#%02x%02x%02x\" % (r,g,b)", "def color_negative_red_positive_green(val):\n if val < 0:\n color = 'red'\n elif val > 0:\n color = 'green'\n else:\n color = 'black'\n\n return 'color: %s' % color", "def rgbToHex ( r, g = 0.0, b = 0.0 ):\n # Check if argument is list\n if isinstance(r, list):\n g = r[1]\n b = r[2]\n r = r[0]\n if isinstance( r, float ):\n r *= 255.0\n if isinstance( g, float ):\n g *= 255.0\n if isinstance( b, float ):\n b *= 255.0\n return \"%02x%02x%02x\" % ( round( r ), round( g ), round( b ))", "def calibrate_rgb(self, r, g, b):\n new_r = r*self._calibrate[\"r\"] \n new_g = g*self._calibrate[\"g\"]\n new_b = b*self._calibrate[\"b\"]\n return (new_r,new_g,new_b)", "def colorize(image, newColor):\n image = image.copy()\n\n # zero out RGB values\n image.fill((0, 0, 0, 255), None, pygame.BLEND_RGBA_MULT)\n # add in new RGB values\n image.fill(newColor[0:3] + [0,], None, pygame.BLEND_RGBA_ADD)\n\n return image", "def create_unique_color_uchar(tag, hue_step=0.41):\n r, g, b = create_unique_color_float(tag, hue_step)\n return int(255*r), int(255*g), int(255*b)", "def colormixer(colors, weights=None):\n def _to_hex(v):\n v_hex = hex(v)[2:]\n if len(v_hex) == 1:\n v_hex = \"0\" + v_hex\n return v_hex\n\n # Compute mean intensities for red, green and blue\n if weights is None:\n r = int(np.mean([int(c[1:3], 16) for c in colors]))\n g = int(np.mean([int(c[3:5], 16) for c in colors]))\n b = int(np.mean([int(c[5:7], 16) for c in colors]))\n else:\n r = int(sum([int(c[1:3], 16) * w for c, w in zip(colors, weights)]) / sum(weights))\n g = int(sum([int(c[3:5], 16) * w for c, w in zip(colors, weights)]) / sum(weights))\n b = int(sum([int(c[5:7], 16) * w for c, w in zip(colors, weights)]) / sum(weights))\n \n # Take mean of each and convert back to hex\n return '#' + _to_hex(r) + _to_hex(g) + _to_hex(b)", "def color_negative_red(val):\n if val == 'k':\n color = 'red' \n else:\n color = 'yellow'\n return ['color: %s' % color]*3", "def get_rgb_normalized(r, g, b, a=1.0):\n 
return r / 255.0, g / 255.0, b / 255.0, a", "def rgb_565(self):\n return (\n (int(self.red * 0xF800) & 0xF800) |\n (int(self.green * 0x07E0) & 0x07E0) |\n (int(self.blue * 0x001F) & 0x001F))", "def test_assembleColor(self):\n self.assertEqual(\n irc.assembleFormattedText(A.fg.red[A.bg.blue[\"hello\"]]),\n \"\\x0f\\x0305,02hello\",\n )", "def _from_rgb(self, rgb):\r\n return \"#%02x%02x%02x\" % rgb", "def create_color():\n r = random.randint(0,255)\n g = random.randint(0,255)\n b = random.randint(0,255)\n a = random.randint(0,255)\n return introcs.RGB(r,g,b,a)", "def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))" ]
[ "0.64377636", "0.64377636", "0.64377636", "0.6364302", "0.6349941", "0.63338196", "0.6211546", "0.6143853", "0.6105021", "0.61023855", "0.61022055", "0.604399", "0.60202074", "0.59963423", "0.5995744", "0.5948604", "0.5905812", "0.59024096", "0.5871113", "0.5865381", "0.58587396", "0.5849928", "0.5790233", "0.577555", "0.5775024", "0.5770818", "0.5755066", "0.57245517", "0.57170194", "0.57158446", "0.5676525", "0.5671496", "0.566357", "0.56464934", "0.56379133", "0.56363493", "0.5633417", "0.5622261", "0.5621576", "0.5616633", "0.5609301", "0.56029654", "0.5598874", "0.55984074", "0.5597183", "0.5587633", "0.5571618", "0.55639", "0.5560921", "0.55603486", "0.554384", "0.55382043", "0.55381936", "0.5518484", "0.5516351", "0.5500107", "0.5499479", "0.54983604", "0.547251", "0.54699135", "0.54627055", "0.54474586", "0.544473", "0.54408187", "0.54405797", "0.54362816", "0.5429488", "0.5417087", "0.5415945", "0.5414931", "0.54148436", "0.5408335", "0.54078317", "0.5400248", "0.5398956", "0.5397154", "0.5392102", "0.53889227", "0.5386328", "0.5385033", "0.53828776", "0.537499", "0.5371485", "0.5370518", "0.53659457", "0.5362467", "0.53580457", "0.53577566", "0.5354663", "0.5349667", "0.53404355", "0.5336262", "0.53360426", "0.53359425", "0.5335196", "0.5334805", "0.5333479", "0.5330766", "0.53269446", "0.53252995" ]
0.5423834
67
Get the tokenized format_string. Tokenizing is resource intensive so we only do it once and cache it
def tokens(self, format_string):
        if format_string not in self.format_string_cache:
            tokens = list(re.finditer(self.reg_ex, format_string))
            self.format_string_cache[format_string] = tokens
        return self.format_string_cache[format_string]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_format(self, token):\n if token in self._formats:\n return self._formats[token]\n\n if self._style is None:\n result = self._get_format_from_document(token, self._document)\n else:\n result = self._get_format_from_style(token, self._style)\n\n self._formats[token] = result\n return result", "def get_format(self):\n return self._format[0]", "def get_format(cls):\n return cls._format", "def reformat(ctx):\n pass", "def token(self) -> str:", "def _get_format_from_style(self, token, style):\n result = QtGui.QTextCharFormat()\n for key, value in style.style_for_token(token).items():\n if value:\n if key == 'color':\n result.setForeground(self._get_brush(value))\n elif key == 'bgcolor':\n result.setBackground(self._get_brush(value))\n elif key == 'bold':\n result.setFontWeight(QtGui.QFont.Bold)\n elif key == 'italic':\n result.setFontItalic(True)\n elif key == 'underline':\n result.setUnderlineStyle(\n QtGui.QTextCharFormat.SingleUnderline)\n elif key == 'sans':\n result.setFontStyleHint(QtGui.QFont.SansSerif)\n elif key == 'roman':\n result.setFontStyleHint(QtGui.QFont.Times)\n elif key == 'mono':\n result.setFontStyleHint(QtGui.QFont.TypeWriter)\n return result", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def get_template(self, format):\n for pattern, converter in self._patterns:\n if converter.format == format:\n template = pattern.generate('{name}')\n if template:\n return template\n return '{name}' f'.{format}'", "def _get_format_from_document(self, token, document):\n code, html = next(self._formatter._format_lines([(token, u'dummy')]))\n self._document.setHtml(html)\n return QtGui.QTextCursor(self._document).charFormat()", "def get_format(self):\n pass", "def asformat(self, format):", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def token_lookup():\n Tokenize = {'.': '||Period||',\n '.': '||Period||',\n ',': '||Comma||',\n '\"': '||Quotation_Mark||', \n ';': '||Semicolon||', \n '!': '||Exclamation_mark||', \n '?': '||Question_mark||', \n '(': '||Left_Parentheses||', \n ')': '||Right_Parentheses||', \n '--': '||Dash||',\n '\\n': '||Return||'} \n \n return Tokenize", "def tokenize_text(self, s):\n tokens = []\n # We could just have a \"while s:\" here instead of \"for line: while\n # line:\", but for really large log messages with 
heavy\n # tokenization, the cost in both performance and memory\n # consumption of the approach taken was atrocious.\n for line in s.replace(\"\\r\\n\", \"\\n\").split(\"\\n\"):\n line = line + \"\\n\"\n while line:\n best_match = best_conv = best_userdata = None\n for test in self._formatters:\n match = test[0].search(line)\n # If we find and match and (a) its our first one, or (b) it\n # matches text earlier than our previous best match, or (c) it\n # matches text at the same location as our previous best match\n # but extends to cover more text than that match, then this is\n # our new best match.\n #\n # Implied here is that when multiple formatters match exactly\n # the same text, the first formatter in the registration list wins.\n if match and (\n (best_match is None)\n or (match.start() < best_match.start())\n or (\n (match.start() == best_match.start())\n and (match.end() > best_match.end())\n )\n ):\n best_match = match\n best_conv = test[1]\n best_userdata = test[2]\n # If we found a match...\n if best_match:\n # ... add any non-matching stuff first, then the matching bit.\n start = best_match.start()\n end = best_match.end()\n if start > 0:\n tokens.append(\n _item(match=line[:start], converter=self.format_text, userdata=None)\n )\n tokens.append(\n _item(match=best_match, converter=best_conv, userdata=best_userdata)\n )\n line = line[end:]\n else:\n # Otherwise, just add the rest of the string.\n tokens.append(_item(match=line, converter=self.format_text, userdata=None))\n line = \"\"\n return ViewVCHtmlFormatterTokens(tokens)", "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def register_str_format(\n tag: Tag, conformer: Optional[Conformer] = None\n) -> Callable[[ValidatorFn], ValidatorFn]:\n\n def create_str_format(f: ValidatorFn) -> ValidatorFn:\n with _STR_FORMAT_LOCK:\n _STR_FORMATS[tag] = StrFormat(f, conformer=conformer)\n return f\n\n return create_str_format", "def format(self):\n return self._format", "def compile(format):\n try:\n return _cache[format]\n except KeyError:\n _cache[format] = retval = SF_Pattern.__new__(SF_Pattern, format)\n return retval", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def format_using_current_token(service, token_etag, token_name):\n is_current = is_service_current(service, token_etag, token_name)\n if is_current:\n return terminal.success('Current')\n else:\n return 'Not Current'", "def _interpolate(format):\n from tokenize import tokenprog\n\n def matchorfail(text, pos):\n match = tokenprog.match(text, pos)\n if match is None:\n raise _ItplError(text, pos)\n return match, match.end()\n\n namechars = \"abcdefghijklmnopqrstuvwxyz\" \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_\";\n chunks = []\n pos = 0\n\n while 1:\n dollar = format.find(\"$\", pos)\n if dollar < 0: \n break\n nextchar = format[dollar + 1]\n\n if nextchar == \"{\":\n chunks.append((0, format[pos:dollar]))\n pos, level = dollar + 2, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token == \"{\": \n level = level + 1\n elif token == 
\"}\": \n level = level - 1\n chunks.append((1, format[dollar + 2:pos - 1]))\n\n elif nextchar in namechars:\n chunks.append((0, format[pos:dollar]))\n match, pos = matchorfail(format, dollar + 1)\n while pos < len(format):\n if format[pos] == \".\" and \\\n pos + 1 < len(format) and format[pos + 1] in namechars:\n match, pos = matchorfail(format, pos + 1)\n elif format[pos] in \"([\":\n pos, level = pos + 1, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token[0] in \"([\": \n level = level + 1\n elif token[0] in \")]\": \n level = level - 1\n else: \n break\n chunks.append((1, format[dollar + 1:pos]))\n else:\n chunks.append((0, format[pos:dollar + 1]))\n pos = dollar + 1 + (nextchar == \"$\")\n\n if pos < len(format): \n chunks.append((0, format[pos:]))\n return chunks", "def format( self ) :\n\n return( self.__format )", "def format(self):\n return self[\"format\"]", "def format(self):\n return self[\"format\"]", "def get_formatted_string(self, input_string):\n if isinstance(input_string, str):\n try:\n return self.get_processed_string(input_string)\n except KeyError as err:\n # Wrapping the KeyError into a less cryptic error for end-user\n # friendliness\n missing_key = err.args[0]\n raise KeyNotInContextError(\n f'Unable to format \\'{input_string}\\' with '\n f'{{{missing_key}}}, because '\n f'context[\\'{missing_key}\\'] doesn\\'t exist') from err\n else:\n raise TypeError(f\"can only format on strings. {input_string} is a \"\n f\"{type(input_string)} instead.\")", "def get_formatter(self, group):\n return getattr(self, \"format_\" + group + \"_standings\")", "def re_format(self):\n return self._re.pattern", "def format(self) -> str:", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def format(self) -> str:\n return pulumi.get(self, \"format\")", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatsrc(self):\n return self[\"formatsrc\"]", "def get_formatter(style):\n if style == 'authoryear':\n return AuthorYearFormatter\n return AuthorYearFormatter", "def get_processed_string(self, input_string):\n if input_string[:6] == '[sic]\"':\n return input_string[6: -1]\n else:\n return input_string.format(**self)", "def __format__(self, format_spec):\n if format_spec == \"polite\":\n return self.polite\n elif format_spec == \"casual\":\n return self.casual\n else:\n # Using string addition here to avoid triggering flake8-sfs\n # while still giving a meaningful self-contained example:\n raise ValueError(format_spec + \" not a format defined by Client object\")", "def get_format_identifier(self, message_type: str) -> str:", "def __init__(self, format_string):\r\n if not isinstance(format_string, Compatibility.string):\r\n raise TypeError('format_string should be a string, instead got %s' % type(format_string))\r\n self._re_pattern, self._applicators = self._preprocess_format_string(format_string)\r\n self._re = re.compile(self._re_pattern)", "def get(self):\n return get_msg_form(config['format_path'])", "def getFormatManager(self) -> ghidra.app.util.viewer.format.FormatManager:\n 
...", "def __normalize(self, ctx: commands.Context, format: str) -> str:\n\t\t# convert to lowercase\n\t\tlower_format = format.lower()\n\t\t# check if inputted format is recognized\n\t\tif lower_format in self.formats:\n\t\t\treturn lower_format\n\t\t# check for aliases\n\t\telif lower_format in self.aliases:\n\t\t\treturn self.aliases[lower_format]\n\t\t# format is not recognized\n\t\telse:\n\t\t\traise FriendlyError(\n\t\t\t\tf\"'{format}' is not a recognized format.\", ctx.channel, ctx.author\n\t\t\t)", "def get_format(fstr):\n fstr = fstr.lower() # support uppercase letters\n if os.sep in fstr:\n fstr = fstr.split(os.sep)[-1]\n try:\n fname, ext = fstr.split(\".\", 1)\n except:\n fname, ext = (\"\", \"\")\n\n if ext.startswith(\"bm\"):\n return FORMAT_BMP\n elif ext == \"txt\":\n return FORMAT_CAR\n elif ext == \"ta.csv\":\n return FORMAT_TA_CSV\n elif ext == \"fin\":\n return FORMAT_FIN\n elif ext == \"hul\":\n return FORMAT_HUL\n elif ext in [\"ncp\"]:\n return FORMAT_NCP\n elif ext in [\"prm\", \"m\"]:\n return FORMAT_PRM\n elif ext == \"rim\":\n return FORMAT_RIM\n elif ext == \"w\":\n return FORMAT_W\n else:\n return FORMAT_UNK", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def format_string(s, formatter='minimal'):\n if not callable(formatter):\n formatter = get_formatter_for_name(formatter)\n if formatter is None:\n output = s\n else:\n output = formatter(s)\n return output", "def format(self):\n return self.getparam(\"FORMAT\")", "def format(self):\n return self.getparam(\"FORMAT\")", "def read_fmt(bib_name, bib_file):\n cache_name, formatted_cache_name = _cache_name(bib_name, bib_file)\n\n try:\n meta_data, formatted_entries = cache.read_global(formatted_cache_name)\n except:\n raise cache.CacheMiss()\n\n # raise a cache miss if the modification took place after the caching\n modified_time = os.path.getmtime(bib_file)\n if modified_time > meta_data[\"cache_time\"]:\n raise cache.CacheMiss()\n\n # validate the version and format strings are still valid\n if (meta_data[\"version\"] != _VERSION or\n any(meta_data[s] != get_setting(\"cite_\" + s)\n for s in [\"panel_format\", \"autocomplete_format\"])):\n print(\"Formatting string has changed, updating cache...\")\n # read the base information from the unformatted cache\n current_time, bib_entries = cache.read_global(cache_name)\n # format and cache the entries\n formatted_entries = _create_formatted_entries(formatted_cache_name,\n bib_entries,\n current_time)\n\n return formatted_entries", "def token(self):\n if not hasattr(self,'_tokens'):\n ts = str(time.time())\n rs = str(random.randint(1234,65535))\n self._tokens = '%s_%s_%s' % (self.key().name(),ts,rs)\n return self._tokens", "def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str", "def _get_format(value, quote_mode='always'):\n\n formats = {'always': '{key}=\"{value}\"\\n', 'auto': 
'{key}={value}\\n'}\n\n if quote_mode not in formats.keys():\n return KeyError(f'quote_mode {quote_mode} is invalid')\n\n _mode = quote_mode\n if quote_mode == 'auto' and ' ' in value:\n _mode = 'always'\n return formats.get(_mode)", "def format(self) -> pulumi.Output[Optional['outputs.FlowLogFormatParametersResponse']]:\n return pulumi.get(self, \"format\")", "def from_tokens(\n output: RenderOutput,\n *,\n consecutive_numbering: bool = True,\n warning_stream: Optional[IO] = None,\n) -> str:\n md_renderer = MDRenderer()\n # TODO option for consecutive numbering consecutive_numbering, etc\n options = {\n \"parser_extension\": [\n PARSER_EXTENSIONS[name]\n for name in [\"myst\", \"tables\", \"frontmatter\", \"deflist\"]\n ]\n + [AdditionalRenderers],\n \"mdformat\": {\"number\": consecutive_numbering},\n }\n\n # temporarily redirect mdformat logging\n warning_handler = None\n if warning_stream:\n warning_handler = logging.StreamHandler(warning_stream)\n warning_handler.setLevel(logging.WARNING)\n LOGGER.addHandler(warning_handler)\n try:\n # mdformat outputs only used reference definitions during 'finalize'\n # instead we want to output all parsed reference definitions\n text = md_renderer.render(output.tokens, options, output.env, finalize=False)\n if output.env[\"references\"]:\n if text:\n text += \"\\n\\n\"\n output.env[\"used_refs\"] = set(output.env[\"references\"])\n text += md_renderer._write_references(output.env)\n finally:\n if warning_handler:\n LOGGER.removeHandler(warning_handler)\n if text:\n text += \"\\n\"\n return text", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def get_timestamp(format_str=\"%Y%m%d_%H-%M-%S\"):\n now = datetime.now()\n current_timestamp = now.strftime(format_str)\n\n return current_timestamp", "def extract_pattern(fmt):\n class FakeDict(object):\n def __init__(self):\n self.seen_keys = set()\n\n def __getitem__(self, key):\n self.seen_keys.add(key)\n return ''\n\n def keys(self):\n return self.seen_keys\n\n fake = FakeDict()\n try:\n fmt % fake\n except TypeError:\n # Formatting error\n pass\n return set(fake.keys())", "def token_str(self) -> Optional[str]:\n return self._token_str", "def get_placeholder_formats_list(self, format_string):\n placeholders = []\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.append((token.group(\"key\"), token.group(\"format\")))\n return placeholders", "def strfdate(self, fmt):\n pattern = r'%({})'.format(reduce(lambda x, y: '{}|{}'.format(x, y), FORMAT_MAP.keys()))\n for f in re.findall(pattern, fmt):\n fmt = fmt.replace('%{}'.format(f), FORMAT_MAP[f](self))\n return fmt", "def format(self, tweet):\n return self._format_string.format(tweet=tweet)", "def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n 
token = Token(token_type, token_content, re_match.end())\n break\n\n return token", "def getCachedToken( self ):\n if ( os.path.exists( TOKEN_PATH )):\n return open( TOKEN_PATH ).read()\n else :\n return None", "def _parse(self, fmtstr):\n def _match_brace(string, start_pos, pair='[]'):\n \"\"\"Pairing brackets (used internally in _parse method)\"\"\"\n depth = 1\n if string[start_pos] != pair[0]:\n return None\n for index, char in enumerate(string[start_pos + 1:]):\n if char == pair[0]:\n depth += 1\n elif char == pair[1]:\n depth -= 1\n if depth == 0:\n return start_pos + index + 1\n return None\n\n #----------------------------------------------------------------------\n\n t_fmt = self.__class__._T_FMT\n t_prefix = self.__class__._T_PREFIX\n\n ptr = 0\n # it seems that field id 0 is invalid\n field_id = 1\n length = len(fmtstr)\n parsed_list = []\n\n while ptr < length:\n parsed = {}\n m_prefix = t_prefix.match(fmtstr[ptr:])\n if m_prefix:\n ptr += _get_length_of_match(m_prefix)\n parsed['prefix'] = m_prefix.group(1)\n\n # check if we have a nested structure\n if m_prefix.group(2):\n brace_offset = _match_brace(fmtstr, ptr - 1)\n\n # bracket not match\n if not brace_offset:\n raise BadFormatString(\n 'Unmatched brace on position {0}'.format(ptr)\n )\n parsed['field_id'] = field_id\n parsed['field_type'] = 'a'\n parsed['subcontent'] = self._parse(\n fmtstr[ptr:brace_offset]\n )\n ptr = brace_offset + 1\n field_id += 1\n\n parsed_list.append(parsed)\n continue\n m_fmt = t_fmt.match(fmtstr[ptr:])\n if m_fmt:\n ptr += _get_length_of_match(m_fmt)\n\n # fmt is an alias\n if m_fmt.group(2):\n parsed['field_type'] = self.__class__\\\n .FIELD_ALIAS[m_fmt.group(2)]\n # fmt is an actual field type\n elif m_fmt.group(1):\n parsed['field_type'] = m_fmt.group(1)\n\n # save field id\n parsed['field_id'] = field_id\n\n # check for type clones (e.g. 
`v3')\n if m_fmt.group(3):\n parsed['repeat'] = int(m_fmt.group(3))\n field_id += int(m_fmt.group(3))\n else:\n parsed['repeat'] = 1\n field_id += 1\n\n parsed_list.append(parsed)\n\n else:\n raise BadFormatString(\n 'Invalid token on position {0}'.format(ptr)\n )\n\n # all set\n return parsed_list", "def get_format_table(self):\n try:\n with open(self._config.values['format'], 'r') as f:\n return f.read()\n except:\n return None", "def format(self) -> str:\n return self._format", "def format(self) -> str:\n return self._format", "def format(self) -> Optional[pulumi.Input['FlowLogFormatParametersArgs']]:\n return pulumi.get(self, \"format\")", "def _tokenize(source):\n lines = source.split(\"\\n\")\n print(\n \"{type:<10}{string:<25} {start:^12} {end:^12}\".format(\n type=\"Type\", string=\"String\", start=\"Start\", end=\"End\"\n )\n )\n print(\"-\" * 60)\n for line in lines:\n tokens = collect_tokens(line)\n for token in tokens:\n print(token)", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def string_val(self) -> str:\n return self.current_token", "def token(self) -> str:\n raise NotImplementedError", "def formatted(self) -> str:\r\n ...", "def get_token(word, flag):\n if flag == 1:\n return \"_RARE_\"\n elif flag == 2:\n if bool(re.search(r'\\d', word)):\n return \"AlphaNum\"\n else:\n return \"oThEr\"\n elif flag == 3:\n if word[-3:] == \"ing\":\n return \"enDiNg\"\n else:\n return \"oThEr\"", "def _read_token(token_file):\n try:\n return _fortworth.read(token_file).strip()\n except FileNotFoundError:\n raise _errors.TokenNotFoundError(token_file)", "def format(fmt, st):\n ret = \"\"\n if not st: return ret\n if fmt not in valid_combos:\n return st\n cm = charmap[fmt]\n for c in st:\n ret += cm.get(c, c)\n return ret", "def token(uncapped_token):\n return uncapped_token", "def get_retokenized(tokenizer, text):\n return ' '.join(tokenizer.tokenize(text))", "def _tokenize(self, _string):\n return re.search(self._access_log_regex, _string)", "def token_lookup():\n token_dict = {}\n token_dict['.'] = \"||Period||\"\n token_dict[','] = \"||Comma||\"\n token_dict['\"'] = \"||Quotation_Mark||\"\n token_dict[';'] = \"||Semicolon||\"\n token_dict['!'] = \"||Exclamation_Mark||\"\n token_dict['?'] = \"||Question_Mark||\"\n token_dict['('] = \"||Left_Parentheses||\"\n token_dict[')'] = \"||Right_Parentheses||\"\n token_dict['--'] = \"||Dash||\"\n token_dict['\\n'] = \"||Return||\"\n\n return token_dict", "def get(self):\n return get_msg_form(config['msg_format_path'])", "def formats():\n return _FORMATS", "def _handle_token(self, token: str) -> Optional[str]:\n return token", "def read_token(self):\n self._skip_white_space()\n return self._get_token()", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def get_format(request, default='html'):\n format_ = request.GET.get('format', None)\n if not format_:\n format_ = request.GET.get('view', default)\n return format_", "def format_to_extension(self, format):", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt", "def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class", "def parser_formatter(format_class, **kwargs):\n try:\n return lambda prog: format_class(prog, **kwargs)\n except TypeError:\n return format_class", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def parse_token(bn,token):\n return bn.split(token)[1].split('_')[0]", "def format(self):\n ...", "def get_format_type(self):\n raise Unimplemented()", "def GetFormatCount(format_):\n\n if isinstance(format_, str):\n match = re.search(r'\\s*(\\d+)', format_)\n if match:\n return int(match.group(0))\n\n return 1", "def _get_token(self):\n self._skip()\n\n token = None\n # Checks single-quoted string.\n if self.current_char == \"'\":\n start_position = self.current_position\n while not (self.current_char 
!= \"\\\\\" and self._peek() == \"'\"):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks double-quoted string.\n elif self.current_char == '\"':\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == '\"'):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a digit.\n elif self.current_char.isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() == \".\":\n self._next_char()\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a dot.\n elif self.current_char == \".\":\n if self._peek().isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks word begins with an alphabetic letter or an underscore.\n elif self.current_char.isalpha() or self.current_char == \"_\":\n start_position = self.current_position\n while True:\n if (self._peek() in [\" \", \"\\t\", \"\\r\", \"\\n\", \"\\0\"]\n or self._peek() in _token_names.SEPARATORS\n or self._peek() in _token_names.OPERATORS):\n break\n self._next_char()\n word = self.stream[start_position:self.current_position + 1]\n # Checks if word is a keyword.\n if word in _token_names.Keywords.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.Keywords(word).name, word)\n elif word in _token_names.KeywordsType.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsType(word).name, word)\n elif word in _token_names.KeywordsAttribute.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsAttribute(word).name, word)\n # Otherwise put it as identifier.\n else:\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.IDENTIFIER, word)\n\n # Checks if is a separator.\n elif self.current_char in _token_names.Separators.values():\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # 
Checks if is an operator.\n elif self.current_char in _token_names.Operators.values():\n last_position = self.current_position\n if self.current_char not in [\"&\", \"|\"] and self._peek() == \"=\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"+\" and self._peek() == \"+\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"-\" and self._peek() == \"-\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"&\" and self._peek() == \"&\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"|\" and self._peek() == \"|\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Operators(self.current_char).name, self.current_char)\n\n # Checks if is EOF\n elif self.current_char == \"\\0\":\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.EOF, self.current_char)\n\n # Raise error if is an unknown token.\n else:\n raise LexerError(self.current_position)\n\n self._next_char()\n return token", "def format(self, *args, **kwargs) -> String:\n pass", "def tokenize(*args, **kwargs):\n if kwargs.pop('pure', False):\n return base.tokenize(*args)\n else:\n return str(uuid.uuid4())", "def _get_name_from_url(self, request):\n\n format = request.GET.get('format', None)\n if not format:\n match = self._format_query_pattern.match(request.path)\n if match and match.group('format'):\n format = match.group('format')\n return format", "def initFormat(self):\n pass" ]
[ "0.7031436", "0.5833799", "0.5587852", "0.55370355", "0.55229795", "0.5514945", "0.5489987", "0.5487361", "0.5463768", "0.5454155", "0.54331946", "0.53677803", "0.5347152", "0.5261837", "0.5218769", "0.52003187", "0.5194947", "0.51801944", "0.5157241", "0.5129288", "0.51125944", "0.51009524", "0.50988346", "0.50985765", "0.50985765", "0.50941116", "0.50713986", "0.50615126", "0.50582445", "0.5048388", "0.50467664", "0.49953192", "0.49953192", "0.4989755", "0.49796894", "0.49701267", "0.49655125", "0.49524215", "0.49503097", "0.4947215", "0.49321675", "0.49241", "0.49229473", "0.49032536", "0.489689", "0.489689", "0.48950008", "0.48915634", "0.48847082", "0.48694107", "0.48614025", "0.48594677", "0.4859294", "0.4850643", "0.48468676", "0.48444346", "0.48228976", "0.48201036", "0.4800476", "0.4793135", "0.4789199", "0.47849423", "0.4779945", "0.47765595", "0.47765595", "0.4767015", "0.4762846", "0.47609147", "0.47609147", "0.474834", "0.47472036", "0.4729222", "0.47230813", "0.471819", "0.47161332", "0.4703712", "0.4702357", "0.46922752", "0.46868876", "0.46840248", "0.46786755", "0.4656001", "0.465319", "0.46497196", "0.46459505", "0.4643527", "0.46360722", "0.4618882", "0.46120974", "0.46120974", "0.45972872", "0.4596433", "0.4585631", "0.45843905", "0.45837885", "0.45770916", "0.4575302", "0.45734084", "0.45728123", "0.45683858" ]
0.7236307
0
Parses the format_string and returns a set of color names.
def get_color_names(self, format_string):
        names = set()
        # Tokenize the format string and process them
        for token in self.tokens(format_string):
            if token.group("command"):
                name = dict(parse_qsl(token.group("command"))).get("color")
                if (
                    not name
                    or name in COLOR_NAMES_EXCLUDED
                    or name in COLOR_NAMES
                    or name[0] == "#"
                ):
                    continue
                names.add(name)
        return names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reformatColor(self, colorStr):\n if type(colorStr) is str:\n if colorStr.startswith('#'):\n colorStr = colorStr.replace('#', '')\n else:\n raise Exception('color is not hex format')\n r = int(colorStr[:2], 16)\n g = int(colorStr[2:4], 16)\n b = int(colorStr[4:6], 16)\n return r, g, b", "def get_placeholder_formats_list(self, format_string):\n placeholders = []\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.append((token.group(\"key\"), token.group(\"format\")))\n return placeholders", "def parse_color(self):\n begin = self.tokenizer.next()\n begin.must_be('{')\n for name in self.tokenizer:\n if name.text == '}': return\n name.must_match(\"^[A-Za-z]\", \"%d:%d: Expected a color name, got %s instead.\" % (name.line, name.col, name.text))\n midpunct = self.tokenizer.next()\n if midpunct.text == \"{\":\n color = self.mkColor(name)\n if color in self.ColorDefinitions:\n raise Exception(\"%d:%d: Color %s has already been defined.\" % (name.line, name.col, name.text))\n self.ColorDefinitions[name.text] = color\n elif midpunct.text == ':':\n stack = []\n for token in self.tokenizer:\n if token.text == \".\":\n self.OrderedColorMappings += [Mapping(name,stack)]\n break\n elif token.text == \"}\": raise Exception(\"%d:%d: Color section ended while defining mapping for color %s\" % (name.line, name.col, name.text))\n try:\n stack += [ self.GlobalSymbolDict[token.text] ]\n except:\n raise Exception(\"%d:%d: Literal %s does not occur in the grammar\" % (token.line, token.col, token.text))\n \n elif midpunct.text == '}': raise Exception(\"%d:%d: Coloring section ended unexpectedly here.\" % (token.line, token.col))\n else: raise Exception(\"%d:%d: Expected : or {, not %s\" % (midpunct.line, midpunct.col, midpunct.text))\n raise Exception(\"%d:%d: Unexpected end-of-file while scanning color definition section beginning here.\" % (begin.line, begin.col))", "def from_str (s):\n try: \n return from_csv(s)\n except Exception: \n pass\n \n try: \n return from_hex(s)\n except Exception: \n pass\n\n try:\n return from_name(s)\n except Exception: \n pass\n\n raise ColourFormatError(\"'%s' is not a recognized colour string\"%s)", "def parse_styles(text: str) -> List[dict]:\n styles = []\n regex = r'(\\d{3})=(\".*?\"),(\\d+\\.?\\d+),(\\(.*?\\))'\n\n for line in text.split(\"\\r\\n\"):\n if line == \"\":\n continue\n\n n, font, font_size, color = re.match(regex, line).groups()\n styles.append(\n {\n \"id\": int(n),\n \"f\": font.replace('\"', \"\"),\n \"fs\": float(font_size),\n \"rgb\": [\n int(i)\n for i in color.replace(\"(\", \"\")\n .replace(\")\", \"\").split(\",\")]\n }\n )\n\n return styles", "def tokens(self, format_string):\n if format_string not in self.format_string_cache:\n tokens = list(re.finditer(self.reg_ex, format_string))\n self.format_string_cache[format_string] = tokens\n return self.format_string_cache[format_string]", "def parse_color(colstr):\n if('rgb' in colstr):\n expr = r\"rgb\\(\\s*([0-9]{1,3})\\,\\s*([0-9]{1,3})\\,\\s*([0-9]{1,3})\\s*\\)\"\n results = re.search(expr, colstr)\n if(results and len(results.groups()) == 3):\n rgb = (int(results.group(1)), int(results.group(2)), int(results.group(3)))\n for val in rgb:\n if(val > 255):\n print(\"Invalid rgb color. All values should be from 0-255.\")\n exit(-1)\n\n return ('rgb', rgb)\n\n else:\n print(\"Invalid rgb color. See help for more information\")\n exit(-1)\n elif('#' in colstr):\n if(len(colstr) != 7):\n print(\"Invalid hex value. 
Only 6 digit hex values are supported\")\n exit(-1)\n else:\n try:\n hexval = int(colstr[1:], 16)\n return (\"hex\", hexval)\n except:\n print(\"Failed to parse hex color\")\n exit(-1)\n elif('hsl' in colstr):\n expr = r\"hsl\\(\\s*([0-9]{1,3})\\,\\W*([0-9]{1,3})%{0,1}\\,\\s*([0-9]{1,3})%{0,1}\\s*\\)\"\n results = re.search(expr, colstr)\n if(results and len(results.groups()) == 3):\n h = int(results.group(1))\n s = int(results.group(2))\n l = int(results.group(3))\n if(h < 0 or h > 359):\n print(\"Hue out of range. Range: 0-359\")\n exit(-1)\n elif(s < 0 or s > 100):\n print(\"Saturation out of range. Range: 0-100%\")\n exit(-1)\n elif(l < 0 or s > 100):\n print(\"Lightness out of range. Range: 0-100%\")\n exit(-1)\n else:\n return (\"hsl\", (h, s, l))\n else:\n print(\"Invalid hsl color. See help for more information\")\n exit(-1)\n elif('cmyk' in colstr):\n expr = r\"cmyk\\(\\s*([0-9]{1,3})%{0,1}\\,\\W*([0-9]{1,3})%{0,1}\\,\\s*([0-9]{1,3})%{0,1}\\,\\s*([0-9]{1,3})%{0,1}\\s*\\)\"\n results = re.search(expr, colstr)\n if(results and len(results.groups()) == 4):\n c = int(results.group(1))\n m = int(results.group(2))\n y = int(results.group(3))\n k = int(results.group(4))\n\n if(c < 0 or c > 100):\n print(\"Cyan out of range. Range: 0-100%\")\n exit(-1)\n elif(m < 0 or m > 100):\n print(\"Magenta out of range. Range: 0-100%\")\n exit(-1)\n elif(y < 0 or y > 100):\n print(\"Yellow out of range. Range: 0-100%\")\n exit(-1)\n elif(k < 0 or k > 100):\n print(\"Black out of range. Range 0-100%\")\n exit(-1)\n else:\n return(\"cmyk\", (c, m, y, k))\n else:\n print(\"Invalid cmyk color. See help for more information\")\n exit(-1)", "def mkColor(self, name):\n known_attrs = [ 'font-family', 'font-style', 'font-weight', 'font-size', 'text-decoration', 'color', 'background-color' ]\n stack = []\n color = Color(name)\n for token in self.tokenizer:\n if token.text == \";\":\n stack[0].assert_symbol_name\n if stack[0].text not in known_attrs: raise Exception(\"%d:%d: Unknown color attribute %s\" % (stack[0].line, stack[0].col, stack[0].text))\n stack[1].must_be(\":\")\n stack[2].must_match(\"^\\w\", \"%d:%d: Expected a color attribute value instead of %s\" % (stack[2].line, stack[2].col, stack[2].text))\n color.attrs[stack[0].text] = stack[2].text\n stack = []\n elif token.text == \"}\":\n return color\n else:\n stack += [token]\n raise Exception(\"%d:%d: End-of-file reached while scanning color %s defined here.\" % (name.line, name.col, name.text))", "def parse_colors():\n var = os.environ.get('LS_COLORS', '')\n items = var.split(':')\n ext_map = {}\n special_map = {}\n for item in items:\n try:\n pattern, val = item.split('=')\n except ValueError:\n # TODO\n continue\n pattern = pattern.lower()\n\n if pattern.startswith('*'):\n ext_map[pattern[1:]] = val\n else:\n special_map[pattern] = val\n logger.debug(''.join(list('%s %s\\n' % (k, v) for k, v in ext_map.items())))\n return ext_map, special_map", "def __createStyleFromString(self, string):\n\n matches = re.findall(r\"([^=]+)=([^;]+)(;|$)\", str(string).lower());\n if not matches :\n return False;\n\n\n style = OutputFormatterStyle();\n for match in matches:\n if ('fg' == match[0]) :\n style.setForeground(match[1]);\n elif ('bg' == match[0]) :\n style.setBackground(match[1]);\n else :\n style.setOption(match[1]);\n\n\n\n return style;", "def format_color_name(string, frame_name):\n if frame_name == \"primary\":\n color = \"red\"\n else:\n color = \"green\"\n return format_color(string, color)", "def _string_to_colors(self):\n string = 
self.str_colors\n colors_three = [string[c:c+3] for c in range(0, len(string), 3)]\n colors_three = [list(color) for color in colors_three]\n pixels = [[ord(rgb) for rgb in color] for color in colors_three]\n return pixels", "def parse_color(raw_color) -> list[float]:\n if isinstance(raw_color, str):\n return list(to_rgba(raw_color))\n return list(raw_color)", "def format_color(string, color):\n cs = \"\\x1b[38;2;{};{};{}m{}\\x1b[0m\"\n\n # my colors\n if color == \"red1\":\n r, g, b = 215, 0, 0\n elif color == \"green1\":\n r, g, b = 0, 255, 0\n elif color == \"blue1\":\n r, g, b = 50, 50, 255\n\n # list from https://www.rapidtables.com/web/color/RGB_Color.html\n elif color == \"Black\":\n r, g, b = 0, 0, 0\n elif color == \"White\":\n r, g, b = 255, 255, 255\n elif color == \"Red\":\n r, g, b = 255, 0, 0\n elif color == \"Lime\":\n r, g, b = 0, 255, 0\n elif color == \"Blue\":\n r, g, b = 0, 0, 255\n elif color == \"Yellow\":\n r, g, b = 255, 255, 0\n elif color == \"Cyan\":\n r, g, b = 0, 255, 255\n elif color == \"Magenta\":\n r, g, b = 255, 0, 255\n elif color == \"Silver\":\n r, g, b = 192, 192, 192\n elif color == \"Gray\":\n r, g, b = 128, 128, 128\n elif color == \"Maroon\":\n r, g, b = 128, 0, 0\n elif color == \"Olive\":\n r, g, b = 128, 128, 0\n elif color == \"Green\":\n r, g, b = 0, 128, 0\n elif color == \"Purple\":\n r, g, b = 128, 0, 128\n elif color == \"Teal\":\n r, g, b = 0, 128, 128\n elif color == \"Navy\":\n r, g, b = 0, 0, 128\n elif color == \"maroon\":\n r, g, b = 128, 0, 0\n elif color == \"dark red\":\n r, g, b = 139, 0, 0\n elif color == \"brown\":\n r, g, b = 165, 42, 42\n elif color == \"firebrick\":\n r, g, b = 178, 34, 34\n elif color == \"crimson\":\n r, g, b = 220, 20, 60\n elif color == \"red\":\n r, g, b = 255, 0, 0\n elif color == \"tomato\":\n r, g, b = 255, 99, 71\n elif color == \"coral\":\n r, g, b = 255, 127, 80\n elif color == \"indian red\":\n r, g, b = 205, 92, 92\n elif color == \"light coral\":\n r, g, b = 240, 128, 128\n elif color == \"dark salmon\":\n r, g, b = 233, 150, 122\n elif color == \"salmon\":\n r, g, b = 250, 128, 114\n elif color == \"light salmon\":\n r, g, b = 255, 160, 122\n elif color == \"orange red\":\n r, g, b = 255, 69, 0\n elif color == \"dark orange\":\n r, g, b = 255, 140, 0\n elif color == \"orange\":\n r, g, b = 255, 165, 0\n elif color == \"gold\":\n r, g, b = 255, 215, 0\n elif color == \"dark golden rod\":\n r, g, b = 184, 134, 11\n elif color == \"golden rod\":\n r, g, b = 218, 165, 32\n elif color == \"pale golden rod\":\n r, g, b = 238, 232, 170\n elif color == \"dark khaki\":\n r, g, b = 189, 183, 107\n elif color == \"khaki\":\n r, g, b = 240, 230, 140\n elif color == \"olive\":\n r, g, b = 128, 128, 0\n elif color == \"yellow\":\n r, g, b = 255, 255, 0\n elif color == \"yellow green\":\n r, g, b = 154, 205, 50\n elif color == \"dark olive green\":\n r, g, b = 85, 107, 47\n elif color == \"olive drab\":\n r, g, b = 107, 142, 35\n elif color == \"lawn green\":\n r, g, b = 124, 252, 0\n elif color == \"chart reuse\":\n r, g, b = 127, 255, 0\n elif color == \"green yellow\":\n r, g, b = 173, 255, 47\n elif color == \"dark green\":\n r, g, b = 0, 100, 0\n elif color == \"green\":\n r, g, b = 0, 128, 0\n elif color == \"forest green\":\n r, g, b = 34, 139, 34\n elif color == \"lime\":\n r, g, b = 0, 255, 0\n elif color == \"lime green\":\n r, g, b = 50, 205, 50\n elif color == \"light green\":\n r, g, b = 144, 238, 144\n elif color == \"pale green\":\n r, g, b = 152, 251, 152\n elif color == \"dark sea green\":\n 
r, g, b = 143, 188, 143\n elif color == \"medium spring green\":\n r, g, b = 0, 250, 154\n elif color == \"spring green\":\n r, g, b = 0, 255, 127\n elif color == \"sea green\":\n r, g, b = 46, 139, 87\n elif color == \"medium aqua marine\":\n r, g, b = 102, 205, 170\n elif color == \"medium sea green\":\n r, g, b = 60, 179, 113\n elif color == \"light sea green\":\n r, g, b = 32, 178, 170\n elif color == \"dark slate gray\":\n r, g, b = 47, 79, 79\n elif color == \"teal\":\n r, g, b = 0, 128, 128\n elif color == \"dark cyan\":\n r, g, b = 0, 139, 139\n elif color == \"aqua\":\n r, g, b = 0, 255, 255\n elif color == \"cyan\":\n r, g, b = 0, 255, 255\n elif color == \"light cyan\":\n r, g, b = 224, 255, 255\n elif color == \"dark turquoise\":\n r, g, b = 0, 206, 209\n elif color == \"turquoise\":\n r, g, b = 64, 224, 208\n elif color == \"medium turquoise\":\n r, g, b = 72, 209, 204\n elif color == \"pale turquoise\":\n r, g, b = 175, 238, 238\n elif color == \"aqua marine\":\n r, g, b = 127, 255, 212\n elif color == \"powder blue\":\n r, g, b = 176, 224, 230\n elif color == \"cadet blue\":\n r, g, b = 95, 158, 160\n elif color == \"steel blue\":\n r, g, b = 70, 130, 180\n elif color == \"corn flower blue\":\n r, g, b = 100, 149, 237\n elif color == \"deep sky blue\":\n r, g, b = 0, 191, 255\n elif color == \"dodger blue\":\n r, g, b = 30, 144, 255\n elif color == \"light blue\":\n r, g, b = 173, 216, 230\n elif color == \"sky blue\":\n r, g, b = 135, 206, 235\n elif color == \"light sky blue\":\n r, g, b = 135, 206, 250\n elif color == \"midnight blue\":\n r, g, b = 25, 25, 112\n elif color == \"navy\":\n r, g, b = 0, 0, 128\n elif color == \"dark blue\":\n r, g, b = 0, 0, 139\n elif color == \"medium blue\":\n r, g, b = 0, 0, 205\n elif color == \"blue\":\n r, g, b = 0, 0, 255\n elif color == \"royal blue\":\n r, g, b = 65, 105, 225\n elif color == \"blue violet\":\n r, g, b = 138, 43, 226\n elif color == \"indigo\":\n r, g, b = 75, 0, 130\n elif color == \"dark slate blue\":\n r, g, b = 72, 61, 139\n elif color == \"slate blue\":\n r, g, b = 106, 90, 205\n elif color == \"medium slate blue\":\n r, g, b = 123, 104, 238\n elif color == \"medium purple\":\n r, g, b = 147, 112, 219\n elif color == \"dark magenta\":\n r, g, b = 139, 0, 139\n elif color == \"dark violet\":\n r, g, b = 148, 0, 211\n elif color == \"dark orchid\":\n r, g, b = 153, 50, 204\n elif color == \"medium orchid\":\n r, g, b = 186, 85, 211\n elif color == \"purple\":\n r, g, b = 128, 0, 128\n elif color == \"thistle\":\n r, g, b = 216, 191, 216\n elif color == \"plum\":\n r, g, b = 221, 160, 221\n elif color == \"violet\":\n r, g, b = 238, 130, 238\n elif color == \"magenta\":\n r, g, b = 255, 0, 255\n elif color == \"orchid\":\n r, g, b = 218, 112, 214\n elif color == \"medium violet red\":\n r, g, b = 199, 21, 133\n elif color == \"pale violet red\":\n r, g, b = 219, 112, 147\n elif color == \"deep pink\":\n r, g, b = 255, 20, 147\n elif color == \"hot pink\":\n r, g, b = 255, 105, 180\n elif color == \"light pink\":\n r, g, b = 255, 182, 193\n elif color == \"pink\":\n r, g, b = 255, 192, 203\n elif color == \"antique white\":\n r, g, b = 250, 235, 215\n elif color == \"beige\":\n r, g, b = 245, 245, 220\n elif color == \"bisque\":\n r, g, b = 255, 228, 196\n elif color == \"blanched almond\":\n r, g, b = 255, 235, 205\n elif color == \"wheat\":\n r, g, b = 245, 222, 179\n elif color == \"corn silk\":\n r, g, b = 255, 248, 220\n elif color == \"lemon chiffon\":\n r, g, b = 255, 250, 205\n elif color == \"light golden 
rod yellow\":\n r, g, b = 250, 250, 210\n elif color == \"light yellow\":\n r, g, b = 255, 255, 224\n elif color == \"saddle brown\":\n r, g, b = 139, 69, 19\n elif color == \"sienna\":\n r, g, b = 160, 82, 45\n elif color == \"chocolate\":\n r, g, b = 210, 105, 30\n elif color == \"peru\":\n r, g, b = 205, 133, 63\n elif color == \"sandy brown\":\n r, g, b = 244, 164, 96\n elif color == \"burly wood\":\n r, g, b = 222, 184, 135\n elif color == \"tan\":\n r, g, b = 210, 180, 140\n elif color == \"rosy brown\":\n r, g, b = 188, 143, 143\n elif color == \"moccasin\":\n r, g, b = 255, 228, 181\n elif color == \"navajo white\":\n r, g, b = 255, 222, 173\n elif color == \"peach puff\":\n r, g, b = 255, 218, 185\n elif color == \"misty rose\":\n r, g, b = 255, 228, 225\n elif color == \"lavender blush\":\n r, g, b = 255, 240, 245\n elif color == \"linen\":\n r, g, b = 250, 240, 230\n elif color == \"old lace\":\n r, g, b = 253, 245, 230\n elif color == \"papaya whip\":\n r, g, b = 255, 239, 213\n elif color == \"sea shell\":\n r, g, b = 255, 245, 238\n elif color == \"mint cream\":\n r, g, b = 245, 255, 250\n elif color == \"slate gray\":\n r, g, b = 112, 128, 144\n elif color == \"light slate gray\":\n r, g, b = 119, 136, 153\n elif color == \"light steel blue\":\n r, g, b = 176, 196, 222\n elif color == \"lavender\":\n r, g, b = 230, 230, 250\n elif color == \"floral white\":\n r, g, b = 255, 250, 240\n elif color == \"alice blue\":\n r, g, b = 240, 248, 255\n elif color == \"ghost white\":\n r, g, b = 248, 248, 255\n elif color == \"honeydew\":\n r, g, b = 240, 255, 240\n elif color == \"ivory\":\n r, g, b = 255, 255, 240\n elif color == \"azure\":\n r, g, b = 240, 255, 255\n elif color == \"snow\":\n r, g, b = 255, 250, 250\n elif color == \"black\":\n r, g, b = 0, 0, 0\n elif color == \"dim gray\":\n r, g, b = 105, 105, 105\n elif color == \"gray\":\n r, g, b = 128, 128, 128\n elif color == \"dark gray\":\n r, g, b = 169, 169, 169\n elif color == \"silver\":\n r, g, b = 192, 192, 192\n elif color == \"light gray\":\n r, g, b = 211, 211, 211\n elif color == \"gainsboro\":\n r, g, b = 220, 220, 220\n elif color == \"white smoke\":\n r, g, b = 245, 245, 245\n elif color == \"white\":\n r, g, b = 255, 255, 255\n else:\n r, g, b = 255, 255, 255\n\n return cs.format(r, g, b, string)", "def parseColor(c):\n if c in baseColors:\n return baseColors[c]\n if len(c) == 6:\n return tuple(map(lambda x: int(x, 16), (c[:2], c[2:4], c[4:])))\n if len(c) == 3:\n return tuple(map(lambda x: 16*int(x, 16), c))\n raise ValueError(\"Can't find color '{}'\".format(c))", "def select_stylestrs(cfgstr):\n stylestrs = []\n for s in cfgstr.split():\n if s in vars(fmt):\n stylestrs.append(s)\n return stylestrs", "def parse_color(color):\n return (color[0], color[1], color[2])", "def _process_colors(self, s: str) -> str:\r\n return self._color_regexp.sub(lambda m: self._ansi_equivalent(m.group()), s)", "def _proc_color(self, tokens):\n\n keys = tokens.keys()\n if \"red\" in keys: # RGB(A)\n rr, gg, bb = tokens[\"red\"], tokens[\"green\"], tokens[\"blue\"]\n hex2int = lambda h: int(h, 16)\n if \"alpha\" in keys:\n a = tokens[\"alpha\"]\n c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))\n else:\n c = str((hex2int(rr), hex2int(gg), hex2int(bb)))\n elif \"hue\" in keys: # HSV\n r, g, b = hsv_to_rgb(tokens[\"hue\"],\n tokens[\"saturation\"],\n tokens[\"value\"])\n c = str((int(r*255), int(g*255), int(b*255)))\n else:\n c = tokens[\"color\"]\n\n return c", "def __init__(self, colorNames):\n self._lengthOfPattern = 
0 # will later be queried from the user\n self._palette = '' # initials for color choices, e.g., R for red\n for color in colorNames:\n self._palette += color[0].upper()", "def test_colorFormatting(self):\n self.assertEqual(irc.parseFormattedText(\"\\x0301yay\\x03\"), A.fg.black[\"yay\"])\n self.assertEqual(\n irc.parseFormattedText(\"\\x0301,02yay\\x03\"), A.fg.black[A.bg.blue[\"yay\"]]\n )\n self.assertEqual(\n irc.parseFormattedText(\"\\x0301yay\\x0302yipee\\x03\"),\n A.fg.black[\"yay\", A.fg.blue[\"yipee\"]],\n )", "def build_rgb_and_opacity(s: str) -> tuple:\n # Set defaults\n color = \"000000\"\n opacity = 1\n\n if s.startswith(\"#\"):\n s = s[1:]\n if len(s) == 8:\n color = s[6:8] + s[4:6] + s[2:4]\n opacity = round(int(s[0:2], 16) / 256, 2)\n elif len(s) == 6:\n color = s[4:6] + s[2:4] + s[0:2]\n elif len(s) == 3:\n color = s[::-1]\n\n return \"#\" + color, opacity", "def parse_color(color):\n try:\n color = webcolors.name_to_rgb(color)\n return color.red, color.green, color.blue\n except ValueError:\n pass\n\n try:\n color = webcolors.hex_to_rgb(color)\n return color.red, color.green, color.blue\n except ValueError:\n pass\n\n try:\n data = color.split(\",\")\n return int(data[0]), int(data[1]), int(data[2])\n except Exception:\n pass\n\n return None", "def get_colors(stylename):\n style = get_style_by_name(stylename)\n fgcolor = style.style_for_token(Token.Text)['color'] or ''\n if len(fgcolor) in (3, 6):\n # could be 'abcdef' or 'ace' hex, which needs '#' prefix\n try:\n int(fgcolor, 16)\n except TypeError:\n pass\n else:\n fgcolor = \"#\" + fgcolor\n\n return dict(\n bgcolor=style.background_color,\n select=style.highlight_color,\n fgcolor=fgcolor\n )", "def test_color__name_str_arg(self):\n for name in (\"aquamarine3\", \"AQUAMARINE3\", \"AqUAmArIne3\"):\n color = pygame.Color(name)\n\n self.assertEqual(color.r, 102)\n self.assertEqual(color.g, 205)\n self.assertEqual(color.b, 170)\n self.assertEqual(color.a, 255)", "def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt", "def colorname(line):\n strline = line.split('\\t')\n\n # get color name and hex\n clname = unidecode.unidecode(strline[0])\n clname = re.sub(BAD_CHARS, '', clname, 0, re.MULTILINE | re.IGNORECASE)\n clname = clname.lower()\n\n hexcol = strline[1].replace('#', '')\n return (clname, hexcol.upper(), strline[0])", "def getPredefinedColors(self):\n colorNames = [ 'Comment', \\\n 'Constant', \\\n 'String', \\\n 'VariableName', \\\n 'FunctionName', \\\n 'Keyword', \\\n 'Type', \\\n 'None', \\\n 'Error' \\\n ]\n colors = {}\n for colorName in colorNames:\n colors[colorName]=Color(Token(None,None,colorName),True)\n return colors", "def _format_rgb_str(rgb_str):\n return [int(c) for c in re.sub('[^0-9,]', '', rgb_str).split(',')]", "def getColorDict():\n scribus.statusMessage(\"Reading existing colors...\")\n colornames = scribus.getColorNames()\n scribus.progressTotal(len(colornames))\n i=0\n colordict={}\n for name in colornames:\n colordict[name]=None\n i=i+1\n scribus.progressSet(i)\n return colordict #we can ask this dict if the color already exists", "def getColorValueFromName( strName ):\n dictColor = {\n \"blue\": 0x0000FF,\n \"red\": 0xFF0000,\n \"green\": 0x00FF00,\n \"yellow\": 0xFFFF00,\n \"purple\": 0x801187,\n \"pink\": 0xff00ff,\n \"orange\": 0xff9e00,\n 
\"brown\": 0x733300,\n \"black\": 0x000000,\n \"grey\": 0x7F7F7F, \n \"white\": 0xFFFFFF,\n };\n try:\n nColor = dictColor[strName.lower()];\n return nColor;\n except:\n pass\n print( \"WRN: abcdk.color.getColorValueFromName: don't know color '%s'\" % strName );\n return -1;", "def get_formats(self):\n return tuple(self._names.keys())", "def colorize(self, string):\n D = \"(%s)\" % colorize(\"@R{D}\")\n L = \"(%s)\" % colorize(\"@G{L}\")\n DL = \"(%s,%s)\" % (colorize(\"@R{D}\"), colorize(\"@G{L}\"))\n colorized = string.replace(\"(D)\", D)\n colorized = colorized.replace(\"(L)\", L)\n colorized = colorized.replace(\"(D,L)\", DL)\n return colorized", "def HTMLColorToRGB(colorstring):", "def parse_color_setting(config_string):\n if not config_string:\n return PALETTES[DEFAULT_PALETTE]\n\n # Split the color configuration into parts\n parts = config_string.lower().split(';')\n palette = PALETTES[NOCOLOR_PALETTE].copy()\n for part in parts:\n if part in PALETTES:\n # A default palette has been specified\n palette.update(PALETTES[part])\n elif '=' in part:\n # Process a palette defining string\n definition = {}\n\n # Break the definition into the role,\n # plus the list of specific instructions.\n # The role must be in upper case\n role, instructions = part.split('=')\n role = role.upper()\n\n styles = instructions.split(',')\n styles.reverse()\n\n # The first instruction can contain a slash\n # to break apart fg/bg.\n colors = styles.pop().split('/')\n colors.reverse()\n foreg = colors.pop()\n if foreg in COLOR_NAMES:\n definition['fg'] = foreg\n if colors and colors[-1] in COLOR_NAMES:\n definition['bg'] = colors[-1]\n\n # All remaining instructions are options\n opts = tuple(s for s in styles if s in OPT_DICT.keys())\n if opts:\n definition['opts'] = opts\n\n # The nocolor palette has all available roles.\n # Use that palette as the basis for determining\n # if the role is valid.\n if role in PALETTES[NOCOLOR_PALETTE] and definition:\n palette[role] = definition\n\n # If there are no colors specified, return the empty palette.\n if palette == PALETTES[NOCOLOR_PALETTE]:\n return None\n return palette", "def get_colors():\n colors = {}\n for h in wn.synset('chromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n for h in wn.synset('achromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n return colors", "def split_color_string(color_string):\n if not color_string:\n return []\n return [thing for thing in regexes[\"whitespace\"].split(color_string) if thing]", "def get_colors(lines):\n\n patt = re.compile('\\#\\w+')\n\n return [\n patt.search(line).group(0)\n for line in lines\n if patt.search(line)\n ]", "def getColors():\n return ['#8c99fc', '#cacefd', '#fff1d7', '#feda98', '#fda85a', '#fc6647']", "def __get_colour_from_string(self, colour_string):\n # Return the RGB list (black if not in dictionary)\n return self.__colour_dictionary.get(colour_string, color.black.value)", "def convColor(colorString):\n if len(colorString) != 6:\n return None\n r, g, b = colorString[:2], colorString[2:4], colorString[4:]\n r, g, b = [int(n, 16) for n in (r, g, b)]\n return (r, g, b)", "def string_to_color(s):\n if type(s) is not str:\n raise TypeError(\"s must be a string\")\n\n if not(s.startswith(\"#\") or s.startswith(\"0x\")):\n raise ValueError(\"value is not Color-compatible\")\n\n if 
s.startswith(\"#\"):\n s = s[1:]\n else:\n s = s[2:]\n\n r, g, b, a = 255, 255, 255, 255\n if len(s) in (3, 4):\n # A triple/quadruple in the form #ead == #eeaadd\n r = int(s[0], 16) << 4 | int(s[0], 16)\n g = int(s[1], 16) << 4 | int(s[1], 16)\n b = int(s[2], 16) << 4 | int(s[2], 16)\n if len(s) == 4:\n a = int(s[3], 16) << 4 | int(s[3], 16)\n elif len(s) in (6, 8):\n r = int(s[0], 16) << 4 | int(s[1], 16)\n g = int(s[2], 16) << 4 | int(s[3], 16)\n b = int(s[4], 16) << 4 | int(s[5], 16)\n if len(s) == 8:\n a = int(s[6], 16) << 4 | int(s[7], 16)\n else:\n raise ValueError(\"value is not Color-compatible\")\n return Color(r, g, b, a)", "def parse(\n colstr,\n *,\n hex6=True,\n hex3=True,\n rgbfunc_int=True,\n rgbfunc_float=True,\n rgbfunc_percent=True,\n name_css=True,\n name_crayola=True,\n name_xkcd=True,\n name_meodai_best=True,\n name_meodai=True,\n):\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f\"Could not find a working parser for {colstr!r}.\")\n return res", "def hex2color(s):\n hexColorPattern = re.compile(\"\\A#[a-fA-F0-9]{6}\\Z\")\n if not isinstance(s, basestring):\n raise TypeError('hex2color requires a string argument')\n if hexColorPattern.match(s) is None:\n raise ValueError('invalid hex color string \"%s\"' % s)\n return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])", "def create_fixes_list(cls, string: str) -> List[str]:\n\n fixes = []\n\n for colour in COLOURS:\n if string.find(colour) != -1:\n fixes.append(colour)\n\n return fixes", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def from_name (name_str):\n if name_str in colour_names:\n return Colour(*colour_names[name_str])\n raise KeyError(\"'%s' is not a recognized colour name\"%name_str)", "def as_variable_name(self, string):\n return idaapi.COLSTR(string, idaapi.SCOLOR_REG)", "def parse_format(var_sample):\n # ugh\n ret = []\n # Parsing format information\n # Need to see what all these could be...\n if None in var_sample[\"GT\"]:\n ret.append(3)\n elif var_sample[\"GT\"] == (0, 0):\n ret.append(0)\n elif var_sample[\"GT\"] == (0, 1):\n ret.append(1)\n elif var_sample[\"GT\"] == (1, 1):\n ret.append(2)\n \n ret.extend([var_sample[\"GQ\"] if var_sample[\"GQ\"] is not None else 0,\n var_sample[\"OV\"],\n var_sample[\"DP\"], # be careful these aren't '.'\n 
#split where _r is ref-allele and _a is alt-allele\n var_sample[\"AD\"][0],\n var_sample[\"AD\"][1],\n var_sample[\"PDP\"],\n var_sample[\"PAD\"][0],\n var_sample[\"PAD\"][1],\n var_sample[\"US\"][0],\n var_sample[\"US\"][1],\n var_sample[\"DS\"][0],\n var_sample[\"DS\"][1],\n var_sample[\"UC\"][0],\n var_sample[\"UC\"][1],\n var_sample[\"DC\"][0],\n var_sample[\"DC\"][1],\n var_sample[\"UDC\"][0],\n var_sample[\"UDC\"][1],\n var_sample[\"UCC\"][0],\n var_sample[\"UCC\"][1],\n var_sample[\"DDC\"][0],\n var_sample[\"DDC\"][1],\n var_sample[\"DCC\"][0],\n var_sample[\"DCC\"][1],\n var_sample[\"UMO\"][0],\n var_sample[\"UMO\"][1],\n var_sample[\"DMO\"][0],\n var_sample[\"DMO\"][1],\n var_sample[\"UXO\"][0],\n var_sample[\"UXO\"][1],\n var_sample[\"DXO\"][0],\n var_sample[\"DXO\"][1],\n var_sample[\"NR\"][0],\n var_sample[\"NR\"][1],\n var_sample[\"MO\"][0],\n var_sample[\"MO\"][1],\n var_sample[\"XO\"][0],\n var_sample[\"XO\"][1],\n var_sample[\"XC\"][0],\n var_sample[\"XC\"][1],\n var_sample[\"AC\"][0],\n var_sample[\"AC\"][1],\n var_sample[\"MC\"][0],\n var_sample[\"MC\"][1],\n var_sample[\"EC\"][0],\n var_sample[\"EC\"][1],\n var_sample[\"PL\"][0] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][1] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][2] if var_sample[\"PL\"][0] is not None else 0])\n return ret\n #END", "def cmykstring2rgbstring(s):\n c, m, y, k = [float(j) for j in s.split()]\n r, g, b = cmyk2rgb(c, m, y, k)\n return f\"{r:.6f} {g:.6f} {b:.6f}\"", "def parse_color(color_like: ColorLike) -> ColorType:\n if isinstance(color_like, str):\n if color_like.startswith(\"#\"):\n return hex_to_bgr(color_like)\n else:\n return getattr(Color, color_like)\n # TODO: validate?\n return tuple([int(v) for v in color_like])", "def colored (string_, color, attrs):\n return string_", "def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':\n return in_range(int('0x' + s[1:], 0))\n elif s[0:2] == '0x':\n return in_range(int(s, 0))\n elif len(s) == 6:\n return in_range(int('0x' + s, 0))\n except ValueError:\n return False", "def extract_pattern(fmt):\n class FakeDict(object):\n def __init__(self):\n self.seen_keys = set()\n\n def __getitem__(self, key):\n self.seen_keys.add(key)\n return ''\n\n def keys(self):\n return self.seen_keys\n\n fake = FakeDict()\n try:\n fmt % fake\n except TypeError:\n # Formatting error\n pass\n return set(fake.keys())", "def check_color(style):\n for kw in list(cc.keys()):\n m = re.search(kw, style)\n if m:\n return m.group()\n\n # Return 'b' if nothing has found\n return 'b'", "def color_parser(colors, function):\n if isinstance(colors, str):\n return function(colors)\n\n if isinstance(colors, tuple) and isinstance(colors[0], Number):\n return function(colors)\n\n if hasattr(colors, '__iter__'):\n if isinstance(colors, tuple):\n new_color_tuple = tuple(function(item) for item in colors)\n return new_color_tuple\n\n else:\n new_color_list = [function(item) for item in colors]\n return new_color_list", "def clr_tuple(colorstring):\n\n if colorstring[0] == '#':\n if len(colorstring) == 7:\n return (ONE_OVER_256 * float(_hexbyte(colorstring[1:3])),\n ONE_OVER_256 * float(_hexbyte(colorstring[3:5])),\n ONE_OVER_256 * float(_hexbyte(colorstring[5:7])))\n if len(colorstring) == 4:\n return (ONE_OVER_16 * float(_hexchar(colorstring[1])),\n ONE_OVER_16 
* float(_hexchar(colorstring[2])),\n ONE_OVER_16 * float(_hexchar(colorstring[3])))\n if colorstring in colors.CSS4_COLORS:\n return clr_tuple(colors.CSS4_COLORS[colorstring])\n if colorstring in colors.BASE_COLORS:\n return clr_tuple(colors.BASE_COLORS[colorstring])\n\n rgb_re = re.compile(\"rgb:(.*),(.*),(.*)\")\n\n rgb_match = rgb_re.search(colorstring)\n if rgb_match:\n return (float(rgb_match.group(1)),\n float(rgb_match.group(2)),\n float(rgb_match.group(3)))\n return None", "def create_colors_list(color_dict):\r\n ret = []\r\n for i in range(len(color_dict)):\r\n ret.append('#' + color_dict[i]['@rgb'])\r\n return ret", "def getColorString(color):\n if type(color) is not int:\n raise TypeError(\"The input to getColorString is not of type int.\")\n if color in COLOR_STRINGS:\n return COLOR_STRINGS[color]\n else:\n raise ValueError(\"Input color not found.\")", "def get_color_schemes() -> dict[str, dict[str, str]]:\n COLOR_SCHEMES_FILE = Path(__file__).parent / \"color_schemes.tsv\"\n name2color_scheme = {}\n with open(COLOR_SCHEMES_FILE) as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n header = next(reader)\n letters = header[1:]\n for row in reader:\n name, colors = row[0], row[1:]\n color_scheme = {}\n for letter, color in zip(letters, colors):\n color_scheme[letter] = color\n name2color_scheme[name] = color_scheme\n return name2color_scheme", "def test_color__name_str_arg_from_colordict(self):\n for name, values in THECOLORS.items():\n color = pygame.Color(name)\n\n self.assertEqual(color.r, values[0])\n self.assertEqual(color.g, values[1])\n self.assertEqual(color.b, values[2])\n self.assertEqual(color.a, values[3])", "def color_conversion(string):\n if (string == 'J'):\n return 0.14\n if (string == 'I'):\n return 0.28\n if (string == 'H'):\n return 0.42\n if (string == 'G'):\n return 0.56\n if (string == 'F'):\n return 0.70\n if (string == 'E'):\n return 0.84\n if (string == 'D'):\n return 1", "def setColorConf(colors,ngroups)->list:\n if colors == \"hcl\":\n try:\n from colorspace import sequential_hcl\n color_repo = sequential_hcl(h=[15,375],l=65,c=70)\n colors_list = color_repo.colors(ngroups + 1)\n except ImportError:\n print('hcl colorspace package has not being installed.')\n print('please try the following command:')\n print('pip install git+https://github.com/retostauffer/python-colorspace')\n else:\n colors = list(plt.get_cmap(colors).colors)\n colors_list = [to_hex(color) for color in colors]\n colors_list = colors_list[:ngroups]\n\n return colors_list", "def _read_color_labels(filename):\n line_parser = lambda line: (int(line.split(',')[0]), line.split(',')[-1])\n with open(filename, 'r') as labels:\n label_map = dict([line_parser(line.strip()) for line in labels])\n return label_map", "def color(c):\n\n if isinstance(c, tuple) and len(c) == 4:\n return c\n\n if c is None:\n return c\n\n if isinstance(c, basestring):\n if c[0] == '#':\n c = c[1:]\n\n if len(c) == 6:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = 255\n elif len(c) == 8:\n r = int(c[0]+c[1], 16)\n g = int(c[2]+c[3], 16)\n b = int(c[4]+c[5], 16)\n a = int(c[6]+c[7], 16)\n elif len(c) == 3:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = 255\n elif len(c) == 4:\n r = int(c[0], 16) * 0x11\n g = int(c[1], 16) * 0x11\n b = int(c[2], 16) * 0x11\n a = int(c[3], 16) * 0x11\n else:\n raise Exception(\"Color string must be 3, 4, 6, or 8 hex digits long.\")\n\n return (r, g, b, a)\n\n raise Exception(\"Not a color: %r\" % (c,))", "def 
update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def hexToRgb(self, str):\n\n # If hex is shorthand, convert to a double value first.\n if len(str) == 3:\n val_1 = (int(str[0:1] * 2, 16))\n val_2 = (int(str[1:2] * 2, 16))\n val_3 = (int(str[2:3] * 2, 16))\n\n else:\n val_1 = (int(str[0:2], 16))\n val_2 = (int(str[2:4], 16))\n val_3 = (int(str[4:6], 16))\n\n # Return the preformatted string with the new values.\n return 'rgb(%d, %d, %d)' % (val_1, val_2, val_3)", "def cleanup_passed_color_value(s):\n reo = re.compile('[0-9a-f]')\n cannotBeCleaned = ''\n if s[0] == '#' and len(s) in [4,7] and reo.match(s[1:]):\n return s\n if s in colorNamesAndCodes:\n col = colorNamesAndCodes[s]\n if reo.match(col[1:]):\n return col\n else:\n return cannotBeCleaned\n if len(s) in [3,6] and reo.match(s):\n return '#' + s\n if len(s) == 2 and reo.match(s):\n return '#' +s +s +s\n return cannotBeCleaned", "def _get_colors():\n mapping = {\n \"terminal\": -1,\n \"black\": curses.COLOR_BLACK,\n \"red\": curses.COLOR_RED,\n \"green\": curses.COLOR_GREEN,\n \"yellow\": curses.COLOR_YELLOW,\n \"blue\": curses.COLOR_BLUE,\n \"magenta\": curses.COLOR_MAGENTA,\n \"cyan\": curses.COLOR_CYAN,\n \"white\": curses.COLOR_WHITE,\n }\n\n cp = get_config_file()\n\n if \"general\" in cp.sections():\n general = cp[\"general\"]\n else:\n general = {}\n\n defaults = {\n \"color_default_background\": \"black\",\n \"color_default_foreground\": \"white\",\n \"color_correct_background\": \"black\",\n \"color_correct_foreground\": \"green\",\n \"color_wrong_background\": \"red\",\n \"color_wrong_foreground\": \"white\",\n \"color_target_background\": \"magenta\",\n \"color_target_foreground\": \"white\",\n \"color_replay_background\": \"blue\",\n \"color_replay_foreground\": \"white\",\n }\n\n colors = {}\n\n for name, default in defaults.items():\n color = general.get(name, default)\n\n if color not in mapping:\n raise KeyError(f\"Unsupported color {color}\")\n\n colors[name] = mapping[color]\n\n return colors", "def _get_format_from_style(self, token, style):\n result = QtGui.QTextCharFormat()\n for key, value in style.style_for_token(token).items():\n if value:\n if key == 'color':\n result.setForeground(self._get_brush(value))\n elif key == 'bgcolor':\n result.setBackground(self._get_brush(value))\n elif key == 'bold':\n result.setFontWeight(QtGui.QFont.Bold)\n elif key == 'italic':\n result.setFontItalic(True)\n elif key == 'underline':\n result.setUnderlineStyle(\n QtGui.QTextCharFormat.SingleUnderline)\n elif key == 'sans':\n result.setFontStyleHint(QtGui.QFont.SansSerif)\n elif key == 'roman':\n result.setFontStyleHint(QtGui.QFont.Times)\n elif key == 'mono':\n result.setFontStyleHint(QtGui.QFont.TypeWriter)\n return result", "def condense_std_named_colors(css):\n log.debug(\"Condensing standard named color values.\")\n for color_name, color_hexa in iter(tuple({\n ':aqua;': ':#0ff;', ':blue;': ':#00f;',\n ':fuchsia;': ':#f0f;', ':yellow;': ':#ff0;'}.items())):\n css = css.replace(color_name, color_hexa)\n return css", "def get_colors(use=True):\n colors = {\n \"BLACK\": \"\\033[0;30m\",\n \"DARK_GRAY\": 
\"\\033[1;30m\",\n \"RED\": \"\\033[0;31m\",\n \"LIGHT_RED\": \"\\033[1;31m\",\n \"GREEN\": \"\\033[0;32m\",\n \"LIGHT_GREEN\": \"\\033[1;32m\",\n \"BLUE\": \"\\033[0;34m\",\n \"LIGHT_BLUE\": \"\\033[1;34m\",\n \"MAGENTA\": \"\\033[0;35m\",\n \"LIGHT_MAGENTA\": \"\\033[1;35m\",\n \"CYAN\": \"\\033[0;36m\",\n \"LIGHT_CYAN\": \"\\033[1;36m\",\n \"LIGHT_GRAY\": \"\\033[0;37m\",\n \"WHITE\": \"\\033[1;37m\",\n \"DEFAULT_COLOR\": \"\\033[00m\",\n \"ENDC\": \"\\033[0m\",\n }\n\n if not use:\n for color in colors:\n colors[color] = ''\n\n return colors", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def clean_colors(self):\n err = _(\"Color must be a valid hex triplet.\")\n colors = ['background_color_custom', 'font_color_custom']\n colors2 = colors + ['background_color', 'font_color']\n # If there are custom colors specified in settings, length of\n # self.COLORS will be > 6, so check for validity\n if len(self.COLORS) > 6:\n colors = colors2\n for color in colors:\n c = getattr(self, color)\n l = len(c)\n if l:\n if l != 6:\n raise ValidationError(err)\n else:\n try:\n int(c, 16)\n except ValueError:\n raise ValidationError(err)", "def colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]", "def test_color__html_str_arg(self):\n # See test_webstyle() for related tests.\n color = pygame.Color(\"#a1B2c3D4\")\n\n self.assertEqual(color.r, 0xA1)\n self.assertEqual(color.g, 0xB2)\n self.assertEqual(color.b, 0xC3)\n self.assertEqual(color.a, 0xD4)", "def get_placeholders(self, format_string):\n placeholders = set()\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.add(token.group(\"key\"))\n elif token.group(\"command\"):\n # get any placeholders used in commands\n commands = dict(parse_qsl(token.group(\"command\")))\n # placeholders only used in `if`\n if_ = commands.get(\"if\")\n if if_:\n placeholders.add(Condition(if_).variable)\n return placeholders", "def guess_format(string):\n format_regexps = _compiled_format_regexps(_date_formats, _time_formats)\n for format, regexp in format_regexps:\n if regexp.search(string):\n return format\n # Nothing matched\n raise CannotParse(\"Could not guess date/time format in: %s\" % string)", "def colorFlags(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tflags = [i for i in dir(cv2) if i.startswith('COLOR_') and filterStr in i]\n\treturn flags", "def get_color_styles(color: str) -> str:\r\n # fmt: off\r\n color_selectors = [\"a\", \"a:hover\", \".stMultiSelect span:hover svg\", \".streamlit-expanderHeader:hover\"]\r\n bg_selectors = ['.stCheckbox label span[aria-checked=\"true\"]', \".stMultiSelect span\"]\r\n border_selectors = [\".stSelectbox > div[aria-controls] > div\", \".stMultiSelect > div[aria-controls] > div\", \".stTextArea > div:focus-within\", \".streamlit-expanderHeader:hover\"]\r\n fill_selectors = [\".streamlit-expanderHeader:hover svg\", \".stMultiSelect span:hover svg\"]\r\n # fmt: on\r\n css_root = \"#root { --primary: %s }\" % color\r\n css_color = \", \".join(color_selectors) + \"{ color: %s !important }\" % color\r\n css_fill = \", \".join(fill_selectors) + \"{ fill: %s !important }\" % color\r\n css_bg = \", \".join(bg_selectors) + \"{ background-color: %s !important }\" % color\r\n css_border = \", \".join(border_selectors) + \"{ border-color: %s !important }\" % color\r\n other = \".decoration { background: %s !important } code { color: inherit }\" % color\r\n return 
f\"<style>{css_root}{css_color}{css_bg}{css_border}{css_fill}{other}</style>\"", "def prepare_colors(self):\n colors = self.colors\n # check whether user has defined colors, if not generate random colors\n if not colors :\n random_colors = [ random.randrange(0, 255) for i in range(3)]\n colors = {\n 'fillColor' : \"rgba({},{},{},0.2)\".format(*random_colors),\n 'strokeColor': \"rgba({},{},{},1)\".format(*random_colors),\n 'pointColor': \"rgba({},{},{},1)\".format(*random_colors),\n 'pointStrokeColor': \"#fff\",\n 'pointHighlightFill': \"#fff\",\n 'pointHighlightStroke': \"rgba({},{},{},1)\".format(*random_colors),\n }\n return colors", "def highlight_series(s):\n return ['background-color: #eee' for v in s]", "def getColor(k) :\n colors = [\"#862B59\",\"#A10000\",\"#0A6308\",\"#123677\",\"#ff8100\",\"#F28686\",\"#6adf4f\",\"#58ccdd\",\"#3a3536\",\"#00ab7c\"]\n return colors[k]", "def get_colors(cls, font) -> tuple:\n\n return font[BFC]", "def _color_info_text(self):\n\n t = ''\n for info in self.color_info:\n if info == 'rgbhex':\n t1 = tks.color_funcs.rgb_to_hex_string(self.rgb)\n elif info == 'rgb':\n t1 = tks.color_funcs.rgb_to_rgb_string(self.rgb, dp=2)\n elif info == 'hsv':\n t1 = tks.color_funcs.rgb_to_hsv_string(self.rgb, dp=2)\n elif info == 'hls':\n t1 = tks.color_funcs.rgb_to_hls_string(self.rgb, dp=2)\n\n t = t + '%s\\n' % t1\n\n return t", "def assigning_colors():\n rgb_colors = {}\n for name, hex in matplotlib.colors.cnames.items():\n color = []\n # So the values are from 0-255 and not 0-1\n for i in matplotlib.colors.to_rgb(hex):\n color.append(int(i * 255))\n\n color = tuple(color)\n rgb_colors[name] = color\n\n return rgb_colors", "def hex_color(s):\n\n if s.startswith(\"#\"):\n s = s[1:]\n valid = len(s) in [1, 2, 3, 4, 6, 12] and set(s) <= set(string.hexdigits)\n if not valid:\n raise ValueError(\"colour must be 1,2,3,4,6, or 12 hex-digits\")\n\n # For the 4-bit RGB, expand to 8-bit, by repeating digits.\n if len(s) == 3:\n s = \"\".join(c + c for c in s)\n\n if len(s) in [1, 2, 4]:\n # Single grey value.\n return (int(s, 16),)\n\n if len(s) in [6, 12]:\n w = len(s) // 3\n return tuple(int(s[i : i + w], 16) for i in range(0, len(s), w))", "def get_color(cls, string_color: str) -> Union['Color', bool]:\n r = False\n for color in cls:\n # if color == cls.CSI:\n # continue\n if str(color) == string_color:\n return color\n if not r:\n r = str(color).startswith(string_color)\n\n return r", "def get_colour_map(self):\n try:\n return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',\n 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',\n 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',\n 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',\n 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',\n 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',\n 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',\n 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',\n 'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'}\n\n # If colour not found to match, return grey as a last resort\n except KeyError as e:\n print('Unmatched colour: {0}'.format(e))\n return 'Grey'", "def colors():\n\tenums = dict(\n\t\t#TIME_LEFT=\"red\",\n\t\tname=\"yellow\",\n\t\ttitle=\"magenta\",\n\t\tgenre=\"green\",\n\t\tsynopsis=\"cyan\",\n\t\tduration=\"blue\",\n\t\tdimension=\"red\"\n\t)\n\treturn type('Enum', (), enums)", "def get_color_words():\n 
color_word_dict = {}\n color_data = csv.reader(open('./color_names.csv'), delimiter=\",\", quotechar='\"')\n\n for row in color_data:\n if row[0] != \"Colour Name\":\n name = row[0].lower()\n family = row[2].lower()\n hex_value = row[3].lower()\n color_word_dict[name] = (hex_value, family)\n return color_word_dict", "def format_color(\n color: Union[ColorInputType, Any],\n warn_if_invalid: bool = True\n) -> Union[ColorType, Any]:\n if not isinstance(color, ColorInputInstance):\n return color\n if not isinstance(color, pygame.Color):\n try:\n if isinstance(color, VectorInstance) and 3 <= len(color) <= 4:\n if PYGAME_V2:\n for j in color:\n if not isinstance(j, int):\n raise ValueError('color cannot contain floating point values')\n c = pygame.Color(*color)\n else:\n c = pygame.Color(color)\n except ValueError:\n if warn_if_invalid:\n warn(f'invalid color value \"{color}\"')\n else:\n raise\n return color\n else:\n c = color\n return c.r, c.g, c.b, c.a", "def get_color(self):\n colors = []\n color_specs = [self._red_spec, self._green_spec,\n self._blue_spec, self._white_spec]\n for spec in color_specs:\n driver = DRIVERS[spec.addr]\n colors.append(driver.get_duty_cycle(spec.pin))\n \n return colors", "def read_colormap(name):\n\n path = get_demo_file(name + '.c3g')\n\n out = []\n with open(path, 'r') as file:\n for line in file:\n if 'rgb(' not in line:\n continue\n line = line.split('(')[-1].split(')')[0]\n out.append([float(n) for n in line.split(',')])\n\n return np.asarray(out).astype(float) / 256.", "def _colorstr(self, args):", "def importColors(colorlist):\n colordict=getColorDict()\n scribus.statusMessage(\"Defining new colors...\")\n scribus.progressTotal(len(colorlist))\n i=0\n for color in colorlist:\n name=color[0]\n c=color[1]\n m=color[2]\n y=color[3]\n k=color[4]\n while colordict.has_key(name):# check if color already exists - then add PREFIX to name\n name = PREFIX+name\n \n scribus.defineColor(name, c, m, y, k)\n i=i+1\n scribus.progressSet(i)", "def stylecrunch(stystr):\n return dict(pair.split(\":\") for pair in semicolons.findall(stystr))", "def test_string_to_rgb(self):\r\n strgb = string_to_rgb # for convenience\r\n self.assertEqual(strgb('#000000'), (0, 0, 0))\r\n self.assertEqual(strgb('#FFFFFF'), (255, 255, 255))\r\n self.assertEqual(strgb('#F0F0F0'), (240, 240, 240))\r\n self.assertEqual(strgb('#AFF0AA'), (175, 240, 170))", "def _get_named_css(self):\n\n return [(name, style, attributes) for (name, (order, style, attributes)) in sorted(self._named_css.items(), key=operator.itemgetter(1))]" ]
[ "0.61174923", "0.60446614", "0.60343224", "0.6032042", "0.5956374", "0.5867158", "0.5859119", "0.5769622", "0.57691973", "0.57649046", "0.5756081", "0.5748874", "0.5739813", "0.5683501", "0.5625979", "0.5616915", "0.5559172", "0.55575323", "0.5540814", "0.5540103", "0.5533907", "0.54589003", "0.54565936", "0.5453469", "0.5450036", "0.53870416", "0.53859806", "0.53788674", "0.535782", "0.5326083", "0.528517", "0.52551705", "0.5252099", "0.5234997", "0.52267635", "0.5220064", "0.5213845", "0.5204326", "0.5199273", "0.51940846", "0.519212", "0.51888794", "0.5185755", "0.51686674", "0.51518416", "0.5112559", "0.5106242", "0.50956076", "0.50415206", "0.50283426", "0.50075555", "0.5003844", "0.49999487", "0.49996766", "0.49992442", "0.49990264", "0.49945906", "0.49900392", "0.49853548", "0.49752295", "0.4963107", "0.49569002", "0.49554914", "0.49501973", "0.49344158", "0.49295518", "0.49283588", "0.49218914", "0.49171203", "0.49145755", "0.49123728", "0.4911898", "0.49115974", "0.4904733", "0.49019864", "0.4901812", "0.4897739", "0.48912007", "0.48901996", "0.4876166", "0.48715156", "0.48689505", "0.48673952", "0.48666164", "0.4863029", "0.48616642", "0.485295", "0.48456308", "0.4840736", "0.48380876", "0.48343346", "0.48335347", "0.48315033", "0.4828827", "0.48218355", "0.48201567", "0.48192173", "0.4817997", "0.48111755", "0.48040816" ]
0.8132206
0
Parses the format_string and returns a set of placeholders.
def get_placeholders(self, format_string): placeholders = set() # Tokenize the format string and process them for token in self.tokens(format_string): if token.group("placeholder"): placeholders.add(token.group("key")) elif token.group("command"): # get any placeholders used in commands commands = dict(parse_qsl(token.group("command"))) # placeholders only used in `if` if_ = commands.get("if") if if_: placeholders.add(Condition(if_).variable) return placeholders
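For illustration, a minimal standalone sketch of the placeholder extraction the document above performs, assuming a simplified {key} or {key:format} grammar; the class's own tokenizer regex, \?command parsing, and Condition handling are omitted, and the pattern below is an assumption, not the module's real reg_ex.

import re

# Illustrative only: a simplified placeholder pattern, not the class's reg_ex.
_PLACEHOLDER_RE = re.compile(r"\{(?P<key>[A-Za-z_]\w*)(?::(?P<format>[^}]*))?\}")

def get_placeholders_simple(format_string):
    # Collect the key of every {key} or {key:format} placeholder found.
    return {m.group("key") for m in _PLACEHOLDER_RE.finditer(format_string)}

# Usage:
# get_placeholders_simple("cpu {cpu_usage:.1f}% mem {mem_used}/{mem_total}")
# -> {'cpu_usage', 'mem_used', 'mem_total'}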
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_placeholder_formats_list(self, format_string):\n placeholders = []\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.append((token.group(\"key\"), token.group(\"format\")))\n return placeholders", "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def update_placeholders(self, format_string, placeholders):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if token.group(\"key\") in placeholders:\n output.append(\n \"{{{}{}}}\".format(placeholders[token.group(\"key\")], token.group(\"format\"))\n )\n continue\n elif token.group(\"command\"):\n # update any placeholders used in commands\n commands = parse_qsl(token.group(\"command\"), keep_blank_values=True)\n # placeholders only used in `if`\n if \"if\" in [x[0] for x in commands]:\n items = []\n for key, value in commands:\n if key == \"if\":\n # we have to rebuild from the parts we have\n condition = Condition(value)\n variable = condition.variable\n if variable in placeholders:\n variable = placeholders[variable]\n # negation via `!`\n not_ = \"!\" if not condition.default else \"\"\n condition_ = condition.condition or \"\"\n # if there is no condition then there is no\n # value\n if condition_:\n value_ = condition.value\n else:\n value_ = \"\"\n value = \"{}{}{}{}\".format(not_, variable, condition_, value_)\n if value:\n items.append(f\"{key}={value}\")\n else:\n items.append(key)\n\n # we cannot use urlencode because it will escape things\n # like `!`\n output.append(r\"\\?{} \".format(\"&\".join(items)))\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def _interpolate(format):\n from tokenize import tokenprog\n\n def matchorfail(text, pos):\n match = tokenprog.match(text, pos)\n if match is None:\n raise _ItplError(text, pos)\n return match, match.end()\n\n namechars = \"abcdefghijklmnopqrstuvwxyz\" \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_\";\n chunks = []\n pos = 0\n\n while 1:\n dollar = format.find(\"$\", pos)\n if dollar < 0: \n break\n nextchar = format[dollar + 1]\n\n if nextchar == \"{\":\n chunks.append((0, format[pos:dollar]))\n pos, level = dollar + 2, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token == \"{\": \n level = level + 1\n elif token == \"}\": \n level = level - 1\n chunks.append((1, format[dollar + 2:pos - 1]))\n\n elif nextchar in namechars:\n chunks.append((0, format[pos:dollar]))\n match, pos = matchorfail(format, dollar + 1)\n while pos < len(format):\n if format[pos] == \".\" and \\\n pos + 1 < len(format) and format[pos + 1] in namechars:\n match, pos = matchorfail(format, pos + 1)\n elif format[pos] in \"([\":\n pos, level = pos + 1, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = 
match.regs[3]\n token = format[tstart:tend]\n if token[0] in \"([\": \n level = level + 1\n elif token[0] in \")]\": \n level = level - 1\n else: \n break\n chunks.append((1, format[dollar + 1:pos]))\n else:\n chunks.append((0, format[pos:dollar + 1]))\n pos = dollar + 1 + (nextchar == \"$\")\n\n if pos < len(format): \n chunks.append((0, format[pos:]))\n return chunks", "def _parse(self, fmtstr):\n def _match_brace(string, start_pos, pair='[]'):\n \"\"\"Pairing brackets (used internally in _parse method)\"\"\"\n depth = 1\n if string[start_pos] != pair[0]:\n return None\n for index, char in enumerate(string[start_pos + 1:]):\n if char == pair[0]:\n depth += 1\n elif char == pair[1]:\n depth -= 1\n if depth == 0:\n return start_pos + index + 1\n return None\n\n #----------------------------------------------------------------------\n\n t_fmt = self.__class__._T_FMT\n t_prefix = self.__class__._T_PREFIX\n\n ptr = 0\n # it seems that field id 0 is invalid\n field_id = 1\n length = len(fmtstr)\n parsed_list = []\n\n while ptr < length:\n parsed = {}\n m_prefix = t_prefix.match(fmtstr[ptr:])\n if m_prefix:\n ptr += _get_length_of_match(m_prefix)\n parsed['prefix'] = m_prefix.group(1)\n\n # check if we have a nested structure\n if m_prefix.group(2):\n brace_offset = _match_brace(fmtstr, ptr - 1)\n\n # bracket not match\n if not brace_offset:\n raise BadFormatString(\n 'Unmatched brace on position {0}'.format(ptr)\n )\n parsed['field_id'] = field_id\n parsed['field_type'] = 'a'\n parsed['subcontent'] = self._parse(\n fmtstr[ptr:brace_offset]\n )\n ptr = brace_offset + 1\n field_id += 1\n\n parsed_list.append(parsed)\n continue\n m_fmt = t_fmt.match(fmtstr[ptr:])\n if m_fmt:\n ptr += _get_length_of_match(m_fmt)\n\n # fmt is an alias\n if m_fmt.group(2):\n parsed['field_type'] = self.__class__\\\n .FIELD_ALIAS[m_fmt.group(2)]\n # fmt is an actual field type\n elif m_fmt.group(1):\n parsed['field_type'] = m_fmt.group(1)\n\n # save field id\n parsed['field_id'] = field_id\n\n # check for type clones (e.g. 
`v3')\n if m_fmt.group(3):\n parsed['repeat'] = int(m_fmt.group(3))\n field_id += int(m_fmt.group(3))\n else:\n parsed['repeat'] = 1\n field_id += 1\n\n parsed_list.append(parsed)\n\n else:\n raise BadFormatString(\n 'Invalid token on position {0}'.format(ptr)\n )\n\n # all set\n return parsed_list", "def _get_placeholders(template):\n return [p[1] for p in string.Formatter().parse(template)\n if p[1] is not None and len(p[1]) > 0]", "def tokens(self, format_string):\n if format_string not in self.format_string_cache:\n tokens = list(re.finditer(self.reg_ex, format_string))\n self.format_string_cache[format_string] = tokens\n return self.format_string_cache[format_string]", "def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt", "def parse_name_and_type_from_fmt_str(\n formatted_str: str,\n allowed_types: Optional[Dict[str, Component]] = None\n) -> Generator[Tuple[str, Type[Field]], None, None]:\n for _, arg_name, _type_name, _ in Formatter().parse(formatted_str):\n if arg_name is not None:\n try:\n assert _type_name is not None\n _type = (\n allowed_types[_type_name] if allowed_types is not None\n and _type_name in allowed_types\n else getattr(pyopenapi3.data_types, _type_name)\n )\n yield arg_name, _type\n except AttributeError:\n raise ValueError(\n \"A non-`Field` or `OpenApiObject` type was found. \"\n f\"Can't use `{_type_name}` as a type in {formatted_str}. \"\n f\"Must be a stringified pyopenapi3 `data_type`, such \"\n f\"as `pyopenapi3.data_types.String`, or a reference to a \"\n f\"Component.\"\n ) from None", "def __init__(self, format_string):\r\n if not isinstance(format_string, Compatibility.string):\r\n raise TypeError('format_string should be a string, instead got %s' % type(format_string))\r\n self._re_pattern, self._applicators = self._preprocess_format_string(format_string)\r\n self._re = re.compile(self._re_pattern)", "def extract_pattern(fmt):\n class FakeDict(object):\n def __init__(self):\n self.seen_keys = set()\n\n def __getitem__(self, key):\n self.seen_keys.add(key)\n return ''\n\n def keys(self):\n return self.seen_keys\n\n fake = FakeDict()\n try:\n fmt % fake\n except TypeError:\n # Formatting error\n pass\n return set(fake.keys())", "def parse(self, s):\n\n segments = self.compiled.split(self._whitespace.sub(\" \", s))\n literals = segments[::2]\n raw = segments[1::2]\n\n if not raw:\n return []\n\n case = list(map(str.casefold, raw))\n prefixes = [{}] + [dict(self.locale_set.prefixes.get(match, ())) for match in case[:-1]]\n suffixes = [dict(self.locale_set.suffixes.get(match, ())) for match in case[1:]] + [{}]\n\n groups = _DateTime(**{ field: [] for field in _DateTime._fields })\n choices_per_position = {}\n always_literal = set()\n numeric = set()\n for idx, (prefix, suffix) in enumerate(zip(prefixes, suffixes)):\n keyword = self._lookup_keyword(raw[idx])\n if \"y\" in prefix:\n prefix[\"C\"] = tuple(set(prefix[\"y\"] + prefix.get(\"C\", ())))\n if not keyword:\n always_literal.add(idx)\n else:\n if raw[idx].isdigit():\n numeric.add(idx)\n choices_per_position[idx] = len(keyword)\n for fmt, value, locales in keyword:\n category = fmt[-1]\n if category == \"b\":\n # Month-names should be treated like numeric months.\n category = \"m\"\n elif category == \"z\":\n category = \"Z\"\n getattr(groups, 
category).append(_Assignment(\n fmt=fmt,\n pos=idx,\n value=value,\n locales=locales,\n prefix=prefix.get(fmt[-1]),\n suffix=suffix.get(fmt[-1]),\n ))\n numeric = frozenset(numeric)\n\n # If a required date field is unsatisfiable, this is not a date.\n if not all(getattr(groups, category) for category in _State._min_date_formats):\n for category in _State._all_date_formats:\n getattr(groups, category).clear()\n\n # If a required time field is unsatisfiable, this is not a time.\n if not all(getattr(groups, category) for category in _State._min_time_formats):\n for category in _State._all_time_formats:\n getattr(groups, category).clear()\n\n for group in groups:\n group.sort(key=lambda assignment: (\n -self._optimistic_score(assignment),\n choices_per_position[assignment.pos],\n ))\n\n required_formats = _State._min_date_formats + _State._min_time_formats\n groups = OrderedDict(sorted(\n (\n (\n category,\n (\n group,\n tuple(\n (f, required)\n for f, required in _position_constraints\n if category in required\n ),\n tuple(\n (f, required)\n for f, required, revisit in _value_constraints\n if category in required or category in revisit\n ),\n )\n )\n for category, group in zip(groups._fields, groups)\n if group\n ),\n key=lambda i: (i[0] not in required_formats, len(i[1][0]))\n ))\n\n # We've already filtered out all possibilities; there's nothing here.\n if not groups:\n return []\n\n constrained_groups = []\n while groups:\n category, (group, position, value) = groups.popitem(last=False)\n constrained_groups.append((category, group, position, value))\n required = frozenset(itertools.chain.from_iterable(required for f, required in itertools.chain(position, value)))\n if required:\n required = [\n category\n for category in reversed(groups.keys())\n if category in required\n ]\n for category in required:\n groups.move_to_end(category, last=False)\n groups = constrained_groups\n\n best_quality = 0\n best_candidates = []\n\n partials = [\n _State.empty._replace(\n unconverted=frozenset(always_literal),\n remaining_groups=tuple(groups),\n ).children(numeric=numeric)\n ]\n while partials:\n try:\n quality, locales, state = next(partials[-1])\n except StopIteration:\n partials.pop()\n continue\n\n if state.remaining_groups:\n # Admissable heuristic: compute the best score each group\n # could possibly achieve. Don't count conversion specifiers\n # that we've already used, but don't worry about conflicts\n # in the groups we haven't assigned yet. Any such conflicts\n # can only reduce the resulting score, and we only need to\n # make sure that the heuristic is at least as large as the\n # true value of the best leaf in this subtree. 
However, the\n # more precise we can be here, the fewer nodes we have to\n # search, so we can spend some CPU time on precision and\n # still come out ahead.\n assigned = state.unconverted.union(state.pos).difference((None,))\n heuristic = len(state.pending_hints) + sum(\n next((\n self._optimistic_score(assignment)\n for assignment in group[1]\n if assignment.pos not in assigned\n ), 0)\n for group in state.remaining_groups\n )\n\n if quality + heuristic < best_quality:\n # Even assuming the remaining groups get the highest\n # possible score, this state is still not good enough.\n continue\n\n partials.append(state.children(numeric=numeric))\n continue\n\n value = state.valid()\n if value is None:\n continue\n\n quality, locales, state = state.final_score()\n\n if best_quality is not None and quality < best_quality:\n # We've seen better, so skip this one.\n continue\n\n if quality != best_quality:\n best_quality = quality\n best_candidates = []\n\n conversions = dict(zip(state.pos, state.fmts))\n fmts = [ conversions.get(idx) or literal for idx, literal in enumerate(raw) ]\n\n pattern = ''.join(lit + fmt for lit, fmt in zip(literals, fmts + [''])).replace(\"%C%y\", \"%Y\")\n best_candidates.append((pattern, value, locales))\n return best_candidates", "def find_time(string, format):\n re_format = format\n for key, value in six.iteritems(REGEX):\n re_format = re_format.replace(key, value)\n matches = re.finditer(re_format, string)\n for match in matches:\n try:\n matchstr = string[slice(*match.span())]\n dt = datetime.strptime(matchstr, format)\n except ValueError:\n continue\n else:\n yield dt", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def parse_format(var_sample):\n # ugh\n ret = []\n # Parsing format information\n # Need to see what all these could be...\n if None in var_sample[\"GT\"]:\n ret.append(3)\n elif var_sample[\"GT\"] == (0, 0):\n ret.append(0)\n elif var_sample[\"GT\"] == (0, 1):\n ret.append(1)\n elif var_sample[\"GT\"] == (1, 1):\n ret.append(2)\n \n ret.extend([var_sample[\"GQ\"] if var_sample[\"GQ\"] is not None else 0,\n var_sample[\"OV\"],\n var_sample[\"DP\"], # be careful these aren't '.'\n #split where _r is ref-allele and _a is alt-allele\n var_sample[\"AD\"][0],\n var_sample[\"AD\"][1],\n var_sample[\"PDP\"],\n var_sample[\"PAD\"][0],\n var_sample[\"PAD\"][1],\n var_sample[\"US\"][0],\n var_sample[\"US\"][1],\n var_sample[\"DS\"][0],\n var_sample[\"DS\"][1],\n var_sample[\"UC\"][0],\n var_sample[\"UC\"][1],\n var_sample[\"DC\"][0],\n var_sample[\"DC\"][1],\n var_sample[\"UDC\"][0],\n var_sample[\"UDC\"][1],\n var_sample[\"UCC\"][0],\n var_sample[\"UCC\"][1],\n var_sample[\"DDC\"][0],\n var_sample[\"DDC\"][1],\n var_sample[\"DCC\"][0],\n var_sample[\"DCC\"][1],\n var_sample[\"UMO\"][0],\n var_sample[\"UMO\"][1],\n var_sample[\"DMO\"][0],\n var_sample[\"DMO\"][1],\n var_sample[\"UXO\"][0],\n var_sample[\"UXO\"][1],\n var_sample[\"DXO\"][0],\n var_sample[\"DXO\"][1],\n var_sample[\"NR\"][0],\n var_sample[\"NR\"][1],\n var_sample[\"MO\"][0],\n var_sample[\"MO\"][1],\n var_sample[\"XO\"][0],\n var_sample[\"XO\"][1],\n var_sample[\"XC\"][0],\n var_sample[\"XC\"][1],\n var_sample[\"AC\"][0],\n var_sample[\"AC\"][1],\n var_sample[\"MC\"][0],\n var_sample[\"MC\"][1],\n var_sample[\"EC\"][0],\n var_sample[\"EC\"][1],\n var_sample[\"PL\"][0] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][1] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][2] if var_sample[\"PL\"][0] is not None else 0])\n return ret\n 
#END", "def guess_format(string):\n format_regexps = _compiled_format_regexps(_date_formats, _time_formats)\n for format, regexp in format_regexps:\n if regexp.search(string):\n return format\n # Nothing matched\n raise CannotParse(\"Could not guess date/time format in: %s\" % string)", "def parse(string, format):\n # Count the number of spaces in the format string (N), and\n # truncate everything after the (N+1)th space\n spaces = format.count(' ') + 1\n string = ' '.join(string.split()[:spaces])\n\n try:\n result = dt.datetime.strptime(string, format)\n except ValueError, err:\n raise CannotParse(str(err))\n else:\n return result", "def change_format_var_parser(text, tracker):\n param_names = format_var_parser(text)\n param_vars = {}\n for param_name in param_names:\n try:\n param_vars[param_name] = tracker.get_slot(param_name)\n except Exception as e:\n PYTHON_LOGGER.error(\"Error to get var name {}: {}\".format(param_name, e))\n return text.format(**param_vars)", "def parseSettings (formatStr):\n # split string\n fmt = formatStr.split ('.')\n if len (fmt) < 0:\n return (None, None)\n \n # find encoder\n encoderInstance = None\n for e in _registeredEncoders:\n if len (fmt) > 0:\n if e.name () == fmt[0]:\n encoderInstance = e\n break\n else:\n encoderInstance = _registeredEncoders[0] if len (_registeredEncoders) > 0 else None\n\n # return tuple with parsed settings\n presetName = '.'.join (fmt[1:])\n if presetName in (None, \"\"):\n if encoderInstance != None:\n presetName = encoderInstance.defaultPreset ()\n return (encoderInstance, presetName)", "def parse(timestring):\n for parser in _PARSERS:\n match = parser['pattern'].match(timestring)\n if match:\n groups = match.groups()\n ints = tuple(map(int, groups))\n time = parser['factory'](ints)\n return time\n\n raise TimeError('Unsupported time format {}'.format(timestring))", "def extract_fields(entry_string):\n for field, value in re.findall(\"(.*?)=(.*?)\\}\", entry_string):\n yield field.strip(\",\").strip(\" \"), value.strip(\"{\").strip(\"}\")", "def parse_template(template):\n field_name = None\n field_value = []\n\n for line in template.strip().split('\\n') + ['end:']:\n if line.startswith('#'):\n continue\n match = RE_TEMPLATE_FIELD_LINE.match(line)\n if match:\n if field_name is not None:\n yield (field_name, '\\n'.join(field_value).strip())\n elif len(field_value) > 0:\n logging.warning('Ignoring lines: %r', field_value)\n\n field_name = match.group(1)\n field_value = [match.group(2)]\n else:\n field_value.append(line)", "def strptime(date_string, format):\n i = 0\n format_len = len(format)\n # Iterate through the format string, applying parsers and matching literal\n # chars as appropriate.\n struct_time_d = {}\n while i < format_len:\n c = format[i]\n # If the character is not the start of a directive, attempt to match a\n # literal character.\n if c != '%':\n if date_string[0] != c:\n return None\n date_string = date_string[1:]\n else:\n # Read the next character of the directive, letting an IndexError\n # raise if format is exhausted/malformed.\n i += 1\n directive = format[i]\n # Raise a ValueError just like the built-in datetime.strptime()\n # if the directive is invalid.\n if directive not in DIRECTIVE_PARSER_MAP:\n raise ValueError(\"{} is a bad directive in format {}\".format(\n repr(directive[1]), repr(directive)))\n # Get the parser.\n parser = DIRECTIVE_PARSER_MAP[directive]\n # Check whether the parser is yet to be implemented.\n if parser is NOT_IMPLEMENTED:\n raise NotImplementedError(\n 'parser not defined for 
directive: {}'.format(directive)\n )\n # Do the parsing.\n result = parser(date_string)\n # Return None on any parsing failure.\n if result is False:\n return None\n value, date_string = result\n # Convert the directive value to a struct_time item.\n struct_time_item = directive_to_struct_time_item(directive, value)\n if struct_time_item is not None:\n k, v = struct_time_item\n # If the key already exists, accumulate, otherwise set.\n if k in struct_time_d:\n struct_time_d[k] += v\n else:\n struct_time_d[k] = v\n i += 1\n\n # Return None if the date string has not been completely consumed.\n if len(date_string) > 0:\n return None\n\n # Return None if a +12 hour AM_PM = 'PM' accumulation overflowed a parsed\n # HOUR_24 value.\n if not (0 <= struct_time_d.get(STRUCT_TIME.TM_HOUR, 0) <= 23):\n return None\n\n # Attempt to get year/month/day for date ops.\n year = struct_time_d.get(STRUCT_TIME.TM_YEAR)\n month = struct_time_d.get(STRUCT_TIME.TM_MON)\n day = struct_time_d.get(STRUCT_TIME.TM_MDAY)\n has_date = year is not None and month is not None and day is not None\n\n # Return None if the specified day is not valid for the month.\n if has_date and not is_valid_month_day(year, month, day):\n return None\n\n # Create an initial struct_time object.\n _struct_time = struct_time(\n *[struct_time_d.get(k, 0) for k in STRUCT_TIME_FIELDS]\n )\n\n if has_date:\n # Check whether accumulated minute value exceeds its max as a result of\n # accumulating a time zone offset, requiring some calendar day math.\n if not 0 <= struct_time_d.get(STRUCT_TIME.TM_MIN, 0) <= 59:\n # Pass _struct_time along with an empty time_delta to\n # add_struct_time_time_delta() to take advantage of its\n # over/underflow logic. Note that add_struct_time_time_delta() will\n # take care of setting the final day of week / year.\n _struct_time = add_struct_time_time_delta(\n _struct_time, time_delta())\n else:\n # Calculate the final day of week / year.\n _struct_time = struct_time_replace(\n _struct_time,\n tm_wday=date_to_day_of_week(year, month, day),\n tm_yday=date_to_day_of_year(year, month, day)\n )\n\n return _struct_time", "def parse_place_notation(input_string: str) -> Tuple[int, str]:\n\n # Looking for a string that matches <stage>:<place notation> where the\n # place notation is a series of bell numbers and 'x' characters\n parts = input_string.split(\":\")\n if len(parts) == 2:\n stage_part = parts[0]\n if len(stage_part) == 0 or not stage_part.isnumeric():\n raise PlaceNotationError(input_string, \"Stage must be a number\")\n stage = int(stage_part)\n place_notation = parts[1]\n if not valid_pn(place_notation):\n raise PlaceNotationError(input_string, \"Place notation is invalid\")\n else:\n raise PlaceNotationError(input_string, \"<stage>:<place notation> required\")\n\n return stage, place_notation", "def _parse_kvfmt(self, fmtlist):\n t_fmt = self.__class__._T_FMT\n t_prefix = self.__class__._T_PREFIX\n parsed_list = []\n field_id = 1\n\n for entry in fmtlist:\n name = entry[0]\n fmt = entry[1]\n parsed_field = {}\n parsed_field['name'] = name\n if isinstance(fmt, str):\n ptr = 0\n m_prefix = t_prefix.match(fmt)\n if m_prefix:\n ptr += _get_length_of_match(m_prefix)\n parsed_field['prefix'] = m_prefix.group(1)\n # check for optional nested structure start (required if the field is also repeated)\n if m_prefix.group(2) and len(entry) > 2:\n parsed_field['field_id'] = field_id\n parsed_field['field_type'] = 'a'\n parsed_field['subcontent'] = self._parse_kvfmt(entry[2])\n field_id += 1\n 
parsed_list.append(parsed_field)\n continue\n elif m_prefix.group(2):\n raise BadFormatString('Nested field type used without specifying field format.')\n m_fmt = t_fmt.match(fmt[ptr:])\n if m_fmt:\n ptr += _get_length_of_match(m_fmt)\n resolved_fmt_char = None\n # fmt is an alias\n if m_fmt.group(2):\n resolved_fmt_char = m_fmt.group(2)\n parsed_field['field_type'] = self.__class__\\\n .FIELD_ALIAS[m_fmt.group(2)]\n # fmt is an actual field type\n elif m_fmt.group(1):\n resolved_fmt_char = m_fmt.group(1)\n parsed_field['field_type'] = m_fmt.group(1)\n parsed_field['field_id'] = field_id\n # only skip type (`x') is allowed for copying in key-value mode\n if m_fmt.group(3) and resolved_fmt_char == 'x':\n repeats = int(m_fmt.group(3))\n parsed_field['repeat'] = repeats\n field_id += repeats\n elif m_fmt.group(3):\n raise BadFormatString('Field copying is not allowed in key-value format list.')\n else:\n field_id += 1\n else:\n raise BadFormatString('Invalid type for field \"{0}\"'.format(name))\n if len(fmt) != ptr:\n self.logger.warning('Extra content found after the type string of %s.', name)\n else:\n # Hard-code the empty prefix because we don't support copying\n parsed_field['prefix'] = ''\n parsed_field['field_id'] = field_id\n parsed_field['field_type'] = 'a'\n parsed_field['subcontent'] = self._parse_kvfmt(fmt)\n field_id += 1\n parsed_list.append(parsed_field)\n return parsed_list", "def build_block(self, format_string):\n first_block = Block(None, py3_wrapper=self.py3_wrapper)\n block = first_block\n\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n value = token.group(0)\n if token.group(\"block_start\"):\n # Create new block\n block = block.new_block()\n elif token.group(\"block_end\"):\n # Close block setting any valid state as needed\n # and return to parent block to continue\n if not block.parent:\n raise Exception(\"Too many `]`\")\n block = block.parent\n elif token.group(\"switch\"):\n # a new option has been created\n block = block.switch()\n elif token.group(\"placeholder\"):\n # Found a {placeholder}\n key = token.group(\"key\")\n format = token.group(\"format\")\n block.add(Placeholder(key, format))\n elif token.group(\"literal\"):\n block.add(Literal(value))\n elif token.group(\"lost_brace\"):\n # due to how parsing happens we can get a lonesome }\n # eg in format_string '{{something}' this fixes that issue\n block.add(Literal(value))\n elif token.group(\"command\"):\n # a block command has been found\n block.set_commands(token.group(\"command\"))\n elif token.group(\"escaped\"):\n # escaped characters add unescaped values\n if value[0] in [\"\\\\\", \"{\", \"}\"]:\n value = value[1:]\n block.add(Literal(value))\n\n if block.parent:\n raise Exception(\"Block not closed\")\n # add to the cache\n self.block_cache[format_string] = first_block", "def _parse_date_format( fmp_database ):\n\tdef build_one( stamp ):\n\t\treturn stamp.\\\n\t\t replace( 'yyyy', '%Y' ).\\\n\t\t replace( 'MM', '%m' ).\\\n\t\t replace( 'dd', '%d' ).\\\n\t\t replace( 'HH', '%H' ).\\\n\t\t replace( 'mm', '%M' ).\\\n\t\t replace( 'ss', '%S' )\n\n\treturn {\n\t\t'date': build_one( fmp_database.get('date-format', '') ),\n\t\t'timestamp': build_one( fmp_database.get('timestamp-format', '') ),\n\t\t'time': build_one( fmp_database.get('time-format', '') ),\n\t}", "def _slice_template(cls, in_str: str) -> Iterator[RawFileSlice]:\n fmt = Formatter()\n in_idx = 0\n for literal_text, field_name, format_spec, conversion in fmt.parse(in_str):\n if literal_text:\n 
escape_chars = cls._sorted_occurrence_tuples(\n cls._substring_occurrences(literal_text, [\"}\", \"{\"])\n )\n idx = 0\n while escape_chars:\n first_char = escape_chars.pop()\n # Is there a literal first?\n if first_char[1] > idx:\n yield RawFileSlice(\n literal_text[idx : first_char[1]], \"literal\", in_idx\n )\n in_idx += first_char[1] - idx\n # Add the escaped\n idx = first_char[1] + len(first_char[0])\n # We double them here to make the raw\n yield RawFileSlice(\n literal_text[first_char[1] : idx] * 2, \"escaped\", in_idx\n )\n # Will always be 2 in this case.\n # This is because ALL escape sequences in the python formatter\n # are two characters which reduce to one.\n in_idx += 2\n # Deal with last one (if present)\n if literal_text[idx:]:\n yield RawFileSlice(literal_text[idx:], \"literal\", in_idx)\n in_idx += len(literal_text) - idx\n # Deal with fields\n if field_name:\n constructed_token = \"{{{field_name}{conv}{spec}}}\".format(\n field_name=field_name,\n conv=f\"!{conversion}\" if conversion else \"\",\n spec=f\":{format_spec}\" if format_spec else \"\",\n )\n yield RawFileSlice(constructed_token, \"templated\", in_idx)\n in_idx += len(constructed_token)", "def read_atom_data(atom_format, line):\n if atom_format is None:\n _format = [guess_string_format(i.strip()) for i in line.split()]\n else:\n _format = atom_format\n formatted = []\n for i, (val, fmti) in enumerate(zip(line.split(), _format)):\n istrip = val.strip()\n try:\n formatted.append(fmti(istrip))\n except ValueError:\n fmt = guess_string_format(istrip)\n _format[i] = fmt\n formatted.append(fmt(istrip))\n return formatted, _format", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def parse(cls, value: str) -> Tuple[str, Dict[str, str]]:\n raw_value = read_value_from_path(value)\n args: Dict[str, str] = {}\n\n if \"@\" in raw_value:\n args[\"region\"], raw_value = raw_value.split(\"@\", 1)\n\n # now find any other arguments that can be filters\n matches = re.findall(r\"([0-9a-zA-z_-]+:[^\\s$]+)\", raw_value)\n for match in matches:\n k, v = match.split(\":\", 1)\n args[k] = v\n\n return args.pop(\"name_regex\"), args", "def get_placeholders(self):\n\n # Define regex\n regex = f'\\{self.marker_string}([^$]+)\\{self.marker_string}'\n\n found_placeholders = [match.group(1) for match in re.finditer(regex, self.raw_sequence)]\n\n # Check for duplicates\n for found_placeholder in found_placeholders:\n if found_placeholders.count(found_placeholder) != 1:\n\n error_msg = f\"Placeholder {found_placeholder} found multiple times in sequence.\"\n self.hd.log.info(error_msg)\n\n # Remove duplicates from list\n found_placeholders = list(set(found_placeholders))\n\n return found_placeholders", "def parse_template(string):\n count = 0\n list1 = []\n for character in string:\n count = count + 1\n if character == \"{\":\n end = string.find(\"}\", count)\n s_strg = string[count:end]\n list1.append(s_strg)\n string = string.replace(s_strg, \"\", 1)\n count = count - len(s_strg)\n\n subs = tuple(list1)\n\n return(string, subs)\n print(subs)", "def _parseAttributeScanf(self, line, formatting):\n\n # multiple 
entrys\n if isinstance(formatting, list):\n for scanf_format in formatting:\n try:\n #print \"<<<----\", scanf_format, line\n return sscanf(line, scanf_format)\n except IncompleteCaptureError, e:\n pass\n\n # single entry\n else:\n return sscanf(line, formatting)\n\n # problem if none of the formats worked\n raise IncompleteCaptureError(\"Format error for %s\" % line)", "def regex_findall_variables(raw_string: Text) -> List[Text]:\n try:\n match_start_position = raw_string.index(\"$\", 0)\n except ValueError:\n return []\n\n vars_list = []\n while match_start_position < len(raw_string):\n\n # Notice: notation priority\n # $$ > $var\n\n # search $$\n dollar_match = dolloar_regex_compile.match(raw_string, match_start_position)\n if dollar_match:\n match_start_position = dollar_match.end()\n continue\n\n # search variable like ${var} or $var\n var_match = variable_regex_compile.match(raw_string, match_start_position)\n if var_match:\n var_name = var_match.group(1) or var_match.group(2)\n vars_list.append(var_name)\n match_start_position = var_match.end()\n continue\n\n curr_position = match_start_position\n try:\n # find next $ location\n match_start_position = raw_string.index(\"$\", curr_position + 1)\n except ValueError:\n # break while loop\n break\n\n return vars_list", "def _parse_parameters(self, parameters_text):\n for mo in re.finditer(self._PARAMETERS_RE, parameters_text):\n self._parameters.append(Parameter(mo.group(\"param_name\"), mo.group(\"default_value\")))", "def format_regexp(simple_format):\n format, regexp = ('', '')\n for char in simple_format:\n if char in _regexps:\n format += '%' + char\n regexp += _regexps[char]\n else:\n format += char\n regexp += char\n return (format, regexp)", "def parsePercentExpression(literal, format):\n\tmat = _getREForPercentExpression(format).match(literal)\n\tif not mat:\n\t\traise ValueError(\"'%s' cannot be parsed using format '%s'\"%(\n\t\t\tliteral, format))\n\treturn mat.groupdict()", "def _parse_param_line(regex, line):\n out = []\n for r in regex.findall(line):\n converter, variable = r[0] or 'default', r[1]\n out.append((_CONVERTERS[converter], variable))\n return out", "def __new__(cls, format):\n self = super(SF_Pattern, cls).__new__(cls)\n\n if isinstance(format, bytes):\n uni_str = format.decode('ISO-8859-1') # decode to unicode\n trans_str = translate(uni_str) # translate only works with unicode\n re_fmt = trans_str.encode('ISO-8859-1') # encode back to bytes\n self._spec = _gbspec\n else:\n re_fmt = translate(format)\n self._spec = _gspec\n\n self._format = format\n self._re = cre = re.compile(re_fmt)\n\n if cre.groupindex and len(cre.groupindex) != cre.groups:\n raise RuntimeError('cannot mix mapped and unmapped specifiers')\n elif not cre.groupindex:\n self._retfunc = self._return_tuple\n self._type = tuple\n else:\n self._retfunc = self._return_dict\n self._type = dict\n\n self._casts = self._get_types()\n\n return self", "def scanf(format, string):\n re_fmt = compile(format)\n result = re_fmt.scanf(string)\n _log.debug('%r <- %r = %r', format, string, result)\n return result", "def parse_from_regex(string,pattern,fields):\n\n string = string.replace('\\\\','/') # USE POSIX PLEASE\n num_groups = flat_paren_counter(pattern)\n if isinstance(fields,str):\n fields = [fields]\n num_fields = len(fields)\n if not num_fields == num_groups:\n return {}\n match = re.search(pattern,string)\n if not num_groups == len(match.groups()):\n return {}\n \n l = []\n \n for field,value in zip(fields,list(match.groups())):\n d = 
nested_notation_to_tree(field,value)\n l.append(d)\n return deep_merge_N(l)", "def parse_params(txt):\n res = list()\n # First, slipt with stuff looking like \\TYPE:\n splitted = re.split(r'\\s*\\\\(\\w+)\\s*:', txt)\n # We now have a list looking like:\n # ['', 'flag', '....', 'param', '...']\n i = 1\n while i < len(splitted) - 1:\n type = splitted[i]\n rest = splitted[i+1]\n if type == \"argn\":\n name = \"remaining args\"\n desc = rest\n else:\n # first word is the name, the rest is the description:\n match = re.match(r'\\s*(\\w+)\\s*(.*)', rest, re.DOTALL)\n if not match:\n print(\"warning, failed to parse parameters\")\n print(\"near\", rest)\n break\n (name, desc) = match.groups()\n desc = clean_indent(desc)\n res.append((type, name, desc))\n i += 2\n return res", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def input_parser(input_string: str) -> str: \n if is_int(input_string):\n return input_string\n #he is int, give back plz.\n else:\n try:\n modified_input: str = input_string.strip()\n\n evaluatable_pairs: str = regex_splitter(modified_input)\n\n while not (is_int(evaluatable_pairs)):\n evaluatable_pairs = regex_splitter(evaluatable_pairs)\n\n return (evaluatable_pairs)\n\n except:\n raise Exception(\"Invalid Input\")", "def parse_pattern(pattern: NumberPattern | str) -> NumberPattern:\n if isinstance(pattern, NumberPattern):\n return pattern\n\n def _match_number(pattern):\n rv = number_re.search(pattern)\n if rv is None:\n raise ValueError(f\"Invalid number pattern {pattern!r}\")\n return rv.groups()\n\n pos_pattern = pattern\n\n # Do we have a negative subpattern?\n if ';' in pattern:\n pos_pattern, neg_pattern = pattern.split(';', 1)\n pos_prefix, number, pos_suffix = _match_number(pos_pattern)\n neg_prefix, _, neg_suffix = _match_number(neg_pattern)\n else:\n pos_prefix, number, pos_suffix = _match_number(pos_pattern)\n neg_prefix = f\"-{pos_prefix}\"\n neg_suffix = pos_suffix\n if 'E' in number:\n number, exp = number.split('E', 1)\n else:\n exp = None\n if '@' in number and '.' 
in number and '0' in number:\n raise ValueError('Significant digit patterns can not contain \"@\" or \"0\"')\n if '.' in number:\n integer, fraction = number.rsplit('.', 1)\n else:\n integer = number\n fraction = ''\n\n def parse_precision(p):\n \"\"\"Calculate the min and max allowed digits\"\"\"\n min = max = 0\n for c in p:\n if c in '@0':\n min += 1\n max += 1\n elif c == '#':\n max += 1\n elif c == ',':\n continue\n else:\n break\n return min, max\n\n int_prec = parse_precision(integer)\n frac_prec = parse_precision(fraction)\n if exp:\n exp_plus = exp.startswith('+')\n exp = exp.lstrip('+')\n exp_prec = parse_precision(exp)\n else:\n exp_plus = None\n exp_prec = None\n grouping = parse_grouping(integer)\n return NumberPattern(pattern, (pos_prefix, neg_prefix),\n (pos_suffix, neg_suffix), grouping,\n int_prec, frac_prec,\n exp_prec, exp_plus, number)", "def getAllPlaceholders(self, template_list):\n placeholders = []\n for line in template_list:\n found = re.findall(self.pattern, line)\n placeholders.extend(found)\n placeholders.reverse()\n #place the cursor placeholder at the begin\n placeholders = self._placeCursors(placeholders)\n return placeholders", "def parse_styles(text: str) -> List[dict]:\n styles = []\n regex = r'(\\d{3})=(\".*?\"),(\\d+\\.?\\d+),(\\(.*?\\))'\n\n for line in text.split(\"\\r\\n\"):\n if line == \"\":\n continue\n\n n, font, font_size, color = re.match(regex, line).groups()\n styles.append(\n {\n \"id\": int(n),\n \"f\": font.replace('\"', \"\"),\n \"fs\": float(font_size),\n \"rgb\": [\n int(i)\n for i in color.replace(\"(\", \"\")\n .replace(\")\", \"\").split(\",\")]\n }\n )\n\n return styles", "def parse_isdcf_string(str):\n fields_dict = {}\n error_list = []\n\n if not isinstance(str, six.string_types):\n error_list.append(\"ContentTitle invalid type\")\n return fields_dict, error_list\n\n # Sort the fields to respect DCNC order\n # Note : in python3 we can declare an OrderedDict({...}) and the field\n # order is preserved so this is not needed, but not in python 2.7\n dcnc_version = DCP_SETTINGS['naming_convention']\n rules = OrderedDict(sorted(\n six.iteritems(RULES[dcnc_version]),\n key=lambda f: RULES_ORDER.index(f[0])))\n\n fields_dict = init_dict_isdcf(rules)\n fields_list = str.split('_')\n\n if len(fields_list) != 12:\n error_list.append(\n \"ContentTitle should have 12 parts to be fully compliant with\"\n \" ISDCF naming convention version {}, {} part(s) found\"\n .format(dcnc_version, len(fields_list)))\n\n # Parsing title with some robustness to missing / additionals fields\n # Find a match in nearby fields only\n max_field_shift = 3\n\n fields_matched = []\n\n for idx_field, field in enumerate(fields_list):\n matched = False\n\n for idx_rule, (name, regex) in enumerate(six.iteritems(rules)):\n pattern = re.compile(regex)\n match = re.match(pattern, field)\n\n if idx_field == 0 and not match:\n error_list.append(\n \"ContentTitle Film Name does not respect naming convention\"\n \" rules : {}\".format(field))\n elif match and idx_rule < max_field_shift:\n fields_dict[name].update(match.groupdict(DEFAULT))\n else:\n continue\n\n fields_dict[name]['Value'] = field\n fields_matched.append(name)\n sliced = islice(six.iteritems(rules), idx_rule + 1, None)\n rules = OrderedDict(sliced)\n matched = True\n break\n\n if not matched:\n error_list.append(\n \"ContentTitle Part {} not matching any naming convention field\"\n .format(field))\n\n for name, _ in six.iteritems(RULES[dcnc_version]):\n if name not in fields_matched:\n error_list.append(\n 
\"Field {} not found in ContentTitle\".format(name))\n\n fields_dict = post_parse_isdcf(fields_dict)\n return fields_dict, error_list", "def extract(self, optimized_str):\n\n logger.debug('START optimized_str ========================')\n logger.debug(optimized_str)\n logger.debug('END optimized_str ==========================')\n logger.debug(\n 'Date parsing: languages=%s date_formats=%s',\n self.options['languages'], self.options['date_formats'])\n logger.debug('Float parsing: decimal separator=%s', self.options['decimal_separator'])\n logger.debug(\"keywords=%s\", self['keywords'])\n logger.debug(self.options)\n\n # Try to find data for each field.\n output = {}\n for k, v in self['fields'].items():\n if k.startswith('static_'):\n logger.debug(\"field=%s | static value=%s\", k, v)\n output[k.replace('static_', '')] = v\n else:\n logger.debug(\"field=%s | regexp=%s\", k, v)\n\n # Fields can have multiple expressions\n if type(v) is list:\n for v_option in v:\n res_find = re.findall(v_option, optimized_str)\n if res_find:\n break\n else:\n res_find = re.findall(v, optimized_str)\n if res_find:\n logger.debug(\"res_find=%s\", res_find)\n if k.startswith('date'):\n raw_date = res_find[0]\n output[k] = dateparser.parse(\n raw_date, date_formats=self.options['date_formats'],\n languages=self.options['languages'])\n logger.debug(\"result of date parsing=%s\", output[k])\n if not output[k]:\n logger.error(\n \"Date parsing failed on date '%s'\", raw_date)\n return None\n elif k.startswith('amount'):\n assert res_find[0].count(self.options['decimal_separator']) < 2,\\\n 'Decimal separator cannot be present several times'\n # replace decimal separator by a |\n amount_pipe = res_find[0].replace(self.options['decimal_separator'], '|')\n # remove all possible thousands separators\n amount_pipe_no_thousand_sep = re.sub(\n '[.,\\s]', '', amount_pipe)\n # put dot as decimal sep\n amount_regular = amount_pipe_no_thousand_sep.replace('|', '.')\n # it is now safe to convert to float\n output[k] = float(amount_regular)\n else:\n output[k] = res_find[0]\n else:\n logger.warning(\"regexp for field %s didn't match\", k)\n\n output['currency'] = self.options['currency']\n\n if len(output.keys()) >= 4:\n output['desc'] = 'Invoice %s from %s' % (\n output['invoice_number'], self['issuer'])\n logger.debug(output)\n return output\n else:\n logger.error(output)\n return None", "def select_stylestrs(cfgstr):\n stylestrs = []\n for s in cfgstr.split():\n if s in vars(fmt):\n stylestrs.append(s)\n return stylestrs", "def format_data(data_string):\n lines = data_string.split('\\\"\\n\\\"')\n split_data = [re.split(r\"\\\"\\s*,\\s*\\\"\", line) for line in lines]\n\n return split_data", "def load(fmt: str, stream: BytesIO, _unpack=unpack):\n values = []\n bitcount = bits = 0\n for char in fmt:\n if char == '?':\n if not bitcount:\n bits = _unpack('>B', _read(stream, 1))[0]\n bitcount = 8\n value = (bits & 1) == 1\n bits >>= 1\n bitcount -= 1\n else:\n bitcount = bits = 0\n if char == 'B':\n value = _unpack('>B', _read(stream, 1))[0]\n elif char == 'H':\n value = _unpack('>H', _read(stream, 2))[0]\n elif char == 'L':\n value = _unpack('>L', _read(stream, 4))[0]\n elif char == 'Q':\n value = _unpack('>Q', _read(stream, 8))[0]\n elif char == 's':\n length = _unpack('>B', _read(stream, 1))[0]\n value = _unpack('>%ss' % length, _read(stream, length))[0]\n value = value.decode('utf-8', 'surrogatepass')\n elif char == 'S':\n length = _unpack('>L', _read(stream, 4))[0]\n value = _unpack('>%ss' % length, _read(stream, 
length))[0]\n value = value.decode('utf-8', 'surrogatepass')\n elif char == 't':\n timestamp = _unpack('>Q', _read(stream, 8))[0]\n value = datetime.utcfromtimestamp(timestamp)\n elif char == 'T':\n value = {}\n length = _unpack('>L', _read(stream, 4))[0]\n stream2 = BytesIO(_read(stream, length))\n while stream2.tell() < length:\n key = load('s', stream2)[0]\n value[key] = _load_item(stream2)\n elif char != '?':\n raise ValueError('wrong format char', char)\n values.append(value)\n return values", "def get_parameter(pstring):\n parameters = pstring.replace(',', ' ').split()\n if len(parameters) == 1:\n init_value = float(parameters[0])\n return (init_value, None, None)\n elif len(parameters) == 3:\n init_value = float(parameters[0])\n if parameters[1].upper() == 'NONE':\n lower_value = None\n else:\n lower_value = float(parameters[1])\n if parameters[2].upper() == 'NONE':\n upper_value = None\n else:\n upper_value = float(parameters[2])\n return (init_value, lower_value, upper_value)\n else:\n raise ValueError('Invalid parameter format: %s' % pstring)", "def parse_known_date_formats(dt_string):\n for fmt in ('%Y%m%d', '%Y%m%d %H:%M', '%m/%d/%Y', '%m/%d/%Y %H:%M'):\n try:\n return datetime.strptime(dt_string, fmt)\n except ValueError:\n pass\n raise ValueError(\"No valid date format found.\"\n \"See https://tidesandcurrents.noaa.gov/api/ \"\n \"for list of accepted date formats.\")", "def parseStructuredData( raw_data, format ):\n\tparsed_data = {}\n\n\tBEGIN = 0\n\tEND = 1\n\tCONV = 2\n\n\tfor key in format.keys():\n\t\tkey_format = format[key]\n\n\t\tif key_format[END] == \"\":\n\t\t\tkey_format[END] = key_format[BEGIN] + len(raw_data)\n\n\t\ttxt = raw_data[key_format[BEGIN]:key_format[END]]\n\n\t\tif len(key_format[CONV]) > 0:\n\t\t\tparsed_data[key] = struct.unpack(key_format[CONV], txt)[0]\n\t\telse:\n\t\t\tparsed_data[key] = txt\n\n\treturn parsed_data", "def get_color_names(self, format_string):\n names = set()\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"command\"):\n name = dict(parse_qsl(token.group(\"command\"))).get(\"color\")\n if (\n not name\n or name in COLOR_NAMES_EXCLUDED\n or name in COLOR_NAMES\n or name[0] == \"#\"\n ):\n continue\n names.add(name)\n return names", "def _validate_pattern_fields(self):\n # TO ADD:\n ## check pattern is dict ??\n ## A1 ! check if all vars used in sprintf are declared\n ## check for quoted '%s' in sprintf text (not allowed).\n ## check that only all subs are %s and that the number of %s matches the length of the var list.\n ## re.search(r\"\\%(.)\", , ) => from this get list to check all %s and length to check against var list.\n ## Given these checks - makes more sense to hardwire sprintf subfield names than use config approach.\n \n for field, field_content in self.pattern.items():\n if field not in self.pkey_dict:\n warnings.warn(\"Pattern has unknown field: %s !\" % field)\n \n # The following is quote ugly and hard to follow. 
Should probably be refactored\n oneOf = False\n oneOf_list = []\n for field, field_spec in self.pkey_dict.items():\n if field_spec['compulsory']:\n if field not in self.pattern:\n warnings.warn(\"Pattern is missing compulsory field: %s !\" % field)\n elif field_spec['OneOf']:\n oneOf_list.append(field)\n if field in self.pattern:\n oneOf = True \n if field_spec['sprintf']:\n if field in self.pattern:\n for subfield in self.pattern[field]:\n if subfield not in self.sprintf_keys:\n warnings.warn(\"The field %s has an unknown subfield %s.\" % (field, subfield))\n for subfield in self.sprintf_keys:\n if subfield not in self.pattern[field]:\n warnings.warn(\"The field %s lacks the compulsory subfield %s.\" % (field, subfield))\n # Check that number of vars matches number %s in text field\n if not len(re.findall('%s', self.pattern[field]['text'])) == len(self.pattern[field]['vars']):\n warnings.warn(\"Wrong number of vars in field '%s' of %s\" % (field, self.pattern['pattern_name']))\n for v in self.pattern[field]['vars']:\n if v not in self.pattern['vars']:\n warnings.warn(\"%s not in varlist %s\" % (v, str(self.pattern['vars'])))\n# Move spec checks down: \n# if field_spec['msExpression']:\n# self._validate_quoted(field['text'])\n# self._validate_ms\n \n if not oneOf:\n warnings.warn(\"Pattern must have at least one of: \" + str(oneOf_list))\n\n # Poss to add: validate number of vars for sprintf subs", "def init_from_net(self, inputstr):\n offset = 0\n input_len = len(inputstr)\n used_len = 0\n\n try:\n (used_len, self.bpq_kind, self.matching_rule,\n self.creation_ts, self.creation_seq,\n self.src_eid_len) = unpack_from(\"!BBvvv\", inputstr,\n offset = offset)\n offset = used_len\n if ((offset + self.src_eid_len) > input_len):\n raise struct_error(\"Input string too short at src_eid\")\n\n self.src_eid = inputstr[offset : (offset + self.src_eid_len)]\n\n offset += self.src_eid_len\n (used_len, self.bpq_id_len) = unpack_from(\"!v\", inputstr,\n offset = offset)\n offset += used_len\n if ((offset + self.bpq_id_len) > input_len):\n raise struct_error(\"Input string too short at bpq_id\")\n\n self.bpq_id = inputstr[offset : (offset + self.bpq_id_len)]\n\n offset += self.bpq_id_len\n (used_len, self.bpq_val_len) = unpack_from(\"!v\", inputstr,\n offset = offset)\n \n offset += used_len\n if ((offset + self.bpq_val_len) > input_len):\n raise struct_error(\"Input string too short at bpq_val\")\n\n self.bpq_val = inputstr[offset : (offset + self.bpq_val_len)]\n\n offset += self.bpq_val_len\n (used_len, self.frag_cnt) = unpack_from(\"!v\", inputstr,\n offset = offset)\n offset += used_len\n\n self.frag_desc = []\n\n if self.frag_cnt > 0:\n fmt_str = \"!\" + (\"vv\" * self.frag_cnt)\n frag_tuple = unpack_from(fmt_str, inputstr, offset = offset)\n offset += frag_tuple[0]\n i = 0\n j = 1\n while (i < self.frag_cnt):\n d = {}\n d[\"frag_offset\"] = frag_tuple[j]\n j += 1\n d[\"frag_len\"] = frag_tuple[j]\n j += 1\n self.frag_desc.append(d)\n i += 1\n if (offset != input_len):\n raise struct_error(\"Input string is wrong length\")\n except Exception, e:\n return False\n\n return self.validate()", "def placeholder_to_regex(placeholder,encloser='%',matcher='(.+)'):\n pattern = placeholder\n pattern = pattern.replace('\\\\','/')\n if pattern.count('%') == 0 or pattern.count('%') % 2 != 0:\n return '',[]\n else:\n borders = pattern.split(encloser)[::2]\n fields = pattern.split(encloser)[1::2]\n for field in fields:\n pattern = pattern.replace(encloser+field+encloser, matcher, 1)\n pattern = 
pattern.replace('/','\\\\/')\n return pattern,fields", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def reformat_placeholders(content: str) -> str:\n return content.replace(\"<MM>\", \"[[\").replace(\"</MM>\", \"]]\")", "def _decode_block_string(self, block_string: str):\n\n arg_strings = block_string.split('_')\n args = {}\n for arg_string in arg_strings:\n splits = re.split(r'(\\d.*)', arg_string)\n if len(splits) >= 2:\n key, value = splits[:2]\n args[key] = value\n num_repeat = int(args['r'])\n block_args = {\n 'kernel_size': int(args['k']),\n 'stride': int(args['s']),\n 'expand_ratio': int(args['e']),\n 'in_channels': int(args['i']),\n 'out_channels': int(args['o']),\n 'se_ratio': float(args['se']) if 'se' in args else None,\n }\n return block_args, num_repeat", "def _check_tokens_are_valid(format_string, message):\n named_tokens = re.findall(r\"{(\\w*)}\", format_string)\n invalid_tokens = [x for x in named_tokens if x.lower() not in _valid_tokens]\n if invalid_tokens:\n msg = message\n msg += \" [{0}]. \".format(\", \".join(invalid_tokens))\n msg += 'Did you check your \"modules.yaml\" configuration?'\n raise RuntimeError(msg)", "def parse_pattern_str(pattern: str, parent_layer: nn.Layer) -> Union[None, List[Dict[str, Union[nn.Layer, str, None]]]]:\n\n pattern_list = pattern.split(\".\")\n if not pattern_list:\n msg = f\"The pattern('{pattern}') is illegal. Please check and retry.\"\n return None\n\n layer_list = []\n while len(pattern_list) > 0:\n if '[' in pattern_list[0]:\n target_layer_name = pattern_list[0].split('[')[0]\n target_layer_index = pattern_list[0].split('[')[1].split(']')[0]\n else:\n target_layer_name = pattern_list[0]\n target_layer_index = None\n\n target_layer = getattr(parent_layer, target_layer_name, None)\n\n if target_layer is None:\n msg = f\"Not found layer named('{target_layer_name}') specifed in pattern('{pattern}').\"\n return None\n\n if target_layer_index and target_layer:\n if int(target_layer_index) < 0 or int(target_layer_index) >= len(target_layer):\n msg = f\"Not found layer by index('{target_layer_index}') specifed in pattern('{pattern}'). 
The index should < {len(target_layer)} and > 0.\"\n return None\n\n target_layer = target_layer[target_layer_index]\n\n layer_list.append({\"layer\": target_layer, \"name\": target_layer_name, \"index\": target_layer_index})\n\n pattern_list = pattern_list[1:]\n parent_layer = target_layer\n return layer_list", "def parseConfig(f):\n config = {\"formats\":{}}\n \n for line in f:\n if line.startswith(\"//\"): \n continue\n \n sline = re.split(\"[=\\s]\", line)\n if sline[0] is \"\":\n continue\n \n if sline[0]==\"format\":\n #Puts the format as a key in the dict pointed to by \"formats\"\n config[\"formats\"][sline[1]] = sline[3] \n else:\n config[sline[0]] = sline[1]\n \n return config", "def format(self, varnames, fmts):\n varnames = self._find_vars(varnames, empty_ok=False)\n indexes = list(map(self._varlist.index, varnames))\n \n # check that fmts are specified properly\n if isinstance(fmts, str):\n fmts = fmts.split()\n else:\n if ( not isinstance(fmts, collections.Iterable) \n or not all(isinstance(f, str) for f in fmts) ):\n raise TypeError(\"given fmts must be str or iterable of str\")\n fmts = [x for s in fmts for x in s.split()]\n if len(fmts) == 0:\n raise ValueError(\"no formats specified\")\n \n # check fmts for validity\n is_valid = self._is_valid_fmt\n if not all(is_valid(fmt) for fmt in fmts):\n bad_fmts = \" \".join(fmt for fmt in fmts if not is_valid(fmt))\n raise ValueError(\"invalid formats: \" + bad_fmts)\n \n # pad fmts if necessary \n nvarnames = len(varnames)\n nfmts = len(fmts)\n if nfmts < nvarnames:\n fmts = list(fmts) + [fmts[-1]]*(nvarnames - nfmts)\n \n # check that formats match Stata types\n #typlist = self._typlist\n isstrvar = self._isstrvar\n if not all(isstrvar(i) == bool(STR_FMT_RE.match(fmt))\n for i, fmt in zip(indexes, fmts)):\n raise ValueError(\"format does not match Stata variable type\")\n \n # replace fmts (extras, if any, don't get used)\n for i, fmt in zip(indexes, fmts):\n self._fmtlist[i] = fmt\n \n # assume there are changes\n self._changed = True", "def initFormat(self):\n self.formatList = []", "def _search_and_replace(parser, section):\n INTERPOLATION_RE = re.compile(r\"\\$\\{(?:(?P<section>[^:]+):)?(?P<key>[^}]+)\\}\")\n result = []\n def interpolate_func(match):\n d = match.groupdict()\n s = d.get('section')\n if s is None:\n s = section\n key = d.get('key')\n return parser.get(s, key)\n\n for key, value in parser.items(section):\n value = re.sub(INTERPOLATION_RE, interpolate_func, value)\n result.append(\n (key,value)\n )\n return result", "def parse_string(self, in_str):\n match = MAIN_REGEX.search(in_str)\n if not match:\n err_str = \"Unable to parse string: %s\" % in_str\n raise ValueError(err_str)\n self.parse_completed(match.group(1))\n self.parse_priority(match.group(2))\n if match.group(3) and match.group(4):\n self.parse_completion_date(match.group(3))\n self.parse_creation_date(match.group(4))\n else:\n self.parse_creation_date(match.group(3))\n self.parse_description(match.group(5))", "def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF", "def func2(string:str):\n with open(string,\"r\") as file:\n data = file.read()\n data = data.split(\"bandwidths [1]:\")[0]\n\n final = {}\n for i in range(1,3):\n final[\"formants [{}]\".format(i)] = []\n my_list = data.split(\"formants\")\n for i in range(2,4):\n final[\"formants 
[{}]\".format(i-1)].extend(list(map(pars_points,my_list[i].split(\"points \")[1:])))\n return final", "def fmt_capture(kwargs: Any, *patterns: Any) -> Any: # type: ignore\n results = [copy_annotations(pattern, _fmt_capture(kwargs, pattern)) for pattern in each_string(*patterns)]\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(results) == 1\n return results[0]\n return results", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def findall(pattern, text):\r\n\tspl = re.compile(pattern).split(text)\r\n\tresult = []\r\n\tbeginTag = \"\"\r\n\tendTag = None\r\n\tbeginFormat = \"\"\r\n\tendFormat = \"\"\r\n\tinitText = text\r\n\tfor s in spl:\r\n\t\ttext = text[len(s)+2:]\r\n\t\tend = text.find(\")s\")\r\n\t\tvar = \"\"\r\n\t\tif len(text) > 0:\r\n\t\t\tvar = text[:end]\r\n\t\t\tresult.append(var)\r\n\t\tif var == \"content\":\r\n\t\t\tbeginTag += s\r\n\t\t\tendTag = \"\"\r\n\t\telif endTag != None:\r\n\t\t\tendTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in [\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tendFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tendFormat += \"self.%s,\"%var\r\n\t\t\t\tendTag += \"\\x25s\"\r\n\t\telse:\r\n\t\t\tbeginTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in [\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tbeginFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tbeginFormat += \"self.%s,\"%var\r\n\t\t\t\tbeginTag += \"\\x25s\"\r\n\t\ttext = text[end+2:]\r\n\tif endTag == None:\r\n\t\tendTag = \"\"\r\n\t\tendFormat = \"\"\r\n\treturn result, beginTag, endTag, beginFormat, endFormat", "def get_source_file_string_placeholders(file):\n placeholders = {}\n root = ET.parse(file).getroot()\n for element in root.findall('string'):\n name = element.get('name')\n value = ''.join(element.itertext())\n placeholder = get_placeholders(value)\n if placeholder:\n placeholders[name] = placeholder\n return placeholders", "def parse_variables(self, text, separator=None):\n\n def splitter(x, separator=None):\n if len(x) > 1:\n y = x.split(separator)\n return (y[0], y[-1])\n return (None, None)\n\n return dict(splitter(x, separator=separator) for x in text.split(\"\\n\"))", "def from_string(cls, string):\n m = re.match(r\"([0-9- :]+)\" # Timestamp (group 1)\n r\",[0-9]+\\s\" # Timestamp ms (ignored)\n r\"([A-Z]+)\\s+\" # Debug level (group 2)\n r\"(\\d+)\\s+\" 
# PID (group 3)\n r\"\\[\"\n r\"([^]]+)\\s*\" # Module name (group 4)\n r\"\\]:\\s+\"\n r\"(.*)\", # Debug message (group 5)\n string)\n if not m:\n raise DebugFormatError(\n \"Failed to match {} against the expected format\".format(\n string))\n ts = datetime.datetime.strptime(m.group(1), TS_FORMAT)\n return DebugStmt(ts, m.group(2), int(m.group(3)),\n m.group(4).strip(),\n m.group(5))", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def get_parameters_from_input_string(string):\n parameter_array = []\n start_found = False\n item = str(\"\")\n for i in range(len(string)): \n if start_found == True and string[i] != \",\" and string[i] !=\")\":\n item += string[i]\n elif start_found == True and string[i] == \",\":\n if item not in parameter_array:\n parameter_array.append(item)\n item = str(\"\")\n elif start_found == True and string[i] == \")\":\n start_found = False\n if item not in parameter_array:\n parameter_array.append(item)\n item = str(\"\")\n # Start here and set start_found to True\n elif string[i] == \"(\":\n start_found = True\n return parameter_array", "def check_placeholders(value):\n if isinstance(value, six.string_types):\n if TOKEN_REGEX.search(value):\n raise ValueError('{0:s} must be replaced in dictionary'.format(value))\n elif isinstance(value, list):\n return [check_placeholders(item) for item in value]\n elif isinstance(value, dict):\n return {key: check_placeholders(val) for key, val in value.items()}\n elif isinstance(value, tuple):\n return tuple(check_placeholders(val) for val in value)\n return value", "def populate_fields(self):\n self.group_name = self._extract_group_name()\n self.services = self._extract_services()\n\n self._logger.debug(\"Parsed value: {}\".format([\n self.group_name,\n self.services]))", "def _parameterize_string(raw):\n\n parts = []\n s_index = 0\n\n for match in _PARAMETER_PATTERN.finditer(raw):\n parts.append(raw[s_index:match.start()])\n parts.append({u\"Ref\": match.group(1)})\n s_index = match.end()\n\n if not parts:\n return GenericHelperFn(raw)\n\n parts.append(raw[s_index:])\n return GenericHelperFn({u\"Fn::Join\": [u\"\", parts]})", "def field_values(s):\n if s: \n for f in s.split(','):\n if f.find('=') > 0:\n (field, value) = f.split('=')\n else:\n field = f\n value = None\n yield (field, value)", "def get_formatted_string(self, input_string):\n if isinstance(input_string, str):\n try:\n return self.get_processed_string(input_string)\n except KeyError as err:\n # Wrapping the KeyError into a less cryptic error for end-user\n # friendliness\n missing_key = err.args[0]\n raise KeyNotInContextError(\n f'Unable to format \\'{input_string}\\' with '\n f'{{{missing_key}}}, because '\n f'context[\\'{missing_key}\\'] doesn\\'t exist') from err\n else:\n raise TypeError(f\"can only format on strings. 
{input_string} is a \"\n f\"{type(input_string)} instead.\")", "def _parse_format(mode=2, rc_kw=None, **kwargs):\n kw = {}\n rc_kw = rc_kw or {}\n for key, value in kwargs.items():\n key_fixed = _rc_nodots.get(key, None)\n if key_fixed is None:\n kw[key] = value\n else:\n rc_kw[key_fixed] = value\n return rc_kw, mode, kw", "def get_params(string_in, separator=' ', defaultmissing='-', params_to_get=3):\r\n rtr = str(string_in).split(separator)\r\n if len(rtr) > params_to_get:\r\n rtr = []\r\n rtr.append(str(string_in))\r\n for x in range(0, (params_to_get - len(rtr))):\r\n rtr.append(defaultmissing)\r\n return rtr[0],rtr[1],rtr[2]", "def readOpt(self, String0):\n Name = re.match(r\"NAME : (.*)\", String0)[1]\n COMMENT = re.search(r\"COMMENT : (.*)\", String0)[1]\n TYPE = re.search(r\"TYPE : (.*)\", String0)[1]\n DIMENSION = re.search(r\"DIMENSION : (.*)\", String0)[1]\n split = String0.split(\"\\n\")\n Tour = []\n for s0 in split:\n if (s0 and s0[0] <= '9' and s0[0] >= '0'):\n Tour.append(int(s0))\n return Name, COMMENT, TYPE, DIMENSION, Tour", "async def parse(self, raw: str) -> dict:", "def _build_parsed_values(self):\n match = SAMPLE_REGEX.match(self.raw_data)\n \n if not match:\n raise SampleException(\"No regex match of parsed sample data: [%s]\" %\n self.decoded_raw)\n \n log.trace(\"Matching sample [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s]\",\n match.group(1),match.group(2),match.group(3),match.group(4),match.group(5),\n match.group(6),match.group(7),match.group(8),match.group(9),match.group(10),\n match.group(11),match.group(12))\n res_5 = float(match.group(1))\n res_x1 = float(match.group(2))\n res_x5 = float(match.group(3))\n h_5 = float(match.group(4))\n h_x1 = float(match.group(5))\n h_x5 = float(match.group(6))\n eh = float(match.group(7))\n ref_temp_v = float(match.group(8))\n ref_temp_c = float(match.group(9))\n res_temp_v = float(match.group(10))\n res_temp_c = float(match.group(11))\n batt_v = float(match.group(12))\n \n \n result = [{DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_5,\n DataParticleKey.VALUE: res_5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_X1,\n DataParticleKey.VALUE: res_x1},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_X5,\n DataParticleKey.VALUE: res_x5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_5,\n DataParticleKey.VALUE: h_5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_X1,\n DataParticleKey.VALUE: h_x1},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_X5,\n DataParticleKey.VALUE: h_x5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.EH_SENSOR,\n DataParticleKey.VALUE: eh},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.REFERENCE_TEMP_VOLTS,\n DataParticleKey.VALUE: ref_temp_v},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.REFERENCE_TEMP_DEG_C,\n DataParticleKey.VALUE: ref_temp_c},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_TEMP_VOLTS,\n DataParticleKey.VALUE: res_temp_v},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_TEMP_DEG_C,\n DataParticleKey.VALUE: res_temp_c},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.BATTERY_VOLTAGE,\n DataParticleKey.VALUE: batt_v}\n ]\n \n return result", "def split_text(text: str) -> List[Dict[str, str]]:\n # split into paragraphs\n lines = text.splitlines()\n groups = common.group_list(lines, lambda a, _: a.strip() == '')\n paras = ['\\n'.join(item) for empty_line, item in groups if not empty_line]\n\n def _fallback(p, type):\n logging.warn(f'Wrong {type} 
format:\\n' + p)\n cells.append({'type': 'text', 'source': p})\n\n cells = []\n for p in paras:\n lines = p.splitlines() + ['']\n p += '\\n'\n if p.startswith('#'):\n # parse title\n if not _is_mark(lines[1:]):\n _fallback(p, 'title')\n else:\n m = re.match(r'#+ *', lines[0])\n cells.append({\n 'type': 'title',\n 'prefix': m[0],\n 'source': lines[0][m.span()[1]:],\n 'mark': '\\n'.join(lines[1:])})\n elif p.startswith('$$'):\n # parse equations\n m = re.findall(r'\\$\\$', p)\n if len(m) != 2:\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'equation', 'source': p})\n elif p.startswith('!['):\n # parse images\n if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]):\n _fallback(p, 'image')\n else:\n cells.append({'type': 'image', 'source': p})\n elif p.startswith('|'):\n # parse table\n for i, l in enumerate(lines):\n if not l.startswith('|'):\n break\n if not _is_mark(lines[i:]):\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'table', 'source': p})\n else:\n groups = common.group_list(lines, _list)\n for prefix, item in groups:\n if len(prefix.split('__')) == 2:\n prefix = prefix.split('__')[0]\n source = '\\n'.join(item)[len(prefix):]\n if prefix == '':\n cells.append({'type': 'text', 'source': source})\n else:\n cells.append({\n 'type': 'list',\n 'prefix': prefix,\n 'source': source})\n return cells", "def format_input(self, args):\n\n new_list = []\n if args[1].find('{') != -1:\n new_list = self.format_dicti(args)\n return new_list\n else:\n new_list = []\n new_list.append(args[0])\n new_str = args[1][ args[1].find('(') + 2 : args[1].find(',') - 1]\n new_str += args[1][ args[1].find(',') : args[1].find(')') - 0]\n new_list.append(\" \".join(new_str.split(\", \") ) )\n\n return \" \".join(i for i in new_list)", "def parse_line(self, line):\n line = line.strip()\n log.debug(\"Parsing line: '{}'\".format(line))\n if len(line) == 0:\n log.warning(\"Zero length line detected\")\n return\n split = line.split(DELIMETER)\n key = split[0]\n if key in FORMATS:\n log.debug(\"Using formatter for key: {}\".format(key))\n formatter = FORMATS[key]\n for (name, parser), value in zip(formatter,split[1:]):\n self._params[name] = parser(value)\n log.info(\"Parameters: {}\".format(self._params))\n self.notify_watchers()\n else:\n log.debug(\"Invalid key: {}\".format(key))", "def _parse_date(date_string: str) -> Union[datetime.datetime, str]:\n for date_format in KNOWN_DATE_FORMATS:\n try:\n date = datetime.datetime.strptime(date_string, date_format)\n return date\n except ValueError:\n continue\n return date_string", "def p_parse(toks):\n return p_question_group.parseString(toks[0])", "def placeholders(self):\n x = [i.placeholder for i in self._input_desc]\n return x[0] if len(x) == 1 else x", "def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", string_for_substitution):\n return_string = re.sub(\"//%%\" + i + \"%%//\", dictionary_of_vars[i],\n return_string)\n return return_string", "def getModelParameters(parameterstring):\n \n def getFormattedValue(strval):\n if '\\'' in strval:\n return strval.replace('\\'', '')\n elif '\"' in strval:\n return strval.replace('\"', '')\n elif '.' 
in strval:\n return float(strval)\n elif strval == 'True':\n return True\n elif strval == 'False':\n return False\n else:\n return int(strval)\n \n ((25,),)\n def parseTuple(strval):\n idx = strval.find(\"(\")+1\n values = []\n i = idx\n while i < len(strval):\n if strval[i] == '(':\n nested, lnested = parseTuple(strval[i:])\n print(i)\n i += lnested\n idx = i+1\n print(i)\n values.append(nested)\n elif strval[i] == ')':\n newval = strval[idx:i].strip()\n if newval != '':\n values.append(getFormattedValue(newval))\n return tuple(values), i\n elif strval[i] == ',':\n newval = strval[idx:i].strip()\n if newval != '':\n values.append(getFormattedValue(newval))\n idx = i+1\n i += 1\n \n rv = dict()\n if parameterstring is None:\n return rv\n params = parameterstring.strip().split(\"=\")\n nextkey = params[0]\n for pi in range(1,len(params)):\n cur = params[pi]\n if '(' in cur:\n if cur.count(\"(\") != cur.count(\")\"):\n raise InvalidParameters(\"Unequal number of paranthesis.\")\n value, _ = parseTuple(cur)\n rv[nextkey] = value\n nextkey = cur[cur.rfind(',')].strip()\n else:\n commasplit = cur.split(\",\")\n value = commasplit[0].strip()\n rv[nextkey] = getFormattedValue(value)\n nextkey = commasplit[1].strip()\n \n return rv" ]
[ "0.7910855", "0.694886", "0.64524835", "0.6108374", "0.60248744", "0.601687", "0.60016334", "0.5864088", "0.57037616", "0.55861354", "0.55645305", "0.554253", "0.5510738", "0.55103034", "0.5497989", "0.5479562", "0.5358258", "0.5325262", "0.5298705", "0.5236709", "0.5222342", "0.51827663", "0.51105356", "0.50851625", "0.50643116", "0.5060817", "0.50543", "0.504011", "0.50389504", "0.50320303", "0.5023713", "0.49628156", "0.49474", "0.4932526", "0.49250206", "0.4920185", "0.48929116", "0.488577", "0.48788518", "0.4871326", "0.48706472", "0.4867422", "0.4859408", "0.48556447", "0.48346376", "0.48269424", "0.48231214", "0.4800975", "0.4784804", "0.47704986", "0.47641996", "0.47560325", "0.47526085", "0.4747038", "0.47439802", "0.47433746", "0.47348925", "0.47204137", "0.47143885", "0.47011897", "0.46814787", "0.46608815", "0.46500766", "0.46480286", "0.4647152", "0.46470582", "0.4639534", "0.46385488", "0.46313027", "0.46297532", "0.46193704", "0.46148014", "0.46126005", "0.46121374", "0.46111414", "0.46067926", "0.45950744", "0.45924458", "0.4592238", "0.45918962", "0.45918962", "0.45899415", "0.4579904", "0.45788056", "0.45658273", "0.45550698", "0.4546244", "0.45430353", "0.45407522", "0.4539369", "0.45362082", "0.45324203", "0.45311207", "0.45287028", "0.45187566", "0.4516464", "0.45095298", "0.45090404", "0.4503912", "0.449647" ]
0.7691594
1
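The three fields just above (negative_scores, document_score, document_rank) are mutually consistent if document_rank is read as the number of negatives that outscore the document, i.e. the document's 0-indexed position in the merged, descending-sorted score list. A minimal sketch of that reading, using only the first few dumped scores; the interpretation itself is an assumption, not something the dump states:

    # Hypothetical reading of the rank field; values copied from the dump above.
    negative_scores = [0.7910855, 0.694886, 0.64524835]  # first three of the list
    document_score = 0.7691594

    # Assumed rule: rank = how many negatives score higher than the document.
    document_rank = sum(score > document_score for score in negative_scores)
    print(document_rank)  # -> 1, matching the dumped document_rank

Only one negative (0.7910855) beats 0.7691594, which matches the dumped rank of 1.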
Parses the format_string and returns a list of tuples (placeholder, format).
def get_placeholder_formats_list(self, format_string): placeholders = [] # Tokenize the format string and process them for token in self.tokens(format_string): if token.group("placeholder"): placeholders.append((token.group("key"), token.group("format"))) return placeholders
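The document above relies on a self.tokens() tokenizer whose regex exposes placeholder, key and format named groups (the same contract the update_placeholder_formats negative below reads). A minimal self-contained sketch; the regex here is a hypothetical simplification, and the real tokenizer is considerably richer:

    import re

    class Formatter:
        # Simplified stand-in for the real tokenizer regex (assumption): it
        # matches {key} and {key:format}, exposing the named groups that the
        # documented method reads.
        reg_ex = re.compile(r"(?P<placeholder>\{(?P<key>\w+)(?P<format>:[^}]*)?\})")

        def tokens(self, format_string):
            return self.reg_ex.finditer(format_string)

        def get_placeholder_formats_list(self, format_string):
            placeholders = []
            # Tokenize the format string and collect (key, format) pairs
            for token in self.tokens(format_string):
                if token.group("placeholder"):
                    placeholders.append((token.group("key"), token.group("format")))
            return placeholders

    print(Formatter().get_placeholder_formats_list("{title} {percent:3.0f}"))
    # -> [('title', None), ('percent', ':3.0f')]

With the simplified regex the format group is None when no :format suffix is given; the real implementation may yield an empty string there instead.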
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def read_atom_data(atom_format, line):\n if atom_format is None:\n _format = [guess_string_format(i.strip()) for i in line.split()]\n else:\n _format = atom_format\n formatted = []\n for i, (val, fmti) in enumerate(zip(line.split(), _format)):\n istrip = val.strip()\n try:\n formatted.append(fmti(istrip))\n except ValueError:\n fmt = guess_string_format(istrip)\n _format[i] = fmt\n formatted.append(fmt(istrip))\n return formatted, _format", "def _parse(self, fmtstr):\n def _match_brace(string, start_pos, pair='[]'):\n \"\"\"Pairing brackets (used internally in _parse method)\"\"\"\n depth = 1\n if string[start_pos] != pair[0]:\n return None\n for index, char in enumerate(string[start_pos + 1:]):\n if char == pair[0]:\n depth += 1\n elif char == pair[1]:\n depth -= 1\n if depth == 0:\n return start_pos + index + 1\n return None\n\n #----------------------------------------------------------------------\n\n t_fmt = self.__class__._T_FMT\n t_prefix = self.__class__._T_PREFIX\n\n ptr = 0\n # it seems that field id 0 is invalid\n field_id = 1\n length = len(fmtstr)\n parsed_list = []\n\n while ptr < length:\n parsed = {}\n m_prefix = t_prefix.match(fmtstr[ptr:])\n if m_prefix:\n ptr += _get_length_of_match(m_prefix)\n parsed['prefix'] = m_prefix.group(1)\n\n # check if we have a nested structure\n if m_prefix.group(2):\n brace_offset = _match_brace(fmtstr, ptr - 1)\n\n # bracket not match\n if not brace_offset:\n raise BadFormatString(\n 'Unmatched brace on position {0}'.format(ptr)\n )\n parsed['field_id'] = field_id\n parsed['field_type'] = 'a'\n parsed['subcontent'] = self._parse(\n fmtstr[ptr:brace_offset]\n )\n ptr = brace_offset + 1\n field_id += 1\n\n parsed_list.append(parsed)\n continue\n m_fmt = t_fmt.match(fmtstr[ptr:])\n if m_fmt:\n ptr += _get_length_of_match(m_fmt)\n\n # fmt is an alias\n if m_fmt.group(2):\n parsed['field_type'] = self.__class__\\\n .FIELD_ALIAS[m_fmt.group(2)]\n # fmt is an actual field type\n elif m_fmt.group(1):\n parsed['field_type'] = m_fmt.group(1)\n\n # save field id\n parsed['field_id'] = field_id\n\n # check for type clones (e.g. 
`v3')\n if m_fmt.group(3):\n parsed['repeat'] = int(m_fmt.group(3))\n field_id += int(m_fmt.group(3))\n else:\n parsed['repeat'] = 1\n field_id += 1\n\n parsed_list.append(parsed)\n\n else:\n raise BadFormatString(\n 'Invalid token on position {0}'.format(ptr)\n )\n\n # all set\n return parsed_list", "def get_placeholders(self, format_string):\n placeholders = set()\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.add(token.group(\"key\"))\n elif token.group(\"command\"):\n # get any placeholders used in commands\n commands = dict(parse_qsl(token.group(\"command\")))\n # placeholders only used in `if`\n if_ = commands.get(\"if\")\n if if_:\n placeholders.add(Condition(if_).variable)\n return placeholders", "def tokens(self, format_string):\n if format_string not in self.format_string_cache:\n tokens = list(re.finditer(self.reg_ex, format_string))\n self.format_string_cache[format_string] = tokens\n return self.format_string_cache[format_string]", "def parse_name_and_type_from_fmt_str(\n formatted_str: str,\n allowed_types: Optional[Dict[str, Component]] = None\n) -> Generator[Tuple[str, Type[Field]], None, None]:\n for _, arg_name, _type_name, _ in Formatter().parse(formatted_str):\n if arg_name is not None:\n try:\n assert _type_name is not None\n _type = (\n allowed_types[_type_name] if allowed_types is not None\n and _type_name in allowed_types\n else getattr(pyopenapi3.data_types, _type_name)\n )\n yield arg_name, _type\n except AttributeError:\n raise ValueError(\n \"A non-`Field` or `OpenApiObject` type was found. \"\n f\"Can't use `{_type_name}` as a type in {formatted_str}. \"\n f\"Must be a stringified pyopenapi3 `data_type`, such \"\n f\"as `pyopenapi3.data_types.String`, or a reference to a \"\n f\"Component.\"\n ) from None", "def parse(string, format):\n # Count the number of spaces in the format string (N), and\n # truncate everything after the (N+1)th space\n spaces = format.count(' ') + 1\n string = ' '.join(string.split()[:spaces])\n\n try:\n result = dt.datetime.strptime(string, format)\n except ValueError, err:\n raise CannotParse(str(err))\n else:\n return result", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def parse_format(var_sample):\n # ugh\n ret = []\n # Parsing format information\n # Need to see what all these could be...\n if None in var_sample[\"GT\"]:\n ret.append(3)\n elif var_sample[\"GT\"] == (0, 0):\n ret.append(0)\n elif var_sample[\"GT\"] == (0, 1):\n ret.append(1)\n elif var_sample[\"GT\"] == (1, 1):\n ret.append(2)\n \n ret.extend([var_sample[\"GQ\"] if var_sample[\"GQ\"] is not None else 0,\n var_sample[\"OV\"],\n var_sample[\"DP\"], # be careful these aren't '.'\n #split where _r is ref-allele and _a is alt-allele\n var_sample[\"AD\"][0],\n var_sample[\"AD\"][1],\n var_sample[\"PDP\"],\n var_sample[\"PAD\"][0],\n var_sample[\"PAD\"][1],\n var_sample[\"US\"][0],\n var_sample[\"US\"][1],\n var_sample[\"DS\"][0],\n var_sample[\"DS\"][1],\n var_sample[\"UC\"][0],\n var_sample[\"UC\"][1],\n var_sample[\"DC\"][0],\n 
var_sample[\"DC\"][1],\n var_sample[\"UDC\"][0],\n var_sample[\"UDC\"][1],\n var_sample[\"UCC\"][0],\n var_sample[\"UCC\"][1],\n var_sample[\"DDC\"][0],\n var_sample[\"DDC\"][1],\n var_sample[\"DCC\"][0],\n var_sample[\"DCC\"][1],\n var_sample[\"UMO\"][0],\n var_sample[\"UMO\"][1],\n var_sample[\"DMO\"][0],\n var_sample[\"DMO\"][1],\n var_sample[\"UXO\"][0],\n var_sample[\"UXO\"][1],\n var_sample[\"DXO\"][0],\n var_sample[\"DXO\"][1],\n var_sample[\"NR\"][0],\n var_sample[\"NR\"][1],\n var_sample[\"MO\"][0],\n var_sample[\"MO\"][1],\n var_sample[\"XO\"][0],\n var_sample[\"XO\"][1],\n var_sample[\"XC\"][0],\n var_sample[\"XC\"][1],\n var_sample[\"AC\"][0],\n var_sample[\"AC\"][1],\n var_sample[\"MC\"][0],\n var_sample[\"MC\"][1],\n var_sample[\"EC\"][0],\n var_sample[\"EC\"][1],\n var_sample[\"PL\"][0] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][1] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][2] if var_sample[\"PL\"][0] is not None else 0])\n return ret\n #END", "def convert_raw_tuple(value_tuple, format_string):\n values = []\n for v, c in zip(value_tuple, format_string):\n if v is None:\n # append None\n values.append(v)\n elif c == u\"s\":\n # string\n values.append(v)\n elif c == u\"S\":\n # string, split using space as delimiter\n values.append([s for s in v.split(u\" \") if len(s) > 0])\n elif c == u\"i\":\n # int\n values.append(int(v))\n elif c == u\"U\":\n # Unicode\n values.append(convert_unicode_field(v))\n elif c == u\"A\":\n # ASCII\n values.append(convert_ascii_field(v))\n #elif c == u\"x\":\n # # ignore\n # pass\n return tuple(values)", "def guess_format(string):\n format_regexps = _compiled_format_regexps(_date_formats, _time_formats)\n for format, regexp in format_regexps:\n if regexp.search(string):\n return format\n # Nothing matched\n raise CannotParse(\"Could not guess date/time format in: %s\" % string)", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def _parse_date_format( fmp_database ):\n\tdef build_one( stamp ):\n\t\treturn stamp.\\\n\t\t replace( 'yyyy', '%Y' 
).\\\n\t\t replace( 'MM', '%m' ).\\\n\t\t replace( 'dd', '%d' ).\\\n\t\t replace( 'HH', '%H' ).\\\n\t\t replace( 'mm', '%M' ).\\\n\t\t replace( 'ss', '%S' )\n\n\treturn {\n\t\t'date': build_one( fmp_database.get('date-format', '') ),\n\t\t'timestamp': build_one( fmp_database.get('timestamp-format', '') ),\n\t\t'time': build_one( fmp_database.get('time-format', '') ),\n\t}", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt", "def find_time(string, format):\n re_format = format\n for key, value in six.iteritems(REGEX):\n re_format = re_format.replace(key, value)\n matches = re.finditer(re_format, string)\n for match in matches:\n try:\n matchstr = string[slice(*match.span())]\n dt = datetime.strptime(matchstr, format)\n except ValueError:\n continue\n else:\n yield dt", "def parse(timestring):\n for parser in _PARSERS:\n match = parser['pattern'].match(timestring)\n if match:\n groups = match.groups()\n ints = tuple(map(int, groups))\n time = parser['factory'](ints)\n return time\n\n raise TimeError('Unsupported time format {}'.format(timestring))", "def load(fmt: str, stream: BytesIO, _unpack=unpack):\n values = []\n bitcount = bits = 0\n for char in fmt:\n if char == '?':\n if not bitcount:\n bits = _unpack('>B', _read(stream, 1))[0]\n bitcount = 8\n value = (bits & 1) == 1\n bits >>= 1\n bitcount -= 1\n else:\n bitcount = bits = 0\n if char == 'B':\n value = _unpack('>B', _read(stream, 1))[0]\n elif char == 'H':\n value = _unpack('>H', _read(stream, 2))[0]\n elif char == 'L':\n value = _unpack('>L', _read(stream, 4))[0]\n elif char == 'Q':\n value = _unpack('>Q', _read(stream, 8))[0]\n elif char == 's':\n length = _unpack('>B', _read(stream, 1))[0]\n value = _unpack('>%ss' % length, _read(stream, length))[0]\n value = value.decode('utf-8', 'surrogatepass')\n elif char == 'S':\n length = _unpack('>L', _read(stream, 4))[0]\n value = _unpack('>%ss' % length, _read(stream, length))[0]\n value = value.decode('utf-8', 'surrogatepass')\n elif char == 't':\n timestamp = _unpack('>Q', _read(stream, 8))[0]\n value = datetime.utcfromtimestamp(timestamp)\n elif char == 'T':\n value = {}\n length = _unpack('>L', _read(stream, 4))[0]\n stream2 = BytesIO(_read(stream, length))\n while stream2.tell() < length:\n key = load('s', stream2)[0]\n value[key] = _load_item(stream2)\n elif char != '?':\n raise ValueError('wrong format char', char)\n values.append(value)\n return values", "def parseSettings (formatStr):\n # split string\n fmt = formatStr.split ('.')\n if len (fmt) < 0:\n return (None, None)\n \n # find encoder\n encoderInstance = None\n for e in _registeredEncoders:\n if len (fmt) > 0:\n if e.name () == fmt[0]:\n encoderInstance = e\n break\n else:\n encoderInstance = _registeredEncoders[0] if len (_registeredEncoders) > 0 else None\n\n # return tuple with parsed settings\n presetName = '.'.join (fmt[1:])\n if presetName in (None, \"\"):\n if encoderInstance != None:\n presetName = encoderInstance.defaultPreset ()\n return (encoderInstance, presetName)", "def parse(self, s):\n\n segments = self.compiled.split(self._whitespace.sub(\" \", s))\n literals = segments[::2]\n raw = segments[1::2]\n\n if not raw:\n return []\n\n case = list(map(str.casefold, raw))\n prefixes = [{}] + 
[dict(self.locale_set.prefixes.get(match, ())) for match in case[:-1]]\n suffixes = [dict(self.locale_set.suffixes.get(match, ())) for match in case[1:]] + [{}]\n\n groups = _DateTime(**{ field: [] for field in _DateTime._fields })\n choices_per_position = {}\n always_literal = set()\n numeric = set()\n for idx, (prefix, suffix) in enumerate(zip(prefixes, suffixes)):\n keyword = self._lookup_keyword(raw[idx])\n if \"y\" in prefix:\n prefix[\"C\"] = tuple(set(prefix[\"y\"] + prefix.get(\"C\", ())))\n if not keyword:\n always_literal.add(idx)\n else:\n if raw[idx].isdigit():\n numeric.add(idx)\n choices_per_position[idx] = len(keyword)\n for fmt, value, locales in keyword:\n category = fmt[-1]\n if category == \"b\":\n # Month-names should be treated like numeric months.\n category = \"m\"\n elif category == \"z\":\n category = \"Z\"\n getattr(groups, category).append(_Assignment(\n fmt=fmt,\n pos=idx,\n value=value,\n locales=locales,\n prefix=prefix.get(fmt[-1]),\n suffix=suffix.get(fmt[-1]),\n ))\n numeric = frozenset(numeric)\n\n # If a required date field is unsatisfiable, this is not a date.\n if not all(getattr(groups, category) for category in _State._min_date_formats):\n for category in _State._all_date_formats:\n getattr(groups, category).clear()\n\n # If a required time field is unsatisfiable, this is not a time.\n if not all(getattr(groups, category) for category in _State._min_time_formats):\n for category in _State._all_time_formats:\n getattr(groups, category).clear()\n\n for group in groups:\n group.sort(key=lambda assignment: (\n -self._optimistic_score(assignment),\n choices_per_position[assignment.pos],\n ))\n\n required_formats = _State._min_date_formats + _State._min_time_formats\n groups = OrderedDict(sorted(\n (\n (\n category,\n (\n group,\n tuple(\n (f, required)\n for f, required in _position_constraints\n if category in required\n ),\n tuple(\n (f, required)\n for f, required, revisit in _value_constraints\n if category in required or category in revisit\n ),\n )\n )\n for category, group in zip(groups._fields, groups)\n if group\n ),\n key=lambda i: (i[0] not in required_formats, len(i[1][0]))\n ))\n\n # We've already filtered out all possibilities; there's nothing here.\n if not groups:\n return []\n\n constrained_groups = []\n while groups:\n category, (group, position, value) = groups.popitem(last=False)\n constrained_groups.append((category, group, position, value))\n required = frozenset(itertools.chain.from_iterable(required for f, required in itertools.chain(position, value)))\n if required:\n required = [\n category\n for category in reversed(groups.keys())\n if category in required\n ]\n for category in required:\n groups.move_to_end(category, last=False)\n groups = constrained_groups\n\n best_quality = 0\n best_candidates = []\n\n partials = [\n _State.empty._replace(\n unconverted=frozenset(always_literal),\n remaining_groups=tuple(groups),\n ).children(numeric=numeric)\n ]\n while partials:\n try:\n quality, locales, state = next(partials[-1])\n except StopIteration:\n partials.pop()\n continue\n\n if state.remaining_groups:\n # Admissable heuristic: compute the best score each group\n # could possibly achieve. Don't count conversion specifiers\n # that we've already used, but don't worry about conflicts\n # in the groups we haven't assigned yet. Any such conflicts\n # can only reduce the resulting score, and we only need to\n # make sure that the heuristic is at least as large as the\n # true value of the best leaf in this subtree. 
However, the\n # more precise we can be here, the fewer nodes we have to\n # search, so we can spend some CPU time on precision and\n # still come out ahead.\n assigned = state.unconverted.union(state.pos).difference((None,))\n heuristic = len(state.pending_hints) + sum(\n next((\n self._optimistic_score(assignment)\n for assignment in group[1]\n if assignment.pos not in assigned\n ), 0)\n for group in state.remaining_groups\n )\n\n if quality + heuristic < best_quality:\n # Even assuming the remaining groups get the highest\n # possible score, this state is still not good enough.\n continue\n\n partials.append(state.children(numeric=numeric))\n continue\n\n value = state.valid()\n if value is None:\n continue\n\n quality, locales, state = state.final_score()\n\n if best_quality is not None and quality < best_quality:\n # We've seen better, so skip this one.\n continue\n\n if quality != best_quality:\n best_quality = quality\n best_candidates = []\n\n conversions = dict(zip(state.pos, state.fmts))\n fmts = [ conversions.get(idx) or literal for idx, literal in enumerate(raw) ]\n\n pattern = ''.join(lit + fmt for lit, fmt in zip(literals, fmts + [''])).replace(\"%C%y\", \"%Y\")\n best_candidates.append((pattern, value, locales))\n return best_candidates", "def _interpolate(format):\n from tokenize import tokenprog\n\n def matchorfail(text, pos):\n match = tokenprog.match(text, pos)\n if match is None:\n raise _ItplError(text, pos)\n return match, match.end()\n\n namechars = \"abcdefghijklmnopqrstuvwxyz\" \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_\";\n chunks = []\n pos = 0\n\n while 1:\n dollar = format.find(\"$\", pos)\n if dollar < 0: \n break\n nextchar = format[dollar + 1]\n\n if nextchar == \"{\":\n chunks.append((0, format[pos:dollar]))\n pos, level = dollar + 2, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token == \"{\": \n level = level + 1\n elif token == \"}\": \n level = level - 1\n chunks.append((1, format[dollar + 2:pos - 1]))\n\n elif nextchar in namechars:\n chunks.append((0, format[pos:dollar]))\n match, pos = matchorfail(format, dollar + 1)\n while pos < len(format):\n if format[pos] == \".\" and \\\n pos + 1 < len(format) and format[pos + 1] in namechars:\n match, pos = matchorfail(format, pos + 1)\n elif format[pos] in \"([\":\n pos, level = pos + 1, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token[0] in \"([\": \n level = level + 1\n elif token[0] in \")]\": \n level = level - 1\n else: \n break\n chunks.append((1, format[dollar + 1:pos]))\n else:\n chunks.append((0, format[pos:dollar + 1]))\n pos = dollar + 1 + (nextchar == \"$\")\n\n if pos < len(format): \n chunks.append((0, format[pos:]))\n return chunks", "def update_placeholders(self, format_string, placeholders):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if token.group(\"key\") in placeholders:\n output.append(\n \"{{{}{}}}\".format(placeholders[token.group(\"key\")], token.group(\"format\"))\n )\n continue\n elif token.group(\"command\"):\n # update any placeholders used in commands\n commands = parse_qsl(token.group(\"command\"), keep_blank_values=True)\n # placeholders only used in `if`\n if \"if\" in [x[0] for x in commands]:\n items = []\n for key, value in commands:\n if key == \"if\":\n # we have to rebuild from the parts we have\n condition = Condition(value)\n variable = 
condition.variable\n if variable in placeholders:\n variable = placeholders[variable]\n # negation via `!`\n not_ = \"!\" if not condition.default else \"\"\n condition_ = condition.condition or \"\"\n # if there is no condition then there is no\n # value\n if condition_:\n value_ = condition.value\n else:\n value_ = \"\"\n value = \"{}{}{}{}\".format(not_, variable, condition_, value_)\n if value:\n items.append(f\"{key}={value}\")\n else:\n items.append(key)\n\n # we cannot use urlencode because it will escape things\n # like `!`\n output.append(r\"\\?{} \".format(\"&\".join(items)))\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def parse_template(template):\n field_name = None\n field_value = []\n\n for line in template.strip().split('\\n') + ['end:']:\n if line.startswith('#'):\n continue\n match = RE_TEMPLATE_FIELD_LINE.match(line)\n if match:\n if field_name is not None:\n yield (field_name, '\\n'.join(field_value).strip())\n elif len(field_value) > 0:\n logging.warning('Ignoring lines: %r', field_value)\n\n field_name = match.group(1)\n field_value = [match.group(2)]\n else:\n field_value.append(line)", "def _parse_kvfmt(self, fmtlist):\n t_fmt = self.__class__._T_FMT\n t_prefix = self.__class__._T_PREFIX\n parsed_list = []\n field_id = 1\n\n for entry in fmtlist:\n name = entry[0]\n fmt = entry[1]\n parsed_field = {}\n parsed_field['name'] = name\n if isinstance(fmt, str):\n ptr = 0\n m_prefix = t_prefix.match(fmt)\n if m_prefix:\n ptr += _get_length_of_match(m_prefix)\n parsed_field['prefix'] = m_prefix.group(1)\n # check for optional nested structure start (required if the field is also repeated)\n if m_prefix.group(2) and len(entry) > 2:\n parsed_field['field_id'] = field_id\n parsed_field['field_type'] = 'a'\n parsed_field['subcontent'] = self._parse_kvfmt(entry[2])\n field_id += 1\n parsed_list.append(parsed_field)\n continue\n elif m_prefix.group(2):\n raise BadFormatString('Nested field type used without specifying field format.')\n m_fmt = t_fmt.match(fmt[ptr:])\n if m_fmt:\n ptr += _get_length_of_match(m_fmt)\n resolved_fmt_char = None\n # fmt is an alias\n if m_fmt.group(2):\n resolved_fmt_char = m_fmt.group(2)\n parsed_field['field_type'] = self.__class__\\\n .FIELD_ALIAS[m_fmt.group(2)]\n # fmt is an actual field type\n elif m_fmt.group(1):\n resolved_fmt_char = m_fmt.group(1)\n parsed_field['field_type'] = m_fmt.group(1)\n parsed_field['field_id'] = field_id\n # only skip type (`x') is allowed for copying in key-value mode\n if m_fmt.group(3) and resolved_fmt_char == 'x':\n repeats = int(m_fmt.group(3))\n parsed_field['repeat'] = repeats\n field_id += repeats\n elif m_fmt.group(3):\n raise BadFormatString('Field copying is not allowed in key-value format list.')\n else:\n field_id += 1\n else:\n raise BadFormatString('Invalid type for field \"{0}\"'.format(name))\n if len(fmt) != ptr:\n self.logger.warning('Extra content found after the type string of %s.', name)\n else:\n # Hard-code the empty prefix because we don't support copying\n parsed_field['prefix'] = ''\n parsed_field['field_id'] = field_id\n parsed_field['field_type'] = 'a'\n parsed_field['subcontent'] = self._parse_kvfmt(fmt)\n field_id += 1\n parsed_list.append(parsed_field)\n return parsed_list", "def _parseAttributeScanf(self, line, formatting):\n\n # multiple entrys\n if isinstance(formatting, list):\n for scanf_format in formatting:\n try:\n #print \"<<<----\", scanf_format, line\n return sscanf(line, scanf_format)\n except IncompleteCaptureError, e:\n 
pass\n\n # single entry\n else:\n return sscanf(line, formatting)\n\n # problem if none of the formats worked\n raise IncompleteCaptureError(\"Format error for %s\" % line)", "def __init__(self, format_string):\r\n if not isinstance(format_string, Compatibility.string):\r\n raise TypeError('format_string should be a string, instead got %s' % type(format_string))\r\n self._re_pattern, self._applicators = self._preprocess_format_string(format_string)\r\n self._re = re.compile(self._re_pattern)", "def parse_template(string):\n count = 0\n list1 = []\n for character in string:\n count = count + 1\n if character == \"{\":\n end = string.find(\"}\", count)\n s_strg = string[count:end]\n list1.append(s_strg)\n string = string.replace(s_strg, \"\", 1)\n count = count - len(s_strg)\n\n subs = tuple(list1)\n\n return(string, subs)\n print(subs)", "def _get_placeholders(template):\n return [p[1] for p in string.Formatter().parse(template)\n if p[1] is not None and len(p[1]) > 0]", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def parse_known_date_formats(dt_string):\n for fmt in ('%Y%m%d', '%Y%m%d %H:%M', '%m/%d/%Y', '%m/%d/%Y %H:%M'):\n try:\n return datetime.strptime(dt_string, fmt)\n except ValueError:\n pass\n raise ValueError(\"No valid date format found.\"\n \"See https://tidesandcurrents.noaa.gov/api/ \"\n \"for list of accepted date formats.\")", "def format_regexp(simple_format):\n format, regexp = ('', '')\n for char in simple_format:\n if char in _regexps:\n format += '%' + char\n regexp += _regexps[char]\n else:\n format += char\n regexp += char\n return (format, regexp)", "def parse_task_time(line):\n stripret = \"\".join(line.split())\n p = re.compile(r'\\d+\\.\\d{2}-\\d+\\.\\d{2}')\n findret = p.findall(stripret) \n if findret:\n formatstr = \" \".join(line.split())\n timeregx = r'\\d+\\.\\d{2}\\s*-\\s*\\d+\\.\\d{2}'\n time = re.compile(timeregx).findall(formatstr)[0].replace(\" \", \"\").replace(\":\", \".\")\n taskcontext = re.sub(timeregx, \"\", formatstr).strip().replace(\":\", \"\")\n return [taskcontext, time]\n else:\n # log it if line can't be parse\n logging.warning(\"unparsed line: [%r]\" % line)", "def _parse_param_line(regex, line):\n out = []\n for r in regex.findall(line):\n converter, variable = r[0] or 'default', r[1]\n out.append((_CONVERTERS[converter], variable))\n return out", "def extract_fields(entry_string):\n for field, value in re.findall(\"(.*?)=(.*?)\\}\", entry_string):\n yield field.strip(\",\").strip(\" \"), value.strip(\"{\").strip(\"}\")", "def parse_params(txt):\n res = list()\n # First, slipt with stuff looking like \\TYPE:\n splitted = re.split(r'\\s*\\\\(\\w+)\\s*:', txt)\n # We now have a list looking like:\n # ['', 'flag', '....', 'param', '...']\n i = 1\n while i < len(splitted) - 1:\n type = splitted[i]\n rest = splitted[i+1]\n if type == \"argn\":\n name = \"remaining args\"\n desc = rest\n else:\n # first word is the name, the rest is the description:\n match = re.match(r'\\s*(\\w+)\\s*(.*)', rest, re.DOTALL)\n if not match:\n print(\"warning, failed to parse parameters\")\n print(\"near\", rest)\n break\n (name, desc) = match.groups()\n desc = clean_indent(desc)\n res.append((type, name, desc))\n i += 2\n return res", "def extract_pattern(fmt):\n class FakeDict(object):\n def __init__(self):\n self.seen_keys = set()\n\n def __getitem__(self, key):\n self.seen_keys.add(key)\n return ''\n\n def 
keys(self):\n return self.seen_keys\n\n fake = FakeDict()\n try:\n fmt % fake\n except TypeError:\n # Formatting error\n pass\n return set(fake.keys())", "def parse_spec(spec: str) -> Tuple[str, str]:\n\n # Single \"*\" is treated as wildcard for date, not channel.\n if spec == \"*\":\n return \"*\", \"*\"\n\n channel_rex = r\"\"\"\n (?P<channel>\n nightly | beta | stable | \\* | (?: \\d+\\.\\d+\\.\\d+ )\n )\n \"\"\"\n\n date_rex = r\"\"\"\n (?P<date>\n \\d\\d\\d\\d-\\d\\d-\\d\\d | latest | \\*\n )\n \"\"\"\n\n m = re.match(\n r\"{} (?: - {})? $\".format(channel_rex, date_rex), spec, re.VERBOSE\n )\n if m:\n channel = m.group(\"channel\")\n date = m.group(\"date\") or \"\"\n return date, channel\n\n m = re.match(r\"{} $\".format(date_rex), spec, re.VERBOSE)\n if m:\n date = m.group(\"date\")\n return date, \"*\"\n\n raise error.UsageError(\"invalid SPEC {}\".format(repr(spec)))", "def parse_place_notation(input_string: str) -> Tuple[int, str]:\n\n # Looking for a string that matches <stage>:<place notation> where the\n # place notation is a series of bell numbers and 'x' characters\n parts = input_string.split(\":\")\n if len(parts) == 2:\n stage_part = parts[0]\n if len(stage_part) == 0 or not stage_part.isnumeric():\n raise PlaceNotationError(input_string, \"Stage must be a number\")\n stage = int(stage_part)\n place_notation = parts[1]\n if not valid_pn(place_notation):\n raise PlaceNotationError(input_string, \"Place notation is invalid\")\n else:\n raise PlaceNotationError(input_string, \"<stage>:<place notation> required\")\n\n return stage, place_notation", "def get_formats(self):\n return tuple(self._names.keys())", "def _parse_date(date_string: str) -> Union[datetime.datetime, str]:\n for date_format in KNOWN_DATE_FORMATS:\n try:\n date = datetime.datetime.strptime(date_string, date_format)\n return date\n except ValueError:\n continue\n return date_string", "def format_data(data_string):\n lines = data_string.split('\\\"\\n\\\"')\n split_data = [re.split(r\"\\\"\\s*,\\s*\\\"\", line) for line in lines]\n\n return split_data", "def strptime(date_string, format):\n i = 0\n format_len = len(format)\n # Iterate through the format string, applying parsers and matching literal\n # chars as appropriate.\n struct_time_d = {}\n while i < format_len:\n c = format[i]\n # If the character is not the start of a directive, attempt to match a\n # literal character.\n if c != '%':\n if date_string[0] != c:\n return None\n date_string = date_string[1:]\n else:\n # Read the next character of the directive, letting an IndexError\n # raise if format is exhausted/malformed.\n i += 1\n directive = format[i]\n # Raise a ValueError just like the built-in datetime.strptime()\n # if the directive is invalid.\n if directive not in DIRECTIVE_PARSER_MAP:\n raise ValueError(\"{} is a bad directive in format {}\".format(\n repr(directive[1]), repr(directive)))\n # Get the parser.\n parser = DIRECTIVE_PARSER_MAP[directive]\n # Check whether the parser is yet to be implemented.\n if parser is NOT_IMPLEMENTED:\n raise NotImplementedError(\n 'parser not defined for directive: {}'.format(directive)\n )\n # Do the parsing.\n result = parser(date_string)\n # Return None on any parsing failure.\n if result is False:\n return None\n value, date_string = result\n # Convert the directive value to a struct_time item.\n struct_time_item = directive_to_struct_time_item(directive, value)\n if struct_time_item is not None:\n k, v = struct_time_item\n # If the key already exists, accumulate, otherwise set.\n if k in 
struct_time_d:\n struct_time_d[k] += v\n else:\n struct_time_d[k] = v\n i += 1\n\n # Return None if the date string has not been completely consumed.\n if len(date_string) > 0:\n return None\n\n # Return None if a +12 hour AM_PM = 'PM' accumulation overflowed a parsed\n # HOUR_24 value.\n if not (0 <= struct_time_d.get(STRUCT_TIME.TM_HOUR, 0) <= 23):\n return None\n\n # Attempt to get year/month/day for date ops.\n year = struct_time_d.get(STRUCT_TIME.TM_YEAR)\n month = struct_time_d.get(STRUCT_TIME.TM_MON)\n day = struct_time_d.get(STRUCT_TIME.TM_MDAY)\n has_date = year is not None and month is not None and day is not None\n\n # Return None if the specified day is not valid for the month.\n if has_date and not is_valid_month_day(year, month, day):\n return None\n\n # Create an initial struct_time object.\n _struct_time = struct_time(\n *[struct_time_d.get(k, 0) for k in STRUCT_TIME_FIELDS]\n )\n\n if has_date:\n # Check whether accumulated minute value exceeds its max as a result of\n # accumulating a time zone offset, requiring some calendar day math.\n if not 0 <= struct_time_d.get(STRUCT_TIME.TM_MIN, 0) <= 59:\n # Pass _struct_time along with an empty time_delta to\n # add_struct_time_time_delta() to take advantage of its\n # over/underflow logic. Note that add_struct_time_time_delta() will\n # take care of setting the final day of week / year.\n _struct_time = add_struct_time_time_delta(\n _struct_time, time_delta())\n else:\n # Calculate the final day of week / year.\n _struct_time = struct_time_replace(\n _struct_time,\n tm_wday=date_to_day_of_week(year, month, day),\n tm_yday=date_to_day_of_year(year, month, day)\n )\n\n return _struct_time", "def _read_range(range: str) -> Tuple[str, List[Tuple[Union[int, None], Union[int, None]]]]:\n format, split_on_pairs = range.split('=', 1)\n split_on_pairs = split_on_pairs.split(',')\n pairs = []\n for pair_str in split_on_pairs:\n split_on_range = pair_str.split('-', 1)\n start = int(split_on_range[0]) if len(split_on_range[0]) > 0 else None\n stop = int(split_on_range[1]) if len(split_on_range[1]) > 0 else None\n pairs.append((start, stop))\n return format, pairs", "def parse_styles(text: str) -> List[dict]:\n styles = []\n regex = r'(\\d{3})=(\".*?\"),(\\d+\\.?\\d+),(\\(.*?\\))'\n\n for line in text.split(\"\\r\\n\"):\n if line == \"\":\n continue\n\n n, font, font_size, color = re.match(regex, line).groups()\n styles.append(\n {\n \"id\": int(n),\n \"f\": font.replace('\"', \"\"),\n \"fs\": float(font_size),\n \"rgb\": [\n int(i)\n for i in color.replace(\"(\", \"\")\n .replace(\")\", \"\").split(\",\")]\n }\n )\n\n return styles", "def stringTimeToTuple_NEW(st):\n st, ms = split(st, '.')\n y, m, d, h, n, s = split(st, '_')\n return y,m,d,h,n,s,ms", "def initFormat(self):\n self.formatList = []", "def _read_format(instream):\n format = int(le.uint32.read_from(instream))\n if format & PCF_BYTE_MASK:\n base = be\n else:\n base = le\n return format, base", "def _parse_format(mode=2, rc_kw=None, **kwargs):\n kw = {}\n rc_kw = rc_kw or {}\n for key, value in kwargs.items():\n key_fixed = _rc_nodots.get(key, None)\n if key_fixed is None:\n kw[key] = value\n else:\n rc_kw[key_fixed] = value\n return rc_kw, mode, kw", "def GetFormatCount(format_):\n\n if isinstance(format_, str):\n match = re.search(r'\\s*(\\d+)', format_)\n if match:\n return int(match.group(0))\n\n return 1", "def get_point_list(self, string):\n a = re.findall('\\(\\d+\\.\\d+, \\d+\\.\\d+\\)', string)\n lst = []\n for tp in a:\n lst.append(self.get_tuple(tp))\n print lst", 
"def get_tuple(self, string):\n a = re.search('\\((\\d+\\.\\d+), (\\d+\\.\\d+)\\)', string)\n if not a:\n return None\n else:\n return (float(a.group(1)), float(a.group(2)))", "def display_strptime_formatters():\n data = [\n [\"%a\", \"Weekday as locale's abbreviated name.\", \"Mon\"],\n [\"%A\", \"Weekday as locale's full name.\", \"Monday\"],\n [\"%w\", \"Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.\", \"1\"],\n [\"%d\", \"Day of the month as a zero-padded decimal number.\", \"30\"],\n [\"%-d\", \"Day of the month as a decimal number. (Platform specific)\", \"30\"],\n [\"%b\", \"Month as locale's abbreviated name.\", \"Sep\"],\n [\"%B\", \"Month as locale's full name.\", \"September\"],\n [\"%m\", \"Month as a zero-padded decimal number.\", \"09\"],\n [\"%-m\", \"Month as a decimal number. (Platform specific)\", \"9\"],\n [\"%y\", \"Year without century as a zero-padded decimal number.\", \"13\"],\n [\"%Y\", \"Year with century as a decimal number.\", \"2013\"],\n [\"%H\", \"Hour (24-hour clock) as a zero-padded decimal number.\", \"07\"],\n [\"%-H\", \"Hour (24-hour clock) as a decimal number. (Platform specific)\", \"7\"],\n [\"%I\", \"Hour (12-hour clock) as a zero-padded decimal number.\", \"07\"],\n [\"%-I\", \"Hour (12-hour clock) as a decimal number. (Platform specific)\", \"7\"],\n [\"%p\", \"Locale's equivalent of either AM or PM.\", \"AM\"],\n [\"%M\", \"Minute as a zero-padded decimal number.\", \"06\"],\n [\"%-M\", \"Minute as a decimal number. (Platform specific)\", \"6\"],\n [\"%S\", \"Second as a zero-padded decimal number.\", \"05\"],\n [\"%-S\", \"Second as a decimal number. (Platform specific)\", \"5\"],\n [\"%f\", \"Microsecond as a decimal number, zero-padded on the left.\", \"000000\"],\n [\"%z\", \"UTC offset in the form +HHMM or -HHMM (empty string if the the object is naive).\", \"\"],\n [\"%Z\", \"Time zone name (empty string if the object is naive).\", \"\"],\n [\"%j\", \"Day of the year as a zero-padded decimal number.\", \"273\"],\n [\"%-j\", \"Day of the year as a decimal number. (Platform specific)\", \"273\"],\n [\"%U\", \"Week number of the year (Sunday as the first day of the week) as a zero padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0.\", \"39\"],\n [\"%W\", \"Week number of the year (Monday as the first day of the week) as a decimal number. 
All days in a new year preceding the first Monday are considered to be in week 0.\", \"39\"],\n [\"%c\", \"Locale's appropriate date and time representation.\", \"Mon Sep 30 07:06:05 2013\"],\n [\"%x\", \"Locale's appropriate date representation.\", \"09/30/13\"],\n [\"%X\", \"Locale's appropriate time representation.\", \"07:06:05\"],\n [\"%%\", \"A literal '%' character.\", \"%\"]\n ]\n\n display(HTML(\n '<table><tr>{}</tr></table>'.format(\n '</tr><tr>'.join(\n '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)\n )\n ))", "def split_date_string(date_string: str):\n try:\n units,_,epoch = date_string.split(None, 2)\n except ValueError:\n raise ValueError(f'Invalid format: {date_string}')\n else:\n return (units.lower(), parse(epoch))", "def split_date_string(date_string: str):\n try:\n units,_,epoch = date_string.split(None, 2)\n except ValueError:\n raise ValueError(f'Invalid format: {date_string}')\n else:\n return (units.lower(), parse(epoch))", "def parse_pattern(pattern: NumberPattern | str) -> NumberPattern:\n if isinstance(pattern, NumberPattern):\n return pattern\n\n def _match_number(pattern):\n rv = number_re.search(pattern)\n if rv is None:\n raise ValueError(f\"Invalid number pattern {pattern!r}\")\n return rv.groups()\n\n pos_pattern = pattern\n\n # Do we have a negative subpattern?\n if ';' in pattern:\n pos_pattern, neg_pattern = pattern.split(';', 1)\n pos_prefix, number, pos_suffix = _match_number(pos_pattern)\n neg_prefix, _, neg_suffix = _match_number(neg_pattern)\n else:\n pos_prefix, number, pos_suffix = _match_number(pos_pattern)\n neg_prefix = f\"-{pos_prefix}\"\n neg_suffix = pos_suffix\n if 'E' in number:\n number, exp = number.split('E', 1)\n else:\n exp = None\n if '@' in number and '.' in number and '0' in number:\n raise ValueError('Significant digit patterns can not contain \"@\" or \"0\"')\n if '.' 
in number:\n integer, fraction = number.rsplit('.', 1)\n else:\n integer = number\n fraction = ''\n\n def parse_precision(p):\n \"\"\"Calculate the min and max allowed digits\"\"\"\n min = max = 0\n for c in p:\n if c in '@0':\n min += 1\n max += 1\n elif c == '#':\n max += 1\n elif c == ',':\n continue\n else:\n break\n return min, max\n\n int_prec = parse_precision(integer)\n frac_prec = parse_precision(fraction)\n if exp:\n exp_plus = exp.startswith('+')\n exp = exp.lstrip('+')\n exp_prec = parse_precision(exp)\n else:\n exp_plus = None\n exp_prec = None\n grouping = parse_grouping(integer)\n return NumberPattern(pattern, (pos_prefix, neg_prefix),\n (pos_suffix, neg_suffix), grouping,\n int_prec, frac_prec,\n exp_prec, exp_plus, number)", "def change_format_var_parser(text, tracker):\n param_names = format_var_parser(text)\n param_vars = {}\n for param_name in param_names:\n try:\n param_vars[param_name] = tracker.get_slot(param_name)\n except Exception as e:\n PYTHON_LOGGER.error(\"Error to get var name {}: {}\".format(param_name, e))\n return text.format(**param_vars)", "def parse_argument_pattern(content: str) -> (ArgumentPattern, int):\n if len(content) == 0:\n raise PatternError(\"content may not be empty\")\n\n if content[0] not in \"[<\" or content[-1] not in \"]>\":\n raise PatternError(\"argument pattern must be wrapped in '[ ]' or '< >'\")\n\n open_brace = content[0]\n\n is_required = open_brace == \"<\"\n\n offset = 1\n\n names, size = __parse_names(content[offset::])\n offset += size\n\n is_positional = len(names) == 0\n\n ident, arg_num, size = __parse_var(content[offset::], is_positional)\n offset += size\n\n delim, size = __parse_delim(content[offset::])\n offset += size\n\n if (delim is not None and not (arg_num.quantifier == Quantifier.N and arg_num.count == 1\n or arg_num.quantifier == Quantifier.OPTIONAL)):\n raise PatternError(f\"Only arguments taking 1 or optional values may specify a delimiter\")\n\n try:\n if (close_brace := content[offset]) in \"]>\":\n if open_brace == \"<\" and close_brace != \">\" or open_brace == \"[\" and close_brace != \"]\":\n raise PatternError(f\"mismatching brace types, found '{open_brace}' and '{close_brace}'\")\n\n offset += 1\n else:\n raise PatternError(f\"expected '{']' if open_brace == '[' else '>'}' but found '{content[offset]}\")\n except IndexError as err:\n raise PatternError(f\"error parsing arguments pattern: {err}\")\n\n if is_positional and not is_required:\n raise PatternError(\"a positional argument may not be optional, you may specify either '?' or '*' as quantifiers\")\n\n if ident is None and len(names) > 0:\n ident = (max(names, key=lambda l: len(l)).lstrip('-')\n .upper().replace(\"-\", \"_\"))\n\n return ArgumentPattern(ident, arg_num, names, is_positional, is_required, delim), offset", "def __format_management(self, index, matched):\n\n for _, case_data in self.format_cases.items():\n if int(index) in case_data[0]:\n # The regex number is into the currently read case data.\n\n # We return a list with the formatted elements.\n # 1. We convert the day to 2 digits.\n # 2. We convert the month to the unified format.\n # 3. 
We return the year.\n return [\n converter.Digit2Digits(matched[case_data[1][0]]).get_converted(),\n converter.Month(matched[case_data[1][1]]).get_converted(),\n str(matched[case_data[1][2]]),\n ]\n\n return matched # pragma: no cover", "def unpack(fmt, data):\n fmt = _normalize(fmt)\n formatdef, endianness, i, alignment = _getmode(fmt)\n j = 0\n num = 0\n result = []\n length = calcsize(fmt)\n if length != len (data):\n raise StructError(\"unpack str size does not match format\")\n while i < len(fmt):\n num, i = _getnum(fmt, i)\n cur = fmt[i]\n i += 1\n try:\n format = formatdef[cur]\n except KeyError:\n raise StructError(\"%s is not a valid format\" % cur)\n\n if not num :\n num = 1\n\n if cur == 'x':\n j += num\n elif cur == 's':\n result.append(data[j:j + num])\n j += num\n elif cur == 'p':\n n = data[j]\n if n >= num:\n n = num - 1\n result.append(data[j + 1:j + n + 1])\n j += num\n else:\n # skip padding bytes until we get at a multiple of size\n if j > 0 and alignment:\n padding = format['size'] - j % format['size']\n j += padding\n for n in range(num):\n result += [format['unpack'](data, j, format['size'],\n endianness)]\n j += format['size']\n\n return tuple(result)", "def getFormatList(base_url):\n\n\tquery_url = base_url + \"/formats\"\n\trequest = urllib2.urlopen(query_url)\n\tresponse = request.read()\n\tresponse_xml = ET.fromstring(response)\n\n\tfmt_list = {}\n\n\tformats = response_xml.findall(\".//objectFormat\")\n\n\tfor f in formats:\n\t\tfmt_identifier = f.find(\"formatId\").text\n\t\tfmt_name = f.find(\"formatName\").text\n\t\tfmt_type = f.find(\"formatType\").text\n\t\tfmt_path = makeValidFormatPath(fmt_name)\n\n\t\tfmt_list[fmt_identifier] = { \"formatId\" : fmt_identifier, \"formatName\" : fmt_name, \"formatType\" : fmt_type, \"formatPath\" : fmt_path }\n\n\treturn fmt_list", "def parseStructuredData( raw_data, format ):\n\tparsed_data = {}\n\n\tBEGIN = 0\n\tEND = 1\n\tCONV = 2\n\n\tfor key in format.keys():\n\t\tkey_format = format[key]\n\n\t\tif key_format[END] == \"\":\n\t\t\tkey_format[END] = key_format[BEGIN] + len(raw_data)\n\n\t\ttxt = raw_data[key_format[BEGIN]:key_format[END]]\n\n\t\tif len(key_format[CONV]) > 0:\n\t\t\tparsed_data[key] = struct.unpack(key_format[CONV], txt)[0]\n\t\telse:\n\t\t\tparsed_data[key] = txt\n\n\treturn parsed_data", "def GetFormatType(format_):\n\n formattype = format_\n bitsize = 0\n if isinstance(format_, str):\n match = re.search(r'\\s*(\\D+)', format_)\n if match:\n formattype = match.group(0)\n bitsize = struct.calcsize(formattype) * 8\n return formattype, bitsize", "def makeNamesFromFormats(formats):\n i = getIter(formats)\n if not i:\n return\n\n try:\n c = 0\n item = i.next()\n while item:\n c = c +1\n name = 'c%s' % c\n if isinstance(item, str):\n yield name\n else:\n l = []\n for a in makeNamesFromFormats(item):\n l.append(a)\n yield (name, l)\n item = i.next()\n except StopIteration:\n pass", "def guess_file_date_format(filename):\n for line in open(filename):\n try:\n format = guess_format(line)\n except CannotParse:\n pass\n else:\n return format\n\n raise CannotParse(\"No date/time strings found in '%s'\" % filename)", "def read_fmt(bib_name, bib_file):\n cache_name, formatted_cache_name = _cache_name(bib_name, bib_file)\n\n try:\n meta_data, formatted_entries = cache.read_global(formatted_cache_name)\n except:\n raise cache.CacheMiss()\n\n # raise a cache miss if the modification took place after the caching\n modified_time = os.path.getmtime(bib_file)\n if modified_time > meta_data[\"cache_time\"]:\n raise 
cache.CacheMiss()\n\n # validate the version and format strings are still valid\n if (meta_data[\"version\"] != _VERSION or\n any(meta_data[s] != get_setting(\"cite_\" + s)\n for s in [\"panel_format\", \"autocomplete_format\"])):\n print(\"Formatting string has changed, updating cache...\")\n # read the base information from the unformatted cache\n current_time, bib_entries = cache.read_global(cache_name)\n # format and cache the entries\n formatted_entries = _create_formatted_entries(formatted_cache_name,\n bib_entries,\n current_time)\n\n return formatted_entries", "def split_str(str):\n \n logger = logging.getLogger(__name__)\n \n logger.debug('{0}'.format(str))\n \n match = re.match(r\"([0-9]+.?\\d{0,32}?)(d|m|s)\", str)\n \n if match:\n items = match.groups()\n \n return items[0], items[1]", "def getConverter( format ):\n\n data = set(format.split(\"-\"))\n\n if \"one\" in data:\n if \"forward\" in data:\n if \"closed\" in data:\n return __one_forward_closed \n else:\n return __one_forward_open\n else:\n if \"closed\" in data:\n return __one_both_closed\n else:\n return __one_both_open\n else:\n if \"forward\" in data:\n if \"closed\" in data:\n return __zero_forward_closed\n else:\n return __zero_forward_open\n else:\n if \"closed\" in data:\n return __zero_both_closed\n else:\n return __zero_both_open", "def get_formatted_messages(formats, label, context):\r\n format_templates = {}\r\n for format in formats:\r\n # conditionally turn off autoescaping for .txt extensions in format\r\n if format.endswith(\".txt\"):\r\n context.autoescape = False\r\n format_templates[format] = render_to_string((\r\n 'notification/%s/%s' % (label, format),\r\n 'notification/%s' % format), context_instance=context)\r\n return format_templates", "def getFormatsFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n item1 = item[1]\n if isinstance(item1, str):\n yield normalize_format(item1)\n else:\n l = []\n for j in getFormatsFromDescr(item1):\n l.append(j)\n yield l\n item = i.next()\n except StopIteration:\n pass", "def generate_logformat_regex(logformat):\n headers = []\n splitters = re.split(r'(<[^<>]+>)', logformat)\n regex = ''\n for k in range(len(splitters)):\n if k % 2 == 0:\n splitter = re.sub(' +', '\\\\\\s+', splitters[k])\n regex += splitter\n else:\n header = splitters[k].strip('<').strip('>')\n regex += '(?P<%s>.*?)' % header\n headers.append(header)\n regex = re.compile('^' + regex + '$')\n return headers, regex", "def resolve_date_format(year, month, day, fail_safe=True):\n\n FAIL_SAFE_DEFAULT = \"%Y-%m-%d\"\n\n def order_terms_formats(fmt_str):\n # see date (1), 'O' (not '0') is a mystery, 'E' is Buddhist calendar, '(.*)'\n # is an arbitrary suffix\n field_spec_re = re.compile(r'([-_0OE^#]*)([yYmbBde])(.*)')\n\n # see date (1)\n fmt_str = fmt_str.replace(\"%F\", \"%Y-%m-%d\")\n\n # e.g. 
\"%d.%m.%Y\" -> ['d.', 'm.', 'Y']\n fields = fmt_str.split(\"%\")[1:]\n\n ordered_terms = []\n ordered_formats = []\n for field in fields:\n match = field_spec_re.match(field)\n if not match:\n # ignore fields we are not interested in (like %A for weekday name, etc.)\n continue\n\n prefix, item, suffix = match.groups()\n if item in (\"d\", \"e\"):\n # \"e\" is the same as \"_d\"\n ordered_terms.append(day)\n elif item in (\"Y\", \"y\"):\n # 4-digit year, 2-digit year\n ordered_terms.append(year)\n elif item in (\"m\", \"b\", \"B\"):\n # month number, short month name, long month name\n ordered_terms.append(month)\n\n # \"%\" + prefix + item gives a format for date/time formatting functions\n ordered_formats.append(_DateFieldSpec(\"%\" + prefix + item, suffix.strip()))\n\n if len(ordered_terms) != 3 or len(ordered_formats) != 3:\n raise ValueError(\"Not all fields successfully identified in the format '%s'\" % fmt_str)\n\n return (tuple(ordered_terms), tuple(ordered_formats))\n\n fmt_str = locale_mod.nl_langinfo(locale_mod.D_FMT)\n\n if not fmt_str or \"%\" not in fmt_str:\n if fail_safe:\n # use some sane default\n fmt_str = FAIL_SAFE_DEFAULT\n else:\n raise ValueError(\"Invalid date format string for current locale: '%s'\" % fmt_str)\n\n try:\n return order_terms_formats(fmt_str)\n except ValueError:\n if not fail_safe:\n raise\n else:\n # if this call fails too, something is going terribly wrong and we\n # should be informed about it\n return order_terms_formats(FAIL_SAFE_DEFAULT)", "def _check_tokens_are_valid(format_string, message):\n named_tokens = re.findall(r\"{(\\w*)}\", format_string)\n invalid_tokens = [x for x in named_tokens if x.lower() not in _valid_tokens]\n if invalid_tokens:\n msg = message\n msg += \" [{0}]. \".format(\", \".join(invalid_tokens))\n msg += 'Did you check your \"modules.yaml\" configuration?'\n raise RuntimeError(msg)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def _parse_data(data: str) -> Tuple[str, str, str, int, int, int, str]:\n\n phg = None\n rng = None\n dfs = None\n course = None\n speed = None\n altitude = None\n comment = None\n\n if re.match(r'^PHG[0-9]{4}', data[:7]):\n # Packet has a PHG (power, antenna height/gain/directivity) value\n phg = data[3:7]\n logger.debug(\"PHG is {}\".format(phg))\n data = data[7:]\n\n elif re.match('^RNG[0-9]{4}', data[:7]):\n # Packet has an RNG (radio range) value\n rng = data[3:7]\n logger.debug(\"RNG is {}\".format(rng))\n data = data[7:]\n\n elif re.match('^DFS[0-9]{4}', data[:7]):\n # Packet has a DFS (DF signal strength, antenna height/gain/directivity) value\n dfs = data[3:7]\n logger.debug(\"DFS is {}\".format(dfs))\n data = data[7:]\n\n elif re.match('^[0-9]{3}/[0-9]{3}', data[:7]):\n # Packet has course and speed values\n course = int(data[:3])\n speed = int(data[4:7])\n logger.debug(\"Course is {}, speed is {}\".format(course, speed))\n data = data[7:]\n\n # TODO - parse BRG/NRQ\n\n # Check for comment\n if len(data) > 0:\n\n # Check for altitude\n # As per APRS 1.01 C6 P26, altitude as /A=nnnnnn may appear anywhere in the comment\n has_altitude = re.match('.*/A=([0-9]{6}).*', data)\n if has_altitude:\n # TODO - fix altitude format\n altitude = int(has_altitude.groups()[0])\n logger.debug(\"Altitude is {} ft\".format(altitude))\n\n # Strip out the altitude from the comment\n data = re.sub(r'/A=[0-9]{6}', \"\", data)\n\n # Set the comment as the remainder of the information field\n comment = data\n logger.debug(\"Comment is 
{}\".format(comment))\n\n return (phg, rng, dfs, course, speed, altitude, comment)", "def valueformat(value, format_list):\n\n # print(\"\\n\", format_list, value)\n concat_key = format_list.split('.')\n # Pass in either the key of the field\n # or pass in resource.key to enable a resource lookup.\n key = \"\"\n resource = \"\"\n member_id = \"\"\n key_sequence = [key, resource, member_id]\n count = 0\n for r in reversed(concat_key):\n key_sequence[count] = r\n count += 1\n\n # print(\"Concat_key:\", concat_key)\n key = key_sequence[0]\n resource = key_sequence[1]\n member_id = key_sequence[2]\n\n # print(\"Key:\", key)\n\n if key:\n if key.lower() == \"address\":\n return dt_address(value)\n\n elif key.lower() == \"telecom\":\n return dt_telecom(value)\n\n elif key.lower() == \"name\":\n return dt_name(value)\n elif key.lower() == 'dosage':\n return dt_dosage(value)\n elif key.lower() == 'medicationreference':\n # print(\"Working on\", key, \": \", value)\n # f_value = value\n # lookup field_formats\n # concat_key should have a resource name\n # print(\"\\n\\nRESOURCE:\", resource)\n # print(\"calling dt_medicationreference with Resource:\", resource, \", value:\", value)\n return dt_medicationreference(value, member_id, resource)\n elif key.lower() == 'dataabsentreason':\n if isinstance(value, dict):\n return value['coding'][0]['display']\n else:\n return value\n elif key.lower() == 'valuequantity':\n # return str(value['value']) + \" \" + value['unit']\n return dt_valuequantity(value)\n elif key.lower() == 'valuestring':\n return value\n elif key.lower() == 'interpretation':\n return value['coding'][0]['display']\n elif key.lower() == 'referencerange':\n return dt_referencerange(value)\n elif key.lower() == 'requester':\n if 'display' in value['agent']:\n return dt_reference(value['agent'], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n # elif key.lower() == \"result\":\n # return dt_reference(value[0], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'participant':\n if 'display' in value[0]['individual']:\n return dt_reference(value[0]['individual'], member_id)\n elif key.lower() == 'location':\n if 'display' in value[0]['location']:\n return dt_reference(value[0]['location'], member_id)\n elif key.lower() == 'communication':\n return dt_communication(value)\n else:\n # print(\"value:\", value, \" type:\", type(value), \" for: \", key)\n return value", "def is_valid_format(format_string): \n # default\n is_valid = True\n \n # list of valid formats\n valid_formats = ['hex', 'char', 'schar','uint', 'int', 'double', \n 'ascii', 'long', 'long long', 'float']\n \n # list of formats provided (may be a single format)\n format_list = format_string.split(', ')\n \n # check each item in the provided list\n for item in format_list:\n \n # if it does not match a valid format then it is invalid\n if item not in valid_formats:\n is_valid = False\n # end if\n # end for\n \n return is_valid", "def build_block(self, format_string):\n first_block = Block(None, py3_wrapper=self.py3_wrapper)\n block = first_block\n\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n value = token.group(0)\n if 
token.group(\"block_start\"):\n # Create new block\n block = block.new_block()\n elif token.group(\"block_end\"):\n # Close block setting any valid state as needed\n # and return to parent block to continue\n if not block.parent:\n raise Exception(\"Too many `]`\")\n block = block.parent\n elif token.group(\"switch\"):\n # a new option has been created\n block = block.switch()\n elif token.group(\"placeholder\"):\n # Found a {placeholder}\n key = token.group(\"key\")\n format = token.group(\"format\")\n block.add(Placeholder(key, format))\n elif token.group(\"literal\"):\n block.add(Literal(value))\n elif token.group(\"lost_brace\"):\n # due to how parsing happens we can get a lonesome }\n # eg in format_string '{{something}' this fixes that issue\n block.add(Literal(value))\n elif token.group(\"command\"):\n # a block command has been found\n block.set_commands(token.group(\"command\"))\n elif token.group(\"escaped\"):\n # escaped characters add unescaped values\n if value[0] in [\"\\\\\", \"{\", \"}\"]:\n value = value[1:]\n block.add(Literal(value))\n\n if block.parent:\n raise Exception(\"Block not closed\")\n # add to the cache\n self.block_cache[format_string] = first_block", "def parser(string: str, token: str) -> List[float]:\n search_token = re.compile(r\"{token}: (.*?){unit}\".format(token=token,\n unit=UNIT))\n output = re.findall(search_token, string)\n if len(output) == 0:\n return []\n\n return [float(i) for i in output]", "def parseConfig(f):\n config = {\"formats\":{}}\n \n for line in f:\n if line.startswith(\"//\"): \n continue\n \n sline = re.split(\"[=\\s]\", line)\n if sline[0] is \"\":\n continue\n \n if sline[0]==\"format\":\n #Puts the format as a key in the dict pointed to by \"formats\"\n config[\"formats\"][sline[1]] = sline[3] \n else:\n config[sline[0]] = sline[1]\n \n return config", "def interpret_datetime(timestamp):\n formats = (\n \"%Y-%m-%d_%H:%M:%S.%f\",\n \"%Y-%m-%d_%H-%M-%S-%f\",\n DATE_FORMAT,\n )\n\n for i, fmt in enumerate(formats):\n try:\n return datetime.strptime(timestamp, fmt)\n except ValueError:\n if i == len(formats) - 1:\n raise\n continue", "def select_stylestrs(cfgstr):\n stylestrs = []\n for s in cfgstr.split():\n if s in vars(fmt):\n stylestrs.append(s)\n return stylestrs", "def __format_input_translator(str_to_process):\n return re.sub(r'\\([^)]*\\)', '', str_to_process).replace(' ', '').split('/')", "def _prepare_to_convert(coordinates: str) -> tuple:\n degrees, minutes, seconds = True, True, True\n\n if coordinates == coordinates.replace(\"°\", \" \"): degrees = False\n if coordinates == coordinates.replace(\"′\", \" \"): minutes = False\n if coordinates == coordinates.replace(\"″\", \" \"): seconds = False\n\n coordinates = coordinates.replace(\"°\", \" \").replace(\"′\", \" \").replace(\"″\", \" \").split(\" \")\n del (coordinates[-1])\n\n if seconds is False: coordinates.append(0)\n if minutes is False: coordinates.insert(0, 1)\n if degrees is False: coordinates.insert(0, 0)\n\n for i in range(len(coordinates)):\n coordinates[i] = float(coordinates[i])\n return tuple(coordinates)", "def parse_date(date_string, format):\n try:\n return datetime.strptime(date_string, format)\n except ValueError:\n return None", "def get_color_names(self, format_string):\n names = set()\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"command\"):\n name = dict(parse_qsl(token.group(\"command\"))).get(\"color\")\n if (\n not name\n or name in COLOR_NAMES_EXCLUDED\n or name in COLOR_NAMES\n 
or name[0] == \"#\"\n ):\n continue\n names.add(name)\n return names", "def parsePercentExpression(literal, format):\n\tmat = _getREForPercentExpression(format).match(literal)\n\tif not mat:\n\t\traise ValueError(\"'%s' cannot be parsed using format '%s'\"%(\n\t\t\tliteral, format))\n\treturn mat.groupdict()", "def read_f(self, fmt, offset=None):\n\n if offset is not None:\n self.seek(offset)\n\n # place commas before and after each instance of S or Z\n for special in ['S', 'Z']:\n fmt = fmt.replace(special, ',' + special + ',')\n\n # split S and Z into isolated strings\n fmt = fmt.split(',')\n\n # construct a tuple of unpacked data\n data = ()\n for subfmt in fmt:\n if subfmt == 'S':\n data += (self.read_string(),)\n elif subfmt == 'Z':\n data += (self.read_bool(),)\n else:\n data += self.read_and_unpack(subfmt)\n\n if len(data) == 1:\n return data[0]\n else:\n return data", "def asformat(self, format):", "def format_input(self, args):\n\n new_list = []\n if args[1].find('{') != -1:\n new_list = self.format_dicti(args)\n return new_list\n else:\n new_list = []\n new_list.append(args[0])\n new_str = args[1][ args[1].find('(') + 2 : args[1].find(',') - 1]\n new_str += args[1][ args[1].find(',') : args[1].find(')') - 0]\n new_list.append(\" \".join(new_str.split(\", \") ) )\n\n return \" \".join(i for i in new_list)", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def format(self, varnames, fmts):\n varnames = self._find_vars(varnames, empty_ok=False)\n indexes = list(map(self._varlist.index, varnames))\n \n # check that fmts are specified properly\n if isinstance(fmts, str):\n fmts = fmts.split()\n else:\n if ( not isinstance(fmts, collections.Iterable) \n or not all(isinstance(f, str) for f in fmts) ):\n raise TypeError(\"given fmts must be str or iterable of str\")\n fmts = [x for s in fmts for x in s.split()]\n if len(fmts) == 0:\n raise ValueError(\"no formats specified\")\n \n # check fmts for validity\n is_valid = self._is_valid_fmt\n if not all(is_valid(fmt) for fmt in fmts):\n bad_fmts = \" \".join(fmt for fmt in fmts if not is_valid(fmt))\n raise ValueError(\"invalid formats: \" + bad_fmts)\n \n # pad fmts if necessary \n nvarnames = len(varnames)\n nfmts = len(fmts)\n if nfmts < nvarnames:\n fmts = list(fmts) + [fmts[-1]]*(nvarnames - nfmts)\n \n # check that formats match Stata types\n #typlist = self._typlist\n isstrvar = self._isstrvar\n if not all(isstrvar(i) == bool(STR_FMT_RE.match(fmt))\n for i, fmt in zip(indexes, fmts)):\n raise ValueError(\"format does not match Stata variable type\")\n \n # replace fmts (extras, if any, don't get used)\n for i, fmt in zip(indexes, fmts):\n self._fmtlist[i] = fmt\n \n # assume there are changes\n self._changed = True", "def _string_to_date(datestr,fmt):\n if not isinstance(datestr,str):\n raise InstrumentParameterException('Value %s is not a string.' % str(datestr))\n try:\n date_time = time.strptime(datestr,fmt)\n date = (date_time[2],date_time[1],date_time[0])\n\n except ValueError:\n raise InstrumentParameterException('Value %s could not be formatted to a date.' 
% str(datestr))\n \n return date", "def try_parsing_date(text):\n for fmt in ('%I %p', '%I %M %p', '%I:%M %p'):\n try:\n return datetime.datetime.strptime(text, fmt)\n except ValueError:pass\n if \":\" in text:\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text.split(\":\")[0])>=8 else \"PM\"), '%I:%M %p')\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text)>=8 else \"PM\"), '%I %p')", "def parseTime(string):\t\n \n if string == \"\":\n result = None\n if 'T' in string:\n string = string.replace('T', ' ')\n if 'Z' in string:\n string = string.replace('Z', '') \n\n if len(string) < 19:\n # string has some single digits\n p = \"\"\"^([0-9]{4})-([0-9]{1,2})-([0-9]{1,2}) \n ([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2}).*$\"\"\"\n s = re.findall(p, string)\n if len(s) > 0:\n string = '{0}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'\\\n .format(*[int(x) for x in s[0]])\n\n for date_format in DATE_FORMATS:\n try:\n result = datetime.datetime.strptime(string, date_format)\n except ValueError:\n pass\n\n return result", "def parseDate(date):\n formats = [\n \"D MMM YY, hh:mm a\", \n \"YYYY-MM-DDTHH:mm:ss+00:00\", \n \"ddd, D MMM YYYY HH:mm:ss +0530\", # NDTV\n \"ddd, D MMM YYYY HH:mm:ss +0100\", # skynews\n \"ddd, D MMM YYYY HH:mm:ss -0400\", # reuters\n \"D MMM, YYYY\", # espn cricket\n \"ddd, D MMM YYYY HH:mm:ss GMT\", # times of india\n \"ddd, D MMM YYYY HH:mm:ss +0200\", # lifrea\n \"ddd, D MMM YYYY HH:mm:ss +0000\", # linux, ubuntu\n \"ddd, D MMM YYYY HH:mm:ss -0700\", # iTunes\n ]\n\n for f in formats:\n try:\n parsed_date = tryDateFormat(date, f)\n return parsed_date.format(\"D MMM YY, hh:mm a\")\n except Exception as e:\n pass\n else:\n return \"Invalid date\"", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def get_parameter(pstring):\n parameters = pstring.replace(',', ' ').split()\n if len(parameters) == 1:\n init_value = float(parameters[0])\n return (init_value, None, None)\n elif len(parameters) == 3:\n init_value = float(parameters[0])\n if parameters[1].upper() == 'NONE':\n lower_value = None\n else:\n lower_value = float(parameters[1])\n if parameters[2].upper() == 'NONE':\n upper_value = None\n else:\n upper_value = float(parameters[2])\n return (init_value, lower_value, upper_value)\n else:\n raise ValueError('Invalid parameter format: %s' % pstring)", "def register_format(recipe):\n afr = AFMFormatRecipe(recipe)\n formats_available.append(afr)\n # suffix\n if afr.suffix not in formats_by_suffix:\n formats_by_suffix[afr.suffix] = []\n formats_by_suffix[afr.suffix].append(afr)\n # mode\n if afr.mode not in formats_by_mode:\n formats_by_mode[afr.mode] = []\n formats_by_mode[afr.mode].append(afr)\n # supported extensions\n if afr.suffix not in supported_extensions: # avoid duplucates\n supported_extensions.append(afr.suffix)\n supported_extensions.sort()", "def findall(pattern, text):\r\n\tspl = re.compile(pattern).split(text)\r\n\tresult = []\r\n\tbeginTag = \"\"\r\n\tendTag = None\r\n\tbeginFormat = \"\"\r\n\tendFormat = \"\"\r\n\tinitText = text\r\n\tfor s in spl:\r\n\t\ttext = text[len(s)+2:]\r\n\t\tend = text.find(\")s\")\r\n\t\tvar = \"\"\r\n\t\tif len(text) > 0:\r\n\t\t\tvar = text[:end]\r\n\t\t\tresult.append(var)\r\n\t\tif var == \"content\":\r\n\t\t\tbeginTag += s\r\n\t\t\tendTag = \"\"\r\n\t\telif endTag != None:\r\n\t\t\tendTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in 
[\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tendFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tendFormat += \"self.%s,\"%var\r\n\t\t\t\tendTag += \"\\x25s\"\r\n\t\telse:\r\n\t\t\tbeginTag += s\r\n\t\t\tif var != \"\":\r\n\t\t\t\tif var in [\"disabled\",\"checked\",\"active\",\"selected\"]:\r\n\t\t\t\t\tbeginFormat += \" b'%s' if self.%s else b'',\"%(var, var)\r\n\t\t\t\telse:\r\n\t\t\t\t\tbeginFormat += \"self.%s,\"%var\r\n\t\t\t\tbeginTag += \"\\x25s\"\r\n\t\ttext = text[end+2:]\r\n\tif endTag == None:\r\n\t\tendTag = \"\"\r\n\t\tendFormat = \"\"\r\n\treturn result, beginTag, endTag, beginFormat, endFormat" ]
[ "0.65669346", "0.62842053", "0.6130616", "0.61288065", "0.60436875", "0.5937322", "0.5901326", "0.58831894", "0.5852524", "0.57953036", "0.5765644", "0.57541496", "0.57023305", "0.56520545", "0.56048083", "0.5603498", "0.5459637", "0.5429871", "0.5376852", "0.53540975", "0.5352906", "0.5329984", "0.53212583", "0.52624923", "0.5258512", "0.5246587", "0.5198944", "0.51661754", "0.513486", "0.51218945", "0.51000667", "0.5083689", "0.50711507", "0.50639105", "0.5063032", "0.5026774", "0.502133", "0.5018339", "0.5011537", "0.49798977", "0.49584708", "0.49565127", "0.49387592", "0.49334702", "0.49329984", "0.49299213", "0.4894328", "0.48817024", "0.48789778", "0.4875315", "0.48703697", "0.48524705", "0.4851324", "0.4851324", "0.48367384", "0.48213378", "0.48147085", "0.48093012", "0.4807934", "0.48026043", "0.48013297", "0.47994635", "0.47959974", "0.47884703", "0.47848812", "0.4783625", "0.47507963", "0.47454315", "0.47452757", "0.4744936", "0.4740796", "0.47353637", "0.47226292", "0.47182122", "0.47135818", "0.4702489", "0.46980354", "0.4692607", "0.46920088", "0.46756324", "0.46708277", "0.46681905", "0.46606493", "0.4650407", "0.46467695", "0.46439475", "0.46402755", "0.46391624", "0.4634674", "0.4632874", "0.4632874", "0.46311998", "0.4623561", "0.46231136", "0.46132636", "0.46086472", "0.46049073", "0.45984036", "0.4596587", "0.45883885" ]
0.784211
0
Update a format string renaming placeholders.
def update_placeholders(self, format_string, placeholders): # Tokenize the format string and process them output = [] for token in self.tokens(format_string): if token.group("key") in placeholders: output.append( "{{{}{}}}".format(placeholders[token.group("key")], token.group("format")) ) continue elif token.group("command"): # update any placeholders used in commands commands = parse_qsl(token.group("command"), keep_blank_values=True) # placeholders only used in `if` if "if" in [x[0] for x in commands]: items = [] for key, value in commands: if key == "if": # we have to rebuild from the parts we have condition = Condition(value) variable = condition.variable if variable in placeholders: variable = placeholders[variable] # negation via `!` not_ = "!" if not condition.default else "" condition_ = condition.condition or "" # if there is no condition then there is no # value if condition_: value_ = condition.value else: value_ = "" value = "{}{}{}{}".format(not_, variable, condition_, value_) if value: items.append(f"{key}={value}") else: items.append(key) # we cannot use urlencode because it will escape things # like `!` output.append(r"\?{} ".format("&".join(items))) continue value = token.group(0) output.append(value) return "".join(output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def renameFormats(self, nameDict):\n for item in globalref.docRef.root.descendantGen():\n item.formatName = nameDict.get(item.formatName, item.formatName)", "def _fix_fmts(self, labname, mapping):\n default_fmt_widths = self._default_fmt_widths\n \n indexes = [i for i in range(self._nvar) if self._lbllist[i] == labname]\n if indexes == []: return\n lab_size = max([len(v) for k, v in mapping.items()])\n fmtlist = self._fmtlist\n typlist = self._typlist\n isstrvar = self._isstrvar\n for i in indexes:\n if isstrvar(i):\n continue # string values should not be labeled\n old_fmt = fmtlist[i]\n # check match agains numerical format\n match = NUM_FMT_RE.match(old_fmt)\n if match:\n fmt_width = int(match.group(3))\n if fmt_width < lab_size:\n prefix = ('%' + (match.group(1) or '') + \n (match.group(2) or ''))\n suffix = (match.group(4) + match.group(5) + \n match.group(6) + (match.group(7) or ''))\n new_fmt = prefix + str(lab_size) + suffix\n fmtlist[i] = new_fmt\n self._changed = True\n elif TIME_FMT_RE.match(old_fmt) or TB_FMT_RE.match(old_fmt):\n continue\n else: \n # Here, some garbled format must have been entered. \n # More effort could be made to identify intended format, \n # but instead we'll just paint over it.\n fmt_width = default_fmt_widths[typlist[i]]\n fmtlist[i] = '%' + str(max((lab_size,fmt_width))) + '.0g'\n self._changed = True", "def wrap_columns_name(self, format_string):\n self._data_frame = self._data_frame.rename(\n columns=lambda column: format_string.format(column)\n )", "def change_format_var_parser(text, tracker):\n param_names = format_var_parser(text)\n param_vars = {}\n for param_name in param_names:\n try:\n param_vars[param_name] = tracker.get_slot(param_name)\n except Exception as e:\n PYTHON_LOGGER.error(\"Error to get var name {}: {}\".format(param_name, e))\n return text.format(**param_vars)", "def sformatf(cls, msg, *args):\n #formats = {\"%t\": \"%d\", \"%0t\": \"%0d\"}\n #for s in formats:\n # msg = msg.replace(s, formats[s])\n #return sformatf(msg, *args)\n # TODO substitute old types %s/%d etc with {}\n #new_msg = cls.STR_RE.sub(r'{:\\1}', msg)\n #print(\"new_msg is \" + new_msg)\n for s in cls.formats:\n if s == \"%h\" or s == \"%0h\":\n msg = msg.replace(s, \"{:X}\")\n else:\n msg = msg.replace(s, \"{}\")\n return msg.format(*args)", "def update_name(name, mapping):\n m = street_type_re.search(name)\n if m:\n street_type = m.group()\n for key, value in mapping.iteritems():\n if street_type == key:\n name = name.replace(key,value)\n\n return name", "def formatNames(string_from):\n return re.sub('/', '-', string_from)", "def replace_param(string, param, value, param_format=None):\n\n if param_format == \"json\":\n return sub(r\"(?P<json_replacement>\\\"%s\\\"\\s*:\\s*)\\\"\\s*\\\"\" %\n escape(str(param)), \"\\\\1\\\"%s\\\"\" % value, string)\n elif param_format == \"header\":\n return sub(r\"%s=[^\\\\n]*\" % escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)\n else:\n return sub(r\"%s=[^&]*\" % 
escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)", "def updatePreview(self, baseName, *args):\n\n prefix = str(self.prefix.text())\n suffix = str(self.suffix.text())\n\n string = \"\"\n if len(prefix) > 0:\n string += prefix + \"_\"\n\n string += baseName\n\n if len(suffix) > 0:\n string += \"_\" + suffix\n\n self.previewName.setText(string)", "def update_weight_name(repl_patterns: Dict[str, str], weight_name: str) -> str:\n # Create a regular expression from all of the dictionary keys\n regex = re.compile('|'.join(map(re.escape, repl_patterns.keys())))\n\n # For each match, look up the corresponding value in the repl_patterns dict.\n return regex.sub(lambda match: repl_patterns[match.group(0)], weight_name)", "def rename_bindnames(tqry, li_adjust):\n for bindname, attrname in li_adjust:\n from_ = \"%(\" + bindname + \")s\"\n to_ = \"%(\" + attrname + \")s\"\n tqry = tqry.replace(from_, to_)\n return tqry", "def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", string_for_substitution):\n return_string = re.sub(\"//%%\" + i + \"%%//\", dictionary_of_vars[i],\n return_string)\n return return_string", "def format(self, varnames, fmts):\n varnames = self._find_vars(varnames, empty_ok=False)\n indexes = list(map(self._varlist.index, varnames))\n \n # check that fmts are specified properly\n if isinstance(fmts, str):\n fmts = fmts.split()\n else:\n if ( not isinstance(fmts, collections.Iterable) \n or not all(isinstance(f, str) for f in fmts) ):\n raise TypeError(\"given fmts must be str or iterable of str\")\n fmts = [x for s in fmts for x in s.split()]\n if len(fmts) == 0:\n raise ValueError(\"no formats specified\")\n \n # check fmts for validity\n is_valid = self._is_valid_fmt\n if not all(is_valid(fmt) for fmt in fmts):\n bad_fmts = \" \".join(fmt for fmt in fmts if not is_valid(fmt))\n raise ValueError(\"invalid formats: \" + bad_fmts)\n \n # pad fmts if necessary \n nvarnames = len(varnames)\n nfmts = len(fmts)\n if nfmts < nvarnames:\n fmts = list(fmts) + [fmts[-1]]*(nvarnames - nfmts)\n \n # check that formats match Stata types\n #typlist = self._typlist\n isstrvar = self._isstrvar\n if not all(isstrvar(i) == bool(STR_FMT_RE.match(fmt))\n for i, fmt in zip(indexes, fmts)):\n raise ValueError(\"format does not match Stata variable type\")\n \n # replace fmts (extras, if any, don't get used)\n for i, fmt in zip(indexes, fmts):\n self._fmtlist[i] = fmt\n \n # assume there are changes\n self._changed = True", "def reformat(ctx):\n pass", "def updateName(g):\n try:\n n = int(g.group(2))\n except TypeError:\n n = 0\n\n return \"%s-%d\" % (g.group(1), n + 1)", "def update_street_name(name, mapping):\r\n m = street_type_re.search(name)\r\n if m:\r\n street_type = m.group()\r\n if street_type in list(mapping.keys()):\r\n better_street_type = mapping[street_type]\r\n name = street_type_re.sub(better_street_type, name)\r\n return name", "def fix_label(label):\n\n replace_dict = {'_': ' ',\n 'degE': '$^{\\circ}$E',\n 'ms-1': '$m s^{-1}$',\n 'm.s-1': '$m s^{-1}$',\n 'Wm-2': '$W m^{-2}$',\n '1000000 m2.s-1': '$10^6$m$^2$s$^{-1}$'\n } \n\n for value, replacement in list(replace_dict.items()):\n label = label.replace(value, replacement)\n\n return label", "def renameFields(self, nameDict):\n for format in self.values():\n if format.genericType in nameDict:\n nameDict[format.name] = nameDict[format.genericType]\n for item in 
globalref.docRef.root.descendantGen():\n for oldName, newName in nameDict.get(item.formatName, []):\n if oldName in item.data:\n item.data[newName] = item.data[oldName]\n del item.data[oldName]", "def _assign_label(self, format):\n cht_tmpl = self.out_label_tmpl\n return cht_tmpl.substitute(format)", "def dt_format_translate(pyfmt):\n\n translate = {\"%a\": \"ddd\",\n \"%A\": \"dddd\",\n \"%b\": \"mmm\",\n \"%B\": \"mmmm\",\n \"%c\": \"\",\n \"%d\": \"dd\",\n \"%f\": \"\",\n \"%H\": \"hh\",\n \"%I\": \"hh\",\n \"%j\": \"\",\n \"%m\": \"mm\",\n \"%M\": \"mm\",\n \"%p\": \"AM/PM\",\n \"%S\": \"ss\",\n \"%U\": \"\",\n \"%w\": \"\",\n \"%W\": \"\",\n \"%x\": \"\",\n \"%X\": \"\",\n \"%y\": \"yy\",\n \"%Y\": \"yyyy\",\n \"%z\": \"\",\n \"%Z\": \"\",\n \"%%\": \"%\"}\n\n xlfmt = str(pyfmt)\n\n for item in translate:\n if item in xlfmt:\n xlfmt = xlfmt.replace(item, translate[item])\n return xlfmt", "def update(name):\n strRet = mapping(name)\n return strRet", "def replace(self, string):\n for i, j in self.defs.items():\n string = string.replace(i, j)\n return string", "def reformat_placeholders(content: str) -> str:\n return content.replace(\"<MM>\", \"[[\").replace(\"</MM>\", \"]]\")", "def format_map(self, format_string, mapping):\n return self.vformat(format_string, args=None, kwargs=mapping)", "def VarNameReplace(old, new, *vars):\n\t#syntax = [ \"rename variables\" ]\n\tsyntax = []\n\tif not vars or \"*\" in vars:\n\t\tvars = None\n\tvd = spssaux.VariableDict(vars)\n\tfor v in vd:\n\t\toldname = v.VariableName\n\t\tnewname = oldname.replace(old,new).strip()\n\t\tif newname.lower() != oldname.lower():\n\t\t\tsyntax += [ \"(%s=%s)\" % (oldname, newname) ]\n\tif syntax:\n\t\tsyntax.insert(0, \"rename variables\")\n\t\tsyntax += [ spssterm ]\n\t\tif __debug__:\n\t\t\tprint \" \".join(syntax)\n\t\tspss.Submit(syntax)", "def reformat(self):\n\t\told_path = os.path.join( self.path, self.init_str )\n\t\tnew_path = os.path.join( self.path, self.reorder() )\n\t\tos.rename(old_path,new_path)", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def format_name(f_name, l_name): #docstring (documentation)\n if f_name == \"\" or l_name == \"\":\n return \"You didn't provide valid inputs.\"\n formated_f_name = f_name.title()\n formated_l_name = l_name.title()\n return f\"Result: {formated_f_name} {formated_l_name}\"", "def transform_table_name(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n largs = list(args)\n if \"%\" in args[1]:\n if 'transform_time' in kwargs \\\n and isinstance(kwargs['transform_time'], datetime.datetime):\n t = kwargs['transform_time']\n else:\n t = datetime.datetime.utcnow()\n largs[1] = t.strftime(args[1])\n largs = tuple(largs)\n return f(*largs, **kwargs)\n return wrapper", "def replace_placeholders(self, placeholder_dict):\n\n for placeholder, value in placeholder_dict.items():\n placeholder_wrapped = f\"{self.marker_string}{placeholder}{self.marker_string}\"\n\n if placeholder not in self.unresolved_placeholders:\n self.hd.log.warn(f\"Placeholder {placeholder} not found in sequence.\")\n else:\n self.sequence = self.sequence.replace(f\"{placeholder_wrapped}\", str(value))\n self.unresolved_placeholders.discard(placeholder)", "def __makeFormatString(self):\n self.__formatString = \"\"\n for f in self.__columns:\n self.__formatString += \"%(\"+ f + \")-\" + str(self.__widths[f]) + \\\n \"s \"", "def reformat(self, seq_name, *, prefix=\"s\"):\n\t\treturn \"%s_%012u\" % (prefix, self.get_sid(seq_name))", "def format_fss_name(original):\n sections = 
original.split('_')\n direction = sections[-1]\n\n code = sections[0]\n\n left, right = ' '.join(sections[1:-1]).split('VS')\n if sections[-1] == \"DN\":\n direction = '<FONT COLOR=\"#943126\">downregulated</FONT>'\n else:\n direction = '<FONT COLOR=\"#196f3d\">upregulated</FONT>'\n\n result = f\"< {code}<BR/>{left}vs{right}<BR/>{direction} >\"\n return(result)", "def update_format_string(self):\n if self._show_units:\n units = \" {}\".format(self._unit)\n else:\n units = \"\"\n\n if self._show_step_exponent:\n self.setSuffix(\"{0} Step: 1E{1}\".format(units, self.step_exponent))\n self.lineEdit().setToolTip(\"\")\n else:\n self.setSuffix(units)\n self.lineEdit().setToolTip('Step: 1E{0:+d}'.format(self.step_exponent))", "def update_name(name, mapping):\n words_name = name.split(\" \")\n if words_name not in expected:\n for word in words_name:\n if word in mapping:\n name = name.replace(word, mapping[word])\n \n if word == word.lower():\n if word not in allowed_lowercase:\n name = name.replace(word, word.capitalize())\n \n if words_name[0] not in expected:\n if words_name[0] not in mapping:\n if words_name[0] == \"Fernando\":\n name = \"Avenida \" + name\n elif words_name[0] == \"rua\":\n pass\n else:\n name = \"Rua \" + name\n\n return name", "async def edit_names(message):\n split_message = message.content.split()\n\n try:\n target_number = int(split_message[1])\n correct_name = ' '.join(split_message[2:])\n old_name = char_name_dict[message.author.id][target_number]\n temp_name_bucket[message.author.id] = [target_number, correct_name]\n\n message_state[message.author.id] = 'EDIT'\n await message.channel.send(f\"You would like to replace `{old_name}` with `{correct_name}`. If that is correct, please type `confirm` otherwise type `cancel`.\")\n except:\n print(traceback.format_exc())", "def register_string_format(name: str, strategy: st.SearchStrategy) -> None:\n if not isinstance(name, str):\n raise TypeError(f\"name must be of type {str}, not {type(name)}\")\n if not isinstance(strategy, st.SearchStrategy):\n raise TypeError(f\"strategy must be of type {st.SearchStrategy}, not {type(strategy)}\")\n\n STRING_FORMATS[name] = strategy", "def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def doEdit(var, value, target):\n currentValue = target.get(var, \"\")\n newValue = Simplifier.simplify(str(value).replace(f\"{{{var}}}\", str(currentValue)))\n target[var] = newValue", "def _interpolate(string):\n return string % env.project", "def update(self, database_vals):\n # TODO : handle evaluation delimited by $. 
Imply a try except\n vals = {d: database_vals[d] for d in self.depend_on}\n new_val = self.formatting.format(**vals)\n deferred_call(setattr, self, 'value', new_val)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def update_short_name(name):\n # First verify that the common errors have been fixed\n name = update_street_name(name)\n\n # Find the abbreviation to replace\n m = over_abbr_re.search(name)\n if m:\n if m.group() in abbreviations:\n name = over_abbr_re.sub(abbreviations[m.group()], name)\n\n return name", "def fmt(competitor_name: str) -> str:\n name = competitor_name.replace(\"_a\", r\" $\\alpha$ \")\n name = name.replace(\"_b\", r\" $\\beta$ \")\n return name", "def custom_strftime(format, t): \n return t.strftime(format).replace(\"{S}\", str(t.day) + suffix(t.day))", "def str_replace(data):\n for key, value in data.items():\n if isinstance(value, (str, unicode)):\n data[key] = value.format(**data)", "def format_name(f_name, l_name):\n #Using an early return if inputs aren't valid\n if f_name == \"\" or l_name == \"\":\n return \"You didn't provide valid inputs.\"\n\n formatted_f_name = f_name.title()\n formatted_l_name = l_name.title()\n\n #Returning a formatted string when inputs are valid\n return f\"{formatted_f_name} {formatted_l_name}\"", "def _interpolate(format):\n from tokenize import tokenprog\n\n def matchorfail(text, pos):\n match = tokenprog.match(text, pos)\n if match is None:\n raise _ItplError(text, pos)\n return match, match.end()\n\n namechars = \"abcdefghijklmnopqrstuvwxyz\" \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_\";\n chunks = []\n pos = 0\n\n while 1:\n dollar = format.find(\"$\", pos)\n if dollar < 0: \n break\n nextchar = format[dollar + 1]\n\n if nextchar == \"{\":\n chunks.append((0, format[pos:dollar]))\n pos, level = dollar + 2, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token == \"{\": \n level = level + 1\n elif token == \"}\": \n level = level - 1\n chunks.append((1, format[dollar + 2:pos - 1]))\n\n elif nextchar in namechars:\n chunks.append((0, format[pos:dollar]))\n match, pos = matchorfail(format, dollar + 1)\n while pos < len(format):\n if format[pos] == \".\" and \\\n pos + 1 < len(format) and format[pos + 1] in namechars:\n match, pos = matchorfail(format, pos + 1)\n elif format[pos] in \"([\":\n pos, level = pos + 1, 1\n while level:\n match, pos = 
matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token[0] in \"([\": \n level = level + 1\n elif token[0] in \")]\": \n level = level - 1\n else: \n break\n chunks.append((1, format[dollar + 1:pos]))\n else:\n chunks.append((0, format[pos:dollar + 1]))\n pos = dollar + 1 + (nextchar == \"$\")\n\n if pos < len(format): \n chunks.append((0, format[pos:]))\n return chunks", "def markup_text(self, text):\n for moniker, name in S['names'].items():\n text = text.replace('${0}'.format(moniker.split('_')[1]), name)\n return text", "def update_column_format(self):\n pass", "def update_named_font(self, *a, **kw):\n return update_named_font(*a, **kw)", "def date_string_to_strftime_format(date_string):\n for data in STRF_DATA:\n for pattern in data.get('patterns', []):\n if pattern in date_string:\n date_string = date_string.replace(pattern, data['replacement'])\n break\n else:\n if data.get('regex'):\n date_string = re.sub(data['regex'], data['replacement'], date_string)\n # matches = find()\n # if any(matches):\n # date_string = date_string.replace(matches[0], data['replacement'])\n\n return date_string", "def undo_format_field_name(field_name):\n if json_api_settings.FORMAT_FIELD_NAMES:\n return format_value(field_name, \"underscore\")\n\n return field_name", "def fileRename(current_file,num,digits):\n # Key, value pairs of what to replace.\n dictobj = {\n '<num>': get_numbering_format(digits, num),\n '<datetaken>': date_to_string(get_date_taken(current_file),'%Y%m%d__%H_%M'),\n '<dname>': dirname\n }\n # Rename\n new_filename = multi_replace(filename_pattern, dictobj)\n shutil.move(current_file, new_filename)", "def format_string_2(file_num, float_num1, int_num, float_num2):\n\n result = f\"file_{file_num:0>3d} :{float_num1:9.2f}, {int_num:.2e}, {float_num2:.3g}\"\n print(result)\n return result", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def replace_in_string(s, args_dict):\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n return s", "def setFormattedStrings(object, event):\n entry = interfaces.IBiblatexEntry(object) # assert a biblatex entry object\n generator = interfaces.IFormattedEntryGenerator(entry)\n writer = interfaces.IWriteFormatted(entry)\n config = zope.component.queryUtility(\n interfaces.IBiblatexConfiguration, \n context = object)\n if config:\n languages = config.languages\n styles = config.styles\n else:\n languages = styles = (None,)\n for language in languages:\n for style in styles:\n #raise Exception(u\"language: %s, style: %s\" % (language, style))\n generator.setUp(language = language, style = style)\n generator.generate()\n writer.setBibliographicEntry(generator.getBibliographicEntry(), language, style)\n writer.setCitation(generator.getCitation(), language, style)\n writer.setCitationAgain(generator.getCitationAgain(), language, style)\n generator.tearDown()\n del generator\n del writer", "def strfdate(self, fmt):\n pattern = r'%({})'.format(reduce(lambda x, y: '{}|{}'.format(x, y), FORMAT_MAP.keys()))\n for f in re.findall(pattern, fmt):\n fmt = fmt.replace('%{}'.format(f), FORMAT_MAP[f](self))\n return fmt", "def set_text_f(self, format, *args):\n self._text.set(format % args)\n self.change_bg(\"green\")\n 
self._label.update_idletasks()", "def edit_format(sample, **kwargs):\n n_call_data = vcf.model.make_calldata_tuple(sample.data._fields + tuple(kwargs.keys()))\n n_data = tuple(kwargs.values())\n sample.data += n_data\n sample.data = n_call_data(*sample.data)", "def format_color_name(string, frame_name):\n if frame_name == \"primary\":\n color = \"red\"\n else:\n color = \"green\"\n return format_color(string, color)", "def line_replacer(config,change_this_line,key):\n for arg in config['HyperParameter'][key]: \n pattern=r'{}[ ]*=.*,'.format(arg)\n replace_value=config['HyperParameter'][key][arg][counter]\n if type(replace_value) is str:\n replace_value=\"'\"+replace_value+\"'\"\n change_this_line=re.sub(pattern,\"{}= {},\".format(arg,replace_value),change_this_line)\n return change_this_line", "def adjust_name_for_printing(name):\n if name is not None:\n name2 = name\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_m_\")\n name = name.replace(\"+\", \"_p_\").replace(\"!\", \"_I_\")\n name = name.replace(\"**\", \"_xx_\").replace(\"*\", \"_x_\")\n name = name.replace(\"/\", \"_l_\").replace(\"@\", '_at_')\n name = name.replace(\"(\", \"_of_\").replace(\")\", \"\")\n if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:\n raise NameError(\"name {} converted to {} cannot be further converted to valid python variable name!\".format(name2, name))\n return name\n return ''", "def format_like(s: str, escape='\\\\') -> str:\n return '%{}%'.format(s.replace('%', escape+'%').replace('_', escape+'_'))", "def replace_with(*, replacement, f=DECORATED):\n return replacement", "def adjust_date_format(date, format_in, format_out):\n return datetime.strptime(date, format_in).strftime(format_out)", "def format(self, fmt):\n if fmt not in [\".jpg\", \"jpg\", \".png\", \"png\"]:\n raise ValueError(f\"Invalid format: {fmt}\")\n if fmt.find(\".\") != 0:\n fmt = \".\" + fmt\n self._format = fmt", "def format_name(field_name):\r\n if field_name == \"celebration_tier\":\r\n return \"{wLargesse{n\"\r\n return \"{w%s{n\" % field_name.capitalize()", "def format(self, *args, **kwargs) -> String:\n pass", "def format_string_1(file_num, float_num1, int_num, float_num2):\n\n text = \"file_{:0>3d} :{:9.2f}, {:.2e}, {:.3g}\"\n result = text.format(file_num, float_num1, int_num, float_num2)\n print(result)\n return result", "def register_code_name(code_name,format_name):\n if format_name not in data_format_parser:\n raise ValueError(\"unknown format_name: {:s}\".format(format_name))\n code_name_map[code_name] = format_name", "def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sql", "def username_format(self, username_format):\n\n self._username_format = username_format", "def reformat():\n toolkit.reformat()", "def update_format(self, record):\n prefix = \"\\u001b[\"\n color = f\"{prefix}{self.color_map[record.levelno]}m\"\n bold = f\"{prefix}1m\"\n gray = f\"{prefix}1m{prefix}30m\"\n reset = f\"{prefix}0m\"\n self._style._fmt = (\n f\"%(asctime)s\"\n f\" {gray}│{reset} {color}%(levelname)-8s{reset} {gray}│{reset} \"\n )\n if hasattr(record, \"function\"):\n self._style._fmt += (\n f\"{gray}%(indent)s{reset}\"\n f\"{bold}%(function)s{reset}{gray}:{reset}\"\n \" %(message)s\"\n )\n else:\n self._style._fmt += \"%(indent)s%(message)s\"", "def w__format(self, string):\n s = ''\n for i in range(0, len(string) 
- 1, 2):\n s = s + \"%03s\" % string[i:i + 2]\n return s[1:]", "def replace_with(*, replacement='hello', f=DECORATED):\n return replacement", "def register_filename_format(format_name,parser):\n if format_name == \"ALL\":\n raise ValueError(\"filename format code ALL is reserved\")\n\n filename_format_parser[format_name] = parser", "def vformat(self, format_string, args, kwargs):\n self._used_kwargs = {}\n self._unused_kwargs = {}\n return super(MemorizeFormatter, self).vformat(format_string, args, kwargs)", "def test_update_multiecho_name():\n # Standard name update\n fn = 'sub-X_ses-Y_task-Z_run-01_bold'\n metadata = {'EchoTime': 0.01,\n 'EchoNumber': 1}\n echo_times = [0.01, 0.02, 0.03]\n out_fn_true = 'sub-X_ses-Y_task-Z_run-01_echo-1_bold'\n out_fn_test = update_multiecho_name(metadata, fn, echo_times)\n assert out_fn_test == out_fn_true\n # EchoNumber field is missing from metadata, so use echo_times\n metadata = {'EchoTime': 0.01}\n out_fn_test = update_multiecho_name(metadata, fn, echo_times)\n assert out_fn_test == out_fn_true\n # Catch an unsupported type and *do not* update\n fn = 'sub-X_ses-Y_task-Z_run-01_phasediff'\n out_fn_test = update_multiecho_name(metadata, fn, echo_times)\n assert out_fn_test == fn", "def substitute_names(tmpl_string, dct):\n return Template(tmpl_string).substitute(dct)", "def OnRenameTimer(self):\r\n\r\n self.EditLabel(self._current, self._curColumn)", "def format_colname(name):\n colnames = [\n \"AV\",\n \"RV\",\n \"EBV\",\n \"CAV1\",\n \"CAV2\",\n \"CAV3\",\n \"CAV4\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\",\n \"x_o\",\n \"gamma\",\n \"bump_area\",\n \"fh2\",\n \"nhtot\",\n \"nh2\",\n \"nhi\",\n \"NH_AV\",\n \"NH_EBV\",\n ]\n plotnames = [\n \"$A(V)$\",\n \"$R(V)$\",\n \"$E(B-V)$\",\n \"$C^{A(V)}_1$\",\n \"$C^{A(V)}_2$\",\n \"$C^{A(V)}_3$\",\n \"$C^{A(V)}_4$\",\n \"$C_1$\",\n \"$C_2$\",\n \"$C_3$\",\n \"$C_4$\",\n \"$x_o$\",\n r\"$\\gamma$\",\n r\"$\\pi C^{A(V)}_3 / 2 \\gamma$\",\n \"$f(H_2)$\",\n \"$N(H)$\",\n \"$N(H_2)$\",\n \"$N(HI)$\",\n \"$N(H)/A(V)$\",\n \"$N(H)/E(B-V)$\",\n ]\n dic_pairs = dict(zip(colnames, plotnames))\n\n out_name = name\n if name[:3] == \"log\":\n out_name = r\"$\\log (\" + name[3:].upper() + \")$\"\n elif name in dic_pairs.keys():\n out_name = dic_pairs[name]\n\n return out_name", "def fix_name(row, index, name_map):\n # print(\"Input row: {}\".format(row))\n name = row[index].strip()\n # print(\"Name entry is {}\".format(name))\n if name.endswith(\" (yourself)\"):\n name = name[:-len(\" (yourself)\")]\n # print(\"Shortening to |{}|\".format(name))\n if name not in name_map:\n name_map[name] = name # Initially the identity transform\n row[index] = name_map[name]", "def _format_label(self, lbl, plot_src):\n lbl.text = \"{0:}\".format(plot_src.name)", "def update_name(name, mapping): \n words = name.split()\n for w in range(len(words)):\n if words[w] in mapping:\n #print words[w]\n words[w] = mapping[words[w]]\n name = \" \".join(words)\n return name", "def asformat(self, format):", "def _substitute(template, fuzzer, benchmark):\n return template.format(fuzzer=fuzzer, benchmark=benchmark)", "def TransformNames(self) -> _n_2_t_0[str]:", "def _format(self):\n min_value = self.replacements.get(str(self.min), str(self.min))\n max_value = self.replacements.get(str(self.max), str(self.max))\n l_brace = '(' if min_value.find('inf') != -1 else '['\n r_brace = ')' if max_value.find('inf') != -1 else ']'\n\n return '{l_brace}{min_value}, {max_value}{r_brace}'.format(\n l_brace=l_brace, r_brace=r_brace,\n min_value=min_value, 
max_value=max_value)", "def _substitute(template, files, user_values):\n # Get all placeholder names\n placeholders = _get_placeholders(template)\n\n # Pre-fill placeholders based on existing file aliases\n placeholder_values = _prefill_placeholders(placeholders, files,\n user_values)\n\n # Add user specified values for the placeholders\n placeholder_values.update(**user_values)\n\n # Check whether all placeholder values are now properly provided.\n provided = set(placeholder_values.keys())\n needed = set(placeholders)\n missing = needed - provided\n if len(missing) > 0:\n raise ValueError('Cannot construct filename, because the following '\n 'parameters are missing: %s' % missing)\n\n # Do the substitution\n return template.format(**placeholder_values)", "def renameUI(*args, **kwargs)->AnyStr:\n pass", "def _replace_config_variables(self, string, node_id, cluster_name, region):\n\n if node_id:\n string = string.replace(\"{instance_id}\", node_id)\n if cluster_name:\n string = string.replace(\"{cluster_name}\", cluster_name)\n if region:\n string = string.replace(\"{region}\", region)\n return string", "def rename_cmip6_raw(ds, dim_name_di, printing=False, debug=False, verbose=False):\n ds = ds.copy()\n source_id = ds.attrs['source_id']\n \n # Check if there is an entry in the dict matching the source id\n if debug:\n print(dim_name_di.keys())\n # rename variables\n if len(dim_name_di) == 0:\n warnings.warn('input dictionary empty for source_id: `%s`. Please add values to https://github.com/jbusecke/cmip6_preprocessing/blob/master/cmip6_preprocessing/preprocessing.py' %ds.attrs['source_id'])\n else:\n \n for di in dim_name_di.keys():\n \n if debug or printing or verbose:\n print(di)\n print(dim_name_di[di])\n \n # make sure the input is a list\n if isinstance(dim_name_di[di], str):\n dim_name_di[di] = [dim_name_di[di]]\n \n \n if di in ds.variables:\n # if the desired key is already present do nothing\n if printing:\n print(\"Skipped renaming for [%s]. Name already correct.\" % di)\n \n else:\n \n # Track if the dimension was renamed already...\n # For some source ids (e.g. CNRM-ESM2-1) the key 'x':['x', 'lon'], leads to problems\n # because it renames the 2d lon in the gr grid into x. Ill try to fix this, below.\n # But longterm its probably better to go and put out another rename dict for gn and \n # go back to not support lists of dim names. \n \n # For now just stop in the list if the dimension is already there, or one 'hit'\n # was already encountered.\n trigger = False\n for wrong in dim_name_di[di]:\n if printing:\n print('Processing %s. Trying to replace %s' %(di, wrong))\n if wrong in ds.variables or wrong in ds.dims:\n if not trigger:\n if debug:\n print('Changing %s to %s' %(wrong, di))\n ds = ds.rename({wrong: di})\n trigger = True\n if printing:\n print('Renamed.')\n else:\n if wrong is None:\n if printing:\n print(\"No variable available for [%s]\" % di)\n return ds", "def strftime_localized(dtime, format): # pylint: disable=redefined-builtin\r\n\r\n if format == \"SHORT_DATE\":\r\n format = \"%x\"\r\n elif format == \"LONG_DATE\":\r\n # Translators: the translation for \"LONG_DATE_FORMAT\" must be a format\r\n # string for formatting dates in a long form. 
For example, the\r\n # American English form is \"%A, %B %d %Y\".\r\n # See http://strftime.org for details.\r\n format = ugettext(\"LONG_DATE_FORMAT\")\r\n if format == \"LONG_DATE_FORMAT\":\r\n format = DEFAULT_LONG_DATE_FORMAT\r\n elif format == \"DATE_TIME\":\r\n # Translators: the translation for \"DATE_TIME_FORMAT\" must be a format\r\n # string for formatting dates with times. For example, the American\r\n # English form is \"%b %d, %Y at %H:%M\".\r\n # See http://strftime.org for details.\r\n format = ugettext(\"DATE_TIME_FORMAT\")\r\n if format == \"DATE_TIME_FORMAT\":\r\n format = DEFAULT_DATE_TIME_FORMAT\r\n elif format == \"TIME\":\r\n format = \"%X\"\r\n\r\n def process_percent_code(match):\r\n \"\"\"\r\n Convert one percent-prefixed code in the format string.\r\n\r\n Called by re.sub just below.\r\n\r\n \"\"\"\r\n code = match.group()\r\n if code == \"%\":\r\n # This only happens if the string ends with a %, which is not legal.\r\n raise ValueError(\"strftime format ends with raw %\")\r\n\r\n if code == \"%a\":\r\n part = pgettext('abbreviated weekday name', WEEKDAYS_ABBREVIATED[dtime.weekday()])\r\n elif code == \"%A\":\r\n part = pgettext('weekday name', WEEKDAYS[dtime.weekday()])\r\n elif code == \"%b\":\r\n part = pgettext('abbreviated month name', MONTHS_ABBREVIATED[dtime.month])\r\n elif code == \"%B\":\r\n part = pgettext('month name', MONTHS[dtime.month])\r\n elif code == \"%p\":\r\n part = pgettext('am/pm indicator', AM_PM[dtime.hour // 12])\r\n elif code == \"%x\":\r\n # Get the localized short date format, and recurse.\r\n # Translators: the translation for \"SHORT_DATE_FORMAT\" must be a\r\n # format string for formatting dates in a brief form. For example,\r\n # the American English form is \"%b %d %Y\".\r\n # See http://strftime.org for details.\r\n actual_format = ugettext(\"SHORT_DATE_FORMAT\")\r\n if actual_format == \"SHORT_DATE_FORMAT\":\r\n actual_format = DEFAULT_SHORT_DATE_FORMAT\r\n if \"%x\" in actual_format:\r\n # Prevent infinite accidental recursion.\r\n actual_format = DEFAULT_SHORT_DATE_FORMAT\r\n part = strftime_localized(dtime, actual_format)\r\n elif code == \"%X\":\r\n # Get the localized time format, and recurse.\r\n # Translators: the translation for \"TIME_FORMAT\" must be a format\r\n # string for formatting times. For example, the American English\r\n # form is \"%H:%M:%S\". 
See http://strftime.org for details.\r\n actual_format = ugettext(\"TIME_FORMAT\")\r\n if actual_format == \"TIME_FORMAT\":\r\n actual_format = DEFAULT_TIME_FORMAT\r\n if \"%X\" in actual_format:\r\n # Prevent infinite accidental recursion.\r\n actual_format = DEFAULT_TIME_FORMAT\r\n part = strftime_localized(dtime, actual_format)\r\n else:\r\n # All the other format codes: just let built-in strftime take\r\n # care of them.\r\n part = dtime.strftime(code)\r\n\r\n return part\r\n\r\n formatted_date = re.sub(r\"%.|%\", process_percent_code, format)\r\n return formatted_date", "def format(fmt, st):\n ret = \"\"\n if not st: return ret\n if fmt not in valid_combos:\n return st\n cm = charmap[fmt]\n for c in st:\n ret += cm.get(c, c)\n return ret", "def normalize_format(fmt):\n # Remove shape '()' at the forefront which is equivalent to an scalar\n if fmt[:2] == '()':\n fmt = fmt[2:]\n # Accept 'S' as a synonym of 'a'\n if fmt.find('S') >= 0:\n fmt = fmt.replace('S', 'a')\n return fmt", "def updateParameters(self, parameters):\n if parameters[0].value and parameters[3].value:\n if (parameters[0].altered or paramaters[3].altered) and not parameters[4].altered:\n layer = parameters[0].valueAsText;\n desc = arcpy.Describe(layer)\n name = desc.file;\n type = parameters[3].valueAsText;\n char = type[:1];\n if (char != 'U'):\n if (char != 'C'):\n char = 'C' + char; #Output _C + first letter of type unless it is U\n else:\n char = 'CT'; # Unless it is C, then it is CT... \n #Update name accordingly\n resulttmp = \"%WORKSPACE%\\\\\" + name + \"_\" + char; \n parameters[4].value = resulttmp.replace(\".\",\"\"); #Remove illegal characters\n return" ]
[ "0.7112868", "0.6138306", "0.60262024", "0.60064745", "0.5947875", "0.5682744", "0.5643566", "0.5574067", "0.5571616", "0.5536187", "0.5533459", "0.545436", "0.5446608", "0.5419904", "0.539936", "0.53418016", "0.5337642", "0.5330448", "0.53120375", "0.5303506", "0.5267097", "0.52625644", "0.52379227", "0.5234319", "0.52196205", "0.5217962", "0.5208606", "0.5203752", "0.52023137", "0.51803064", "0.5175924", "0.5151957", "0.5139591", "0.51289487", "0.512607", "0.5120324", "0.51127154", "0.5107696", "0.51042634", "0.5104196", "0.5090387", "0.50823325", "0.5080351", "0.50786734", "0.5078603", "0.50722677", "0.50703067", "0.50697726", "0.50691885", "0.50682545", "0.5064103", "0.50395876", "0.50289315", "0.5018929", "0.5005409", "0.4994267", "0.4992165", "0.49861625", "0.49825096", "0.49760148", "0.49713245", "0.4967435", "0.49671316", "0.49574465", "0.49536112", "0.49519706", "0.4949248", "0.49451312", "0.49450988", "0.49409023", "0.49384296", "0.49287224", "0.49277726", "0.4924541", "0.4913871", "0.49107614", "0.49094462", "0.49032193", "0.4890608", "0.4882596", "0.48779368", "0.48692435", "0.4867755", "0.48598078", "0.4855166", "0.48498067", "0.48481545", "0.4839047", "0.48306677", "0.48281664", "0.48144346", "0.48131162", "0.48094463", "0.48085898", "0.47987106", "0.4798335", "0.479736", "0.47936144", "0.4781583", "0.47755322" ]
0.6070207
2
Update a format string, adding formats if they are not already present.
def update_placeholder_formats(self, format_string, placeholder_formats): # Tokenize the format string and process them output = [] for token in self.tokens(format_string): if ( token.group("placeholder") and (not token.group("format")) and token.group("key") in placeholder_formats ): output.append(f"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}") continue value = token.group(0) output.append(value) return "".join(output)
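A minimal usage sketch of the document above, for illustration only. The tokens method is not shown in this row, so the regex-based tokenizer below (and the Formatter class name) are assumptions chosen to reproduce the named groups "placeholder", "format", and "key" that the method relies on; the point is that only placeholders carrying no explicit format pick one up from the mapping.

import re

class Formatter:
    # Hypothetical tokenizer: the first alternative matches a {key} or
    # {key:fmt} placeholder; the others match literal text or a stray brace.
    _token_re = re.compile(
        r"(?P<placeholder>\{(?P<key>[a-zA-Z_]\w*)(?P<format>:[^}]*)?\})"
        r"|[^{]+|\{"
    )

    def tokens(self, format_string):
        return self._token_re.finditer(format_string)

    def update_placeholder_formats(self, format_string, placeholder_formats):
        # Same logic as the document: add a format only where none is present.
        output = []
        for token in self.tokens(format_string):
            if (
                token.group("placeholder")
                and (not token.group("format"))
                and token.group("key") in placeholder_formats
            ):
                output.append(f"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}")
                continue
            output.append(token.group(0))
        return "".join(output)

f = Formatter()
# "{used:.1f}" already carries a format, so only "{total}" gets one added:
print(f.update_placeholder_formats("{used:.1f}/{total} GB", {"total": ":.1f"}))
# -> {used:.1f}/{total:.1f} GB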
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddFormat(self, format):\n self._legacy = False\n if format:\n self._format = format", "def addIfMissing(self, format):\n self.setdefault(format.name, format)", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def sformatf(cls, msg, *args):\n #formats = {\"%t\": \"%d\", \"%0t\": \"%0d\"}\n #for s in formats:\n # msg = msg.replace(s, formats[s])\n #return sformatf(msg, *args)\n # TODO substitute old types %s/%d etc with {}\n #new_msg = cls.STR_RE.sub(r'{:\\1}', msg)\n #print(\"new_msg is \" + new_msg)\n for s in cls.formats:\n if s == \"%h\" or s == \"%0h\":\n msg = msg.replace(s, \"{:X}\")\n else:\n msg = msg.replace(s, \"{}\")\n return msg.format(*args)", "def add_to_format(existing_format, dict_of_properties, workbook):\n new_dict={}\n for key, value in existing_format.__dict__.iteritems():\n if (value != 0) and (value != {}) and (value != None):\n new_dict[key]=value\n del new_dict['escapes']\n\n return(workbook.add_format(dict(new_dict.items() + dict_of_properties.items())))", "def add_format(self, key, value=None):\n if key in self.FORMAT:\n return\n self.FORMAT.append(key)\n if value is not None:\n for call in self:\n call.data.setdefault(key, value)", "def strfdate(self, fmt):\n pattern = r'%({})'.format(reduce(lambda x, y: '{}|{}'.format(x, y), FORMAT_MAP.keys()))\n for f in re.findall(pattern, fmt):\n fmt = fmt.replace('%{}'.format(f), FORMAT_MAP[f](self))\n return fmt", "def asformat(self, format):", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def _add_format_spec(self, fmt_name_key: str, fmt_spec : gsfmt.CellFormat):\n if not len(fmt_name_key) or not isinstance(fmt_spec, gsfmt.CellFormat):\n raise (ValueError, \"Invalid format specification data\")\n else:\n 
self.format_specs[fmt_name_key] = fmt_spec", "def reset_format(self):\n ## Formatters\n self._format_setters(*self.format_set_info)\n self._format_getters(*self.format_get_info)\n self._format_joining_functions()", "def set_format(self, fmt):\n\n if \"{message}\" not in fmt:\n raise ValueError(\"Defining a log format (%s) that doesn't contain '{message}'\" % fmt)\n\n self._fmt = fmt", "def initFormat(self):\n self.formatList = []", "def register_format(recipe):\n afr = AFMFormatRecipe(recipe)\n formats_available.append(afr)\n # suffix\n if afr.suffix not in formats_by_suffix:\n formats_by_suffix[afr.suffix] = []\n formats_by_suffix[afr.suffix].append(afr)\n # mode\n if afr.mode not in formats_by_mode:\n formats_by_mode[afr.mode] = []\n formats_by_mode[afr.mode].append(afr)\n # supported extensions\n if afr.suffix not in supported_extensions: # avoid duplucates\n supported_extensions.append(afr.suffix)\n supported_extensions.sort()", "def _fix_fmts(self, labname, mapping):\n default_fmt_widths = self._default_fmt_widths\n \n indexes = [i for i in range(self._nvar) if self._lbllist[i] == labname]\n if indexes == []: return\n lab_size = max([len(v) for k, v in mapping.items()])\n fmtlist = self._fmtlist\n typlist = self._typlist\n isstrvar = self._isstrvar\n for i in indexes:\n if isstrvar(i):\n continue # string values should not be labeled\n old_fmt = fmtlist[i]\n # check match agains numerical format\n match = NUM_FMT_RE.match(old_fmt)\n if match:\n fmt_width = int(match.group(3))\n if fmt_width < lab_size:\n prefix = ('%' + (match.group(1) or '') + \n (match.group(2) or ''))\n suffix = (match.group(4) + match.group(5) + \n match.group(6) + (match.group(7) or ''))\n new_fmt = prefix + str(lab_size) + suffix\n fmtlist[i] = new_fmt\n self._changed = True\n elif TIME_FMT_RE.match(old_fmt) or TB_FMT_RE.match(old_fmt):\n continue\n else: \n # Here, some garbled format must have been entered. 
\n # More effort could be made to identify intended format, \n # but instead we'll just paint over it.\n fmt_width = default_fmt_widths[typlist[i]]\n fmtlist[i] = '%' + str(max((lab_size,fmt_width))) + '.0g'\n self._changed = True", "def format(self, varnames, fmts):\n varnames = self._find_vars(varnames, empty_ok=False)\n indexes = list(map(self._varlist.index, varnames))\n \n # check that fmts are specified properly\n if isinstance(fmts, str):\n fmts = fmts.split()\n else:\n if ( not isinstance(fmts, collections.Iterable) \n or not all(isinstance(f, str) for f in fmts) ):\n raise TypeError(\"given fmts must be str or iterable of str\")\n fmts = [x for s in fmts for x in s.split()]\n if len(fmts) == 0:\n raise ValueError(\"no formats specified\")\n \n # check fmts for validity\n is_valid = self._is_valid_fmt\n if not all(is_valid(fmt) for fmt in fmts):\n bad_fmts = \" \".join(fmt for fmt in fmts if not is_valid(fmt))\n raise ValueError(\"invalid formats: \" + bad_fmts)\n \n # pad fmts if necessary \n nvarnames = len(varnames)\n nfmts = len(fmts)\n if nfmts < nvarnames:\n fmts = list(fmts) + [fmts[-1]]*(nvarnames - nfmts)\n \n # check that formats match Stata types\n #typlist = self._typlist\n isstrvar = self._isstrvar\n if not all(isstrvar(i) == bool(STR_FMT_RE.match(fmt))\n for i, fmt in zip(indexes, fmts)):\n raise ValueError(\"format does not match Stata variable type\")\n \n # replace fmts (extras, if any, don't get used)\n for i, fmt in zip(indexes, fmts):\n self._fmtlist[i] = fmt\n \n # assume there are changes\n self._changed = True", "def update_placeholders(self, format_string, placeholders):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if token.group(\"key\") in placeholders:\n output.append(\n \"{{{}{}}}\".format(placeholders[token.group(\"key\")], token.group(\"format\"))\n )\n continue\n elif token.group(\"command\"):\n # update any placeholders used in commands\n commands = parse_qsl(token.group(\"command\"), keep_blank_values=True)\n # placeholders only used in `if`\n if \"if\" in [x[0] for x in commands]:\n items = []\n for key, value in commands:\n if key == \"if\":\n # we have to rebuild from the parts we have\n condition = Condition(value)\n variable = condition.variable\n if variable in placeholders:\n variable = placeholders[variable]\n # negation via `!`\n not_ = \"!\" if not condition.default else \"\"\n condition_ = condition.condition or \"\"\n # if there is no condition then there is no\n # value\n if condition_:\n value_ = condition.value\n else:\n value_ = \"\"\n value = \"{}{}{}{}\".format(not_, variable, condition_, value_)\n if value:\n items.append(f\"{key}={value}\")\n else:\n items.append(key)\n\n # we cannot use urlencode because it will escape things\n # like `!`\n output.append(r\"\\?{} \".format(\"&\".join(items)))\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def update_column_format(self):\n pass", "def format(fmt, st):\n ret = \"\"\n if not st: return ret\n if fmt not in valid_combos:\n return st\n cm = charmap[fmt]\n for c in st:\n ret += cm.get(c, c)\n return ret", "def format(self, fmt):\n if fmt not in [\".jpg\", \"jpg\", \".png\", \"png\"]:\n raise ValueError(f\"Invalid format: {fmt}\")\n if fmt.find(\".\") != 0:\n fmt = \".\" + fmt\n self._format = fmt", "def update_format(self, record):\n prefix = \"\\u001b[\"\n color = f\"{prefix}{self.color_map[record.levelno]}m\"\n bold = f\"{prefix}1m\"\n gray = f\"{prefix}1m{prefix}30m\"\n reset = 
f\"{prefix}0m\"\n self._style._fmt = (\n f\"%(asctime)s\"\n f\" {gray}│{reset} {color}%(levelname)-8s{reset} {gray}│{reset} \"\n )\n if hasattr(record, \"function\"):\n self._style._fmt += (\n f\"{gray}%(indent)s{reset}\"\n f\"{bold}%(function)s{reset}{gray}:{reset}\"\n \" %(message)s\"\n )\n else:\n self._style._fmt += \"%(indent)s%(message)s\"", "def opt_format(self, fmt):\n key = get_enum_key(fmt, FORMATTERS)\n if key is not None:\n self.conf[\"format\"] = key\n print(\"Set format %r\" % key)\n else:\n print(\"Unknown format %r\" % fmt)", "def set_formatstring(self, formatstring):\n self._logger_formatstring = formatstring\n\n # Assign the formatter to each handler.\n self._logger_file_handler.setFormatter(self._logger_formatstring)\n self._logger_stream_handler.setFormatter(self._logger_formatstring)", "def reformat(self, newformat):\n # check whether the column is defined\n if self._defined:\n # get the appropriate null-format\n nullformat = self._get_nullformat(newformat)\n # set the new formats\n self._format = [newformat, nullformat]\n else:\n # first the column type must be defined\n raise Exception('The data type of this column is not yet defined!')", "def __format__(self, formatstr):\n if formatstr.strip() == '': # Default behaviour mirrors self.__str__()\n formatstr = '+.3f'\n\n string = \\\n \"{:\" + formatstr +\"} \" + \\\n \"{:\" + formatstr +\"}i \" + \\\n \"{:\" + formatstr +\"}j \" + \\\n \"{:\" + formatstr +\"}k\"\n return string.format(self.q[0], self.q[1], self.q[2], self.q[3])", "def __format__(self, fmt):\n if not isinstance(fmt, str):\n raise TypeError(\"must be str, not %s\" % type(fmt).__name__)\n if len(fmt) != 0:\n return self.strftime(fmt)\n return str(self)", "def to_format(self, format_string: str) -> str:\n return self.strftime(format_string)", "def __init__(self, format_string):\r\n if not isinstance(format_string, Compatibility.string):\r\n raise TypeError('format_string should be a string, instead got %s' % type(format_string))\r\n self._re_pattern, self._applicators = self._preprocess_format_string(format_string)\r\n self._re = re.compile(self._re_pattern)", "def extension (formatStr):\n assert False, \"TODO:\"", "def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str", "def register_string_format(name: str, strategy: st.SearchStrategy) -> None:\n if not isinstance(name, str):\n raise TypeError(f\"name must be of type {str}, not {type(name)}\")\n if not isinstance(strategy, st.SearchStrategy):\n raise TypeError(f\"strategy must be of type {st.SearchStrategy}, not {type(strategy)}\")\n\n STRING_FORMATS[name] = strategy", "def newFormatter(*args, **kw):\n originalResult = originalFormatter(*args, **kw)\n if all():\n originalResult += ' %r' % all()\n return originalResult", "def add_formats(self, formats):\n self._formats = []\n layout = self.file_formats_container.layout()\n for i in range(layout.count()):\n layout.itemAt(i).widget().close()\n\n self._formats_as_string = \"\"\n for format in formats:\n self._formats_as_string += \" \" + format\n cb = QCheckBox(format, self)\n cb.setMinimumWidth(100)\n cb.setStyleSheet(\"color: white\")\n\n if format == \"MP3\":\n cb.setChecked(True)\n\n self._formats.append(cb)\n self.file_formats_container.layout().addWidget(cb)", "def w__format(self, string):\n s = ''\n for i in range(0, len(string) - 1, 2):\n s = s + \"%03s\" % string[i:i + 2]\n return s[1:]", "def __format__(self, format_spec):\n # Reject anything that isn't an s\n if
format_spec[-1] != 's':\n raise ValueError('{} format specifier not understood for this object',\n format_spec[:-1])\n # Output in this example will be (<a>,<b>,<c>)\n raw = \"(\" + \",\".join([str(self.a), str(self.b), str(self.c)]) + \")\"\n # Honor the format language by using the inbuilt string format\n # Since we know the original format_spec ends in an 's'\n # we can take advantage of the str.format method with a\n # string argument we constructed above\n return \"{r:{f}}\".format( r=raw, f=format_spec )", "def format_to_extension(self, format):", "def format(self, format):\n\n self._format = format", "def format(self, format):\n\n self._format = format", "def adjust_date_format(date, format_in, format_out):\n return datetime.strptime(date, format_in).strftime(format_out)", "def format(\n self,\n format_string,\n module=None,\n param_dict=None,\n force_composite=False,\n attr_getter=None,\n ):\n if param_dict is None:\n param_dict = {}\n\n # if the processed format string is not in the cache then create it.\n if format_string not in self.block_cache:\n self.build_block(format_string)\n\n first_block = self.block_cache[format_string]\n\n def get_parameter(key):\n \"\"\"\n function that finds and returns the value for a placeholder.\n \"\"\"\n if key in param_dict:\n # was a supplied parameter\n param = param_dict.get(key)\n elif module and hasattr(module, key):\n param = getattr(module, key)\n if hasattr(param, \"__call__\"):\n # we don't allow module methods\n raise Exception()\n elif attr_getter:\n # get value from attr_getter function\n try:\n param = attr_getter(key)\n except: # noqa e722\n raise Exception()\n else:\n raise Exception()\n if isinstance(param, Composite):\n if param.text():\n param = param.copy()\n else:\n param = \"\"\n return param\n\n # render our processed format\n valid, output = first_block.render(get_parameter, module)\n\n # clean things up a little\n if isinstance(output, list):\n output = Composite(output)\n if not output:\n if force_composite:\n output = Composite()\n else:\n output = \"\"\n\n return output", "def _format(val, valtype, floatfmt, intfmt, missingval=\"\", has_invisible=True): # noqa\n if val is None:\n return missingval\n\n if valtype is str:\n return f\"{val}\"\n elif valtype is int:\n return format(val, intfmt)\n elif valtype is bytes:\n try:\n return str(val, \"ascii\")\n except (TypeError, UnicodeDecodeError):\n return str(val)\n elif valtype is float:\n is_a_colored_number = has_invisible and isinstance(val, (str, bytes))\n if is_a_colored_number:\n raw_val = _strip_ansi(val)\n formatted_val = format(float(raw_val), floatfmt)\n return val.replace(raw_val, formatted_val)\n else:\n return format(float(val), floatfmt)\n else:\n return f\"{val}\"", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def setInputFormats(self, value):\n return self._set(inputFormats=value)", "def read_fmt(bib_name, bib_file):\n cache_name, formatted_cache_name = _cache_name(bib_name, bib_file)\n\n try:\n meta_data, formatted_entries = cache.read_global(formatted_cache_name)\n except:\n raise cache.CacheMiss()\n\n # raise a cache miss if the modification took place after the caching\n modified_time = os.path.getmtime(bib_file)\n if modified_time > meta_data[\"cache_time\"]:\n raise cache.CacheMiss()\n\n # validate the version and format strings are still valid\n if (meta_data[\"version\"] != _VERSION or\n any(meta_data[s] != get_setting(\"cite_\" + s)\n for s in [\"panel_format\", 
\"autocomplete_format\"])):\n print(\"Formatting string has changed, updating cache...\")\n # read the base information from the unformatted cache\n current_time, bib_entries = cache.read_global(cache_name)\n # format and cache the entries\n formatted_entries = _create_formatted_entries(formatted_cache_name,\n bib_entries,\n current_time)\n\n return formatted_entries", "def reformat(ctx):\n pass", "def _get_nullformat(self, newformat):\n if self._type == int:\n length = len(str(newformat % 1))\n return '%'+str(length)+'s'\n elif self._type == float:\n length = len(str(newformat % 1.0))\n return '%'+str(length)+'s'\n else:\n return newformat", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def guess_format(string):\n format_regexps = _compiled_format_regexps(_date_formats, _time_formats)\n for format, regexp in format_regexps:\n if regexp.search(string):\n return format\n # Nothing matched\n raise CannotParse(\"Could not guess date/time format in: %s\" % string)", "def set_format_by_type(self, value, format):\n self.set_render_func_by_type(value, format.format)", "def format(*args, stringArg: Union[AnyStr, List[AnyStr]]=\"\", **kwargs)->AnyStr:\n pass", "def date_string_to_strftime_format(date_string):\n for data in STRF_DATA:\n for pattern in data.get('patterns', []):\n if pattern in date_string:\n date_string = date_string.replace(pattern, data['replacement'])\n break\n else:\n if data.get('regex'):\n date_string = re.sub(data['regex'], data['replacement'], date_string)\n # matches = find()\n # if any(matches):\n # date_string = date_string.replace(matches[0], data['replacement'])\n\n return date_string", "def _set_real_format(self, fmt):\n # try to use the _nomax variant if available\n if not self._max and fmt + '_nomax' in self.formats:\n self._format = self.formats[fmt + '_nomax']\n elif fmt in self.formats:\n self._format = self.formats[fmt]\n else:\n self._format = fmt\n\n self._format_line_count = self._format.count('\\n')", "def setFormat( self, fmt, style = '{' ):\n formatter = logging.Formatter( fmt, style = style )\n for handler in self.logger.handlers:\n handler.setFormatter( formatter )", "def format_cell_updated(self, cell, value=None):\n self.is_not_used()\n if value is not None:\n cell.value = value\n\n cell.fill = PatternFill(start_color='7fffd4', end_color='7fffd4', fill_type='solid')\n cell.font = Font(name='Ubuntu', size=11, color='555555', bold=False, italic=False)", "def change_file_format(filename, old_format_extension, new_format_extension, append = ''):\r\n filename = unmake_file_format(filename, old_format_extension)\r\n filename += append + new_format_extension\r\n \r\n return(filename)", "def formatter(self, formatString=\"{old} {new}\"):\n lines = []\n for url in self.redirects.keys():\n for match in self.redirects[url]:\n parsed = urlparse(url)\n\n netloc = parsed.netloc\n if self.subdomain:\n netloc = netloc[(netloc.find(\".\")+1):]\n\n prefix = \"{0}://{1}\".format(parsed.scheme, netloc)\n\n lines.append(formatString.format(old=url, oldPath=parsed.path,\n prefix=prefix, new=match))\n return \"\\n\".join([line for line in lines])", 
"def initFormat(self):\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList\n if sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '", "def format(self, extra=None, *args, **kwargs):\n if extra is not None:\n for key, value in extra.items():\n if key not in kwargs:\n kwargs[key] = value\n return super(Format, self).format(self.format_string, *args, **kwargs)", "def strpdate(cls, string, fmt=\"%Y/%m/%d\", lang='eng'):\n pattern = r'%({})'.format(reduce(lambda x, y: '{}|{}'.format(x, y), FORMAT_MAP.keys()))\n params = re.findall(pattern, fmt)\n if len(params) != len(set(params)):\n raise ValueError(\"Duplicate format specifier not allowed.\")\n for f in params:\n if f == 'Y':\n fmt = fmt.replace('%{}'.format(f), r'(?P<year>\\d{4})')\n elif f == 'y':\n fmt = fmt.replace('%{}'.format(f), r'(?P<year>\\d{2})')\n elif f == 'm':\n fmt = fmt.replace('%{}'.format(f), r'(?P<month>\\d{1,2})')\n elif f == 'd':\n fmt = fmt.replace('%{}'.format(f), r'(?P<day>\\d{1,2})')\n fmt = '^{}$'.format(fmt)\n if re.match(fmt, string) is None:\n raise ValueError('Mismatch in \"string\" and \"fmt\".')\n _ = {**MIN_DATE, 'lang': lang}\n _.update(re.match(fmt, string).groupdict())\n return cls(**_)", "def initFormat(self):\n pass", "def setFormattedStrings(object, event):\n entry = interfaces.IBiblatexEntry(object) # assert a biblatex entry object\n generator = interfaces.IFormattedEntryGenerator(entry)\n writer = interfaces.IWriteFormatted(entry)\n config = zope.component.queryUtility(\n interfaces.IBiblatexConfiguration, \n context = object)\n if config:\n languages = config.languages\n styles = config.styles\n else:\n languages = styles = (None,)\n for language in languages:\n for style in styles:\n #raise Exception(u\"language: %s, style: %s\" % (language, style))\n generator.setUp(language = language, style = style)\n generator.generate()\n writer.setBibliographicEntry(generator.getBibliographicEntry(), language, style)\n writer.setCitation(generator.getCitation(), language, style)\n writer.setCitationAgain(generator.getCitationAgain(), language, style)\n generator.tearDown()\n del generator\n del writer", "def replace_param(string, param, value, param_format=None):\n\n if param_format == \"json\":\n return sub(r\"(?P<json_replacement>\\\"%s\\\"\\s*:\\s*)\\\"\\s*\\\"\" %\n escape(str(param)), \"\\\\1\\\"%s\\\"\" % value, string)\n elif param_format == \"header\":\n return sub(r\"%s=[^\\\\n]*\" % escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)\n else:\n return sub(r\"%s=[^&]*\" % escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)", "def format(self, *args, **kwargs) -> String:\n pass", "def edit_format(sample, **kwargs):\n n_call_data = vcf.model.make_calldata_tuple(sample.data._fields + tuple(kwargs.keys()))\n n_data = tuple(kwargs.values())\n sample.data += n_data\n sample.data = n_call_data(*sample.data)", "def add_format(vcf_file, nid, num, ntype, desc):\n # pylint: disable=protected-access\n vcf_file.formats[nid] = vcf.parser._Format(id=nid, num=num, type=ntype, desc=desc)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": 
\"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def apply_formatting_dict(obj: Any, formatting: Dict[str, Any]) -> Any:\n # logger.debug(\"Processing object of type {}\".format(type(obj)))\n new_obj = obj\n\n if isinstance(obj, str):\n # Apply the formatting options to the string.\n # We explicitly allow for missing keys. They will be kept so they can be filled later.\n # see: https://stackoverflow.com/a/17215533\n # If a more sophisticated solution is needed,\n # see: https://ashwch.github.io/handling-missing-keys-in-str-format-map.html\n # Note that we can't use format_map because it is python 3.2+ only.\n # The solution below works in py 2/3\n if \"$\" not in obj:\n new_obj = string.Formatter().vformat(obj, (), formatting_dict(**formatting))\n # else:\n # logger.debug(\"Skipping str {} since it appears to be a latex string, which may break the formatting.\".format(obj))\n elif isinstance(obj, dict):\n new_obj = {}\n for k, v in obj.items():\n # Using indirect access to ensure that the original object is updated.\n new_obj[k] = apply_formatting_dict(v, formatting)\n elif isinstance(obj, list):\n new_obj = []\n for i, el in enumerate(obj):\n # Using indirect access to ensure that the original object is updated.\n new_obj.append(apply_formatting_dict(el, formatting))\n elif isinstance(obj, int) or isinstance(obj, float) or obj is None:\n # Skip over this, as there is nothing to be done - we just keep the value.\n pass\n elif isinstance(obj, enum.Enum):\n # Skip over this, as there is nothing to be done - we just keep the value.\n # This only occurs when a formatting value has already been transformed\n # into an enumeration.\n pass\n else:\n # This may or may not be expected, depending on the particular value.\n logger.debug(f\"Unrecognized obj '{obj}' of type '{type(obj)}'\")\n\n return new_obj", "def update_format_string(self):\n if self._show_units:\n units = \" {}\".format(self._unit)\n else:\n units = \"\"\n\n if self._show_step_exponent:\n self.setSuffix(\"{0} Step: 1E{1}\".format(units, self.step_exponent))\n self.lineEdit().setToolTip(\"\")\n else:\n self.setSuffix(units)\n self.lineEdit().setToolTip('Step: 1E{0:+d}'.format(self.step_exponent))", "def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n 
int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)", "def filter_formatdate(val, format_str):\n if not isinstance(val, (datetime, date, time)):\n return val\n return val.strftime(format_str)", "def __makeFormatString(self):\n self.__formatString = \"\"\n for f in self.__columns:\n self.__formatString += \"%(\"+ f + \")-\" + str(self.__widths[f]) + \\\n \"s \"", "def add_formatter(self, fmt):\n if fmt and not isfunction(fmt):\n raise TypeError(\"custom format function must be a type of function\")\n\n if fmt and fmt.__code__.co_argcount < 2:\n raise TypeError(\"custom format function requires at least 2 arguments\")\n\n self.formatter = fmt", "def _resolve_sse_format(self, sse_format, **kwargs):\n sse_format_int = [x[-1] for x in pytan.constants.SSE_FORMAT_MAP if sse_format.lower() in x]\n\n if not sse_format_int:\n m = \"Unsupport export format {!r}, must be one of:\\n{}\".format\n ef_map_txt = '\\n'.join(\n [', '.join(['{!r}'.format(x) for x in y]) for y in pytan.constants.SSE_FORMAT_MAP]\n )\n raise pytan.exceptions.HandlerError(m(sse_format, ef_map_txt))\n\n sse_format_int = sse_format_int[0]\n\n m = \"'sse_format resolved from '{}' to '{}'\".format\n self.mylog.debug(m(sse_format, sse_format_int))\n\n self._check_sse_format_support(\n sse_format=sse_format, sse_format_int=sse_format_int, **kwargs\n )\n\n return sse_format_int", "def format(self, record):\n\n\n if not hasattr(record, 'filename_'):\n record.file_indicator = '-'\n else:\n record.file_indicator = os.path.relpath(record.filename_.strip(),\n self.study_dir)\n record.line_indicator = self.format_aggregated(\n record,\n 'line_number',\n ' line %d:',\n ' lines [%s]:',\n optional=True)\n record.column_indicator = self.format_aggregated(\n record,\n 'column_number',\n ' column %d:',\n ' columns [%s]:',\n optional=True)\n record.cause_indicator = self.format_aggregated(\n record,\n 'cause',\n \"; value encountered: '%s'\",\n \"; values encountered: ['%s']\",\n join_string=\"', '\",\n optional=True)\n\n # format the string based on these fields\n formatted_result = super(LogfileStyleFormatter, self).format(record)\n\n # prepend an empty line if the filename is different than before\n current_filename = getattr(record, 'filename_', '')\n if (self.previous_filename is not None and\n current_filename != self.previous_filename):\n formatted_result = '\\n' + formatted_result\n self.previous_filename = current_filename\n\n return formatted_result", "def valueformat(value, format_list):\n\n # print(\"\\n\", format_list, value)\n concat_key = format_list.split('.')\n # Pass in either the key of the field\n # or pass in resource.key to enable a resource lookup.\n key = \"\"\n resource = \"\"\n member_id = \"\"\n key_sequence = [key, resource, member_id]\n count = 0\n for r in reversed(concat_key):\n key_sequence[count] = r\n count += 1\n\n # print(\"Concat_key:\", concat_key)\n key = key_sequence[0]\n resource = key_sequence[1]\n member_id = key_sequence[2]\n\n # print(\"Key:\", key)\n\n if key:\n if key.lower() == \"address\":\n return dt_address(value)\n\n elif key.lower() == \"telecom\":\n return dt_telecom(value)\n\n elif key.lower() == \"name\":\n return dt_name(value)\n elif key.lower() == 'dosage':\n return dt_dosage(value)\n elif key.lower() == 
'medicationreference':\n # print(\"Working on\", key, \": \", value)\n # f_value = value\n # lookup field_formats\n # concat_key should have a resource name\n # print(\"\\n\\nRESOURCE:\", resource)\n # print(\"calling dt_medicationreference with Resource:\", resource, \", value:\", value)\n return dt_medicationreference(value, member_id, resource)\n elif key.lower() == 'dataabsentreason':\n if isinstance(value, dict):\n return value['coding'][0]['display']\n else:\n return value\n elif key.lower() == 'valuequantity':\n # return str(value['value']) + \" \" + value['unit']\n return dt_valuequantity(value)\n elif key.lower() == 'valuestring':\n return value\n elif key.lower() == 'interpretation':\n return value['coding'][0]['display']\n elif key.lower() == 'referencerange':\n return dt_referencerange(value)\n elif key.lower() == 'requester':\n if 'display' in value['agent']:\n return dt_reference(value['agent'], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n # elif key.lower() == \"result\":\n # return dt_reference(value[0], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'participant':\n if 'display' in value[0]['individual']:\n return dt_reference(value[0]['individual'], member_id)\n elif key.lower() == 'location':\n if 'display' in value[0]['location']:\n return dt_reference(value[0]['location'], member_id)\n elif key.lower() == 'communication':\n return dt_communication(value)\n else:\n # print(\"value:\", value, \" type:\", type(value), \" for: \", key)\n return value", "def _printAttributePrintf(self, formatting, value):\n\n # multiple entrys\n if isinstance(formatting, list):\n\n for scanf_format in formatting:\n try:\n #print \"-->>\", scanf_format, value\n return scanf_format % value\n except TypeError, e:\n pass\n\n # single entry\n else:\n return formatting % value\n\n # problem if none of the formats worked\n raise TypeError(\"Valid format not found for values.\")", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def _add_report_formatting(self, cell_range: str, fmt_spec: gsfmt.CellFormat):\n self.report_formatting.append((cell_range, fmt_spec))", "def vformat(self, format_string, args, kwargs):\n self._used_kwargs = {}\n self._unused_kwargs = {}\n return super(MemorizeFormatter, self).vformat(format_string, args, kwargs)", "def _format(path, arformat):\n if isinstance(path, unicode):\n path = path.encode('utf-8')\n\n if path.startswith('#1/'):\n if not arformat:\n arformat = AR_FORMAT_BSD\n elif arformat is AR_FORMAT_SIMPLE:\n raise IOError('File name starts with special for format!')\n\n if len(path) >= 16:\n if arformat is None:\n arformat = 
AR_FORMAT_BSD\n elif arformat is AR_FORMAT_SIMPLE:\n raise IOError('File name too long for format!')\n\n if ' ' in path:\n if not arformat:\n arformat = AR_FORMAT_BSD\n elif arformat is AR_FORMAT_SIMPLE:\n raise IOError('File name contains forbidden character for format!')\n\n if arformat is None:\n arformat = AR_FORMAT_SIMPLE\n\n return arformat", "def merge(string: str, user_input: tuple) -> str:\n merged_string = string.format(*user_input)\n return merged_string", "def register_filename_format(format_name,parser):\n if format_name == \"ALL\":\n raise ValueError(\"filename format code ALL is reserved\")\n\n filename_format_parser[format_name] = parser", "def _format_msg(self, format_str, *args):\n if not args:\n format_str = six.moves.urllib.parse.unquote(format_str)\n return \"{} - - [{}] {}\\n\".format(\n self.client_address[0],\n self.log_date_time_string(),\n format_str % args\n )", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def format_string(s, formatter='minimal'):\n if not callable(formatter):\n formatter = get_formatter_for_name(formatter)\n if formatter is None:\n output = s\n else:\n output = formatter(s)\n return output", "def register_str_format(\n tag: Tag, conformer: Optional[Conformer] = None\n) -> Callable[[ValidatorFn], ValidatorFn]:\n\n def create_str_format(f: ValidatorFn) -> ValidatorFn:\n with _STR_FORMAT_LOCK:\n _STR_FORMATS[tag] = StrFormat(f, conformer=conformer)\n return f\n\n return create_str_format", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def setup(self, formats):\n for f in formats:\n try:\n self.counters[f] += 1\n except KeyError:\n self.counters[f] = 1\n self.vim.command(f\"set efm+={f}\")", "def dt_format_translate(pyfmt):\n\n translate = {\"%a\": \"ddd\",\n \"%A\": \"dddd\",\n \"%b\": \"mmm\",\n \"%B\": \"mmmm\",\n \"%c\": \"\",\n \"%d\": \"dd\",\n \"%f\": \"\",\n \"%H\": \"hh\",\n \"%I\": \"hh\",\n \"%j\": \"\",\n \"%m\": \"mm\",\n \"%M\": \"mm\",\n \"%p\": \"AM/PM\",\n \"%S\": \"ss\",\n \"%U\": \"\",\n \"%w\": \"\",\n \"%W\": \"\",\n \"%x\": \"\",\n \"%X\": \"\",\n \"%y\": \"yy\",\n \"%Y\": \"yyyy\",\n \"%z\": \"\",\n \"%Z\": \"\",\n \"%%\": \"%\"}\n\n xlfmt = str(pyfmt)\n\n for item in translate:\n if item in xlfmt:\n xlfmt = xlfmt.replace(item, translate[item])\n return xlfmt", "def __normalize(self, ctx: commands.Context, format: str) -> str:\n\t\t# convert to lowercase\n\t\tlower_format = format.lower()\n\t\t# check if inputted format is recognized\n\t\tif lower_format in self.formats:\n\t\t\treturn lower_format\n\t\t# check for aliases\n\t\telif lower_format in self.aliases:\n\t\t\treturn self.aliases[lower_format]\n\t\t# format is not recognized\n\t\telse:\n\t\t\traise FriendlyError(\n\t\t\t\tf\"'{format}' is not a recognized format.\", ctx.channel, ctx.author\n\t\t\t)", "def get_formatted_string(self, input_string):\n if isinstance(input_string, str):\n try:\n return self.get_processed_string(input_string)\n except KeyError as err:\n # Wrapping the KeyError into a less cryptic error for end-user\n # friendliness\n missing_key = err.args[0]\n raise KeyNotInContextError(\n f'Unable to format \\'{input_string}\\' with '\n f'{{{missing_key}}}, because '\n 
f'context[\\'{missing_key}\\'] doesn\\'t exist') from err\n else:\n raise TypeError(f\"can only format on strings. {input_string} is a \"\n f\"{type(input_string)} instead.\")", "def reformat():\n toolkit.reformat()", "def __format__(self, format_spec):\n if format_spec == \"polite\":\n return self.polite\n elif format_spec == \"casual\":\n return self.casual\n else:\n # Using string addition here to avoid triggering flake8-sfs\n # while still giving a meaningful self-contained example:\n raise ValueError(format_spec + \" not a format defined by Client object\")", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def format(self, message):", "def format_string(self, pat=None, pat_args={}):\n if pat is None:\n pat = self.parent.pat\n if pat_args == {}:\n pat_args = self.parent.pat_args\n return entry_format.output(self, pat, pat_args)", "def formats():\n return _FORMATS" ]
[ "0.64630926", "0.62091905", "0.5800091", "0.5796576", "0.5712765", "0.57114947", "0.57108057", "0.55900645", "0.5558063", "0.55575746", "0.543823", "0.5434389", "0.5409675", "0.54084057", "0.5407691", "0.53960705", "0.5389524", "0.5365024", "0.536046", "0.53591377", "0.5347189", "0.53174406", "0.5299757", "0.52616566", "0.5260935", "0.52346927", "0.52232057", "0.52054524", "0.52024364", "0.5178537", "0.5174698", "0.51715887", "0.5169623", "0.51565874", "0.51419294", "0.5110237", "0.5106162", "0.50872636", "0.50872636", "0.50857365", "0.50797033", "0.5068049", "0.5047103", "0.5045401", "0.5042234", "0.5029329", "0.5019787", "0.50191045", "0.49938348", "0.49809968", "0.49774867", "0.49749777", "0.49488375", "0.49193978", "0.4911113", "0.49104512", "0.49016646", "0.48896325", "0.4888514", "0.48864", "0.48852763", "0.48846358", "0.4882669", "0.48785567", "0.48667544", "0.4865224", "0.4838269", "0.48359227", "0.48305956", "0.48236126", "0.48159927", "0.47873333", "0.4786662", "0.47859585", "0.4772103", "0.47608912", "0.47545344", "0.47523347", "0.47488862", "0.4731774", "0.47315702", "0.47303045", "0.47083676", "0.47082552", "0.4707627", "0.4706674", "0.46773958", "0.46729234", "0.467078", "0.46677604", "0.46592674", "0.46567273", "0.46514422", "0.46440995", "0.4638231", "0.4636683", "0.4636164", "0.4630081", "0.46279675", "0.46245953" ]
0.6789656
0
Parse the format string into blocks containing Literals, Placeholders, etc. that we can cache and reuse.
def build_block(self, format_string):
    first_block = Block(None, py3_wrapper=self.py3_wrapper)
    block = first_block

    # Tokenize the format string and process the tokens
    for token in self.tokens(format_string):
        value = token.group(0)

        if token.group("block_start"):
            # Create new block
            block = block.new_block()
        elif token.group("block_end"):
            # Close block, setting any valid state as needed,
            # and return to the parent block to continue
            if not block.parent:
                raise Exception("Too many `]`")
            block = block.parent
        elif token.group("switch"):
            # a new option has been created
            block = block.switch()
        elif token.group("placeholder"):
            # Found a {placeholder}
            key = token.group("key")
            format = token.group("format")
            block.add(Placeholder(key, format))
        elif token.group("literal"):
            block.add(Literal(value))
        elif token.group("lost_brace"):
            # due to how parsing happens we can get a lonesome }
            # e.g. in format_string '{{something}'; this fixes that issue
            block.add(Literal(value))
        elif token.group("command"):
            # a block command has been found
            block.set_commands(token.group("command"))
        elif token.group("escaped"):
            # escaped characters are added as their unescaped values
            if value[0] in ["\\", "{", "}"]:
                value = value[1:]
            block.add(Literal(value))

    if block.parent:
        raise Exception("Block not closed")

    # add the parsed block tree to the cache
    self.block_cache[format_string] = first_block
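A minimal, self-contained sketch of the parse-once / cache / reuse pattern that build_block implements above. MiniFormatter, its reduced token regex, and the format helper are assumptions made for this sketch only, not the original module's API (the real formatter also handles nested [...] blocks, switches, commands, and escapes):

import re

# Hypothetical mini-formatter: only {placeholder} keys and literal text,
# no nested [] blocks, commands, or escapes, to keep the sketch short.
TOKEN_RE = re.compile(r"\{(?P<key>\w+)\}|(?P<literal>[^{}]+)")

class MiniFormatter:
    def __init__(self):
        self.block_cache = {}  # format_string -> parsed token list

    def build_block(self, format_string):
        # Parse the format string once into (kind, value) tokens.
        parsed = []
        for token in TOKEN_RE.finditer(format_string):
            if token.group("key"):
                parsed.append(("placeholder", token.group("key")))
            else:
                parsed.append(("literal", token.group("literal")))
        self.block_cache[format_string] = parsed

    def format(self, format_string, params):
        # Build the parsed form only on a cache miss, then reuse it.
        if format_string not in self.block_cache:
            self.build_block(format_string)
        return "".join(
            params.get(value, "") if kind == "placeholder" else value
            for kind, value in self.block_cache[format_string]
        )

f = MiniFormatter()
print(f.format("{artist} - {title}", {"artist": "Foo", "title": "Bar"}))  # Foo - Bar
print(f.format("{artist} - {title}", {"artist": "Baz", "title": "Qux"}))  # cache hit, no re-parse

The second call finds the parsed token list already in block_cache and skips tokenizing entirely, which is exactly the reuse the docstring above describes.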
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse(self, fmtstr):\n def _match_brace(string, start_pos, pair='[]'):\n \"\"\"Pairing brackets (used internally in _parse method)\"\"\"\n depth = 1\n if string[start_pos] != pair[0]:\n return None\n for index, char in enumerate(string[start_pos + 1:]):\n if char == pair[0]:\n depth += 1\n elif char == pair[1]:\n depth -= 1\n if depth == 0:\n return start_pos + index + 1\n return None\n\n #----------------------------------------------------------------------\n\n t_fmt = self.__class__._T_FMT\n t_prefix = self.__class__._T_PREFIX\n\n ptr = 0\n # it seems that field id 0 is invalid\n field_id = 1\n length = len(fmtstr)\n parsed_list = []\n\n while ptr < length:\n parsed = {}\n m_prefix = t_prefix.match(fmtstr[ptr:])\n if m_prefix:\n ptr += _get_length_of_match(m_prefix)\n parsed['prefix'] = m_prefix.group(1)\n\n # check if we have a nested structure\n if m_prefix.group(2):\n brace_offset = _match_brace(fmtstr, ptr - 1)\n\n # bracket not match\n if not brace_offset:\n raise BadFormatString(\n 'Unmatched brace on position {0}'.format(ptr)\n )\n parsed['field_id'] = field_id\n parsed['field_type'] = 'a'\n parsed['subcontent'] = self._parse(\n fmtstr[ptr:brace_offset]\n )\n ptr = brace_offset + 1\n field_id += 1\n\n parsed_list.append(parsed)\n continue\n m_fmt = t_fmt.match(fmtstr[ptr:])\n if m_fmt:\n ptr += _get_length_of_match(m_fmt)\n\n # fmt is an alias\n if m_fmt.group(2):\n parsed['field_type'] = self.__class__\\\n .FIELD_ALIAS[m_fmt.group(2)]\n # fmt is an actual field type\n elif m_fmt.group(1):\n parsed['field_type'] = m_fmt.group(1)\n\n # save field id\n parsed['field_id'] = field_id\n\n # check for type clones (e.g. `v3')\n if m_fmt.group(3):\n parsed['repeat'] = int(m_fmt.group(3))\n field_id += int(m_fmt.group(3))\n else:\n parsed['repeat'] = 1\n field_id += 1\n\n parsed_list.append(parsed)\n\n else:\n raise BadFormatString(\n 'Invalid token on position {0}'.format(ptr)\n )\n\n # all set\n return parsed_list", "def parse_blocks(fblocks):\n print('Parse blocks: ', end='')\n result = []\n\n for line in fblocks:\n stripped = line.strip()\n if len(stripped) > 0 and stripped[0] != '#':\n match = re.match(r\"([0-9A-F]+)\\.{2}([0-9A-F]+);\\s+(.+)\", stripped)\n result.append({\n 'begin': int(match.group(1), 16),\n 'end': int(match.group(2), 16),\n 'name': match.group(3)\n })\n\n print('done')\n return result", "def _interpolate(format):\n from tokenize import tokenprog\n\n def matchorfail(text, pos):\n match = tokenprog.match(text, pos)\n if match is None:\n raise _ItplError(text, pos)\n return match, match.end()\n\n namechars = \"abcdefghijklmnopqrstuvwxyz\" \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_\";\n chunks = []\n pos = 0\n\n while 1:\n dollar = format.find(\"$\", pos)\n if dollar < 0: \n break\n nextchar = format[dollar + 1]\n\n if nextchar == \"{\":\n chunks.append((0, format[pos:dollar]))\n pos, level = dollar + 2, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = match.regs[3]\n token = format[tstart:tend]\n if token == \"{\": \n level = level + 1\n elif token == \"}\": \n level = level - 1\n chunks.append((1, format[dollar + 2:pos - 1]))\n\n elif nextchar in namechars:\n chunks.append((0, format[pos:dollar]))\n match, pos = matchorfail(format, dollar + 1)\n while pos < len(format):\n if format[pos] == \".\" and \\\n pos + 1 < len(format) and format[pos + 1] in namechars:\n match, pos = matchorfail(format, pos + 1)\n elif format[pos] in \"([\":\n pos, level = pos + 1, 1\n while level:\n match, pos = matchorfail(format, pos)\n tstart, tend = 
match.regs[3]\n token = format[tstart:tend]\n if token[0] in \"([\": \n level = level + 1\n elif token[0] in \")]\": \n level = level - 1\n else: \n break\n chunks.append((1, format[dollar + 1:pos]))\n else:\n chunks.append((0, format[pos:dollar + 1]))\n pos = dollar + 1 + (nextchar == \"$\")\n\n if pos < len(format): \n chunks.append((0, format[pos:]))\n return chunks", "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def _parse(self, template, fpos=0):\r\n # blank out comments\r\n # (So that its content does not collide with other syntax, and\r\n # because removing them completely would falsify the character-\r\n # position (\"match.start()\") of error-messages)\r\n template = self._reComment.sub(lambda match: self._comment_start+\" \"*len(match.group(1))+match.group(2), template)\r\n\r\n # init parser\r\n parsetree = []\r\n curr = 0 # current position (= end of previous block)\r\n block_type = None # block type: if,for,macro,raw,...\r\n block_indent = None # None: single-line, >=0: multi-line\r\n\r\n # find blocks\r\n for match in self._reBlock.finditer(template):\r\n start = match.start()\r\n # process template-part before this block\r\n if start > curr:\r\n self._parse_sub(parsetree, template[curr:start], fpos)\r\n\r\n # analyze block syntax (incl. error-checking and -messages)\r\n keyword = None\r\n block = match.groupdict()\r\n pos__ = fpos + start # shortcut\r\n if block[\"sKeyw\"] is not None: # single-line block tag\r\n block_indent = None\r\n keyword = block[\"sKeyw\"]\r\n param = block[\"sParam\"]\r\n content = block[\"sContent\"]\r\n if block[\"sSpace\"]: # restore spaces before start-tag\r\n if len(parsetree) > 0 and parsetree[-1][0] == \"str\":\r\n parsetree[-1] = (\"str\", parsetree[-1][1] + block[\"sSpace\"])\r\n else:\r\n parsetree.append((\"str\", block[\"sSpace\"]))\r\n pos_p = fpos + match.start(\"sParam\") # shortcuts\r\n pos_c = fpos + match.start(\"sContent\")\r\n elif block[\"mKeyw\"] is not None: # multi-line block tag\r\n block_indent = len(block[\"indent\"])\r\n keyword = block[\"mKeyw\"]\r\n param = block[\"mParam\"]\r\n content = block[\"mContent\"]\r\n pos_p = fpos + match.start(\"mParam\")\r\n pos_c = fpos + match.start(\"mContent\")\r\n ignored = block[\"mIgnored\"].strip()\r\n if ignored and ignored != self._comment_start:\r\n raise TemplateSyntaxError(\"No code allowed after block-tag.\", self._errpos(fpos+match.start(\"mIgnored\")))\r\n elif block[\"mEnd\"] is not None: # multi-line block end\r\n if block_type is None:\r\n raise TemplateSyntaxError(\"No block to end here/invalid indent.\", self._errpos(pos__) )\r\n if block_indent != len(block[\"mEnd\"]):\r\n raise TemplateSyntaxError(\"Invalid indent for end-tag.\", self._errpos(pos__) )\r\n ignored = block[\"meIgnored\"].strip()\r\n if ignored and ignored != self._comment_start:\r\n raise TemplateSyntaxError(\"No code allowed after end-tag.\", self._errpos(fpos+match.start(\"meIgnored\")))\r\n block_type = None\r\n elif block[\"sEnd\"] is not None: # single-line block end\r\n if block_type is None:\r\n raise TemplateSyntaxError(\"No block to end here/invalid indent.\", 
self._errpos(pos__))\r\n if block_indent is not None:\r\n raise TemplateSyntaxError(\"Invalid indent for end-tag.\", self._errpos(pos__))\r\n block_type = None\r\n else:\r\n raise TemplateException(\"FATAL: Block regexp error. Please contact the author. (%s)\" % match.group())\r\n\r\n # analyze block content (mainly error-checking and -messages)\r\n if keyword:\r\n keyword = keyword.lower()\r\n if 'for' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'for'\r\n cond = self._reForParam.match(param)\r\n if cond is None:\r\n raise TemplateSyntaxError(\"Invalid 'for ...' at '%s'.\" %(param), self._errpos(pos_p))\r\n names = tuple(n.strip() for n in cond.group(\"names\").split(\",\"))\r\n self._testexpr(cond.group(\"iter\"), pos_p+cond.start(\"iter\"))\r\n parsetree.append((\"for\", names, cond.group(\"iter\"), self._parse(content, pos_c)))\r\n elif 'if' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if not param:\r\n raise TemplateSyntaxError(\"Missing condition for 'if' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'if'\r\n self._testexpr(param, pos_p)\r\n parsetree.append((\"if\", param, self._parse(content, pos_c)))\r\n elif 'elif' == keyword:\r\n if block_type != 'if':\r\n raise TemplateSyntaxError(\"'elif' may only appear after 'if' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if not param:\r\n raise TemplateSyntaxError(\"Missing condition for 'elif' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n self._testexpr(param, pos_p)\r\n parsetree.append((\"elif\", param, self._parse(content, pos_c)))\r\n elif 'else' == keyword:\r\n if block_type not in ('if', 'for'):\r\n raise TemplateSyntaxError(\"'else' may only appear after 'if' of 'for' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if param:\r\n raise TemplateSyntaxError(\"'else' may not have parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n parsetree.append((\"else\", self._parse(content, pos_c)))\r\n elif 'macro' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'macro'\r\n # make sure param is \"\\w+\" (instead of \".+\")\r\n if not param:\r\n raise TemplateSyntaxError(\"Missing name for 'macro' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n if not self._reMacroParam.match(param):\r\n raise TemplateSyntaxError(\"Invalid name for 'macro' at '%s'.\" %(match.group()), self._errpos(pos__))\r\n #remove last newline\r\n if len(content) > 0 and content[-1] == '\\n':\r\n content = content[:-1]\r\n if len(content) > 0 and content[-1] == '\\r':\r\n content = content[:-1]\r\n parsetree.append((\"macro\", param, self._parse(content, pos_c)))\r\n\r\n # parser-commands\r\n elif 'raw' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n if param:\r\n raise TemplateSyntaxError(\"'raw' may not have parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'raw'\r\n parsetree.append((\"str\", content))\r\n elif 'include' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n if 
param:\r\n raise TemplateSyntaxError(\"'include' may not have parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'include'\r\n try:\r\n u = self._load(content.strip())\r\n except Exception,err:\r\n raise TemplateIncludeError(err, self._errpos(pos__))\r\n self._includestack.append((content.strip(), u)) # current filename/template for error-msg.\r\n p = self._parse(u)\r\n self._includestack.pop()\r\n parsetree.extend(p)\r\n elif 'set_escape' == keyword:\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing block-end-tag before new block '%s'.\" %(match.group()), self._errpos(pos__))\r\n if param:\r\n raise TemplateSyntaxError(\"'set_escape' may not have parameters at '%s'.\" %(match.group()), self._errpos(pos__))\r\n block_type = 'set_escape'\r\n esc = content.strip().upper()\r\n if esc not in ESCAPE_SUPPORTED:\r\n raise TemplateSyntaxError(\"Unsupported escape '%s'.\" %(esc), self._errpos(pos__))\r\n self.escape = ESCAPE_SUPPORTED[esc]\r\n else:\r\n raise TemplateSyntaxError(\"Invalid keyword '%s'.\" %(keyword), self._errpos(pos__))\r\n curr = match.end()\r\n\r\n if block_type is not None:\r\n raise TemplateSyntaxError(\"Missing end-tag.\", self._errpos(pos__))\r\n\r\n if len(template) > curr: # process template-part after last block\r\n self._parse_sub(parsetree, template[curr:], fpos)\r\n\r\n return parsetree", "def parse_block(\n string: str,\n vars: Dict,\n neg: bool = False,\n min_: int = 1,\n max_: int = 1,\n label: Optional[str] = None,\n start: int = -1,\n) -> Block:\n it: BlockIterator = BlockIterator(string, start=start)\n members: List[Union[Block, Unit, Ref]] = []\n block = Block(members, vars, neg=neg, min_=min_, max_=max_, label=label)\n for content, neg, min_, max_, label, type_, line_num in it:\n if type_ == Types.BLOCK:\n members.append(parse_block(content, vars, neg, min_, max_, label, line_num))\n elif type_ == Types.UNIT:\n members.append(parse_unit(content, neg, min_, max_, label))\n elif type_ == Types.VAR_REF:\n members.append(Ref(content, neg, min_, max_, label))\n # use this after the iteration is complete\n block.union = it.is_union\n if not block.members and not EMPTY_BLOCK_RE.match(string):\n raise ValueError(\n f\"Cannot parse block contents: {string} \" f\"starting at {start}\"\n )\n return block", "def get_placeholder_formats_list(self, format_string):\n placeholders = []\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.append((token.group(\"key\"), token.group(\"format\")))\n return placeholders", "def parse(self, s):\n\n segments = self.compiled.split(self._whitespace.sub(\" \", s))\n literals = segments[::2]\n raw = segments[1::2]\n\n if not raw:\n return []\n\n case = list(map(str.casefold, raw))\n prefixes = [{}] + [dict(self.locale_set.prefixes.get(match, ())) for match in case[:-1]]\n suffixes = [dict(self.locale_set.suffixes.get(match, ())) for match in case[1:]] + [{}]\n\n groups = _DateTime(**{ field: [] for field in _DateTime._fields })\n choices_per_position = {}\n always_literal = set()\n numeric = set()\n for idx, (prefix, suffix) in enumerate(zip(prefixes, suffixes)):\n keyword = self._lookup_keyword(raw[idx])\n if \"y\" in prefix:\n prefix[\"C\"] = tuple(set(prefix[\"y\"] + prefix.get(\"C\", ())))\n if not keyword:\n always_literal.add(idx)\n else:\n if raw[idx].isdigit():\n numeric.add(idx)\n choices_per_position[idx] = len(keyword)\n for fmt, value, locales in keyword:\n category = fmt[-1]\n if category == 
\"b\":\n # Month-names should be treated like numeric months.\n category = \"m\"\n elif category == \"z\":\n category = \"Z\"\n getattr(groups, category).append(_Assignment(\n fmt=fmt,\n pos=idx,\n value=value,\n locales=locales,\n prefix=prefix.get(fmt[-1]),\n suffix=suffix.get(fmt[-1]),\n ))\n numeric = frozenset(numeric)\n\n # If a required date field is unsatisfiable, this is not a date.\n if not all(getattr(groups, category) for category in _State._min_date_formats):\n for category in _State._all_date_formats:\n getattr(groups, category).clear()\n\n # If a required time field is unsatisfiable, this is not a time.\n if not all(getattr(groups, category) for category in _State._min_time_formats):\n for category in _State._all_time_formats:\n getattr(groups, category).clear()\n\n for group in groups:\n group.sort(key=lambda assignment: (\n -self._optimistic_score(assignment),\n choices_per_position[assignment.pos],\n ))\n\n required_formats = _State._min_date_formats + _State._min_time_formats\n groups = OrderedDict(sorted(\n (\n (\n category,\n (\n group,\n tuple(\n (f, required)\n for f, required in _position_constraints\n if category in required\n ),\n tuple(\n (f, required)\n for f, required, revisit in _value_constraints\n if category in required or category in revisit\n ),\n )\n )\n for category, group in zip(groups._fields, groups)\n if group\n ),\n key=lambda i: (i[0] not in required_formats, len(i[1][0]))\n ))\n\n # We've already filtered out all possibilities; there's nothing here.\n if not groups:\n return []\n\n constrained_groups = []\n while groups:\n category, (group, position, value) = groups.popitem(last=False)\n constrained_groups.append((category, group, position, value))\n required = frozenset(itertools.chain.from_iterable(required for f, required in itertools.chain(position, value)))\n if required:\n required = [\n category\n for category in reversed(groups.keys())\n if category in required\n ]\n for category in required:\n groups.move_to_end(category, last=False)\n groups = constrained_groups\n\n best_quality = 0\n best_candidates = []\n\n partials = [\n _State.empty._replace(\n unconverted=frozenset(always_literal),\n remaining_groups=tuple(groups),\n ).children(numeric=numeric)\n ]\n while partials:\n try:\n quality, locales, state = next(partials[-1])\n except StopIteration:\n partials.pop()\n continue\n\n if state.remaining_groups:\n # Admissable heuristic: compute the best score each group\n # could possibly achieve. Don't count conversion specifiers\n # that we've already used, but don't worry about conflicts\n # in the groups we haven't assigned yet. Any such conflicts\n # can only reduce the resulting score, and we only need to\n # make sure that the heuristic is at least as large as the\n # true value of the best leaf in this subtree. 
However, the\n # more precise we can be here, the fewer nodes we have to\n # search, so we can spend some CPU time on precision and\n # still come out ahead.\n assigned = state.unconverted.union(state.pos).difference((None,))\n heuristic = len(state.pending_hints) + sum(\n next((\n self._optimistic_score(assignment)\n for assignment in group[1]\n if assignment.pos not in assigned\n ), 0)\n for group in state.remaining_groups\n )\n\n if quality + heuristic < best_quality:\n # Even assuming the remaining groups get the highest\n # possible score, this state is still not good enough.\n continue\n\n partials.append(state.children(numeric=numeric))\n continue\n\n value = state.valid()\n if value is None:\n continue\n\n quality, locales, state = state.final_score()\n\n if best_quality is not None and quality < best_quality:\n # We've seen better, so skip this one.\n continue\n\n if quality != best_quality:\n best_quality = quality\n best_candidates = []\n\n conversions = dict(zip(state.pos, state.fmts))\n fmts = [ conversions.get(idx) or literal for idx, literal in enumerate(raw) ]\n\n pattern = ''.join(lit + fmt for lit, fmt in zip(literals, fmts + [''])).replace(\"%C%y\", \"%Y\")\n best_candidates.append((pattern, value, locales))\n return best_candidates", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def blockParser(block):\n struct = []\n first = True\n record = False\n for line in block:\n if line.startswith('Structure #'):\n record = True\n if not first:\n yield struct\n struct = []\n first = False\n if record:\n struct.append(line)\n yield struct", "def parse_block_layout(raw_block_layout):\n\n validate_raw_block_layout(raw_block_layout)\n\n block_layout = []\n for raw_layer_layout in raw_block_layout:\n raw_block_specs = raw_layer_layout.split('-')\n layer = [raw_block_spec.split(',') for raw_block_spec in raw_block_specs]\n layer = [(block_name, int(num_repeats)) for block_name, num_repeats in layer]\n block_layout.append(layer)\n\n return block_layout", "def __init__(self, format_string):\r\n if not isinstance(format_string, Compatibility.string):\r\n raise TypeError('format_string should be a string, instead got %s' % type(format_string))\r\n self._re_pattern, self._applicators = self._preprocess_format_string(format_string)\r\n self._re = re.compile(self._re_pattern)", "def update_placeholders(self, format_string, placeholders):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if token.group(\"key\") in placeholders:\n output.append(\n \"{{{}{}}}\".format(placeholders[token.group(\"key\")], token.group(\"format\"))\n )\n continue\n elif token.group(\"command\"):\n # update any placeholders used in commands\n commands = parse_qsl(token.group(\"command\"), keep_blank_values=True)\n # placeholders only used in `if`\n if \"if\" in [x[0] for x in commands]:\n items = []\n for key, value in commands:\n if key == \"if\":\n # we have to rebuild from the parts we have\n condition = Condition(value)\n variable = condition.variable\n if variable in placeholders:\n variable = placeholders[variable]\n # negation via `!`\n not_ = \"!\" if not condition.default else \"\"\n condition_ = condition.condition or \"\"\n # if there is no condition then there is no\n # value\n if condition_:\n value_ = condition.value\n else:\n value_ = \"\"\n value = \"{}{}{}{}\".format(not_, variable, condition_, value_)\n if value:\n items.append(f\"{key}={value}\")\n else:\n items.append(key)\n\n # we cannot use urlencode because it 
will escape things\n # like `!`\n output.append(r\"\\?{} \".format(\"&\".join(items)))\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def _decode_block_string(self, block_string: str):\n\n arg_strings = block_string.split('_')\n args = {}\n for arg_string in arg_strings:\n splits = re.split(r'(\\d.*)', arg_string)\n if len(splits) >= 2:\n key, value = splits[:2]\n args[key] = value\n num_repeat = int(args['r'])\n block_args = {\n 'kernel_size': int(args['k']),\n 'stride': int(args['s']),\n 'expand_ratio': int(args['e']),\n 'in_channels': int(args['i']),\n 'out_channels': int(args['o']),\n 'se_ratio': float(args['se']) if 'se' in args else None,\n }\n return block_args, num_repeat", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def parse_blocks(self):\n if not self.options.blocks:\n return\n block_identifiers, block_aliases = [list(b) for b in zip(*self.options.blocks)]\n while block_identifiers:\n nodelist = self.parser.parse(block_identifiers)\n token = self.parser.next_token()\n current_identifier = block_identifiers.pop(0)\n current_alias = block_aliases.pop(0)\n while token.contents != current_identifier:\n current_identifier = block_identifiers.pop(0)\n self.blocks[block_aliases.pop(0)] = template.NodeList() \n self.blocks[current_alias] = nodelist\n assert len(self.blocks) == len(self.options.blocks), \"%s block parsing failed: %r => %r\" % (self.tagname, self.options.blocks, self.blocks)", "def get_placeholders(self, format_string):\n placeholders = set()\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"placeholder\"):\n placeholders.add(token.group(\"key\"))\n elif token.group(\"command\"):\n # get any placeholders used in commands\n commands = dict(parse_qsl(token.group(\"command\")))\n # placeholders only used in `if`\n if_ = commands.get(\"if\")\n if if_:\n placeholders.add(Condition(if_).variable)\n return placeholders", "def _slice_template(cls, in_str: str) -> Iterator[RawFileSlice]:\n fmt = Formatter()\n in_idx = 0\n for literal_text, field_name, format_spec, conversion in fmt.parse(in_str):\n if literal_text:\n escape_chars = cls._sorted_occurrence_tuples(\n cls._substring_occurrences(literal_text, [\"}\", \"{\"])\n )\n idx = 0\n while escape_chars:\n first_char = escape_chars.pop()\n # Is there a literal first?\n if first_char[1] > idx:\n yield RawFileSlice(\n literal_text[idx : first_char[1]], \"literal\", in_idx\n )\n in_idx += first_char[1] - idx\n # Add the escaped\n idx = first_char[1] + len(first_char[0])\n # We double them here to make the raw\n yield RawFileSlice(\n literal_text[first_char[1] : idx] * 2, \"escaped\", in_idx\n )\n # Will always be 2 in this case.\n # This is because ALL escape sequences in the python formatter\n # are two characters which reduce to one.\n in_idx += 2\n # Deal with last one (if present)\n if literal_text[idx:]:\n yield RawFileSlice(literal_text[idx:], \"literal\", in_idx)\n in_idx += len(literal_text) - idx\n # Deal with fields\n if field_name:\n constructed_token = \"{{{field_name}{conv}{spec}}}\".format(\n field_name=field_name,\n conv=f\"!{conversion}\" if conversion else \"\",\n spec=f\":{format_spec}\" if format_spec else \"\",\n )\n yield RawFileSlice(constructed_token, \"templated\", in_idx)\n in_idx += len(constructed_token)", "def reformat(ctx):\n pass", "def format(\n self,\n 
format_string,\n module=None,\n param_dict=None,\n force_composite=False,\n attr_getter=None,\n ):\n if param_dict is None:\n param_dict = {}\n\n # if the processed format string is not in the cache then create it.\n if format_string not in self.block_cache:\n self.build_block(format_string)\n\n first_block = self.block_cache[format_string]\n\n def get_parameter(key):\n \"\"\"\n function that finds and returns the value for a placeholder.\n \"\"\"\n if key in param_dict:\n # was a supplied parameter\n param = param_dict.get(key)\n elif module and hasattr(module, key):\n param = getattr(module, key)\n if hasattr(param, \"__call__\"):\n # we don't allow module methods\n raise Exception()\n elif attr_getter:\n # get value from attr_getter function\n try:\n param = attr_getter(key)\n except: # noqa e722\n raise Exception()\n else:\n raise Exception()\n if isinstance(param, Composite):\n if param.text():\n param = param.copy()\n else:\n param = \"\"\n return param\n\n # render our processed format\n valid, output = first_block.render(get_parameter, module)\n\n # clean things up a little\n if isinstance(output, list):\n output = Composite(output)\n if not output:\n if force_composite:\n output = Composite()\n else:\n output = \"\"\n\n return output", "def parse(message):\n if not isinstance(message, str):\n raise TypeError(\"Block.from_network_format: expected message to be of type str\")\n if not message[0] == 'd':\n raise ValueError()\n block_number = int(message[1:7], 16)\n timestamp = int(message[7:15], 16)\n difficulty = int(message[15:17], 16)\n nonce = int(message[17:81], 16)\n previous_block_hash = message[81:145]\n merkle_root_hash = message[145:209]\n transaction_count = int(message[209:211], 16)\n message = message[211:]\n block_transactions = []\n for x in range(transaction_count):\n transaction_length = int(message[:5], 16)\n transaction = message[5:transaction_length + 5]\n block_transactions.append(transaction)\n message = message[transaction_length + 5:]\n str_block_transactions = \"\"\n for t in block_transactions:\n str_block_transactions += t + \",\"\n str_block_transactions = str_block_transactions[:-1]\n self_hash = calculate_hash(previous_block_hash, merkle_root_hash, nonce)\n block = (\n 0, block_number, timestamp, difficulty, nonce, previous_block_hash, merkle_root_hash,\n str_block_transactions,\n self_hash)\n return Block(block)", "def parseBlock(self, text, prevLineData):\n return self.parser.parseBlock(text, prevLineData)", "def block_parser(part, rgxin, rgxout, fmtin, fmtout):\r\n\r\n block = []\r\n lines = part.split('\\n')\r\n N = len(lines)\r\n i = 0\r\n decorator = None\r\n while 1:\r\n\r\n if i==N:\r\n # nothing left to parse -- the last line\r\n break\r\n\r\n line = lines[i]\r\n i += 1\r\n line_stripped = line.strip()\r\n if line_stripped.startswith('#'):\r\n block.append((COMMENT, line))\r\n continue\r\n\r\n if line_stripped.startswith('@'):\r\n # we're assuming at most one decorator -- may need to\r\n # rethink\r\n decorator = line_stripped\r\n continue\r\n\r\n # does this look like an input line?\r\n matchin = rgxin.match(line)\r\n if matchin:\r\n lineno, inputline = int(matchin.group(1)), matchin.group(2)\r\n\r\n # the ....: continuation string\r\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\r\n Nc = len(continuation)\r\n # input lines can continue on for more than one line, if\r\n # we have a '\\' line continuation char or a function call\r\n # echo line 'print'. 
The input line can only be\r\n # terminated by the end of the block or an output line, so\r\n # we parse out the rest of the input line if it is\r\n # multiline as well as any echo text\r\n\r\n rest = []\r\n while i<N:\r\n\r\n # look ahead; if the next line is blank, or a comment, or\r\n # an output line, we're done\r\n\r\n nextline = lines[i]\r\n matchout = rgxout.match(nextline)\r\n #print \"nextline=%s, continuation=%s, starts=%s\"%(nextline, continuation, nextline.startswith(continuation))\r\n if matchout or nextline.startswith('#'):\r\n break\r\n elif nextline.startswith(continuation):\r\n inputline += '\\n' + nextline[Nc:]\r\n else:\r\n rest.append(nextline)\r\n i+= 1\r\n\r\n block.append((INPUT, (decorator, inputline, '\\n'.join(rest))))\r\n continue\r\n\r\n # if it looks like an output line grab all the text to the end\r\n # of the block\r\n matchout = rgxout.match(line)\r\n if matchout:\r\n lineno, output = int(matchout.group(1)), matchout.group(2)\r\n if i<N-1:\r\n output = '\\n'.join([output] + lines[i:])\r\n\r\n block.append((OUTPUT, output))\r\n break\r\n\r\n return block", "def __init__(self, text, *contexts):\n\n self.context = {}\n for context in contexts:\n self.context.update(context)\n\n buffered = ''\n\n tokens = re.split(r\"(?s)({{.*?}})\", text)\n for token in tokens:\n if token.startswith(\"{{\"):\n # An expression to evaluate.\n buffered += self.context[token[2:-2].strip()]\n else:\n # Literal content, if not empty, output it.\n if token:\n buffered += token\n\n self.buffered = buffered", "def update(blockstring):\n with BLOCK_LOCK:\n for block in blockstring.split('|'):\n name, values = parse(block)\n BLOCKS[name] = values", "async def parse(self, raw: str) -> dict:", "def structure_parse(source):\r\n return structure_grammar().parseString(source)", "def _parse_kvfmt(self, fmtlist):\n t_fmt = self.__class__._T_FMT\n t_prefix = self.__class__._T_PREFIX\n parsed_list = []\n field_id = 1\n\n for entry in fmtlist:\n name = entry[0]\n fmt = entry[1]\n parsed_field = {}\n parsed_field['name'] = name\n if isinstance(fmt, str):\n ptr = 0\n m_prefix = t_prefix.match(fmt)\n if m_prefix:\n ptr += _get_length_of_match(m_prefix)\n parsed_field['prefix'] = m_prefix.group(1)\n # check for optional nested structure start (required if the field is also repeated)\n if m_prefix.group(2) and len(entry) > 2:\n parsed_field['field_id'] = field_id\n parsed_field['field_type'] = 'a'\n parsed_field['subcontent'] = self._parse_kvfmt(entry[2])\n field_id += 1\n parsed_list.append(parsed_field)\n continue\n elif m_prefix.group(2):\n raise BadFormatString('Nested field type used without specifying field format.')\n m_fmt = t_fmt.match(fmt[ptr:])\n if m_fmt:\n ptr += _get_length_of_match(m_fmt)\n resolved_fmt_char = None\n # fmt is an alias\n if m_fmt.group(2):\n resolved_fmt_char = m_fmt.group(2)\n parsed_field['field_type'] = self.__class__\\\n .FIELD_ALIAS[m_fmt.group(2)]\n # fmt is an actual field type\n elif m_fmt.group(1):\n resolved_fmt_char = m_fmt.group(1)\n parsed_field['field_type'] = m_fmt.group(1)\n parsed_field['field_id'] = field_id\n # only skip type (`x') is allowed for copying in key-value mode\n if m_fmt.group(3) and resolved_fmt_char == 'x':\n repeats = int(m_fmt.group(3))\n parsed_field['repeat'] = repeats\n field_id += repeats\n elif m_fmt.group(3):\n raise BadFormatString('Field copying is not allowed in key-value format list.')\n else:\n field_id += 1\n else:\n raise BadFormatString('Invalid type for field \"{0}\"'.format(name))\n if len(fmt) != ptr:\n 
self.logger.warning('Extra content found after the type string of %s.', name)\n else:\n # Hard-code the empty prefix because we don't support copying\n parsed_field['prefix'] = ''\n parsed_field['field_id'] = field_id\n parsed_field['field_type'] = 'a'\n parsed_field['subcontent'] = self._parse_kvfmt(fmt)\n field_id += 1\n parsed_list.append(parsed_field)\n return parsed_list", "def htmlFormat(self, text):\n txt_blocks = self._parser_block(lex_block(text))\n\n #XXX: Maybe there is a better solution, but I doubt\n #The problem is nested escapestyles\n escape_d = {}\n escapes = re.compile('\\[escapestyle\\] \\s* (?P<inner>(.|\\s)*?) \\s* \\[/escapestyle\\]', re.VERBOSE)\n def rem(mo):\n h_code = hash(mo.group(0))\n escape_d[h_code] = mo.group('inner')\n return '(<!%s!>)' % h_code\n txt_blocks = escapes.sub(rem, txt_blocks)\n\n txt_style = parser_style(lex_style(txt_blocks))\n\n eess = re.compile('\\(<!(-?\\d+)!>\\)')\n def back(mo):\n val = int(mo.group(1))\n if escape_d.has_key(val):\n return escape_d[val]\n return mo.group(0)\n txt_style = eess.sub(back, txt_style)\n\n return txt_style", "def parse_template(string):\n count = 0\n list1 = []\n for character in string:\n count = count + 1\n if character == \"{\":\n end = string.find(\"}\", count)\n s_strg = string[count:end]\n list1.append(s_strg)\n string = string.replace(s_strg, \"\", 1)\n count = count - len(s_strg)\n\n subs = tuple(list1)\n\n return(string, subs)\n print(subs)", "def format_blocks(self):\n\n block_text = []\n for el, text in self._block_text.items():\n self.soft_break(el, text)\n content = ''.join(text)\n if content:\n block_text.append((content, self.additional_context + self.construct_selector(el)))\n return block_text", "def parser(block):\n def unwrap(result):\n tupe, value = result\n if tupe & ParserResult.DONE:\n value, chunk, last = value\n return (ParserResult.from_done(value.value, chunk, last) if value.error is None else\n ParserResult.from_error(value.error))\n elif tupe & ParserResult.PARTIAL:\n return ParserResult.from_partial(Parser(lambda chunk, last: unwrap(value(chunk, last))))\n else:\n return result\n do_block = do(Parser)(block)\n return F.wraps(block)(\n lambda *args, **kwargs: Parser(\n lambda chunk, last: unwrap(do_block(*args, **kwargs)(chunk, last))))", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def _chunk(self, string):\n #~ a = r'\\**\\s*(?:a\\.?|\\(?a\\))' #SMA option dot now required\n a = r'\\**\\s*(?:a\\.|\\(?a\\))'\n b = r'\\**\\s*(?:b\\.|\\(?b\\))'\n c = r'\\**\\s*(?:c\\.|\\(?c\\))'\n d = r'\\**\\s*(?:d\\.|\\(?d\\))'\n e = r'\\**\\s*(?:e\\.|\\(?e\\))'\n l = r'\\s+.+?\\s+'\n # last option trucated here \\/\n regex = r\"({a}{line}{b}{line}{c}{line}(?:{d}{line})(?:{e}.*?)?)\\n?\".format(\n a=a, b=b, c=c, d=d, e=e, line=l, \n )\n p = re.compile(regex, re.IGNORECASE | re.DOTALL)\n\n self._tokens = p.split(string)", "def parseString(self, s):\n\n t0 = time.time()\n lines = self.getline(s)\n lineno = 0\n for l in lines:\n lineno += 1\n logging.log(10, \"raw line %05d: %s\" % (lineno, l))\n 
if len(l) == 0 or l[0] == '#':\n continue\n \n if l.startswith('typedef'):\n lidx = self.parseTypedef(l, lines)\n lineno += lidx\n else:\n # Not a typedef -- see if the 1st token matches a known\n # structure name. If not, create a new variable.\n sidx = l.find(' ')\n if sidx > 0:\n name = l[0:sidx]\n struct = self.structs.get(name.upper(), None)\n if struct:\n struct.parseOne(l)\n else:\n v = YPFVar(l, debug=0)\n if v.name in self.vars:\n newValue = v.value\n oldValue = self.vars[v.name].value\n if newValue != oldValue:\n print(\"Variable %s is being defined with a new value, overwriting it. old=%s, new=%s\" \n % (v.name, oldValue, newValue))\n self.vars[v.name] = v", "def parse_template(template):\n field_name = None\n field_value = []\n\n for line in template.strip().split('\\n') + ['end:']:\n if line.startswith('#'):\n continue\n match = RE_TEMPLATE_FIELD_LINE.match(line)\n if match:\n if field_name is not None:\n yield (field_name, '\\n'.join(field_value).strip())\n elif len(field_value) > 0:\n logging.warning('Ignoring lines: %r', field_value)\n\n field_name = match.group(1)\n field_value = [match.group(2)]\n else:\n field_value.append(line)", "def reformat(refstr, listed=False):\n \n formatted = ' '.join(refstr.split()).replace(r'\\newblock', '\\n\\\\newblock').splitlines()\n if listed:\n return formatted\n else:\n return '\\n'.join(formatted)", "def _split_by_block(self, path=None,category='meminfo'):\n \n with open(path, \"r\") as f: \n text = f.read()\n \n lst = re.split('zzz', text, flags=re.DOTALL) # to list based on time\n lst = [x for x in lst if x] # remove empty strings\n \"\"\"\n Python 2.x\n lst = map(lambda v: re.split('(\\s\\W{1,}\\w{3}\\s\\w{3}\\s\\w{2,3}\\s\\d{2}:\\d{2}:\\d{2}\\s\\w{3}\\s\\d{4})', v), lst)\n \"\"\"\n lst = [re.split('(\\s\\W{1,}\\w{3}\\s\\w{3}\\s\\w{2,3}\\s\\d{2}:\\d{2}:\\d{2}\\s\\w{3}\\s\\d{4})', v) for v in lst]\n block_dict = []\n for v in lst:\n timestamp=v[1]\n value=v[2]\n _d = [{'timestamp':timestamp, \n 'category': category, \n 'sub_category': '',\n 'key': 'raw_block',\n 'value': value}]\n block_dict.extend(_d)\n \n return block_dict", "def reformat_block(specline, values):\n data = reformat_spec_line(specline)\n desc = '\\n'.join(values)\n data.append(desc)\n return data", "def build_block_parser(md_instance, **kwargs):\r\n parser = BlockParser(md_instance)\r\n parser.blockprocessors['empty'] = EmptyBlockProcessor(parser)\r\n parser.blockprocessors['indent'] = ListIndentProcessor(parser)\r\n parser.blockprocessors['code'] = CodeBlockProcessor(parser)\r\n parser.blockprocessors['hashheader'] = HashHeaderProcessor(parser)\r\n parser.blockprocessors['setextheader'] = SetextHeaderProcessor(parser)\r\n parser.blockprocessors['hr'] = HRProcessor(parser)\r\n parser.blockprocessors['olist'] = OListProcessor(parser)\r\n parser.blockprocessors['ulist'] = UListProcessor(parser)\r\n parser.blockprocessors['quote'] = BlockQuoteProcessor(parser)\r\n parser.blockprocessors['paragraph'] = ParagraphProcessor(parser)\r\n return parser", "def parse(intLanguageName, content, formatDetails, threadstop):\r\n\r\n if len(content) == 0:\r\n return buildSyntaxNode([], 0, \"text\")\r\n\r\n if formatDetails.noFormat:\r\n return buildSyntaxNode([buildSyntaxNode(content, 0, \"plainText\")],\r\n 0, \"text\")\r\n\r\n baseDict = _buildBaseDict(formatDetails=formatDetails)\r\n\r\n## _prof.start()\r\n try:\r\n print content\r\n print baseDict\r\n t = text.parseString(content, parseAll=True, baseDict=baseDict,\r\n threadstop=threadstop)\r\n print t\r\n t = buildSyntaxNode(t, 0, 
\"text\")\r\n print t\r\n\r\n finally:\r\n## _prof.stop()\r\n pass\r\n\r\n return t", "def __ParseBlock(self, ast):\n for node in ast:\n node_name = node[0]\n node_value = node[1]\n if node_name == 'statement':\n self.__ParseStatement(node_value)\n else:\n logging.info('Unknown AST node in message block: %s' % (node_name))", "def makeBlock(tag):\n return {\"t\":\"RawBlock\",\"c\":[\"html\",tag]}", "def read_fmt(bib_name, bib_file):\n cache_name, formatted_cache_name = _cache_name(bib_name, bib_file)\n\n try:\n meta_data, formatted_entries = cache.read_global(formatted_cache_name)\n except:\n raise cache.CacheMiss()\n\n # raise a cache miss if the modification took place after the caching\n modified_time = os.path.getmtime(bib_file)\n if modified_time > meta_data[\"cache_time\"]:\n raise cache.CacheMiss()\n\n # validate the version and format strings are still valid\n if (meta_data[\"version\"] != _VERSION or\n any(meta_data[s] != get_setting(\"cite_\" + s)\n for s in [\"panel_format\", \"autocomplete_format\"])):\n print(\"Formatting string has changed, updating cache...\")\n # read the base information from the unformatted cache\n current_time, bib_entries = cache.read_global(cache_name)\n # format and cache the entries\n formatted_entries = _create_formatted_entries(formatted_cache_name,\n bib_entries,\n current_time)\n\n return formatted_entries", "def _parse_data(self):\n current_block = []\n current_section = \"docstring\"\n\n # if we get a line that starts with #, this is a new comment or\n # part of a block comment. Otherwise, it means the current block\n # comment has ended.\n\n for this in self.data:\n # Beginning of a new section at top level\n if self.regex_section.findall(this):\n name = self.regex_section.findall(this)[0]\n current_section = name.strip(\":\")\n self.sections[current_section] = \"\".join(current_block)\n current_block = []\n current_section = None\n elif this.startswith(\"#\"): # a comment at top level\n current_block.append(this)\n elif this.strip() == \"\": # an empty line\n # this was the main comment, or an isolated comment\n current_block = []\n else: # a non-empty line to skip\n current_block = []\n\n for key in self._get_expected_sections():\n if key not in self.sections.keys():\n logger.warning(\"section %s not dealt by the parsing function\" % key)", "def tokens(self, format_string):\n if format_string not in self.format_string_cache:\n tokens = list(re.finditer(self.reg_ex, format_string))\n self.format_string_cache[format_string] = tokens\n return self.format_string_cache[format_string]", "def parse(source):\n\n def blockify(source):\n\n \"\"\"This is the first step, where the source is broken into paragraphs,\n based on blank lines in the source. The output is a list of strings.\n Each string is a paragraph. Newlines (with any trailing whitespace)\n inside paragraphs are converted to single spaces.\"\"\"\n\n paragraphs = [\"\"]\n for line in source.strip().split(\"\\n\"):\n line = line.strip()\n if line: paragraphs[-1] += line + \" \"\n elif paragraphs[-1]: paragraphs.append(\"\")\n\n return paragraphs\n\n def subparse(block):\n\n \"\"\"This function parses a single paragraph of source, as returned by\n the `blockify` function. This finds the individual verses within the\n given paragraph. 
It returns an AST for the paragraph, as previously\n described.\n\n TODO: Validate the input based on the AST.\n \"\"\"\n\n verses = []\n context = None\n for char in block:\n\n if char == \"[\":\n if verses: verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n verses.append({\"surah\": \"\", \"verse\": \"\", \"quran\": \"\"})\n context = \"surah\"\n elif char == \":\" and context == \"surah\":\n verses[-1][\"surah\"] = int(verses[-1][\"surah\"])\n context = \"verse\"\n elif char == \"]\":\n verses[-1][\"verse\"] = int(verses[-1][\"verse\"])\n context = \"quran\"\n else: verses[-1][context] += char\n\n verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n return verses\n\n return [ subparse(block) for block in blockify(source) ]", "def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ ]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg", "def highlightBlock(self, text):\n for format_, expression in self.rules:\n # get first match\n index = expression.indexIn(text)\n while index >= 0:\n length = expression.matchedLength()\n self.setFormat(index, length, format_)\n # jump to next match\n index = expression.indexIn(text, index + length)\n self.setCurrentBlockState(0)", "def parse_format(var_sample):\n # ugh\n ret = []\n # Parsing format information\n # Need to see what all these could be...\n if None in var_sample[\"GT\"]:\n ret.append(3)\n elif var_sample[\"GT\"] == (0, 0):\n ret.append(0)\n elif var_sample[\"GT\"] == (0, 1):\n ret.append(1)\n elif var_sample[\"GT\"] == (1, 1):\n ret.append(2)\n \n ret.extend([var_sample[\"GQ\"] if var_sample[\"GQ\"] is not None else 0,\n var_sample[\"OV\"],\n var_sample[\"DP\"], # be careful these aren't '.'\n #split where _r is ref-allele and _a is alt-allele\n var_sample[\"AD\"][0],\n var_sample[\"AD\"][1],\n var_sample[\"PDP\"],\n var_sample[\"PAD\"][0],\n var_sample[\"PAD\"][1],\n var_sample[\"US\"][0],\n var_sample[\"US\"][1],\n var_sample[\"DS\"][0],\n var_sample[\"DS\"][1],\n var_sample[\"UC\"][0],\n var_sample[\"UC\"][1],\n var_sample[\"DC\"][0],\n var_sample[\"DC\"][1],\n var_sample[\"UDC\"][0],\n var_sample[\"UDC\"][1],\n var_sample[\"UCC\"][0],\n var_sample[\"UCC\"][1],\n var_sample[\"DDC\"][0],\n var_sample[\"DDC\"][1],\n var_sample[\"DCC\"][0],\n var_sample[\"DCC\"][1],\n var_sample[\"UMO\"][0],\n var_sample[\"UMO\"][1],\n var_sample[\"DMO\"][0],\n var_sample[\"DMO\"][1],\n var_sample[\"UXO\"][0],\n var_sample[\"UXO\"][1],\n var_sample[\"DXO\"][0],\n var_sample[\"DXO\"][1],\n var_sample[\"NR\"][0],\n var_sample[\"NR\"][1],\n var_sample[\"MO\"][0],\n var_sample[\"MO\"][1],\n var_sample[\"XO\"][0],\n var_sample[\"XO\"][1],\n var_sample[\"XC\"][0],\n var_sample[\"XC\"][1],\n var_sample[\"AC\"][0],\n var_sample[\"AC\"][1],\n var_sample[\"MC\"][0],\n var_sample[\"MC\"][1],\n var_sample[\"EC\"][0],\n var_sample[\"EC\"][1],\n var_sample[\"PL\"][0] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][1] if var_sample[\"PL\"][0] is not None else 0,\n var_sample[\"PL\"][2] if var_sample[\"PL\"][0] is not None else 0])\n return ret\n #END", "def parse(cls, source):\n lines = 
list(cls._strip_lines(source.splitlines()))\n offset = 0\n blocks = {}\n while offset < len(lines):\n # Scan until we find a block.\n while offset < len(lines):\n header_match = re.match(r\"---+ (.*) ---+\", lines[offset])\n offset += 1\n if header_match:\n header = header_match.group(1)\n break\n config = {}\n while offset < len(lines):\n config_match = re.match(r\"^%\\s*([\\w_]+)\\s*:(.*)$\", lines[offset])\n if not config_match:\n break\n config[config_match.group(1).strip()] = config_match.group(2).strip()\n offset += 1\n block_lines = []\n while offset < len(lines) and (re.match(r\"---+ (.*) ---+\", lines[offset]) == None):\n block_lines.append(lines[offset])\n offset += 1\n if not header in blocks:\n blocks[header] = []\n blocks[header].append(SpecTestBlock.parse(header, config, block_lines))\n return ParsedSpecTest(blocks)", "def highlightBlock(self, text):\n # Do other syntax formatting\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n format = self.styles[format]\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)\n\n # Do multi-line strings\n in_multiline = self.match_multiline(text, *self.tri_single)\n if not in_multiline:\n in_multiline = self.match_multiline(text, *self.tri_double)", "def stringToBlock(strBlock):\r\n blocksplit = strBlock.split(';')\r\n block = Blockchain(int(blocksplit[0]), blocksplit[1], int(blocksplit[2]), int(blocksplit[3]) ,blocksplit[4])\r\n return block", "def highlightBlock(self, string):\n prev_data = self.currentBlock().previous().userData()\n if prev_data is not None:\n self._lexer._saved_state_stack = prev_data.syntax_stack\n elif hasattr(self._lexer, '_saved_state_stack'):\n del self._lexer._saved_state_stack\n\n # Lex the text using Pygments\n index = 0\n for token, text in self._lexer.get_tokens(string):\n length = len(text)\n self.setFormat(index, length, self._get_format(token))\n index += length\n\n if hasattr(self._lexer, '_saved_state_stack'):\n data = PygmentsBlockUserData(\n syntax_stack=self._lexer._saved_state_stack)\n self.currentBlock().setUserData(data)\n # Clean up for the next go-round.\n del self._lexer._saved_state_stack", "def _hash_html_blocks(self, text, raw=False):\r\n if '<' not in text:\r\n return text\r\n\r\n # Pass `raw` value into our calls to self._hash_html_block_sub.\r\n hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)\r\n\r\n # First, look for nested blocks, e.g.:\r\n # <div>\r\n # <div>\r\n # tags for inner block must be indented.\r\n # </div>\r\n # </div>\r\n #\r\n # The outermost tags must start at the left margin for this to match, and\r\n # the inner nested divs must be indented.\r\n # We need to do this before the next, more liberal match, because the next\r\n # match will start at the first `<div>` and stop at the first `</div>`.\r\n text = self._strict_tag_block_re.sub(hash_html_block_sub, text)\r\n\r\n # Now match more liberally, simply from `\\n<tag>` to `</tag>\\n`\r\n text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)\r\n\r\n # Special case just for <hr />. 
It was easier to make a special\r\n # case than to make the other regex more complicated.\r\n if \"<hr\" in text:\r\n _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)\r\n text = _hr_tag_re.sub(hash_html_block_sub, text)\r\n\r\n # Special case for standalone HTML comments:\r\n if \"<!--\" in text:\r\n start = 0\r\n while True:\r\n # Delimiters for next comment block.\r\n try:\r\n start_idx = text.index(\"<!--\", start)\r\n except ValueError:\r\n break\r\n try:\r\n end_idx = text.index(\"-->\", start_idx) + 3\r\n except ValueError:\r\n break\r\n\r\n # Start position for next comment block search.\r\n start = end_idx\r\n\r\n # Validate whitespace before comment.\r\n if start_idx:\r\n # - Up to `tab_width - 1` spaces before start_idx.\r\n for i in range(self.tab_width - 1):\r\n if text[start_idx - 1] != ' ':\r\n break\r\n start_idx -= 1\r\n if start_idx == 0:\r\n break\r\n # - Must be preceded by 2 newlines or hit the start of\r\n # the document.\r\n if start_idx == 0:\r\n pass\r\n elif start_idx == 1 and text[0] == '\\n':\r\n start_idx = 0 # to match minute detail of Markdown.pl regex\r\n elif text[start_idx-2:start_idx] == '\\n\\n':\r\n pass\r\n else:\r\n break\r\n\r\n # Validate whitespace after comment.\r\n # - Any number of spaces and tabs.\r\n while end_idx < len(text):\r\n if text[end_idx] not in ' \\t':\r\n break\r\n end_idx += 1\r\n # - Must be following by 2 newlines or hit end of text.\r\n if text[end_idx:end_idx+2] not in ('', '\\n', '\\n\\n'):\r\n continue\r\n\r\n # Escape and hash (must match `_hash_html_block_sub`).\r\n html = text[start_idx:end_idx]\r\n if raw and self.safe_mode:\r\n html = self._sanitize_html(html)\r\n key = _hash_text(html)\r\n self.html_blocks[key] = html\r\n text = text[:start_idx] + \"\\n\\n\" + key + \"\\n\\n\" + text[end_idx:]\r\n\r\n if \"xml\" in self.extras:\r\n # Treat XML processing instructions and namespaced one-liner\r\n # tags as if they were block HTML tags. E.g., if standalone\r\n # (i.e. 
are their own paragraph), the following do not get\r\n # wrapped in a <p> tag:\r\n # <?foo bar?>\r\n #\r\n # <xi:include xmlns:xi=\"http://www.w3.org/2001/XInclude\" href=\"chapter_1.md\"/>\r\n _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)\r\n text = _xml_oneliner_re.sub(hash_html_block_sub, text)\r\n\r\n return text", "def parsePercentExpression(literal, format):\n\tmat = _getREForPercentExpression(format).match(literal)\n\tif not mat:\n\t\traise ValueError(\"'%s' cannot be parsed using format '%s'\"%(\n\t\t\tliteral, format))\n\treturn mat.groupdict()", "def _decode_block_string(block_string):\n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n assert (('s' in options and len(options['s']) == 1) or\n (len(options['s']) == 2 and options['s'][0] == options['s'][1]))\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])", "def _decode_block_string(block_string):\n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n assert (('s' in options and len(options['s']) == 1) or\n (len(options['s']) == 2 and options['s'][0] == options['s'][1]))\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])", "def structParser(lines):\n blc = 0 #blank line counter\n bc = 0 #block counter\n struct = []\n record = False\n for line in lines:\n if len(line) == 1:\n blc +=1\n record = False\n if blc == 2:\n blc = 0\n bc +=1\n record = True\n if record and bc < 3:\n struct.append(line)\n\n yield struct", "def parse(s):\n return s", "def split_blocks(strings):\n blocks = [StringList()]\n for item in strings.xitems(): # (source, offset, value)\n if item[2].strip():\n blocks[-1].append(item[2], source=item[0], offset=item[1])\n elif len(blocks[-1]):\n blocks.append(StringList())\n # remove the last block if empty\n if len(blocks[-1]) == 0:\n del blocks[-1]\n return blocks", "def __parse_blocks_pass(self):\n\n self.stack = [DocumentStackToken()]\n\n self.tokenized_document = []\n token_to_use = self.source_provider.get_next_line()\n did_start_close = False\n did_started_close = False\n requeue = []\n ignore_link_definition_start = False\n POGGER.debug(\"---$---\", token_to_use)\n POGGER.debug(\"---\")\n self.__parse_properties.pragma_lines = {}\n line_number = 1\n try:\n (\n token_to_use,\n line_number,\n requeue,\n ) = self.__process_front_matter_header_if_present(\n token_to_use, line_number, requeue\n )\n did_start_close = token_to_use is None\n keep_on_going = True\n while keep_on_going:\n POGGER.debug(\"next-line>>$\", token_to_use)\n POGGER.debug(\"stack>>$\", self.stack)\n POGGER.debug(\"current_block>>$\", self.stack[-1])\n POGGER.debug(\"line_number>>$\", line_number)\n POGGER.debug(\"---\")\n\n 
position_marker = PositionMarker(line_number, 0, token_to_use)\n parser_state = ParserState(\n self.stack,\n self.tokenized_document,\n TokenizedMarkdown.__close_open_blocks,\n self.__handle_blank_line,\n )\n if did_start_close:\n POGGER.debug(\"\\n\\ncleanup\")\n\n was_link_definition_started_before_close = self.stack[\n -1\n ].was_link_definition_started\n\n did_started_close = True\n (\n tokens_from_line,\n requeue_line_info,\n ) = TokenizedMarkdown.__close_open_blocks(\n parser_state,\n self.tokenized_document,\n include_block_quotes=True,\n include_lists=True,\n caller_can_handle_requeue=True,\n was_forced=True,\n )\n if tokens_from_line and not self.tokenized_document:\n self.tokenized_document.extend(tokens_from_line)\n\n if not (requeue_line_info and requeue_line_info.lines_to_requeue):\n keep_on_going = False\n else:\n assert was_link_definition_started_before_close\n assert not requeue_line_info.lines_to_requeue[0]\n\n del requeue_line_info.lines_to_requeue[0]\n line_number -= 1\n\n did_start_close = False\n tokens_from_line = None\n else:\n POGGER.debug(\">>>>$\", self.tokenized_document)\n\n if not token_to_use or not token_to_use.strip():\n POGGER.debug(\"call __parse_blocks_pass>>handle_blank_line\")\n (\n tokens_from_line,\n requeue_line_info,\n ) = self.__handle_blank_line(\n parser_state,\n token_to_use,\n from_main_transform=True,\n position_marker=position_marker,\n )\n else:\n POGGER.debug(\"\\n\\nnormal lines\")\n (\n tokens_from_line,\n _,\n _,\n requeue_line_info,\n _,\n ) = ContainerBlockProcessor.parse_line_for_container_blocks(\n parser_state,\n position_marker,\n ignore_link_definition_start,\n self.__parse_properties,\n None,\n )\n\n POGGER.debug(\"<<<<$\", self.tokenized_document)\n\n if keep_on_going:\n line_number, ignore_link_definition_start = TokenizedMarkdown.__xx(\n line_number, requeue_line_info, requeue\n )\n\n POGGER.debug(\n \"---\\nbefore>>$\",\n self.tokenized_document,\n )\n POGGER.debug(\"before>>$\", tokens_from_line)\n if tokens_from_line:\n self.tokenized_document.extend(tokens_from_line)\n POGGER.debug(\n \"after>>$\",\n self.tokenized_document,\n )\n if requeue:\n POGGER.debug(\"requeue>>$\", requeue)\n POGGER.debug(\"---\")\n\n (\n token_to_use,\n did_start_close,\n did_started_close,\n ) = self.__determine_next_token_process(\n requeue, did_start_close, did_started_close\n )\n except AssertionError as this_exception:\n error_message = f\"A project assertion failed on line {line_number} of the current document.\"\n raise BadTokenizationError(error_message) from this_exception\n\n if self.__parse_properties.pragma_lines:\n self.tokenized_document.append(\n PragmaToken(self.__parse_properties.pragma_lines)\n )\n return self.tokenized_document", "def parse_block(lines):\n term = {\"alt_id\": [], \"relationship\": []}\n splitkv = re.compile(r\"(^[a-zA-Z_]+): (.+)\")\n for line in lines:\n m = re.search(splitkv, line)\n # assert m, f\"unexpected line: {line}\"\n key = m.group(1)\n value = m.group(2)\n if key in [\"id\", \"name\", \"namespace\", \"is_obsolete\"]:\n term[key] = value\n elif key == \"alt_id\":\n term[\"alt_id\"].append(value)\n elif key == \"is_a\":\n goid = value.split(\"!\")[0].strip()\n term[\"relationship\"].append({\"type\": \"is_a\", \"id\": goid})\n elif key == \"relationship\":\n typedef, goid = value.split(\"!\")[0].strip().split(\" \")\n term[\"relationship\"].append({\"type\": typedef, \"id\": goid})\n return term", "def parseChunk(self, parent, text):\r\n self.parseBlocks(parent, text.split('\\n\\n'))", "def 
__new__(cls, format):\n self = super(SF_Pattern, cls).__new__(cls)\n\n if isinstance(format, bytes):\n uni_str = format.decode('ISO-8859-1') # decode to unicode\n trans_str = translate(uni_str) # translate only works with unicode\n re_fmt = trans_str.encode('ISO-8859-1') # encode back to bytes\n self._spec = _gbspec\n else:\n re_fmt = translate(format)\n self._spec = _gspec\n\n self._format = format\n self._re = cre = re.compile(re_fmt)\n\n if cre.groupindex and len(cre.groupindex) != cre.groups:\n raise RuntimeError('cannot mix mapped and unmapped specifiers')\n elif not cre.groupindex:\n self._retfunc = self._return_tuple\n self._type = tuple\n else:\n self._retfunc = self._return_dict\n self._type = dict\n\n self._casts = self._get_types()\n\n return self", "def _decode_block_str(block_str, depth_multiplier=1.0):\n assert isinstance(block_str, str)\n ops = block_str.split('_')\n block_type = ops[0] # take the block type off the front\n ops = ops[1:]\n options = {}\n noskip = False\n for op in ops:\n # string options being checked on individual basis, combine if they grow\n if op == 'noskip':\n noskip = True\n elif op.startswith('n'):\n # activation fn\n key = op[0]\n v = op[1:]\n if v == 're':\n value = F.relu\n elif v == 'r6':\n value = F.relu6\n elif v == 'hs':\n value = hard_swish\n elif v == 'sw':\n value = swish\n else:\n continue\n options[key] = value\n else:\n # all numeric options\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # if act_fn is None, the model default (passed to model init) will be used\n act_fn = options['n'] if 'n' in options else None\n exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1\n pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1\n fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def\n\n num_repeat = int(options['r'])\n # each type of block has different valid arguments, fill accordingly\n if block_type == 'ir':\n block_args = dict(\n block_type=block_type,\n dw_kernel_size=_parse_ksize(options['k']),\n exp_kernel_size=exp_kernel_size,\n pw_kernel_size=pw_kernel_size,\n out_chs=int(options['c']),\n exp_ratio=float(options['e']),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=int(options['s']),\n act_fn=act_fn,\n noskip=noskip,\n )\n elif block_type == 'ds' or block_type == 'dsa':\n block_args = dict(\n block_type=block_type,\n dw_kernel_size=_parse_ksize(options['k']),\n pw_kernel_size=pw_kernel_size,\n out_chs=int(options['c']),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=int(options['s']),\n act_fn=act_fn,\n pw_act=block_type == 'dsa',\n noskip=block_type == 'dsa' or noskip,\n )\n elif block_type == 'er':\n block_args = dict(\n block_type=block_type,\n exp_kernel_size=_parse_ksize(options['k']),\n pw_kernel_size=pw_kernel_size,\n out_chs=int(options['c']),\n exp_ratio=float(options['e']),\n fake_in_chs=fake_in_chs,\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=int(options['s']),\n act_fn=act_fn,\n noskip=noskip,\n )\n elif block_type == 'cn':\n block_args = dict(\n block_type=block_type,\n kernel_size=int(options['k']),\n out_chs=int(options['c']),\n stride=int(options['s']),\n act_fn=act_fn,\n )\n else:\n assert False, 'Unknown block type (%s)' % block_type\n\n return block_args, num_repeat", "def find_time(string, format):\n re_format = format\n for key, value in six.iteritems(REGEX):\n re_format = 
re_format.replace(key, value)\n matches = re.finditer(re_format, string)\n for match in matches:\n try:\n matchstr = string[slice(*match.span())]\n dt = datetime.strptime(matchstr, format)\n except ValueError:\n continue\n else:\n yield dt", "def change_format_var_parser(text, tracker):\n param_names = format_var_parser(text)\n param_vars = {}\n for param_name in param_names:\n try:\n param_vars[param_name] = tracker.get_slot(param_name)\n except Exception as e:\n PYTHON_LOGGER.error(\"Error to get var name {}: {}\".format(param_name, e))\n return text.format(**param_vars)", "def _consume(self, block: str) -> None:\n self.buffer = []\n self.level = 0\n last_char = \"START\"\n for ch in char_iter(block):\n if ch in \"\\n\\r\":\n self._check_body_level()\n self.line_number += 1\n if self.in_comment:\n if ch == \"\\n\":\n self.in_comment = False\n elif ch in \" \\n\\t\\r\" and last_char in \" \\n\\t\\r\":\n pass\n elif ch == \"#\" and not self.in_value and not self.in_var_ref:\n self.in_comment = True\n elif not self.in_value and ch == \"(\":\n self._open_bracket(ch)\n elif not self.in_value and ch == \")\":\n self._close_bracket(ch)\n elif ch == '\"':\n self.in_value = not self.in_value\n self.buffer.append(ch)\n elif self.level == 1:\n if (\n ch == \"r\"\n and last_char == \"o\"\n and \"\".join(self.buffer).strip() == \"o\"\n ):\n self.n_ors += 1\n self.buffer = []\n elif ch == \"$\":\n self.param_buffer = self._parse_operator()\n self.in_var_ref = True\n self.buffer = [ch]\n elif self.in_var_ref and ch in \" \\n\\r\\t\":\n self._parse_var()\n elif ch == \">\" and last_char == \"-\":\n self._parse_label()\n else:\n self.buffer.append(ch)\n else:\n self.buffer.append(ch)\n last_char = ch\n if self.opened:\n raise ValueError(\n f\"Unmatched opening bracket \" f\"at line {self.line_number}\"\n )\n if self.n_ors and not self.is_union:\n raise ValueError(f'Missing \"or\" operator ' f\"at line {self.line_number}\")", "def subparse(block):\n\n verses = []\n context = None\n for char in block:\n\n if char == \"[\":\n if verses: verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n verses.append({\"surah\": \"\", \"verse\": \"\", \"quran\": \"\"})\n context = \"surah\"\n elif char == \":\" and context == \"surah\":\n verses[-1][\"surah\"] = int(verses[-1][\"surah\"])\n context = \"verse\"\n elif char == \"]\":\n verses[-1][\"verse\"] = int(verses[-1][\"verse\"])\n context = \"quran\"\n else: verses[-1][context] += char\n\n verses[-1][\"quran\"] = verses[-1][\"quran\"].strip()\n return verses", "def __parse_next(self, buffer):\n\t\ttoken = buffer.read(1)\n\t\t\n\t\t_tell = buffer.tell()\n\t\t# Is it an operator?\n\t\tif token == \"/\":\n\t\t\tnum, var = self.__parse_operator(buffer)\n\t\t\tif num is None:\n\t\t\t\tbuffer.seek(_tell - 1)\n\t\t\t\treturn \"$\"\n\t\t\t\n\t\t\tif isinstance(var, str):\n\t\t\t\treturn var\n\t\t\t\n\t\t\tret = (var / num)\n\t\t\tif isinstance(ret, Range):\n\t\t\t\tret = ret.min # XXX is this right?\n\t\t\tif int(ret) != ret:\n\t\t\t\treturn \"%.1f\" % ret\n\t\t\treturn str(int(ret))\n\t\t\n\t\tif token == \"*\":\n\t\t\tnum, var = self.__parse_operator(buffer)\n\t\t\tret = var * num\n\t\t\tif isinstance(ret, float):\n\t\t\t\tret = int(round(ret))\n\t\t\treturn str(ret)\n\t\t\n\t\t# Is it a conditional?\n\t\tif token == \"?\":\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tblocks = self.__parse_conditional(buffer)\n\t\t\t\n\t\t\t# Prepare the condition cache\n\t\t\t# This shouldn't be done here, but anyway...\n\t\t\tfor condition, value in 
blocks:\n\t\t\t\tcondition.evaluate({})\n\t\t\t\tself.conditions.extend(condition.identifiers)\n\t\t\t\n\t\t\t# blocks is a list of (condition, value) tuples\n\t\t\t# We evaluate the paperdoll against each of them\n\t\t\t# and return when we get a hit\n\t\t\t\n\t\t\tfor condition, value in blocks:\n\t\t\t\tif condition.evaluate(self.paperdoll):\n\t\t\t\t\treturn value\n\t\t\t\n\t\t\treturn\n\t\t\n\t\tif token == \"<\":\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tidentifier = self.__read_block(buffer, startchr=\"<\", endchr=\">\")\n\t\t\ttry:\n\t\t\t\tvalue = self.get_variable(identifier)\n\t\t\t\treturn SpellString(value).format(self.obj, proxy=self.proxy)\n\t\t\texcept VariableNotFound:\n\t\t\t\treturn \"<%s>\" % (identifier)\n\t\t\n\t\tif token == \"{\":\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tblock = self.__read_block(buffer, startchr=\"{\", endchr=\"}\")\n\t\t\t\n\t\t\t# Attempt to read decimals formatting\n\t\t\tdecimals = 0\n\t\t\ttoken = buffer.read(1)\n\t\t\tif token == \".\":\n\t\t\t\tdecimals = self.__read_number(buffer)\n\t\t\telif token:\n\t\t\t\t# Step one char back, only if we are not at the end\n\t\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\t\n\t\t\tblock = SpellString(block).format(self.obj, proxy=self.proxy, braced=True)\n\t\t\ttry: # FIXME\n\t\t\t\tblock = eval(block)\n\t\t\t\tif decimals:\n\t\t\t\t\tblock = round(block, decimals)\n\t\t\t\treturn \"%g\" % (block)\n\t\t\texcept Exception:\n\t\t\t\treturn \"[%s]\" % (block)\n\t\t\n\t\t# At this point, we need to check for functions and variables\n\t\t# but only if we don't already have a digit\n\t\tif not token.isdigit():\n\t\t\t_tell = buffer.tell()\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\tidentifier = self.__read_alpha(buffer)\n\t\t\t\n\t\t\tif identifier.lower() in FUNCTIONS:\n\t\t\t\targs = self.__parse_function_args(buffer)\n\t\t\t\treturn self.formatter.format_function(identifier, args)\n\t\t\t\n\t\t\tif identifier.lower() in PAPERDOLL_VALUES:\n\t\t\t\treturn self.formatter.format_paperdoll(identifier)\n\t\t\t\n\t\t\t\n\t\t\t# We didn't find any valid identifier\n\t\t\tif not identifier:\n\t\t\t\treturn \"$\"\n\t\t\t\n\t\t\t# Nothing left to check for but booleans\n\t\t\t# The values get messed with the identifier however, so we need to\n\t\t\t# look at only the first char\n\t\t\tif identifier[0] in BOOLEANS:\n\t\t\t\tidentifier = identifier[0]\n\t\t\t\tbuffer.seek(_tell)\n\t\t\t\tvalues = self.__parse_boolean(buffer)\n\t\t\t\treturn self.formatter.format_boolean(token, values)\n\t\t\n\t\t# It's probably a variable then\n\t\tbuffer.seek(-1, SEEK_CUR)\n\t\tspell, identifier, effect = self.__parse_macro(buffer)\n\t\t\n\t\tif identifier:\n\t\t\tspell = int(spell or 0)\n\t\t\teffect = int(effect or 1)\n\t\t\t\n\t\t\tvalue = self.formatter.format_macro(spell, identifier, effect)\n\t\t\tself.formatter.last_value = value\n\t\t\treturn str(value)\n\t\telse:\n\t\t\treturn \"$\"\n\t\t\n\t\tif not token or token.isspace():\n\t\t\treturn token\n\t\t\n\t\treturn token", "def _parseAttributeScanf(self, line, formatting):\n\n # multiple entrys\n if isinstance(formatting, list):\n for scanf_format in formatting:\n try:\n #print \"<<<----\", scanf_format, line\n return sscanf(line, scanf_format)\n except IncompleteCaptureError, e:\n pass\n\n # single entry\n else:\n return sscanf(line, formatting)\n\n # problem if none of the formats worked\n raise IncompleteCaptureError(\"Format error for %s\" % line)", "def eval_python_blocks(req, body):\n localsdict = {\"request\": req}\n globalsdict = {}\n\n old_stdout = sys.stdout\n old_stderr = 
sys.stderr\n\n try:\n start = 0\n while body.find(\"<%\", start) != -1:\n start = body.find(\"<%\")\n end = body.find(\"%>\", start) \n\n if start != -1 and end != -1:\n codeblock = body[start+2:end].lstrip()\n\n sys.stdout = StringIO.StringIO()\n sys.stderr = StringIO.StringIO()\n\n try:\n exec codeblock in localsdict, globalsdict\n\n except Exception, e:\n print \"ERROR in processing: %s\" % e\n\n output = sys.stdout.getvalue() + sys.stderr.getvalue()\n body = body[:start] + output + body[end+2:]\n\n finally:\n sys.stdout = old_stdout\n sys.stderr = old_stderr\n\n return body", "def _parse(self, line):\n\n while True:\n sign, before, after = _splitbycharset(line, self._checkstr)\n\n # End of line\n if not sign:\n if self._inside_quote:\n self._buffer.append(before)\n elif self._after_equal_sign:\n self._text(\"\".join(self._buffer) + before.strip())\n self._closetag()\n self._after_equal_sign = False\n elif not self._inside_attrib:\n self._buffer.append(before)\n elif before.strip():\n self._error(SYNTAX_ERROR, (self._currline, self._currline))\n break\n\n # Special character is escaped\n elif before.endswith(\"\\\\\") and not before.endswith(\"\\\\\\\\\"):\n self._buffer.append(before + sign)\n\n # Equal sign\n elif sign == \"=\":\n # Ignore if followed by \"{\" (DFTB+ compatibility)\n if after.lstrip().startswith(\"{\"): # _oldbefore may already contain the tagname, if the # tagname was followed by an attribute -> append\n self._oldbefore += before\n else:\n self._hsdattrib[common.HSD_ATTRIB_EQUAL] = True\n self._starttag(before, False)\n self._after_equal_sign = True\n\n # Opening tag by curly brace\n elif sign == \"{\":\n #self._has_child = True\n self._hsdattrib[common.HSD_ATTRIB_EQUAL] = False\n self._starttag(before, self._after_equal_sign)\n self._buffer = []\n self._after_equal_sign = False\n\n # Closing tag by curly brace\n elif sign == \"}\":\n self._text(\"\".join(self._buffer) + before)\n self._buffer = []\n # If 'test { a = 12 }' occurs, curly brace closes two tags\n if self._after_equal_sign:\n self._after_equal_sign = False\n self._closetag()\n self._closetag()\n\n # Closing tag by semicolon\n elif sign == \";\" and self._after_equal_sign:\n self._after_equal_sign = False\n self._text(before)\n self._closetag()\n\n # Comment line\n elif sign == \"#\":\n self._buffer.append(before)\n after = \"\"\n\n # Opening attribute specification\n elif sign == \"[\":\n if \"\".join(self._buffer).strip():\n self._error(SYNTAX_ERROR, (self._currline, self._currline))\n self._oldbefore = before\n self._buffer = []\n self._inside_attrib = True\n self._opened_tags.append((\"[\", self._currline, None, None, None))\n self._checkstr = _ATTRIB_SPECIALS\n\n # Closing attribute specification\n elif sign == \"]\":\n value = \"\".join(self._buffer) + before\n self._attrib = value.strip()\n self._inside_attrib = False\n self._buffer = []\n self._opened_tags.pop()\n self._checkstr = _GENERAL_SPECIALS\n\n # Quoting strings\n elif sign in (\"'\", '\"'):\n if self._inside_quote:\n self._checkstr = self._oldcheckstr\n self._inside_quote = False\n self._buffer.append(before + sign)\n self._opened_tags.pop()\n else:\n self._oldcheckstr = self._checkstr\n self._checkstr = sign\n self._inside_quote = True\n self._buffer.append(before + sign)\n self._opened_tags.append(('\"', self._currline, None, None, None))\n\n # Interrupt\n elif sign == \"<\" and not self._after_equal_sign:\n txtinc = after.startswith(\"<<\")\n hsdinc = after.startswith(\"<+\")\n if txtinc:\n self._text(\"\".join(self._buffer) + 
before)\n self._buffer = []\n self._eventhandler.add_text(self._include_txt(after[2:]))\n break\n if hsdinc:\n self._include_hsd(after[2:])\n break\n self._buffer.append(before + sign)\n\n else:\n self._error(SYNTAX_ERROR, (self._currline, self._currline))\n\n line = after", "def parse_template(self, template, **context):\n required_blocks = [\"subject\", \"body\"]\n optional_blocks = [\"text_body\", \"html_body\", \"return_path\", \"format\"]\n\n if self.template_context:\n context = dict(self.template_context.items() + context.items())\n blocks = self.template.render_blocks(template, **context)\n\n for rb in required_blocks:\n if rb not in blocks:\n raise AttributeError(\"Template error: block '%s' is missing from '%s'\" % (rb, template))\n\n mail_params = {\n \"subject\": blocks[\"subject\"].strip(),\n \"body\": blocks[\"body\"]\n }\n for ob in optional_blocks:\n if ob in blocks:\n if ob == \"format\" and mail_params[ob].lower() not in [\"html\", \"text\"]:\n continue\n mail_params[ob] = blocks[ob]\n return mail_params", "def parse_blocks(self, text):\n code_matches = [m for m in self.code_pattern.finditer(text)]\n\n # determine where the limits of the non code bits are\n # based on the code block edges\n text_starts = [0] + [m.end() for m in code_matches]\n text_stops = [m.start() for m in code_matches] + [len(text)]\n text_limits = list(zip(text_starts, text_stops))\n\n # list of the groups from the code blocks\n code_blocks = [self.new_code_block(**m.groupdict())\n for m in code_matches]\n\n text_blocks = [self.new_text_block(content=text[i:j])\n for i, j in text_limits]\n\n # remove indents\n list(map(self.pre_process_code_block, code_blocks))\n # remove blank line at start and end of markdown\n list(map(self.pre_process_text_block, text_blocks))\n\n # create a list of the right length\n all_blocks = list(range(len(text_blocks) + len(code_blocks)))\n\n # NOTE: the behaviour here is a bit fragile in that we\n # assume that cells must alternate between code and\n # markdown. This isn't the case, as we could have\n # consecutive code cells, and we get around this by\n # stripping out empty cells. i.e. 
two consecutive code cells\n # have an empty markdown cell between them which is stripped\n # out because it is empty.\n\n # cells must alternate in order\n all_blocks[::2] = text_blocks\n all_blocks[1::2] = code_blocks\n\n # remove possible empty text cells\n all_blocks = [cell for cell in all_blocks if cell['content']]\n\n return all_blocks", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def parse_cfg(cfgfile):\n file = open(cfgfile, 'r')\n lines = file.read().split('\\n') # store the lines in a list\n lines = [x for x in lines if len(x) > 0] # skip empty lines\n lines = [x for x in lines if x[0] != '#'] # skip comment\n lines = [x.rstrip().lstrip() for x in lines]\n file.close()\n\n block = {}\n blocks = []\n\n for line in lines:\n if line[0] == \"[\": # This marks the start of a new block\n if len(block) != 0:\n blocks.append(block)\n block = {}\n block['type'] = line[1:-1].rstrip()\n else:\n 
key, value = line.split(\"=\")\n block[key.rstrip()] = value.lstrip()\n blocks.append(block)\n\n return blocks", "def parse(\n data: str,\n raw: bool = False,\n quiet: bool = False\n) -> Dict:\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output: Dict = {}\n split_me = {'it_value:', 'it_interval:'}\n\n if jc.utils.has_data(data):\n\n for line in filter(None, data.splitlines()):\n\n # epoll files\n if line.startswith('tfd:'):\n line_match = re.findall(r'(?P<key>\\S+):(?:\\s+)?(?P<val>\\S+s*)', line)\n if line_match:\n raw_output.update({'epoll': {k.strip(): v.strip() for k, v in line_match}})\n continue\n\n # inotify files\n if line.startswith('inotify'):\n split_line = line[8:].split()\n raw_output['inotify'] = {}\n for item in split_line:\n k, v = item.split(':', maxsplit=1)\n raw_output['inotify'][k] = v\n continue\n\n # fanotify files\n if line.startswith('fanotify'):\n split_line = line[9:].split()\n\n if not 'fanotify' in raw_output:\n raw_output['fanotify'] = {}\n\n for item in split_line:\n k, v = item.split(':', maxsplit=1)\n raw_output['fanotify'][k] = v\n continue\n\n # timerfd files\n if line.split()[0] in split_me:\n split_line = line.replace(':', '').replace('(', '').replace(')', '').replace(',', '').split()\n raw_output[split_line[0]] = [int(x) for x in split_line[1:]]\n continue\n\n key, val = line.split(':', maxsplit=1)\n raw_output[key.strip()] = val.strip()\n continue\n\n return raw_output if raw else _process(raw_output)", "def parseConfig(f):\n config = {\"formats\":{}}\n \n for line in f:\n if line.startswith(\"//\"): \n continue\n \n sline = re.split(\"[=\\s]\", line)\n if sline[0] is \"\":\n continue\n \n if sline[0]==\"format\":\n #Puts the format as a key in the dict pointed to by \"formats\"\n config[\"formats\"][sline[1]] = sline[3] \n else:\n config[sline[0]] = sline[1]\n \n return config", "def parse_graph(self, graph_str, use_bs=True):\n blocks = graph_str.strip().split('BasicBlock ')\n \n # 1st iteration: collect block labels and node's bvars\n for block in blocks:\n if len(block) == 0: # skip empty lines\n continue\n\n block_var, instructions = block.split(':', 1)\n\n if instructions.find('<label>:') != -1:\n #raise Exception(\"Not supported\") # NOTE: legacy error, need to check when it happens though\n label = instructions.split('<label>:')[1].split(' ')[0]\n else:\n label = instructions.split('\\n', 1)[1].split(':')[0]\n\n if \"bd_\" in block_var: # only bd_ blocks can be start/end nodes\n # 'b' stands for block, 'd' for dominant\n if self._start_var is not None:\n self._end_var = block_var\n else:\n self._start_var = block_var\n\n self._label2var[label] = block_var\n self._var2label[block_var] = label\n\n # 2nd iteration: collect nodes and edges\n for block in blocks:\n if len(block) == 0: # skip empty lines\n continue\n\n # collect node\n block_var, instructions = block.split(':', 1)\n block_label = self._var2label[block_var]\n block_cost = int(instructions.split(' ')[1])\n block_dominator = instructions.split('Dominator = ', 1)[1].split('\\n', 1)[0]\n if block_dominator == 'NULL':\n block_dominator = \"-1\"\n else:\n block_dominator = block_dominator.split('_')[1]\n\n if block_var == self._start_var and use_bs:\n block_var = block_var.replace(\"bd_\", \"bs_\")\n\n b = Node(block_var, block_label, block_cost, block_dominator, self)\n\n self._nodes[b.get_uid()] = b\n self._label2uid[block_label] = b.get_uid()\n\n # collect edges\n if instructions.find('br ') != -1:\n for succ in 
instructions.split('br ')[1].strip().split('label'):\n succ = succ.strip()\n if len(succ) >= 2 and succ[0] == '%':\n dst_label = succ[1:] if succ[-1] != ',' else succ[1:-1]\n dst_var = self._label2var[dst_label]\n e = Edge(block_var, dst_var, 0, self)\n self._edges[e.get_uid()] = e\n\n if self._end_var is None:\n # single-block graph\n assert(len(self._nodes.keys()) <= 1)\n assert(len(self._edges.keys()) <= 0)\n self._end_var = self._start_var\n\n # update start/end uids\n self._start_uid = self._label2uid[self._var2label[self._start_var]]\n self._end_uid = self._label2uid[self._var2label[self._end_var]]\n\n # update nodes with ingoing/outgoing edges\n for edge_uid in self._edges.keys():\n edge = self._edges[edge_uid]\n src_block_uid = edge.get_src_uid()\n dst_block_uid = edge.get_dst_uid()\n self._nodes[src_block_uid].add_successor(dst_block_uid)\n self._nodes[dst_block_uid].add_predecessor(src_block_uid)\n return", "def highlightBlock(self, text):\n\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = expression.cap(nth).length()\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n self.setCurrentBlockState(0)", "def _parse_format(mode=2, rc_kw=None, **kwargs):\n kw = {}\n rc_kw = rc_kw or {}\n for key, value in kwargs.items():\n key_fixed = _rc_nodots.get(key, None)\n if key_fixed is None:\n kw[key] = value\n else:\n rc_kw[key_fixed] = value\n return rc_kw, mode, kw", "def initFormat(self):\n self.formatList = []", "def build_format(i, ex, args, meta_args):\n formatter = string.Formatter()\n format_string = meta_args.format_string\n fields = list(formatter.parse(format_string))\n\n kwarg_fields = []\n indexed_fields = []\n\n i.result = hive.variable('str')\n i.result_out = hive.pull_out(i.result)\n\n for index, field in enumerate(fields):\n literal_text = field[1]\n\n if literal_text is None:\n continue\n\n if not literal_text.isidentifier():\n field_name = \"field_{}\".format(index)\n indexed_fields.append(field_name)\n\n else:\n field_name = literal_text\n kwarg_fields.append(field_name)\n\n # Create IO\n attr = hive.variable()\n setattr(i, field_name, attr)\n\n in_attr = hive.pull_in(attr)\n setattr(i, \"{}_in\".format(field_name), in_attr)\n\n setattr(ex, field_name, hive.antenna(in_attr))\n hive.trigger(i.result_out, in_attr, pretrigger=True)\n\n ex.result = hive.output(i.result_out)\n\n def do_format(self):\n args = [getattr(self, \"_{}\".format(attr_name)) for attr_name in indexed_fields]\n kwargs = {attr_name: getattr(self, \"_{}\".format(attr_name)) for attr_name in kwarg_fields}\n self._result = formatter.format(format_string, *args, **kwargs)\n\n i.func = hive.modifier(do_format)\n hive.trigger(i.result_out, i.func, pretrigger=True)", "def parse(input):\n\t\n\toutput = []\n\t\n\t# Comment delimiter of the docstring\n\tcommentDelim = '\"\"\"'\n\t\n\t# Some regexes\n\ttriggerRe = re.compile(\"^(\\s*)(def .+:|class .+:)\")\n\tcommentStartRe = re.compile('^\\s*(%s)' % commentDelim)\n\tcommentEndRe = re.compile('(%s)\\s*$' % commentDelim)\n\temptyRe = re.compile(\"^\\s*$\")\n\thashLineRe = re.compile(\"^\\s*#.*$\")\n\timportLineRe = re.compile(\"^\\s*(import |from .+ import)\")\n\t\n\t# split input into lines\n\tlines = input.split(\"\\n\")\n\t\n\t# flags, buffers, ...\n\tfileHeadFlag = True\n\ttriggerWordFlag = False\n\tcommentFlag = False\n\tcomment = []\n\ttriggerWs = \"\"\n\ttriggerLines = 
None\n\t\n\t# process each line\n\tfor line in enumerate(lines):\n\n\t\tmatch = re.search(triggerRe, line[1])\n\t\tif match:\n\t\t\tif triggerWordFlag and triggerLines:\n\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\n\t\t\ttriggerWordFlag = True\n\t\t\ttriggerWs = match.group(1)\n\t\t\tfileHeadFlag = False\n\t\t\ttriggerLines = [line[1]]\n\t\t\tcontinue\n\n\t\t# file header or active keyword trigger?\n\t\tif fileHeadFlag or triggerWordFlag:\n\t\t\t# comment end of multiline comment found\n\t\t\tif re.search(commentEndRe, line[1]) and commentFlag:\n\t\t\t\tcomment.append( line[1][ : line[1].rfind(commentDelim) ] )\n\t\t\t\toutput.append(makeCommentBlock(comment, triggerWs, (triggerLines is None)))\n\t\t\t\tif triggerLines:\n\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\tcomment = []\n\t\t\t\tcommentFlag = False\n\t\t\t\ttriggerWs = \"\"\n\t\t\t\ttriggerLines = None\n\t\t\t\ttriggerWordFlag = False\n\t\t\t\t\n\t\t\t# comment start found\n\t\t\telif re.search(commentStartRe, line[1]):\n\t\n\t\t\t\tif re.search(commentEndRe, line[1][line[1].find(commentDelim)+len(commentDelim) :]):\n\t\t\t\t\t# singleline comment\n\t\t\t\t\tcomment.append(line[1][line[1].find(commentDelim)+len(commentDelim) : line[1].rfind(commentDelim)])\n\t\t\t\t\toutput.append(makeCommentBlock(comment, triggerWs))\n\t\t\t\t\t\n\t\t\t\t\tif triggerLines:\n\t\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\t\t\t\n\t\t\t\t\tcomment = []\n\t\t\t\t\tcommentFlag = False\n\t\t\t\t\ttriggerWs = \"\"\n\t\t\t\t\ttriggerLines = None\n\t\t\t\t\ttriggerWordFlag = False\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# multiline comment begin\n\t\t\t\t\tcommentFlag = True\n\t\t\t\t\tcomment.append(\n\t\t\t\t\t\tline[1][line[1].find(commentDelim)+len(commentDelim):]\n\t\t\t\t\t)\n\t\n\t\t\t# active multiline comment -> append comment\n\t\t\telif commentFlag:\n\t\t\t\tcomment.append(line[1])\n\t\t\t\n\t\t\t# still searching for comment\n\t\t\telif re.search(emptyRe, line[1]):\n\t\t\t\tif triggerLines:\n\t\t\t\t\ttriggerLines.append(line[1])\n\t\t\t\telse:\n\t\t\t\t\toutput.append(line[1])\n\t\t\t\n\t\t\t# searching for file header\n\t\t\telif fileHeadFlag:\n\t\t\t\tif not (re.search(hashLineRe, line[1]) or re.search(emptyRe, line[1]) or re.search(importLineRe, line[1])):\n\t\t\t\t\t# fileheader over -> disable search\n\t\t\t\t\tfileHeadFlag = False\n\t\t\t\toutput.append(line[1])\n\t\t\t\n\t\t\t# no comment, disable comment search mode\n\t\t\telse:\n\t\t\t\ttriggerWordFlag = False\n\t\t\t\tif triggerLines:\n\t\t\t\t\toutput.append(\"\\n\".join(triggerLines))\n\t\t\t\ttriggerLines = None\n\t\t\t\toutput.append(line[1])\n\t\t\n\t\t# just append the line\n\t\telse:\n\t\t\toutput.append(line[1])\n\t\n\t# return output\n\treturn \"\\n\".join(output)", "def parse_name_and_type_from_fmt_str(\n formatted_str: str,\n allowed_types: Optional[Dict[str, Component]] = None\n) -> Generator[Tuple[str, Type[Field]], None, None]:\n for _, arg_name, _type_name, _ in Formatter().parse(formatted_str):\n if arg_name is not None:\n try:\n assert _type_name is not None\n _type = (\n allowed_types[_type_name] if allowed_types is not None\n and _type_name in allowed_types\n else getattr(pyopenapi3.data_types, _type_name)\n )\n yield arg_name, _type\n except AttributeError:\n raise ValueError(\n \"A non-`Field` or `OpenApiObject` type was found. \"\n f\"Can't use `{_type_name}` as a type in {formatted_str}. 
\"\n f\"Must be a stringified pyopenapi3 `data_type`, such \"\n f\"as `pyopenapi3.data_types.String`, or a reference to a \"\n f\"Component.\"\n ) from None", "def from_string(block_string: str):\n ops = block_string.split(\"_\")\n options = {}\n for op in ops:\n splits = re.split(r\"(\\d.*)\", op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # check stride\n stride_check = (\n (\"s\" in options and len(options[\"s\"]) == 1)\n or (len(options[\"s\"]) == 2 and options[\"s\"][0] == options[\"s\"][1])\n or (len(options[\"s\"]) == 3 and options[\"s\"][0] == options[\"s\"][1] and options[\"s\"][0] == options[\"s\"][2])\n )\n if not stride_check:\n raise ValueError(\"invalid stride option received\")\n\n return BlockArgs(\n num_repeat=int(options[\"r\"]),\n kernel_size=int(options[\"k\"]),\n stride=int(options[\"s\"][0]),\n expand_ratio=int(options[\"e\"]),\n input_filters=int(options[\"i\"]),\n output_filters=int(options[\"o\"]),\n id_skip=(\"noskip\" not in block_string),\n se_ratio=float(options[\"se\"]) if \"se\" in options else None,\n )", "def format(self, formatter, form, title):\n # store line offsets in self.lines\n self.lines = [0, 0]\n pos = 0\n while 1:\n pos = self.raw.find('\\n', pos) + 1\n if not pos: break\n self.lines.append(pos)\n self.lines.append(len(self.raw))\n\n # parse the source and write it\n self.pos = 0\n text = io.StringIO(self.raw)\n self.out.write('<html><body bgcolor=\"#e0e0e0\"><head><title>%s</title></head><pre><font face=\"Lucida,Courier New\">'%title)\n try:\n if (sys.version_info > (3, 0)):\n tokens = tokenize.generate_tokens(text.readline)\n for toktype,toktext,tok_start,tok_end,line in tokens:\n self.__call__(toktype,toktext,tok_start,tok_end,line)\n else:\n tokenize.tokenize(text.readline,self)\n except tokenize.TokenError as ex:\n msg = ex.args[0]\n line = ex.args[1][0]\n self.out.write(\"<h3>ERROR: %s</h3>%s\\n\" % (\n msg, self.raw[self.lines[line]:]))\n self.out.write('</font></pre></body></html>')", "def format(self, formatter):\n WikiParser.format(self, formatter)\n # reset those states every time format is done\n self.break_next = False\n self.prev_list = False", "def read_atom_data(atom_format, line):\n if atom_format is None:\n _format = [guess_string_format(i.strip()) for i in line.split()]\n else:\n _format = atom_format\n formatted = []\n for i, (val, fmti) in enumerate(zip(line.split(), _format)):\n istrip = val.strip()\n try:\n formatted.append(fmti(istrip))\n except ValueError:\n fmt = guess_string_format(istrip)\n _format[i] = fmt\n formatted.append(fmt(istrip))\n return formatted, _format", "def test_regexp_chunk_parser():", "def parse_shortform_block_annotation(description):\n if not description:\n return {}\n # TODO: Test encoded string formatting using regexp\n\n ref_version, chrom, scaffold, ori, gpos, _, coords = description.split('_')\n coords = [slice(int(start) - 1, int(stop))\n for start, stop in [c.split(':') for c in coords.split(';')]]\n\n return {\n 'ref_version': ref_version,\n 'chromosome': chrom,\n 'chromosome_scaffold': int(scaffold),\n 'genome_pos': int(gpos) - 1,\n 'orientation': ori,\n 'blocks': coords,\n }", "def parse_block_math(self, m):\n self.tokens.append({\n 'type': 'block_math',\n 'text': m.group(1)\n })", "def reparseText(parsed):\n out = []\n buffer = ''\n for type, data in parsed:\n if type is RAW:\n buffer += data\n else:\n if buffer:\n b = re.sub(r'\\s+', ' ' , buffer)\n out.append((RAW, b))\n buffer = ''\n out.append((type,data))\n if buffer:\n b = re.sub(r'\\s+', ' ' , 
buffer)\n out.append((RAW, b))\n return out", "def commonmark(text, format=\"html\"):\n parser = Parser()\n ast = parser.parse(text)\n if format not in [\"html\", \"json\", \"ast\"]:\n raise ValueError(\"format must be 'html', 'json' or 'ast'\")\n if format == \"html\":\n renderer = HTMLRenderer()\n return renderer.render(ast)\n if format == \"json\":\n return ASTtoJSON(ast)\n if format == \"ast\":\n return dumpAST(ast)", "def highlightBlock(self, text):\n # Do other syntax formatting\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)", "def parse(self, input):\n pass", "def register_data_format(format_name,parser):\n\n data_format_parser[format_name] = parser" ]
[ "0.64509463", "0.61767745", "0.6141088", "0.6093478", "0.59834534", "0.59391457", "0.5813534", "0.5672864", "0.565769", "0.5619488", "0.5604256", "0.5531012", "0.5529596", "0.55168927", "0.54857767", "0.545964", "0.5454546", "0.5443225", "0.54375136", "0.54195976", "0.5334066", "0.5294258", "0.52878046", "0.52471036", "0.52260244", "0.5211148", "0.5207812", "0.5192551", "0.5192131", "0.5188234", "0.5181986", "0.516625", "0.5156391", "0.5146858", "0.5127734", "0.51167744", "0.5112718", "0.5108214", "0.510674", "0.51062196", "0.5087044", "0.507185", "0.5070044", "0.50675386", "0.504869", "0.50477725", "0.5043177", "0.4994124", "0.49899256", "0.4957978", "0.494674", "0.49395174", "0.4930837", "0.49271208", "0.4925568", "0.4924901", "0.49181744", "0.49181744", "0.49149057", "0.49130347", "0.4907486", "0.4904251", "0.49040323", "0.4901149", "0.48774403", "0.48737454", "0.4872731", "0.48693103", "0.48654395", "0.48566082", "0.48276517", "0.4822794", "0.48206964", "0.48145694", "0.48090145", "0.48029166", "0.4797748", "0.4792716", "0.4788952", "0.4782847", "0.4781099", "0.4768461", "0.4766974", "0.47593212", "0.47561", "0.47545156", "0.47518572", "0.47505465", "0.47385955", "0.47381458", "0.473445", "0.47305465", "0.47220948", "0.47200245", "0.47192532", "0.47171953", "0.47148168", "0.4685674", "0.46790516", "0.46754774" ]
0.73439676
0
Format a string, substituting placeholders which can be found in param_dict, attributes of the supplied module, or provided via calls to the attr_getter function.
def format( self, format_string, module=None, param_dict=None, force_composite=False, attr_getter=None, ): if param_dict is None: param_dict = {} # if the processed format string is not in the cache then create it. if format_string not in self.block_cache: self.build_block(format_string) first_block = self.block_cache[format_string] def get_parameter(key): """ function that finds and returns the value for a placeholder. """ if key in param_dict: # was a supplied parameter param = param_dict.get(key) elif module and hasattr(module, key): param = getattr(module, key) if hasattr(param, "__call__"): # we don't allow module methods raise Exception() elif attr_getter: # get value from attr_getter function try: param = attr_getter(key) except: # noqa e722 raise Exception() else: raise Exception() if isinstance(param, Composite): if param.text(): param = param.copy() else: param = "" return param # render our processed format valid, output = first_block.render(get_parameter, module) # clean things up a little if isinstance(output, list): output = Composite(output) if not output: if force_composite: output = Composite() else: output = "" return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_param(string, param, value, param_format=None):\n\n if param_format == \"json\":\n return sub(r\"(?P<json_replacement>\\\"%s\\\"\\s*:\\s*)\\\"\\s*\\\"\" %\n escape(str(param)), \"\\\\1\\\"%s\\\"\" % value, string)\n elif param_format == \"header\":\n return sub(r\"%s=[^\\\\n]*\" % escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)\n else:\n return sub(r\"%s=[^&]*\" % escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)", "def _params_formatter(field, description):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(rst.escape(field['name']))\n tail = description\n return heads, tail", "def _replace_params(self, text: str, control: cat.Control, param_dict: Dict[str, prof.SetParameter]) -> str:\n if control.params is not None:\n for param in control.params:\n # set default if no information available for text\n param_text = f'[{param.id} = no description available]'\n set_param = param_dict.get(param.id, None)\n # param value provided so just replace it\n if set_param is not None:\n values = [value.__root__ for value in set_param.values]\n param_text = values[0] if len(values) == 1 else f\"[{', '.join(values)}]\"\n else:\n # if select present, use it\n if param.select is not None:\n param_text = '['\n if param.select.how_many is not None:\n param_text += f'{param.select.how_many.value}: '\n if param.select.choice is not None:\n param_text += ', '.join(param.select.choice)\n param_text = f'{param_text}]'\n # else use the label\n if param.label is not None:\n param_text = f'[{param.label}]'\n # this needs to be a regex match to distinguish param_1 from param_10\n pattern = re.compile(f'{param.id}(?:[^0-9a-zA-Z._\\-#@])')\n text = pattern.sub(param_text, text)\n\n # strip {{ }}\n pattern = re.compile('( *{{| *}})')\n text = pattern.sub('', text)\n text = text.replace('insert: param, ', '').strip()\n\n return text", "def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def format_parameter(param, required):\n\n param_string = check_param(flatten_param(param))\n if not required:\n param_string += '=None'\n return param_string", "def _parameterize_string(raw):\n\n parts = []\n s_index = 0\n\n for match in _PARAMETER_PATTERN.finditer(raw):\n parts.append(raw[s_index:match.start()])\n parts.append({u\"Ref\": match.group(1)})\n s_index = match.end()\n\n if not parts:\n return GenericHelperFn(raw)\n\n parts.append(raw[s_index:])\n return GenericHelperFn({u\"Fn::Join\": [u\"\", parts]})", "def _params_formatter(field):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(field['name'])\n tail = field.get('description', '')\n return heads, tail", "def call_str(pvs):\n s = \"'{}', '{}'\".format(pvs.get('place'), pvs.get('stat_var'))\n if pvs.get('measurement_method'):\n s += \", measurement_method='{}'\".format(\n pvs.get('measurement_method'))\n if pvs.get('observation_period'):\n s += \", observation_period='{}'\".format(\n pvs.get('observation_period'))\n if pvs.get('unit'):\n s += \", unit='{}'\".format(pvs.get('unit'))\n if pvs.get('scaling_factor'):\n s += \", scaling_factor={}\".format(pvs.get('scaling_factor'))\n return s", "def change_format_var_parser(text, tracker):\n param_names = 
format_var_parser(text)\n param_vars = {}\n for param_name in param_names:\n try:\n param_vars[param_name] = tracker.get_slot(param_name)\n except Exception as e:\n PYTHON_LOGGER.error(\"Error to get var name {}: {}\".format(param_name, e))\n return text.format(**param_vars)", "def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF", "def format_arguments(data: Dict) -> str:\n\n def prep(key: str, value: Any) -> str:\n if isinstance(value, str):\n value = f'''\"{value.replace('\"', \"'\")}\"'''\n if key == \"pattern\":\n value = f\"r{value}\"\n return f\"{key}={value}\"\n\n return \",\\n\".join([prep(key, value) for key, value in data.items()])", "def format(self, *args, **kwargs) -> String:\n pass", "def test_mixed_manual_positional_arguments_with_attribute_access_arguments():\n '{0}{1[FOO]}'.format(123, {'FOO': 456})\n '{0}{1[FOO]}'.format(123, {'FOO': 456}, 321)\n '{0}{1[FOO]}'.format(123)", "def format(value, arg):\n try:\n if value is not None:\n # return (str(arg)) % value\n return (str(value)) % arg\n else:\n return \"\"\n except (ValueError, TypeError):\n return \"\"", "def format_string(self, template):\n out_str = \"\"\n search_property_name = \"\"\n in_property = False\n for char in template:\n if (in_property):\n if (char == '%'):\n if (len(search_property_name) > 0):\n prop_value = \"\"\n try:\n prop_value = str(self.get_property(search_property_name))\n except KeyError:\n pass\n out_str += prop_value\n search_property_name = \"\"\n in_property = False\n else:\n search_property_name += char\n else:\n if (char == '%'):\n in_property = True\n else:\n out_str += char\n\n # Handle unterminated property names\n if (in_property):\n out_str += '%'\n out_str += search_property_name\n\n return out_str", "def pformat(class_instance):\n s = ''\n for var, val in vars(class_instance).items():\n s += var + ': ' + str(val) + '\\n'\n return s[:-1]", "def format(self, value, key=None, **kwargs):\n if not isinstance(value, str):\n return value\n\n # always format strings using the root so the full path is available\n if self.root:\n return self.root.format(value, key, **kwargs)\n\n variables = CONFIG_VARIABLE_PATTERN.findall(value)\n expanded = {}\n for variable in variables:\n if variable not in kwargs:\n try:\n root_key = variable.split(\".\")[0]\n root = self.root if self.root else self\n\n expanded[root_key] = self.format(getattr(root, root_key), variable, **kwargs)\n except AttributeError:\n raise MissingConfiguration(variable, key)\n\n expanded.update(**kwargs)\n return value.format(**expanded)", "def format(self, valDict):\n return self._formatStr % valDict", "def replace_param_occurrences(string, params):\n for k, v in params.items():\n string = string.replace(k, str(v))\n return string", "def __repr_parameter__(self, name: str, value: Any) -> str:\n return f\"{name}={value!r}\"", "def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s", "def printParameter (a,b):\n print(a)\n print(b)\n print(\"this is formating of {} and {}\".format(a,b))", "def string(self, string, values=None):\n if values:\n values = self.values(values)\n return string.format(*values)\n return string", "def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)", "def get_str(self, item: str, fmt: str = \"{}\") 
-> str:\n return fmt.format(self[item])", "def format_directive(module, package=None):\n directive = '.. automodule:: %s\\n' % makename(package, module)\n for option in OPTIONS:\n directive += ' :%s:\\n' % option\n return directive", "def autoformat(\n cls: Type[U] = None,\n /,\n params: Union[str, Iterable[str]] = ( # pylint: disable=unsubscriptable-object\n \"message\",\n \"msg\",\n ),\n):\n if isinstance(params, str):\n params = (params,)\n\n if cls is None:\n return functools.partial(autoformat, params=params)\n\n orig_init = cls.__init__\n signature = inspect.signature(orig_init)\n params = signature.parameters.keys() & set(params)\n\n @functools.wraps(orig_init)\n def init(*args, **kwargs):\n bounds = signature.bind(*args, **kwargs)\n bounds.apply_defaults()\n pre_formatted = {\n name: bounds.arguments.pop(name)\n for name in params\n if name in bounds.arguments\n }\n formatted = {\n name: string.format(**bounds.arguments)\n for name, string in pre_formatted.items()\n }\n for name, arg in formatted.items():\n bounds.arguments[name] = arg\n return orig_init(*bounds.args, **bounds.kwargs)\n\n # init.__signature__ = signature\n setattr(cls, \"__init__\", init)\n return cls", "def __getattr__(self, name):\n if name in self:\n if (isinstance(self[name], dict)):\n return self._format_dict(self[name])\n else:\n return str(self[name])\n else:\n return \"\"", "def format_string(self, pat=None, pat_args={}):\n if pat is None:\n pat = self.parent.pat\n if pat_args == {}:\n pat_args = self.parent.pat_args\n return entry_format.output(self, pat, pat_args)", "def repr_str(obj, attrs, update=None):\n props = [\n (attr, value)\n for attr, value in (\n (attr, getattr(obj, attr))\n for attr in attrs\n )\n if value is not None\n ]\n\n if update is not None:\n props = update(props)\n\n props_str = ', '.join('%s=%s' % (attr, value) for attr, value in props)\n return \"<{klass}: {props_str}>\".format(\n klass=obj.__class__.__name__,\n props_str=props_str,\n )", "def replace_in_string(s, args_dict):\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n return s", "def _printAttributePrintf(self, formatting, value):\n\n # multiple entrys\n if isinstance(formatting, list):\n\n for scanf_format in formatting:\n try:\n #print \"-->>\", scanf_format, value\n return scanf_format % value\n except TypeError, e:\n pass\n\n # single entry\n else:\n return formatting % value\n\n # problem if none of the formats worked\n raise TypeError(\"Valid format not found for values.\")", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def format(*args, **kwargs):\n if args:\n print ', '.join([str(s) for s in args])\n if kwargs:\n sub_items = []\n for k, v in kwargs.items():\n sub_items.append('{}={}'.format(k, v))\n print ', '.join(sub_items)", "def _format_api_string(self):\n api_string = self.api_name\n arg_string_list = []\n if not self.api_args is None and \\\n len(self.api_args) > 0:\n for key in self.api_args:\n try:\n value = self.api_args[key]\n except TypeError:\n #assert False, f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n # f\" type: {self.api_args.__class__.__name__}\"\n print(f\"node: {self.api_name} key: {key} bad arg: {self.api_args}\" + \\\n f\" type: {self.api_args.__class__.__name__}\")\n raise TypeError\n if isinstance(value, list):\n value_string = \"[ \" + 
\",\".join([self._api_value_string(x) for x in value]) + \" ]\"\n else:\n if value is None:\n assert False, f\"key={key}\"\n value_string = self._api_value_string(value)\n arg_string_list.append(f\"{key}:{value_string}\")\n api_string += \"(\" + \" \".join(arg_string_list) + \")\"\n #assert False, f\"{self.api_name}: {api_string}\"\n return api_string", "def format(obj): # pylint: disable=W0622\n# print '>>', obj\n if hasattr(obj, 'format'):\n return obj.format()\n return \"%s\" % obj", "def make_string_repr(instance):\n arg_list = [] if args is None else [getattr(instance, arg) for arg in args if hasattr(instance, arg)]\n\n kwarg_dict = {} if kwargs is None else {key: getattr(instance, key) for key in kwargs if hasattr(instance, key)}\n\n # Check that we could bind the args/kwargs that found matches to the __init__ method\n # Basically this is checking that any arguments we didn't find on the instance have default values\n signature(instance.__class__).bind(*arg_list, **kwarg_dict)\n\n return instance.__class__.__name__ + '(' + format_arguments(*arg_list, **kwarg_dict) + ')'", "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def call_spec_string():\n # pylint: disable=protected-access\n frame = sys._getframe(1)\n argvals = inspect.getargvalues(frame)\n if argvals.args[0] == 'self':\n return inspect.formatargvalues(argvals.args[1:], *argvals[1:])\n else:\n return inspect.formatargvalues(*argvals)", "def string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def format(self, **kw):\n params = self.defaults.copy()\n params.update(kw)\n if self.filter:\n self.filter(self, params)\n msg = self.msg\n if self.key is not None:\n key = self.key.format(**params)\n msg = msg[key]\n return msg.format(**params)", "def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)", "def reparam(string_, dictionary):\n dictionary = dictionary.copy() # eval mucks with it\n # disable builtins to avoid risk for remote code exection.\n dictionary['__builtins__'] = object()\n vals = []\n result = []\n for live, chunk in _interpolate(string_):\n if live:\n v = eval(chunk, dictionary)\n result.append(sqlquote(v))\n else: \n result.append(chunk)\n return SQLQuery.join(result, '')", "def render_param(self, format):\n\t\tdef renderer(ctx, data):\n\t\t\tparName = ctx.tag.children[0].strip()\n\t\t\tctx.tag.clear()\n\t\t\ttry:\n\t\t\t\tval = data.getParam(parName)\n\t\t\t\tif val is None:\n\t\t\t\t\treturn ctx.tag[\"N/A\"]\n\n\t\t\t\treturn 
ctx.tag[format%val]\n\t\t\texcept base.NotFoundError:\n\t\t\t\treturn ctx.tag[\"N/A\"]\n\t\treturn renderer", "def update_placeholders(self, format_string, placeholders):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if token.group(\"key\") in placeholders:\n output.append(\n \"{{{}{}}}\".format(placeholders[token.group(\"key\")], token.group(\"format\"))\n )\n continue\n elif token.group(\"command\"):\n # update any placeholders used in commands\n commands = parse_qsl(token.group(\"command\"), keep_blank_values=True)\n # placeholders only used in `if`\n if \"if\" in [x[0] for x in commands]:\n items = []\n for key, value in commands:\n if key == \"if\":\n # we have to rebuild from the parts we have\n condition = Condition(value)\n variable = condition.variable\n if variable in placeholders:\n variable = placeholders[variable]\n # negation via `!`\n not_ = \"!\" if not condition.default else \"\"\n condition_ = condition.condition or \"\"\n # if there is no condition then there is no\n # value\n if condition_:\n value_ = condition.value\n else:\n value_ = \"\"\n value = \"{}{}{}{}\".format(not_, variable, condition_, value_)\n if value:\n items.append(f\"{key}={value}\")\n else:\n items.append(key)\n\n # we cannot use urlencode because it will escape things\n # like `!`\n output.append(r\"\\?{} \".format(\"&\".join(items)))\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def formatargvalues(args, varargs, varkw, locals,\r\n formatarg=str,\r\n formatvarargs=lambda name: '*' + name,\r\n formatvarkw=lambda name: '**' + name,\r\n formatvalue=lambda value: '=' + repr(value),\r\n join=joinseq):\r\n def convert(name, locals=locals,\r\n formatarg=formatarg, formatvalue=formatvalue):\r\n return formatarg(name) + formatvalue(locals[name])\r\n specs = []\r\n for i in range(len(args)):\r\n specs.append(strseq(args[i], convert, join))\r\n if varargs:\r\n specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))\r\n if varkw:\r\n specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))\r\n return '(' + string.join(specs, ', ') + ')'", "def _formatted_string(self, message: str, dict_values: dict) -> str:\n formatted_values = self._format_values_in_map(dict_values)\n return message.format(**formatted_values)", "def format_person(adj, dict):\n name = dict['name']\n net = int(dict['net_worth (USD)'])\n return \"The {0} person is {1}. 
They have a net worth of ${2:,}\".format(adj, name, net)", "def format_params(self, params):\n return params", "def safe_format(\n format: str,\n dictionary: dict[str, Any],\n types: set[type[Any]] | None = None,\n adapt: 'Callable[[str], str] | None' = None,\n raise_on_missing: bool = False\n) -> str:\n\n types = types or {int, str, float}\n output = StringIO()\n buffer = StringIO()\n opened = 0\n\n for ix, char in enumerate(format):\n if char == '[':\n opened += 1\n\n if char == ']':\n opened -= 1\n\n if opened == 1 and char != '[' and char != ']':\n print(char, file=buffer, end='')\n continue\n\n if opened == 2 or opened == -2:\n if buffer.tell():\n raise RuntimeError(\"Unexpected bracket inside bracket found\")\n\n print(char, file=output, end='')\n opened = 0\n continue\n\n if buffer.tell():\n k = adapt(buffer.getvalue()) if adapt else buffer.getvalue()\n\n if raise_on_missing and k not in dictionary:\n raise RuntimeError(\"Key '{}' is unknown\".format(k))\n\n v = dictionary.get(k, '')\n t = type(v)\n\n if t not in types:\n raise RuntimeError(\"Invalid type for '{}': {}\".format(k, t))\n\n print(v, file=output, end='')\n buffer = StringIO()\n\n if char != '[' and char != ']':\n print(char, file=output, end='')\n\n if opened != 0:\n raise RuntimeError(\"Uneven number of brackets in '{}'\".format(format))\n\n return output.getvalue()", "def format(fmt, st):\n ret = \"\"\n if not st: return ret\n if fmt not in valid_combos:\n return st\n cm = charmap[fmt]\n for c in st:\n ret += cm.get(c, c)\n return ret", "def format_arg(arg_name: str, value: Any, max_length: int = 200) -> str:\n return \"{arg_name}={value}\".format(\n arg_name=arg_name, value=trim_string(repr(value), max_length=max_length)\n )", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", string_for_substitution):\n return_string = re.sub(\"//%%\" + i + \"%%//\", dictionary_of_vars[i],\n return_string)\n return return_string", "def trans_format(trans_key, **kwargs):\n translated: str = _(trans_key)\n return translated.format(**kwargs)", "def format_directive(self, module, package):\n if module:\n automodule = '%s.%s' % (package, module)\n else:\n automodule = package\n\n directive = '.. 
automodule:: %s\\n' % automodule\n for option in OPTIONS:\n directive += ' :%s:\\n' % option\n return directive", "def format_str(string, dictionary):\n tokens = string.split('}')\n output = []\n for token in tokens[:-1]:\n if token.split('{')[-1] in dictionary.keys():\n output.append((token + '}').format(**dictionary))\n else:\n output.append(token + '}')\n\n return ''.join(output) + tokens[-1]", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def __format__(self, format_spec):\n # Reject anything that isn't an s\n if format_spec[-1] != 's':\n raise ValueError('{} format specifier not understood for this object',\n format_spec[:-1])\n # Output in this example will be (<a>,<b>,<c>)\n raw = \"(\" + \",\".join([str(self.a), str(self.b), str(self.c)]) + \")\"\n # Honor the format language by using the inbuilt string format\n # Since we know the original format_spec ends in an 's'\n # we can take advantage of the str.format method with a\n # string argument we constructed above\n return \"{r:{f}}\".format( r=raw, f=format_spec )", "def _format_parameter_output(self, parameters: dict) -> str:\n \n output = ''\n for key, value in parameters.items():\n output = output + '\\t\\t' + str(key) + ': ' + str(value) + '\\n'\n \n return output", "def format_parameter_list(parameters):\n items = sorted(dict(parameters).items())\n return \" \".join([\"=\".join([key, repr(str(value))]) for (key,value) in items])", "def getString(self):\n string = self.itemType.find('format').text.strip()\n paramString = string[string.find('('):]\n string = string[:string.find('(')]\n for i in self.params.keys():\n paramString = paramString.replace(i,str(self.params[i]) if isFloat(str(self.params[i])) else '\"'+str(self.params[i]).replace('\"','\\\\\"')+'\"',1)\n return string+paramString", "def get_param_htmldesc(linker, func):\r\n import inspect\r\n # XXX copy and modify formatargspec to produce html\r\n return inspect.formatargspec(*inspect.getargspec(func))", "def format_param_pairs(self, params_pairs):\n out = \"\"\n for param in params_pairs:\n out += \"{} {} \".format(*param)\n return out", "def _pretty_print(self,s):\n if not self.pretty_parameters:\n return s\n else:\n n = s.replace(\"_\",\" \")\n n = n.capitalize()\n return n", "def __make_description(self, param_name):\n value = self._status.get_value(param_name)\n if round(value) != value:\n # Parameter is a float. 
Limit to three decimals.\n value = \"%.3f\" % (value)\n\n return \"%s (%s)\" % (param_name, str(value))", "def format(*args, stringArg: Union[AnyStr, List[AnyStr]]=\"\", **kwargs)->AnyStr:\n pass", "def get__repr__(self, params):\n\n def format_params(params1):\n if at_least_python3:\n items = params1.items()\n else:\n items = list(params1.items())\n for k, v in sorted(items, key=itemgetter(0), reverse=True):\n is_text = isinstance(v, str)\n if is_text and not at_least_python3:\n v = v.encode('utf-8')\n yield '{k}=\"{v}\"'.format(k=k, v=v) if is_text else '{k}={v}'.format(k=k, v=v)\n\n return '{class_name}({params})'.format(\n class_name=self.__class__.__name__,\n params=\", \".join(list(format_params(params)))\n )", "def __format__(self, formatstr):\n if formatstr.strip() == '': # Defualt behaviour mirrors self.__str__()\n formatstr = '+.3f'\n\n string = \\\n \"{:\" + formatstr +\"} \" + \\\n \"{:\" + formatstr +\"}i \" + \\\n \"{:\" + formatstr +\"}j \" + \\\n \"{:\" + formatstr +\"}k\"\n return string.format(self.q[0], self.q[1], self.q[2], self.q[3])", "def dict2argstr(d: Dict[str, Any]) -> str:\n return \",\".join(\"{!s}={!r}\".format(key, val) for (key, val) in d.items())", "def docstring_format(*values):\n\n def _decorator_(function):\n function.__doc__ = function.__doc__.format(*values).replace('_', '\\_')\n return function\n\n return _decorator_", "def str_replace(data):\n for key, value in data.items():\n if isinstance(value, (str, unicode)):\n data[key] = value.format(**data)", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def line_replacer(config,change_this_line,key):\n for arg in config['HyperParameter'][key]: \n pattern=r'{}[ ]*=.*,'.format(arg)\n replace_value=config['HyperParameter'][key][arg][counter]\n if type(replace_value) is str:\n replace_value=\"'\"+replace_value+\"'\"\n change_this_line=re.sub(pattern,\"{}= {},\".format(arg,replace_value),change_this_line)\n return change_this_line", "def formatted_loss_components_string(components: dict) -> str:\n total_loss = components['L_V']+components['L_beta']\n fractions = { k : v/total_loss for k, v in components.items() }\n fkey = lambda key: f'{components[key]:+.4f} ({100.*fractions[key]:.1f}%)'\n s = (\n 'L_V+L_beta = {L:.4f}'\n '\\n L_V = {L_V}'\n '\\n L_V_attractive = {L_V_attractive}'\n '\\n L_V_repulsive = {L_V_repulsive}'\n '\\n L_beta = {L_beta}'\n '\\n L_beta_noise = {L_beta_noise}'\n '\\n L_beta_sig = {L_beta_sig}'\n .format(L=total_loss,**{k : fkey(k) for k in components})\n )\n if 'L_beta_norms_term' in components:\n s += (\n '\\n L_beta_norms_term = {L_beta_norms_term}'\n '\\n L_beta_logbeta_term = {L_beta_logbeta_term}'\n .format(**{k : fkey(k) for k in components})\n )\n return s", "def build_format(i, ex, args, meta_args):\n formatter = string.Formatter()\n format_string = meta_args.format_string\n fields = list(formatter.parse(format_string))\n\n kwarg_fields = []\n indexed_fields = []\n\n i.result = hive.variable('str')\n i.result_out = hive.pull_out(i.result)\n\n for index, field in enumerate(fields):\n literal_text = field[1]\n\n if literal_text is None:\n continue\n\n if not literal_text.isidentifier():\n field_name = \"field_{}\".format(index)\n indexed_fields.append(field_name)\n\n else:\n field_name = literal_text\n kwarg_fields.append(field_name)\n\n # Create IO\n attr = hive.variable()\n setattr(i, field_name, attr)\n\n in_attr = hive.pull_in(attr)\n setattr(i, \"{}_in\".format(field_name), 
in_attr)\n\n setattr(ex, field_name, hive.antenna(in_attr))\n hive.trigger(i.result_out, in_attr, pretrigger=True)\n\n ex.result = hive.output(i.result_out)\n\n def do_format(self):\n args = [getattr(self, \"_{}\".format(attr_name)) for attr_name in indexed_fields]\n kwargs = {attr_name: getattr(self, \"_{}\".format(attr_name)) for attr_name in kwarg_fields}\n self._result = formatter.format(format_string, *args, **kwargs)\n\n i.func = hive.modifier(do_format)\n hive.trigger(i.result_out, i.func, pretrigger=True)", "def format_invocation(name='', args=(), kwargs=None, **kw):\n _repr = kw.pop('repr', bbrepr)\n if kw:\n raise TypeError('unexpected keyword args: %r' % ', '.join(kw.keys()))\n kwargs = kwargs or {}\n a_text = ', '.join([_repr(a) for a in args])\n if isinstance(kwargs, dict):\n kwarg_items = [(k, kwargs[k]) for k in sorted(kwargs)]\n else:\n kwarg_items = kwargs\n kw_text = ', '.join(['%s=%s' % (k, _repr(v)) for k, v in kwarg_items])\n\n all_args_text = a_text\n if all_args_text and kw_text:\n all_args_text += ', '\n all_args_text += kw_text\n\n return '%s(%s)' % (name, all_args_text)", "def __format__(self, *args, **kwargs): # real signature unknown\r\n pass", "def format(query: str) -> str:\n return query.format(**Constants.to_dict())", "def field(*args, **kwargs):\n\n alias = None\n name = None\n if len(args) == 1:\n name = args[0]\n elif len(args) == 2:\n alias, name = args\n else:\n raise ValueError(\"Field name or alias and name must be provided to 'field', got '{}'\".format(args))\n\n result = \"\"\n if alias:\n result = \"{}: \".format(alias)\n\n if not kwargs:\n return result + name\n\n result += \"{}({})\".format(\n name,\n \", \".join([\"{}: {}\".format(k, params(v)) for k, v in sorted(kwargs.items())])\n )\n\n return result", "def _interpolate(string):\n return string % env.project", "def test_extra_with_string_format():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\n \"Hello {world}!\",\n extra=dict(\n world=\"{}\".format(\"World\"),\n ),\n )\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, is_(empty()))", "def test_nested_format_fields():\n '{0:>{1}}'.format(42, 24)\n '{0:{a[1]}} {a}'.format(1, a=[1, 2])\n '{:>{}}'.format(42, 24)\n '{0:>{1}}'.format(42)\n '{0:>{1}}'.format(42, 24, 54)\n ##Warn: W1303\n '{0:{a[1]}}'.format(1)\n ##Warn: W1306\n '{0:{a.x}}'.format(1, a=2)", "def format_parameter_value(self, param_config, precision):\n # type: (Dict[str, Any], int) -> str\n return \"\"", "def docfmt(**kwargs):\n kwargs = {k: v.lstrip() for k, v in kwargs.items()}\n\n def outer(fn):\n buf = []\n if fn.__doc__ is None:\n return fn\n formatsiter = string.Formatter().parse(fn.__doc__)\n for literal, field, fmtspec, conv in formatsiter:\n assert conv is None\n assert not fmtspec\n buf.append(literal)\n if field is not None:\n # get indentation\n lines = literal.rsplit(\"\\n\", 1)\n if _only_spaces(lines[-1]):\n indent = \" \" * len(lines[-1])\n valuelines = kwargs[field].splitlines(True)\n # first line\n buf.append(valuelines[0])\n # subsequent lines are indented\n buf.extend([indent + ln for ln in valuelines[1:]])\n else:\n buf.append(kwargs[field])\n fn.__doc__ = \"\".join(buf)\n return fn\n\n return outer", "def format_arg(namespace, arg, lex):\n if lex and arg[0] in ('[', '('):\n return arg[0] + namespace + arg[1:]\n try:\n return namespace + arg\n except:\n return str.encode(namespace) + arg", "def __format__(self, fmt):\n if not isinstance(fmt, str):\n raise TypeError(\"must be str, not %s\" % 
type(fmt).__name__)\n if len(fmt) != 0:\n return self.strftime(fmt)\n return str(self)", "def replace_params(template, data):\n\t# Find all variables between curly braces, potentially with some spaces\n\tmatches = re.findall(\"({{\\s*)(\\w+(-\\w+)*)(\\s*}})\", template)\n\t# Process the matches\n\tfor match in matches:\n\t\tkey = match[1]\n\t\tif key in data:\n\t\t\ttemplate = template.replace(match[0] + match[1] + match[3], data[key])\n\t\t# If no data supplied in dictionary, that was an optional parameter, just delete it\n\t\telse:\n\t\t\ttemplate = template.replace(match[0] + match[1] + match[3], \"\")\n\treturn template", "def format(value):\n if isinstance(value, str):\n return '\"{}\"'.format(value)\n if isinstance(value, bool):\n return 'true' if value is True else 'false'\n elif isinstance(value, dict):\n assert False, 'Not implemented for dictionary type'\n elif hasattr(value, '__len__'): # should cover list and numpy array\n return '{{{}}}'.format(', '.join([str(v) for v in value]))\n else: # assume scalar value\n return value", "def decorate(cls, attr_names=None, **kwargs):\n if attr_names is None:\n attr_names = ()\n elif not isinstance(attr_names,(list,tuple)):\n raise DecoError('type {} not accepted for list/tuple of decorated members'.format(type(attr_names)))\n # various local objects\n obj_decorator = cls._format_obj(**kwargs)#analysis:ignore\n format_decorator = cls.format(**kwargs)#analysis:ignore\n str_format = lambda s: s.format(**kwargs) if s is not None else '' \n special_members = ['__metaclass__','__module__','__weakref__','__dict__','__class__']#analysis:ignore\n def decorator(obj, obj_name=None):\n # deal with special '__doc__' member\n if obj_name=='__doc__':\n try: \n return str_format(obj)\n except: return obj or ''\n # don't consider other special members and other special members unless \n # it is explicitely to decorate them (e.g. __init__)\n elif obj_name in special_members: \\\n # or (obj_name.startswith('__') and obj_name.endswith('__') and obj_name not in attr_names):\n return obj\n # deal with properties\n elif isinstance(obj, property): \n try: \n return property(obj.__get__, obj.__set__, obj.__delattr__, str_format(obj.__doc__))\n except: return obj # e.g. 
property not decorated\n # deal with class members\n elif inspect.isclass(obj):\n try: \n return cls.format_class(**kwargs)(obj) \n except: return obj\n # deal with method members\n elif inspect.isroutine(obj): # inspect.ismethod(obj):\n try: \n return cls.format_method(**kwargs)(obj) \n except: return obj\n ## deal with attribute members\n else: \n try: # whenever __doc__ is writeable\n obj.__doc__ = obj_decorator(obj)\n return obj\n except: \n return obj\n return class_decorator(decorator, *attr_names)", "def formatargspec(args, varargs=None, varkw=None, defaults=None,\r\n formatarg=str,\r\n formatvarargs=lambda name: '*' + name,\r\n formatvarkw=lambda name: '**' + name,\r\n formatvalue=lambda value: '=' + repr(value),\r\n join=joinseq):\r\n specs = []\r\n if defaults:\r\n firstdefault = len(args) - len(defaults)\r\n for i, arg in enumerate(args):\r\n spec = strseq(arg, formatarg, join)\r\n if defaults and i >= firstdefault:\r\n spec = spec + formatvalue(defaults[i - firstdefault])\r\n specs.append(spec)\r\n if varargs is not None:\r\n specs.append(formatvarargs(varargs))\r\n if varkw is not None:\r\n specs.append(formatvarkw(varkw))\r\n return '(' + string.join(specs, ', ') + ')'", "def substitutes(string, **kwargs):\n return json.dumps(substitute(json.loads(string), **kwargs))", "def test__str__and__repr__(self, kwargs, expected):\n fparam = FParameter(**kwargs)\n assert str(fparam) == expected\n assert repr(fparam) == '<FParameter \"{}\">'.format(expected)", "def info(capsys, format_str, format_args=None):\n\n if format_args is not None:\n msg = (format_str % format_args)\n else:\n msg = format_str\n\n with capsys.disabled():\n print(msg)", "def _safe_attr(attr, camel_killer=False, replacement_char='x'):\n allowed = string.ascii_letters + string.digits + '_'\n attr = _safe_key(attr)\n if camel_killer:\n attr = _camel_killer(attr)\n attr = attr.replace(' ', '_')\n out = ''\n for character in attr:\n out += character if character in allowed else '_'\n out = out.strip('_')\n try:\n int(out[0])\n except (ValueError, IndexError):\n pass\n else:\n out = '{0}{1}'.format(replacement_char, out)\n if out in kwlist:\n out = '{0}{1}'.format(replacement_char, out)\n return re.sub('_+', '_', out)", "def dict(self, arg: SeField[Any]) -> str:\n if is_bare_dict(arg.type):\n return arg.varname\n else:\n karg = arg[0]\n karg.name = \"k\"\n varg = arg[1]\n varg.name = \"v\"\n return f\"{{{self.render(karg)}: {self.render(varg)} for k, v in {arg.varname}.items()}}\"", "def sformatf(cls, msg, *args):\n #formats = {\"%t\": \"%d\", \"%0t\": \"%0d\"}\n #for s in formats:\n # msg = msg.replace(s, formats[s])\n #return sformatf(msg, *args)\n # TODO substitute old types %s/%d etc with {}\n #new_msg = cls.STR_RE.sub(r'{:\\1}', msg)\n #print(\"new_msg is \" + new_msg)\n for s in cls.formats:\n if s == \"%h\" or s == \"%0h\":\n msg = msg.replace(s, \"{:X}\")\n else:\n msg = msg.replace(s, \"{}\")\n return msg.format(*args)" ]
[ "0.5990877", "0.59723574", "0.57197136", "0.5632017", "0.56243557", "0.5579166", "0.54825616", "0.5474345", "0.5385853", "0.5365595", "0.5335425", "0.5315145", "0.5266371", "0.5259829", "0.52348405", "0.52140254", "0.5195806", "0.5177217", "0.5099359", "0.5097019", "0.50952864", "0.5078579", "0.50761694", "0.5062998", "0.5062329", "0.505489", "0.5044439", "0.50416446", "0.50361025", "0.50129026", "0.5012273", "0.5008693", "0.49973017", "0.49924472", "0.49922407", "0.49914008", "0.49775755", "0.4968604", "0.49456397", "0.49340487", "0.49241576", "0.49125293", "0.48898837", "0.48873314", "0.4874479", "0.4870638", "0.4866174", "0.48635432", "0.4861874", "0.48494786", "0.48443127", "0.48418087", "0.48361883", "0.48227674", "0.48222896", "0.4818961", "0.48122936", "0.48015186", "0.47960576", "0.47930512", "0.47776666", "0.477639", "0.47577536", "0.47574937", "0.47532377", "0.4751495", "0.47503197", "0.47495934", "0.47461376", "0.4742805", "0.47217295", "0.4710514", "0.47039402", "0.47039062", "0.4666654", "0.4666211", "0.46640712", "0.46558627", "0.46533406", "0.46518236", "0.46510231", "0.46493372", "0.4647056", "0.46444726", "0.46440208", "0.46428192", "0.4641235", "0.46402997", "0.4638094", "0.46352684", "0.46339253", "0.4630008", "0.46262667", "0.4611843", "0.46057966", "0.46048802", "0.46028778", "0.4599217", "0.4598511", "0.45887202" ]
0.6764331
0
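
The negatives in the row above all circle one pattern: substituting named parameters into a format string without crashing on unknown keys. A minimal sketch of that pattern in Python; the helper name `safe_substitute` and the choice to echo unknown placeholders back unchanged (a behaviour several of the snippets above also aim for) are illustrative assumptions, not lifted from any entry:

    class _SafeDict(dict):
        def __missing__(self, key):
            # Unknown keys are echoed back as literal placeholders.
            return "{" + key + "}"

    def safe_substitute(template, params):
        # Fill known {name} fields; leave the rest intact rather than
        # raising KeyError. Works for plain fields without format specs.
        return template.format_map(_SafeDict(params))

    # safe_substitute("hi {name}, {missing}", {"name": "Ada"})
    # -> 'hi Ada, {missing}'
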
function that finds and returns the value for a placeholder.
def get_parameter(key): if key in param_dict: # was a supplied parameter param = param_dict.get(key) elif module and hasattr(module, key): param = getattr(module, key) if hasattr(param, "__call__"): # we don't allow module methods raise Exception() elif attr_getter: # get value from attr_getter function try: param = attr_getter(key) except: # noqa e722 raise Exception() else: raise Exception() if isinstance(param, Composite): if param.text(): param = param.copy() else: param = "" return param
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __findPlaceholder(self, data, index):\r\n m = self.__placeholder_re.search(data, index)\r\n if m:\r\n return m.group(1), m.end()\r\n else:\r\n return None, index + 1", "def placeholder(self):\n return self._placeholder", "def placeholder(self) -> str | None:\n return self._underlying.placeholder", "def findPlaceholder(self, placeholder):\n start = self.row\n for lineno in xrange(start-1, len(self.buffer)):\n x = self.buffer[lineno].find(placeholder)\n if x != -1:\n return (lineno, x, len(placeholder))\n #not found after, search to start backwords!\n for lineno in xrange(start-1, 0, -1):\n x = self.buffer[lineno].find(placeholder)\n if x != -1:\n return (lineno, x, len(placeholder))\n return (0,0,0)", "def find(found_item, _):\n if found_item:\n return found_item[1]\n else:\n return default", "def MatchPlaceholder(string, node, placeholder):\n matched_text = placeholder.Match(node, string)\n if not matched_text:\n return string\n ValidateStart(string, matched_text)\n if not isinstance(placeholder, TextPlaceholder):\n matched_text = StripStartParens(matched_text)\n before, after = string.split(matched_text, 1)\n if StripStartParens(before):\n raise BadlySpecifiedTemplateError(\n 'string \"{}\" should have started with placeholder \"{}\"'\n .format(string, placeholder))\n return after", "def find_value(code, value):\n value_pattern = re.compile(rf\"{re.escape(value)} ?= ?([^=][a-zA-Z0-9\\.'/_)(]*)\")\n\n target = None\n for line in code:\n if value_pattern.search(line):\n target = re.findall(value_pattern, line)\n break\n\n return target[0] if target is not None else value", "def getValue(name, default=None):", "def get_geom_placeholder(self, value, srid):\r\n if hasattr(value, 'expression'):\r\n placeholder = '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))\r\n else:\r\n placeholder = '%s(%%s)' % self.from_text\r\n return placeholder", "def get_value(name):\n\n named_value = get_named_value_raw(name)\n if named_value is not None:\n return named_value.value", "def get_parameter(cur, par):\n cur.execute(\"SELECT value FROM parameters WHERE par='%s';\" % par)\n return cur.fetchone()[0]", "def find_variable(self, standard_name, any_scope=True, loop_subst=False):\n if standard_name in self:\n var = self[standard_name]\n elif any_scope and (self._parent_dict is not None):\n var = self._parent_dict.find_variable(standard_name, any_scope)\n else:\n var = None\n # End if\n if (var is None) and loop_subst:\n var = self.find_loop_subst(standard_name, any_scope=any_scope)\n # End if\n return var", "def resolve(self, var, context):\n if var[0] in ('\"', \"'\") and var[-1] == var[0]:\n return var[1:-1]\n else:\n return Variable(var).resolve(context)", "def resolve(self, var, context):\n if var[0] in ('\"', \"'\") and var[-1] == var[0]:\n return var[1:-1]\n else:\n return Variable(var).resolve(context)", "def placeholders(self):\n x = [i.placeholder for i in self._input_desc]\n return x[0] if len(x) == 1 else x", "def get_match_result(self, variable):\n try:\n return self._match_result_dict[variable]\n except KeyError:\n return None", "def lookup(self, val):\n vid = id(val)\n return self.values.get(vid, None)", "def lookup(input_field, input_val, output_field):\n l = list(filter(lambda x : x[input_field] == input_val, data))\n if len(l) != 0:\n return l[0][output_field]\n print(\"No entry found for \" + input_field + \": \" + input_val)\n return \"\"", "def find_value(dic, key):\n return dic[key]", "def get_value(soup, tag, cond, default=None):\r\n ele = soup.find(tag, cond)\r\n 
if ele:\r\n return ele.text.strip()\r\n return default", "def _get_value(self, value, context):\n try:\n var_value = template.Variable(value).resolve(context)\n except template.VariableDoesNotExist:\n try:\n var_value = self.var_value.var\n except AttributeError:\n var_value = self.var_value\n return var_value", "def placeholder(self, name, type_name):\n provenance = NQExprProvenance(\n operation='placeholder', args=(type_name, name))\n value = tf.compat.v1.placeholder(\n tf.float32, shape=[None, self.get_max_id(type_name)], name=name)\n return self.as_nql(value, type_name, provenance)", "def find_value(self, func_addr, instr_addr, register):\n reg_num = idaapi.ph_get_regnames().index(register)\n\n # go back from the current instruction to the start of the function\n for instr_addr in list(instructions(func_addr, instr_addr))[::-1]:\n # look for instrucations that move a value into the desired register\n mnemonic = idc.GetMnem(instr_addr)\n if mnemonic == 'mov':\n\n op1_type = idc.get_operand_type(instr_addr, 0)\n op1_value = idc.get_operand_value(instr_addr, 0)\n \n if op1_type == idc.o_reg and op1_value == reg_num:\n op2_type = idc.get_operand_type(instr_addr, 1)\n op2_value = idc.get_operand_value(instr_addr, 1)\n\n # if this instruction sets the register to an immediate value\n if op2_type == idc.o_imm:\n # return that value\n return op2_value\n else:\n # it is not an immediate value, so we say we cannot\n # resolve the value\n return None\n\n # we did not find an allocation of the register,\n # so we return None to indicate that\n return None", "def read_value(where, ttype=None):\n ttype = VOID_P if ttype is None else ttype\n\n frame = gdb.selected_frame()\n if where.startswith(\"$\"):\n return frame.read_register(where[1:]).cast(ttype)\n else:\n to_parse = \"(%s) %s\" % (str(ttype), where)\n return gdb.parse_and_eval(to_parse)", "def _findIdentifierValue (self, identifier : String) -> String:\n\n Logging.trace(\">>: %s\", identifier)\n cls = self.__class__\n\n if identifier not in self._keyToValueMap:\n # leave identifier as is (it might be some value name like\n # wahr or false\n Logging.traceError(\"no expansion found\")\n result = identifier\n else:\n result = self._keyToValueMap[identifier]\n\n if not isString(result):\n result = repr(result)\n else:\n result = (cls._doubleQuoteCharacter + result\n + cls._doubleQuoteCharacter)\n\n Logging.trace(\"<<: expanded %s into %r\", identifier, result)\n return result", "def get_constant(v):\r\n if isinstance(v, Variable):\r\n try:\r\n return get_scalar_constant_value(v)\r\n except NotScalarConstantError:\r\n return None\r\n else:\r\n return v", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def get(name: str):\n if name not in Replacements._rep:\n return None\n return Replacements._rep[name]", "def lookup(self, s, default=None):\n\t\thead, tail = s[0], s[1:]\n\t\tnode = self.root[head]\n\t\tif tail:\n\t\t\treturn node.lookup(tail)\n\t\treturn node.value or default", "def get_replacement_value(self, to_be_replaced):\n name_reg = re.compile('[a-zA-z_]+')\n param_name = name_reg.search(to_be_replaced).group()\n return self.params.get(param_name)", "def lookup(index,keyword):\n\tif keyword in index:\n\t\treturn index[keyword]\n\treturn None", "def lookup(scopes, name):\n # type: (Scopes[T], str) -> Optional[T]\n\n for scope in scopes:\n for key, val in scope:\n if key == name:\n return val\n return 
None", "def Variable(name):\n placeholder_node = placeholder_op()\n placeholder_node.name = name\n return placeholder_node", "def lookup(what, where, default=None):\n if isinstance(what, (str, unicode)):\n res = getattr(where, what, default)\n else:\n res = what\n return res", "def lookup_some_key(what, where, default=None):\n for w in what:\n try:\n return where[w]\n except KeyError:\n pass\n return default", "def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None", "def get_embedding_placeholder(self):\n return self.embedding_layer.get_embedding_placeholder()", "def extract_location(x):\n if not pd.isna(x):\n x = ast.literal_eval(x)\n location = x['name']\n return location\n else:\n return np.nan", "def suggestValue(self, variable: Variable, value: int | float, /) -> None:\n ...", "def var():\n def _var(quoted_name):\n name = quoted_name.subexpression.name\n if (value := get_name(name)) is not None:\n return value\n else:\n raise TypeError(f\"Binding {name} not found\")\n yield (\"(λ &[name] . any)\", _var)", "def find_node(self, value):\n for (fun, node) in self.__root.__fast_find:\n if fun(value):\n return node\n return None", "def is_placeholder_value(cls, value):\n return value == cls.PLACEHOLDER_VALUE", "def _identity_placeholder(\n self,\n x: typing.Any,\n ) -> typing.Any:\n return x", "def get(self, key, default=None):\n def find(found_item, _):\n \"\"\" This is the closer function which will be passed to find by key function , if key found than return the value \n otherwise return blanck\"\"\"\n if found_item:\n return found_item[1]\n else:\n return default\n\n return self._find_by_key(key, find)", "def lookup_variable_value(var, env):\n def env_loop(environment):\n \"\"\"\n calls scan on each frame in the env list\n \"\"\"\n def scan(vars, vals):\n \"\"\"\n scans variables in a frame\n \"\"\"\n if isNull(vars):\n return env_loop(enclosing_env(environment)) # 5-4: env -> environment\n elif isEq(var, car(vars)) == TRUE:\n return car(vals)\n else:\n return scan(cdr(vars), cdr(vals))\n if environment is the_empty_environment:\n raise UnboundLocalError(\"lookup_variable\")\n frame = first_frame(environment)\n return scan(frame_variables(frame), frame_values(frame))\n return env_loop(env)", "def lookup(name):", "def lookup(name):", "def test_find_default(self):\n mute_map = MutableMap(**VALUE)\n\n assert mute_map.find('NOT_VALID', 'default_val') == \\\n 'default_val', 'default should be used'\n assert mute_map.find('str_val', 'default_val') == \\\n VALUE['str_val'], 'default should be ignored'", "def default(value, replacement):\n return value if value is not None else replacement", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def get(name, default=None):", "def var_or_atomic_or_blank():\n return var_or_atomic() | next_value('_')", "def find_regvar(*args):\n return _ida_frame.find_regvar(*args)", "def _get_simple_null_value(simple):\n return _SIMPLE_NULL_VALUES[simple]", "def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"", "def _get_variable_name(param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def get_value(self, locator: Locator) -> Optional[str]:\n element = self.ctx.get_element(locator)\n get_value_pattern = self.get_value_pattern(element)\n\n if get_value_pattern:\n func_name = get_value_pattern.__name__\n self.logger.info(\n \"Retrieving the element 
value with the %r method.\", func_name\n )\n value_pattern = get_value_pattern()\n return value_pattern.Value if value_pattern else None\n\n raise ActionNotPossible(\n f\"Element found with {locator!r} doesn't support value retrieval\"\n )", "def lookup(self, var):\n\t\tsearched = self._search(self._root, var)\n\t\tif searched is not None:\n\t\t\treturn searched._value\n\t\telse:\n\t\t\tself._assign(var, 0)\t#creates the new variable to 0\n\t\t\treturn 0", "def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])", "def _find(self, val, cur_node):\n if val == cur_node.data:\n return cur_node\n elif val > cur_node.data:\n if not cur_node.right:\n return None\n return self._find(val, cur_node.right)\n elif val < cur_node.data:\n if not cur_node.left:\n return None\n return self._find(val, cur_node.left)", "def __getitem__(self, v):\r\n return self.unif.get(v, (v, None))[0]", "def get(self, var: str, default: Union[str, T] = '') -> Union[str, T]:\n if var[0] == '$':\n var = var[1:]\n folded_var = var.casefold()\n if folded_var in self._fixup:\n return self._fixup[folded_var].value\n else:\n return default", "def find(self, number: str) -> Optional[str]:\n if number in self.data: # noqa\n return number\n else:\n return None", "def get_value(cursor, address):\n for part in address.split('.'):\n try:\n cursor = cursor[part]\n except KeyError:\n LOG.warning(\"KeyError: %s (%s)\", part, address)\n return\n return cursor", "def get_variable(self, col: str, name: str, default: T = None) -> T:\n if self.scope is None:\n raise ValueError(\"Can't access variables on unbound modules\")\n return self.scope.get_variable(col, name, default)", "def get_value(arg):\n if arg in self.args_repository:\n return self.args_repository[arg]\n if arg in self.data_repository:\n return self.data_repository[arg]\n print_error(\"value for mandatory argument '{0}' not available in \"\n \"data_repository/args_repository\".format(args))\n return None", "def lookup():", "def _pick_variable(self, variable_and_offsets):\n\n if isinstance(self.operand, MemoryOperand):\n if len(variable_and_offsets) > 1:\n log.error(\"Instruction %#x has two memory operands. Please report it on GitHub.\", self.insn.addr)\n return variable_and_offsets[0]\n\n elif isinstance(self.operand, RegisterOperand):\n # there might be multiple register-type variables for an instruction. pick the right one is... 
not easy\n\n the_reg = self.operand.register\n if the_reg is None:\n # huh, it does not have a Register child\n return None, None\n\n reg_name = the_reg.reg\n arch = self.instance.project.arch\n\n if len(variable_and_offsets) == 1:\n # only one candidate...\n var, offset = variable_and_offsets[0]\n if arch.registers[reg_name][0] == var.reg:\n return var, offset\n return None, None\n\n if self.operand_index > 0:\n # this is the source operand\n # which variable is read here?\n for var, offset in variable_and_offsets:\n if arch.registers[reg_name][0] == var.reg:\n if self._variable_has_access(var, self.insn.addr, \"read\"):\n return var, offset\n\n log.debug(\n \"Cannot find any source variable for operand %d at instruction %#x.\",\n self.operand_index,\n self.insn.addr,\n )\n return None, None\n\n # this is the destination operand\n # which variable is written here?\n for var, offset in variable_and_offsets:\n if arch.registers[reg_name][0] == var.reg and self._variable_has_access(var, self.insn.addr, \"write\"):\n return var, offset\n\n log.debug(\n \"Cannot find any destination variable for operand %d at instruction %#x.\",\n self.operand_index,\n self.insn.addr,\n )\n # just return the first one\n return None, None\n\n else:\n # what's this type? why am I here?\n log.error(\"_pick_variable: Unsupported operand type %s.\", self.operand.__class__)\n\n return None, None", "def get_parameter(self, param):\n try:\n result = self._data[\"queryResult\"][\"parameters\"][param]\n except KeyError:\n result = None\n\n return result", "def find_field(browser, field, value):\r\n return find_field_by_id(browser, field, value) or \\\r\n find_field_by_name(browser, field, value) or \\\r\n find_field_by_label(browser, field, value)", "def extract_placename(query_result):\n if 'name' in query_result.keys():\n return query_result['name']\n else:\n return np.float('nan')", "def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False", "def soar_substitute(value, json_str):\n replace_dict = json.loads(json_str)\n if value in replace_dict:\n return replace_dict[value]\n\n # use a default value if specific match is missing\n if 'DEFAULT' in replace_dict:\n return replace_dict['DEFAULT']\n\n return value", "def search(self, val):\n search = self.head\n while search:\n if search.val == val:\n return search\n search = search.next\n return None", "def getValue(splits, featureName):\n for split in splits:\n if split.startswith(featureName):\n return split[split.find(\"=\")+1:]\n \n return None", "def _get_simple_default_value(simple):\n return _SIMPLE_DEFAULT_VALUES[simple]", "def find_arg(needle, haystack):\r\n for i in range(0, len(haystack)):\r\n if haystack[i] == needle:\r\n try:\r\n return haystack[i+1]\r\n except IndexError:\r\n pass\r\n return None", "def _tkvar_get(self,param_name):\n tk_val = self._tkvars[param_name]._original_get()\n po_val = self.get_parameter_value(param_name)\n\n po_stringrep = self._object2string(param_name,po_val)\n\n if not self.translators[param_name].last_string2object_failed and not tk_val==po_stringrep:\n self._tkvars[param_name]._original_set(po_stringrep)\n return tk_val", "def _lookup_typedef_value(typedef, name):\n assert typedef is not None\n assert name is not None\n \n # Check if the typedef has the attribute\n value = typedef.get(name)\n if value:\n return value\n \n # Otherwise, see if it's defined in the base type(s)\n base_type_name = typedef.get('base-type')\n if base_type_name:\n base_typedef = typedef_registry.get(base_type_name)\n 
if not base_typedef:\n raise error.CommandDescriptionError('Unknown type name: %s' % base_type_name)\n return _lookup_typedef_value(base_typedef, name)\n\n return None", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def resolve_resolver_value(self, resolver: \"Resolver\") -> Any:\n try:\n return resolver.resolve()\n except RecursiveResolve:\n # Recursive resolve issues shouldn't be masked by a placeholder.\n raise\n except Exception:\n if are_placeholders_enabled():\n placeholder_value = create_placeholder_value(\n resolver, self.placeholder_type\n )\n\n self.logger.debug(\n \"Error encountered while resolving the resolver. This is allowed for the current \"\n f\"operation. Resolving it to a placeholder value instead: {placeholder_value}\"\n )\n return placeholder_value\n raise", "def _find_first(self, ast, label):\n res = self._find_all(ast, label, max_results=1)\n if len(res):\n return res[0]\n return None", "def get_val(self, name, default=None):\r\n val = cF.get_val(name, default=default)\r\n if name == \"first\":\r\n if val == \"\":\r\n val == 0\r\n elif name == \"last\":\r\n if val == \"\" or val == 0:\r\n val = 0\r\n return val", "def resolve(var, context):\n try:\n return var.resolve(context)\n except template.VariableDoesNotExist:\n return var.var", "def visit_Variable(self, node):\n var_name = node.value\n val = self.VARIABLES.get(var_name)\n if val is None:\n raise NameError(repr(var_name))\n else:\n return val", "def placeholder():\n return ResultProxy(TaskResult())", "def find_value(self, x, y):\n for cell in self.cells:\n if cell.coordinates == (x,y):\n return cell.value\n else:\n return None", "def find_value(self, key):\n values = self.values\n if key not in values:\n raise AttributeError(\"Config has no value for {}\".format(key))\n\n val = values[key]\n if isinstance(val, Default):\n return val.val\n else:\n return val", "def search(self, val):\n current = self.head\n # import pdb; pdb.set_trace()\n while current is not None:\n if current.data == val:\n return current\n current = current.next_node\n return None", "def findFootnotesPlaceholder(self, root):\n def finder(element):\n for child in element:\n if child.text:\n if child.text.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, True\n if child.tail:\n if child.tail.find(self.getConfig(\"PLACE_MARKER\")) > -1:\n return child, element, False\n finder(child)\n return None\n \n res = finder(root)\n return res", "def js_var(var, raw):\n lestr = r\"\\b{0}\\s*=\\s*\\\"([^\\\"]+)\".format(var)\n match = search(lestr, raw)\n return None if match is None else match.group(1)", "def getValue(self,value):\n if value in self.header.keys():\n return self.header[value]\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n if self.params is None:\n return None\n 
return self.params.get(value) #will return None if non-existent", "def _find(self, details: CallableDetails) -> CallableArg:\n if self.name:\n return self._find_by_name(details, self.name)\n else:\n return self._get_first(details)", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _getInputValue(self, name, target = ''):\n value = self._getInputValue(name, target)\n return value.getData() if value else None" ]
[ "0.72384614", "0.65259403", "0.6496153", "0.64682907", "0.60817194", "0.58944154", "0.5852093", "0.5762202", "0.57511127", "0.56862986", "0.56270653", "0.55724573", "0.5522929", "0.5522929", "0.5460267", "0.5454473", "0.5445658", "0.54357004", "0.54279006", "0.54072934", "0.54031765", "0.5377545", "0.53610915", "0.53523374", "0.534216", "0.5340468", "0.53398013", "0.53364336", "0.5324088", "0.5315964", "0.5315938", "0.5311586", "0.5308344", "0.5298594", "0.5292584", "0.52908075", "0.5267696", "0.5265393", "0.52648836", "0.5254388", "0.52468324", "0.52361566", "0.5231783", "0.5227554", "0.5216189", "0.5211533", "0.5211533", "0.52098745", "0.5204919", "0.52039826", "0.5169481", "0.51592124", "0.51491433", "0.5143561", "0.5141684", "0.51393497", "0.51341003", "0.51210153", "0.5116357", "0.51162255", "0.51160914", "0.511534", "0.5114423", "0.51122344", "0.5107098", "0.50997925", "0.5099471", "0.50977534", "0.5094953", "0.509326", "0.5091725", "0.5085734", "0.50846124", "0.50833476", "0.5080354", "0.50776094", "0.5076754", "0.5074039", "0.50633365", "0.5057327", "0.5057327", "0.5057327", "0.5057327", "0.5057327", "0.5055252", "0.5053316", "0.50516766", "0.50498646", "0.5048661", "0.50462514", "0.5038646", "0.5037388", "0.5021009", "0.50200844", "0.5019467", "0.5018014", "0.5013128", "0.5008758", "0.5008758", "0.5008758", "0.50016594" ]
0.0
-1
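
The row just closed asks for a function that resolves a placeholder key through a chain of sources. A compact sketch of that lookup chain; the function name, the `default` argument, and the exception handling are assumptions for illustration, not taken from the row's document:

    def lookup_placeholder(key, params, attr_getter=None, default=None):
        # 1) explicit parameters win; 2) an optional getter callback;
        # 3) a caller-supplied default instead of raising.
        if key in params:
            return params[key]
        if attr_getter is not None:
            try:
                return attr_getter(key)
            except Exception:
                return default
        return default
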
return the correct value for the placeholder
def get(self, get_params, block): value = f"{{{self.key}}}" try: value = value_ = get_params(self.key) if self.format.startswith(":"): # if a parameter has been set to be formatted as a numeric # type then we see if we can coerce it to be. This allows # the user to format types that normally would not be # allowed eg '123' it also allows {:d} to be used as a # shorthand for {:.0f}. Use {:g} to remove insignificant # trailing zeroes and the decimal point too if there are # no remaining digits following it. If the parameter cannot # be successfully converted then the format will be removed. try: if "escape" in self.format: value = escape(value) if "ceil" in self.format: value = ceil(float(value)) if "f" in self.format: value = float(value) if "g" in self.format: value = float(value) if "d" in self.format: value = int(float(value)) output = f"{{[{self.key}]{self.format}}}" value = output.format({self.key: value}) value_ = float(value) except ValueError: pass elif self.format.startswith("!"): output = f"{{{self.key}{self.format}}}" value = value_ = output.format(**{self.key: value}) if block.commands.not_zero: valid = value_ not in ["", None, False, "0", "0.0", 0, 0.0] else: # '', None, and False are ignored # numbers like 0 and 0.0 are not. valid = not (value_ in ["", None] or value_ is False) enough = False except: # noqa e722 # Exception raised when we don't have the param enough = True valid = False return valid, value, enough
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def placeholder(self):\n return self._placeholder", "def placeholder(self) -> str | None:\n return self._underlying.placeholder", "def get_val(self):\n return", "def get_geom_placeholder(self, value, srid):\r\n if hasattr(value, 'expression'):\r\n placeholder = '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))\r\n else:\r\n placeholder = '%s(%%s)' % self.from_text\r\n return placeholder", "def get_value(self):\n return None", "def __prepare_value(self, val, none_symbol=\"-\"):\n\t\t# If type is a tuple, then it is a \n\t\t# (func_unit_label, replica_id) pair. \n\t\t# Concatenate then using \"_\" symbol.\n\t\tif type(val) == type(()):\n\t\t\tval = \"_\".join(map(str, val))\n\n\t\t# Cast value to string/character type,\n\t\t# if it is not a None value\n\t\tif val is not None:\n\t\t\tval = str(val)\n\t\telse:\n\t\t\tval = none_symbol\n\n\t\t# Value processing finished\n\t\treturn val", "def get_value(self):", "def getValue(name, default=None):", "def __findPlaceholder(self, data, index):\r\n m = self.__placeholder_re.search(data, index)\r\n if m:\r\n return m.group(1), m.end()\r\n else:\r\n return None, index + 1", "def get_value(self):\n pass", "def _tkvar_get(self,param_name):\n tk_val = self._tkvars[param_name]._original_get()\n po_val = self.get_parameter_value(param_name)\n\n po_stringrep = self._object2string(param_name,po_val)\n\n if not self.translators[param_name].last_string2object_failed and not tk_val==po_stringrep:\n self._tkvars[param_name]._original_set(po_stringrep)\n return tk_val", "def _get_value(self):\n return self.__value", "def _value(self) -> Any:\n if not self._stack:\n if self.required:\n raise PyParamValueError(\"Argument is required.\")\n return self.default\n ret = self._stack[-1][0]\n self._stack = []\n return ret", "def _getDefaultValue(self):\n value = self._getDefaultValue()\n return value.getData() if value else None", "def _value_if_not_fixed(o, d):\n if o.fixed:\n return ()\n return (\"value\",)", "def is_placeholder_value(cls, value):\n return value == cls.PLACEHOLDER_VALUE", "def _value(self):\n if self.data is None:\n return self._original_value\n elif self.get_label:\n return self.get_label(self.data)\n else:\n return unicode(self.data)", "def _safe_value(self, var_name, type_formatter = None):\n var = self.device.get_variable(var_name)\n if var is not None:\n if type_formatter is not None and var.value is not None:\n return type_formatter(var.value)\n else:\n return var.value\n return None", "def get_value ( self, object ):\n try:\n value = getattr( object, self.name )\n try:\n return self.format % ( value, )\n except:\n return 'Format!'\n except:\n return 'Undefined!'", "def inputValue(self):\n return self.variable", "def get_default_value(self):\n pass", "def get_val(self, name, default=None):\r\n val = cF.get_val(name, default=default)\r\n if name == \"first\":\r\n if val == \"\":\r\n val == 0\r\n elif name == \"last\":\r\n if val == \"\" or val == 0:\r\n val = 0\r\n return val", "def variable(self, val):", "def _get_value(self):\n \n return self._value", "def getval(self):\r\n return self.value", "def getDefaultValue(self) -> Optional[int]:\n try:\n return int(self.placeholderText())\n except ValueError:\n return None", "def run(self, value):\r\n return '' if value is None else value", "def getvalue(self):\n ...", "def getvalue(self):\n ...", "def value(self) -> global___Expression:", "def value(self) -> global___Expression:", "def request_value(self) -> global___Expression:", "def get_val(self):\n return self.value", "def 
_getInputValue(self, name, target = ''):\n value = self._getInputValue(name, target)\n return value.getData() if value else None", "def get_replacement_value(self, to_be_replaced):\n name_reg = re.compile('[a-zA-z_]+')\n param_name = name_reg.search(to_be_replaced).group()\n return self.params.get(param_name)", "def lazy_value(self):\n\n if self.state == Node.State.VALID:\n return self.value\n else:\n return None", "def _get_value(self, value, context):\n try:\n var_value = template.Variable(value).resolve(context)\n except template.VariableDoesNotExist:\n try:\n var_value = self.var_value.var\n except AttributeError:\n var_value = self.var_value\n return var_value", "def get_value(self):\r\n return 0", "def param_value(self):\n if self.string:\n return self.string\n if self.token:\n return self.token\n if self.number:\n return self.number\n if self.date:\n return self.date\n if self.quantity:\n return self.quantity\n if self.reference:\n return self.reference\n return ''", "def placeholders(self):\n x = [i.placeholder for i in self._input_desc]\n return x[0] if len(x) == 1 else x", "def _identity_placeholder(\n self,\n x: typing.Any,\n ) -> typing.Any:\n return x", "def get_value(self):\n return self._value", "def getValue(self):\n return self.initValue", "def getValue(self) -> int:\n ...", "def get_val(self, **kwargs):\n return self._value", "def default(value, replacement):\n return value if value is not None else replacement", "def default_value_scalar(source=None):\n if not default:\n return None\n if not source:\n return default\n else:\n return source", "def __call__(self):\n return self.value", "def get_val(self):\n if self.val_obj is None:\n return None\n else:\n return self.val_obj.val", "def value(self):\n self.refresh_default_value()\n return self.default_value", "def __getSafeValue(self, key, item):\n try:\n if key.value in self.local:\n value = self.local[key.value]\n else:\n value = self.system.value(key.value, item.default)\n\n if item.type is int:\n value = int(value)\n if item.limits is not None and (value > item.limits[1] or value < item.limits[0]):\n return item.default\n return value\n\n elif item.type is float:\n value = float(value)\n if item.limits is not None and (value > item.limits[1] or value < item.limits[0]):\n return item.default\n return value\n\n elif item.type is bool:\n if type(value) is bool:\n return value\n else:\n # QSetting stores boolean as string in ini file\n return (value.lower() == 'true') if type(value) is str else item.default\n\n elif item.type is list or item.type is tuple:\n if type(value) is str:\n # QSetting could return string when list contains single value\n value = [value] if value else item.default\n\n if item.sub_type is not None:\n value = item.type(map(item.sub_type, value))\n\n if item.fixed_size and item.size != len(value):\n return item.default\n\n if item.limits is not None:\n for v in value:\n if v > item.limits[1] or v < item.limits[0]:\n return item.default\n\n return value\n\n else:\n return item.type(value)\n\n except (ValueError, TypeError):\n return item.default", "def __getitem__(self, item):\n if item == \"data\":\n return self.f_get()\n elif item == \"default\" or item == -1:\n return self.f_get_default()\n else:\n return super(Parameter, self).__getitem__(item)", "def Value(self) -> str:", "def proc_next_ph_tpl(self, ft, nul_val): # TODO change to phoneme!!\n return getattr(self, ft)[1:] + (nul_val,)", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> 
pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def _default_value(self):\n return None", "def suggestValue(self, variable: Variable, value: int | float, /) -> None:\n ...", "def value_constraint(self):\n return self.fixed if self.fixed is not None else self.default", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value\")", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def get_value(self, key, args, kwargs):\n if self.default is not None:\n try:\n return string.Formatter.get_value(self, key, args, kwargs)\n except KeyError:\n return self.default\n else:\n return string.Formatter.get_value(self, key, args, kwargs)", "def get_value(self, instance):\n try:\n return ParameterSetting.objects.get(\n base_parameter=instance,\n project=self.context.get(\"view\").kwargs.get(\"project\"),\n ).raw_value\n except ParameterSetting.DoesNotExist:\n return None", "def 
_assignValue(value):\n if value == \"\":\n return None\n else:\n return value", "def get_embedding_placeholder(self):\n return self.embedding_layer.get_embedding_placeholder()", "def vvalue(self) -> Qval:\n return self.get(self.greedy())", "def placeholder(self, name, type_name):\n provenance = NQExprProvenance(\n operation='placeholder', args=(type_name, name))\n value = tf.compat.v1.placeholder(\n tf.float32, shape=[None, self.get_max_id(type_name)], name=name)\n return self.as_nql(value, type_name, provenance)", "def _get_simple_default_value(simple):\n return _SIMPLE_DEFAULT_VALUES[simple]", "def get_value(self):\n raise NotImplementedError", "def proc_prev_ph_tpl(self, ft, nul_val): # TODO change to phoneme!!\n return (nul_val,) + getattr(self, ft)[:-1]" ]
[ "0.7000699", "0.67986435", "0.6459541", "0.6312645", "0.61937106", "0.6171836", "0.615392", "0.6140465", "0.61130303", "0.60600764", "0.59495264", "0.5948369", "0.5947296", "0.5940576", "0.59120387", "0.59089774", "0.5895668", "0.58923846", "0.58919656", "0.58830136", "0.58829343", "0.58770293", "0.58736074", "0.5855019", "0.58477247", "0.58408934", "0.5818353", "0.5811792", "0.5811792", "0.5808128", "0.5808128", "0.5800015", "0.57869196", "0.57847804", "0.5783371", "0.57695687", "0.576113", "0.5755081", "0.57480407", "0.57340515", "0.57300806", "0.57207644", "0.57090056", "0.57054883", "0.57040375", "0.5668319", "0.56625825", "0.5647044", "0.5640584", "0.564045", "0.564027", "0.5638977", "0.56372166", "0.56358784", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5634058", "0.5631929", "0.5628087", "0.5627616", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5627033", "0.5625671", "0.562472", "0.56181896", "0.5618089", "0.5600421", "0.5585819", "0.5580983", "0.55751526", "0.5561195", "0.55555034" ]
0.0
-1
Check if the condition has been met. We need to make sure that the values being compared are of the correct type.
def _check_valid_condition(self, get_params):
    try:
        variable = get_params(self.variable)
    except:  # noqa e722
        variable = None
    value = self.value

    # if None, return oppositely
    if variable is None:
        return not self.default

    # convert the value to a correct type
    if isinstance(variable, bool):
        value = bool(self.value)
    elif isinstance(variable, Number):
        try:
            value = int(self.value)
        except:  # noqa e722
            try:
                value = float(self.value)
            except:  # noqa e722
                # could not parse
                return not self.default

    # compare and return the result
    if self.condition == "=":
        return (variable == value) == self.default
    elif self.condition == ">":
        return (variable > value) == self.default
    elif self.condition == "<":
        return (variable < value) == self.default
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def condition(self):\n return True", "def check_condition(self):\n\n\t\traw_context = {\n\t\t\t'folk': self.folk\n\t\t}\n\n\t\tstatus, param = self.execute(self.mission_grid, 'condition', self.pending_mission.kingdom, raw_context)\n\t\treturn status", "def check_condition(self, comment):\n if comment.id in self.touched_comment_ids:\n return False, None\n # First check for keywords in comment, for now we don't care about formatting after the keyword\n has_keyword = self.check_word_in_list_in_string(self.keywords, comment.body)\n if not has_keyword:\n return False, None\n # Next we check if we have states or abbreviations\n abbrevs = self.check_comment_for_dictionary_keys_and_values(comment, self.states)\n if len(abbrevs) < 1:\n return False, None\n if str(comment.author) == self.bot_name:\n return False, None\n for reply in comment.replies:\n if str(reply.author) == self.bot_name:\n return False, None\n\n return True, abbrevs", "def _truth_value(self, condition):\n if condition:\n return 'true stuff'\n else:\n return 'false stuff'", "def _truth_value(self, condition):\n if condition:\n return 'true stuff'\n else:\n return 'false stuff'", "def check(self):\n\n return self.variable.check(self.cval, self.conditional)", "def conditional(self) -> global___Statement.Conditional:", "def meets_condition(db_type: str):\n\t\t...", "def condition_forward_checking(csp, var) :\n return False", "def condition_forward_checking(csp, var) :\n return False", "def check(self):\n return True", "def check(self):\n raise NotImplementedError", "def check(self, runtime):\n return True", "def condition(self, device, log):\n return True", "def isConditional(self) -> bool:\n ...", "def CheckType(self, *args, **kwargs):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self) -> None:\n\n raise NotImplementedError", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def check(self):\n\n if not self.target.ok():\n return False\n\n if not self.progid.ok():\n return False\n\n if not self.prinapp.ok():\n return False\n\n if not self.observers.ok():\n return False\n\n return True", "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def conditionPassed(self):\n result = Activatable(self.effects, condition=AlwaysTrueCondition()).canActivate(self.game)\n self.assertTrue(result, \"The Activatable should be activatable\")", "def passed(self):\n return self.is_executed and self.is_executed_ok and self.is_equal_result", "def check(self, value):\n raise NotImplementedError", "def check(self):\n raise NotImplementedError('Must be implemented by subclass.')", "def is_satisfied(self, item: Any) -> bool:", "def ready_to_proceed(self):\n if self.current_step is None or self.step_position == StepPosition.Before:\n return False\n\n for condition, _ in self.current_step.conditions:\n if condition.satisfied():\n return True\n return False", "def checkType(self, value):\n pass", "def match_condition(self, event_type, rule):\n logger.debug(\"Matching rule conditions\")\n rule_event_type = rule['trigger']['event']\n \n if event_type == rule_event_type:\n logger.debug(\"Matching rule conditions: type MATCHED\")\n # Hm, might be worth adding perms, owner, status?\n return True\n return False", "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if 
not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "def check(self) -> None:", "def decision(self) -> bool:\n\n while True:\n # Get's user input, makes all charactures lowercase, and removes any whitespace\n decision = input('Enter \"hit\" or \"stay\". \\n').lower().strip()\n\n if decision == 'hit' or decision == 'stay':\n return decision == 'hit'\n else:\n # Humans can be dumb. Doesn't break the while loop\n print('\\nYou must type \"hit\" or \"stay\".')", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def check(self, node):\n # do the necessary setup/arguments and call self.visit (node, args)\n self.visit(node, defined=set())", "def self_check(self):\r\n \r\n try:\r\n #tries to get a value from each sensor\r\n \r\n sensor_1_value = self.sen.get_sensor_value()\r\n\r\n # checks if the value is a float else rase exception\r\n\r\n if type(sensor_1_value) != float:\r\n raise Exception()\r\n\r\n #if the sensors dont return a value or is in the wrong type\r\n #the code will fail before here and get caught by the catch.\r\n #otherwise its sets the pass or fail condition to true\r\n \r\n pass_or_fail = True\r\n \r\n\r\n except:\r\n #if the self check fails then it sets the pass or fail\r\n #condition to false\r\n \r\n pass_or_fail = False\r\n \r\n \r\n return(pass_or_fail)", "def select(condition: Union[Callable, int], meta: Counter) -> bool:\n if condition is None:\n return True\n elif isinstance(condition, int):\n return sum(meta.values()) == condition\n elif callable(condition):\n if not isinstance(condition(meta), bool):\n raise TypeError('selection condition expected to return a boolean')\n return condition(meta)\n return False", "def check(steps: Steps, error_message: str, condition) -> bool:\n if steps:\n step = steps.get_last_step()\n if not callable(condition):\n raise ValueError(\"The 'condition' argument \"\n \"must be a callable object\")\n else:\n if not condition():\n raise ValueError(error_message)\n else:\n step.set_status(Status.PASSED)\n return True\n\n return False", "def check(self, value: Any) -> None:\n if not isinstance(value, self.oktype):\n raise TypeError(value)", "def check_condition(self, element):\n conditional = element.getAttribute(\"conditional\")\n\n # No condition, then we execute this statement.\n #\n if len(conditional) == 0:\n return True\n\n # We have a conditional. 
See if it begins with a '!', which inverts\n # our test.\n #\n result = True\n oc = conditional\n if conditional[0] == '!':\n result = False\n conditional = conditional[1:]\n\n if self.settings is not None and conditional in self.settings.ids:\n if self.settings.value(conditional) is True:\n return result\n return not result\n return not result", "def check():", "def _if(self):\n debug.show(\"if:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 2:\n ifcode = isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"if:True\")\n evaluate(ifcode)\n else:\n debug.err(\"not enough items on the stack\")\n debug.show(\"if:False\")\n return None", "def check(self,):\n self.is_valid_according_policy()", "def test_empty_condition(self):\n assert_that(Condition.is_valid(''), equal_to(True))", "def condition_check(self, tic, condition1=100, condition2=10, condition3=10, earned_animation_duration=1,\r\n defense_duration_mean=30,\r\n defense_duration_sd=5):\r\n if self.counters['1'] == condition1: # changed: added provoked negation from fulfilling any condition\r\n self.provoked = 0\r\n self.provoked_timer = 0\r\n self.points += 1\r\n self.earned_point_animation_timer = earned_animation_duration\r\n self.counters['1'] = 0\r\n if self.counters['2'] == condition2:\r\n self.provoked = 0\r\n self.provoked_timer = 0\r\n self.stole_point = 1\r\n self.counters['2'] = 0\r\n if self.counters['3'] == condition3: # changed\r\n self.provoked = 0\r\n self.provoked_timer = 0\r\n block_value = abs(np.random.normal(defense_duration_mean, defense_duration_sd, 1))\r\n self.defense_blocks = np.append(self.defense_blocks, block_value)\r\n self.defense_blocks_data = np.append(self.defense_blocks_data, block_value)\r\n self.counters['3'] = 0", "def satisfied(self, tailValue, headValue):\n return False", "def applies(self, *args, **kwargs) -> bool:\n return self.__guard is None or self.__guard(*args, **kwargs)", "def ok(self) -> bool:\n return self._exc_type is None", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def is_waitable(self):\n return self._condition is not None", "def check(self, data):# ->bool:\r\n return check(self.gd, data)", "def checkGoal(self):\n # -- It is not included for simplifity --#\n if self.reward_cumulative != None:\n x = round((abs(self.reward_cumulative) - abs(round(self.reward_cumulative))) * 100);\n rem_goal = x % 25\n rem_timeout = x % 20\n if rem_goal == 0 and x != 0:\n self.is_goal = True\n else:\n self.is_goal = False\n\n if rem_timeout == 0 and x != 0:\n self.is_timeout = True\n else:\n self.is_timeout = False", "def check_type(self):\n return True", "def check(self):\n\n try:\n if self.compare == \"eq\":\n return self._varstate == self.transform(self.compval)\n if self.compare == \"ne\":\n return self._varstate != self.transform(self.compval)\n if self.compare == \"in\":\n return self._varstate in [self.transform(x) for x in self.compval.split(\",\")]\n if self.compare == \"not in\":\n return self._varstate not in [self.transform(x) for x in self.compval.split(\",\")]\n if self.compare == \"gt\":\n return self._varstate > self.transform(self.compval)\n if self.compare == \"ge\":\n return self._varstate >= self.transform(self.compval)\n if self.compare == \"lt\":\n return self._varstate < self.transform(self.compval)\n if self.compare == \"le\":\n return self._varstate <= self.transform(self.compval)\n return False\n except Exception as e:\n emsg = \"Error: Counter variable {} : Comparison failed with 
value \\\"{}\\\".\\nTrace: {}\".format(\n self.name, val, e.__traceback__)\n self.sending({\"subject\": self.target,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"error report\",\n \"target\": self.type + \".state variable\",\n \"value\": emsg}})\n\n return False", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def IsOk(self):\r\n \r\n return True", "def ok(self) -> bool:\n # pylint:disable=invalid-name\n raise NotImplementedError # pragma: no cover", "def does_match(self, situation: Perception) -> bool:\n return self.condition.does_match(situation)", "def _alert_condition(self, soup: BeautifulSoup) -> bool:\n raise NotImplementedError()", "def Assert(condition):\n try:\n assert TestStepsTools.Eval(condition)\n except AssertionError:\n _LOGGER.error('Condition %s is not True', condition)\n raise\n\n return True", "def check_criteria(self, _sender, **msg):\n msg = msg['iter_msg']\n if len(_sender.performance_history) == _sender.patience:\n # Value / threshold based methods:\n # Check the latest value of the performance history against a\n # threshold calculated based on the performance history\n msg.should_stop = \\\n check_should_stop(\n mode=_sender.mode,\n performance_history=_sender.performance_history)", "def __bool__(self):\n return self.isValid()", "def hit(self):\n hit = self.delegate.checkHit(100, 0, self.environment)\n assert hit, \"Should always hit\"", "def check_validity(self):\n try:\n if self.type == ConstraintTypes.EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.NOT_EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.WITHIN:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], type(self.value[1])), \"Invalid types.\"\n )\n enforce(\n isinstance(self.value[1], type(self.value[0])), \"Invalid types.\"\n )\n elif self.type == ConstraintTypes.IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.NOT_IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 
0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.DISTANCE:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], Location),\n \"Invalid type, expected Location.\",\n )\n enforce(\n isinstance(self.value[1], float), \"Invalid type, expected Location.\"\n )\n else: # pragma: nocover\n raise ValueError(\"Type not recognized.\")\n except ValueError:\n return False # pragma: nocover\n\n return True", "def test_variable(self, condition, undefined_paths, current_path):\n var = str(condition.find('name').text)\n if not var in self.variables.get_keys() and self.fail_on_undefined_vars:\n current_path += ((var, 'undefined variable'),)\n undefined_paths.add(current_path)\n return False\n else:\n return True", "def is_arbitrary(self):\n return 'conditions' not in type(self)._fields", "def verify(condition):\n global _is_in_verify_within\n if not condition:\n calling_frame = inspect.currentframe().f_back\n error_message = 'verify() failed at line {} in \"{}\"'.format(calling_frame.f_lineno,\n calling_frame.f_code.co_filename)\n if not _is_in_verify_within:\n print error_message\n return False\n return True", "def _check_value(self, value):\n raise NotImplementedError", "def perform(self):\n if self.format_and_eval_string(self.condition):\n raise ContinueException()", "def testConditionChecking(self):\n\n state = State.from_problem(self.prob)\n \n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition))\n\n with drive.instantiate([\"agent\", \"tru1\", \"apt2\"], self.prob):\n self.assertFalse(state.is_satisfied(drive.precondition))", "def check(self,item):\r\n raise AbstractError\r\n return False", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def __bool__(self):\n return not(self.outcome != 0 or self.filled)", "def check_event_status(self):\n pass", "def check(self, answer):\n return self.answer == answer", "async def should_handle(self):\n local_controller = self.controller\n cavern = local_controller.caverns\n if local_controller.hives and not cavern:\n return False\n if not local_controller.can_train(HYDRALISK, local_controller.hydradens.ready):\n return False\n if local_controller.pits.ready and not local_controller.hives and not await BuildHive.morphing_lairs(self):\n return False\n if cavern.ready:\n return len(local_controller.ultralisks) * 2.75 > len(local_controller.hydras)\n return not local_controller.floating_buildings_bm", "def is_met(self, *args, **kwargs):\n today = timezone.now().date()\n if self.day == today.day and (not self.last_executed or self.last_executed.date() != today):\n return True\n\n return False", "def check(self, description: Description) -> bool:", "def check_condition(self, query_dict):\n return all(key in self.__data and self.__data[key] == value\n for key, value in query_dict.items())", "def is_satisfied_by(self, val):", "def _check(self, event):\n\n if 
self._type is None:\n if isinstance(event, Event):\n self._type = Event\n elif isinstance(event, TimeRangeEvent):\n self._type = TimeRangeEvent\n elif isinstance(event, IndexedEvent):\n self._type = IndexedEvent\n else:\n if not isinstance(event, self._type):\n raise PipelineIOException('Homogeneous events expected')", "def run_precondition(self) -> bool:\n return self._does_apply", "def run_checks(self, tile_model: TileModel) -> bool:\n\n # Doge cannot fire the deck gun\n if self.player.role == PlayerRoleEnum.DOGE:\n return False\n\n if not self.player == GameStateModel.instance().players_turn:\n return False\n\n ap_deduct = 2 if self.player.role == PlayerRoleEnum.DRIVER else 4\n\n if not TurnEvent.has_required_AP(self.player.ap, ap_deduct):\n return False\n\n # If the player is not located in the\n # same space as the engine, they cannot\n # fire the deck gun.\n engine_orient = self.engine.orientation\n if engine_orient == VehicleOrientationEnum.HORIZONTAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row and self.player.column == self.engine.column + 1\n if not on_first_spot and not on_second_spot:\n return False\n\n elif engine_orient == VehicleOrientationEnum.VERTICAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row + 1 and self.player.column == self.engine.column\n if not on_first_spot and not on_second_spot:\n return False\n\n engine_quadrant = self._determine_quadrant(self.engine.row, self.engine.column)\n tile_input_quadrant = self._determine_quadrant(tile_model.row, tile_model.column)\n # If there are players present in the\n # quadrant, the deck gun cannot be fired.\n # tile input gotta be on quadrant adjacent to engine\n if self._are_players_in_quadrant(engine_quadrant) or tile_input_quadrant != engine_quadrant:\n return False\n\n return True", "def isUnConditional(self) -> bool:\n ...", "def check_status(self):", "def check(self, val, compare=\"eq\"):\n try:\n if compare == \"eq\":\n return len(self.entities) == int(val)\n if compare == \"ne\":\n return len(self.entities) != int(val)\n if compare == \"in\":\n return val in self.entities\n if compare == \"not in\":\n return val not in self.entities\n if compare == \"gt\":\n return len(self.entities) > int(val)\n if compare == \"ge\":\n return len(self.entities) >= int(val)\n if compare == \"lt\":\n return len(self.entities) < int(val)\n if compare == \"le\":\n return len(self.entities) <= int(val)\n if compare == \"first in\": # trigger comparator\n return len(self.entities) == 1 and self.lastone in self.entities\n if compare == \"last out\": # trigger comparator\n return len(self.entities) == 0 and val == self.lastone\n if compare == \"got in\": # trigger comparator\n return val == self.lastone and self.lastone in self.entities\n if compare == \"got out\": # trigger comparator\n return val == self.lastone and self.lastone not in self.entities\n if compare == \"frozen\":\n return self.frozen\n if compare == \"not frozen\":\n return not self.frozen\n\n return False\n except Exception as e:\n emsg = \"Error: State variable {} : Tracker compares the number of tracked entities. 
Comparison value ({}) must be an integer.\\nTrace: {}\".format(\n self.name, val, e.__traceback__)\n self.sending({\"subject\": self.target,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"error report\",\n \"target\": self.type + \".state variable\",\n \"value\": emsg}})\n\n return False", "def test_has_exactly_type():\r\n\r\n return has_exactly_type(1, int) and not has_exactly_type(True, int) and has_exactly_type(True, bool)", "def check_event_type(self, cond, center_cont=True, norm_cont=True):\n cond_mask = (self.event_table[:, 1] == cond)\n heights = self.event_table[cond_mask, 4].astype(float)\n if set(np.unique(heights)) <= {1., 0.}:\n event_type = 'dummy'\n else:\n event_type = 'continuous'\n if center_cont:\n heights -= np.mean(heights)\n if norm_cont:\n heights /= np.std(heights)\n self.event_table[cond_mask, 4] = heights\n return event_type", "def _check_feedback_func(self, assessment_type):\r\n if assessment_type == 'ai':\r\n section_name = 'AI-Assessed'\r\n elif assessment_type == 'peer':\r\n section_name = self.peer_problem_name\r\n else:\r\n raise ValueError('Assessment type not recognized. Must be either \"ai\" or \"peer\"')\r\n\r\n def _inner_check():\r\n self.course_nav.go_to_sequential('Self-Assessed')\r\n self.course_nav.go_to_sequential(section_name)\r\n\r\n try:\r\n feedback = self.open_response.rubric.feedback\r\n\r\n # Unsuccessful if the rubric hasn't loaded\r\n except BrokenPromise:\r\n return False, None\r\n\r\n # Successful if `feedback` is a non-empty list\r\n else:\r\n return bool(feedback), feedback\r\n\r\n return _inner_check", "def check_state(self):\n pass", "def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0", "def conditions():\n pass", "def available(self):\n return self.value_type in self._values", "def _check_pert(self, **kwargs):\n conditions = {\n 'mode >= low' : kwargs['mode'] >= kwargs['low'],\n 'high >= mode' : kwargs['high'] >= kwargs['mode'],\n }\n for condition_name, condition_value in conditions.items():\n if condition_value == False:\n err = 'Param \"{}\" fails PERT requirement \"{}\".'.format(kwargs, condition_name)\n raise FairException(err)", "def should_execute(self, context: dict) -> bool:\n\n print(f'Checking snippet: {self.name}')\n\n if 'when' not in self.metadata:\n # always execute when no when conditional is present\n print(f'No conditional present, proceeding with skillet: {self.name}')\n return True\n\n when = self.metadata['when']\n when_str = '{{%- if {0} -%}} True {{%- else -%}} False {{%- endif -%}}'.format(when)\n when_template = self._env.from_string(when_str)\n results = when_template.render(context)\n print(f' Conditional Evaluation results: {results} ')\n if str(results).strip() == 'True':\n return True\n else:\n return False", "def check_value(self, value):", "def is_met(self, *args, **kwargs):\n today = timezone.now().date()\n if self.weekday == today.isoweekday() and (not self.last_executed or self.last_executed.date() != today):\n return True\n\n return False" ]
[ "0.67982715", "0.6380257", "0.6109528", "0.60492665", "0.60492665", "0.5985159", "0.5962633", "0.5921427", "0.5918396", "0.5918396", "0.5901136", "0.5835458", "0.5809594", "0.5807828", "0.5793611", "0.57830834", "0.5774989", "0.5774989", "0.5774989", "0.5774989", "0.57692593", "0.57667524", "0.5766104", "0.57485825", "0.57485825", "0.5740793", "0.5722028", "0.5713305", "0.5710395", "0.56870604", "0.5674086", "0.56343937", "0.5633267", "0.56101954", "0.5608999", "0.56048083", "0.55792713", "0.55721676", "0.5566361", "0.5534553", "0.5533721", "0.55245763", "0.5522143", "0.5515", "0.5514821", "0.549943", "0.54739803", "0.5461081", "0.5458915", "0.54354477", "0.54265165", "0.54241383", "0.5420588", "0.5415391", "0.5413489", "0.54111385", "0.54081917", "0.53978235", "0.537918", "0.5375732", "0.53715295", "0.53671056", "0.5357238", "0.53561413", "0.5352969", "0.53526676", "0.53471136", "0.53465766", "0.5344842", "0.5343896", "0.53431594", "0.5341947", "0.5337501", "0.53346014", "0.5333632", "0.5327375", "0.5324745", "0.5319971", "0.5314797", "0.5312537", "0.5308442", "0.5306019", "0.53040946", "0.5300428", "0.5298351", "0.5296963", "0.52969253", "0.52939093", "0.52929574", "0.5290867", "0.528796", "0.5279996", "0.5279613", "0.5277578", "0.5274209", "0.5272402", "0.52685076", "0.526639", "0.526587", "0.52631336" ]
0.5867462
11
Simple check that the variable is set
def _check_valid_basic(self, get_params):
    try:
        if get_params(self.variable):
            return self.default
    except:  # noqa e722
        pass
    return not self.default
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_value(var) :\n return var != None", "def isSet(self) -> bool:\n ...", "def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None", "def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing", "def isSetVariable(self):\n return _libsbml.EventAssignment_isSetVariable(self)", "def isSetVariable(self):\n return _libsbml.Rule_isSetVariable(self)", "def isset(cls,name):\n inst = cls.inst()\n if name in inst.options and \\\n len(inst.options[name]) > 0:\n return True\n else:\n return False", "def _set(env_var: str) -> bool:\n return os.getenv(env_var) not in [None, \"0\"]", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\r\n if \"_is_valid\" in self.__dict__:\r\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def is_assign(self):\n return self.var.initializer is not None", "def sanity_check(self):\n return True", "def requested() -> bool:\n\treturn _flag.is_set()", "def check_vars(self):\n\n # The pipe name.\n if not has_pipe(self.pipe_name):\n raise RelaxNoPipeError(self.pipe_name)", "def test_variable(self, condition, undefined_paths, current_path):\n var = str(condition.find('name').text)\n if not var in self.variables.get_keys() and self.fail_on_undefined_vars:\n current_path += ((var, 'undefined variable'),)\n undefined_paths.add(current_path)\n return False\n else:\n return True", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert 
self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "def global_check(self):\n return None", "def _validate_variable(self, variable):\n if variable is not None:\n # test type\n if not self.validate_type(variable):\n return False\n\n return True", "def required_field(variable_to_test: any, err_string: str) -> None:\n if variable_to_test is None:\n print('\\n' + err_string + '\\n')\n sys.exit(1)", "def _check_assigned(self):\n\n if self.values is None and self.lazy:\n raise ValueError(\"This instance has not been assigned any data.\")", "def _check_vals(self):\n\n try:\n self.is_set = True\n self.pack()\n except Exception as err:\n # Set default values again\n raise ValueError(\"Invalid arguments. Could not packed since: {}\".format(err))\n self.__init__()", "def is_initialized(self) -> bool:\n return (\n (self._exchange_params_by_currency_id is not None)\n and (self._utility_params_by_good_id is not None)\n and (self._transaction_fees is not None)\n )", "def checkValName(self):\n valLength = len(self.val)\n if valLength == 0:\n try:\n valsLength = len(self.val)\n if valsLength == 0:\n self.val = self.vals\n except Exception:\n print \"No value set\"", "def check_settings(self):\r\n pass", "def isMandatory(self, is_set, get_value):\n\t\tif self._mandatory and not is_set(self.name):\treturn True\n\t\treturn False", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def check_settings(self):\n pass", "def Sets(self, variable):\n return variable and variable.upper().strip() in self.variables", "def exist(x):\n return x is not None", "def keyIsValid(key):\n\n isValid = 1\n \n try:\n temp = getParam(key)\n\n except ValueError:\n isValid = 0\n warning(\" WARNING: %s not set\" % (key))\n\n return isValid", "def test_no_existing_value(self):\n var_name = \"PICCOLO_TEST_1\"\n\n # Make sure it definitely doesn't exist already\n if os.environ.get(var_name) is not None:\n del os.environ[var_name]\n\n new_value = \"hello world\"\n\n with set_env_var(var_name=var_name, temp_value=new_value):\n self.assertEqual(os.environ.get(var_name), new_value)\n\n self.assertEqual(os.environ.get(var_name), None)", "def hasEditVariable(self, variable: Variable, /) -> bool:\n ...", "def validate_value_flag(self):\n if not self.app.args.value is None or self.app.args.value == '':\n return True\n else:\n return False", "def _check_env():\n\tif os.getenv(_DATA_DIRECTORY_ENV_KEY) is None:\n\t\texit_everything(ERROR_DATA_DIRECTORY_NOT_SET, f'{_DATA_DIRECTORY_ENV_KEY} env var not set')\n\t\n\tif os.getenv(_FRONTEND_URL_ENV_KEY) is None:\n\t\texit_everything(ERROR_FRONTEND_NOT_SET, f'{_FRONTEND_URL_ENV_KEY} env var not set')", "def check_params_set():\n critical = {'machineinfo' : MACHINEID, \n 'error_serverinfo' : ERROR_SERVER, \n 'serverinfo' : SERVER}\n for i, val 
in critical.iteritems():\n if not val:\n print \"ERROR: Set value for \\\"%s\\\" in baseconfig.cfg file first\\n\" % i\n sys.exit(1)", "def f_exists(self, varname):\r\n return (varname in self.locals_ptr)", "def required(self) -> bool:\n return self._default is None", "def check(self):\n return True", "def _check_settings(self):\n if self.api_key is None:\n raise ImproperlyConfigured(\"You must provide an API key.\")", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True", "def check():", "def sanity_check(self):\n pass", "def hasVeryTrustedValue(self):", "def check_envvar(envvar):\n if not os.environ.get(envvar):\n raise EnvironmentError(\"Variable '%s' not set\" % envvar)", "def test_variables(self):\n self._api.SetVariable(\"debug_file\", \"/dev/null\")\n self.assertEqual(self._api.GetVariableAsString(\"debug_file\"), \"/dev/null\")", "def payload_undefined(self):\n return self._attr is None", "def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)", "def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)", "def verify(self):\n D,S,I,C = False,False,False,False\n if self.geoData and os.path.exists(self.geoData):\n D = True\n if self.scales:\n S = True\n if type(self.idVariable) == int:\n I = True\n if self.cacheFile:\n C = True\n if D and S and I and C:\n return True\n return False", "def empty(self, value):\r\n return value is None", "def empty(self, value):\r\n return value is None", "def empty(self, value):\r\n return value is None", "def empty(self, value):\r\n return value is None", "def empty(self, value):\r\n return value is None", "def not_set(string):\n if string is None:\n return True\n elif string == '':\n return True\n return False", "def id_is_variable(self):\n return not self.defined", "def is_optionally_defined(self):\n return self._is_optionally_defined", "def check_persist_model():\n return env_helper.has_env(_store_environment_variable)", "def check_variable_line(self, line):\n self.E_str = \"check_variable_line\"\n line, any_vars = self.find_vars_in_str(line)\n words = [i for i in line.split('=') if i]\n words = self.fix_words(words)\n\n if len(words) < 2:\n self.print_error(\"The syntax for declaring variables is: \"\n + \"'<name> = <value>'\")", "def check_value(self, value):", "def has_value(self):\n return hasattr(self, '_value')", "def _is_ready(self):\n res = True\n for (key, val) in self._attrs.iteritems():\n if key not in self._optional_attrs:\n if val is None:\n res = False\n return res", "def check(self) -> None:", "def checkValid(self):\n if (self.noteName is not None) and (self.accidental is not None) and (self.octave is not None):\n return True\n else:\n return False", "def _check_env_var_presence_s3_db(env_var_name):\n if os.environ.get(env_var_name) is None:\n logger.info(\"Warning: the {name} environment variable is not set.\\n\"\n \"All tests that access AWS S3 database will fail\\n\".format(\n name=env_var_name))", "def invariant(self):\n\t\treturn ((self.url != \"\") and (self.locationId != \"\"))", "def test_exists(self, condition, undefined_paths, current_path):\n var = str(condition.find('variable').text)\n if not var in self.variables.get_keys() and self.fail_on_undefined_vars:\n current_path += ((var, 'undefined variable'),)\n undefined_paths.add(current_path)\n return False\n else:\n return True", "async def set_value(self, value: int | float, init: bool = False) -> bool:\n req_url = self.isy.conn.compile_url(\n [\n URL_VARIABLES,\n 
ATTR_INIT if init else ATTR_SET,\n str(self._type),\n str(self._id),\n str(value),\n ]\n )\n if not await self.isy.conn.request(req_url):\n _LOGGER.warning(\n \"ISY could not set variable%s: %s.%s\",\n \" init value\" if init else \"\",\n str(self._type),\n str(self._id),\n )\n return False\n _LOGGER.debug(\n \"ISY set variable%s: %s.%s\",\n \" init value\" if init else \"\",\n str(self._type),\n str(self._id),\n )\n return True", "def _is_initialized(self) -> bool:\n return len(self) > 0", "def check_state(self):\n pass", "def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")", "def has_value(key: str) -> bool:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.has_option(\"wsgi\", key)", "def test_get(self, env: yaenv.Env):\n assert env.get('BLANK', 'default') == ''\n assert env.get('MISSING') is None\n assert env.get('MISSING', 'default') == 'default'", "def is_initialized(self) -> bool:\n return (\n self._amount_by_currency_id is not None\n and self._quantities_by_good_id is not None\n )", "def is_empty(self):\r\n dict = self.piDD\r\n if dict == {\"[0]\": None}:\r\n return True\r\n else:\r\n return False", "def has_variable(self, name):\n return name in self._variables", "def _isinit(self):\n return self.dp.state()==PyTango.DevState.INIT", "def check(self):\n self._parse_config()\n if self.name == \"\":\n self.name = \"local\"\n self.chart_name += \"_\" + self.name\n data = self._get_data()\n if data is None:\n return False\n\n return True", "def is_empty(self) -> bool:\n return self.command is None and not self.required", "def verify_environment():\n reqs = ['NAME', 'RECIPIENT', 'SUBJECT', 'MESSAGE',\n 'MAILGUN_API_KEY', 'MAILGUN_DOMAIN']\n for req in reqs:\n if not os.getenv(req):\n logging.error('Environment variable ' + req + ' is not set')\n sys.exit(2)", "def validate_and_init() -> bool:\n env_vars_absent = [\n env\n for env in REQUIRED_ENVS\n if env not in os.environ or len(os.environ[env]) == 0\n ]\n if env_vars_absent:\n print(f\"Please define {env_vars_absent} in your github secrets. Aborting...\")\n return False\n\n if not (\n ENV_VAR_STATS_TYPE in os.environ\n and len(os.environ[ENV_VAR_STATS_TYPE]) > 0\n and os.environ[ENV_VAR_STATS_TYPE] in ALLOWED_STATS_TYPES\n ):\n print(f\"Using default stats type: {DEFAULT_STATS_TYPE}\")\n os.environ[ENV_VAR_STATS_TYPE] = DEFAULT_STATS_TYPE\n\n return True", "def has_default_value(self):\n return self.default is not None" ]
[ "0.718136", "0.6951559", "0.6602433", "0.65596896", "0.6471603", "0.6464828", "0.64093935", "0.63088953", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.61869645", "0.617754", "0.6168864", "0.6143583", "0.61368537", "0.61341774", "0.6120801", "0.6120801", "0.6120801", "0.6120801", "0.6120801", "0.6120801", "0.6120801", "0.6120801", "0.6120801", "0.6110685", "0.60985076", "0.6097396", "0.6078897", "0.6013909", "0.60008603", "0.59862715", "0.5970379", "0.59432125", "0.5934388", "0.5922709", "0.58937645", "0.5871716", "0.5864951", "0.5854971", "0.5842988", "0.58354425", "0.5827533", "0.5825942", "0.5812874", "0.57854843", "0.57551867", "0.57403034", "0.57231295", "0.57225835", "0.57072926", "0.5705515", "0.57005626", "0.5687629", "0.5687248", "0.568508", "0.568508", "0.5677692", "0.5677143", "0.5677143", "0.5677143", "0.5677143", "0.5677143", "0.56726825", "0.5664391", "0.5659608", "0.5657004", "0.5652213", "0.56513625", "0.56475514", "0.5646764", "0.5646543", "0.5641434", "0.56405824", "0.5636159", "0.5623938", "0.56175834", "0.5605055", "0.56022173", "0.5602102", "0.56015694", "0.5594691", "0.557835", "0.5574159", "0.5565855", "0.5556465", "0.5556458", "0.5548006", "0.5545008", "0.5542524", "0.5541201" ]
0.674686
2
Update with commands from the block
def update_commands(self, commands_str):
    commands = dict(parse_qsl(commands_str, keep_blank_values=True))
    _if = commands.get("if", self._if)
    if _if:
        self._if = Condition(_if)
    self._set_int(commands, "max_length")
    self._set_int(commands, "min_length")
    self.color = expand_color(commands.get("color"), passthrough=True, block=self.color)
    self.not_zero = "not_zero" in commands or self.not_zero
    self.show = "show" in commands or self.show
    self.soft = "soft" in commands or self.soft
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update():", "def update():", "def commandUpdate(self):\n pass", "def update( ):\r\n pass", "def update(self) -> None:\n ...", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n\n pass", "def update(self):\n self.blocks.update()\n self.players.update()\n self.Coins.update()", "def _update(self):\n pass", "def update(self):\n\n print(\"---\\n |\\n |\")\n print(self.__body__, \"\\n\\n\")", "def update(self):\r\n pass", "def update(self):", "def update(self):", "def update(self):", "def updateBlock(self):\n self.blkno = self.blknoSpinBox.value() - 1\n self.initDataParms()\n self.updateCurveList()\n self.compute()", "def changed_block(self, old_block, new_block):", "def update(*args):", "async def async_update(self):", "def update(self, *args, **kwargs):", "def _update_block_in_structure(self, structure, block_key, content):\n structure['blocks'][block_key] = content", "def update(self, args):\n pass", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def update(self, *args, **kw):\n pass", "def dispatch_update(self, update):\n command_handlers = {\n '/start': self._do_start,\n '/help': self._do_help,\n '/neuer_spruch': self._do_neuer_spruch,\n '/mein_spruch': self._do_mein_spruch,\n '/alle_meine_sprueche': self._do_alle_meine_sprueche,\n '/loesche_meine_sprueche': self._do_loesche_meine_sprüche,\n '/setze_aktiven_spruch': self._do_setze_aktiven_spruch\n }\n callback_handlers = {\n '/delete': self._callback_delete,\n '/active': self._callback_active\n }\n\n if \"message\" in update.keys():\n # Parse command\n args = update[\"message\"][\"text\"].split(' ', 1)\n command = args[0].replace('@cde_nasenspruch_bot', '')\n chat_id = update[\"message\"][\"chat\"][\"id\"]\n user_id = update[\"message\"][\"from\"][\"id\"]\n\n # Call command handler function\n try:\n command_handlers[command](chat_id, user_id, args, update)\n except KeyError:\n if command.startswith('/'):\n self.tclient.send_message('Unbekannter Befehl. 
Versuch es mal mit /help', chat_id)\n pass\n elif \"callback_query\" in update.keys():\n args = update[\"callback_query\"][\"data\"].split(' ', 2)\n command = args[0].replace('@cde_nasenspruch_bot', '')\n chat_id = update[\"callback_query\"][\"from\"][\"id\"]\n user_id = update[\"callback_query\"][\"from\"][\"id\"]\n \n # Call callback handler function\n try:\n callback_handlers[command](chat_id, user_id, args, update)\n except KeyError:\n print('Unbekannter callback_query {}'.format(update[\"callback_query\"][\"data\"]))\n pass", "def async_update(self):", "async def _update_stmt(self):\n raise NotImplementedError", "def update(self, line):", "def Update(self):\r\n\r\n # does nothing\r\n pass", "def update(self)->None:\n pass", "def _update(self, host):\n pass", "def help_update(self):\n print(UPDATE)", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "async def async_update(self):\n await self.chainso.async_get_data()", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(blockstring):\n with BLOCK_LOCK:\n for block in blockstring.split('|'):\n name, values = parse(block)\n BLOCKS[name] = values", "async def update(self) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # tell the user we are updating\n self.add_to_output(f\"Updating...\")\n # create ssh connection to miner\n try:\n conn = await self.get_connection(\"root\", \"admin\")\n # tell the user we are sending the update file\n self.add_to_output(\"Sending upgrade file...\")\n # send the update file\n await self.send_file(UPDATE_FILE_S9, \"/tmp/firmware.tar\")\n # install the update and collect the result\n result = await conn.run(f'sysupgrade /tmp/firmware.tar')\n self.add_to_output(result.stdout.strip())\n # tell the user the update completed\n self.add_to_output(f\"Update completed...\")\n except OSError:\n self.add_to_output(f\"Unknown error...\")", "def cmd_update(self, text):\n self.update(text)", "def update(self) -> None:\n pass", "def update_self(self, command):\n yield from command.reply('Starting full self update...')\n yield from self.git_pull(command)\n yield from self.migrate(command)\n yield from self.sysinfo(command)\n yield from self.restart(command)", "def version_block(self, block_data, user_id, update_version):\n if block_data.edit_info.update_version == update_version:\n return\n\n original_usage = block_data.edit_info.original_usage\n original_usage_version = block_data.edit_info.original_usage_version\n block_data.edit_info.edited_on = datetime.datetime.now(UTC)\n block_data.edit_info.edited_by = user_id\n block_data.edit_info.previous_version = block_data.edit_info.update_version\n block_data.edit_info.update_version = update_version\n if original_usage:\n block_data.edit_info.original_usage = original_usage\n block_data.edit_info.original_usage_version = original_usage_version", "def update(self, params):", "def _update(self, count=True, forced=False):", "def update(self):\n return self._process('update')", "def update(self):\n sess = u.get_default_session()\n # sess.run(self.update_op)\n u.run(self.update_op)", "def update_data():\n pass", "def update(self,update_flags):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self):\n 
self.__execute(self.pkgin_bin, \"update\")", "def dummy_update( self ):\r\n pass", "def send_update(self, target_block, DIR):\r\n new_opts = []\r\n new_weights = []\r\n if len(self.block_opts) != 1:\r\n raise Exception (\"Improperly collapsed block!\")\r\n i = self.block_opts[0] #our state\r\n for k in range(len(target_block.block_opts)): #k is their state\r\n #print(\"Checking \",i,k,DIR)\r\n if check_allowed(i,target_block.block_opts[k],DIR):\r\n new_opts.append(target_block.block_opts[k])\r\n new_weights.append(target_block.block_weights[k])\r\n target_block.block_opts = new_opts\r\n n = sum(new_weights)\r\n target_block.block_weights = [x/n for x in new_weights]\r\n target_block.block_weights = new_weights\r\n target_block.arr = target_block.superposition()\r\n return", "async def async_update(self) -> None:\n return", "def update(self, msg):\n pass", "def update():\n return 'update api in put'", "def _update_block_in_structure(self, structure, block_id, content):\r\n structure['blocks'][LocMapperStore.encode_key_for_mongo(block_id)] = content", "def handle_updates(self, update):\r\n self.__manage_pump()", "def gen_update(self, TL):\r\n pass", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def onBlock(self, data) :\n pass", "def command_wrapupdate(self):\n wrapupdater.main(*self.args())", "def update(self):\n pass", "def handl_update(updates):\n for update in updates[\"result\"]:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n items = db.get_item(chat)\n if text == \"/done\":\n keyboard = build_keyboard(items)\n send_message(\"Sélectionner un items pour le suprimer\", chat, keyboard)\n elif text == \"/start\":\n send_message(\"Bienvenue dans votre liste de tâches personnelles. 
\"\n \"Envoyez-moi n'importe quel texte et je le stockerai comme un article.\"\n \" Envoyé (/) terminé pour supprimer des éléments\", chat)\n elif text.startswith(\"/\"):\n continue\n elif text in items:\n db.delete_item(text, chat)\n items = db.get_item(chat)\n keyboard = build_keyboard(items)\n send_message(\"Sélectionner un items pour le suprimer \", chat, keyboard)\n else:\n db.add_item(text, chat)\n items = db.get_item(chat)\n # print(\"items \", items)\n message = \"\\n\".join(items)\n # print(\"message\", message)\n send_message(message, chat)", "def _update(self, last_blk=None):\n self._clear_queue()\n self.blockchain_lock.acquire()\n self.added_tx_lock.acquire()\n self.balance_lock.acquire()\n try:\n # Resolve blockchain to get last block\n if last_blk is None:\n last_blk = self._blockchain.resolve()\n # Update added transactions with transactions in blockchain\n blockchain_tx = self._blockchain.get_transactions_by_fork(last_blk)\n self._added_transactions = set(blockchain_tx)\n # Update balance state with latest\n self._balance = self._blockchain.get_balance_by_fork(last_blk)\n finally:\n self.blockchain_lock.release()\n self.added_tx_lock.release()\n self.balance_lock.release()\n self.stop_mine.clear()\n return last_blk", "def block(self, block):\n\n self._block = block", "def block(self, block):\n\n self._block = block", "def command_update_hw(self, cmd):\n # TODO\n pass", "def update(self):\n raise NotImplementedError", "def CommitBlock(self, block):\n QUEUE.put(Block(block=block))", "def update(self):\n # default implementation is to do nothing.", "def _set_block(self, pos, block_):\n self._changes[deepcopy(pos)] = block", "def update():\r\n\t#print(\"Updating poeninja database...\")\r\n\t#poeninja.update_database()\r\n\t#print(\"Updateing poeninja name-dict...\")\r\n\t#poeninja.update_name_dict()\r\n\t#print(\"Updating stats...\")\r\n\t#update_stats()\r\n\t#print(\"Updating items...\")\r\n\t#update_items()\r\n\t#print(\"Updating mod MySQL database...\")\r\n\t#update_mod_database()\r\n\tmydb = database()\r\n\twhile True:\r\n\t\trandom_mod(mydb)", "def update_command():\n # global selected_tuple\n backend.update(selected_tuple[0], \n title_text.get(), \n author_text.get(), \n year_text.get(), \n isbn_text.get())", "def __await__(self):\n return self.update().__await__()", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update_recovered_block(\n self, block_hash: Sha256Hash, block_msg: TBlockMessage\n ) -> None:\n pass", "async def on_block_updated(\n self, position: typing.Tuple[float, float, float], itself=True\n ):\n raise NotImplementedError", "def update(self, data: bytes):\n self.send(data)" ]
[ "0.70040303", "0.70040303", "0.68496", "0.68419474", "0.63492554", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6342758", "0.6337077", "0.6331995", "0.6330059", "0.6302485", "0.6299308", "0.6270749", "0.6270749", "0.6270749", "0.6260961", "0.620627", "0.6193621", "0.61855054", "0.6155812", "0.61086863", "0.6084857", "0.60818183", "0.60818183", "0.60542375", "0.60428303", "0.6017601", "0.6015206", "0.601412", "0.60061866", "0.6004132", "0.5978561", "0.5973048", "0.5966905", "0.59668803", "0.5941202", "0.5941202", "0.5941202", "0.59377795", "0.59347886", "0.59333926", "0.5924701", "0.5916357", "0.5912083", "0.59109807", "0.59104013", "0.5890006", "0.5888199", "0.5878568", "0.58569574", "0.5856585", "0.5856585", "0.5856585", "0.5856585", "0.5850872", "0.58491457", "0.58289856", "0.5803684", "0.5801602", "0.57888186", "0.57873785", "0.5773595", "0.57693106", "0.5748793", "0.5737725", "0.5736379", "0.5721011", "0.57063764", "0.5692362", "0.56882167", "0.56882167", "0.5681648", "0.5664678", "0.5654744", "0.56418234", "0.5607175", "0.55938256", "0.55913717", "0.55856365", "0.55853975", "0.55853975", "0.55853975", "0.55853975", "0.55853975", "0.55853975", "0.558511", "0.55819416", "0.5567681" ]
0.0
-1
set integer value from commands
def _set_int(self, commands, name):
    if name in commands:
        try:
            value = int(commands[name])
            setattr(self, name, value)
        except ValueError:
            pass
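A minimal, self-contained usage sketch of the document function above (the host class and the "max_length" attribute are hypothetical, invented for illustration; the method body mirrors the one above):

class _IntSettings:
    """Hypothetical host class; mirrors the _set_int method above."""

    max_length = None

    def _set_int(self, commands, name):
        if name in commands:
            try:
                value = int(commands[name])
                setattr(self, name, value)
            except ValueError:
                pass  # non-integer input is silently ignored

s = _IntSettings()
s._set_int({"max_length": "10"}, "max_length")
assert s.max_length == 10
s._set_int({"max_length": "ten"}, "max_length")  # invalid -> attribute unchanged
assert s.max_length == 10
s._set_int({}, "max_length")                     # missing key -> no-op
assert s.max_length == 10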
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setInteger(self, value):", "def setInteger(self, value):", "def set_num(self, num):\n self.cmd_num = num", "def setInteger(self, value: int):\n self.value = value", "def setInt(self, addr: ghidra.program.model.address.Address, value: int) -> None:\n ...", "def getint(self, strcommand):\n result = ct.c_longlong()\n command = ct.c_wchar_p(strcommand)\n self.lib.AT_GetInt(self.AT_H, command, ct.addressof(result))\n return result.value", "def setInteger(self, value):\n assert self._is_int is True\n self._value = value", "def execute(self, cmd: Command) -> int:\n try:\n return self.cmds[cmd.id]\n except KeyError:\n if cmd.val:\n self.state[cmd.key] = cmd.val\n self.cmds[cmd.id] = self.state[cmd.key]\n return self.cmds[cmd.id]", "def setInt(self, address: ghidra.program.model.address.Address, value: int) -> None:\n ...", "def set_raw_output(val: int) -> None:\n print(val)", "def main(cls, args):\r\n var = cls.MyInt()\r\n print \"Value before increment :\" , var.value\r\n cls.increment(var)\r\n print \"Value after increment :\" , var.value", "def output_integer(state, key, data):\n return int(state[key])", "def CLI(self, *_):\n self.reg.I = 0", "def setChanInt(\n self,\n u,\n chan,\n intval,\n ):\n\n # print \"setting chan %d to %d\" % (chan,intval)\n # sys.stdout.flush()\n\n self.DMX[u].set_chan_int(chan, intval)", "def seti(self, node, new_int):\n\n self.daq.syncSetInt(f'/{self.device_id}/{node}', new_int)", "def update_variable(value):\n return value + 1", "def setValue(self,val):\n val = int(val)\n self.input.setText(str(val))", "def setIntValue(self, *args):\n return _libsbml.ConversionOption_setIntValue(self, *args)", "def setIntValue(self, *args):\n return _libsbml.ConversionProperties_setIntValue(self, *args)", "def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0", "def getInteger(self):", "def getInteger(self):", "def set_power_management(value: int) -> None:", "def __int__(self): \n return int(self.val())", "def updateInt(data):\n emit('timestamp', data)", "def assign_value(self, points):\n \n self.value = int(points)", "def _cmd_set_button(self, command):\r\n index = int(command.get('value'))\r\n self.l_debug(\"_cmd_set_button\",\"index=%d\" % (index))\r\n return self._send_command_by_index(index)", "def setInt(self, key, value):\n self.__config.setValue(key, QtCore.QVariant(value))\n self.__saved = False", "def main2(cls, args):\r\n var = 1\r\n print \"Value before increment :\" , var\r\n cls.increment(var)\r\n print \"Value after increment :\" , var", "def _setVals(self, cmd_length=0):\n self.cmd_length = cmd_length", "def getValue(self) -> int:\n ...", "def set_attributes(value, attribute):\n try:\n int(value)\n return int(value)\n except ValueError:\n my_banner(\"Value must be an integer.\" + attribute.upper() +\n \" set to 0. 
Use \" + attribute.upper() + \" setter function to change attribute.\")\n return 0", "def value(self, p_int): # real signature unknown; restored from __doc__\n pass", "def set(self, val: int) -> None:\n self.val = val\n self.notes = []", "def __send__(self,val):\n assert(len(val) == 1)\n assert(type(val) == bytes)\n v = int.from_bytes(val,byteorder=\"little\")\n if(self.verbose):\n pc.color_stdout(\"GREEN\")\n print(\">> %s\\t - %s\\t - %d\"% (hex(v),bin(v),v))\n pc.color_stdout(\"RESET\")\n self.port.write(val)", "def test_integer_update(self):\r\n vm = Integer.value_manager(None, None, 5)\r\n assert not vm.changed\r\n vm.value = 4\r\n assert vm.changed", "def setContentInt(_session, _segment, _el, _data):\n _session.set_content_int(_el, _data)\n _session.gen3_f_a_f(_segment, keynodes.ui.format_int, _el, sc.SC_A_CONST|sc.SC_POS)", "def incrdecr(con,command,key,value=1):\n # yy=atpic.log.setname(xx,'incrdecr')\n thecommand=\"{command} {key} {value}\\r\\n\".format(command=command,key=key,value=value)\n con.send(thecommand.encode('utf-8'))\n line=get_line(con)\n # atpic.log.debug(yy,line)\n if line==b'NOT_FOUND':\n return None\n else:\n return int(line.strip())", "def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)", "def setInt(self, addr: ghidra.program.model.address.Address, value: int, bigEndian: bool) -> None:\n ...", "def assign_number(world: World, number: int):\r\n if number == 1:\r\n world[\"cpu choice\"] = \"santa\"\r\n elif number == 2:\r\n world[\"cpu choice\"] = \"reindeer\"\r\n elif number == 3:\r\n world[\"cpu choice\"] = \"snowman\"", "def getint(self, option):\n return getint(self.name, option)", "def command( self, value ): # uint8_t\n\t\tself.send(value, mode=0)", "def command(self, value):\n self._send(value, _RS_INSTRUCTION)", "def setIntegerOption(self, option, value):\n result = self.__lib.voikkoSetIntegerOption(self.__handle, option, value)\n if result == 0:\n raise VoikkoException(\"Could not set integer option %s to value %s\" % (option, value))", "def __int__(self):\n return self.get_raw_int()", "def command(self, value):\n self._send(value, self.RS_INSTRUCTION)", "def getInteger(self):\n pass", "def _command_number(self, number):\r\n if number == '.':\r\n # check if the number is a float\r\n if '.' in self.current_number:\r\n return\r\n self.current_number = self.current_number + number\r\n elif self.current_number == '0':\r\n # check if the number is 0\r\n # if it is, replace it\r\n self.current_number = number\r\n else:\r\n # else we add number after the current number\r\n self.current_number = self.current_number + number", "def set_value(self, index, mode, value):\n address = self.get_address(index, mode)\n self.program[address] = value", "def setC(self, c):\n\t\tself.c = int(c)", "def write(self, value: int, /) -> None:", "def integer(self, integer):\n\n self._integer = integer", "def setenumerated(self, strcommand, value):\n command = ct.c_wchar_p(strcommand)\n value = ct.c_bool(value)\n self.lib.AT_SetEnumerated(self.AT_H, command, value)", "def set_int_reset():\n int_reset = request.params.get(\"int_reset\", 0) == \"true\"\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetIntReset(pid, int_reset)\n if retval != 0:\n LOG.error(\"Failed to set PID integrator reset. 
Error code: %s\", ERROR_CODES[retval])", "def testSetPowerWithInt(self):\n self.node.power = 2\n\n self.assertEqual(\n (2, 2, 2),\n self.node.power\n )", "def writeInt(self, value: int):\n self._pack('!i', value)", "def execute(self):\n \n self.outvar = self.invar + .01", "def give_me_an_integer():\n return 5\n pass", "def test_device_init_command(self):\n default_val = 0\n self.assertEqual(self.sim_device.integer1, default_val)\n # Write to the attribute integer1\n self.sim_device.integer1 = 45\n self.assertEqual(self.sim_device.integer1, 45)\n # Reset the values of the device attributes to default.\n self.sim_device.Init()\n # Check that the desiredPointing attribute is reset.\n self.assertEqual(self.sim_device.integer1, default_val)", "def cmd_num(self):\r\n return self._arm.cmd_num", "def set_Value(self, n_value):\n#Joerg S/Martin W advice\n self.StoredValue=n_value", "def test_set_integer(self):\n setting_name = 'project_int_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': '170',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n obj = AppSetting.objects.get(name=setting_name, project=self.project)\n self.assertEqual(obj.get_value(), 170)", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def set_value(self,parameter_number,value):\n code = int(\"01100000\",2) | parameter_number\n command = pack('<BH',code,int(rint(value)))\n reply = self.query(command = command,ser = self.ser, count=1)\n if len(reply) != 1:\n warn(\"expecting 1, got %d bytes\" % len(reply)); return\n reply_code, = unpack('B',reply)\n if reply_code != code: warn(\"expecting 0x%X, got 0x%X\" % (code,reply_code))", "def __execute(self, command: MicrobitCommand) -> None:\n if isinstance(command, MicrobitAccelerometerSetRange):\n self.__set_max_value(command.value)", "def useti(self, prompt=None, default=None):\n \n i = 0\n abak = copy(default) # Backup our default value\n\n a = abak\n while(i<self.maxTries):\n tmp = self.uset(prompt,default)\n try:\n a = float(tmp)\n a = int(a)\n i = self.maxTries # preload failure\n except:\n # Print warning\n print\n print \" WARNING: Invalid Entry. 
Please enter an integer!!\"\n print \n # reload the default\n a = abak\n i = i+1\n \n return(a)", "def change_value(self,val):\n self.val = val", "def command(self, inst_data: int, buf: bytes, /) -> None:", "def get_int(self, item: str) -> int:\n return int(self[item])", "def GetInteger(self,prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetInteger(prompt)", "def gpio(self) -> int:", "def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)", "def cmd_calculation():", "def int_val(self) -> int:\n return int(self.current_token)", "def command(self, value):\n self.tcp_comms.tcp_params.ISO = int(value)\n self.tcp_comms.send_iso(self.tcp_comms.tcp_params.ISO)", "def variable(self, val):", "def value(self):\n return int(self.input.text())", "def __int__(self):\n\n return self.value", "async def set_param(self, param: str, value: int) -> ArchonCommand:\n cmd = await self.send_command(f\"FASTLOADPARAM {param} {value}\")\n if not cmd.succeeded():\n raise ArchonError(\n f\"Failed setting parameter {param!r} ({cmd.status.name}).\"\n )\n return cmd", "def getint(self, section, option):\n return int(self.get(section, option))", "def cli():\n palette = \"Set1\"\n num_vals = 10\n for arg in sys.argv[1:]:\n if arg.isdigit():\n num_vals = int(arg)\n else:\n palette = arg\n run(palette, num_vals)", "def set(self, var, value):\n cmd = '{0}={1};'.format(var, value)\n out = self.eval(cmd)\n if out.find(\"error\") != -1:\n raise TypeError(\"Error executing code in Matlab\\nCODE:\\n\\t{0}\\nMatlab ERROR:\\n\\t{1}\".format(cmd, out))", "def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))", "def set_count(c):\n global count\n count = c", "def increment_counter(self) -> None:", "def _change_cmd(self, cmd_number: int, new_cmd: str):\n if cmd_number is 0:\n self.command_group.cmd0 = str(new_cmd)\n elif cmd_number is 1:\n self.command_group.cmd1 = str(new_cmd)\n elif cmd_number is 2:\n self.command_group.cmd2 = str(new_cmd)\n else:\n assert False\n\n if self.command_group.is_cmd_runner_command(new_cmd):\n self._initialize_runner(new_cmd)\n\n logging.info(\"Setting {0} to {1}\".format(str(cmd_number), str(new_cmd)))\n cybld_helpers.print_seperator_lines()", "def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})", "def __setitem__(self, item: str, value: int) -> None:\n self.stoi[item] = value", "def adjustMotorValue(value):\n value = int(value)\n if value <= 20:\n value = 0\n motor.writeNumber(value)\n print(motor.readNumber(), flush=True)", "def cmd(self):", "def __init__(__self__, *,\n number: int):\n pulumi.set(__self__, \"number\", number)", "def set_pid(self, pid, value):\n if type(value) in (list, tuple):\n value = \",\".join(map(hex, value))\n cmd = \"ATSET {}={}\\r\".format(pid, value)\n self.sendCMD(cmd)", "def set_number(update: Update, context: CallbackContext):\n\n # Fix number of questions\n context.chat_data['number'] = 0\n context.chat_data['total'] = int(update.message.text)\n chat_id = update.message.chat_id\n\n # Create dictionary for future score input\n context.chat_data['user'] = dict()\n\n # Start the quiz\n data = {'chat_id': chat_id,'context': context}\n context.job_queue.run_once(run_quiz, 3, context=data, name=str(chat_id))# Delay time to 1st question\n\n return RUNNING_QUIZ", "def set_totalizer(self, cmd_code=0): #command #241\r\n byte0 = hex(cmd_code)[2:].zfill(2)\r\n status, data = self.comm2('f101' + byte0)\r\n totaliser_status = int(Brooks.get_bytes(0,data,1),16)\r\n 
return totaliser_status", "def set_assist(self, a):\n a = int(a*10)\n lo = a&0xff\n hi = a>>8\n self.communicate('S', '\\x03' + chr(lo) + chr(hi) + '\\x01\\x80')", "def increment_number(self):\n # self.number += 1\n print('fuckwit')\n # print(self.number)", "def entero(self):\n return int(\"\".join(self.binario), 2)", "def setCount(self, num):\n self.count=num" ]
[ "0.71163434", "0.71163434", "0.67835045", "0.62937844", "0.61526585", "0.6076695", "0.6070734", "0.6059233", "0.59684515", "0.5945891", "0.5915541", "0.58779633", "0.5800634", "0.5786643", "0.5786482", "0.5768864", "0.57324964", "0.5721998", "0.56996304", "0.5692422", "0.5685165", "0.5685165", "0.56751233", "0.56431276", "0.5626594", "0.56041265", "0.5597573", "0.55639285", "0.5547441", "0.5539316", "0.5536062", "0.55340344", "0.55291337", "0.55289656", "0.54955584", "0.54730266", "0.54685706", "0.5450458", "0.5447376", "0.543579", "0.5418894", "0.5416496", "0.5414876", "0.5393683", "0.5380284", "0.5373573", "0.536791", "0.53678626", "0.53282607", "0.5325864", "0.5322972", "0.53205806", "0.53204274", "0.5320304", "0.53106594", "0.5307354", "0.5305472", "0.530478", "0.53041255", "0.53009355", "0.52907765", "0.52888674", "0.5285767", "0.52756447", "0.52756447", "0.5275485", "0.52639776", "0.5260944", "0.52605516", "0.5258342", "0.52498186", "0.52358097", "0.5234446", "0.52343345", "0.5232994", "0.5219069", "0.5204685", "0.5198631", "0.5197136", "0.519005", "0.5189448", "0.5177734", "0.5174925", "0.5171609", "0.5167937", "0.5166577", "0.5154347", "0.515411", "0.51519144", "0.51471907", "0.5142907", "0.5142359", "0.51417935", "0.5135396", "0.51054364", "0.510299", "0.51014394", "0.5098644", "0.509646", "0.5091001" ]
0.8104841
0
set any commands for this block
def set_commands(self, command_str):
    self.commands.update_commands(command_str)
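A hedged sketch of how this one-line delegation might be exercised (the _Commands helper below is a hypothetical stand-in; the real update_commands parsing lives elsewhere in the module this document was taken from):

class _Commands:
    """Hypothetical stand-in that just records raw command strings."""

    def __init__(self):
        self.raw = []

    def update_commands(self, command_str):
        self.raw.append(command_str)

class _Block:
    def __init__(self):
        self.commands = _Commands()

    def set_commands(self, command_str):
        self.commands.update_commands(command_str)

b = _Block()
b.set_commands("max_length=10&show")
assert b.commands.raw == ["max_length=10&show"]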
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commands(self, commands):\n\n self._commands = commands", "def commands():", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def at_cmdset_creation(self):\n super().at_cmdset_creation()\n #\n # any commands you add below will overload the default ones.\n #", "def at_cmdset_creation(self):\n super().at_cmdset_creation()\n #\n # any commands you add below will overload the default ones.\n #", "def silkscreen_commands(self, commands):\n self.pcb_layers[\"silkscreen\"].commands = commands", "def at_cmdset_creation(self):\n self.add(default_cmds.CmdLook())\n self.add(default_cmds.CmdSay())", "def set_commands(self, commands, append=False):\n if append:\n self.commands.extend(commands)\n else:\n self.commands = commands", "def __init__(self, commands=None):\n self.commands = {}\n self.context = None", "def set_command_list(self):\n self.commands = dict( \\\n BTN_POWER_OFF = 2, \\\n BTN_TV = 27, \\\n BTN_1 = 4, \\\n BTN_2 = 5, \\\n BTN_3 = 6, \\\n BTN_4 = 8, \\\n BTN_5 = 9, \\\n BTN_6 = 10, \\\n BTN_7 = 12, \\\n BTN_8 = 13, \\\n BTN_9 = 14, \\\n BTN_0 = 17, \\\n BTN_FAVOURITE_CHANNEL = 68, \\\n BTN_PREVIOUS_CHANNEL = 19, \\\n BTN_VOLUME_UP = 7, \\\n BTN_VOLUME_DOWN = 11, \\\n BTN_CHANNEL_UP = 18, \\\n BTN_CHANNEL_DOWN = 16, \\\n BTN_MUTE = 15, \\\n BTN_SOURCE = 1, \\\n BTN_INFO = 31, \\\n BTN_TOOLS = 75, \\\n BTN_GUIDE = 79, \\\n BTN_RETURN = 88, \\\n BTN_MENU = 26, \\\n BTN_ENTER = 104, \\\n BTN_UP = 96, \\\n BTN_DOWN = 97, \\\n BTN_LEFT = 101, \\\n BTN_RIGHT = 98, \\\n BTN_INTERNET = 147, \\\n BTN_EXIT = 45, \\\n BTN_RED = 108, \\\n BTN_GREEN = 20, \\\n BTN_YELLOW = 21, \\\n BTN_BLUE = 22, \\\n BTN_TELETEXT = 44, \\\n BTN_MEDIA = 140, \\\n BTN_CONTENT = 121, \\\n BTN_CHANNEL_LIST = 107, \\\n BTN_AD = 0, \\\n BTN_SUBTITLE = 37, \\\n BTN_FORWARD = 69, \\\n BTN_PAUSE = 74, \\\n BTN_BACKWARD = 72, \\\n BTN_RECORD = 73, \\\n BTN_PLAY = 71, \\\n BTN_STOP = 70, \\\n BTN_SLEEP = 3, \\\n BTN_PICTURE_IN_PICTURE = 32, \\\n BTN_PSIZE = 62, \\\n BTN_ENERGY = 119, \\\n BTN_SRS = 110, \\\n BTN_PMODE = 40, \\\n BTN_P_DYNAMIC = 189, \\\n BTN_P_STANDARD = 223, \\\n BTN_P_MOVIE1 = 222, \\\n BTN_P_MOVIE2 = 221, \\\n BTN_P_USER1 = 220, \\\n BTN_P_USER2 = 219, \\\n BTN_P_USER3 = 218, \\\n BTN_ASPECT_43 = 227, \\\n BTN_ASPECT_169 = 228, \\\n BTN_S_SCART1 = 132, \\\n BTN_S_SCART2 = 235, \\\n BTN_S_MODULE = 134, \\\n BTN_S_AV = 236, \\\n BTN_S_VGA = 105, \\\n BTN_S_HDMI1 = 233, \\\n BTN_S_HDMI2 = 190, \\\n BTN_S_HDMI3_DVI = 194, \\\n BTN_S_HDMI4 = 197)", "def process_commands(self, commands: List[str]):", "def at_cmdset_creation(self):\n self.add(Command())", "def setCommand(self):\r\n self.command = CommandsDatabase(self.userid)", "def getCommands(self):", "def __init__(self, command_list, ):\n self.command_list = [] # all addition via function below\n self.add_command( command_list )", "def _command(self, *cmd, handler=None):", "def _commands(self) -> Dict[str, List[str]]:\r\n pass", "def __init__(self, *commands):\n \n self.cmds = dict()\n \n for nm, attr in commands:\n self[nm] = attr", "def cmd(self):", "def setup_method(self, method):\n self.cmds = []", "def __init__(self):\n self._cmd_list = ['use']", "async def set(self, ctx):\n # [p]set <subcommand>\n\n if ctx.invoked_subcommand is None:\n await send_command_help(ctx)\n pass", "def loadAllCommand(self, player):\n for eachCmd in self.commands.keys():\n player.addCommand(eachCmd, self.commands[eachCmd]())", "def run(self):\n for command in CUSTOM_COMMANDS:\n self.run_custom_command(command)", "def 
normal(self):\n self.run_command('normal')", "def loadStdCommands(self, player):\n player.addCommand('spawn', self.commands['spawn']())\n player.addCommand('edit', self.commands['edit']())\n player.addCommand('search', self.commands['search']())\n player.addCommand('warp', self.commands['warp']())\n player.addCommand('addstat', self.commands['addstat']())\n player.addCommand('delstat', self.commands['delstat']())\n player.addCommand('savezone', self.commands['savezone']())\n player.addCommand('obliterate', self.commands['obliterate']())", "def _send_custom_commands_after_welcome(self, conn):\n for command in self.commands:\n conn.send_raw(command)", "def preloop(self):\n super(CoreCommand, self).preloop() # sets up command completion", "def __init__(self, command_list: list = None) -> None:\n if command_list is None:\n command_list = implemented_commands\n for command in command_list:\n setattr(self, command.get(\"name\").replace(\" \", \"_\"), self._SingleCommand(command))", "def _transform_command(self) -> None:\n self.command = None if self.command == [] else self.command", "async def hockey_commands(self, ctx: commands.Context) -> None:\n pass", "def before_unlock_actions(self):\n for command in self.before_unlock_commands:\n addr = command[\"address\"]\n prop = command[\"property\"]\n if len(command[\"argument\"]) == 0:\n arg = [0]\n else:\n try:\n arg = [eval(command[\"argument\"])]\n except:\n arg = [command[\"argument\"]]\n if command[\"type\"] == \"set\":\n tine.set(addr, prop, arg)\n elif command[\"type\"] == \"query\":\n tine.query(addr, prop, arg[0])", "def initDefaultCommand(self):\n pass", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def __init__(self, commandBlockListenerAbstract):\n super(CraftBlockCommandSender, self).__init__()\n self.block = commandBlockListenerAbstract", "def initialize_commands(self) -> None:\n\n @self.command(name=\"snr\")\n @logger(\"all\")\n async def snr(ctx, *args):\n await ctx.message.channel.send(str(indie_seq.Seq([int(k) for k in args]).f()))\n\n @self.command(name=\"oeis\")\n @logger(\"all\")\n async def oeis(ctx, *args):\n global oeis_in_progress\n if not oeis_in_progress:\n oeis_in_progress = True\n if len(args) > 0:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(args[0]))\n else:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(str(random.randint(1, 341962))))\n oeis_in_progress = False\n else:\n await ctx.message.add_reaction(\"❌\")\n\n @self.command(name=\"collatz\")\n @logger(\"all\")\n async def collatz(ctx, *args):\n num = int(args[0])\n inity = \"\" if len(args) < 2 else args[1]\n\n collatz_results = indie_collatz.collatz_info(num)\n if len(inity) == 1:\n if inity == \"e\":\n await ctx.message.channel.send(f\"Evenity trajectory of {num}: {collatz_results.evenity_trajectory}\")\n elif inity == \"o\":\n await ctx.message.channel.send(f\"Oddinity trajectory of {num}: {collatz_results.oddinity_trajectory}\")\n else:\n await ctx.message.channel.send(f\"Collatz trajectory of {num}: {collatz_results.collatz_trajectory}\")\n\n @self.group(name=\"pig\")\n @logger(\"pig-math\")\n async def pig(ctx, *args):\n if ctx.invoked_subcommand is None:\n await ctx.message.add_reaction(\"❌\")\n\n def get_user_id_from_mention(user_id):\n user_id = user_id.replace(\"<\", \"\")\n user_id = user_id.replace(\">\", \"\")\n user_id = user_id.replace(\"@\", \"\")\n user_id = 
user_id.replace(\"!\", \"\")\n return user_id\n\n # Pig Math commands\n\n @pig.command(name=\"challenge\")\n @logger(\"pig-math\")\n async def pig_challenge(ctx, *args):\n challengee = get_user_id_from_mention(args[1])\n challengee = (await self.fetch_user(challengee)).name\n if len(args) > 2:\n point_target = int(args[2])\n else:\n point_target = 100\n pig_challenge = indie_pig.PigChallenge.create_challenge(ctx.message.author.name, challengee, point_target)\n await ctx.message.channel.send(pig_challenge.status)\n\n @pig.command(name=\"accept\")\n @logger(\"pig-math\")\n async def pig_accept(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.accept_challenge(ctx.message.author.name))\n\n @pig.command(name=\"reject\")\n @logger(\"pig-math\")\n async def pig_reject(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.reject_challenge(ctx.message.author.name))\n\n @pig.command(name=\"roll\")\n @logger(\"pig-math\")\n async def pig_roll(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"roll\"))\n\n @pig.command(name=\"bank\")\n @logger(\"pig-math\")\n async def pig_bank(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"bank\"))\n\n @pig.command(name=\"score\")\n @logger(\"pig-math\")\n async def pig_score(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"score\"))\n\n @pig.command(name=\"quit\")\n @logger(\"pig-math\")\n async def pig_quit(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"quit\"))\n\n @self.command(name=\"save\")\n @logger(\"modonly\")\n async def save(ctx, *args):\n self.save_data_files()\n await ctx.message.channel.send(\"Saved.\")\n\n @self.command(name=\"balance\")\n @logger(\"all\")\n async def balance(ctx, *args):\n bals = self.data[\"balances.json\"]\n user = ctx.message.author.id\n bal = 0\n if user in bals:\n bal = bals[user]\n else:\n bals[user] = 0 \n await ctx.message.channel.send(ctx.message.author.name+\", your balance is \"+str(bal)+\".\")\n\n @self.command(name=\"credit\")\n @logger(\"modonly\")\n async def credit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n credit = 0\n for arg in args:\n try:\n credit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] += credit\n else:\n bals[user.id] = credit\n\n @self.command(name=\"debit\")\n @logger(\"modonly\")\n async def debit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n debit = 0\n for arg in args:\n try:\n debit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] -= debit\n else:\n bals[user.id] = -debit\n\n @self.command(name=\"register\")\n @logger(\"all\")\n async def register(ctx, *args):\n \"\"\"\n This command will trigger a check if the user is registered,\n if not, the bot will ask them to review the terms and conditions and accept,\n if they accept, the 
bot will consider them registered\n \"\"\"\n user = ctx.message.author\n user_mention = ctx.author.mention\n chan_mention = \"<#876850365730021386>\"\n \n if user in self.data[\"users.json\"]:\n await ctx.message.channel.send(user_mention+\", you are already registered. :blue_heart:\")\n else:\n self.data[\"users_asked_to_be_registered.json\"].append(user)\n await ctx.message.channel.send(user_mention+\", do you accept the \"+chan_mention+\n \" (Indie Library Terms of Service). Command .accept if you do. :blue_heart:\")\n \n @self.command(name=\"accept\")\n @logger(\"all\")\n async def accept(ctx, *args):\n \"\"\"\n This command will trigger a check if the user has asked to be registered.\n If they have, then calling this triggers adding them to registered users.\n If they have not, they will be asked to type .register first.\n \"\"\"\n user = ctx.message.author\n user_mention = \"<@\"+str(user.id)+\">\"\n\n if user in self.data[\"users_asked_to_be_registered.json\"]:\n self.data[\"users.json\"].append(user)\n self.data[\"users_asked_to_be_registered.json\"].remove(user)\n await ctx.message.channel.send(user_mention+\", you have been successfully registered. :blue_heart:\")\n else:\n await ctx.message.channel.send(user_mention+\", have not commanded .register yet. \"\n \"Please do so first. :blue_heart:\")", "def custom(self, command):\n self.command.append(command)\n return self", "def command():\n pass", "def at_cmdset_creation(self):\n self.add(power.CmdPower())\n self.add(CmdCursedBone())\n # self.add(CmdDeathSpike())\n \"\"\"\n self.add(CmdAnchor())\n self.add(CmdBloodCloak())\n self.add(CmdBloodShield())\n self.add(CmdBloodWard())\n self.add(CmdBodyToMind())\n self.add(CmdBoneScythe())\n self.add(CmdCircleDeath())\n self.add(CmdCorpseBurst())\n self.add(CmdCorpseDrain())\n self.add(CmdCreateBloodGem())\n self.add(CmdCurseDeathLink())\n self.add(CmdDeathRain())\n self.add(CmdDeathWard())\n self.add(CmdDisease())\n self.add(CmdBoneDust())\n self.add(CmdGloom())\n self.add(CmdImbueBlood())\n self.add(CmdImbueDeath())\n self.add(CmdMassSilence())\n self.add(CmdMassSleep())\n self.add(CmdMassAnchor())\n self.add(CmdMassWeakness())\n self.add(CmdPlague())\n self.add(CmdPoison())\n self.add(CmdPoisonCloud())\n self.add(CmdSilence())\n self.add(CmdSleep())\n self.add(CmdSpectralHunter())\n self.add(CmdSummon())\n self.add(CmdSummonCorruptedMan())\n self.add(CmdSummonCursedArmy())\n self.add(CmdSummonCursedMan())\n self.add(CmdSummonReanimatedMan())\n self.add(CmdTeleport())\n self.add(CmdTeleportOther())\n self.add(CmdTransferPain())\n self.add(CmdVampiricClaw())\n self.add(CmdVampiricTouch())\n self.add(CmdWeakness())\n \"\"\"", "def additional_command(self):\n pass", "def __init__(self,cmds):\n\n # Since the list may include multi-line entries, first make sure that\n # they've been all broken up before passing it to magic2python\n cmdlist = map(magic2python,''.join(cmds).split('\\n'))\n self.value = '\\n'.join(cmdlist)", "def loadPlayerCommands(self, player):\n player.addCommand('get', self.commands['get']())\n player.addCommand('drop', self.commands['drop']())\n player.addCommand('go', self.commands['go']())\n player.addCommand('say', self.commands['say']())\n player.addCommand('look', self.commands['look']())\n player.addCommand('quit', self.commands['quit']())\n player.addCommand('commands', self.commands['commands']())\n player.addCommand('color', self.commands['color']())", "def _init_commands(self):\n\t\tself.commands = {}\n\t\tself.log.info(\"Initializing commands...\")\n\t\t# Get 
all the commands and iterate over them\n\t\tfor command in self.conf_commands:\n\t\t\t\n\t\t\t# Verify the necessary config elements exist at all\n\t\t\tdisabled = command.get('disabled', False) # Disabled is optional, defaults to False\n\t\t\tif(disabled == True):\n\t\t\t\tcontinue;\n\t\t\tcommand_name = command.get('name', \"unknown\").lower()\n\t\t\tdescription = command.get('description', \"\")\n\t\t\tpermission_str = command.get('permission', None)\n\t\t\taction = command.get('action', None)\n\t\t\tmin_votes = command.get('min_votes', None)\n\t\t\targs = command.get('args', None)\n\t\t\taliases = command.get('aliases', None)\n\t\t\tif(command_name is None \n\t\t\t\tor permission_str is None \n\t\t\t\tor action is None \n\t\t\t\tor min_votes is None \n\t\t\t\tor args is None):\n\t\t\t\tself.log.warn(\"Command '{}': Error, missing 'permission', 'action', 'min_votes', or 'args' elements for command \".format(command_name))\n\t\t\t\tcontinue\n\n\t\t\t# Verify the votes and permission string are valid\n\t\t\tif(min_votes < 0):\n\t\t\t\tself.log.warn(\"Command '{}': Error, min_votes cannot be less than zero for command {}\".format(command_name, min_votes))\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': minimum votes is {}\".format(command_name, min_votes))\n\n\t\t\ttry:\n\t\t\t\tpermission = Permission[permission_str]\n\t\t\t\tself.log.debug(\"Command '{}': permission is {}\".format(command_name, permission))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, permission string '{}' is invalid, must be one of: {}\".format(command_name, permission_str, Permission.__members__))\n\t\t\t\tcontinue\n\n\t\t\t# Try to get the corresponding action class\n\t\t\ttry:\n\t\t\t\tmodule = import_module(\"obs.actions.\"+action)\n\t\t\t\tclass_ = getattr(module, action)\n\t\t\t\tself.log.debug(\"Command {}: action is {}\".format(command_name, class_))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, no such action {} is defined. 
Full error: {}\".format(command_name, action, e))\n\t\t\t\tcontinue\n\n\t\t\t# Try to instantiate the action class\n\t\t\ttry:\n\t\t\t\tself.log.debug(\"Command {}: args are: {}\".format(command_name, args))\n\t\t\t\tcommand_obj = class_(self, command_name, aliases, description, permission, min_votes, args)\n\t\t\texcept ValueError as e:\n\t\t\t\tself.log.warn(e)\n\t\t\t\tcontinue\n\n\t\t\t# Add command_obj to internal reference\n\t\t\tself.commands[command_name] = command_obj\n\n\t\t\t# If there are aliases, add them too\n\t\t\t\n\t\t\tif(not aliases is None and isinstance(aliases, (list,) )):\n\t\t\t\tself.log.debug(\"Command '{}': Found aliases {}\".format(command_name, aliases))\n\t\t\t\tfor alias in aliases:\n\t\t\t\t\tself.commands[alias] = command_obj\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': No aliases\".format(command_name, aliases))\n\n\t\t# Finally after all commands have been initialized then add the help command\n\t\t#self.commands['help'] = Help(self)\n\n\t\t# Done initializing\n\t\tself.log.info(\"...Commands initialized: {}\".format(\n\t\t\t\tlist( self.commands.keys()) \n\t\t\t)\n\t\t)", "def loadOlcCommands(self, player):\n player.addCommand('newzone', self.commands['newzone']())\n player.addCommand('delzone', self.commands['delzone']())\n player.addCommand('listzone', self.commands['listzone']())\n player.addCommand('newroom', self.commands['newroom']())\n player.addCommand('redit', self.commands['redit']())\n player.addCommand('delroom', self.commands['delroom']())\n player.addCommand('newportal', self.commands['newportal']())\n player.addCommand('delportal', self.commands['delportal']())\n player.addCommand('zedit', self.commands['zedit']())\n player.addCommand('pedit', self.commands['pedit']())\n player.addCommand('newtemplate', self.commands['newtemplate']())", "def flush_cmds(self):\r\n if self.select_cmd is not None:\r\n self.do_cmd()", "def __init__(self):\n ElixirCommand.__init__(self)", "def _setup_command(self):\r\n raise NotImplementedError", "def run(self, commands: list[str]):\n ...", "def setup(bot):\n bot.add_cog(AdminCommands(bot))", "def at_pre_cmd(self):\n pass", "def __init__(self, **manager_commands):\n self.package = manager_commands", "def autodefaults (self):\r\n\r\n self.defaults_from_notes(identifying_key=EMPTYCHAR,\r\n mark=EQUAL,\r\n obj=self.default_dict['commands'],\r\n entrytext=COMMANDMACROSCRIPT)", "def handle_commands_preset(self,cl,addr) :\n self.curDir = ['CTF','Challenges','tempUser'+str(random.randint(100,999))]\n try :\n client = cl\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(\"\"\"\nCustom Shell Server With Limited Functionality\n\nNew User Login from {} at {}\n \\n\"\"\".format(addr[0],time.ctime()).encode())\n shellin = \"\" \n while True:\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(self.userp)\n shellin = client.recv(2048).decode().strip('\\n')\n if shellin == \"exit\" or shellin == \"exit \" or shellin ==\"exit \" or shellin ==\"exit \" :\n break\n elif shellin == \"\" :\n continue\n elif shellin.split()[0] in self.denied :\n client.send(self.err.format(shellin.split()[0]).encode())\n else :\n 
self.handle_extended_commands(client,addr,shellin)\n continue\n client.close()\n except Exception as E:\n print(E)\n print(Log(\"Connection with {} Terminated\".format(addr)))", "def exec_init_cmd(self):\n\n sys.argv = ['-c']\n self.push(self.rc.c)", "def command(self):\n raise NotImplementedError", "def add_general_cmdsets(self):\n from commands.base_commands import general\n from world.map import CmdMap\n self.add(general.CmdPoke)\n self.add(CmdMap)", "def __init__(self, cmd):\n # Build command + options \n self.cmd = cmd \n setattr(self, 'command', \"%s\" % (cmd))", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). 
Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. 
Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def add_command(self, name, fct):\r\n self.cmds[name] = fct", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)", "def set_command(cmd_list):\n global last_command\n last_command = ' '.join(cmd_list)", "def check_commands(self):\n pass", "def start(self) -> None:\n\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n print(\"Bienvenido al traductor TbSET. Para salir presiona Ctrl-D.\\n\")\n\n while True:\n try:\n typed = self.session.prompt(\n \".TbSET > \",\n completer=self.commands,\n validator=DummyValidator(),\n complete_while_typing=True\n )\n except KeyboardInterrupt:\n continue\n except EOFError:\n break\n else:\n self._command_processor(typed)\n\n print(\"Bye!\\n\")", "def main_command_loop(self) -> None:\n\n self.commands.pop(0)\n for command in self.commands:\n if \"--static\" == command:\n self.update_static_files()\n elif \"--temp\" == command:\n self.update_templates()\n elif \"--app\" == command:\n self.update_app()\n elif \"--settings\" == command:\n self.update_settings()\n elif \"--mysql-files\" == command:\n self.update_mysql_files()\n elif \"--help\" == command or \"-h\" == command:\n self.write_help_message()", "def cmd(self, cmd):\n\n self._cmd = cmd", "def command(self, cmd):\n self.lmp.command(cmd)", "def init_command_objects(self):\n super().init_command_objects()\n device_data = DeviceData.get_instance()\n\n args = (device_data, self.state_model, self.logger)\n\n self.register_command_object(\"SetStowMode\", SetStowMode(*args))\n self.register_command_object(\n \"SetStandbyLPMode\", SetStandbyLPMode(*args)\n )\n self.register_command_object(\"SetOperateMode\", SetOperateMode(*args))\n self.register_command_object(\"Scan\", Scan(*args))\n self.register_command_object(\"EndScan\", EndScan(*args))\n self.register_command_object(\"Configure\", Configure(*args))\n self.register_command_object(\"StartCapture\", StartCapture(*args))\n self.register_command_object(\"StopCapture\", StopCapture(*args))\n self.register_command_object(\n \"SetStandbyFPMode\", SetStandbyFPMode(*args)\n )\n self.register_command_object(\"Slew\", Slew(*args))\n self.register_command_object(\"Track\", Track(*args))\n self.register_command_object(\"StopTrack\", StopTrack(*args))\n self.register_command_object(\"Abort\", Abort(*args))\n self.register_command_object(\"Restart\", Restart(*args))\n self.register_command_object(\"ObsReset\", ObsReset(*args))", "def add_command(self, command):\n self.command.extend(command)", "def run_cmd(self):\r\n 
self.run = True", "def command(self, *commands):\n def decorator(function):\n for command in commands:\n self.functions[command] = function\n return function\n return decorator", "def post_run_cmds(self, host: str) -> tp.List[types.ShellCmdSpec]:\n raise NotImplementedError", "def postloop(self):\n super(CoreCommand, self).postloop() # Clean up command completion", "def parse(self, commands):\n raise NotImplementedError()", "def build_commands(self):\r\n for tag in self.bmark.tags.keys():\r\n # if this tag is a command then return true\r\n if tag in COMMANDLIST:\r\n self.commands.append(tag)", "def command(self, command):\n\n self._command = command", "def command(self, command):\n\n self._command = command", "def command(self, command):\n\n self._command = command", "def do_command(self, args):\n pass", "def setup(bot):\n new_cog = Commands(bot)\n bot.add_cog(new_cog)", "def default_supported_commands(self, commands):\n self.state.default_supported_commands(commands)", "def __init__(self, vanillaCommand, usage):\n super(VanillaCommandWrapper, self).__init__(vanillaCommand.b())\n # PAIL: rename\n self.vanillaCommand = vanillaCommand\n self.setPermission(\"minecraft.command.\" + vanillaCommand.getCommand())", "def command(command_list):\n def add_attribute(func):\n if not hasattr(func, \"command\"):\n func.command = []\n func.command.append(command_list)\n return func\n return add_attribute", "def __set_cmd_parse(self, _cmd: dict):\n for sitem in _cmd:\n #\n # alle sets durch\n # {\"set\":[{\"alert\":\"alert-04\",\"enable\":\"true\", ...}, {\"alert\":\"alert-03\",\"enable\":\"true\", ...}]}\n #\n alert_name = sitem['alert']\n if alert_name not in self.config:\n # da ist ein NEUNER Alarm angekommen == NEW\n self.log.debug(\"found NEW alert {} with set commands\".format(alert_name))\n _alert = ConfigFileObj.get_empty_configitem()\n ConfigFileObj.config_lock.acquire()\n self.config[alert_name] = _alert\n ConfigFileObj.config_lock.release()\n else:\n # EDIT Alarm\n self.log.debug(\"found alert {} with set commands\".format(alert_name))\n #\n # nun alle Eigenschaften durch\n #\n ConfigFileObj.config_lock.acquire()\n for set_command in sitem:\n if set_command == 'alert':\n continue\n # eine Einstellung schreiben\n self.log.debug(\"set property {} to {} for alert {}\".format(set_command, sitem[set_command], alert_name))\n if sitem[set_command] == 'null':\n self.config[alert_name][set_command] = \" \"\n else:\n self.config[alert_name][set_command] = sitem[set_command]\n ConfigFileObj.config_lock.release()\n # ende der kommandos per alarm\n # ende der alarme\n # es scheint alles geklappt zu haben\n # noch schnell den aktuellen hashwert berechnen (besser als version)\n self.config_hash['version'] = self.__get_hashstr(self.config)\n self.log.debug(\"set command for alert(s) successful!\")\n # callback, wenn erforderlich\n if self.on_config_change is not None:\n self.log.debug(\"call on_config_change...\")\n self.on_config_change(int(time()))\n return json.dumps({'ok': 'sucsessful commands done'}).encode(encoding='utf-8')\n # ENDE __set_cmd_parse", "def setup_commands(bot):\n # Reset the bot's command setup\n bot.reset_commands()\n # Load enabled mods\n for mod in bot.enabled_mods:\n try:\n full = 'mod_%s' % mod\n m = getattr(__import__('mods.%s' % full), full)\n except Exception:\n bot.log(ERROR, 'Importing the %s mod failed!' 
% mod)\n sys.excepthook(*sys.exc_info())\n continue\n\n try:\n bot.installed_mods[mod] = m\n # Check for a 404 handler, and replace the current one if there is\n p404 = getattr(m, 'handle_404', None)\n if p404:\n bot.cb_404 = p404\n\n # Check for a setup function, and run it if there is\n setup = getattr(m, 'setup', None)\n if setup:\n setup(bot)\n\n # Required command bank\n for cmd in m.command_bank:\n # Get the actual function\n func = getattr(m, cmd)\n # Get the args for the command\n data = m.command_bank[cmd]\n # If data[0] is true, mod_help will recognize this command\n if data[0]:\n bot.help_db[data[1]] = parse_help(func)\n # Get the main name and aliases inserted\n for alias in data[1:]:\n bot.command_db[alias] = func\n\n # Helper function for optional nameless multiples\n def add_optional(olist, name):\n olist.extend(getattr(m, f) for f in getattr(m, name, ()))\n\n # Optional filters are loaded and added to the list\n add_optional(bot.filters, 'filters')\n\n # Ditto for time-cycle callbacks\n add_optional(bot.periodic_cbs, 'periodic')\n\n # Handlers are the same, but structured as a dict with\n # \"type\": \"single function-name\" items\n handlers = getattr(m, 'handlers', None)\n if handlers:\n for cbtype in handlers:\n bot.handlers[cbtype].append(getattr(m, handlers[cbtype]))\n\n # Register any requirements\n # NOTE: By putting this at the end, we avoid the possibility of\n # getting fake requires.\n reqs = getattr(m, 'requires', None)\n if reqs:\n bot.required_mods.update(reqs)\n except Exception:\n bot.log(ERROR, 'Unable to install the %s mod!' % mod)\n del bot.installed_mods[mod]\n sys.excepthook(*sys.exc_info())\n\n missing = bot.required_mods - set(bot.installed_mods)\n if missing:\n raise MissingRequirementsError(missing)\n\n # And now for the post-install triggers.\n for mod, m in bot.installed_mods.items():\n post = getattr(m, 'post_prepare', None)\n if post:\n try:\n post(bot)\n except Exception:\n bot.log(ERROR, 'Unable to post-prepare the %s mod!' 
% mod)\n sys.excepthook(*sys.exc_info())", "def cmd(self, data, enable):\n pass", "def sys_commands(self, obj, phase=''):\n commands = self.settings.get(phase)\n if commands and isinstance(commands, list):\n for command in commands:\n if isinstance(command, list):\n # Find list items that match the string after \"att_\",\n # these are names names of attribute in the calling class\n for key, item in enumerate(command):\n if item[:4] == 'att_':\n attribute = item[4:]\n try:\n command[key] = getattr(obj, attribute)\n except AttributeError:\n continue\n try:\n popen = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except OSError as error:\n msg = \"Cannot run {0} the command doesn't exist,\\n\".format(command.pop(0))\n msg += \"Error: {1}\".format(error.strerror)\n print msg\n results = popen.communicate()\n if results[1]:\n print \"Running {0}, \\n Error: {1}\".format(command, results[1])\n else:\n continue", "def runCommand(self): \\\n # pylint: disable=no-self-use", "def modulecmds():\n\n class Commands:\n @staticmethod\n def setenv(key, val=None):\n val = val or key\n return \"setenv({0!r}, {1!r})\\n\".format(key, val)\n\n @staticmethod\n def unsetenv(key):\n return \"unsetenv({0!r})\\n\".format(key)\n\n @staticmethod\n def load(x):\n return \"load({0!r})\\n\".format(x)\n\n @staticmethod\n def load_first(*x):\n x = \",\".join(\"{0!r}\".format(_) for _ in x)\n return \"load_first({0})\\n\".format(x)\n\n @staticmethod\n def unload(x):\n return \"unload({0!r})\\n\".format(x)\n\n @staticmethod\n def prepend_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"prepend_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def append_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"append_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def remove_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"remove_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def set_alias(key, val):\n return \"set_alias({0!r},{1!r})\\n\".format(key, val)\n\n @staticmethod\n def unset_alias(key):\n return \"unset_alias({0!r})\\n\".format(key)\n\n @staticmethod\n def set_shell_function(key, val):\n return \"set_shell_function({0!r},{1!r})\\n\".format(key, val)\n\n @staticmethod\n def unset_shell_function(key):\n return \"unset_shell_function({0!r})\\n\".format(key)\n\n @staticmethod\n def use(path):\n return \"use({0!r})\\n\".format(path)\n\n @staticmethod\n def unuse(path):\n return \"unuse({0!r})\\n\".format(path)\n\n @staticmethod\n def swap(a, b):\n return \"swap({0!r}, {1!r})\\n\".format(a, b)\n\n @staticmethod\n def family(x):\n return \"family({0!r})\\n\".format(x)\n\n @staticmethod\n def conflict(x):\n return \"conflict({0!r})\\n\".format(x)\n\n @staticmethod\n def prereq(x):\n return \"prereq({0!r})\\n\".format(x)\n\n @staticmethod\n def prereq_any(*x):\n x = \",\".join(\"{0!r}\".format(_) for _ in x)\n return \"prereq_any({0})\\n\".format(x)\n\n @staticmethod\n def source(f):\n return \"source({0!r})\\n\".format(f)\n\n @staticmethod\n def help(x):\n return \"help({0!r})\\n\".format(x)\n\n @staticmethod\n def whatis(x):\n return \"whatis({0!r})\\n\".format(x)\n\n @staticmethod\n def isloaded(x):\n return \"is_loaded({0!r})\\n\".format(x)\n\n return Commands()", "def init_command_objects(self):\n super().init_command_objects()\n device_args = (self, self.state_model, self.logger)\n # resource_args = (self.resource_manager, self.state_model, self.logger) \n # only 
use resource_args if we want to have separate resource_manager object\n\n self.register_command_object(\n \"Configure\",\n self.ConfigureCommand(*device_args)\n ) \n self.register_command_object(\n \"AddReceptors\",\n self.AddReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"RemoveReceptors\",\n self.RemoveReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"RemoveAllReceptors\",\n self.RemoveAllReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"ConfigureScan\",\n self.ConfigureScanCommand(*device_args)\n )\n self.register_command_object(\n \"StartScan\",\n self.ScanCommand(*device_args)\n )\n self.register_command_object(\n \"GoToIdle\",\n self.GoToIdleCommand(*device_args)\n )", "def control_center(self):\r\n c_command = input(\"Enter your command : \")\r\n self.curs.executescript(c_command)\r\n self.conn.commit()", "def update_commands(self, commands_str):\n commands = dict(parse_qsl(commands_str, keep_blank_values=True))\n _if = commands.get(\"if\", self._if)\n if _if:\n self._if = Condition(_if)\n self._set_int(commands, \"max_length\")\n self._set_int(commands, \"min_length\")\n self.color = expand_color(commands.get(\"color\"), passthrough=True, block=self.color)\n\n self.not_zero = \"not_zero\" in commands or self.not_zero\n self.show = \"show\" in commands or self.show\n self.soft = \"soft\" in commands or self.soft", "def set_command(self, command):\n self.player_command = command\n return None", "def ConsoleRun(self, command, sender):\n pass", "def __call__(self, cmd):\n cmdname = cmd.name\n self.commands[cmdname] = self._prepare_cmd(cmd)\n return cmd", "async def setheist(self, ctx):\r\n\r\n pass", "def cmd(self, command):\n self._commands.append(command)", "def _execute_impl(self, commands):\n raise NotImplementedError(\"abstract method\")" ]
[ "0.7058163", "0.70184225", "0.6852532", "0.6852532", "0.6852532", "0.6852532", "0.6804543", "0.6804543", "0.66652715", "0.65947163", "0.65928555", "0.65210867", "0.64140224", "0.63312364", "0.6290925", "0.62714374", "0.6251512", "0.6167499", "0.61642545", "0.6146838", "0.6128197", "0.61257267", "0.6070907", "0.6067032", "0.6065646", "0.6052757", "0.6041006", "0.60356104", "0.60264885", "0.60080695", "0.5994932", "0.5968947", "0.5966107", "0.5964301", "0.5924975", "0.5902757", "0.58377635", "0.5830734", "0.582569", "0.5817137", "0.58003116", "0.5793148", "0.5790446", "0.57795763", "0.57705426", "0.5750098", "0.5743952", "0.57320875", "0.572757", "0.5717779", "0.5706019", "0.56914276", "0.56861985", "0.5673234", "0.56729174", "0.5669422", "0.5657623", "0.5651898", "0.56467575", "0.5643041", "0.5614757", "0.56096184", "0.5604782", "0.560443", "0.5592759", "0.5591101", "0.55807096", "0.55791473", "0.55770844", "0.5568445", "0.5550506", "0.55471784", "0.55439717", "0.553301", "0.55311435", "0.5510976", "0.5504238", "0.5503939", "0.5503939", "0.5503939", "0.5499717", "0.5478448", "0.5469912", "0.5458316", "0.5447364", "0.5446283", "0.5437724", "0.5424095", "0.54141426", "0.5412261", "0.5400978", "0.539887", "0.53983", "0.53974915", "0.5396886", "0.5392795", "0.5392231", "0.5390548", "0.53876346", "0.53805643" ]
0.65425885
11
create a new sub block of the current block and return it. the sub block is added to the current block.
def new_block(self):
    child = Block(self, py3_wrapper=self.py3_wrapper)
    self.add(child)
    return child
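A runnable sketch of the parent/child relationship this method builds (the Block class below is a hypothetical minimal version carrying only what new_block needs; the real class has more state):

class Block:
    """Hypothetical minimal Block; only what new_block needs."""

    def __init__(self, parent=None, py3_wrapper=None):
        self.parent = parent
        self.py3_wrapper = py3_wrapper
        self.content = []

    def add(self, item):
        self.content.append(item)

    def new_block(self):
        child = Block(self, py3_wrapper=self.py3_wrapper)
        self.add(child)
        return child

root = Block()
child = root.new_block()
assert child.parent is root   # child keeps a reference to its parent
assert child in root.content  # and the parent holds the new sub block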
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_new_block(self):\n block = BasicBlock()\n self.blocks.append(block)\n return block", "def newblock(self, parent=None):\n block = ControlBlock()\n self.blocks.add(block)\n if parent:\n parent.add_child(block)\n return block", "def newblock(self, parent=None, **kwargs):\n block = ControlBlock(len(self.blocks), **kwargs)\n self.blocks.append(block)\n if parent:\n parent.add_child(block)\n\n return block", "def nextblock(self, parent=None, **kwargs):\n block = self.newblock(parent, **kwargs)\n if not parent and self.block:\n self.block.add_child(block)\n\n self.block = block\n return block", "def nextblock(self, parent=None):\n block = ControlBlock()\n self.blocks.add(block)\n if parent:\n parent.add_child(block)\n elif self.block:\n self.block.add_child(block)\n self.block = block\n return self.block", "def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def build_new_block(cls, data='', previous_block=None):\n if previous_block:\n new_index = previous_block.index+1\n previous_hash = previous_block.hash\n else:\n new_index = 0\n previous_hash = ''\n timestamp = int(time.time())\n block_hash = cls.build_block_hash(\n index=new_index,\n timestamp=timestamp,\n data=data,\n previous_hash=previous_hash\n )\n block = cls(\n index=new_index,\n previous_hash=previous_hash,\n data=data,\n timestamp=timestamp,\n block_hash=block_hash\n )\n\n return block", "def newBlock(preBlock, remitter, number, payee):\r\n index = preBlock.index + 1\r\n timestamp = int(round(time.time() * 1000))\r\n data = (remitter, number, payee).__str__()\r\n previousHash = preBlock.hash\r\n nounce = 0\r\n return Blockchain(index, data, timestamp, nounce, previousHash)", "def new_block(self, proof, previous_hash = None):\n #create a new Block & adds it to the chain.\n \n block = {\n 'index' : len(self.chain) + 1,\n 'timestamp' : time(),\n 'transactions' : self.pending_transactions,\n 'proof' : proof,\n 'previous_hash' : previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.pending_transactions = []\n\n self.chain.append(block)\n return block\n #pass", "def get_a_block(self, usage_id=None):\r\n scope_ids = Mock()\r\n if usage_id is None:\r\n usage_id = \"_auto%d\" % len(self.all_blocks)\r\n scope_ids.usage_id = usage_id\r\n block = self.system.construct_xblock_from_class(\r\n self.TestableInheritingXBlock,\r\n field_data=self.field_data,\r\n scope_ids=scope_ids,\r\n )\r\n self.all_blocks[usage_id] = block\r\n return block", "def switch(self):\n base_block = self.base_block or self\n self.next_block = 
Block(self.parent, base_block=base_block, py3_wrapper=self.py3_wrapper)\n return self.next_block", "def createFirstBlock(self):\n firstBlock = Block(0, self.__currentTransactionsList, 0, '00')\n self.__chain.append(firstBlock)", "def new_block(self, proof, previous_hash=None):\n \n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n # Add block to existing chain\n self.chain.append(block)\n return block", "def new_block(self, proof, previous_hash=None):\n\n\t\tblock = {\n\t\t\t'index': len(self.chain) + 1,\n\t\t\t'timestamp': time(),\n\t\t\t'transactions': self.current_transactions,\n\t\t\t'proof': proof,\n\t\t\t'previous_hash': previous_hash or self.hash(self.chain[-1]),\t\t\n\t\t}\n\n\t\t#Reset current list of transactions\n\t\tself.current_transactions = []\n\n\t\tself.chain.append(block)\n\t\treturn block", "def exit_block(self, parent=None, **kwargs):\n block = self.newblock(parent, have_code=False, is_exit=True, **kwargs)\n self.blocks.pop()\n return block", "def build_block(self, format_string):\n first_block = Block(None, py3_wrapper=self.py3_wrapper)\n block = first_block\n\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n value = token.group(0)\n if token.group(\"block_start\"):\n # Create new block\n block = block.new_block()\n elif token.group(\"block_end\"):\n # Close block setting any valid state as needed\n # and return to parent block to continue\n if not block.parent:\n raise Exception(\"Too many `]`\")\n block = block.parent\n elif token.group(\"switch\"):\n # a new option has been created\n block = block.switch()\n elif token.group(\"placeholder\"):\n # Found a {placeholder}\n key = token.group(\"key\")\n format = token.group(\"format\")\n block.add(Placeholder(key, format))\n elif token.group(\"literal\"):\n block.add(Literal(value))\n elif token.group(\"lost_brace\"):\n # due to how parsing happens we can get a lonesome }\n # eg in format_string '{{something}' this fixes that issue\n block.add(Literal(value))\n elif token.group(\"command\"):\n # a block command has been found\n block.set_commands(token.group(\"command\"))\n elif token.group(\"escaped\"):\n # escaped characters add unescaped values\n if value[0] in [\"\\\\\", \"{\", \"}\"]:\n value = value[1:]\n block.add(Literal(value))\n\n if block.parent:\n raise Exception(\"Block not closed\")\n # add to the cache\n self.block_cache[format_string] = first_block", "def new_block(self, proof, previous_hash=None):\n\n # Create the block\n my_block = Block(proof=proof,\n previous_hash=previous_hash or self.hash(self.last_block))\n my_block.save()\n\n # Update current_transactions with this new block.\n my_block_trans = self.current_transactions_obj\n\n for trans in Transaction.objects.filter(block__isnull=True):\n trans.block = my_block\n trans.save()\n\n block = {\n 'index': my_block.id,\n 'timestamp': my_block.timestamp,\n 'transactions': list(Transaction.objects.filter(block=my_block).values()),\n 'proof': my_block.proof,\n 'previous_hash': my_block.previous_hash,\n }\n\n return block", "def create_block(self):\n return poet_transaction_block.PoetTransactionBlock()", "def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False):\r\n if not raw:\r\n block_fields = self._serialize_fields(category, block_fields)\r\n return {\r\n 
'category': category,\r\n 'definition': definition_id,\r\n 'fields': block_fields,\r\n 'edit_info': {\r\n 'edited_on': datetime.datetime.now(UTC),\r\n 'edited_by': user_id,\r\n 'previous_version': None,\r\n 'update_version': new_id\r\n }\r\n }", "def add_block(self, block_pf):\n\n # test si il s'agit du bloc genesis\n if len(self.blocks) != 0:\n # check si previous H est coherent avant ajout a chaine\n if self.check_previousBlockH(block_pf.header['prevBlockH']):\n self.blocks.append(block_pf)\n else:\n print \"== Probleme de parent\"\n print \"= %s\" % block_pf.header['prevBlockH']\n print \"= %s\" % getHashBlock(self.get_topBlock())\n else:\n self.blocks.append(block_pf)", "def addBlock(self, data):\n #get the hashVal of last block in blockchain\n lastHash = self.chain[len(self.chain) - 1].hashVal\n timestamp = time()\n hashVal = Block.hashSHA(timestamp, lastHash, data, NONCE, DIFFICULTY)\n adding_block = Block(timestamp, lastHash, hashVal, data, NONCE, DIFFICULTY)\n \n self.chain.append(adding_block)\n return adding_block", "def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions':self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n self.current_transactions = []\n self.chain.append(block)\n return block", "def build_nested_blocks(self):\n pass", "def new_block(self, proof, previous_hash=None):\r\n block = {\r\n 'index': len(self.chain) + 1,\r\n 'timestamp': time(),\r\n 'transactions': self.current_transactions,\r\n 'proof': proof,\r\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\r\n }\r\n\r\n # reseta a atual lista de transacoes\r\n self.current_transactions = []\r\n\r\n self.chain.append(block)\r\n return block", "def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False,\n asides=None, block_defaults=None):\n if not raw:\n block_fields = self._serialize_fields(category, block_fields)\n if not asides:\n asides = {}\n document = {\n 'block_type': category,\n 'definition': definition_id,\n 'fields': block_fields,\n 'asides': asides,\n 'edit_info': {\n 'edited_on': datetime.datetime.now(UTC),\n 'edited_by': user_id,\n 'previous_version': None,\n 'update_version': new_id\n }\n }\n if block_defaults:\n document['defaults'] = block_defaults\n return BlockData(**document)", "def make_block(self, in_size, out_size, **kwargs):\n raise NotImplementedError(\"Abstract\")", "def new_block(self, previous_hash):\n\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'information': self.current_information,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_information = []\n\n self.chain.append(block)\n return block", "def addBlock(self, newBlock):\n newBlock.index = len(self.chain)\n newBlock.previousHash = self.chain[-1].hash\n newBlock.mineBlock(self.difficulty)\n self.chain.append(newBlock)\n self.writeBlocks()", "def createBlock(self, block: ghidra.program.model.mem.MemoryBlock, name: unicode, start: ghidra.program.model.address.Address, length: long) -> ghidra.program.model.mem.MemoryBlock:\n ...", "def makeBlock(tag):\n return {\"t\":\"RawBlock\",\"c\":[\"html\",tag]}", "def _do_add_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n block_type = int(args[4])\r\n starting_address = int(args[5])\r\n length = int(args[6])\r\n if bus_type == 'rtu':\r\n slave = 
self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.add_block(name, block_type, starting_address, length)\r\n return name", "def createNewBlock(self, nonce, previousBlockHash, hash):\n newBlock = Block(len(self.chain), self.pendingTransactions, nonce, hash, previousBlockHash)\n self.pendingTransactions = []\n self.chain.append(newBlock)\n return newBlock", "def genesis_block(self):\n block = Block(target=self.target, transactions=[])\n self.current_transactions.append(block)", "def _initBlock(o,block):\n o.block = block.clone().shift(*o.board.startPosition)", "def CreateBlock(audio):\n\n BitDepth = audio.getsampwidth() # Number of bytes\n FrameList = []\n BlockList = []\n SamplesCursor = audio.tell() # Cursor inside audio file\n NumSamples = audio.getnframes() # Num Samples inside audio file\n StatusList = BuildStatusBits() # List with all status bits\n StatusCursor = 0\n\n\n # Take 2 samples and create the frist Frame\n AudioSamples = audio.readframes(2)\n Frame = CreateFrame(AudioSamples[0:2], AudioSamples[2:5],\n StatusList, StatusCursor, BitDepth)\n StatusCursor += 1\n FrameList.append(Frame) # First Frame with Z preamble\n\n SamplesCursor = audio.tell()\n while SamplesCursor != NumSamples:\n if len(FrameList) == 192: # Each 192 frames we introduce a block\n print(\"what is love\")\n BlockList.append(FrameList)\n FrameList = []\n StatusCursor = 0 # We reset SampleCursor and FrameList\n AudioSamples = audio.readframes(2)\n Frame = CreateFrame(AudioSamples[0:2], AudioSamples[2:5],\n StatusList, StatusCursor, BitDepth)\n StatusCursor += 1\n FrameList.append(Frame) # First Frame with Z preamble\n\n AudioSamples = audio.readframes(2)\n Frame = CreateFrame(AudioSamples[0:2], AudioSamples[2:5],\n StatusList, StatusCursor, BitDepth)\n StatusCursor += 1\n FrameList.append(Frame)\n SamplesCursor = audio.tell() # Update cursor\n\n return BlockList", "def build_block(self, current, block_params):\n block_args = {}\n\n # extract name\n block_name = block_params['name']\n\n # save upper_tri flatten\n self.preds_triu |= (block_name == 'upper_tri')\n \n # if Keras, get block variables names\n pass_all_globals = True\n if block_name[0].isupper():\n pass_all_globals = False\n block_func = blocks.keras_func[block_name]\n block_varnames = block_func.__init__.__code__.co_varnames\n\n # set global defaults\n global_vars = ['activation', 'batch_norm', 'bn_momentum', 'norm_type',\n 'l2_scale', 'l1_scale', 'padding', 'kernel_initializer']\n for gv in global_vars:\n gv_value = getattr(self, gv, False)\n if gv_value and (pass_all_globals or gv in block_varnames):\n block_args[gv] = gv_value\n\n # set remaining params\n block_args.update(block_params)\n del block_args['name']\n\n # save representations\n if block_name.find('tower') != -1:\n block_args['reprs'] = self.reprs\n\n # U-net helper\n if block_name[-5:] == '_unet':\n # find matching representation\n unet_repr = None\n for seq_repr in reversed(self.reprs[:-1]):\n if seq_repr.shape[1] == current.shape[1]*2:\n unet_repr = seq_repr\n break\n if unet_repr is None:\n print('Could not find matching representation for length %d' % current.shape[1], sys.stderr)\n exit(1)\n block_args['unet_repr'] = unet_repr\n\n # switch for block\n if block_name[0].islower():\n block_func = blocks.name_func[block_name]\n current = block_func(current, **block_args)\n\n else:\n block_func = blocks.keras_func[block_name]\n current = block_func(**block_args)(current)\n\n return current", "def 
_make_block(self, model):\n # TODO Make base class\n assert model is not None, 'Top level model must be initialized first'\n self.model = model\n # If block is already present, remove it\n if self.model.component(self.name) is not None:\n self.model.del_component(self.name)\n self.model.add_component(self.name, Block())\n self.block = self.model.__getattribute__(self.name)\n\n self.logger.info(\n 'Optimization block initialized for {}'.format(self.name))", "def build(self, block_size):", "def _get_block(self, pos):\n raise NotImplementedError", "def get_block(self, usage_id, for_parent=None):\n def_id = self.id_reader.get_definition_id(usage_id)\n try:\n block_type = self.id_reader.get_block_type(def_id)\n except NoSuchDefinition:\n raise NoSuchUsage(repr(usage_id)) # pylint: disable= raise-missing-from\n keys = ScopeIds(self.user_id, block_type, def_id, usage_id)\n block = self.construct_xblock(block_type, keys, for_parent=for_parent)\n return block", "def create_entry_basic_block(self):\n bb = BasicBlock(self)\n self.basic_blocks.insert(0, bb)\n return bb", "def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len( self.chain ) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'merkle': self.hash(self.current_transactions),\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1])\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n # Add the block to the chain\n self.chain.append( block )\n self._write_chain()\n\n return block", "def prepare(block):\n if block.parent:\n block.parent = None\n if not block.__dict__['is_open'] is None:\n block.__dict__['open'] = block.is_open\n del(block.is_open)\n # trim empty elements...\n for attr in dir(block):\n if not callable(attr) and not attr.startswith(\"__\") and \\\n attr != \"makeNode\" and attr != \"pretty\":\n if block.__dict__[attr] in [\"\", [], None, {}]:\n del(block.__dict__[attr])\n if 'children' in block.__dict__ and len(block.children) > 0:\n for i, child in enumerate(block.children):\n block.children[i] = prepare(child)\n if 'inline_content' in block.__dict__ and \\\n len(block.inline_content) > 0:\n for i, child in enumerate(block.inline_content):\n block.inline_content[i] = prepare(child)\n if 'label' in block.__dict__ and len(block.label) > 0:\n for i, child in enumerate(block.label):\n block.label[i] = prepare(child)\n if 'c' in block.__dict__ and type(block.c) is list and \\\n len(block.c) > 0:\n for i, child in enumerate(block.c):\n block.c[i] = prepare(child)\n return block", "def create_block(self, previous_hash):\r\n if len(self.transaction_pool) < 1:\r\n return None, None\r\n\r\n # Create A Temporary Block\r\n block = {'index': None, # before mining set index to None\r\n 'timestamp': None, # before mining set timestamp to None\r\n 'nonce': 0, # before mining set nonce to 0\r\n 'transactions': self.transaction_pool, # Fill in all the transactions\r\n 'previous_hash': previous_hash, # Set the previous hash\r\n 'current_hash': ''} # Current hash is yet to be calculated\r\n\r\n # Empty Transaction Pool\r\n self.transaction_pool = [] # Once transactions have been placed in a block\r\n # they can be removed from the pool\r\n\r\n # Calculate Proof Of Work (Nonce)\r\n block['nonce'], block['current_hash'] = self.proof_of_work(block, previous_hash) # Validate the block by calculating the nonce\r\n block['index'] = len(self.chain) + 1 # Set the block index\r\n block['timestamp'] = str(datetime.datetime.now()) # Set the timestamp to the 
time when the block was validated\r\n\r\n # Add Block To DistrictNode's Own Chain\r\n self.chain.append(block) # Append the block to the list of blocks in the blockchain\r\n print(\"BLOCK ADDED TO 90\")\r\n for block in self.chain:\r\n for key, value in block.items():\r\n print(key, value)\r\n print('\\n')\r\n\r\n return self.chain, self.transaction_pool # Return the new chain and the new transaction_pool\r", "def create_block(self, x, y, block_type):\n sprite_stack = self.get_sprite(x, y)\n if sprite_stack:\n sprite = sprite_stack[-1]\n sprite.image = block_type\n return\n\n # no existing block, so create a new one\n block_x = x * self.block_x + self.offset_x + self.menu_x\n block_y = y * self.block_y + self.offset_y\n\n bar = Sprite(\"\", image_data=block_type, x=block_x, y=block_y)\n if (x, y) in self.sprites:\n self.sprites[(x, y)].append(bar)\n else:\n self.sprites[(x, y)] = [bar]", "def createInnerRepresentation(self):\n\n for idx, single_block in enumerate(self._block_list):\n del self._to_be_processed[:]\n del self._metastring_rest[:]\n self._metastring_rest.append(self._metastring[idx])\n self.addMetastringPointer(single_block)", "def add_block(self, env):\n block_size = (0.04, 0.04, 0.04)\n block_pose = self.random_pose(env, block_size)\n block_urdf = 'assets/stacking/block.urdf'\n block_id = env.add_object(block_urdf, block_pose)\n self.object_points[block_id] = np.float32((0, 0, 0)).reshape(3, 1)\n self._IDs[block_id] = 'block'\n return block_id", "def make_genesis_block():\n block = Block(index=0,\n timestamp=datetime.now(),\n data=\"Genesis Block\",\n previous_hash=\"0\")\n return block", "def MakeBlock(self, *args):\n return _BRepAlgo.BRepAlgo_EdgeConnector_MakeBlock(self, *args)", "def newCDataBlock(self, content, len):\n ret = libxml2mod.xmlNewCDataBlock(self._o, content, len)\n if ret is None:raise treeError('xmlNewCDataBlock() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp", "def Block(self):\n self.currtok = next(self.tg)\n statements = self.Statements()\n if self.currtok[1].name == \"RCURLY\":\n self.currtok = next(self.tg)\n\n return BlockExpr(statements.get_lst())\n\n raise SLUCSyntaxError(\"ERROR: Right Curly Brace on line {0}\".format(str(self.currtok[2] - 1)))", "def make_block_ptr(base: tensor, shape, strides, offsets, block_shape, order, _builder=None):\n return semantic.make_block_ptr(base, shape, strides, offsets, block_shape, order, _builder)", "def open_subblock(self, lines: Tuple[int, int], name: str, size: Tuple[int, int], color=None) -> None:", "def create_genesis_block(self):\r\n genesis_block = Block(0, [], time.time(), \"0\")\r\n genesis_block.hash = genesis_block.compute_hash()\r\n self.chain.append(genesis_block)", "def create_genesis_block(self):\n index = 0\n transactions = []\n timestamp = 0.0\n previous_hash = \"0\"*64\n block = Block(index=index, transactions=transactions, timestamp=timestamp,previous_hash=previous_hash)\n block.hash = block.compute_hash()\n self.chain.append(block)", "def copy(self):\n args = []\n for arg in self.args:\n if isinstance(arg, Block):\n arg = arg.copy()\n elif isinstance(arg, list):\n arg = [b.copy() for b in arg]\n args.append(arg)\n return Block(self.type, *args)", "def new_block(self, proof, previous_hash=None):\n servers = [\n \"1.us.pool.ntp.org\",\n \"2.us.pool.ntp.org\",\n \"3.us.pool.ntp.org\"\n ]\n\n response = {}\n\n try:\n response = self.c.request('0.us.pool.ntp.org')\n except Exception:\n for server in servers:\n try:\n response = self.c.request(server)\n\n if response:\n break\n\n except 
Exception:\n print('\\n //// alternate ntp server didnt work')\n\n block = {\n 'message': 'New Block Forged',\n 'index': len(self.chain) + 1,\n 'timestamp': response.tx_time or time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.chain[-1]['hash'],\n }\n\n # Calculate the hash of this new Block\n block['hash'] = self.hash(block)\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def create_block(self, prev_hash=None):\n # Update blockchain and balance state (thread safe)\n if prev_hash is not None and prev_hash not in self._blockchain.hash_block_map.keys():\n print(prev_hash, self._blockchain.hash_block_map)\n prev_blk = None if prev_hash is None else \\\n self._blockchain.hash_block_map[prev_hash]\n last_blk = self._update(prev_blk)\n pending_tx = self._get_tx_pool()\n gathered_tx = self._gather_transactions(pending_tx)\n block = self._mine_new_block(last_blk.header, gathered_tx)\n if block is not None:\n blk_json = block.to_json()\n # Add block to blockchain (thread safe)\n self.add_block(blk_json)\n print(f\"{self.__class__.__name__} {self.name} created a block.\")\n # Broadcast block and the header.\n self._broadcast_block(block)\n # Remove gathered transactions from pool and them to added pile\n with self.added_tx_lock:\n self._added_transactions |= set(gathered_tx)\n self._update()\n return block", "def newfragment(self):\n newfragment = fragment()\n self.addfragment(newfragment)\n return newfragment", "def create_block(self, proof, previous_hash=None):\n\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash,\n }\n\n # Reset current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block", "def splitblock(self, name=None, terminate=False):\n # -------------------------------------------------\n # Sanity check\n\n # Allow splitting only after leaders and before terminator\n # TODO: error check\n\n # -------------------------------------------------\n # Split\n\n oldblock = self._curblock\n newblock = self.func.new_block(name or 'block', after=self._curblock)\n op = self._lastop\n\n # Terminate if requested and not done already\n if terminate and not ops.is_terminator(op):\n op = self.jump(newblock)\n\n # -------------------------------------------------\n # Move ops after the split to new block\n\n if op:\n if op == 'head':\n trailing = list(self._curblock.ops)\n elif op == 'tail':\n trailing = []\n else:\n trailing = list(op.block.ops.iter_from(op))[1:]\n\n for op in trailing:\n op.unlink()\n newblock.extend(trailing)\n\n # -------------------------------------------------\n # Patch phis\n\n if terminate:\n self._patch_phis(oldblock.ops, oldblock, newblock)\n else:\n for op in oldblock:\n for use in self.func.uses[op]:\n if use.opcode == 'phi':\n raise error.CompileError(\n \"Splitting this block would corrupt some phis\")\n\n self._patch_phis(newblock.ops, oldblock, newblock)\n\n return oldblock, newblock", "def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, asides=None, **kwargs): # lint-amnesty, pylint: disable=arguments-differ\n with self.bulk_operations(parent_usage_key.course_key):\n xblock = self.create_item(\n user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, asides=asides,\n **kwargs)\n\n # skip attach to parent if xblock 
has 'detached' tag\n if 'detached' in xblock._class_tags: # pylint: disable=protected-access\n return xblock\n\n # don't version the structure as create_item handled that already.\n new_structure = self._lookup_course(xblock.location.course_key).structure\n\n # add new block as child and update parent's version\n block_id = BlockKey.from_usage_key(parent_usage_key)\n if block_id not in new_structure['blocks']:\n raise ItemNotFoundError(parent_usage_key)\n\n parent = new_structure['blocks'][block_id]\n\n # Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))\n if kwargs.get('position') is None:\n parent.fields.setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))\n else:\n parent.fields.setdefault('children', []).insert(\n kwargs.get('position'),\n BlockKey.from_usage_key(xblock.location)\n )\n\n if parent.edit_info.update_version != new_structure['_id']:\n # if the parent hadn't been previously changed in this bulk transaction, indicate that it's\n # part of the bulk transaction\n self.version_block(parent, user_id, new_structure['_id'])\n self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)\n\n # db update\n self.update_structure(parent_usage_key.course_key, new_structure)\n\n # don't need to update the index b/c create_item did it for this version\n return xblock", "def generate_sub_blocks(rdd):\n ### BEGIN SOLUTION ###\n return rdd", "def new_block(self, body_blocks, snake_head):\n\t\tx = randint(0, 35)\n\t\ty = randint(1, 26)\n\t\tself.rect.x = (25 * x) + 1\n\t\tself.rect.bottom = 25 * y\n\t\t\n\t\t# If new block is on snake, get new block\n\t\tif self.rect.x == snake_head.rect.x and self.rect.bottom == snake_head.rect.bottom:\n\t\t\tself.new_block(body_blocks, snake_head)\n\t\t\n\t\t# If new block is on any body block, get new block\n\t\tif body_blocks:\n\t\t\tfor i in range(len(body_blocks)):\n\t\t\t\tif self.rect.x == body_blocks[i].rect.x and self.rect.bottom == body_blocks[i].rect.bottom:\n\t\t\t\t\tself.new_block(body_blocks, snake_head)", "def create_block(self, complete_hash, nonce):\n print(\"Creating block with hash: '%s'\" % complete_hash)\n block = Block(complete_hash, nonce)\n for transaction in self.transactions:\n block.add_transaction(transaction)\n return block", "def create_genesis_block(self):\n genesis_block = Block(0, [], time.time(), \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)", "def add_block(self, cxnode, code, **magic_vars):\n ast = cparse(code)\n # ast.show()\n generator = MagicCGenerator(cxnode, magic_vars)\n generator.indent_level = self.indent_level\n hdr = '\\n%s// %s\\n' % (' ' * self.indent_level,\n cxnode.__class__.__name__)\n self.code += hdr + generator.visit(ast)", "def _construct_block(self, block_info):\n layer_name = block_info[0]\n if layer_name=='Conv2d':\n in_channels, out_channels, kernel_size = block_info[1:]\n return nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size)\n elif layer_name=='ReLU':\n return nn.ReLU(inplace=True)\n elif layer_name=='MaxPool2d':\n kernel_size, stride = block_info[1:]\n return nn.MaxPool2d(kernel_size=kernel_size,\n stride=stride)\n elif layer_name=='BatchNorm2d':\n num_features = block_info[1]\n return nn.BatchNorm2d(num_features=num_features)\n elif layer_name=='Linear':\n in_features, out_features = block_info[1:]\n return nn.Linear(in_features=in_features,\n out_features=out_features)\n else:\n raise Exception(\"_construct_block cannot 
construct block\")", "def create_genesis(self):\n return Block(0, 0, b'0', b'0', b'')", "def _create_item(self, category, name, data, metadata, parent_category, parent_name, draft=True, split=True):\r\n location = self.old_course_key.make_usage_key(category, name)\r\n if not draft or category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n mongo.create_and_save_xmodule(location, data, metadata, self.runtime)\r\n if isinstance(data, basestring):\r\n fields = {'data': data}\r\n else:\r\n fields = data.copy()\r\n fields.update(metadata)\r\n if parent_name:\r\n # add child to parent in mongo\r\n parent_location = self.old_course_key.make_usage_key(parent_category, parent_name)\r\n if not draft or parent_category in DIRECT_ONLY_CATEGORIES:\r\n mongo = self.old_mongo\r\n else:\r\n mongo = self.draft_mongo\r\n parent = mongo.get_item(parent_location)\r\n parent.children.append(location)\r\n mongo.update_item(parent, self.userid)\r\n # create pointer for split\r\n course_or_parent_locator = BlockUsageLocator(\r\n course_key=self.split_course_key,\r\n block_type=parent_category,\r\n block_id=parent_name\r\n )\r\n else:\r\n course_or_parent_locator = self.split_course_key\r\n if split:\r\n self.split_mongo.create_item(course_or_parent_locator, category, self.userid, block_id=name, fields=fields)", "def add_block2(_input, growth_rate, layers_per_block, bc_mode, is_training):\n output = _input\n for layer in range(layers_per_block):\n with tf.variable_scope(\"layer_%d\" % layer):\n output = add_internal_layer(output, growth_rate, bc_mode=bc_mode, is_training=is_training)\n return output", "def create_genesis_block(self):\n genesis_block = Block(0, [], 0, \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)", "def create_genesis_block(self):\n genesis_block = Block(0, [], 0, \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)", "def add_block_as_child_node(self, block, node):\n child = etree.SubElement(node, \"unknown\")\n block.add_xml_to_node(child)", "def add(self, block):\n\n try:\n self.blocks[block.height]\n except:\n\n self.blocks[block.height] = [block]\n if self.current_height < block.height:\n self.current_height = block.height\n return\n\n if not block.hash() in [b.hash() for b in self.blocks[block.height]]:\n self.blocks[block.height].append(block)\n loggerutil.debug(\"fork detected for height:\" + str(block.height) +\n \"block candidats:\" + str(self.blocks[block.height]))\n if self.current_height < block.height:\n self.current_height = block.height", "def new_child(self,instance):\n\t\treturn self.cu_for_new_child(instance,False)", "def blockMirror_create(self, forceNew = False):\n try:\n _str_func = 'blockMirror_create'\n _side = get_sideMirror(self)\n _blockType = self.blockType\n if not _side:\n log.error(\"|{0}| >> Block is not sided. Can't create mirror\".format(_str_func, self.mNode)) \n return False\n \n if self.getMessage('blockMirror'):\n mMirror = self.blockMirror\n log.debug(\"|{0}| >> blockMirror found {1} \".format(_str_func, mMirror))\n if not forceNew:\n return mMirror\n log.debug(\"|{0}| >> focing new... 
\".format(_str_func, mMirror)) \n mMirror.delete()\n \n log.debug(\"|{0}| >> blockParent....\".format(_str_func))\n mBlockParent = self.p_blockParent\n if mBlockParent and mBlockParent.getMessage('blockMirror'):\n mBlockParent = mBlockParent.blockMirror\n log.debug(\"|{0}| >> blockParent has blockMirror: {1}\".format(_str_func,mBlockParent))\n \n log.debug(\"|{0}| >> Creating mirror block. {1} | {2}\".format(_str_func, _blockType, _side))\n \n _d = {'blockType':self.blockType, 'side':_side,\n 'autoForm':False,\n 'blockParent':mBlockParent,\n \n #'baseAim':[self.baseAimX,-self.baseAimY,self.baseAimZ],\n 'baseSize':baseSize_get(self)}\n \n for a in 'blockProfile','buildProfile','cgmName':\n if a in ['cgmName']:\n _d['name'] = self.getMayaAttr(a)\n else:\n _d[a] = self.getMayaAttr(a)\n\n \n log.debug(\"|{0}| >> Block settings...\".format(_str_func, self.mNode)) \n #pprint.pprint(_d)\n \n mMirror = cgmMeta.createMetaNode('cgmRigBlock',\n **_d)\n \n \n \n blockDat = self.getBlockDat()\n blockDat['ud']['side'] = _side\n for k in ['baseSize','baseAim']:\n if blockDat['ud'].has_key(k):\n blockDat['ud'].pop(k)\n for a in 'XYZ':\n if blockDat['ud'].has_key(k+a):\n blockDat['ud'].pop(k+a)\n mMirror.blockDat = blockDat\n \n blockMirror_settings(self,mMirror)\n mMirror.saveBlockDat()\n _d = mMirror.blockDat\n _d['blockState']=self.getEnumValueString('blockState')\n\n mMirror.blockDat = _d\n \n \"\"\"\n for k,dIter in BLOCKSHARE._d_mirrorAttrCheck.iteritems():\n _check = blockDat['ud'].get(k)\n if _check:\n log.debug(\"|{0}| >> mirror dat check {1} | {2}\".format(_str_func, k, dIter))\n blockDat['ud'][k] = dIter.get(blockDat['ud'][k])\"\"\"\n \n \"\"\"\n #Mirror some specfic dat\n if blockDat.get('form'):\n _subShapers = blockDat['form'].get('subShapers',{})\n log.debug(\"|{0}| >> subShaper dat mirror...\".format(_str_func, self.mNode)) \n \n for i,d_sub in _subShapers.iteritems():\n l_t = d_sub.get('t')\n l_r = d_sub.get('r')\n l_s = d_sub.get('s')\n \n for ii,d_list in enumerate(l_t):\n d_list[0] = d_list[0]*-1\n for ii,d_list in enumerate(l_r):\n d_list[0] = d_list[0]*-1 \n d_list[1] = d_list[1]*-1\n \n _subShapers[str(i)]['r'][ii] = l_r\n _subShapers[str(i)]['r'][ii] = l_t\"\"\"\n \n\n self.connectChildNode(mMirror,'blockMirror','blockMirror')#Connect\n mMirror.p_blockParent = mBlockParent\n \n blockDat_load(mMirror,useMirror=True,redefine=True)\n controls_mirror(self,mMirror)\n return mMirror\n except Exception,err:cgmGEN.cgmException(Exception,err)", "def extend_template(base, text):\n\n block_search = re.compile(\"{{(block) (\\w+)}}\")\n has_blocks = re.search(block_search, base)\n if not has_blocks:\n return base\n else:\n find_content = re.compile(\"({{block \"+has_blocks.group(2)+\"}})(.*?)({{endblock}})\", re.DOTALL)\n \n content = re.search(find_content, text).group(2)\n base = re.sub(\"{{block \"+has_blocks.group(2)+\"}}\", content, base)\n return extend_template(base, text)", "def create_item(self, parent, block):\r\n li = util.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])", "def create_block(self, nonce, previous_hash):\n block = {'block_number': transaction_blocks.count() + 1,\n 'timestamp': ctime(t),\n 'transactions': self.transactions,\n 'nonce': nonce,\n 'previous_hash': previous_hash}\n\n # Reset the current list of transactions\n self.transactions = []\n self.chain.append(block)\n return block", "def add_basic_block(self, basic_block):\n self.basic_blocks.append(basic_block)\n basic_block.function = self", "def pack(cls, blocks, *args, **kwargs) -> 
None:\n\n if not blocks:\n raise BlockError(\"At least one block must be provided\")\n\n # The new block will be added here\n parent = blocks[0].parent\n\n # All of the blocks must share the same parent\n if not all(blk.parent is parent for blk in blocks):\n raise BlockError(\"All of the blocks must share the same parent\")\n\n # Store all existing connections\n connections = set()\n for port in chain.from_iterable([blk.ports for blk in blocks]):\n\n if port.is_in:\n connections.update((ups, port) for ups in port.upstream)\n else:\n connections.update((port, dws) for dws in port.downstream)\n\n # Create a new block instance and place in under the parent\n block = cls(*args, **kwargs, parent=parent)\n\n # Move blocks under parent block\n for blk in blocks:\n blk.parent = block\n\n # Restore connections\n for p1, p2 in connections:\n try:\n p1.feed(p2)\n\n # If connection adding has failed, it probably means that this is an external connection,\n # therefore we add an intermediate port\n except PortError:\n\n def add_proxy_from(p):\n if p.is_in and not p.auxiliary:\n return block.In.new(f'{p.block.name}_{p.name}')\n\n elif p.is_in and p.auxiliary:\n return block.AuxIn.new(f'{p.block.name}_{p.name}')\n\n elif not p.is_in and not p.auxiliary:\n return block.Out.new(f'{p.block.name}_{p.name}')\n\n else:\n return block.AuxOut.new(f'{p.block.name}_{p.name}')\n\n # The case when p1 is outside\n if p2.block.parent is block:\n new_port = add_proxy_from(p2)\n\n # The case when p2 is outside\n else:\n # Add new port\n new_port = add_proxy_from(p1)\n\n p1.feed(new_port)\n new_port.feed(p2)\n\n block.ports.sort()", "def add_block(self, env, block_color, width, height):\n\n block_size = (0.04, 0.04, 0.04)\n block_urdf = \"stacking/block.urdf\"\n block_pose = self.get_random_pose(env, block_size)\n block_id = env.add_object(block_urdf, block_pose)\n pb.changeVisualShape(\n block_id, -1, rgbaColor=utils.COLORS[block_color] + [1])\n # (0, None): 0 means that the block is symmetric.\n # TODO(hagrawal): Not sure what None means. Update. This is kept\n # for CLIPort compatibility. 
We don't use it.\n self.blocks.append((block_id, (0, None)))\n block_pix = utils.xyz_to_pix(block_pose[0], self.bounds, self.pix_size)\n block_obj_info = {\n \"obj_id\": block_id,\n \"pose\": block_pose,\n \"size\": block_size,\n \"urdf\": block_urdf,\n \"color\": block_color,\n \"unknown_color\": block_color in utils.EVAL_COLORS,\n \"pix\": block_pix,\n \"region\": determine_region(block_pix[0], block_pix[1], width, height),\n }\n return block_obj_info", "def make_sub(self, sub):\n [lu, subs] = [self.lineup, self.subs]\n if 'PositionSwitch' in str(type(sub)):\n done = False\n for player in lu:\n if player.id == sub.player:\n done = True\n player.switch.append(player.pos)\n player.pos = sub.pos\n if sub.pos in player.switch:\n player.switch.remove(sub.pos)\n if not done:\n sub_idx = find_player_index(subs, sub.player)\n if sub.pos == 'p':\n subs[sub_idx].status = 'entered'\n if not len([s for s in lu if s.pos == 'p']) > 0:\n subs[sub_idx].order = 10\n lu.append(subs.pop(sub_idx))\n else:\n print(\"ERROR: NOT SURE WHAT TO DO WITH SUB\")\n print([p.__dict__ for p in lu])\n print(sub.__dict__)\n\n elif 'OffensiveSub' in str(type(sub)):\n lu_idx = find_player_index(lu, sub.sub)\n sub_idx = find_player_index(subs, sub.player)\n if sub_idx is None:\n print(\"ERROR: \" + str(sub.__dict__))\n else:\n if subs[sub_idx].status == 'removed':\n print('ILLEGAL SUB ' + str(subs[sub_idx].__dict__))\n if not lu_idx is None:\n lu[lu_idx].status = 'removed'\n subs.append(lu.pop(lu_idx))\n lu.insert(lu_idx, subs.pop(sub_idx))\n\n elif 'DefensiveSub' in str(type(sub)):\n lu_idx = find_player_index(lu, sub.sub)\n sub_idx = find_player_index(subs, sub.player)\n if sub_idx is None:\n if sub.pos == 'p':\n sub_idx = find_player_index(lu, sub.player)\n if not sub_idx is None and not lu_idx is None:\n add = lu[sub_idx]\n lu[lu_idx].status = 'removed'\n subs.append(lu.pop(lu_idx))\n lu.insert(lu_idx, add)\n if lu[lu_idx].order == 10:\n lu[lu_idx].order = lu_idx+1\n else:\n print(\"ERROR: \" + str(sub.__dict__))\n else:\n if subs[sub_idx].status == 'removed':\n print('ILLEGAL SUB ' + str(subs[sub_idx].__dict__ ))\n if not lu_idx is None: \n lu[lu_idx].status = 'removed'\n if lu[lu_idx].order != subs[sub_idx].order:\n print(\"ASSUMING ORDER FOR SUB: \" + subs[sub_idx].name)\n subs[sub_idx].order = lu[lu_idx].order\n for p in lu:\n if p.pos == subs[sub_idx].pos:\n p.pos = ''\n subs.append(lu.pop(lu_idx))\n lu.insert(lu_idx, subs.pop(sub_idx))\n\n elif 'Removal' in str(type(sub)):\n if lu[-1].id == sub.sub:\n lu_idx = len(lu)-1\n else:\n lu_idx = find_player_index(lu, sub.sub)\n if not lu_idx is None:\n lu[lu_idx].status = 'removed'\n subs.append(lu.pop(lu_idx))\n \n\n [self.lineup, self.subs] = [lu, subs]", "def create_submodule(self, *args: Any, **kwargs: Any) -> Submodule:\n return Submodule.add(self, *args, **kwargs)", "def _create_vertical(self, parent_usage_key=None):\r\n resp = self.create_xblock(category='vertical', parent_usage_key=parent_usage_key)\r\n self.assertEqual(resp.status_code, 200)\r\n return self.response_usage_key(resp)", "def duplicate2(self):\n try:\n _str_func = 'blockDuplicate'\n mDup = cgmMeta.createMetaNode('cgmRigBlock',blockType = self.blockType, autoForm=False)\n mDup.loadBlockDat(self.getBlockDat())\n mDup.doName()\n return mDup\n except Exception,err:cgmGEN.cgmExceptCB(Exception,err)", "def add_node_as_child(self, block, node, id_generator=None):\n usage_id = self._usage_id_from_node(node, block.scope_ids.usage_id, id_generator)\n block.children.append(usage_id)", "def mine(self):\n 
last_block = self.chain[-1]\n\n nonce = self.proof_of_work()\n previous_hash = self.hash(last_block)\n self.create_block(nonce, previous_hash)", "def create_origin_block(self):\n # creating a new hash object and finding new hash with empty string.\n hash = hashlib.sha256()\n hash.update(''.encode('utf-8'))\n # Instantiating a new block with data 'Origin', and for previous hash we'll give the hash generated by empty string\n origin_block = Block('Origin', hash)\n # Mine the block with the difficulty level of the chain\n origin_block.mine(self.difficulty)\n # Appending it to the mined block list\n self.blocks.append(origin_block)", "def create_xblock(usage, student_id=None):\n block_cls = XBlock.load_class(usage.block_name)\n runtime = StuviewRuntime(block_cls, student_id, usage)\n model = DbModel(SCOPED_KVS, block_cls, student_id, usage)\n block = block_cls(runtime, model)\n return block", "def init_blocks(self):\n length = self.physics.len_blocks\n rect = Rectangle(Vector(self.rpos.x, self.rpos.y),\n Vector(self.rpos.x + length, self.rpos.y + length))\n self.rects.append(rect)\n self.physics.add_block(rect, self.stype)", "def CreateBlock(self, props):\n # Set some default properties\n\n # blk / cfg | name | name+inst\n # -----------------------------\n # name | n==n | False\n # name+inst | n==n | n==n&&i==i\n if 'ordinal' not in props and 'name' in props:\n for ordinal, ni in self.config.get('ordinals', {}).items():\n if isinstance(ni, str):\n # Config only has name, don't care about block\n if props['name'] == ni:\n props['ordinal'] = ordinal\n break\n elif 'instance' not in props:\n # Config has name+instance, block only has name\n continue\n else:\n # Config has name+instance, block has name+instance\n if props['name'] == ni[0] and props['instance'] == ni[1]:\n props['ordinal'] = ordinal\n break\n\n # Compute a block ID\n if 'name' in props:\n bid = props['name'].replace('-', '')\n else:\n bid = \"block\"\n\n if bid in self.blocks:\n i = 1\n while bid+str(i) in self.blocks:\n i += 1\n bid = bid+str(i)\n\n # Actually create block\n self.blocks[bid] = blk = Block(bid, props)\n blk.changed.handler(lambda: self.blockchanged(blk))\n\n self.blockadded(blk)\n\n return blk", "def begin():\n return BeginBlock()", "def _create_xblock(self, parent_loc, xblock_desc):\r\n create_payload = {\r\n 'category': xblock_desc.category,\r\n 'display_name': xblock_desc.display_name,\r\n }\r\n\r\n if parent_loc is not None:\r\n create_payload['parent_locator'] = parent_loc\r\n\r\n # Create the new XBlock\r\n response = self.session.post(\r\n STUDIO_BASE_URL + '/xblock/',\r\n data=json.dumps(create_payload),\r\n headers=self.headers,\r\n )\r\n\r\n if not response.ok:\r\n msg = \"Could not create {0}. Status was {1}\".format(xblock_desc, response.status_code)\r\n raise CourseFixtureError(msg)\r\n\r\n try:\r\n loc = response.json().get('locator')\r\n\r\n except ValueError:\r\n raise CourseFixtureError(\"Could not decode JSON from '{0}'\".format(response.content))\r\n\r\n # Configure the XBlock\r\n response = self.session.post(\r\n STUDIO_BASE_URL + '/xblock/' + loc,\r\n data=xblock_desc.serialize(),\r\n headers=self.headers,\r\n )\r\n\r\n if response.ok:\r\n return loc\r\n else:\r\n raise CourseFixtureError(\r\n \"Could not update {0}. 
Status code: {1}\".format(\r\n xblock_desc, response.status_code))", "def createChild(self):\n childName = self._generateChildName()\n zincRegion = self._zincRegion.createChild(childName)\n if zincRegion.isValid():\n childRegion = NeonRegion(childName, zincRegion, self)\n self._children.append(childRegion)\n self._informRegionChange(True)\n return childRegion\n return None", "def add_block(self, name):\n\n if not self.RE_NAME.match(name):\n raise ValueError(u\"Invalid block name '{0}'\"\n .format(common.from_utf8(name)))\n\n if name in self._block_map:\n raise ValueError(u\"Block '{0}' already exists\"\n .format(common.from_utf8(name)))\n\n # add new block and index mapping\n self._block_map[name] = len(self._ast[2]) # must come first\n option_list = []\n block = [name, option_list]\n self._ast[2].append(block)", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def add_block(_input, growth_rate, layers_per_block, bc_mode, name, is_training):\n output = _input\n for layer in range(layers_per_block):\n with tf.variable_scope(\"%s_layer_%d\" % (name, layer)):\n output = add_internal_layer(output, growth_rate, bc_mode=bc_mode, is_training=is_training)\n return output" ]
[ "0.7300657", "0.7137283", "0.700307", "0.67426383", "0.65247345", "0.64328855", "0.64139366", "0.6404519", "0.6204402", "0.61623967", "0.6048261", "0.5963573", "0.59493285", "0.5928946", "0.5925224", "0.5924873", "0.59158665", "0.5912983", "0.5904347", "0.58974856", "0.5862565", "0.58518505", "0.584571", "0.58374643", "0.5819552", "0.5813169", "0.58130515", "0.5799499", "0.5794494", "0.57889706", "0.57718897", "0.57630163", "0.57329714", "0.57304585", "0.5729394", "0.57199794", "0.5712181", "0.57050866", "0.5704886", "0.5671287", "0.56516564", "0.564135", "0.56412643", "0.56132627", "0.56063795", "0.5596406", "0.5586833", "0.55771995", "0.55599403", "0.55553937", "0.5549632", "0.5544024", "0.5543659", "0.55427104", "0.5528622", "0.5505878", "0.5504886", "0.5500673", "0.5492654", "0.54914093", "0.5491187", "0.5490123", "0.5488035", "0.548346", "0.5481799", "0.5479431", "0.54787236", "0.54728717", "0.54683286", "0.54672873", "0.5443881", "0.54428893", "0.5430977", "0.5430977", "0.54048055", "0.5397223", "0.53965336", "0.5391919", "0.5390654", "0.5385845", "0.5380702", "0.53805625", "0.53564495", "0.5353999", "0.5345577", "0.5344728", "0.53432393", "0.5341908", "0.53405446", "0.5332013", "0.5324306", "0.53125536", "0.5311059", "0.52993864", "0.52968943", "0.5289444", "0.52852124", "0.52850676", "0.52841455", "0.5280303" ]
0.73637503
0
block has been split via | so we need to start a new block for that option and return it to the user.
def switch(self):
    base_block = self.base_block or self
    self.next_block = Block(self.parent, base_block=base_block, py3_wrapper=self.py3_wrapper)
    return self.next_block
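The record above pairs a docstring-style query ("block has been split via `|` ...") with the `switch` method as its positive document. As a hedged illustration of the pattern — not the original library's implementation; the simplified `Block` constructor, the `items` list, and the usage below are assumptions for this sketch, and the `py3_wrapper` plumbing is dropped — a minimal self-contained version looks like:

```python
# Minimal sketch of the switch() pattern from the record above.
# Assumption: each "|" in a format string starts a sibling block that
# shares the same base_block, so a renderer can later walk the chain
# and pick the first alternative that resolves to valid output.

class Block:
    def __init__(self, parent, base_block=None):
        self.parent = parent          # enclosing block, or None at top level
        self.base_block = base_block  # first alternative in a "|" chain
        self.next_block = None        # next alternative, filled in by switch()
        self.items = []               # literals/placeholders in this block

    def switch(self):
        # A "|" was seen: start a new sibling block for the next option
        # and hand it back so parsing continues inside it.
        base_block = self.base_block or self
        self.next_block = Block(self.parent, base_block=base_block)
        return self.next_block


# Usage: parsing "a|b|c" conceptually yields three sibling blocks,
# all pointing back to the first one as their base.
first = Block(parent=None)
first.items.append("a")
second = first.switch()
second.items.append("b")
third = second.switch()
third.items.append("c")
assert second.base_block is first and third.base_block is first
print(first.items, second.items, third.items)  # ['a'] ['b'] ['c']
```

Note the design choice the sketch preserves: `base_block or self` makes every later alternative anchor to the first block in the chain, so no matter which option is reached, the renderer can always recover the head of the `|` sequence.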
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def block_one(self):", "def block(self):\n pass", "def handle_request(self, request):\n ret = True\n for option in request.options:\n if option.number == defines.inv_options[\"Block2\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n elif option.number == defines.inv_options[\"Block1\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n self._parent.blockwise[key] = (1, 0, num, m, size)\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request", "def start_block2(self, request):\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = (2, 0, 0, 1, 1024)", "def new_block_call(self, event):\n self.push_job(flush=True)", "def blocks(self):\n pass", "def split(self, block: ghidra.program.model.mem.MemoryBlock, addr: ghidra.program.model.address.Address) -> None:\n ...", "def execute_block_now(event):\n b = event.cli.current_buffer\n b.validate_and_handle()", "def block_on(self, char):\n self._blocker_char = char", "def handleBlock(self, block_str):\n\n new_block = Block()\n new_block.parseJson(block_str)\n if not self.checkBlock(new_block):\n return False\n\n if new_block.confirmed:# if confirmed by someone, check and add block\n if valid_proof_of_work(new_block):\n print(\"Someone done before me, I'm going to stop\")\n self.threadjob = False\n self.BlockChain.addBlock(new_block) \n else:\n raise Exception(\"Hey this Block's Hash is not valid\")\n return False\n\n elif self.miner_indicator:\n if self.threadjob:\n print(\"This is miner, I should mine, but I'm already doing \")\n return True\n print(\"Hey this is miner, I'm going to mine\")\n self.threadjob = True\n t = Thread(target=self.mine, args = (new_block,))\n t.start()\n else:\n print(\"Hey this is unconfirmed block , but I am not miner, so I gonna miss it\")\n return True", "def block_type(self):\r\n raise NotImplementedError()", "def splitblock(self, name=None, terminate=False):\n # -------------------------------------------------\n # Sanity check\n\n # Allow splitting only after leaders and before terminator\n # TODO: error check\n\n # -------------------------------------------------\n # Split\n\n oldblock = self._curblock\n newblock = self.func.new_block(name or 'block', after=self._curblock)\n op = self._lastop\n\n # Terminate if requested and not done already\n if terminate and not ops.is_terminator(op):\n op = self.jump(newblock)\n\n # -------------------------------------------------\n # Move ops after the split to new block\n\n if op:\n if op == 'head':\n trailing = list(self._curblock.ops)\n elif op == 'tail':\n trailing = []\n else:\n trailing = list(op.block.ops.iter_from(op))[1:]\n\n for op in trailing:\n op.unlink()\n newblock.extend(trailing)\n\n # -------------------------------------------------\n # Patch phis\n\n if terminate:\n self._patch_phis(oldblock.ops, oldblock, newblock)\n else:\n for op in oldblock:\n for use in self.func.uses[op]:\n if use.opcode == 'phi':\n raise error.CompileError(\n \"Splitting this block 
would corrupt some phis\")\n\n self._patch_phis(newblock.ops, oldblock, newblock)\n\n return oldblock, newblock", "def end():\n return EndBlock()", "def parseBlock(self, text, prevLineData):\n return self.parser.parseBlock(text, prevLineData)", "def changed_block(self, old_block, new_block):", "def start_block(self):\n self.stop_running()\n self.update_action(Action.block)", "def check_block(self, block):\n pass", "def onBlock(self, data) :\n pass", "def test_block_full_context(self):\n # old model file == beam block full context false\n pp = ParlaiParser(True, True)\n opt = pp.parse_args(\n ['--model-file', 'zoo:unittest/transformer_generator2/model']\n )\n agent = create_agent(opt, True)\n self.assertEqual(agent.opt['beam_block_full_context'], False)\n self.assertEqual(agent.beam_block_full_context, False)\n\n # brand new model == beam block full context true\n pp = ParlaiParser(True, True)\n opt = pp.parse_args(['--model', 'transformer/generator'])\n agent = create_agent(opt, True)\n self.assertEqual(agent.opt['beam_block_full_context'], True)\n self.assertEqual(agent.beam_block_full_context, True)", "def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + str(counter))\n f = open(mount['parent'] + '/linddata.' + str(counter), 'r') \n except IOError, e:\n return STATUS['M_BD']\n else :\n fdatastring = f.next()\n fdata = deserializedata(fdatastring)\n blocks[counter] = fdata\n counter += 1\n \n return STATUS['OK']", "def put_block(self):\n self.blocks[self.editor_cursor_position[1]][\n self.editor_cursor_position[0]] = self.available_block_types[self.current_block_type]", "def block_program(description):\n\n def run(self):\n n = len(description)\n i = 0\n while i + n <= len(self.shrink_target.blocks):\n attempt = bytearray(self.shrink_target.buffer)\n failed = False\n for k, d in reversed(list(enumerate(description))):\n j = i + k\n u, v = self.blocks[j].bounds\n if d == \"-\":\n value = int_from_bytes(attempt[u:v])\n if value == 0:\n failed = True\n break\n else:\n attempt[u:v] = int_to_bytes(value - 1, v - u)\n elif d == \"X\":\n del attempt[u:v]\n else: # pragma: no cover\n assert False, \"Unrecognised command %r\" % (d,)\n if failed or not self.incorporate_new_buffer(attempt):\n i += 1\n\n run.command = description\n run.__name__ = \"block_program(%r)\" % (description,)\n return run", "def test_04(self):\n assert 'False' == Api.requestBlock('test-04', charOrder='')", "def nextSplit(self):\n pass", "def processFirstBlock(block):\n print(block)\n if block[0:6].decode() == \"TRQ t \":\n return [\"TRQ\", \"t\"]\n elif block[0:6].decode() == \"TRQ f \":\n return [\"TRQ\", \"f\"]\n else:\n return [\"\", \"\"]", "def build_block(self, format_string):\n first_block = Block(None, py3_wrapper=self.py3_wrapper)\n block = first_block\n\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n value = token.group(0)\n if token.group(\"block_start\"):\n # Create new block\n block = block.new_block()\n elif token.group(\"block_end\"):\n # Close block setting any valid state as needed\n # and return to parent block to continue\n if not block.parent:\n raise Exception(\"Too many `]`\")\n block = block.parent\n elif token.group(\"switch\"):\n # a new option has been created\n block = block.switch()\n elif token.group(\"placeholder\"):\n # Found a {placeholder}\n key = token.group(\"key\")\n format = token.group(\"format\")\n 
block.add(Placeholder(key, format))\n elif token.group(\"literal\"):\n block.add(Literal(value))\n elif token.group(\"lost_brace\"):\n # due to how parsing happens we can get a lonesome }\n # eg in format_string '{{something}' this fixes that issue\n block.add(Literal(value))\n elif token.group(\"command\"):\n # a block command has been found\n block.set_commands(token.group(\"command\"))\n elif token.group(\"escaped\"):\n # escaped characters add unescaped values\n if value[0] in [\"\\\\\", \"{\", \"}\"]:\n value = value[1:]\n block.add(Literal(value))\n\n if block.parent:\n raise Exception(\"Block not closed\")\n # add to the cache\n self.block_cache[format_string] = first_block", "def _set_block(self, pos, block_):\n raise NotImplementedError", "def pick_block(self):\n current_block = self.blocks[self.editor_cursor_position[1]][self.editor_cursor_position[0]]\n if current_block in self.available_block_types:\n self.current_block_type = self.available_block_types.index(current_block)", "def is_block(self) -> bool:\n return self.answer == \".\"", "def begin():\n return BeginBlock()", "def test_block_extra_batch(self):\n pass", "def print_in_block(message):\n print(\"|\", message)", "def _get_block(self, pos):\n raise NotImplementedError", "def duplicate(self, uiPrompt = True):\n try:\n _str_func = 'duplicate'\n _blockType = self.blockType\n _side = get_side(self)\n _nameOriginal = self.cgmName\n \n _d = {'blockType':self.blockType,\n 'autoForm':False,\n 'side':_side,\n 'baseSize':baseSize_get(self),\n 'blockProfile':self.blockProfile,\n 'blockParent': self.p_blockParent}\n \n for a in 'cgmName','blockProfile':\n if a in ['cgmName']:\n _d['name'] = self.getMayaAttr(a)\n elif self.hasAttr(a):\n _d[a] = self.getMayaAttr(a) \n \n _title = 'New name for duplicate'.format(_blockType)\n result = mc.promptDialog(title=_title,\n message='Current: {0} | type: {1} | build: {2} | block:{3} '.format(_d['name'],_blockType,_d.get('blockProfile'),_d.get('buildProfile')),\n button=['OK', 'Cancel'],\n text = _d['name'],\n defaultButton='OK',\n cancelButton='Cancel',\n dismissString='Cancel')\n if result == 'OK':\n _v = mc.promptDialog(query=True, text=True)\n _d['name'] = _v\n \n else:\n log.error(\"Duplication cancelled for |{0}|\".format(self))\n return False\n \n log.debug(\"|{0}| >> Creating duplicate block. {1} | source: {2}\".format(_str_func, _blockType, self))\n\n \n log.debug(\"|{0}| >> Block settings...\".format(_str_func)) \n #pprint.pprint(_d)\n \"\"\"\n if replaceSelf:\n ml_children = self.getBlockChildren()\n _blockProfile = self.getMayaAttr('blockProfile')\n mBlockModule = self.p_blockModule\n \n _d_profiles = {} \n try:_d_profiles = mBlockModule.d_block_profiles\n except:\n log.error(cgmGEN.logString_msg(_str_func,'No d_block_profile_found')) \n \n _typeDict= _d_profiles.get(_blockProfile,{})\n if _blockProfile and not _typeDict:\n log.error(cgmGEN.logString_msg(_str_func,'blockType not found in blockProfiles. 
Please fix | found {0}'.format(_blockProfile)))\n pprint.pprint(_d_profiles.keys())\n return False \n \n _baseDat = _typeDict.get('baseDat')\"\"\"\n \n mDup = cgmMeta.createMetaNode('cgmRigBlock',\n **_d)\n \n mDup.doSnapTo(self)\n \n blockDat = self.getBlockDat()\n \n blockDat['baseName'] = _v\n blockDat['ud']['cgmName'] = _v\n \n if _d['blockType'] in ['finger','thumb']:\n log.debug(\"|{0}| >> Clearing nameList\".format(_str_func))\n for a in blockDat['ud'].iteritems():\n if 'nameList' in a:\n blockDat['ud'].remove(a)\n blockDat['ud']['nameList_0'] = _v \n \n \"\"\"\n if blockDat['ud'].get('rigSetup') in ['finger']:\n log.debug(\"|{0}| >> Clearing nameList\".format(_str_func))\n for a in blockDat['ud'].iteritems():\n if 'nameList' in a:\n blockDat['ud'].remove(a)\n blockDat['nameList_0'] = _v\"\"\"\n \n #changeState(mDup,'define',forceNew=True)#redefine to catch any optional created items from settings\n mDup.blockDat = blockDat\n \"\"\"\n if replaceSelf:\n if _baseDat:\n log.warning(cgmGEN.logString_msg(_str_func,'resetting baseDat: {0}'.format(_baseDat)))\n mDup.baseDat = _baseDat \"\"\" \n \n for a in ['numSubShapers','rollCount']:\n if ATTR.datList_exists(self.mNode, a):\n l = self.datList_get(a)\n mDup.datList_connect(a,l)\n \n \n l_nameList = mDup.datList_get('nameList')\n for i,n in enumerate(l_nameList):\n if _nameOriginal in n:\n l_nameList[i] = n.replace(_nameOriginal,_d['name'])\n \n mDup.datList_connect('nameList',l_nameList)\n pprint.pprint(l_nameList) \n \n \n blockDat_load(mDup,redefine=True)\n #log.debug('here...')\n #blockDat_load(mDup)#...investigate why we need two...\n \n #mDup.p_blockParent = self.p_blockParent\n #self.connectChildNode(mMirror,'blockMirror','blockMirror')#Connect \n \"\"\"\n if replaceSelf:\n for mChild in ml_children:\n mChild.p_blockParent = mDup\n \n self.delete()\"\"\"\n \n\n \n return mDup\n except Exception,err:cgmGEN.cgmExceptCB(Exception,err)", "def blockParser(block):\n struct = []\n first = True\n record = False\n for line in block:\n if line.startswith('Structure #'):\n record = True\n if not first:\n yield struct\n struct = []\n first = False\n if record:\n struct.append(line)\n yield struct", "def test_05(self):\n assert 'True' == Api.requestBlock('test-05', charOrder=50)", "def run(self, parent, blocks):\r\n pass", "def _prepare_consumed_block():\n \n blkcounter = 0\n maxBlocks = blocks[0]['maxBlocks']\n \n freeStart = blocks[0]['freeStart']\n freeEnd = blocks[0]['freeEnd']\n \n while(blkcounter in xrange(0, maxBlocks, 1)) :\n # data type can be varied\n # dictionary - for superBlock, directory and file inode\n # list - for free block list, index list\n # string - data blocks\n # empty - free block\n bdata = blocks[blkcounter]\n # an afterthought - we really do not need to bother about\n # data blocks here, as all the data blocks should be in one\n # of index list or in location attribute of inode\n \n if (type(bdata) == list) :\n if(blkcounter >= freeStart and blkcounter <= freeEnd) :\n # free block list, so no reading the content of the list, \n # but mark the block as a consumed block \n consumedBlocks.append(blkcounter)\n else :\n # this is an index list, marking this block as consumed\n # mark all the blocks listed in this list as consumed as well\n consumedBlocks.append(blkcounter)\n for n in bdata :\n consumedBlocks.append(n)\n elif (type(bdata) == dict) :\n # mark this block as consumed block\n consumedBlocks.append(blkcounter)\n if(bdata.has_key('filename_to_inode_dict')) :\n # if directory node has connected files\n for n in 
bdata['filename_to_inode_dict'].values() :\n consumedBlocks.append(n)\n elif(bdata.has_key('location')): \n # if this is a file node, then check indirect attribute\n # and if inderect if 0, mark the value in location attribute\n # as consumed block\n # if indirect is 1, mark the value in location attribute\n # as consumed block, then leave it, cause the 'if' block already\n # does a check on index list \n consumedBlocks.append(bdata['location'])", "def _do_add_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n block_type = int(args[4])\r\n starting_address = int(args[5])\r\n length = int(args[6])\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.add_block(name, block_type, starting_address, length)\r\n return name", "def block(self, block):\n\n self._block = block", "def block(self, block):\n\n self._block = block", "def test_07(self):\n assert 'False' == Api.requestBlock('test-07', charOrder='~!@#$%%^&*()=_+<>?/')", "def __parse_blocks_pass(self):\n\n self.stack = [DocumentStackToken()]\n\n self.tokenized_document = []\n token_to_use = self.source_provider.get_next_line()\n did_start_close = False\n did_started_close = False\n requeue = []\n ignore_link_definition_start = False\n POGGER.debug(\"---$---\", token_to_use)\n POGGER.debug(\"---\")\n self.__parse_properties.pragma_lines = {}\n line_number = 1\n try:\n (\n token_to_use,\n line_number,\n requeue,\n ) = self.__process_front_matter_header_if_present(\n token_to_use, line_number, requeue\n )\n did_start_close = token_to_use is None\n keep_on_going = True\n while keep_on_going:\n POGGER.debug(\"next-line>>$\", token_to_use)\n POGGER.debug(\"stack>>$\", self.stack)\n POGGER.debug(\"current_block>>$\", self.stack[-1])\n POGGER.debug(\"line_number>>$\", line_number)\n POGGER.debug(\"---\")\n\n position_marker = PositionMarker(line_number, 0, token_to_use)\n parser_state = ParserState(\n self.stack,\n self.tokenized_document,\n TokenizedMarkdown.__close_open_blocks,\n self.__handle_blank_line,\n )\n if did_start_close:\n POGGER.debug(\"\\n\\ncleanup\")\n\n was_link_definition_started_before_close = self.stack[\n -1\n ].was_link_definition_started\n\n did_started_close = True\n (\n tokens_from_line,\n requeue_line_info,\n ) = TokenizedMarkdown.__close_open_blocks(\n parser_state,\n self.tokenized_document,\n include_block_quotes=True,\n include_lists=True,\n caller_can_handle_requeue=True,\n was_forced=True,\n )\n if tokens_from_line and not self.tokenized_document:\n self.tokenized_document.extend(tokens_from_line)\n\n if not (requeue_line_info and requeue_line_info.lines_to_requeue):\n keep_on_going = False\n else:\n assert was_link_definition_started_before_close\n assert not requeue_line_info.lines_to_requeue[0]\n\n del requeue_line_info.lines_to_requeue[0]\n line_number -= 1\n\n did_start_close = False\n tokens_from_line = None\n else:\n POGGER.debug(\">>>>$\", self.tokenized_document)\n\n if not token_to_use or not token_to_use.strip():\n POGGER.debug(\"call __parse_blocks_pass>>handle_blank_line\")\n (\n tokens_from_line,\n requeue_line_info,\n ) = self.__handle_blank_line(\n parser_state,\n token_to_use,\n from_main_transform=True,\n position_marker=position_marker,\n )\n else:\n POGGER.debug(\"\\n\\nnormal lines\")\n (\n tokens_from_line,\n _,\n _,\n requeue_line_info,\n _,\n ) = ContainerBlockProcessor.parse_line_for_container_blocks(\n parser_state,\n 
position_marker,\n ignore_link_definition_start,\n self.__parse_properties,\n None,\n )\n\n POGGER.debug(\"<<<<$\", self.tokenized_document)\n\n if keep_on_going:\n line_number, ignore_link_definition_start = TokenizedMarkdown.__xx(\n line_number, requeue_line_info, requeue\n )\n\n POGGER.debug(\n \"---\\nbefore>>$\",\n self.tokenized_document,\n )\n POGGER.debug(\"before>>$\", tokens_from_line)\n if tokens_from_line:\n self.tokenized_document.extend(tokens_from_line)\n POGGER.debug(\n \"after>>$\",\n self.tokenized_document,\n )\n if requeue:\n POGGER.debug(\"requeue>>$\", requeue)\n POGGER.debug(\"---\")\n\n (\n token_to_use,\n did_start_close,\n did_started_close,\n ) = self.__determine_next_token_process(\n requeue, did_start_close, did_started_close\n )\n except AssertionError as this_exception:\n error_message = f\"A project assertion failed on line {line_number} of the current document.\"\n raise BadTokenizationError(error_message) from this_exception\n\n if self.__parse_properties.pragma_lines:\n self.tokenized_document.append(\n PragmaToken(self.__parse_properties.pragma_lines)\n )\n return self.tokenized_document", "def handle_eof_in_block(self):\n self.handle_error(\"hit EOF, expected close tag\")", "def inner_start_mining(self):\n print(\"Mining a new block\")\n blockchain = self.get_blockchain()\n self.request_transactions(blockchain)\n last_block_hash = blockchain.last_block().header\n complete_hash, nonce = self.proof_of_work(last_block_hash)\n new_block = self.create_block(complete_hash, nonce)\n self.send_block(new_block)\n self.reset_transaction()", "def makeBlock(tag):\n return {\"t\":\"RawBlock\",\"c\":[\"html\",tag]}", "def penblock(self, block):\n self.block = block", "def block(self):\n return self._block", "def test_29(self):\n assert 'True' == Api.requestBlock('test-29')", "def test_13(self):\n assert 'False' == Api.requestBlock('test-13')", "def test_06(self):\n assert 'False' == Api.requestBlock('test-06', charOrder=51)", "def test_27(self):\n assert 'False' == Api.requestBlock('test-27')", "def block(self, source, task):\n raise NotImplementedError", "def test_fork_simple(self):\n bvh = self.BlockValidationHandler()\n new_block = self.btm.generate_block(previous_block=self.btm.chain_head,\n add_to_store=True)\n\n bv = self.create_block_validator(new_block, bvh.on_block_validated)\n bv.run()\n\n self.assertTrue(bvh.has_result())\n self.assertTrue(new_block.status == BlockStatus.Valid)\n self.assertTrue(bvh.result[\"commit_new_block\"])", "def write_special_block(self, block, cell_content):\n if not cell_content:\n self.save_text()\n con = 1\n if block['t'] == 'Header':\n con = 2\n self.list_parse(block['c'][con])\n if not cell_content:\n self.save_text()", "def test_39(self):\n assert 'True' == Api.requestBlock('test-39')", "def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf", "def combine_and_select_block(self, first):\n block = self.combine_block(first)\n self.combined.append(block)", "def open_launcher(self):\n vim.command('silent! 
botright split {0}'.format(self.name))\n self.setup_buffer()", "def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)", "def addBlock(self, newBlock):\n newBlock.index = len(self.chain)\n newBlock.previousHash = self.chain[-1].hash\n newBlock.mineBlock(self.difficulty)\n self.chain.append(newBlock)\n self.writeBlocks()", "def nextblock(self, parent=None, **kwargs):\n block = self.newblock(parent, **kwargs)\n if not parent and self.block:\n self.block.add_child(block)\n\n self.block = block\n return block", "def test_block_bad_batch(self):\n pass", "def add_block_str(self, block_str):\n return self._add_block_str(block_str, True, False)", "def test_02(self):\n assert 'False' == Api.requestBlock('test-02')", "def finish_hanging(self):\n if self.groups.starting_signature:\n if self.groups.starting_group:\n self.add_tokens_for_group(with_pass=True)\n\n elif self.groups.starting_single:\n self.add_tokens_for_single()", "def get_block(\n self, position: typing.Tuple[int, int, int], none_if_str=True\n ) -> typing.Union[typing.Any, str, None]:\n raise NotImplementedError", "def _initBlock(o,block):\n o.block = block.clone().shift(*o.board.startPosition)", "def test_34(self):\n assert 'False' == Api.requestBlock('test-34')", "def func(self):\n # check what aliases we have used\n if self.cmdstring == \"getinline\":\n self.switches.append(\"getinline\")\n if self.cmdstring == \"nextinline\":\n self.switches.append(\"nextinline\")\n if \"createline\" in self.switches:\n self.create_line()\n return\n if not self.args and not self.switches:\n self.display_line()\n return\n if not self.check_line:\n return\n if \"getinline\" in self.switches:\n self.join_line()\n return\n if \"nextinline\" in self.switches:\n self.next_in_line()\n return\n if \"dropout\" in self.switches:\n self.drop_out()\n return\n if \"dismiss\" in self.switches:\n self.dismiss()\n return\n if \"loop\" in self.switches:\n self.toggle_loop()\n return", "def _maskhg19(self):\n if len(self._current_block) > 2:\n self._current_block[0].text = self._current_block[1].text\n self._current_block[0].size = self._current_block[1].size\n self._current_block[0].setstring()\n self._current_block.remove(self._current_block[1])\n else:\n self._current_block = []", "def reconsiderblock(self, block_hash: str) -> None:\n return self.rpc_call(\"reconsiderblock\", block_hash)", "def test_43(self):\n assert 'False' == Api.requestBlock('test-43')", "def createFirstBlock(self):\n firstBlock = Block(0, self.__currentTransactionsList, 0, '00')\n self.__chain.append(firstBlock)", "def test_block_bad_signature(self):\n pass", "def test_14(self):\n assert 'False' == Api.requestBlock('test-14')", "def do_block():\n print_column()\n print_rows()", "def do_block():\n print_column()\n print_rows()", "def on_new_line(self, line, is_full_line):\n try:\n if is_full_line:\n self._parse_v_option(line)\n self._parse_general_info(line)\n self._parse_header(line)\n except ParsingDone:\n pass # line has been fully parsed by one of above parse-methods\n return super(W, self).on_new_line(line, is_full_line)", "def parseChunk(self, parent, text):\r\n self.parseBlocks(parent, text.split('\\n\\n'))", "def visit_option_argument(self, node):\n self.body.append(node.get('delimiter', ' '))", "def visit_option_argument(self, node):\n self.body.append(node.get('delimiter', ' '))", "def test_03(self):\n assert 'False' == Api.requestBlock('test-03')", "def test_41(self):\n assert 'False' == 
Api.requestBlock('test-41')", "def test_16(self):\n assert 'False' == Api.requestBlock('test-16')", "def nextblock(self, parent=None):\n block = ControlBlock()\n self.blocks.add(block)\n if parent:\n parent.add_child(block)\n elif self.block:\n self.block.add_child(block)\n self.block = block\n return self.block", "def valid_chain(self, block, prev_block):\n self.stop_mine()\n\n print('\\n //// MINING STOPPED\\n')\n\n print('\\n //// block entering valid_chain')\n pprint(block)\n\n if block is not None and block['message'] != 'mining stopped':\n if block['previous_hash'] == self.hash(prev_block):\n \n # Check that the Proof of Work is correct\n if self.valid_proof(prev_block['proof'], block['proof']):\n if block['index'] == self.last_block['index']:\n if self.last_block['timestamp'] > block['timestamp']:\n del self.chain[-1]\n self.chain.append(block)\n print('\\n //// true from equal index but older timestamp')\n return True\n\n elif self.last_block['timestamp'] == block['timestamp']:\n print('\\n //// true from timestamps are equal block isnt added')\n return True\n else:\n print('\\n //// true timestamp is newer not added but sending false')\n return False\n\n elif block['index'] > self.last_block['index']:\n print('\\n //// true from index is greater and block is added')\n self.chain.append(block)\n return True\n else:\n print('\\n //// false from adding block had index less than block already there')\n else:\n print('\\n //// false from not a valid proof')\n\n else:\n print('\\n //// false from hashes arent equal')\n if (block['timestamp'] < self.last_block['timestamp']):\n if (block['index'] == self.last_block['index']):\n print('\\n //// hashes arent equal but block is older, subtracting and adding')\n del self.chain[-1]\n self.chain.append(block)\n return True\n\n elif (block['timestamp'] > self.last_block['timestamp']):\n if(block['index'] > self.last_block['index']):\n self.chain.append(block)\n return True\n else:\n return True\n\n return False\n\n else:\n return 'reject'", "async def xpblock(self, ctx, *, user_or_role : str = None):\r\n\r\n\t\tusage = 'Usage: `{}xpblock [user_or_role]`'.format(ctx.prefix)\r\n\r\n\t\tif not await Utils.is_bot_admin_reply(ctx): return\r\n\r\n\t\tif user_or_role == None:\r\n\t\t\tawait ctx.message.channel.send(usage)\r\n\t\t\treturn\r\n\r\n\t\troleName = user_or_role\r\n\t\tis_user = True\r\n\t\tif type(user_or_role) is str:\r\n\t\t\t# Check user first\r\n\t\t\tuser_or_role = DisplayName.memberForName(roleName, ctx.guild)\r\n\t\t\tif not user_or_role:\r\n\t\t\t\tis_user = False\r\n\t\t\t\t# Check role\r\n\t\t\t\tif roleName.lower() == \"everyone\" or roleName.lower() == \"@everyone\":\r\n\t\t\t\t\tuser_or_role = ctx.guild.default_role\r\n\t\t\t\telse:\r\n\t\t\t\t\tuser_or_role = DisplayName.roleForName(roleName, ctx.guild)\r\n\t\t\t\t\t\r\n\t\t\tif not user_or_role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(Nullify.escape_all(roleName))\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\t\t\r\n\t\t# Check if they're admin or bot admin\r\n\t\tif Utils.is_bot_admin(user_or_role):\r\n\t\t\treturn await ctx.send(\"You can't block other admins with this command.\")\r\n\t\tif is_user:\r\n\t\t\tur_name = DisplayName.name(user_or_role)\r\n\t\telse:\r\n\t\t\tur_name = Nullify.escape_all(user_or_role.name)\r\n\r\n\t\t# Now we see if we already have that role in our list\r\n\t\tpromoArray = self.settings.getServerStat(ctx.message.guild, \"XpBlockArray\")\r\n\r\n\t\tfor aRole in promoArray:\r\n\t\t\t# Get the role that corresponds 
to the id\r\n\t\t\tif str(aRole) == str(user_or_role.id):\r\n\t\t\t\t# We found it - throw an error message and return\r\n\t\t\t\tmsg = '**{}** is already in the list.'.format(ur_name)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tpromoArray.append(user_or_role.id)\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"XpBlockArray\", promoArray)\r\n\r\n\t\tmsg = '**{}** added to list.'.format(ur_name)\r\n\t\tawait ctx.message.channel.send(msg)\r\n\t\treturn", "def next_block_type(self):\n if self.current_block_type >= len(self.available_block_types) - 1:\n self.current_block_type = 0\n else:\n self.current_block_type += 1", "def gen_break(self, stmt: statements.Break) -> None:\n # block = self.get_label_block(stmt.label)\n if self.break_block_stack:\n block = self.break_block_stack[-1]\n self.builder.emit_jump(block)\n else:\n self.error(\"Cannot break here!\", stmt)\n new_block = self.builder.new_block()\n self.builder.set_block(new_block)", "def test_15(self):\n assert 'False' == Api.requestBlock('test-15')", "def test_26(self):\n assert 'False' == Api.requestBlock('test-26')", "def parse_blocks(self):\n if not self.options.blocks:\n return\n block_identifiers, block_aliases = [list(b) for b in zip(*self.options.blocks)]\n while block_identifiers:\n nodelist = self.parser.parse(block_identifiers)\n token = self.parser.next_token()\n current_identifier = block_identifiers.pop(0)\n current_alias = block_aliases.pop(0)\n while token.contents != current_identifier:\n current_identifier = block_identifiers.pop(0)\n self.blocks[block_aliases.pop(0)] = template.NodeList() \n self.blocks[current_alias] = nodelist\n assert len(self.blocks) == len(self.options.blocks), \"%s block parsing failed: %r => %r\" % (self.tagname, self.options.blocks, self.blocks)", "def test_46(self):\n assert 'False' == Api.requestBlock('test-46')", "def test_12(self):\n assert 'False' == Api.requestBlock('test-12')", "def test_11(self):\n assert 'False' == Api.requestBlock('test-11')", "def enter(self):\n if self.pos < self.line_length():\n # If the position is not at the end of the line split the line\n self.buffer.split_line(self.line, self.pos)\n else:\n self.buffer.insert_line(\"\", self.line + 1)\n \n self.line += 1\n self.pos = 0\n self.has_changes = True", "def test_38(self):\n assert 'True' == Api.requestBlock('test-38')", "def test(self, parent, block):\r\n pass", "def test_44(self):\n assert 'False' == Api.requestBlock('test-44')" ]
[ "0.62640697", "0.61661994", "0.6021515", "0.5755425", "0.56972605", "0.5621218", "0.55789465", "0.55772924", "0.5509449", "0.55021685", "0.5426134", "0.53966236", "0.53940547", "0.53849393", "0.5371453", "0.53622526", "0.5353725", "0.5346401", "0.53440475", "0.5342697", "0.5316477", "0.52873236", "0.5286101", "0.52671295", "0.5254315", "0.52464074", "0.5243924", "0.5239295", "0.52022743", "0.5185023", "0.51746094", "0.51717204", "0.5169843", "0.5162618", "0.5141435", "0.5124506", "0.51035875", "0.50774753", "0.50774497", "0.5052052", "0.5052052", "0.5049856", "0.504101", "0.5029518", "0.5015477", "0.5013375", "0.4998327", "0.49829", "0.49625477", "0.49447128", "0.4941575", "0.4932166", "0.4931994", "0.4928474", "0.49271235", "0.4924444", "0.49236873", "0.49090055", "0.4905615", "0.4901128", "0.4891148", "0.4889988", "0.48890084", "0.4887226", "0.48823753", "0.48819506", "0.48806563", "0.4875949", "0.4874744", "0.48678035", "0.48657107", "0.4865508", "0.48647285", "0.48609617", "0.48604724", "0.48586854", "0.4858151", "0.4858151", "0.48556116", "0.4840658", "0.4835976", "0.4835976", "0.4833429", "0.4831193", "0.48289403", "0.48259628", "0.48212835", "0.48185456", "0.48146486", "0.48144957", "0.4813606", "0.48111534", "0.48085618", "0.48084688", "0.48062307", "0.48044008", "0.48040462", "0.48034403", "0.48026255", "0.47961903" ]
0.5264465
24
see if the if condition for a block is valid
def check_valid(self, get_params):
    if self.commands._if:
        return self.commands._if.check_valid(get_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_block(self, block):\n pass", "def is_block(self):\n return self.v & 1 == 0", "def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))", "def is_block(self):\n\t\treturn self.name in get_elements_collection(self.__res, 'block_level')", "def is_valid_block(self, first):\n return (self.a_cursor > first.a and\n self.b_cursor > first.b)", "def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))", "def verify(condition):\n global _is_in_verify_within\n if not condition:\n calling_frame = inspect.currentframe().f_back\n error_message = 'verify() failed at line {} in \"{}\"'.format(calling_frame.f_lineno,\n calling_frame.f_code.co_filename)\n if not _is_in_verify_within:\n print error_message\n return False\n return True", "def is_valid_proof(self, block, block_hash): \n return (block_hash.startswith('0' * Blockchain.difficulty) and block_hash == compute_hash())", "def _inblock(row, column, init, end):\n return all([row[column][0] >= init[0],\n row[column][1] >= init[1],\n row[column][0] <= end[0],\n row[column][1] <= end[1]])", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))", "def check_if(self, token: tokenize.TokenInfo) -> bool:\n if self._seen_for:\n self._seen_if_in_line = True\n\n self._potential_violation = (\n self._potential_violation or\n self.seen_clause_in_line\n )\n return self._check_violation(token)\n return True", "def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))", "def is_valid(self):\n # Check blocks\n for block in self.blocks.values():\n # Non-optional blocks must be enabled\n if (\n block.structure.number_non_optional_data() > 0\n and not block.enabled\n and block.is_allowed()\n ):\n self.last_error = (\n f'Required block \"{block.block_header.name}\" not enabled'\n )\n return False\n # Enabled blocks must be valid\n if block.enabled and not block.is_valid:\n self.last_error = f'Invalid block \"{block.block_header.name}\"'\n return False\n\n return True", "def isValid(self):\n currBlock = self.getBlock(self.tailBlockHash)\n while currBlock != self.genesisBlock:\n if not self.isValidBlock(currBlock):\n return False\n currBlock = self.getBlock(currBlock.prevHash)\n return True", "def chainIsValid(self):\n for i in range(1, len(self.blocks)):\n prev_block = self.blocks[i-1]\n cur_block = self.blocks[i]\n if cur_block.header['prevBlockH'] != getHashBlock(prev_block):\n return False\n return True", "def is_block(self) -> bool:\n return self.answer == \".\"", "def 
isValidBlock(self, block, unSpentTransactions):\n\n prevBlock = self.getBlock(self.tailBlockHash)\n if prevBlock.index+1 != block.index:\n return False\n elif prevBlock.currHash != block.prevHash:\n return False\n elif block.calculateHash() != block.currHash:\n return False\n return block.isValid(unSpentTransactions)", "def valid_proof(block):\n return Blockchain.hash(block) [:4] == \"0000\"", "def check():", "def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False", "def validateBlock(self, currentBlock, previousBlock): \n \n # Check the block index\n if currentBlock.index != previousBlock.index + 1:\n return False\n if currentBlock.previousHash != previousBlock.hash:\n return False\n if currentBlock.hash != currentBlock.hashBlock():\n return False\n if not self.validateNonce(previousBlock.nonce, previousBlock.hash, currentBlock.nonce):\n return False\n return True", "def check(self): # full program\n r = re.compile('(?!(^(((?!;)[A-Z][+-]?\\d+(\\.\\d+)?\\s?)*(\\s*;\\s.*)?)$))')\n for line in self.blocks:\n if r.match(line) and line and line != '\\r' and line != '\\n':\n return False\n return True", "def test_34(self):\n assert 'False' == Api.requestBlock('test-34')", "def is_valid_proof(self, block, block_hash):\r\n return (block_hash.startswith('0' * Blockchain.difficulty) and\r\n block_hash == block.compute_hash())", "def find_block_to_run(blk):\n _top = find_top_block(blk)\n if blk == _top and blk.name[0:3] is not 'def':\n return True\n else:\n return False", "def is_valid_proof(cls, block, block_hash):\n return (cls.ifsatisfy_diff(block_hash) and block_hash == block.compute_hash())", "def test_13(self):\n assert 'False' == Api.requestBlock('test-13')", "def test_03(self):\n assert 'False' == Api.requestBlock('test-03')", "def test_37(self):\n assert 'False' == Api.requestBlock('test-37')", "def test_27(self):\n assert 'False' == Api.requestBlock('test-27')", "def is_valid_proof(cls, block, block_hash):\n return (block_hash.startswith('0' * Blockchain.difficulty) and\n block_hash == block.compute_hash())", "def is_valid_proof(cls, block, block_hash):\n return (block_hash.startswith('0' * Blockchain.difficulty) and\n block_hash == block.compute_hash())", "def test_41(self):\n assert 'False' == Api.requestBlock('test-41')", "def test_28(self):\n assert 'False' == Api.requestBlock('test-28')", "def test_35(self):\n assert 'False' == Api.requestBlock('test-35')", "def test_33(self):\n assert 'False' == Api.requestBlock('test-33')", "def test_26(self):\n assert 'False' == Api.requestBlock('test-26')", "def condition_forward_checking(csp, var) :\n return False", "def condition_forward_checking(csp, var) :\n return False", "def test_46(self):\n assert 'False' == Api.requestBlock('test-46')", "def test_43(self):\n assert 'False' == Api.requestBlock('test-43')", "def 
test_11(self):\n assert 'False' == Api.requestBlock('test-11')", "def check_for(self, token: tokenize.TokenInfo) -> bool:\n self._is_comprehension = True\n self._seen_for = True\n self._seen_for_in_line = True\n\n self._potential_violation = (\n self._potential_violation or\n self.seen_clause_in_line\n )\n return self._check_violation(token)", "def test_14(self):\n assert 'False' == Api.requestBlock('test-14')", "def test_44(self):\n assert 'False' == Api.requestBlock('test-44')", "def test_23(self):\n assert 'False' == Api.requestBlock('test-23')", "def test_09(self):\n assert 'False' == Api.requestBlock('test-09')", "def test(self, parent, block):\n\n self.match = self.pattern.match(block) if self.pattern is not None else None\n return self.match is not None", "def test_02(self):\n assert 'False' == Api.requestBlock('test-02')", "def _check_free_block():\n \n # populate the block data fields\n status = _prepare_blocks()\n if(status != STATUS['OK']) :\n # some metadata file reading error\n return status\n \n # find which blocks are already consumed, \n # per the block level metadata\n _prepare_consumed_block()\n \n \n # TODO : remove debug statements\n # print consumedBlocks\n \n freeStart = blocks[0]['freeStart']\n freeEnd = blocks[0]['freeEnd']\n blknum = 0\n for fblkcnt in xrange(freeStart, freeEnd + 1, 1) :\n for fblk in blocks[fblkcnt] :\n # if free block is zero, then, it should not be in consumed blocks\n # else it definitely should be\n print fblkcnt, \" - \", fblk, \" - \", blknum\n if (fblk == 0 and (blknum in consumedBlocks)):\n # free blocks says it's free, but consumed block says it's not\n return STATUS['F_NFB']\n elif(fblk == 1 and not(blknum in consumedBlocks)) : \n # free block says it's not free, but consumed block says it is\n return STATUS['F_MFB']\n \n blknum += 1\n # consistency all ok\n return STATUS['OK']", "def _check_multiline_conditions(self, node: ast.If) -> None:\n start_lineno = getattr(node, 'lineno', None)\n for sub_nodes in ast.walk(node.test):\n sub_lineno = getattr(sub_nodes, 'lineno', None)\n if sub_lineno is not None and sub_lineno > start_lineno:\n self.add_violation(MultilineConditionsViolation(node))\n break", "def check(self):\n return True", "def test_52(self):\n assert 'False' == Api.requestBlock('test-52')", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n # print(f'{last_block}')\n # print(f'{block}')\n # print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n last_block_hash = self.hash(last_block)\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # Check that the Proof of Work is correct\n if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def test_12(self):\n assert 'False' == Api.requestBlock('test-12')", "def test_36(self):\n assert 'False' == Api.requestBlock('test-36')", "def is_valid_block(last_block, block):\n if block.last_hash != last_block.hash:\n raise Exception('Incorrect last_hash')\n if hex_to_binary(block.hash)[0:block.difficulty] != '0' * block.difficulty:\n raise Exception('Proof of Work not fulfilled')\n if abs(block.difficulty - last_block.difficulty) > 1:\n raise Exception('Block difficulty must only adjust by 1')\n\n reconstructed_hash = crypto_hash(\n block.timestamp,\n block.last_hash,\n block.data,\n block.nonce,\n block.difficulty\n )\n\n if block.hash != 
reconstructed_hash:\n raise Exception('Incorrect Block hash')", "def condition(self):\n return True", "def test_40(self):\n assert 'False' == Api.requestBlock('test-40')", "def _if(self):\n debug.show(\"if:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 2:\n ifcode = isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"if:True\")\n evaluate(ifcode)\n else:\n debug.err(\"not enough items on the stack\")\n debug.show(\"if:False\")\n return None", "def is_block_complete(self, n):\n\n return self._complete(self._block(n))", "def test_15(self):\n assert 'False' == Api.requestBlock('test-15')", "def test_16(self):\n assert 'False' == Api.requestBlock('test-16')", "def check_ok (result):\n return not result is None and (not \"due to errors\" in result)", "def test_21(self):\n assert 'False' == Api.requestBlock('test-21')", "def test_06(self):\n assert 'False' == Api.requestBlock('test-06', charOrder=51)", "def is_valid(self):\n\n chain = blockchain.chain # This object of type Blockchain may be useful\n\n # Placeholder for (1a)\n\n # (checks that apply to all blocks)\n # Check that Merkle root calculation is consistent with transactions in block (use the calculate_merkle_root function) [test_rejects_invalid_merkle]\n # On failure: return False, \"Merkle root failed to match\"\n if self.merkle != self.calculate_merkle_root():\n return False, \"Merkle root failed to match\"\n\n # Check that block.hash is correctly calculated [test_rejects_invalid_hash]\n # On failure: return False, \"Hash failed to match\"\n if self.hash != self.calculate_hash():\n return False, \"Hash failed to match\"\n\n # Check that there are at most 900 transactions in the block [test_rejects_too_many_txs]\n # On failure: return False, \"Too many transactions\"\n if len(self.transactions) > 900:\n return False, \"Too many transactions\"\n\n # (checks that apply to genesis block)\n if self.is_genesis:\n # Check that height is 0 and parent_hash is \"genesis\" [test_invalid_genesis]\n # On failure: return False, \"Invalid genesis\"\n if self.height != 0 or self.parent_hash != \"genesis\":\n return False, \"Invalid genesis\"\n\n # (checks that apply only to non-genesis blocks)\n else:\n # Check that parent exists (you may find chain.blocks helpful) [test_nonexistent_parent]\n # On failure: return False, \"Nonexistent parent\"\n if self.parent_hash not in chain.blocks:\n return False, \"Nonexistent parent\"\n\n # Check that height is correct w.r.t. 
parent height [test_bad_height]\n # On failure: return False, \"Invalid height\"\n if self.height != chain.blocks[self.parent_hash].height + 1:\n return False, \"Invalid height\"\n\n # Check that timestamp is non-decreasing [test_bad_timestamp]\n # On failure: return False, \"Invalid timestamp\"\n if self.timestamp < chain.blocks[self.parent_hash].timestamp:\n return False, \"Invalid timestamp\"\n\n # Check that seal is correctly computed and satisfies \"target\" requirements; use the provided seal_is_valid method [test_bad_seal]\n # On failure: return False, \"Invalid seal\"\n if self.seal_is_valid() == False:\n return False, \"Invalid seal\"\n\n # Check that all transactions within are valid (use tx.is_valid) [test_malformed_txs]\n # On failure: return False, \"Malformed transaction included\"\n for tx in self.transactions:\n if tx.is_valid() == False:\n return False, \"Malformed transaction included\"\n\n # Check that for every transaction\n tx_in_block = {}\n input_refs_in_block = {}\n for tx in self.transactions:\n # the transaction has not already been included on a block on the same blockchain as this block [test_double_tx_inclusion_same_chain]\n # (or twice in this block; you will have to check this manually) [test_double_tx_inclusion_same_block]\n # (you may find chain.get_chain_ending_with and chain.blocks_containing_tx and util.nonempty_intersection useful)\n # On failure: return False, \"Double transaction inclusion\"\n if nonempty_intersection(chain.get_chain_ending_with(self.parent_hash), chain.blocks_containing_tx.get(tx.hash, [])):\n return False, \"Double transaction inclusion\"\n\n # If the two same transactions in this block\n if tx.hash in tx_in_block:\n return False, \"Double transaction inclusion\"\n else:\n # If not, add to the dict.\n tx_in_block[tx.hash] = tx\n\n # for every input ref in the tx\n input_user = None\n output_sum=0\n input_sum = 0\n for input_ref in tx.input_refs:\n\n # (you may find the string split method for parsing the input into its components)\n input_tx = input_ref.split(\":\")\n input_tx_hash = input_tx[0]\n input_tx_index = int(input_tx[1])\n\n # each input_ref is valid (aka corresponding transaction can be looked up in its holding transaction) [test_failed_input_lookup]\n # (you may find chain.all_transactions useful here)\n # On failure: return False, \"Required output not found\"\n if (input_tx_hash not in chain.all_transactions or input_tx_index >= len(chain.all_transactions[input_tx_hash].outputs)) and input_tx_hash not in tx_in_block:\n return False, \"Required output not found\"\n\n # every input was sent to the same user (would normally carry a signature from this user; we leave this out for simplicity) [test_user_consistency]\n # On failure: return False, \"User inconsistencies\"\n input_tx_ref = None\n if input_tx_hash in chain.all_transactions:\n input_tx_ref = chain.all_transactions[input_tx_hash]\n else:\n input_tx_ref = tx_in_block[input_tx_hash]\n\n if input_user != None and input_user != input_tx_ref.outputs[input_tx_index].receiver:\n return False, \"User inconsistencies\"\n else:\n input_user = input_tx_ref.outputs[input_tx_index].receiver\n\n # no input_ref has been spent in a previous block on this chain [test_doublespent_input_same_chain]\n # (or in this block; you will have to check this manually) [test_doublespent_input_same_block]\n # (you may find nonempty_intersection and chain.blocks_spending_input helpful here)\n # On failure: return False, \"Double-spent input\"\n if input_tx_hash in input_refs_in_block:\n return 
False, \"Double-spent input\"\n else:\n input_refs_in_block[input_tx_hash] = input_ref\n\n if input_ref in chain.blocks_spending_input and nonempty_intersection(chain.get_chain_ending_with(self.parent_hash), chain.blocks_spending_input[input_ref]):\n return False, \"Double-spent input\"\n\n # each input_ref points to a transaction on the same blockchain as this block [test_input_txs_on_chain]\n # (or in this block; you will have to check this manually) [test_input_txs_in_block]\n # (you may find chain.blocks_containing_tx.get and nonempty_intersection as above helpful)\n # On failure: return False, \"Input transaction not found\"\n if input_tx_hash not in tx_in_block and nonempty_intersection(chain.get_chain_ending_with(self.parent_hash), chain.blocks_containing_tx.get(input_tx_hash)) == False:\n return False, \"Input transaction not found\"\n input_sum += input_tx_ref.outputs[input_tx_index].amount\n\n # for every output in the tx\n for output in tx.outputs:\n\n # every output was sent from the same user (would normally carry a signature from this user; we leave this out for simplicity)\n # (this MUST be the same user as the outputs are locked to above) [test_user_consistency]\n # On failure: return False, \"User inconsistencies\"\n if output.sender != input_user:\n return False, \"User inconsistencies\"\n output_sum += output.amount\n\n # the sum of the input values is at least the sum of the output values (no money created out of thin air) [test_no_money_creation]\n # On failure: return False, \"Creating money\" \n if output_sum > input_sum:\n return False, \"Creating money\"\n\n return True, \"All checks passed\"", "def test_32(self):\n assert 'False' == Api.requestBlock('test-32')", "def valid_chain(self, block, prev_block):\n self.stop_mine()\n\n print('\\n //// MINING STOPPED\\n')\n\n print('\\n //// block entering valid_chain')\n pprint(block)\n\n if block is not None and block['message'] != 'mining stopped':\n if block['previous_hash'] == self.hash(prev_block):\n \n # Check that the Proof of Work is correct\n if self.valid_proof(prev_block['proof'], block['proof']):\n if block['index'] == self.last_block['index']:\n if self.last_block['timestamp'] > block['timestamp']:\n del self.chain[-1]\n self.chain.append(block)\n print('\\n //// true from equal index but older timestamp')\n return True\n\n elif self.last_block['timestamp'] == block['timestamp']:\n print('\\n //// true from timestamps are equal block isnt added')\n return True\n else:\n print('\\n //// true timestamp is newer not added but sending false')\n return False\n\n elif block['index'] > self.last_block['index']:\n print('\\n //// true from index is greater and block is added')\n self.chain.append(block)\n return True\n else:\n print('\\n //// false from adding block had index less than block already there')\n else:\n print('\\n //// false from not a valid proof')\n\n else:\n print('\\n //// false from hashes arent equal')\n if (block['timestamp'] < self.last_block['timestamp']):\n if (block['index'] == self.last_block['index']):\n print('\\n //// hashes arent equal but block is older, subtracting and adding')\n del self.chain[-1]\n self.chain.append(block)\n return True\n\n elif (block['timestamp'] > self.last_block['timestamp']):\n if(block['index'] > self.last_block['index']):\n self.chain.append(block)\n return True\n else:\n return True\n\n return False\n\n else:\n return 'reject'", "def check(self, description: Description) -> bool:", "def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % 
self.fp != 0", "def validate_acid_block_preview(self, acid_block):\r\n self.assertTrue(acid_block.init_fn_passed)\r\n self.assertTrue(acid_block.resource_url_passed)\r\n self.assertTrue(acid_block.scope_passed('user_state'))\r\n self.assertTrue(acid_block.scope_passed('user_state_summary'))\r\n self.assertTrue(acid_block.scope_passed('preferences'))\r\n self.assertTrue(acid_block.scope_passed('user_info'))", "def test_31_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\tif a>0 then return 0;\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,431))", "def test_32_if(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a=0 then return 1; else return a; end\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,432))", "def validate_acid_block_view(self, acid_block):\r\n self.assertTrue(acid_block.init_fn_passed)\r\n self.assertTrue(acid_block.resource_url_passed)\r\n self.assertTrue(acid_block.scope_passed('user_state'))\r\n self.assertTrue(acid_block.scope_passed('user_state_summary'))\r\n self.assertTrue(acid_block.scope_passed('preferences'))\r\n self.assertTrue(acid_block.scope_passed('user_info'))", "def valid(self) -> bool:\n pass", "def test_30(self):\n assert 'False' == Api.requestBlock('test-30')", "def valid_chain(self, chain):\n\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n last_block_hash = self.hash(last_block)\n if block['previous_hash'] != last_block_hash:\n return False\n\n # Check that the Proof of Work is correct\n if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def is_block(modules):\n if isinstance(modules, (BasicBlock, Bottleneck)):\n return True\n return False", "def check_block(block):\n asides = block.runtime.get_asides(block)\n assert len(asides) == 1, f'Found {asides} asides but expected only test_aside'\n assert isinstance(asides[0], AsideTestType)\n category = block.scope_ids.block_type\n assert asides[0].data_field == f'{category} aside data'\n assert asides[0].content == f'{category.capitalize()} Aside'\n\n for child in block.get_children():\n check_block(child)", "def is_valid(self):\r\n return self.circuit.is_valid", "def test_24(self):\n assert 'False' == Api.requestBlock('test-24')", "def test_29(self):\n assert 'True' == Api.requestBlock('test-29')", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n\n # Check that the hash of block is correct\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # Check the Proof of Work\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n \n last_block = block\n current_index += 1\n return True", "def validate_Eval(result, _dummy_expression):\n return result is not None", "def isValid(self):\n return self.file_name != \"\" and self.line_number != 0", "def test_39(self):\n assert 'True' == Api.requestBlock('test-39')", "async def bot_check(self, ctx):\n blocked = await 
self.db.fetchrow(\n \"\"\"\n SELECT *\n FROM blocks\n WHERE user_id=$1\n \"\"\",\n ctx.author.id,\n )\n if blocked is None:\n return True\n raise BlackListed", "def test_04(self):\n assert 'False' == Api.requestBlock('test-04', charOrder='')", "def test_25(self):\n assert 'False' == Api.requestBlock('test-25')", "def test_31(self):\n assert 'True' == Api.requestBlock('test-31')", "def is_chain_valid(self, chain):\r\n previous_block = chain[0]\r\n block_index = 1\r\n while block_index < len(chain):\r\n block = chain[block_index]\r\n if block['previous_hash'] != self.hash(previous_block):\r\n return False\r\n previous_proof = previous_block['proof']\r\n proof = block['proof']\r\n hash_operation = self.hash(block)\r\n if hash_operation[:4] != '0000':\r\n return False\r\n previous_block = block\r\n block_index += 1\r\n return True", "def IsExitBlock(self, node) -> bool:\n return self.nodes[node].get(\"exit\", False)", "def test_block_bad_state(self):\n pass", "def is_valid(self):\n # check data sets\n for dataset in self.datasets.values():\n # Non-optional datasets must be enabled\n if not dataset.structure.optional and not dataset.enabled:\n return False\n # Enabled blocks must be valid\n if dataset.enabled and not dataset.is_valid:\n return False\n # check variables\n for block_header in self.block_headers:\n for dataset in block_header.data_items:\n # Non-optional datasets must be enabled\n if not dataset.structure.optional and not dataset.enabled:\n return False\n # Enabled blocks must be valid\n if dataset.enabled and not dataset.is_valid():\n return False", "def valid(self) -> bool:\n return True", "def _check_block_type():\n \n \n maxBlocks = blocks[0]['maxBlocks']\n freeEnd = blocks[0]['freeEnd']\n \n while(blkcounter in xrange(freeEnd + 1, maxBlocks, 1)) :\n if (type(bdata) == dict) :\n # directory, file inodes\n # won't touch the character device file\n if IS_DIR(blocks[blkcounter]['mode']):\n status = _check_dir_type(blocks[blkcounter], blkcounter)\n elif IS_REG(blocks[blkcounter]['mode']) :\n status = _check_reg_type(blocks[blkcounter], blkcounter)\n \n if(status != STATUS['OK']) :\n return status\n \n return STATUS['OK']", "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def validate(self):\n if not ((self.bpq_kind == self.BPQ_BLOCK_KIND_QUERY) or\n (self.bpq_kind == self.BPQ_BLOCK_KIND_RESPONSE) or\n (self.bpq_kind == self.BPQ_BLOCK_KIND_RESPONSE_DO_NOT_CACHE_FRAG) or\n (self.bpq_kind == self.BPQ_BLOCK_KIND_PUBLISH)):\n return False\n\n if not ((self.matching_rule == self.BPQ_MATCHING_RULE_EXACT) or\n (self.matching_rule == self.BPQ_MATCHING_RULE_TOKENS) or\n (self.matching_rule == self.BPQ_MATCHING_RULE_NEVER)):\n return False\n\n if not (self.src_eid and (len(self.src_eid) == self.src_eid_len)):\n return False\n\n if not (self.bpq_id and (len(self.bpq_id) == self.bpq_id_len)):\n return False\n\n if not (self.bpq_val and (len(self.bpq_val) == self.bpq_val_len)):\n return False\n\n if not (self.frag_cnt == len(self.frag_desc)):\n return False\n\n for d in self.frag_desc:\n if not (d.has_key(\"frag_offset\") and d.has_key(\"frag_len\")):\n return False\n\n return True", "def has_inside(block):\n return comma(block[0]) if block else '#N/A'", "def _block_matches_all(block_data):\n # do the checks which don't require loading any additional data\n if (\n self._block_matches(block_data, qualifiers) and\n self._block_matches(block_data.fields, settings)\n ):\n if content:\n 
definition_block = self.get_definition(course_locator, block_data.definition)\n return self._block_matches(definition_block['fields'], content)\n else:\n return True" ]
[ "0.78990954", "0.6600305", "0.63182026", "0.63088113", "0.63049823", "0.6304188", "0.6274338", "0.62668514", "0.6249396", "0.62368685", "0.62282825", "0.6225797", "0.62173724", "0.6185306", "0.6149648", "0.61086375", "0.6089408", "0.60828173", "0.6078198", "0.6062693", "0.6043321", "0.60343885", "0.5967337", "0.59645027", "0.5955123", "0.59518284", "0.59311426", "0.5916689", "0.5912164", "0.58929396", "0.5882824", "0.5882824", "0.5871161", "0.58512884", "0.58493227", "0.5848021", "0.5842264", "0.583868", "0.583868", "0.58327144", "0.5829176", "0.582594", "0.581562", "0.5813151", "0.58077043", "0.5804267", "0.5801222", "0.57993305", "0.57844186", "0.57764685", "0.57460517", "0.5739442", "0.5735214", "0.57292086", "0.5720529", "0.57178277", "0.5716183", "0.57142234", "0.570992", "0.57088375", "0.5708802", "0.5708587", "0.57078856", "0.5702561", "0.5693498", "0.56914824", "0.56783473", "0.5677271", "0.5671606", "0.5670828", "0.56707805", "0.56642354", "0.5652061", "0.56366694", "0.56344867", "0.5629829", "0.562574", "0.5608291", "0.5606486", "0.5605819", "0.5593206", "0.55915743", "0.5591535", "0.55838174", "0.55758756", "0.5575086", "0.55712414", "0.55660677", "0.5558777", "0.55572456", "0.5556086", "0.555555", "0.555208", "0.5551141", "0.55500144", "0.5548138", "0.55429196", "0.55333894", "0.5527913", "0.55248636", "0.5522113" ]
0.0
-1
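The check_valid document above delegates entirely to an optional _if command. A minimal runnable sketch of that delegation pattern, assuming hypothetical stand-in classes (IfCondition, Commands, Block and the sample parameters below are illustrative, not taken from the source):

# Illustrative sketch only: these stand-in classes are hypothetical,
# not the real formatter classes the document above belongs to.
class IfCondition:
    def __init__(self, key):
        self.key = key  # name of the parameter the condition tests

    def check_valid(self, get_params):
        # a truthy parameter value means the condition holds
        return bool(get_params(self.key))

class Commands:
    def __init__(self, _if=None):
        self._if = _if

class Block:
    def __init__(self, commands):
        self.commands = commands

    def check_valid(self, get_params):
        # same shape as the document above: returns None when no `if` command is set
        if self.commands._if:
            return self.commands._if.check_valid(get_params)

params = {"wifi": "up", "battery": 0}
block = Block(Commands(_if=IfCondition("battery")))
print(block.check_valid(params.get))           # False (battery is falsy)
print(Block(Commands()).check_valid(params.get))  # None (no condition set)

Returning None rather than False when no _if command is set matters: the render document in the next record tests `if valid is not False:`, so "no condition" and "condition failed" take different paths.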
render the block and return the output.
def render(self, get_params, module, _if=None): enough = False output = [] valid = None if self.commands.show: valid = True if self.parent and self.commands.soft and _if is None: return None, self if _if: valid = True elif self.commands._if: valid = self.check_valid(get_params) if valid is not False: for item in self.content: if isinstance(item, Placeholder): sub_valid, sub_output, enough = item.get(get_params, self) output.append(sub_output) elif isinstance(item, Literal): sub_valid = None enough = True output.append(item.text) elif isinstance(item, Block): sub_valid, sub_output = item.render(get_params, module) if sub_valid is None: output.append(sub_output) else: output.extend(sub_output) valid = valid or sub_valid if not valid: if self.next_block: valid, output = self.next_block.render(get_params, module, _if=self.commands._if) elif self.parent is None and ((not self.next_block and enough) or self.base_block): valid = True else: output = [] # clean color = self.commands.color if color and color[0] != "#": color_name = f"color_{color}" threshold_color_name = f"color_threshold_{color}" # substitute color color = ( getattr(module, color_name, None) or getattr(module, threshold_color_name, None) or getattr(module.py3, color_name.upper(), None) ) if color == "hidden": return False, [] text = "" out = [] if isinstance(output, str): output = [output] # merge as much output as we can. first = True last_block = None for index, item in enumerate(output): is_block = isinstance(item, Block) if not is_block and item: last_block = None if isinstance(item, (str, bool, int, float, bytes)) or item is None: text += str(item) continue elif text: if not first and (text == "" or out and out[-1].get("color") == color): out[-1]["full_text"] += text else: part = {"full_text": text} if color: part["color"] = color out.append(part) text = "" if isinstance(item, Composite): if color: item.composite_update(item, {"color": color}, soft=True) out.extend(item.get_content()) elif is_block: # if this is a block then likely it is soft. if not out: continue for other in output[index + 1 :]: if other and not isinstance(other, Block): valid, _output = item.render(get_params, module, _if=True) if _output and _output != last_block: last_block = _output out.extend(_output) break else: if item: out.append(item) first = False # add any left over text if text: part = {"full_text": text} if color: part["color"] = color out.append(part) # process any min/max length commands max_length = self.commands.max_length min_length = self.commands.min_length if max_length or min_length: for item in out: if max_length is not None: item["full_text"] = item["full_text"][:max_length] max_length -= len(item["full_text"]) if min_length: min_length -= len(item["full_text"]) if min_length > 0: out[0]["full_text"] = " " * min_length + out[0]["full_text"] min_length = 0 return valid, out
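To make the trimming step at the end of render concrete, here is a standalone sketch of the min/max length pass over a plain list of {"full_text": ...} parts. trim_parts is a hypothetical helper name and the sample data is invented; the padding is also simplified to happen once after all parts are measured, rather than inside the loop:

# Standalone sketch of the min/max length pass at the end of render().
# trim_parts is a hypothetical helper; padding is applied once after the
# loop instead of inside it, which is a simplification.
def trim_parts(out, max_length=None, min_length=None):
    for item in out:
        if max_length is not None:
            item["full_text"] = item["full_text"][:max_length]
            max_length -= len(item["full_text"])
        if min_length:
            min_length -= len(item["full_text"])
    if min_length and min_length > 0:
        # pad on the left of the first part, as render() does
        out[0]["full_text"] = " " * min_length + out[0]["full_text"]
    return out

parts = [{"full_text": "CPU 42%"}, {"full_text": " MEM 17%"}]
print(trim_parts([dict(p) for p in parts], max_length=10))
# -> [{'full_text': 'CPU 42%'}, {'full_text': ' ME'}]
print(trim_parts([dict(p) for p in parts], min_length=20))
# -> first part becomes '     CPU 42%' (left-padded so the total reaches 20)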
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_block(data):\n\tsnippet = data[2] \n\ttitle = data[0]['name']\n\tdescription = data[0]['description']\n\tblock_type = data[0]['type']\n\t\n\n\t# change the panel outline for\n\t# warnings and detections\n\tblock_border = 'yellow' if block_type == 'warning' else 'red1'\n\n\tcode_snippet = Syntax(\n\t\t\t\t\t\tsnippet, \n\t\t\t\t\t\tSYNTAX, \n\t\t\t\t\t\ttheme=THEME, \n\t\t\t\t\t\tline_numbers=True, \n\t\t\t\t\t\tstart_line=data[1]\n\t\t\t\t\t)\n\n\tdescription_txt = Markdown(\n\t\t\tf\"\"\" ## Explanation \\n {description} \"\"\",\n\t\t\tinline_code_lexer=SYNTAX,\n\t\t\tinline_code_theme=THEME,\n\t\t)\n\t\n\tcomponents = RenderGroup(\n\t\t\t\t\tcode_snippet,\n\t\t\t\t\tdescription_txt\n\t\t\t\t)\n\t\n\tblock = Panel(\n\t\t\tcomponents,\n\t\t\ttitle=f'[b white]{title}',\n\t\t\twidth=60,\n\t\t\tborder_style=block_border\n\t\t)\n\n\t# render\n\tprint('\\n')\n\tprint(block)", "def render(self, template_name, block, **context):\n template = self._get_template(template_name)\n return self._render_context(template,\n template.blocks[block],\n **context)", "def _render(self) -> None:\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\r\n super().render()", "def run(self) -> None:\n self._render()\n print(self.sio.getvalue())", "def render(self, block, view_name, context=None):\n # Set the active view so that :function:`render_child` can use it\n # as a default\n old_view_name = self._view_name\n self._view_name = view_name\n try:\n\n view_fn = getattr(block, view_name, None)\n if view_fn is None:\n view_fn = getattr(block, \"fallback_view\", None)\n if view_fn is None:\n raise NoSuchViewError(block, view_name)\n view_fn = functools.partial(view_fn, view_name)\n\n frag = view_fn(context)\n\n # Explicitly save because render action may have changed state\n block.save()\n updated_frag = self.wrap_xblock(block, view_name, frag, context)\n return self.render_asides(block, view_name, updated_frag, context)\n finally:\n # Reset the active view to what it was before entering this method\n self._view_name = old_view_name", "def render():\n html = request.get_data().decode('utf-8')\n sio.emit('render', html)\n return 'OK'", "def _render_context(self, template, block, **context):\n return u''.join(block(template.new_context(context)))", "def render(self):\n\n self.desert_image.render()\n self.cannon_image.render()\n self.play_button.render()\n self.escape_label.render()", "def render(self):", "def render(self, **kw):\r\n style = kw.get('style', c.render_style or 'html')\r\n return Wrapped.part_render(self, self.style, style = style, **kw)", "def render(self, r):\n raise NotImplementedError", "def _render(self) -> str:\n html = self._template.render(self._transient_context)\n self._transient_context = None\n return html", "def render(self):\n fmt = 'B' + 'B' * len(self.frame)\n self.sendPacket(6, struct.pack(fmt, self.start_code, *self.frame))", "def render(self, rstate):\n pass", "def render(self):\n raise NotImplementedError", "def render(self, renderer, right=False):\n pass # pragma: no cover", "def output_open_html(self):\n text = self.token[\"text\"]\n tag = self.token[\"tag\"]\n if self._parse_block_html and tag not in _pre_tags:\n text = self.inline(text)\n extra = self.token.get(\"extra\") or \"\"\n html = \"<%s%s>%s</%s>\" % (tag, extra, text, tag)\n return self.renderer.block_html(html)", "def render(self, mode='human', close=False):\n return 
None", "def render_report(blocks):\n\tfor block in blocks:\n\t render_block(\n\t\t(\n\t\t block[0],\t\t# signature \n\t\t block[1], # line number\n\t\t block[2],\t\t# line\n\t\t)\n\t )", "def render(self, mode='human', close=False):\n pass", "def render(self):\n self._render_text = self.content.replace('\\n', '<br>') # deal with new line\n return render_str(\"post.html\", p = self)", "def render(self):\n context = {'groups': self._groups}\n\n return loader.render_to_string(self._template_path, dictionary=context)", "def render(self):\n self._render_text = self.content.replace('\\n', '<br>')\n return TemplateFile.jinja_render_str(\"post.html\", p=self)", "def render(self, mode='human'):", "def render(self) -> str:\n with no_colors():\n self._render()\n return self.sio.getvalue()", "def render(self, mode='human', close=False):\n pass", "def render(self):\n raise NotImplementedError()", "def makeBlock(tag):\n return {\"t\":\"RawBlock\",\"c\":[\"html\",tag]}", "def render(self):\n raise RenderNotImplemented('Render function is not implemented.')", "def end_rendering(self, output):\n if self.wrapper_to_generate:\n output = self.div(output, id=self.id, class_='nagare-generated nagare-async-view')\n\n return output", "def render(self):\n self.run()\n return [{'dest' : self.dest,\n 'text' : self.tmpl.render(**self.data)}]", "def render(self) -> str:\n\n return \"\\n\".join(self.render_iter())", "def render(self, mode='human'):\n pass # no use in this situation", "def render(self, out_file=None, cur_ind=\"\"):\n output_string = cur_ind\n if self.tag:\n output_string += self.get_opening_tag_string()\n for line in self.content:\n output_string += f\"{line}\"\n if self.tag:\n output_string += f\"</{self.tag}>\"\n if out_file:\n out_file.write(output_string)\n return output_string", "def render(self):\n self.increase_view_count()\n return render_to_string(self.template.template_file, {'advert':self})", "def render(self, mode='human'):\n\n pass", "def draw_block(cls, coord):\n block, _ = MapModel.get_block(coord)\n\n block_size = Resolution.get_primary().block_height\n block_surface = BlockTexture.load_texture(block.type, block.variant)\n cls.view.surface.blit(\n block_surface,\n coord.relative(\n depth=cls.depth, section=cls.section, scale=block_size\n ),\n )\n # Render decorations.\n # TODO: This will NOT work with offsets into unrendered blocks!\n decor, offset = block.decoration\n if decor:\n decoration = BlockTexture.load_texture(decor)\n cls.view.surface.blit(\n decoration,\n coord.get_adjacent(offset).relative(\n depth=Depth.of(coord.row),\n section=Section.of(coord.col),\n scale=block_size,\n ),\n )", "def render(self, template, **kw):\n self.write(self.render_string(template, **kw))", "def draw_block(self):\n draw_component = DrawComponent(self.component_spot,self.component_type)\n return draw_component", "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "def _render(self):\n self._renderer.render_menu()\n pg.display.update()", "def render(self, mode='human', close=False):\n raise NotImplementedError()", "def serve(self) -> str:\n return self._render()", "def render_html(self):\n return self.template.render(content=self.content, **self.styles)", "def render(self):\n self.env.render()", "def render(self):\n return self", "def render(self):\n self.rendering = True\n self.env.render()", "def 
render(self):\n if self.can_render():\n output = '<ul>'\n for item in self.items:\n output += \"<li>{0}</li>\".format(item)\n return output + '</ul>'\n return ''", "def render(self):\n self.env.render()\n #input(\"Press enter to take a step \")", "def render(self):\n\n self._render_text = self.content.replace('\\n', '<br>')\n return render_str('post_template.html', post=self, user=self.user)", "def render(self, context=None):\n # Make the complete context we'll use.\n render_context = dict(self.context)\n if context:\n render_context.update(context)\n return self._render_function(render_context, self._do_dots)", "def render(self):\n return render_to_string(\n self.template_name, self.get_context_data(), request=self.request\n )", "def renderNode(self, node):\n self.html += '<div id=\\\"nodeDecoration\\\">'\n self.html += '<p id=\\\"nodeTitle\\\">'\n self.html += escape(node.titleLong)\n self.html += '</p></div>\\n'\n for idevice in node.idevices:\n block = g_blockFactory.createBlock(None, idevice)\n if not block:\n log.critical(\"Unable to render iDevice.\")\n raise Error(\"Unable to render iDevice.\")\n if hasattr(idevice, \"isQuiz\"):\n self.html += block.renderJavascriptForWeb()\n self.html += block.renderView(self.style)\n for child in node.children:\n self.renderNode(child)", "def render(self, screen):\n pass", "def render(self, screen):\n pass", "def render(self, file_out, cur_ind=''):\n file_out.write(cur_ind + f'<{self.tag}')\n Element.add_values(self, file_out)\n file_out.write('>')\n Element.add_items_no_line(self, file_out)\n file_out.write(cur_ind + f'</{self.tag}>\\n')", "def render(self, resource):\n result = resource.render(self)\n if result is NOT_DONE_YET:\n return\n self.write(result)\n self.finish()", "def render(self, context=None, **kwargs):\n # store the given context\n global_context = {}\n # store the result\n result = []\n # group the given context or kwargs\n if context:\n global_context.update(context)\n elif kwargs:\n global_context.update(kwargs)\n\n # this function to output from context\n # to the rendered template\n def write(*args):\n result.extend([str(arg) for arg in args])\n\n def fmt_write(fmt, *args):\n result.append(fmt % args)\n\n # add write and fmt_write into global_context\n global_context['write'] = write\n global_context['fmt_write'] = fmt_write\n # run the code\n for is_code, token in self.tokens:\n if is_code:\n exec(token, global_context)\n else:\n result.append(token)\n return ''.join(result)", "def render_all_blocks(self, template_name, template_vars):\n # Read the state template file into a template object using the\n # environment object\n found_template_name = (\n self.find_template_name(template_name + '\\.tpl(\\.\\w+)?$'))\n template = self._template_env.select_template(\n [template_name, found_template_name])\n\n # Render template code for each of the template blocks (all except the\n # meta block)\n template_block_code = {\n block: self.render_block(template_name, template_vars, block)\n for block in\n self.get_template_blocks(template_name) if block != 'meta'}\n\n return template_block_code", "def render(self,screen):\n for boids in self.boid_list:\n boids.render(screen)", "def _draw_block(self, block: Tuple[int, int], kind: str) -> None:\n # ToDo: implement display picture: https://pythonprogramming.net/displaying-images-pygame/\n if self.board_formatting[kind]['picture'] is not None:\n raise Exception('Displaying pictures has not yet been implemented!')\n else:\n rectangle = [block[1] * self.block_length, block[0] * 
self.block_length,\n self.block_length, self.block_length]\n pygame.draw.rect(self.display, self.board_formatting[kind]['color'], rectangle)", "def student_view(self, context=None):\n context = {\n u\"title\": self.title,\n u\"description\": self.description,\n u\"embed_code\": self.embed_code\n }\n fragment = Fragment()\n fragment.add_content(\n loader.render_template(\"templates/vr.html\", context)\n )\n fragment.add_css_url(\n self.runtime.local_resource_url(self, \"public/css/vr.css\")\n )\n self.runtime.publish(self, 'progress', {})\n return fragment", "def render(self, out_file=None, cur_ind=\"\"):\n output_string = cur_ind\n if self.tag:\n output_string += self.get_opening_tag_string()\n for line in self.content:\n if not isinstance(line, str): # Recursively render objects\n output_string += line.render(out_file, cur_ind + self.indent)\n else: #If it's a string just render it with padding\n output_string += cur_ind + self.indent\n output_string += f\"{line}\"\n if self.add_new_line:\n output_string += \"\\n\"\n if self.tag:\n output_string += cur_ind\n output_string += f\"</{self.tag}>\"\n if out_file:\n out_file.write(output_string)\n return output_string", "def do_block():\n print_column()\n print_rows()", "def do_block():\n print_column()\n print_rows()", "def student_view(self, context=None): # pylint: disable=unused-argument\n html = self.resource_string(\"static/html/chartsxblock.html\")\n frag = Fragment(html.format(self=self))\n frag.add_css(self.resource_string(\"static/css/chartsxblock.css\"))\n frag.add_javascript(self.resource_string(\"static/vendor/GCloader.js\"))\n frag.add_javascript(self.resource_string(\"static/js/src/chartsxblock.js\"))\n frag.initialize_js('ChartsXBlock', {'chartData': self.chart_data,\n 'chartType': self.chart_type,\n 'chartName': self.chart_name,\n 'chartTypes': self.chart_types})\n return frag", "def render(self):\n\n self.page_current = self.pages[-1]\n\n # Remember terminal size.\n self.terminal_width = terminal.width\n self.terminal_height = terminal.height - 1\n\n # Do not render if no items exist in page yet.\n if not self.page_current.items:\n return\n\n # Fill buffer with content if empty.\n if not self.render_buffer:\n for line in self.page_current.item_strings_formatted:\n line += terminal.on_black(' ' * (self.terminal_width - terminal.length(line)))\n self.render_buffer.append(line)\n self.render_offset = self.page_current.item_onscreenlocs[self.page_current.item_selected]\n\n # Adjust the rendering offset if selected menu item is out of bounds of current terminal.\n if self.page_current.item_onscreenlocs[self.page_current.item_selected] >= self.render_offset + self.terminal_height:\n self.render_offset += self.terminal_height\n elif self.page_current.item_onscreenlocs[self.page_current.item_selected] < self.render_offset:\n self.render_offset -= self.terminal_height\n if self.render_offset < 0:\n self.render_offset = 0\n\n # Render buffer content to terminal\n for buffer_line_no in range(self.terminal_height):\n try:\n buffer_line = self.render_buffer[self.render_offset + self.render_offset_item + buffer_line_no]\n print(terminal.move(buffer_line_no, 0) + buffer_line, end='')\n except IndexError:\n # Print blank line in case buffer is empty\n print(terminal.move(buffer_line_no, 0) + (terminal.on_black(' ' * self.terminal_width)), end='')\n\n # Render status\n print(terminal.move(self.terminal_height, 0) + (terminal.black_on_cyan(self.status_text + ' ' * (self.terminal_width - terminal.length(self.status_text)))), end='')\n\n # Render 
cursor.\n # TODO Need to fix bug where the cursor occasionally gets drawn outside the screen and disrupting the rendering process\n if self.render_offset_item == 0:\n cursor = terminal.white_on_black('>')\n try:\n cursor += terminal.white_on_black('-' * (self.page_current.item_indentations[self.page_current.item_selected] * 2))\n except IndexError:\n pass\n print(terminal.move(self.page_current.item_onscreenlocs[self.page_current.item_selected] - self.render_offset, 0) + cursor)", "def render(self):\n try:\n if self.permit():\n return self.renderer.render(self)\n except AttributeError:\n if self.renderer is None:\n raise NotImplementedError(\"Should have implemented a renderer for {0}\".format(self.name))\n else:\n raise\n return ''", "def index(self, **kw):\n\n template = self.context\n request = self.request\n\n request.response.setHeader('content-type',\n template.content_type)\n\n return template.render(request, **kw)", "def rawHTMLrendered(self):", "def render(self, template: str, **vars) -> str:", "def render_meta_block(self, template_name):\n # Read the state template file into a template object using the\n # environment object\n found_template_name = (\n self.find_template_name(template_name + '\\.tpl(\\.\\w+)?$'))\n template = self._template_env.select_template(\n [template_name, found_template_name])\n\n # Generate a context placeholder\n context = template.new_context\n\n # Render template code for the template meta block\n meta_block_code = template.blocks['meta'](context(vars={})).next()\n\n return meta_block_code", "def render(self):\n self._render_product()\n\n self.params['product'] = self.product\n\n params = copy(self.params)\n\n try:\n if self.source.needs_render:\n # if this task has upstream dependencies, render using the\n # context manager, which will raise a warning if any of the\n # dependencies is not used, otherwise just render\n if params.get('upstream'):\n with params.get('upstream'):\n self.source.render(params)\n else:\n self.source.render(params)\n except Exception as e:\n raise type(e)('Error rendering code from Task \"{}\", '\n ' check the full traceback above for details'\n .format(repr(self), self.params)) from e\n\n self._status = (TaskStatus.WaitingExecution if not self.upstream\n else TaskStatus.WaitingUpstream)", "def get_html(self):\r\n context = self.get_context()\r\n html = self.system.render_template(\r\n '{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context\r\n )\r\n return html", "def render_reponse(self, template, **context):\n content = self.jinja2.render_template(template, **context)\n self.response.write(content)", "def instrumented_test_render(self, context):\n signals.template_rendered.send(sender=self, template=self, context=context)\n return self.nodelist.render(context)", "def render(self, template, **kw):\n self.write(self._render_str(template, **kw))", "def render(self):\n\n context = {\n 'model': self,\n 'hidden_fields': self.hidden_fields,\n 'css_prefix': self.css_prefix,\n }\n rendered = loader.render_to_string(self.template_path,\n dictionary=context)\n return rendered", "def render(self):\n self.rendered = self.value\n return self.rendered", "def render(self, template, *args, **kwargs):\n self._render(template, sys.stdout, *args, **kwargs)", "def draw(self, parent, cr):\n for x, y in self.get_block_coords():\n parent.draw_block_element(cr, x, y)", "def mj_render(self):\n self.render(mode='human')", "def student_view(self, context=None):\n html = self.resource_string(self.html_path)\n fragment = 
Fragment(html.format(self=self))\n fragment.add_css(self.resource_string(self.css_path))\n fragment.add_javascript(self.resource_string(self.js_path))\n fragment.initialize_js('WhoWhereWhyXBlock')\n return fragment", "def render(self, renderer: RenderingManager):\n self.grader.render(renderer)", "def renderHTTP(ctx):", "def render(self, file_out, cur_ind=''):\n file_out.write(cur_ind + f'<{self.tag}')\n Element.add_values(self, file_out)\n file_out.write('>\\n')\n Element.add_items(self, file_out)\n file_out.write(cur_ind + f'</{self.tag}>\\n')", "def codeBlock( self, text ):\n indent= self.context[-1]\n lines= text.split( '\\n' )\n if len(lines) == 1: # Fragment with no newline.\n self.write('{!s}{!s}'.format(self.lastIndent*' ', lines[0]) )\n self.lastIndent= 0\n self.fragment= True\n else:\n first, rest= lines[:1], lines[1:]\n self.write('{!s}{!s}\\n'.format(self.lastIndent*' ', first[0]) )\n for l in rest[:-1]:\n self.write( '{!s}{!s}\\n'.format(indent*' ', l) )\n if rest[-1]:\n self.write( '{!s}{!s}'.format(indent*' ', rest[-1]) )\n self.lastIndent= 0\n self.fragment= True\n else:\n # Buffer a next indent\n self.lastIndent= len(rest[-1]) + indent\n self.fragment= False", "def render(self,value):\n self.content += value\n if self._rendernl:\n self.content += self._rendernl", "def render(self, mode='human', close=False):\n self.gym.render(mode=mode, close=close)", "def render(self, data, *args, **kwargs):\n pass # pragma: nocover", "def render( *args, **kwargs ):" ]
[ "0.7029032", "0.69647723", "0.6806205", "0.65881765", "0.65881765", "0.65881765", "0.65881765", "0.65881765", "0.65881765", "0.65558636", "0.6526438", "0.6469616", "0.6436933", "0.64222467", "0.63671017", "0.63276225", "0.6301424", "0.6263438", "0.6262892", "0.6222637", "0.61917096", "0.61849964", "0.6142114", "0.6135704", "0.6127353", "0.61211157", "0.6104681", "0.60987437", "0.60900253", "0.6083268", "0.6075749", "0.6074311", "0.6057477", "0.60555387", "0.6046632", "0.60323685", "0.6031627", "0.60198796", "0.59999067", "0.5999396", "0.59817517", "0.59788984", "0.59686095", "0.5936603", "0.5919452", "0.59023994", "0.5874536", "0.5874536", "0.5874536", "0.5860077", "0.58523285", "0.58210826", "0.5809542", "0.5808336", "0.5805234", "0.58039826", "0.5798226", "0.57926196", "0.57891184", "0.57715", "0.5769802", "0.57684684", "0.5756998", "0.5756998", "0.57545936", "0.5753187", "0.5735119", "0.57251626", "0.57245845", "0.57174474", "0.57158357", "0.5710612", "0.56991655", "0.56991655", "0.56962466", "0.56919336", "0.5687553", "0.5685101", "0.56841385", "0.5680185", "0.5672627", "0.5668297", "0.5667108", "0.56632984", "0.5651388", "0.5650363", "0.5647693", "0.5647598", "0.5645278", "0.56378317", "0.56371266", "0.5633591", "0.56330115", "0.5631753", "0.5629145", "0.56290925", "0.56128144", "0.55929196", "0.5590812", "0.55858934" ]
0.56899357
76
Create a new position.
def __init__(self, x, y):
    self.x = x
    self.y = y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_position(self):\n raise NotImplementedError", "def create(self, pos):\n self.pos = pos", "def position(self, create, position=2, **kwargs): # pylint: disable=unused-argument, method-hidden\r\n self.position = position", "def position(self, create, position=2, **kwargs): # pylint: disable=unused-argument, method-hidden\r\n self.position = position", "def new_position(self, p):\n if self.track:\n self.gnx = p.gnx\n else:\n p = self.get_position()\n\n self.new_position_edit(p)\n self.new_position_view(p)", "def create_position(self, position, name):\n # get row name and create a column\n row_name = self.rows[0]\n self.create_column(0, position)\n col_name = self.columns[position]\n\n if self.verbose:\n print(f'Creating and opening subgroup {row_name}/{col_name}/{name}')\n\n # create position subgroup\n self.store[row_name][col_name].create_group(name)\n\n # update trackers\n self.current_pos_group = self.store[row_name][col_name][name]\n self.current_well_group = self.store[row_name][col_name]\n self.current_position = position\n\n # update ome-metadata\n self.positions[position] = {'name': name, 'row': row_name, 'col': col_name}\n self._update_plate_meta(position)\n self._update_well_meta(position)", "def create_position(self):\n area = utils.AreaCreator(\n self._width, self._height, starts_at=self._starts_at,\n is_battle_area=False)\n for coordinate in area.get_coordinates():\n position = coordinate.get_position()\n self._cells.append(PShipCell(position))\n self._update_battle_position(self._cells)", "def set_new_location(self, xPos, yPos):", "def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])", "def create_position(self):\n area = utils.AreaCreator(\n self._width, self._height, starts_at=self._starts_at,\n is_battle_area=False)\n for coordinate in area.get_coordinates():\n position = coordinate.get_position()\n self._cells.append(QShipCell(position))\n self._update_battle_position(self._cells)", "def setPosition(position):", "def new_position_edit(self, p):\n\n DBG(\"new edit position\")\n if self.mode != 'view':\n self.edit_widget.new_text(p.b)", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def new_position_view(self, p):\n DBG(\"new view position\")\n if self.mode != 'edit':\n if self.recurse:\n text = g.getScript(self.c, p, useSelectedText=False, useSentinels=False)\n else:\n text = p.b\n self.view_widget.new_text(text)", "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "def set_position(self, position):\n self.position = tuple(position)", "def test_transact_position_new_position():\n # Create the PositionHandler, Transaction and\n # carry out a transaction\n ph = PositionHandler()\n asset = 'EQ:AMZN'\n\n transaction = Transaction(\n asset,\n quantity=100,\n dt=pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC),\n price=960.0,\n order_id=123,\n commission=26.83\n )\n ph.transact_position(transaction)\n\n # Check that the position object is set correctly\n pos = ph.positions[asset]\n\n assert pos.buy_quantity == 100\n assert pos.sell_quantity == 0\n assert 
pos.net_quantity == 100\n assert pos.direction == 1\n assert pos.avg_price == 960.2683000000001", "def set_position(self, new_pos):\n self._position = new_pos", "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def __init__(self, pos):\r\n self.pos = pos", "def set_pos(self, p: tuple) -> None:\n self.pos = p", "def set_position(self, position):\n raise NotImplementedError()", "def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))", "def spawn(self, y, x, h, w):\n self.pos = (np.random.randint(y, y + h), np.random.randint(x, x + w))", "def _init(self, position):\n\t\tself._position = position", "def set_position(self, new_pos, units=\"bohr\"):\n from numpy import array\n # Convert the input to the right units.\n pos = array(new_pos)\n if _IsAngstroem(units):\n pos /= AU_to_A\n if _IsAngstroem(self):\n pos *= AU_to_A\n pos = [x for x in pos]\n\n # Insert\n if 'r' in self.store:\n self.store['r'] = pos\n else:\n self.store[self.sym] = pos\n pass", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def create_new_point(self, coords, **options):\n\n if 'fill' not in options:\n options['fill'] = self.variables.foreground_color\n\n x1, y1 = (coords[0] - self.variables.point_size), (coords[1] - self.variables.point_size)\n x2, y2 = (coords[0] + self.variables.point_size), (coords[1] + self.variables.point_size)\n shape_id = self.create_oval(x1, y1, x2, y2, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POINT, options)\n self.variables.vector_objects[str(shape_id)].point_size = self.variables.point_size\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def position(self, position):\n self.move_to(position)", "def set_position(self, position):\n self.position = position", "def create_marker_for_position(self, position):\n\n marker = Marker()\n marker.header.frame_id = '/world'\n marker.type = marker.SPHERE\n marker.action = marker.ADD\n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.scale.z = 0.1\n marker.color.r = 1\n marker.color.g = 1\n marker.color.b = 1\n marker.color.a = 1.0\n marker.pose.orientation.w = 1.0\n marker.pose.position.x = position[0]\n marker.pose.position.y = position[1]\n marker.pose.position.z = position[2]\n self.marker_list.markers.append(marker)", "def __init__(self, position, is_horizontal, map_state):\n\n self.position = position\n self.spawn_position = position[:]\n self.in_spawn_area = True\n self.is_horizontal = is_horizontal\n self.map_state = map_state\n self.previous_direction = (0, 0)", "def get_new_position(cls, position_x, position_y, direction):\n new_position_x = cls.calculate_position_x(position_x, direction)\n new_position_y = cls.calculate_position_y(position_y, direction)\n return new_position_x, new_position_y", "def set_position():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, 
direction=function.IN, \n description = \"The new position vector of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n function.must_handle_array = True\n return function", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def __init__(self, *args):\n this = _ida_hexrays.new_ctext_position_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def setPosition(self,newPos):\n self._position = newPos", "def pos_gen(self, method = None):\n # Generate positions object\n pos = pos_class.pos(self._parent, method)\n # Save it to ICobj\n self._parent.pos = pos", "def add_position(self, position):\n if not hasattr(self, 'positions'):\n self.positions = list()\n self.append_to('positions', position)", "def getNewPosition(self, angle, speed):\n old_x, old_y = self.getX(), self.getY()\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = old_x + delta_x\n new_y = old_y + delta_y\n #print \"new x = \" + str(new_x) + \" new y \" + str(new_y)\n #print \"floor of x = \" + str(math.floor(new_x)) + \" floor of y = \" + str(math.floor(new_y))\n \n return Position(new_x, new_y)", "def __init__(self, *args):\n if len(args) == 1:\n position = args[0]\n if len(position) != 2:\n raise PositionError\n self._position = args\n elif len(args) == 2:\n self._position = args\n else:\n raise PositionError", "def position(self, pos: int):\n self.__pos = pos", "def __new_position(self):\n iterables = [range(self.size_x), range(self.size_y)]\n points = [] # Save all points in size.\n for point in itertools.product(*iterables):\n points.append(point)\n\n current_points = [] # Save used points.\n for object in self.objects:\n if (object.x, object.y) not in current_points:\n current_points.append((object.x, object.y))\n\n for point in current_points:\n points.remove(point) # Remove all used points.\n\n location = np.random.choice(a=range(len(points)), replace=False)\n return points[location]", "def position(self, x, y, z):\n self.curr_position = Vector([x, y, z])\n self.ptr.position(x, y, z)", "def update_position(position):\n pass", "def set_pos(self, p, a, **kwargs):\n\t\treturn self.send(\"set_pos\", p[0], p[1], a, **kwargs)", "def __init__(self, name, position):\n self.name = name\n self.position = position", "def __init__(self):\n self.position = 0", "def set_position(self):\n raise RuntimeError(\"the 'set_position' method must be overriden\")", "def __init__(self, size=0, position=(0, 0)):\n self.size = size\n self.position = position", "def __init__(self, size=0, position=(0, 0)):\n self.size = size\n self.position = position", "def __init__(self, size=0, position=(0, 0)):\n self.size = size\n self.position = position", "def createCell(self, xPos, yPos):\n self.cells.append(Cell(self.screen, xPos, yPos))", "def make_pose(self, position, orientation, frame):\n\n pose = PoseStamped()\n pose.header.frame_id = frame\n pose.pose.position.x = position[0]\n 
pose.pose.position.y = position[1]\n pose.pose.position.z = position[2]\n pose.pose.orientation.w = orientation[0]\n pose.pose.orientation.x = orientation[1]\n pose.pose.orientation.y = orientation[2]\n pose.pose.orientation.z = orientation[3]\n return pose", "def __init__(self, x, y):\n # assigning the initial position\n self.x = x\n self.y = y", "def __init__(self, position):\n self.position = position\n self.direction = 'U'\n self.length = 0", "def getNewPosition(self, angle, speed):\n old_x, old_y = self.getX(), self.getY()\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = old_x + delta_x\n new_y = old_y + delta_y\n return Position(new_x, new_y)", "def getNewPosition(self, angle, speed):\n old_x, old_y = self.getX(), self.getY()\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = old_x + delta_x\n new_y = old_y + delta_y\n return Position(new_x, new_y)", "def getNewPosition(self, angle, speed):\n old_x, old_y = self.getX(), self.getY()\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = old_x + delta_x\n new_y = old_y + delta_y\n return Position(new_x, new_y)", "def position_append(self, pos, gtid):\n return None", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def generate_positions(self):\n raise NotImplementedError(\"Should implement generate_positions()!\")", "def get_position(self, position):", "def fl_add_positioner(posittype, xpos, ypos, width, height, label):\n _fl_add_positioner = library.cfuncproto(\n library.load_so_libforms(), \"fl_add_positioner\",\n cty.POINTER(xfdata.FL_OBJECT), [cty.c_int, xfdata.FL_Coord,\n xfdata.FL_Coord, xfdata.FL_Coord, xfdata.FL_Coord, xfdata.STRING],\n \"\"\"FL_OBJECT * fl_add_positioner(int type, FL_Coord x, FL_Coord y,\n FL_Coord w, FL_Coord h, const char * label)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(posittype, \\\n xfdata.POSITIONERTYPE_list)\n i_posittype = library.convert_to_intc(posittype)\n i_xpos = library.convert_to_FL_Coord(xpos)\n i_ypos = library.convert_to_FL_Coord(ypos)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n s_label = library.convert_to_bytestrc(label)\n library.keep_elem_refs(posittype, xpos, ypos, width, height, label, \\\n i_posittype, i_xpos, i_ypos, i_width, i_height, s_label)\n retval = _fl_add_positioner(i_posittype, i_xpos, i_ypos, i_width, \\\n i_height, s_label)\n return retval", "def getRandomPosition(self):\n posX = np.random.uniform(0, self.width)\n posY = np.random.uniform(0, self.height)\n return Position(posX, posY)", "def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)", "def __init__(self, position, expiry_tick):\n self.position = position\n self.expiry_tick = expiry_tick", "def _test_set_new_pos(self):\n self.p.setup(siteInfo={}, source=None, device=self.device)\n time.sleep(0.3) if self.using_mock else time.sleep(3)\n act_position = self.device.getActPosition()\n offset = 0.5\n 
self.p.setOffset(offset)\n time.sleep(0.3) if self.using_mock else time.sleep(3)\n self.assertAlmostEqual(\n self.p.getPosition(), \n act_position + offset, \n places=1\n )\n self.assertAlmostEqual(\n act_position + offset, \n self.device.getActPosition(), \n places=1\n )", "def pos(self, x=-1, y=-1):\n self._p('[pos] {} {}'.format(x, y))", "async def set_position(self, pos: int) -> None:\n return await self.relay(\"set_position\")(pos=pos)", "def position(self):\r\n pass", "def createNew(cls, x0, y0, z0, a1, b1, c1, a2, b2, c2):\n p0 = Point(x0, y0, z0)\n d1 = Vector(a1, b1, c1)\n d2 = Vector(a2, b2, c2)\n return cls(p0, d1, d2)", "def set_position( self, posx, posy ):\n\n self.__foodx = posx\n self.__foody = posy", "def getNewPosition(self, angle, speed):\n old_x, old_y = self.getX(), self.getY()\n angle = float(angle)\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = old_x + delta_x\n new_y = old_y + delta_y\n return Position(new_x, new_y)", "def getNewPosition(self, angle, speed):\n old_x, old_y = self.getX(), self.getY()\n angle = float(angle)\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = old_x + delta_x\n new_y = old_y + delta_y\n return Position(new_x, new_y)", "def getNewPosition(self, angle, speed):\n old_x, old_y = self.getX(), self.getY()\n angle = float(angle)\n # Compute the change in position\n delta_y = speed * math.cos(math.radians(angle))\n delta_x = speed * math.sin(math.radians(angle))\n # Add that to the existing position\n new_x = old_x + delta_x\n new_y = old_y + delta_y\n return Position(new_x, new_y)", "def set_pos(self, newpos : list) :\n if len(newpos) == 2 :\n self.pos = list(newpos).copy()\n else :\n raise UserWarning('wrong position passed')", "def position(x, y):\n command([x + 0x80, y + 0x40])", "def __init__(self, pos=(SCREEN_X//2, SCREEN_Y//2)):\n self.heading = \"right\"\n self.speed = 4\n self.length = 32\n self.size = 16\n self.color = COLOR\n self.pos = pos\n (self.x_coord, self.y_coord) = ([], [])\n self.displacement = 0\n for _ in range(self.length):\n self.x_coord.append(self.pos[0] - self.displacement)\n self.y_coord.append(self.pos[1])\n self.displacement += 4" ]
[ "0.847801", "0.8391693", "0.8154143", "0.8154143", "0.7296945", "0.692898", "0.678412", "0.6775694", "0.67575675", "0.6718587", "0.6701663", "0.65915143", "0.6572498", "0.65635514", "0.65402424", "0.65402424", "0.65402424", "0.65402424", "0.65402424", "0.647187", "0.64189696", "0.6401512", "0.63681185", "0.6355446", "0.63266635", "0.6285665", "0.6250105", "0.6249137", "0.62480664", "0.62462413", "0.6242256", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.62356055", "0.6231228", "0.62137055", "0.6211468", "0.6206496", "0.6205113", "0.619186", "0.618599", "0.6185304", "0.6185304", "0.6185304", "0.6185304", "0.6185304", "0.6185304", "0.6185304", "0.6185304", "0.6181098", "0.6168401", "0.61606675", "0.61606044", "0.61493516", "0.61416507", "0.6122833", "0.61139095", "0.61060655", "0.60843647", "0.60666645", "0.6062765", "0.60545564", "0.6046288", "0.60432833", "0.5992293", "0.5992293", "0.5987201", "0.5963765", "0.5948354", "0.5941344", "0.5939529", "0.5939529", "0.5939529", "0.5938988", "0.59183604", "0.59172165", "0.5911951", "0.5904317", "0.5901551", "0.58985895", "0.58983266", "0.58979976", "0.58956194", "0.5893542", "0.58880293", "0.5878515", "0.5874195", "0.5873715", "0.5873715", "0.5873715", "0.58702093", "0.5866985", "0.5862936" ]
0.0
-1
Display position in a user-friendly manner.
def __repr__(self):
    return "(" + str(self.x) + "," + str(self.y) + ")"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def position(self):\r\n pass", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def display_coordinates(self) -> None:\n\n print('Y coordinate: ', self.player.row_position + 1)\n print('X coordinate: ', self.player.column_position + 1)", "def get_position(self, position):", "def positioning(self):\n pass", "def show_position(self):\n self.anc350_instrument.update_all_positions()\n\n self.current_positions = self.anc350_instrument.current_positions\n\n self.gui.label_actualPositionX.setText(str(self.current_positions['XPiezoStepper']))\n self.gui.label_actualPositionY.setText(str(self.current_positions['YPiezoStepper']))\n self.gui.label_actualPositionZ.setText(str(self.current_positions['ZPiezoStepper']))\n\n self.gui.scan_positionX.setText(str(self.current_positions['XPiezoScanner']))\n self.gui.scan_positionY.setText(str(self.current_positions['YPiezoScanner']))\n self.gui.scan_positionZ.setText(str(self.current_positions['ZPiezoScanner']))", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def display(self):\n mg_w = self.width\n mg_h = self.height\n str_to_prt = \"\\n\" * self.y + (\" \" * self.x + \"#\" * mg_w + '\\n') * mg_h\n print(str_to_prt[:-1])", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def draw_position_label(self):\n x, y, z = self.player.pos\n x, y, z = math.floor(x), math.floor(y), math.floor(z)\n self.label.text = 'FPS: {}, X: {}, Y: {}, Z: {}'.format(round(pyglet.clock.get_fps()), x, y, z)\n self.label.draw()", "def printPosition(self, node):\n self.node = str(node)\n \n self.pos_x = self.nodePosition[self.node][0]\n self.pos_y = self.nodePosition[self.node][1]\n self.pos_z = self.nodePosition[self.node][2] \n print \"----------------\\nPosition of %s\\n----------------\\nPosition X: %.2f\\nPosition Y: %.2f\\nPosition Z: %.2f\\n\" % (self.node, float(self.pos_x), float(self.pos_y), float(self.pos_z))", "def display(self):\n prow = self.__width * '#'\n nstr = self.y * \"\\n\"\n for x in range(self.__height):\n nstr += self.x * \" \"\n nstr += prow\n if x == (self.__height - 1):\n break\n nstr += \"\\n\"\n print(nstr)", "def position(self):\n return self.__position", "def my_print(self):\n length = self.__size\n\n if self.__size == 0:\n print(\"\")\n\n \"\"\"Print using position of y-axis.\"\"\"\n for i in range(self.__position[1]):\n print(\"\")\n for j in range(length):\n \"\"\"Print spaces and # in x-axis.\"\"\"\n print((\" \" * self.__position[0]) + (\"#\" * length))", "def display(self):\n\n print(\"\\n\" * self.__y, end='') # y offset\n\n for i in range(self.__height):\n print(\" \" * self.__x, end='') # x offset\n print(\"#\" * self.__width)", "def __str__(self) -> str:\n position = self.get_position()\n return f\"Baby at position ({position[0]}, {position[1]}) (row, col)\"", "def display(self):\n for i in range(self.__y):\n print()\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)", "def get_pos(self):\r\n return self.pos", "def show_status(self):\n color = (255, 255, 255)\n w, h = self.width, self.height\n x, y = self.pos_shift\n self.put_text(\"scale 
factor: %.2E\" % SCALE_FACTOR,\n color, (x, y))\n self.put_text(\"G: %.7E\" % G,\n color, (x, y + 25))\n self.put_text(\"number of objects: %d\" % len(self.phy.objects),\n color, (x, y + 50))\n self.put_text(\"x: %d\" % x,\n color, (w + x - 100, h + y - 50))\n self.put_text(\"y: %d\" % y,\n color, (w + x - 100, h + y - 25))", "def cmd_position(self, n, e, d, heading):\n pass", "def position(self):\n return self._position", "def print_position(position):\n print('Packet Number # %s' % position)", "def position(self):\n return (self.__position)", "def get_position(self):\n return self.position", "def get_position(self):\n return self.position", "def display(self):\n for b in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x + \"#\" * self.width)", "def get_pos(self):\n return self.pos", "def get_position(self):\n raise NotImplementedError()", "def print_pos(pos):\n # TO DO: EXCLUDE FIRST LINE\n s = \"%BLOCK POSITIONS_FRAC\\n\" + str(pos) + \"\\n%ENDBLOCK POSITIONS_FRAC\"\n return s", "def __str__(self):\r\n x, y, z = self.pos\r\n return self.label + f\" {x} {y} {z}\"", "def position(self):\r\n return self._position", "def send_position(self):\r\n return '{},{}'.format(self.x_Pos, self.y_Pos)", "def position(self):\n return self.__position", "def position(self):\n return self.__position", "def position(self):\n return self.__position", "def display(self):\n print('\\n' * (self.__y), end='')\n for point in range(self.__height):\n print(' ' * self.__x, end='')\n for point in range(self.__width - 1):\n # print(' ' * self.__x, end='')\n print('#', end='')\n print('#')", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def display(self):\n row = (' ' * self.__x) + (Rectangle.print_symbol * self.__width) + '\\n'\n print(('\\n' * self.__y) + (row * self.__height), end=\"\")", "def GetPosition(self):\n ...", "def display(self):\n print(\"\\n\" * self.__y, end=\"\")\n for i in range(self.__height):\n print(\" \" * self.__x, end=\"\")\n print(\"#\" * self.__width, end=\"\")\n print()", "def getPosition(self):\n return self.x", "def pos(self, x=-1, y=-1):\n self._p('[pos] {} {}'.format(x, y))", "def pos(self) -> str:\n return self._pos", "def get_aa_pos_on_screen(self,position,frame):\n position=position*3+float(frame)-1\n x,y=self.get_base_pos_on_screen(position)\n y=y+20.0+float(frame)*15.0\n return x,y", "def position(self):\n return self.x, self.y", "def setPosition(position):", "def display(self):\n print(\"\\n\" * self.__y, end='')\n for row in range(self.__height):\n if self.__x:\n print(\" \" * (self.__x), end='')\n if self.__width:\n print(\"#\" * self.__width)", "def show_mouse_position_with_px(self):\n self.main_menu_greets_fonts = pygame.font.Font(os.path.join(PATH_TO_RESOURCE, 'font_forever.ttf'), 10)\n self.positiontext(f'Mouse position {pygame.mouse.get_pos()}', (770, 20))\n self.mouse = pygame.mouse.get_pos()\n return self.mouse", "def display(self):\n map_show = self.map.copy()\n map_show[self.currY, self.currX] = 4 \n print(map_show)", "def position(self):\n return self.properties.get('position')", "def pos(self, x, y):\n\n if isinstance(x, float):\n x = int(x)\n\n self.screen.write(colorama.Cursor.POS(x, y), ansi=True)\n self.x = x\n self.y = y\n\n return x, y", "def display(self):\n for _jumpline in range(self.y):\n print(end=\"\\n\")\n for _height in range(self.height):\n for _space in 
range(self.x):\n print(\" \", end=\"\")\n for _width in range(self.width):\n print(\"#\", end=\"\")\n print(end=\"\\n\")", "def display(self):\n print('')\n print(\" ---------------------------------\")\n counter = 0\n for row in self.positions:\n counter += 1\n line = f'{counter}: |'\n for space in row:\n if isinstance(space, str):\n line += f' {space} |'\n else:\n starter = ' '\n ender = '|'\n if space.team == 'white':\n piece = stylize(space.symbol+' ', colored.fg(\"light_blue\"))\n else:\n piece = stylize(space.symbol+' ', colored.fg(\"light_red\"))\n line += starter+piece+ender\n print(line)\n print(\" ---------------------------------\")\n print(\" | A | B | C | D | E | F | G | H |\\n\")", "def my_print(self):\n if self.__size is not 0:\n for ite in range(self.__position[1]):\n print()\n for ite in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.size)\n else:\n print()", "def update_position(position):\n pass", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def position(self):\n return self._position", "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def get_position(self):\n return self.__position", "def show( self):\n def symbol( i):\n return i<0 and (i==-2 and ' ' or '0') or chr(ord('a') + i)\n \n X, Y = np.max( self.board.positions, 0)\n # -2 to indicate outside board.\n display = np.zeros( (X+1,Y+1), dtype=int) - 2 \n for x, y in self.board.positions:\n display[x, y] = -1 # -1 to indicate unoccupied\n for p, i in self.occupation.items():\n x, y = self.board.positions[p]\n display[x, y] = i\n for x in xrange(X+1):\n s = ''.join( [ symbol( display[x, y]) for y in xrange(Y+1) ])\n print s", "def display(self):\n print(\"\\n\" * self.y, end=\"\")\n for i in range(self.height):\n print(\" \" * self.x, end=\"\")\n for j in range(self.width):\n print(\"#\", end=\"\")\n print()", "def pos(self):\n return self._position", "def updateScreenText(self) :\n\t\t# Update the text showing avatar's position\n\t\th = self.avatarNP.getH()\n\t\tif h < -180 : h += 360\n\t\telif h > 180 : h -= 360\n\t\tself.avPos.setText(\"Your location (x, y, dir): (%d, %d, %d)\"%\\\n\t\t\t(self.avatarNP.getX(), self.avatarNP.getY(), h))\n\n\t\t# update text showing visible avatars\n \t\tself.showNumVisible.setText(\"Visible avatars: \" + \\\n\t\t\t\t\t self.updateVisList())", "def position(self):\n return self.source.position + self.position_relative", "def get_position(self):\n return self._pos", "def get_position(self):\n return self._pos", "def my_print(self):\n if self.size == 0:\n print(\"\")\n return\n for j in range(self.__position[1]):\n print(\"\")\n for i in range(self.size):\n if self.__position[0] > 0:\n print(\" \" * self.__position[0], end=\"\")\n print('#' * self.size)", "def position(self):\n\n\t\treturn self._window.position", "def __str__(self):\n return str(\"{0} {1} {2} {3}\".format(self.label, self.position[0], self.position[1], self.position[2]))", "def get_base_pos_on_screen(self,position):\n\n return 
self.seq_xstart+float(position-1)*self.base_scale.get(),self.seq_row", "def print_info(self):\n\n print \"parent:\\t {0}\".format(self.parent)\n print \"value:\\t {0}\".format(self.value)\n \n #children\n print \"posXposYposZ: \\t {0}\".format(self.posXposYposZ)\n print \"posXposYnegz: \\t {0}\".format(self.posXposYnegZ)\n print \"posXnegYposZ: \\t {0}\".format(self.posXnegYposZ)\n print \"posXnegYnegZ: \\t {0}\".format(self.posXnegYnegZ)\n print \"negXposYposZ: \\t {0}\".format(self.negXposYposZ)\n print \"negXposYnegZ: \\t {0}\".format(self.negXposYnegZ)\n print \"negXnegYposZ: \\t {0}\".format(self.negXnegYposZ)\n print \"negXnegYnegZ: \\t {0}\".format(self.negXnegYnegZ) \n\n #position in space\n print \"Xupperlimit: \\t {0}\".format(self.Xupperlimit)\n print \"Yupperlimit: \\t {0}\".format(self.Yupperlimit)\n print \"Zupperlimit: \\t {0}\".format(self.Zupperlimit)\n \n print \"Xlowerlimit: \\t {0}\".format(self.Xlowerlimit)\n print \"Ylowerlimit: \\t {0}\".format(self.Ylowerlimit)\n print \"Zlowerlimit: \\t {0}\".format(self.Zlowerlimit)\n\n print \"Xcenter: \\t {0}\".format(self.Xcenter)\n print \"Ycenter: \\t {0}\".format(self.Ycenter)\n print \"Zcenter: \\t {0}\".format(self.Zcenter)", "def my_print(self):\n if self.__size > 0:\n print(\"\\n\" * self.__position[1], end=\"\")\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.__size)\n else:\n print()", "def display(self):\n for space in range(self.y):\n print('')\n for row in range(self.height):\n for x in range(self.x):\n print(' ', end='')\n for col in range(self.width):\n print('#', end='')\n print('')", "def display(self):\n\n #player UI\n s = \" \"\n for p in range(WIDTH):\n s += str(p)\n s += \" \"\n\n print(s)\n\n for row in range(HEIGHT):\n\n # player UI\n print(row, end=' ')\n\n for col in range(WIDTH):\n\n if self.board[row][col] == 1:\n print(\"X\", end=' ')\n elif self.board[row][col] == 2:\n print(\"O\", end=' ')\n else:\n print(\"-\", end=' ')\n print()", "def draw_pos_text(self, text):\n fw, fh = self.font.size(text) # fw: font width, fh: font height\n surface = self.font.render(text, True, (0, 255, 0))\n # // makes integer division in python3\n self.screen.blit(surface, ((self.width - fw) // 2, (self.height - fh) // 2))", "def display(self):\n for r in range(len(self.grid)):\n for c in range(len(self.grid[r])):\n if (r, c) == self.location:\n print('\\033[96m*\\x1b[0m', end=' ') # print a blue *\n else:\n print(self.grid[r][c], end=' ') # prints a space or wall\n print()\n print()", "def x(self):\r\n return self.position.x", "def __repr__(self):\n s = \" position:\" + str(self.pos) + \"\\n\"\n s += \" heading: \" + str(self.heading) + \"\\n\"\n return s", "def _get_pos(self):\n return self._pos", "def _print(self):\n print('center :', self.center, ' widht : ', self.width, ' height : ', self.height, ' heat : ', self.heat,\n ' speed ', self.speed)", "def display(self):\n print(\"\\n\" * self.y, end='')\n for i in range(self.height):\n for j in range(self.width + self.x):\n if j < self.x:\n print(' ', end='')\n else:\n print('#', end='')\n print('')", "def position(self):\n if self.p:\n if self._finished:\n return None\n return self.p.get_position()*10", "def render(self, **kwargs):\n return print(self.game.agent_positions)", "def _repr_(self):\n if self.is_multitape:\n pos = tuple(p for p, t in sorted(self.position, key=lambda x: x[1]))\n return 'multi-tape at %s' % (pos,)\n else:\n return 'tape at %s' % (self.position[0][0],)", "def display_player_points():\r\n pass", 
"def show(self, screen):\n x_display = self.xy_position[0] * constants.CELL_SIZE\n y_display = self.xy_position[1] * constants.CELL_SIZE\n screen.blit(self.image, (x_display, y_display))", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):" ]
[ "0.7464939", "0.7350452", "0.7230497", "0.71928203", "0.7168089", "0.71171767", "0.6995238", "0.6969509", "0.6844631", "0.68162245", "0.6798185", "0.6786135", "0.665452", "0.6650976", "0.66377974", "0.66324097", "0.6619713", "0.66189533", "0.6613451", "0.6598675", "0.65528977", "0.6537376", "0.65082747", "0.64998335", "0.64998335", "0.64962435", "0.6449015", "0.644721", "0.64445615", "0.6430174", "0.6427575", "0.6423468", "0.6423391", "0.6423391", "0.6423391", "0.6422447", "0.6422315", "0.6421346", "0.6417551", "0.6393669", "0.6389788", "0.6381125", "0.6378031", "0.63752234", "0.6336609", "0.6324888", "0.6320327", "0.63165134", "0.6311758", "0.63042736", "0.63007814", "0.629824", "0.6288609", "0.6285713", "0.62814677", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.6281119", "0.628028", "0.6264021", "0.62582684", "0.6254786", "0.62465906", "0.6230786", "0.62282485", "0.62277746", "0.62277746", "0.62233216", "0.62222004", "0.6220652", "0.6219756", "0.62193096", "0.6209869", "0.62092936", "0.6208698", "0.62008286", "0.6199561", "0.6197475", "0.6196163", "0.6192391", "0.61830014", "0.6179132", "0.6178247", "0.61711055", "0.6157396", "0.61521596", "0.61519516", "0.61495537", "0.61495537", "0.61495537", "0.61495537", "0.61495537" ]
0.0
-1
Returns a tuple of position in (x,y) form.
def get(self):
    return (self.x, self.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_position(self) -> typing.Tuple[int, int]:\n raise NotImplementedError", "def get_pos(self):\n return (self.x, self.y)", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def get_pos(self) -> tuple:\n return self.pos", "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def get_position(self) -> Tuple[int]:\n return self.position.copy()", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)", "def position_tuple_for_index(self, index):\n x = self.base_values[index % self.size]\n y = self.base_values[index // self.size]\n return x, y", "def xy(self) -> Tuple[int, int]:\n return self._x, self._y", "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)", "def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)", "def position(self):\n return self.x, self.y", "def getIntPos(self):\n return (int(self.pos[0]),int(self.pos[1]))", "def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col", "def position(self):\n return self._x, self._y", "def get_current_position(self) -> Tuple[int, int]:\n return self.__row_position, self.__col_position", "def coordinate(self) -> Tuple[float, float]:\n return self.lat, self.lon", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def coord (i, j):\r\n return j, i", "def position(self):\n return (self.__position)", "def index_from_position_tuple(self, position):\n x = self.base_values.index(position[0])\n y = self.base_values.index(position[1])\n return y * self.size + x", "def get_pick_position(self):\n x0 = int(self.GetPickX1())\n x1 = int(self.GetPickX2())\n y0 = int(self.GetPickY1())\n y1 = int(self.GetPickY2())\n return x0, y0, x1, y1", "def position(square):\n first = square[0]\n second = square[1]\n col = parseCol(first)\n row = parseRow(second)\n return (row, col)", "def getTuple(self):\n return self.position.exportToTuple()", "def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)", "def get(self):\r\n return ((self.x, self.y), self.dir)", "def getXY(self):\n return (self.X,self.Y)", "def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng", "def tuple(self) -> Tuple[float, float]:\n return (self.latitude, self.longitude)", "def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])", "def value(self) -> tuple:\n return self._position", "def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y", "def pixelcoord(coordx: float, coordy: float) -> Tuple[int, int]:\n ox, oy = origin()\n x, y = int(round(ox+coordx)), int(round(oy-coordy))\n return (x, y)", "def coordinate(self):\n\t\tif self.boldness_coord is None and self.price_coord is None and self.hold_coord is None:\n\t\t\treturn None\n\n\t\treturn (self.boldness_coord, self.price_coord, self.hold_coord)", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def get_location(self):\r\n return self.__x, self.__y", "def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need 
to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos - i + len(line) + 1\n break\n return (cursor_col, cursor_row)", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def getBallPos(self) -> (int,int):\n return self.x, self.y", "def get_coordinates(num: int) -> tuple:\r\n return num * math.sin(num), num * math.cos(num)", "def get_position(self):\n return [self._row, self._column]", "def restored_position(self) -> Tuple[int, int]:\n x_n, y_n = self.normalized_position\n w, h = self.region.original_size\n return int(x_n * w), int(y_n * h)", "def restored_position(self) -> Tuple[int, int]:\n x_n, y_n = self.normalized_position\n w, h = self.region.original_size\n return int(x_n * w), int(y_n * h)", "def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m", "def local_coordinates(self, position: np.ndarray) -> Tuple[float, float]:\n raise NotImplementedError()", "def coords2D(self):\n return (self.x, self.y)", "def get_position(self, position):", "def HTSeq_pos_to_tuple(HTSeq_pos):\n try:\n chrom = HTSeq_pos.chrom\n except AttributeError:\n raise MutantError(\"Invalid position %s! Need an HTSeq iv object. (If empty, maybe read wasn't aligned?)\"%(HTSeq_pos,))\n strand = HTSeq_pos.strand\n # HTSeq is 0-based and I want 1-based, thus the +1; end has no +1 because in HTSeq end is the base AFTER the alignment.\n start_pos = HTSeq_pos.start+1\n end_pos = HTSeq_pos.end\n output_pos = (chrom, start_pos, end_pos, strand)\n check_valid_position_tuple(output_pos)\n return output_pos", "def to_tuple(self):\n return (self.row_start, self.row_end, self.col_start, self.col_end)", "def get_frame_coordinates(self, i: int) -> Tuple[Tuple[int]]:\n return (self.get_frame_x(i), self.get_frame_y(i))", "def getPos(self):\n return self.Xpos,self.Ypos", "def get_point(self):\n return self._x, self._y", "def coords(self):\n return (self.x, self.y, self.z)", "def coords(self):\n return (self.x, self.y, self.z)", "def last_pos(self) -> tuple[int, int]:\n if not self.actions:\n return (self.start_x, self.start_y)\n else:\n box = self.get_hitbox_at(self.time_consumed)\n return box.pos_x, box.pos_y", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def get_pos(self) -> tuple:\n return self.rect.center", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def get_position(self):\n return parsegeometry(self.geometry())[2:]", "def get_position(self, cell) -> tuple:\n for i, row in enumerate(self.cells):\n if cell in row:\n return row.index(cell), i\n if not isinstance(cell, Cell):\n raise TypeError(f\"Argument should be of type 'Cell', not '{cell.__class__.__name__}'.\")\n raise ValueError(\"The given cell is not a part of the grid.\")", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - 
r, cx + r, cy + r)", "def coordinates(self, xy_pairs=False):\n\n if xy_pairs:\n return tuple(zip(self._coordinates[:-1:2], self._coordinates[1::2]))\n else:\n return tuple(self._coordinates)", "def coordinates(self, xy_pairs=False):\n\n if xy_pairs:\n return tuple(zip(self._coordinates[:-1:2], self._coordinates[1::2]))\n else:\n return tuple(self._coordinates)", "def get_pos(self):\n return [self.row, self.col]", "def pixel_coords_to_pos(i, j, maze_size):\n maze_border = ((maze_size - 1) // 2) * BLOCK_PIXEL_SIZE\n pos_x = (i + maze_border) // BLOCK_PIXEL_SIZE\n pos_y = (maze_border - j) // BLOCK_PIXEL_SIZE\n\n return int(pos_x), int(pos_y)", "def getPosition(self):\n return self.x", "def get_position_on_game(self):\n return (self.peg, self.position_on_peg)", "def handle_position(data: bytes) -> Tuple[bytes, str]:\n x, y, z = struct.unpack('fff', data[0:3 * 4])\n return data[20:], f'Current Position (x,y,z): {x} {y} {z}'", "def get_xy_position(gcode_command):\n\tpattern = re.compile(r\"^G\\dX(\\d+(?:\\.\\d*)?|\\.\\d+)Y(\\d+(?:\\.\\d*)?|\\.\\d+)F(\\d+)$\") # using non-capturing groups\n\tm = pattern.match(gcode_command) # no need for re.search() because we have a complete pattern\n\t(x, y) = m.group(1,2)\n\ttry:\n\t\tx = float(x)\n\t\ty = float(y)\n\texcept:\n\t\traise Exception(\"Could not convert X and Y to floats. X={}, Y={}\".format(x,y))\n\tx_pixel = round(x/X_STEP) # round to nearest int\n\ty_pixel = round(y/Y_STEP*12)\n\treturn (x_pixel, y_pixel)", "def point2pos(self, point):\n row = self._vim.eval('byte2line({})'.format(point))\n col = self._vim.eval('{} - line2byte({})'.format(point, row))\n return (int(row), int(col))", "def getCoords(self):\n if self._ra == \"\" or self._dec == \"\":\n raise ValueError('Object named ' + self._name +' has no coordinates in database.')\n ra = self._ra.split(\":\")\n dec = self._dec.split(\":\")\n raTuple = (int(ra[0]), int(ra[1]), float(ra[2]))\n decTuple = (dec[0][0], int(dec[0][1:]), int(dec[1]), float(dec[2]))\n return raTuple, decTuple", "def to_coords(self, px, py):\n if px not in range(self.SIZE**2) or py not in range(self.SIZE**2):\n raise IndexError\n return (px // self.SIZE, py // self.SIZE,\n px % self.SIZE, py % self.SIZE)", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)", "def get_position(self, row, column):\n position_key = \"{}{}\".format(row, column)\n return self.positions[position_key]", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def GetPosition(board):\n\tfor i in range(len(board.matrix)):\n\t\tfor j in range(len(board.matrix[i])):\n\t\t\tif board.matrix[i][j]==\"X\":\n\t\t\t\treturn i,j", "def __get_position(self, value, state):\n coords = np.argwhere(state == value).flatten()\n return coords", "def ind2coord(self, index):\n\n # assert (index >= 0)\n # assert(index < self.n - 1)\n\n col = index // self.rows\n row = index % self.rows\n\n return [row, col]", "def get_coords(self):\n xTK = int(jeu.coords(self.rectangle)[0]) # Coordonnées TKinter x1 et y1 du rectangle correspondant à la voiture\n yTK = int(jeu.coords(self.rectangle)[1])\n # On divise par la largeur d'une case et on renvoie les valeurs obtenues sous la forme d'un tuple\n X = xTK//100\n Y = yTK//100\n resultat = [X, Y]\n return resultat", "def get_location(self) -> tuple:\r\n if self.data is None:\r\n return (None, 
None)\r\n \r\n lat = self.data['GPSInfo']['GPSLatitude']\r\n lon = self.data['GPSInfo']['GPSLongitude']\r\n \r\n # Convert from Degrees, minutes, seconds to standard form\r\n latitude = (lat[0][0] / lat[0][1]) \\\r\n + (lat[1][0] / lat[1][1] / 60) \\\r\n + (lat[2][0] / lat[2][1] / 3600)\r\n \r\n longitude = (lon[0][0] / lon[0][1]) \\\r\n + (lon[1][0] / lon[1][1] / 60) \\\r\n + (lon[2][0] / lon[2][1] / 3600)\r\n\r\n # Adjust for direction references\r\n if self.data['GPSInfo']['GPSLatitudeRef'] == 'S':\r\n latitude *= -1\r\n\r\n if self.data['GPSInfo']['GPSLongitudeRef'] == 'W':\r\n longitude *= -1\r\n\r\n return (round(latitude, 6), round(longitude, 6))", "def element_coordinates(self, element):\n out = (0,0,0,0)\n if 'title' in element.attrib:\n matches = self.boxPattern.search(element.attrib['title'])\n if matches:\n coords = matches.group(1).split()\n out = (int(coords[0]),int(coords[1]),int(coords[2]),int(coords[3]))\n return out", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def position(self, x, y):\n if self.portrait:\n # HMSB\n index = (x + y * self.size[0]) >> 3\n offset = 7 - (x & 0x07)\n else:\n # VMSB\n index = (y >> 3) * self.size[0] + x\n offset = 7 - (y & 0x07)\n return index, offset", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def index_to_position(self, index):\n col = index % self._grid_size\n row = index // self._grid_size\n return row, col", "def pixel_to_position(self, pixel):\n x, y = pixel\n return y // 60, x // 60", "def position_tuples(self, protein=False):\n if protein:\n if not self.is_coding():\n raise AttributeError(\n \"Cannot return wild type protein \"\n \"position tuples for non-coding wild \"\n \"type [{}]\".format(self.parent_name)\n )\n else:\n seq = self.protein_seq\n offset = self.protein_offset\n else:\n seq = self.dna_seq\n offset = self.dna_offset\n\n return [(i + offset + 1, seq[i]) for i in range(len(seq))]", "def get_position(self):\n return self._find_gnx_node(self.gnx)", "def getPosition(self):\n return self.target, min(self.points), max(self.points)", "def _get_coordinates(self, tile, position=None):\n if not position:\n position = self.position\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if position[i][j] == tile:\n return i, j\n\n return RuntimeError('Invalid tile value')", "def position(self):\n return self._position", "def get(self):\n return self.x, self.y" ]
[ "0.8117628", "0.80366904", "0.80062807", "0.7994267", "0.79300076", "0.7879146", "0.78181684", "0.7731477", "0.7600716", "0.756264", "0.7516764", "0.75011235", "0.7460002", "0.7377023", "0.7375958", "0.73217714", "0.7272441", "0.72643983", "0.72562724", "0.7233895", "0.72307074", "0.71828496", "0.7123727", "0.7054602", "0.7052914", "0.7042365", "0.7036704", "0.70213485", "0.7018248", "0.7017193", "0.7015173", "0.7001802", "0.6992793", "0.69848615", "0.69812334", "0.6967557", "0.6961377", "0.69490266", "0.69485974", "0.693001", "0.6924752", "0.6922837", "0.6919948", "0.6919572", "0.68733644", "0.68733644", "0.68354005", "0.6828863", "0.6825407", "0.6818702", "0.6810405", "0.6787162", "0.67666453", "0.67547655", "0.67503184", "0.67478", "0.67478", "0.6747612", "0.67448866", "0.67448866", "0.67448866", "0.6697253", "0.66929734", "0.66929734", "0.6686259", "0.6679175", "0.6671447", "0.66625005", "0.66625005", "0.6650577", "0.6640228", "0.6635979", "0.66147375", "0.6605124", "0.66009676", "0.65960014", "0.6588954", "0.6584113", "0.6552553", "0.65499073", "0.6548126", "0.65393734", "0.65221155", "0.6518585", "0.6497022", "0.6496041", "0.6492163", "0.64913315", "0.64774394", "0.645816", "0.6458005", "0.6458005", "0.6456314", "0.6448156", "0.64378923", "0.6429952", "0.64292616", "0.6426562", "0.6426109", "0.6423071" ]
document_score: 0.7212456
document_rank: 21
query: Displays a string in a user-friendly manner.
document: def __repr__(self): return "(" + str(self.pos) + "," + str(self.color) + ")"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def see(s):\n print(\"---- %s -----\" % s)", "def _message_display(string):\n print(\"========================================\")\n print(string)\n print(time.asctime(time.localtime(time.time())))\n print(\"========================================\")", "def show_on_screen(self, string, location, font='Arial', font_size=20, colour=WHITE):\n msg = pygame.font.SysFont(font, font_size).render(str(string), True, colour)\n self.screen.blit(msg, location)", "def __str__(self): # pragma: no cover\n return self.display()", "def _display_message(message: str) -> None:\n print(message)", "def display(self,message):\r\n \r\n print(message)", "def _display(s):\n if not isinstance(s, unicode):\n s = s.decode(\"utf-8\")\n s = _indent(_escaped_text_from_text(s, \"whitespace\"), 4)\n if not s.endswith('\\n'):\n s += '\\n'\n return s", "def displayMessage(j, s):\n if j:\n j.DisplayMessage(\"info\", s.replace(\" \", \"&nbsp;\").replace(\"j.\", \"\", 1))\n print(\" \" + s)", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if self.font else self.text", "def print_(self, s: str) -> None:", "def display_message():\n msg = \"I'm learning to store code in functions.\"\n print(msg)", "def message(self, string):\n print (string)", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def display_text(self, text):\n self.write_to_serial(':DISP:TEXT \\'' + text + '\\'')", "def show(self, stdscr):\n stdscr.addstr(*self.pos, self.asset)", "def code_display(self):\r\n return u'%s-%s-%s' % (self.code[:3], self.code[3:6], self.code[6:])", "def render_string(self, template: str, **vars) -> str:", "def DrawStringAt(self, x, y, s, color=(229, 153, 153, 255)):\r\n self.screen.blit(self.font.render(s, True, color), (x, y))", "def display_message():\n\tmessage = \"I'm learning how to use function.\"\n\tprint(message)", "def drawString(text: str):\n pass", "def displayString(*args, delete: bool=True, exists: bool=True, keys: bool=True, replace:\n bool=True, value: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def display(self) -> str:\n lines, _, _, _ = self._display_aux()\n return '\\n'.join(lines)", "def print_string(self, s):\n self._write('%s\\n' % s)", "def display_message(self, message):\n text = self.font.render(message, True,\n self.display_states[self.display_names[self.current_display_state]]['text'])\n temp_width = text.get_rect().width\n self.gameDisplay.blit(text, ((self.SCREEN_WIDTH / 2) - (temp_width/2), 100))", "def title(self, string):\n return self.bold(string)", "def display_message(self, message):\n\t\tself.render('message.html', {'message': message})", "def output_str(string:str) -> str:\n print(string)", "def printString(self):\n if hasattr(self, 'string'):\n print self.string.upper()", "def disp_msg(msg):\n from x84.bbs import getterminal, echo\n term = getterminal()\n echo(u''.join((u'\\r\\n\\r\\n',\n term.bold_yellow('%s ' % (msg,),),\n term.yellow_reverse_bold(u'...'),)))", "def printout(string):\n print(string)", "def __str__(self):\n bold = \"*\" if self.bold else ''\n italic = \"/\" if self.italic else ''\n underline = \"_\" if self.underline else ''\n return bold + italic + underline + self.character", "def display_unicode(self, string):\n if string is None:\n return ''\n return string.decode(\"utf16\", \"ignore\").encode(\"ascii\", 'backslashreplace')", "def shout():\n # Concatenate the strings: shout_word\n shout_word='congratulations'+'!!!'\n\n # Print shout_word\n print(shout_word)", 
"def display_string(text_area_no: int) -> str:\n if text_area_no == 1:\n text = ''\n for v in utterances['P1']:\n text += v + '\\n'\n return text\n elif text_area_no == 2:\n text = ''\n for v in utterances['S1']:\n text += v + '\\n'\n return text\n elif text_area_no == 3:\n text = ''\n for v in utterances['S2']:\n text += v + '\\n'\n return text\n elif text_area_no == 4:\n text = ''\n for v in utterances['S3']:\n text += v + '\\n'\n return text\n elif text_area_no == 5:\n text = ''\n for v in utterances['S4']:\n text += v + '\\n'\n return text\n elif text_area_no == 6:\n text = ''\n for v in utterances['S4']:\n text += v + '\\n'\n return text\n elif text_area_no == 7:\n text = ''\n for v in utterances['C1']:\n text += v + '\\n'\n return text\n elif text_area_no == 8:\n text = ''\n for v in utterances['C2']:\n text += v + '\\n'\n return text", "def the_display(self):\r\n return f\"\"\"\r\n {self.display[0]}\\n\r\n {self.display[1]}\\n\r\n {self.display[2]}\\n\r\n {self.display[3]}\\n\r\n {self.display[4]}\\n\r\n \"\"\"", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def show_word(self):\n self.display_word = len(self.chosen_word) * \"_ \"\n Donatello.draw_word(self.display_word)\n return self.display_word", "def display_message():\n message = \"I am learning about functions, function calls, parameters and \"\n message+= \"arguments.\"\n print(message)", "def show_text(text, colour):\n message = font_style.render(text, True, colour)\n dis.blit(message, [game_size_x/2, game_size_y/2])", "def display_message():", "def __str__(self) -> str:\r\n return self.process(self.string)", "def shout():\n # Concatenate the strings: shout_word\n shout_word = 'congratulations'+'!!!'\n\n # Print shout_word\n print(shout_word)", "def say(string):\n print(\"\\n\" + string)", "def sprint(string=\"string\"):\n global screen\n screen += string", "def strprint(self, mystr):\n if self.is_verbose is True:\n print(mystr)\n else:\n pass\n return", "def render_help(self, text):\n if text and text.strip():\n self.enable_help_toggle()\n style = \"display: %s;\" % (\"block\" if self.help_visible else \"none\")\n c = self.render_div(text.strip(), class_=\"help\", style=style)\n return c\n return \"\"", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def display(self, message=\"\"):\n print(\"-\" * (79 - len(message)), end=\" \")\n print(message)\n if self.mat is None:\n print(\"None\")\n else:\n print(self.__repr__())\n print(\"=\" * 80)", "def display_message():\n\tprint(\"In this chapter we will be learning how to write functions\")", "def __str__(self):\n return 'str-human.%s' % self.name", "def render_string(_str):\n\t\treturn str.encode(_str)", "def bye():\r\n return \"<p>Bye World! 
<p>\"", "def showme(message):\n print(message)", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def display_s(s, font, screen, lcd, size=5, x=0, y=0):\n i = 0\n spacing = size * .11\n s = str(s)\n char = s[0]\n char_w, char_h = font[char].size\n for c in s:\n display_c(c,font,screen,lcd,size,(int(i*spacing*char_w)+x),y)\n i += 1\n return screen", "def make_display_word(secret_word):\n return ('_ ' * len(secret_word))", "def display_message():\n\tprint(\"Learnt to write functions, which are named blocks of code that are designed to do one specific job.\")", "def display(self, assignment):\r\n # Subclasses can print in a prettier way, or display with a GUI\r\n print(assignment)", "def pmd_display_text(text, size=1):\n esc = conditional_escape\n result = '<span class=\"pmd-display%d\">%s</span>' % (int(size), esc(text))\n return mark_safe(result)", "def _display_error(message: str) -> None:\n print()\n print(message, end='\\n\\n')", "def display_message(self, message):\n params = {\n 'message': message\n }\n self.render_template('message.html', params)", "def url_to_display_string(url):\n raw_label = url.split('/')[-1]\n label = raw_label.replace('_', ' ')\n return label", "def showMessage(self, message):\r\n print message", "def do_display_ascii(self, address):\n address = self.ParseAddressExpr(address)\n string = self.reader.ReadAsciiString(address)\n if string == \"\":\n print(\"Not an ASCII string at %s\" % self.reader.FormatIntPtr(address))\n else:\n print(\"%s\\n\" % string)", "def help_display(self):\r\n cmdString = 'pyuic5 -h' \r\n # execute command and return stdout string\r\n output2 = subprocess.getoutput(cmdString) \r\n # show stdout \r\n self.plainTextEdit.insertPlainText( output2 )", "def showText(self, surface, point, text, color=None, size=20):\n if not color: color = self.color\n v = self / 2\n point = v(point)\n surface.print(text, tuple(point), color=color, size=size)", "def display_string( self, a_string, update_now = False ):\n #rint( \"debug for display_string\")\n if AppGlobal.parameters.gui_text_log_fn:\n # for now open close.... later perhaps improve\n with open( AppGlobal.parameters.gui_text_log_fn, \"a\" ) as a_file:\n a_file.write( a_string ) # do we need \\n check\n #rint( a_string )\n\n if AppGlobal.parameters.log_gui_text:\n AppGlobal.logger.log( AppGlobal.parameters.log_gui_text_level, a_string, )\n\n self.msg_text.insert( Tk.END, a_string, ) # this is going wrong, why how\n try:\n numlines = int( self.msg_text.index( 'end - 1 line' ).split('.')[0] )\n # !! beware int( None ) how could it happen ?? it did this is new\n except Exception as exception:\n # Catch the custom exception -- !! to broad execpt\n AppGlobal.logger.error( str( exception ) )\n print( exception )\n numlines = 0\n if numlines > self.max_lines:\n cut = int( numlines/2 ) # lines to keep/remove\n self.msg_text.delete( 1.0, str( cut ) + \".0\" ) # remove excess text\n# msg = \"Delete from test area at \" + str( cut )\n# self.logger.info( msg )\n\n if self.cb_scroll_var.get():\n self.msg_text.see( Tk.END )\n\n if update_now:\n AppGlobal.gui.root.update()\n print( \"!! 
self.root not valid here \")", "def print(self, my_screen, text_string):\n text_bitmap = self.font.render(text_string, True, BLACK)\n my_screen.blit(text_bitmap, [self.x_pos, self.y_pos])\n self.y_pos += self.line_height", "def visualizeWithContents(self, paths):\n return ExpressString(\"A totally mystical rune.\")", "def text(self):\n surface_score = pygame.font.SysFont('Helvetic', 100).render(str(self.score), False, BLACK)\n screen.blit(surface_score, (50, 50))", "def _(string):\n\t\treturn string", "def result_display(self, arg):\n if self.rc.pprint:\n out = stringify_func(arg)\n\n if '\\n' in out:\n print\n\n print out\n else:\n print repr(arg)", "def display_text(text, x, y, size):\r\n font = pygame.font.Font('freesansbold.ttf', size)\r\n text_surf, text_rect = text_objects(text, font)\r\n text_rect.center = (x, y)\r\n display.blit(text_surf, text_rect)", "def print_entry(text):\n print \"Text entered: \\n '%s'\" % text", "def entertext(Title,objectname,stringtoenter):\n try:\n ldtp.enterstring(Title,objectname,stringtoenter)\n logging.DEBUG(\"entered string\")\n except Exception as er:\n logging.DEBUG(\"Not able to enter the string in %\")", "def ui_output_text(morzeText: str):\n print(morzeText)", "def __str__(self) -> str:\n\n return self.display_form()", "def showText(self, context, text, size=1, color=colors.WHITE, conversion=True):\n context.print(text, self.components, size, color=color, conversion=conversion)", "def display_profile(self):\n statement = f\"\"\"\n ------\n {self.name.upper()}\n ------\n Fee: {self.fee} -/Rs.\n Rating: {self.rating} STARS\n Qualification: {self.qualification}\n Speciality: {self.speciality}\n Language: {self.language}\n Working Hours: {self.working_hrs}\n Contact: {self.contact}\n Location: {self.location}\n \"\"\"\n print(statement)", "def Print(self, s, color=(229, 153, 153, 255)):\r\n self.screen.blit(self.font.render(s, True, color), (5, self.textLine))\r\n self.textLine += 15", "def render_string(self, source: str, **vars) -> str:\n vars.setdefault('ctx', self._ctx)\n return self._renderer.render_string(source, **vars)", "def display_text(target_text):\n\n print('Text to analyze:')\n print('')\n print('-------TEXT BELOW-------')\n print(target_text)\n print('-------TEXT ENDS-------')\n print('')", "def command_show(problem):\r\n print problem.get_html()", "def display_message(self, message, subtitle=None, arg=None):\n if message is None:\n # Display same message as the placeholder\n message = self.placeholder\n xml = alfred.xml([\n alfred.Item(\n title=message,\n subtitle=subtitle,\n attributes={\n 'uid': alfred.uid(0),\n 'arg': arg\n },\n icon='icon.png',\n )\n ]) # compiles the XML answer\n alfred.write(xml) # writes the XML back to Alfred\n exit()", "def lcd_string(self, message, line):\n # Send string to display\n\n # message = message.ljust(LCD_WIDTH,\" \")\n\n self.lcd_byte(line, LCD_CMD)\n for i in range(len(message)):\n self.lcd_byte(ord(message[i]),LCD_CHR)", "def show_text(text, args):\n return expyriment.stimuli.TextLine(text,\n text_font=args[\"--text-font\"],\n text_size=args[\"--text-size\"],\n text_colour=args[\"stimuli_color\"],\n background_colour=args[\"bg_color\"])", "def show_greeting(self):\n self.output(' ------------------------ ')\n self.output('You are now playing ' + self.name)\n self.output(self.greeting)\n self.output(' ------------------------ ')", "def printStr(str):\n if str_chk.match(str): return str\n return repr(str)", "def emu_print(text):\n print \"%s %s\" % (EMU_PRINT_PREFIX, text)", "def 
banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")", "def show_help():\r\n print(\"What should we pickup at the store?\")\r\n print(\"\"\"\r\n Enter 'SHOW' to display current list.\r\n Enter 'DONE' to stop adding items.\r\n Enter 'REMOVE' followed by name to remove an item. Ex: REMOVE Orange\r\n Enter 'HELP' if you would like to reference the commands.\r\n \"\"\")", "def display(self):\r\n\t\ts = self.options['space']\r\n\t\tv = self.level\r\n\t\tp = self.options['sep']\r\n\t\tt = self.options['tab']\r\n\t\tb = self.options['bullet']\r\n\t\tprint(v*t+b+s+self.abbrev+s+p+s+self.text)", "def display_char(self) -> None:\r\n print(self.char if self.was_guessed else '_', end=' ')", "def display_letter_prompt(self, letter=None):\n if letter == None:\n letter = self.current_prompt\n \n displaybox = self.pygame.draw.rect(self.gameDisplay,\n self.display_states[self.display_names[self.current_display_state]]['background'],\n ((self.SCREEN_WIDTH/2)-200, 108, 400, 50))\n\n text = self.font_large.render(letter, True,\n self.display_states[self.display_names[self.current_display_state]]['text'])\n\n temp_width = text.get_rect().width\n\n self.gameDisplay.blit(text, ((self.SCREEN_WIDTH / 2) - (temp_width/2), 100))", "def display_character(window, name, path_template):\n # Could be improved a lot.\n border_size = 20\n path = \".\".join((path_template, \"200\", \"png\"))\n pic = pygame.image.load(path)\n pic_w, pic_h = pic.get_size()\n text = ft_title.render(\" \".join((\"<-\", name, \"->\")), 1, WHITE)\n text_w, text_h = text.get_size()\n pygame.draw.rect(window, GREY, (SCREEN_W/2 - pic_w/2 - border_size,\n SCREEN_H/2 - pic_h/2 - text_h - border_size,\n pic_w + border_size*2, pic_h + border_size*2),\n border_size)\n window.blit(pic, (SCREEN_W/2 - pic_w/2, SCREEN_H/2 - pic_h/2 - text_h))\n window.blit(text, (SCREEN_W/2 - text_w/2, SCREEN_H/2 + pic_h/2 - text_h/2))", "def __str__(self):\n if len(self.title) > 50:\n return self.title[:50] + \"...\"\n else:\n return self.title", "def display(self):\n statement = f\"\"\"\n ------\n By {self.prescribed_by.name.upper()}\n ------\n Patient Detail!\n Name: {self.prescribed_to.name.capitalize()}\n Age: {self.prescribed_to.age}\n Gender: {self.prescribed_to.gender}\n Prescribed Medicines!\"\"\"\n print(statement)\n self.display_cure()", "def display(self):\n disptxt = str(self)\n if self.width == 0 or self.has_output:\n print(disptxt)\n else:\n print(\"\\r\", end='')\n print(disptxt, end='')\n sys.stdout.flush()", "def about(display=True):\n\n ABOUT_TEXT = \"\"\"\nPre-release version %s (%s) of Topographica; an updated\nversion may be available from topographica.org.\n\nThis program is free, open-source software available under the BSD\nlicense (http://www.opensource.org/licenses/bsd-license.php).\n\"\"\"%(release,version)\n if display:\n print ABOUT_TEXT\n else:\n return ABOUT_TEXT", "def show_code(code):\n\n print('The code was: '+str(code))", "def Display2(self, rain, humidity, lang):\n if lang == 'eng':\n string = rain + '% Rain\\n' + humidity + '% Humidity'\n return string\n else:\n return 0\n # Not Yet Supported" ]
[ "0.71144617", "0.6865019", "0.6777228", "0.6654619", "0.6601663", "0.6576626", "0.65690356", "0.654347", "0.6448489", "0.6334183", "0.63277024", "0.6308555", "0.6306732", "0.6305804", "0.62739414", "0.62355924", "0.6233103", "0.62326515", "0.62177855", "0.62035185", "0.6197159", "0.6167717", "0.6167667", "0.6167243", "0.6154799", "0.6125252", "0.6118674", "0.6112658", "0.6105487", "0.60987604", "0.609601", "0.60825163", "0.6080288", "0.6055717", "0.6023397", "0.6021482", "0.60128707", "0.60104096", "0.6006801", "0.6000908", "0.5993272", "0.5986248", "0.5985843", "0.5971898", "0.59479034", "0.5947135", "0.5942999", "0.5939034", "0.591741", "0.59125215", "0.5883775", "0.5875679", "0.58736885", "0.58660984", "0.5863218", "0.5856877", "0.5852121", "0.5848165", "0.5822224", "0.58196557", "0.5806563", "0.5798225", "0.57930136", "0.5788982", "0.57861066", "0.5783891", "0.578015", "0.57729214", "0.57663834", "0.5757942", "0.57579136", "0.57544386", "0.57430905", "0.57328075", "0.572724", "0.57231", "0.57217765", "0.57155013", "0.57071054", "0.57069296", "0.5702195", "0.5686774", "0.56782544", "0.56741256", "0.5671378", "0.5669025", "0.5666256", "0.56662405", "0.5659276", "0.565868", "0.56540745", "0.5642554", "0.5640254", "0.5637872", "0.5637395", "0.5629506", "0.56221986", "0.5612216", "0.5593927", "0.55915624", "0.558118" ]
document_score: 0.0
document_rank: -1
query: Compares a color to the pawn's color; returns True if they are the same.
document: def isColor(self, color): return self.color == color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def same_color(self, other: 'Piece') -> bool:\n\n return self.color == other.color", "def color_check_mate(self, mycolor):\n\n if not self.color_in_check(mycolor):\n return False\n\n incheck = True\n for (x, y) in self.__players[mycolor]:\n moves = self._get_piece_moves(x, y)\n for to in moves:\n res, captured = self._make_move((x, y), to)\n if not self.color_in_check(mycolor):\n incheck = False\n\n self._unmake_move(to, (x, y), captured)\n if not incheck:\n return False\n\n return incheck", "def color_in_check(self, mycolor):\n\n opponent = self.__players[self.get_opponent_color(mycolor)]\n\n x, y = None, None\n for (u, v) in self.__players[mycolor]:\n piece = self.get_piece(u, v)\n if not piece:\n raise ValueError()\n\n if self.get_piece(u, v).name == 'king':\n x, y = u, v\n break\n\n for (u, v) in opponent:\n if (x, y) in self._get_piece_moves(u, v):\n return True\n\n return False", "def can_checkmate(self, color):\n\n if color == 'red':\n opposing_color = 'blue'\n else:\n opposing_color = 'red'\n general_location = None\n\n for piece in self._active_pieces[opposing_color]:\n if type(piece) is General:\n general_location = self.translate_to_algebraic(piece.get_location())\n break\n for piece in self._active_pieces[color]:\n piece_location = self.translate_to_algebraic(piece.get_location())\n if piece.validate_move(piece_location, general_location, self._board):\n return True\n return False", "def check_win(self, color):\n if dijkstra(self, color) == 0:\n return True\n else:\n return False", "def is_green(self, pixel: tuple) -> bool:\r\n\r\n # If the pixel is more green than any other color,\r\n # Then we consider is a plant pixel\r\n if pixel[0] < pixel[1] and pixel[2] < pixel[1]:\r\n return True\r\n\r\n # The following rules are for fine adjusments\r\n # These tolerances may vary by photo\r\n if pixel[0] > self.max_brightness:\r\n return False\r\n\r\n if pixel[1] < self.min_brightness:\r\n return False\r\n \r\n if pixel[0] / pixel[1] > self.color_tolerance[0] / self.color_tolerance[1]:\r\n return False\r\n\r\n if pixel[2] / pixel[1] > self.color_tolerance[2] / self.color_tolerance[1]:\r\n return False\r\n \r\n return True", "def game_won(self):\n\n # Makes sure every tile is colored,\n for column in self.board:\n for tile in column:\n if not tile.color:\n return False\n\n # Makes sure each color has a line.\n colors = set()\n for dot in self.dots:\n dot_tile = self.board[dot.x][dot.y]\n colors.add(dot.color)\n for dot in self.dots:\n dot_tile = self.board[dot.x][dot.y]\n # If we've already found a line for this color.\n if dot.color not in colors:\n continue\n # If this dot starts a line and ends at the other dot.\n if dot_tile.next and not dot_tile.line_end().is_dot:\n return False\n elif dot_tile.next:\n colors.remove(dot.color)\n # If colors isn't empty, not all colors have lines.\n return not colors", "def is_a_player_in_check(self, color):\n\n current_player = color\n if current_player == 'red':\n opposing_player = 'blue'\n else:\n opposing_player = 'red'\n\n if self.can_checkmate(opposing_player):\n return self.get_general(current_player)\n\n if self.can_checkmate(current_player):\n return self.get_general(opposing_player)\n\n return False", "def in_check(self, colour):\n from pieces import King, Queen, Bishop, Knight, Rook, Pawn\n king_coord = self.white_king_coord if colour is WHITE else self.black_king_coord\n if king_coord is None:\n return False\n for piece_class in King, Queen, Bishop, Knight, Rook, Pawn:\n dummy_piece = piece_class(colour = colour)\n for coord_to in 
dummy_piece.iter_move_coords(self, king_coord):\n piece = self[coord_to]\n if isinstance(piece, piece_class) and piece.colour != colour:\n return True\n return False", "def stalemate(self):\n last_piece = self.pieces.moveHistory[-1][0]\n last_piece_color = self.pieces.piece_color(last_piece)\n if last_piece_color == \"black\":\n if len(self.whiteMoves) == 0:\n return True\n else:\n if len(self.blackMoves) == 0:\n return True\n return False", "def _is_same_color(p1: str, p2: str):\n return p1.islower() == p2.islower()", "def CheckProperColoring(G):\r\n coloring_proper = True\r\n\r\n for vertex in G._color:\r\n #print('Vertex',vertex)\r\n #print('G._color',G._color[vertex])\r\n #print('G._adj[vertex]', G._adj[vertex])\r\n for adj_vertex in G._adj[vertex]:\r\n if G._color[vertex] == G._color[adj_vertex]:\r\n coloring_proper = False\r\n #end\r\n #end\r\n #end\r\n\r\n return coloring_proper", "def win_check(self):\n\t\t# Create a temp var to capture the number of correct matches\n\t\tright = 0\n\t\t# retrieve peg_guess_color_list for current round\n\t\tguess = self.model.guesses[self.model.status]\n\t\t# retreive solution list\n\t\tsolution = self.model.guesses[\"solution\"]\n\t\t# compare values in each index for both lists against eachother\n\t\tfor i in range(len(solution.pegs)):\n\t\t\t# \n\t\t\tif solution.pegs[i].peg_color == guess.pegs[i].peg_color:\n\t\t\t\tright += 1\n\n\t\t\t\tprint(\"Yay, it works!\")\n\n\t\t# If all indexes of the peg_colors in the solution and guess are True:\n\t\tif right == 4:\n\t\t\treturn True\n\n\t\telse:\n\t\t\treturn False", "def check_position_for_same_colour(self, position1, position2):\n return (not self.check_position_free(position1)) and self.check_position_for_same_occupancy(position1, position2)", "def check_for_different_colour(self, arg1_position, arg2):\n return (not self.check_position_free(arg1_position)) and (not self.check_for_same_colour(arg1_position, arg2))", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def checkWin(color_list):\n startcolor = color_list[0][0] #Saves color of [0][0] to variable for easy access\n for i in range(15):\n for k in range(25):\n if color_list[i][k] != startcolor: #If any color is not same as color on [0][0] stop and return False since game is not won\n return False\n return True #If all colors are the same as [0][0] the game ahs been won and return Tture", "def answer_ok(a):\n (rightly_positioned, permutated) = a\n if (rightly_positioned + permutated > number_of_positions) \\\n or (rightly_positioned + permutated < len(colours) - number_of_positions):\n return False\n if rightly_positioned == 3 and permutated == 1:\n return False\n return True", "def is_piece(\n self,\n rank: RankLike,\n color: ColorLike\n ) -> bool:\n\n return self.is_rank(rank) and self.is_color(color)", "def add_a_pawn(self, player, pos_x, pos_y)-> bool:\n if not pos_x in range(0, len(self.grid)) \\\n or not pos_y in range(0, len(self.grid)):\n return False\n if self.grid[pos_x][pos_y].color is None:\n for index_x in range(-1, 2):\n for index_y in range(-1, 2):\n other_pos_x = pos_x + index_x\n other_pos_y = pos_y + index_y\n if other_pos_x < 0 or other_pos_y < 0:\n continue\n if other_pos_x >= self.grid_length or other_pos_y >= self.grid_length:\n continue\n if not self.grid[other_pos_x][other_pos_y].color is None:\n self.grid[other_pos_x][other_pos_y].change_color(player.color)\n self.grid[pos_x][pos_y].change_color(player.color)\n self.last_move.append((pos_x, pos_y))\n 
return True\n else:\n return False", "def is_monochromatic(self):\n return equal(s.color for s in self.iter_states())", "def is_solved(self):\n colors = ['green', 'blue', 'red', 'orange', 'white', 'yellow']\n for row in range(3):\n for column in range(3):\n if self.front[row][column] != colors[0]:\n return False\n for row in range(3):\n for column in range(3):\n if self.back[row][column] != colors[1]:\n return False\n for row in range(3):\n for column in range(3):\n if self.right[row][column] != colors[2]:\n return False\n for row in range(3):\n for column in range(3):\n if self.left[row][column] != colors[3]:\n return False\n for row in range(3):\n for column in range(3):\n if self.up[row][column] != colors[4]:\n return False\n for row in range(3):\n for column in range(3):\n if self.down[row][column] != colors[5]:\n return False\n return True", "def is_win(self, color):\n win = self.n\n # check y-strips\n for y in range(self.n):\n count = 0\n for x in range(self.n):\n if self[x][y] == color:\n count += 1\n if count == win:\n return True\n # check x-strips\n for x in range(self.n):\n count = 0\n for y in range(self.n):\n if self[x][y] == color:\n count += 1\n if count == win:\n return True\n # check two diagonal strips\n count = 0\n for d in range(self.n):\n if self[d][d] == color:\n count += 1\n if count == win:\n return True\n count = 0\n for d in range(self.n):\n if self[d][self.n - d - 1] == color:\n count += 1\n if count == win:\n return True\n\n return False", "def is_on_board(self, r, c):\r\n return 0 <= r <= 7 and 0 <= c <= 7", "def checkEndOfGame(self, colorIndex):\n checkColor = self.grid.REPRESENTATION[colorIndex]\n otherColor = self.grid.REPRESENTATION[1-colorIndex]\n emptyColor = self.grid.REPRESENTATION[2]\n for i in range(1, self.grid.width+1):\n for j in range(1, self.grid.height+1):\n if self.grid[i, j] != checkColor:\n continue\n if (i > 2) and (self.grid[i-1, j] == otherColor) and (self.grid[i-2, j] == emptyColor):\n return False\n if (i < self.grid.width-1) and (self.grid[i+1, j] == otherColor) and (self.grid[i+2, j] == emptyColor):\n return False\n if (j > 2) and (self.grid[i, j-1] == otherColor) and (self.grid[i, j-2] == emptyColor):\n return False\n if (j < self.grid.height-1) and (self.grid[i, j+1] == otherColor) and (self.grid[i, j+2] == emptyColor):\n return False\n return True", "def is_pawn_move_valid(self, from_row, from_col, to_row, to_col):\n # Setup variables used\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n to_piece = self.board.squares[to_row][to_col]\n row_diff = abs(from_row - to_row)\n col_diff = abs(from_col - to_col)\n dc = 0\n\n # Set flag for first move of pawn\n first_move = True if from_row == 6 or from_row == 1 else False\n\n # If direction is not correct for white, exit\n if to_row - from_row > 0:\n dr = 1\n if self.piece_color(piece) == \"white\":\n return False\n\n # If direction is not correct for black, exit\n if to_row - from_row < 0:\n dr = -1\n if self.piece_color(piece) == \"black\":\n return False\n\n # If moving straight\n if from_col == to_col:\n # if not legal straight move, exit\n if not (row_diff == 1 or (first_move and row_diff == 2)):\n return False\n\n # make sure to move has no pieces on straight path\n dm = row_diff + 1\n\n # return value\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm)\n\n# if retVal and not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# 
self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n\n return retVal\n\n # WHITE en passant\n # move from moveHistory => (\"piece\", fromRow, fromCol, toRow, toCol)\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♟\" and self.moveHistory[-1][1] == 1 and\\\n self.moveHistory[-1][3] == 3 and piece_color == \"white\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # BLACK en passant\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♙\" and self.moveHistory[-1][1] == 6 and\\\n self.moveHistory[-1][3] == 4 and piece_color == \"black\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # else move must be taking piece directly move\n # if legal taking piece move and (opponent-already check for own piece) piece at to-square\n if col_diff == 1 and row_diff == 1 and to_piece != None:\n\n# if not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n return True\n\n return False", "def same_player(self, other):\n return self.name == other.name \\\n and self.color == other.color", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def at_exit(piece, colour):\n return ((colour == 'red' and piece.q == BOARDDIM) or\n (colour == 'green' and piece.r == BOARDDIM) or\n (colour == 'blue' and piece.q + piece.r == -BOARDDIM))", "def is_in_checkmate(self):\r\n\r\n done = False # Determines when to stop the loop to check\r\n checkmate = True\r\n if not self.is_in_check(\"red\") and not self.is_in_check(\"black\"): # Exits the function if not one side is checked\r\n return False\r\n\r\n board = self._board.get_board()\r\n possible_move_list = []\r\n\r\n # Red's Turn: Creates a list of all possible positions for red to move\r\n if self.get_player_turn() == 1: # Red's turn\r\n for row in range(10):\r\n for col in range(9):\r\n if board[row][col].get_piece() is None or board[row][col].get_piece().get_player_id() == 'b':\r\n possible_move_list.append(self.location_to_pos(row, col))\r\n\r\n # Checks for all possible moves\r\n for row in range(10):\r\n for col in range(9):\r\n if board[row][col].get_piece() is not None:\r\n if board[row][col].get_piece().get_player_id() == 'r': # Loops through all the red pieces on board\r\n start = self.location_to_pos(row,col)\r\n for move in possible_move_list: # Checks for move\r\n if not done:\r\n end_loc = self.parse_pos(move)\r\n end_row = end_loc[0]\r\n end_col = end_loc[1]\r\n end_piece = board[end_row][end_col].get_piece()\r\n end_piece_player_id = None\r\n if end_piece is not None:\r\n end_piece_player_id = end_piece.get_player_id()\r\n if self.make_move(start, move) is True:\r\n if self.is_in_check(\"red\") is False: # If there is a way to get out of a check\r\n done = True\r\n checkmate = False\r\n 
self.reverse_move(start,move, board, end_piece_player_id, end_piece)\r\n self.set_player_turn(1)\r\n if checkmate:\r\n self.set_game_state('b') # Black wins\r\n\r\n # Black's Turn: Creates a list of all possible positions for black to move\r\n elif self.get_player_turn() == -1: # Blacks's turn\r\n for row in range(10):\r\n for col in range(9):\r\n if board[row][col].get_piece() is None or board[row][col].get_piece().get_player_id() == 'r':\r\n possible_move_list.append(self.location_to_pos(row, col))\r\n\r\n # Checks for all possible moves\r\n for row in range(10):\r\n for col in range(9):\r\n if board[row][col].get_piece() is not None:\r\n if board[row][col].get_piece().get_player_id() == 'b': # Loops through all the black pieces on board\r\n start = self.location_to_pos(row,col)\r\n for move in possible_move_list: # Checks for move\r\n if not done:\r\n end_loc = self.parse_pos(move)\r\n end_row = end_loc[0]\r\n end_col = end_loc[1]\r\n end_piece = board[end_row][end_col].get_piece()\r\n end_piece_player_id = None\r\n if end_piece is not None:\r\n end_piece_player_id = end_piece.get_player_id()\r\n if self.make_move(start, move) is True:\r\n if self.is_in_check(\"black\") is False: # If there is a way to get out of a check\r\n done = True\r\n checkmate = False\r\n self.reverse_move(start,move, board, end_piece_player_id, end_piece)\r\n self.set_player_turn(-1)\r\n if checkmate:\r\n self.set_game_state('r') # Red wins\r", "def check_collision(self):\n self.collided = False\n\n for point in self.collision_points:\n\n try:\n if self.game_map.get_at((\n int(point[0]), int(point[1])\n )) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True", "def gen_is_in_check(self, color, checkmate=False):\n\n gen_location = self.get_general_location(color)\n gen_piece = self._game_board[gen_location[0]][gen_location[1]]\n is_red = color == \"red\"\n opposite_color = \"black\" if is_red else \"red\"\n opposing_gen_loc = self.get_general_location(opposite_color)\n opposing_col_moves = self.avaiable_moves_for_color(opposite_color)\n\n # if checkmate is true\n if checkmate:\n gen_moves = gen_piece.available_moves(self._game_board)\n # for each available move\n for move in gen_moves:\n # move piece\n self.move_pieces(gen_location,move)\n # check if gen_is_in_check on the new board\n if not self.gen_is_in_check(color):\n # if any return false, move piece back to location and return false\n self.move_pieces(move, gen_location)\n return False\n else:\n self.move_pieces(move, gen_location)\n return True\n\n # if checkmate is False\n if not checkmate:\n # if gen's location is in opposing colors moves\n if gen_location in opposing_col_moves:\n return True", "def c_equals(a, b):\n alpha = library.PixelGetAlpha\n return bool(library.IsPixelWandSimilar(a, b, 0) and\n alpha(a) == alpha(b))", "def is_color(self, color: ColorLike) -> bool:\n\n if isinstance(color, Color):\n return self.color == color\n elif isinstance(color, str):\n return str(self.color) == color\n elif isinstance(color, int):\n return int(self.color) == color\n return False", "def __eq__(self, other):\n return (\n self.bg_color == other.bg_color and\n self.width == other.width and\n self.height == other.height\n )", "def fully_equal(left, right, compare_color=True):\n color = not compare_color or left.color == right.color\n return (left == right and\n left.is_initial == right.is_initial and\n left.is_final == right.is_final and\n left.final_word_out == right.final_word_out and\n left.word_out == right.word_out and\n color and\n 
left.initial_probability == right.initial_probability)", "def is_dark(self):\n\n return self.red() < 125 and self.green() < 125 and self.blue() < 125", "def __eq__(self, other):\n return type(other) is type(self) and other.color == self.color", "def _is_taking_own_piece(self, from_row, from_col, to_row, to_col):\n # Get piece being moved\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n\n # is piece trying to take it's own piece?\n to_piece = self.board.squares[to_row][to_col]\n if to_piece != None:\n if self.piece_color(to_piece) == piece_color:\n return True\n return False", "def has_winner(self):\n if self.color_check_mate(ChessGame.BLACK):\n return ChessGame.WHITE\n elif self.color_check_mate(ChessGame.WHITE):\n return ChessGame.BLACK\n else:\n return None", "def check_moves(self, board, self_color, coords, delta):\r\n found_opponent = False\r\n for i in range(1, 8):\r\n dr = coords[0] + i * delta[0]\r\n dc = coords[1] + i * delta[1]\r\n\r\n if self.is_on_board(dr, dc):\r\n if board[dr][dc] == self_color:\r\n break\r\n\r\n elif board[dr][dc] == self.get_opponent_color(self_color):\r\n found_opponent = True\r\n\r\n elif board[dr][dc] == self.EMPTY:\r\n if found_opponent:\r\n return dr, dc\r\n else:\r\n break", "def is_current_players_piece(piece: str, white_turn: bool) -> bool:\n if white_turn and piece in WHITE_PIECES:\n return True\n elif not white_turn and piece in BLACK_PIECES:\n return True\n else:\n return False", "def is_finished(self)-> bool:\n for line in self.grid:\n for pawn in line:\n if pawn.color is None:\n return False\n return True", "def check_color(i, j, k):\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()", "def has_friendly_piece(self, piece) -> bool:\r\n if self.has_piece():\r\n if piece.get_color() == self.get_piece().get_color():\r\n return True\r\n \r\n return False", "def _ischeckopponent(self, from_, to_):\n opp_color = 'W' if self.to_move == 'B' else 'B'\n opp_king_pos = self.board.get_occupants(color=opp_color, notation='K')[0]\n\n diff = (\n opp_king_pos[0] - to_cartesian(to_)[0],\n opp_king_pos[1] - to_cartesian(to_)[1]\n )\n\n if diff in self.board[from_].occupant.get_captures():\n if self.board[from_].occupant.hopping:\n return True\n\n if not self.board.isblocked(to_, opp_king_pos):\n return True\n\n return False", "def checkMove(move: Card, game) -> bool:\n lastMove = game.lastMove\n\n if move.number == lastMove.number:\n return True\n\n elif move.color == lastMove.color: \n return True\n\n elif move.wild: \n return True\n\n return False", "def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")", "def game_over(self):\n red_minion = 0\n blue_minion = 0\n red_master = 0\n blue_master = 0\n only_masters = True\n for row in self.board:\n for piece in row:\n if piece != 0:\n if not piece.master:\n if piece.player:\n blue_minion += 1\n else:\n red_minion += 1\n only_masters = False\n else:\n if piece.player:\n blue_master += 1\n else:\n red_master += 1\n if blue_minion + blue_master == 0:\n self.winner = \"Red\"\n self.red_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif red_minion + red_master == 0:\n self.winner = \"Blue\"\n 
self.blue_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif only_masters:\n if red_master > blue_master:\n self.winner = \"Red\"\n self.red_victories += 1\n elif blue_master > red_master:\n self.winner = \"Blue\"\n self.blue_victories += 1\n else:\n self.winner = \"Nobody\"\n self.number_of_games +=1\n self.game_over_screen()\n return True\n \n return False", "def hash_comparison(self):\n for result in self.cards:\n if result.hash_status:\n return True\n return False", "def update_tile(tile, color, tiles):\n if color == BLACK:\n return num_black_neighbors(tile, tiles) in [1, 2]\n if color == WHITE:\n return num_black_neighbors(tile, tiles) == 2", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def test_exist_and_change(self):\n colorList = ColorList()\n prev = colorList.pickColor()\n self.assertIsNotNone(prev)\n for i in range(100):\n color = colorList.pickColor()\n self.assertIsNotNone(color)\n self.assertTrue(color.r != prev.r or color.g != prev.g or color.b != prev.b)\n prev = color", "def check_up(self, i, j, board):\r\n color = board.pieceAt(i, j)\r\n\r\n row = True\r\n for k in range(4):\r\n row &= board.pieceAt(i-k, j) is color\r\n\r\n if row:\r\n return color\r\n else:\r\n return Color.EMPTY", "def can_change_colors(self):\n return self.condition is None or self.get_state(self.condition[\"entity\"]) == self.condition[\"state\"]", "def is_en_passant(self, from_col, from_row, to_col, to_row):\n from_square = self.get_square(from_col, from_row)\n to_square = self.get_square(to_col, to_row)\n taking_square = self.get_square(to_col, from_row)\n # Check the to_col is next to the from_col\n if abs(ord(from_col) - ord(to_col)) != 1:\n return False\n # Check the from row is correct (5 for white, 4 for black)\n elif from_row != (5 if from_square.isupper() else 4):\n return False\n # Check the from square is a pawn\n elif from_square.lower() != 'p':\n return False\n # Check the to square is empty\n elif self.get_square(to_col, to_row) != ' ':\n return False\n # Check the square being taken is a pawn of the opposite colour\n elif taking_square.lower() != 'p' or taking_square == from_square:\n return False\n else:\n # It is a valid en-passant move\n return True", "def red_has_won(self):\n return not any([self.squares[p].is_red() for p in self.squares \\\n if self.squares[p]])", "def __eq__(self, other):\n return self.is_red() == other.is_red()", "def is_cue_color(color, tolerance = 100):\n return col_diff(color, CUE_COLOR) <= tolerance", "def colorOK(colorStr):\n tkWdg = _getTkWdg()\n\n try:\n tkWdg.winfo_rgb(colorStr)\n except tkinter.TclError:\n return False\n return True", "def solved(self):\n\n for p in self.pieces:\n if [p.x, p.y, p.z].count(0) < 2: # Filter out centers.\n # Compare each sticker color with its respective center.\n for i, s in enumerate(p.stickers):\n cen = self.getpiece(s.x, s.y, s.z).getsticker(s.x, s.y, s.z)\n if s.c != cen.c:\n return False\n\n return True", "def CheckGhost(self,G):\n\t\tif(P.x==G.x and P.y==G.y):\n\t\t\treturn 1\n\t\treturn 0", "def validate_hair_color(passport: map) -> bool:\n if passport.get('hcl'):\n regex = re.compile('#[0-9a-f]{6}')\n match = regex.match(passport['hcl'])\n return bool(match)\n\n return False", "def checks_check(self, turn):\n opposite_colour = next_turn(turn)\n\n if piece_class.KING_LOCATION[opposite_colour] in self.path_dict[turn]:\n self.print_message(\"CHECK!\")\n# self.checkmate(turn)\n self.mate_double(turn)\n self.mate_pinned(turn)\n self.mate_normal(turn)\n\n if 
piece_class.KING_LOCATION[turn] in self.path_dict[opposite_colour]:\n return True\n\n else:\n return False", "def has_clashes(the_board):\r\n for c in range(1, len(the_board)):\r\n if col_clashes(the_board, c):\r\n return True\r\n return False", "def GreenFilter(c):\n if (c[1] > c[0]) and (c[1] > c[2]) and (c[0] == c[2]): return True\n else: return False", "def pawnTo(self, direction) :\n dirCoord = add(self.coord, direction)\n return self.board.playerCellList[dirCoord[0]][dirCoord[1]].hasPawn", "def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # verify that component colors are between _MIN and _MAX\n for c in color:\n if c < MIN or c > MAX:\n return False\n return True", "def has_enemy_piece(self, piece) -> bool:\r\n if self.has_piece():\r\n if piece.get_color() != self.get_piece().get_color():\r\n return True\r\n \r\n return False", "def has_clashes(the_board):\r\n for col in range(1,len(the_board)):\r\n if col_clashes(the_board, col):\r\n return True\r\n return False", "def has_clashes(the_board):\r\n for col in range(1,len(the_board)):\r\n if col_clashes(the_board, col):\r\n return True\r\n return False", "def _check_zone_locks(self, start, end):\n color = self._board[start]\n goal = CheckersGame.opposite[color]\n\n start_zone = FULL_BOARD[start]\n end_zone = FULL_BOARD[end]\n\n # You can always get into your own goal\n # and you can always move within a zone.\n if end_zone == start_zone or end_zone == goal:\n return True\n # So now we can assume they changed zones.\n # And that if they're in a goal, it's not their own.\n\n # The first rule is that if a piece is in *its destination goal*,\n # it can't be moved outside of it.\n # The second rule is that we may not move into a zone which is\n # another player's spawn or goal\n if start_zone == goal or end_zone == color:\n return False\n return True", "def valid(black, white, x, y):\n return (not black & gobit[(x, y)]) and (not white & gobit[(x, y)])", "def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False", "def _check_redshift(self, red):\n if np.min(np.abs(red - self.zout)) > 0.01:\n return 0\n return 1", "def has_clashes(the_board):\n for col in range(1,len(the_board)):\n if col_clashes(the_board, col):\n return True\n return False", "def has_clashes(the_board):\n for col in range(1,len(the_board)):\n if col_clashes(the_board, col):\n return True\n return False", "def combine_test():\n\n red = create_image(1, 3, create_color(255, 0, 0))\n green = create_image(1, 3, create_color(0, 255, 0))\n blue = create_image(1, 3, create_color(0, 0, 255))\n\n expected = create_image(1, 3, create_color(255, 255, 255))\n\n result = combine(red, green, blue)\n\n for x, y, (r, g, b) in result:\n if (r, g, b) == tuple(get_color(expected, x, y)):\n print(\"Pixel at (\" + str(x) + \",\" + str(y) + \") passed.\")\n else:\n print(\"Pixel at (\" + str(x) + \",\" + str(y) + \") failed. 
Expected \" + str((r, g, b)) + \" got \"\n + str(get_color(expected, x, y)))", "def has_clashes(the_board):\n for col in range(1, len(the_board)):\n if col_clashes(the_board, col):\n return True\n return False", "def select_our_piece(self):\n return self.board.location(self.mouse_pos) is not None and self.board.location(self.mouse_pos).color == self.turn", "def test_p1_color_map(grid):\n space = bempp.api.function_space(grid, \"P\", 1)\n\n colors_unique = True\n\n for local_dofs in space.global2local:\n colors = [space.color_map[elem] for elem, _ in local_dofs]\n if len(colors) != len(set(colors)):\n colors_unique = False\n\n assert colors_unique", "def is_game_finish(self):\n for row in self.chessboard:\n for val in row:\n if not val.is_correct_state():\n return False\n return True", "def IsOk(*args, **kwargs):\n return _gdi_.Colour_IsOk(*args, **kwargs)", "def __eq__(*args, **kwargs):\n return _gdi_.Colour___eq__(*args, **kwargs)", "def is_valid(current_card: Card, destination: Card) -> bool:\n # TODO: check for a card to a space is only Kings; maybe in the board?\n match = current_card.color == destination.color\n difference = destination.value - current_card.value\n if not match and difference == 1:\n return True\n else:\n return False", "def test_if_bottom_color_player_well_set(self):\n ui = UIRender(TestUI.image_path)\n ui.set_bottom_player_color(CELTIC_GREEN)\n self.assertEqual(ui.bottom_player_color, CELTIC_GREEN)\n self.assertEqual(ui.top_player_color, SPQR_RED)\n ui.set_bottom_player_color(SPQR_RED)\n self.assertEqual(ui.bottom_player_color, SPQR_RED)\n self.assertEqual(ui.top_player_color, CELTIC_GREEN)", "def piece_color(self, piece):\n if piece == None:\n return None\n if ord(ChessPiece.W_KING) <= ord(piece) <= ord(ChessPiece.W_PAWN):\n return \"white\"\n return \"black\"", "def testKingOnly(board):\n return bin(board.friends[board.color]).count(\"1\") == 1", "def inside_gamut(rgb: ndarray) -> bool:\n return all(rgb >= 0)", "def is_red(self):\n return \"red\" == self.color", "def test_issue_269(self):\n\n c = pygame.Color(0)\n c.hsva = 360, 0, 0, 0\n self.assertEqual(c.hsva, (0, 0, 0, 0))\n c.hsva = 360, 100, 100, 100\n self.assertEqual(c.hsva, (0, 100, 100, 100))\n self.assertEqual(c, (255, 0, 0, 255))", "def is_black(self):\n return \"black\" == self.color", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def IsColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_IsColor(self, *args)", "def contains_black(image):\n extrema = ImageStat.Stat(image).extrema\n r = extrema[0][0]\n g = extrema[1][0]\n b = extrema[2][0]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "def PinkFilter(c):\n if (c[0] > c[1]) and (c[2] > c[1]) and (c[2] == c[0]): return True\n else: return False", "def check_opponent_winning(self):\n valid_actions = self.get_valid_actions()\n copy_board = np.copy(self.board)\n for action in list(valid_actions):\n height = self.get_height(action, board=copy_board)\n self.set(action, height=height, value=self.current_player * -1, board=copy_board)\n\n if self.check_winner(copy_board, action, height) != 0:\n return True\n\n self.set(action, height=height, value=0, board=copy_board)\n\n return False", "def castling_valid(self, turn, direction):\n \n 
opposite_colour = next_turn(turn)\n\n \n if self.board[direction[0]] and self.board[direction[-1]] != self.empty:\n if ((self.board[direction[0]].graphic) == piece_class.PIECEDICT[turn][piece_class.King] and \n (self.board[direction[-1]].graphic) == piece_class.PIECEDICT[turn][piece_class.Rook]):\n if self.board[direction[0]].move_track == False and self.board[direction[-1]].move_track == False:\n for i in self.path_dict[opposite_colour]:\n if i in self.coords:\n if self.coords.index(i) == direction[0]:\n \n return False\n \n if self.coords.index(i) == direction[1]:\n \n return False\n \n if self.coords.index(i) == direction[2]:\n \n return False\n \n if len(direction) == 4:\n if self.board[direction[1]] == self.empty:\n if self.board[direction[2]] == self.empty:\n \n return True\n \n if len(direction) == 5:\n if self.board[direction[1]] == self.empty:\n if self.board[direction[2]] == self.empty:\n if self.board[direction[3]] == self.empty:\n \n return True\n \n return False", "def same_color(suit):\n\tif suit == 's':\n\t\treturn 'c'\n\telif suit == 'c':\n\t\treturn 's'\n\telif suit == 'd':\n\t\treturn 'h'\n\telif suit == 'h':\n\t\treturn 'd'", "def check(chessboard, row, col, n):\n for i in range(col):\n if chessboard[row][i] == 1:\n return False\n\n for j, i in zip(range(row, -1, -1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n \n for j, i in zip(range(row, n, 1), range(col, -1, -1)):\n if chessboard[j][i] == 1:\n return False\n\n return True" ]
[ "0.7082352", "0.6642375", "0.6420325", "0.63803416", "0.6271055", "0.6236686", "0.62152225", "0.6214757", "0.6146159", "0.6136335", "0.6056689", "0.6049656", "0.6021747", "0.60207504", "0.6019101", "0.60071254", "0.6001189", "0.6000824", "0.59897274", "0.59405607", "0.5914164", "0.59021425", "0.5842953", "0.58133096", "0.5810256", "0.5799214", "0.57960993", "0.57919997", "0.57706434", "0.57624686", "0.57583934", "0.5755108", "0.57348907", "0.57340693", "0.571377", "0.5696566", "0.56681585", "0.56668746", "0.56628287", "0.56194884", "0.5604908", "0.5585283", "0.55802375", "0.555661", "0.55518454", "0.5550554", "0.55497813", "0.5544096", "0.5543194", "0.55152905", "0.5496374", "0.5484078", "0.54765904", "0.54724705", "0.5462182", "0.54413503", "0.543558", "0.54348326", "0.5413382", "0.54108614", "0.5396273", "0.53797054", "0.53731763", "0.53592455", "0.5355614", "0.5350689", "0.53431386", "0.5339132", "0.5337703", "0.5336549", "0.5336549", "0.5328069", "0.5318972", "0.531652", "0.5309256", "0.5308579", "0.5308579", "0.5303134", "0.5300871", "0.53005457", "0.52884257", "0.52816665", "0.5281411", "0.52673167", "0.52641946", "0.5262345", "0.5254663", "0.5250977", "0.52466357", "0.5243622", "0.52350503", "0.523059", "0.5228432", "0.5226302", "0.5220336", "0.521963", "0.52167845", "0.5207115", "0.52070874", "0.5206211" ]
0.6147662
8
Sets the position of the pawn
def move(self,x,y):
    self.pos.x = x
    self.pos.y = y
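A minimal usage sketch for the method above (illustrative only; the `Pos` holder and the starting coordinates are assumptions, since the source only requires that `self.pos` expose mutable `x`/`y` fields):

    class Pos:
        # assumed minimal holder; not part of the dataset record
        def __init__(self, x, y):
            self.x, self.y = x, y

    class Pawn:
        def __init__(self, x, y):
            self.pos = Pos(x, y)

        def move(self, x, y):
            # identical body to the document entry above
            self.pos.x = x
            self.pos.y = y

    p = Pawn(0, 6)
    p.move(0, 5)                # advance the pawn one row
    print(p.pos.x, p.pos.y)     # -> 0 5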
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def setPosition(position):", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def setPos(self,pos):\n self.Xpos,self.Ypos=pos", "def set_position(self, x, y):\n self.pos = pygame.Rect(x, y, 0, 0)", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def set_new_location(self, xPos, yPos):", "def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))", "def move(self, p):\r\n self.position.setvalue(p)", "def set_position( self, posx, posy ):\n\n self.__foodx = posx\n self.__foody = posy", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def set_position(self, position):\r\n\r\n self.position = position\r\n if (self.rect):\r\n self.rect.x = position[0]\r\n self.rect.y = position[1]", "def set_position(self, position):\n self.position = position", "def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien", "def set_pos(self, p: tuple) -> None:\n self.pos = p", "def set_position( self ):\n\t\tscreen_rect = self.get_preview_window_screen_rect( )\n\n\t\twhile screen_rect.Intersects( self.GetScreenRect( ) ):\n\t\t\tpos = self.GetPosition( )\n\t\t\tself.SetPosition( ( pos[ 0 ] - 2, pos[ 1 ] + 2 ) )", "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "def set_player_position(self, position):\n raise NotImplementedError", "def put_pawn_at(board: Board, pawn_value: int, x: int, y: int) -> None:\n\n board[y][x] = 1 if argv[2] is '0' else pawn_value", "def set_position(self):\n raise RuntimeError(\"the 'set_position' method must be overriden\")", "def set_position(self, position):\n self.set_current_position(position)", "def respawn_player(self):\n self.rect.x = 50\n self.rect.y = 50\n \n # Specifies the Player's spawnpoint as maze_arrangement[1][1], representing\n # the tile in the top-left corner of the maze\n self.__user_x = 1\n self.__user_y = 1", "def move(self):\n \n self.position = self.wander()", "def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos", "def set_position(self, position):\n raise NotImplementedError()", "def set_position(self, new_pos):\n self._position = new_pos", "def setPosition(self,newPos):\n self._position = newPos", "def set_position(self, position):\n self.gripper_io.set_signal_value(\"position_m\", position)", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def set_pos(self, x):\n self._pos = x", "def move_pawn(pos, game):\n #Convert coordinates to row and column\n row = int(pos[1]//(SQUARESIZE+FENCEWIDTH))\n col = 
int(pos[0]//(SQUARESIZE+FENCEWIDTH))\n #Make move\n game.move_pawn(game.get_player_turn(), (col,row))", "def teleport(self, x, y, reset_rotation=False):\n self.center[0] = x\n self.center[1] = y\n self.rect.center = tuple(self.center) # update pygame sprite placement\n if reset_rotation:\n self.rotate(-self.rotation)", "def SetPosition(self, pos):\n self._pos = pos", "def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]", "def setNewPosition(self,board_object,x,y):\n\t\tif checkClash(board_object,self,x,y)==0:\n\t\t\toverlayMatrix(board_object,self,x,y)\n\t\t\tself.setPos(x,y)\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1", "def place(self,y,x):\n self.y = y\n self.x = x", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = 600", "def set_pos(self, p, a, **kwargs):\n\t\treturn self.send(\"set_pos\", p[0], p[1], a, **kwargs)", "def set_position(self,position,midpoint = False):\n\n # Find the image size and midpoint of the image\n imagesize = self.image.get_size()\n imagemidp = (int(imagesize[0] * 0.5),int(imagesize[1] * 0.5))\n\n # if a midpoint arguement is passed, set the pos to the top left pixel\n # such that the position passed in is in the middle of the button\n if midpoint:\n self.pos = (position[0] - imagemidp[0], position[1] - imagemidp[1])\n else:\n self.pos = position\n\n # set the rectangle to be used for collision detection\n self.rect = pygame.Rect(self.pos,self.image.get_size())\n\n # Set up the information that is needed to blit the image to the surface\n self.blitinfo = (self.image, self.pos)", "def position(self, position):\n self.move_to(position)", "def respawn(self, xrespawn, yrespawn):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n #self.center_x = SCREEN_WIDTH / 2\n #self.center_y = SCREEN_HEIGHT / 2\n\n self.center_x = xrespawn\n self.center_y = yrespawn\n\n self.angle = 0\n\n self.cur_health = self.max_health", "def new_position(self, p):\n if self.track:\n self.gnx = p.gnx\n else:\n p = self.get_position()\n\n self.new_position_edit(p)\n self.new_position_view(p)", "def set_drone_position(self, new_point):\n self.drone.set_drone_position(new_point)", "def set_position(self, position):\n self.position = tuple(position)", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def set_location(self, x, y, z=0):\n self._rect.topleft = (x, y)\n self._z = z\n self._update()", "def assign_position(self, position):\n\n self._actual_position = list(position)\n self._position = [0, 0]\n if not self._widget_assigned:\n self._align()\n self._update_widgets()\n self._widget_assigned = True", "def set_position(self, x_position, y_position): \n \n # Checks if the tile position is within the x boundaries of the maze\n if x_position >= 0 and x_position <= 18: \n \n # The new self.rect.x is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.x = (x_position*50) + 3\n \n # Checks if the tile position is within the y boundaries of the maze \n if y_position >= 0 and y_position <= 18:\n \n # The new self.rect.y is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.y = 
(y_position*50)\n \n # Returns the updated self.rect.x and self.rect.y to the caller\n return self.rect.x, self.rect.y", "def setRobotPosition(self, position):\n posx = position.getX()\n posy = position.getY()\n self.position = Position(posx, posy)\n #raise NotImplementedError", "def reset(self, playerx, playery):\n self.rect.center = (playerx, playery)", "def set_position(self, x_position, y_position):\n \n # Checks if the tile position is within the x boundaries of the maze\n if x_position >= 0 and x_position <= 18: \n \n # The new self.rect.x is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 14 as a slight\n # offset to center the image more appropriately on the tile\n self.rect.x = (x_position*50) + 14\n \n # Checks if the tile position is within the y boundaries of the maze \n if y_position >= 0 and y_position <= 18:\n \n # The new self.rect.y is determined by multiplying the tile value by 50,\n # representing the pixel position of the sprite, and adding 3 as a slight\n # offset to center the image more appropriately on the tile \n self.rect.y = (y_position*50) + 3\n\n # Returns the updated self.rect.x and self.rect.y to the caller\n return self.rect.x, self.rect.y", "def force_set(self, pos):\n self.rect.center = pos", "def set_position(self, az_pos, el_pos):\n raise NotImplementedError()", "def set_position(self, pos):\n self.ref_pos = pos", "def set_move(self, x, y):\n self.pieces[x + (y * self.width)].set_move()", "def setCenter(self, p):\n self.__center = p", "def setposition(self, x, y, bearing=None):\n self.posX = x\n self.posY = y\n\n # self check of position inside canvas\n if self.posX < self._min_x:\n self.posX = self._min_x\n if self.posY < self._min_y:\n self.posY = self._max_y\n\n if self.posX > self._max_x:\n self.posX = self._max_x\n if self.posY > self._max_y:\n self.posY = self._max_y\n\n if bearing is None:\n self._add_point()\n elif isinstance(bearing, int):\n self.setbearing(bearing)\n else:\n raise ValueError(\"Bearing must be an integer\")", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = SCREEN_HEIGHT / 2\n self.angle = 0", "def positioning_ship(self):\n self.rect.midleft = self.screen_rect.midleft\n self.y = float(self.rect.y)", "def reposition(self, x, y):\n self.groupx = x\n self.groupy = y\n self.call('reposition', x, y)", "def __set_paddle_position(self):\n self.__window.remove(self.__paddle)\n self.__window.add(self.__paddle, (self.__window.width - self.__paddle.width) / 2,\n self.__window.height - self.__paddle_offset)", "def updatePos(self):\n self.setPos(self.centerX-self.boundingRect().width()/2.0,\n self.centerY-self.boundingRect().height()/2.0)", "def set_position(self, x: float, y: float):\n self._shape.body.position.x = x\n self._shape.body.position.y = y", "def _setup_move(self, position):\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n self.setpoint.put(position, wait=True)\n if self.actuate is not None:\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False)", "def set_position(self, pos, debug=False):\n pos = max(pos, 0)\n pos = min(pos, 1)\n posrange = pos * self.range\n pos = posrange + self.min\n if debug:\n print('Setting Dynamixel {} with posrange {} to position {}'.format(self.id, posrange, pos))\n self.motor.set_position(int(pos))", "def position(self, pos: int):\n self.__pos = pos", "def 
setPoint(self, point):\n self.position = point.position", "def setBallPos(self, x: int, y: int):\n self.x = x\n self.y = y", "def setPosicion(self, posicion):\r\n\t\tif(isinstance(posicion, list)):\r\n\t\t\tself._x=posicion[0]+30\r\n\t\t\tself._y=posicion[1]+30\r\n\t\telse:\r\n\t\t\tself._x=posicion\r\n\t\t\r\n\t\tfor objeto in self.getObjetos():\r\n\t\t\tobjeto.setPosicion([self._x, self._y])", "def setRobotPosition(self, position):\n self.position = position", "def setRobotPosition(self, position):\n self.position = position", "def _updatePos(self, newTile):\n self.pos = newTile\n self.rect = (newTile.x * TILE_W, newTile.y * TILE_H)", "def setPosition(self, *args):\n return _libsbml.BoundingBox_setPosition(self, *args)", "def position(x, y):\n command([x + 0x80, y + 0x40])", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "async def set_position(self, pos: int) -> None:\n return await self.relay(\"set_position\")(pos=pos)", "def setPos(self,x,y):\n for i in self.itemType.find('parameters'):\n paramType = i.find('type').text.strip()\n if paramType.startswith('position-x'):\n self.params[i.find('name').text] = x\n if paramType.startswith('position-y'):\n self.params[i.find('name').text] = y", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position" ]
[ "0.7676149", "0.7314917", "0.73105", "0.7299208", "0.7293732", "0.722337", "0.72098196", "0.704315", "0.70363337", "0.6967169", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6944696", "0.6905733", "0.6900997", "0.686623", "0.68578947", "0.68043804", "0.6775906", "0.67596513", "0.67434305", "0.67250013", "0.66691494", "0.66659516", "0.6652618", "0.66302234", "0.6616995", "0.65888625", "0.65753484", "0.65538156", "0.6550093", "0.6546061", "0.6534359", "0.6534048", "0.6495577", "0.6492476", "0.64830583", "0.6481546", "0.6469655", "0.64654624", "0.6462083", "0.64398533", "0.64390504", "0.642606", "0.6407466", "0.6403881", "0.64037615", "0.63697314", "0.63639873", "0.63627553", "0.63557494", "0.6353616", "0.6353616", "0.6352895", "0.63519156", "0.6332815", "0.63321906", "0.6318598", "0.63162506", "0.6292385", "0.62886626", "0.62835264", "0.62822133", "0.6251897", "0.6250571", "0.62453693", "0.6234455", "0.6215765", "0.62105817", "0.6209746", "0.62086904", "0.620628", "0.61989784", "0.6195209", "0.61950886", "0.6194947", "0.6193548", "0.6191357", "0.6191357", "0.61814344", "0.61792237", "0.6159495", "0.615946", "0.6149791", "0.6144547", "0.6144499", "0.6144499", "0.6144499", "0.6144499", "0.6144499", "0.6144499", "0.6144499" ]
0.638652
55
Create a new object
def __init__(self,state,player=WHITE):
    if(state==None):
        self.gameState = dict()
        for x in range(0,WIDTH):
            for y in range(0,HEIGHT):
                self.gameState[x,y] = EMPTY
        for x in range(0,WIDTH):
            self.gameState[x,BSTARTROW] = BLACK #Blacks starting row
            self.gameState[x,WSTARTROW] = WHITE #Whites starting row
            #whites.append(Board.pawn(Board.pos(x,WSTARTROW),WHITE))
            #blacks.append(Board.pawn(Board.pos(x,BSTARTROW),BLACK))
    else:
        self.gameState = state
    self.whoseTurn = player
    self.cachedWin = False # set to True in winFor()
    self.cachedWinner = None
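For context, the constructor reads several module-level constants. The following definitions are assumptions (not from the source), chosen to be consistent with how the surrounding methods index and print `gameState`:

    WIDTH, HEIGHT = 8, 8           # assumed 8x8 board
    EMPTY, WHITE, BLACK = 0, 1, 2  # assumed integer cell markers
    BSTARTROW = 1                  # blacks' starting row (assumed)
    WSTARTROW = HEIGHT - 2         # whites' starting row (assumed)

With these in scope, `Board(None)` builds the initial position, while `Board(existing_state, BLACK)` resumes from a saved `gameState` dict with black to move.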
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_object(self):\r\n\t\tpass", "def new(self, obj):\n pass", "def create(cls, _):\n return cls", "def create():", "def create():", "def make_object():\n return object()", "def create(cls):\n pass\n return cls()", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def new(name=None):", "def create(self):", "def create(self):\n\n pass", "def new_object(cls):\n return cls.for_value([])", "def new(cls, **kwargs):\n return cls(**kwargs)", "def create(self):\n\n raise NotImplementedError", "def create(self, *args, **kwargs):\n pass", "def create(self):\n ...", "def create(self, **kargs):\n return self(**kargs)", "def new(cls):\n return cls()", "def create():\n pass", "def create(cls, **dictionary):\n dummy_obj = cls(1, 1)\n dummy_obj.update(**dictionary)\n return dummy_obj", "def __newobj__(cls, *args):\n return cls.__new__(cls, *args)", "def new(self):\n self._init()", "def create_object_instance(path):\n path = utils.strip_path_string(path)\n object_config_file = utils.ConfigFile(path+\"/.config.py\")\n object_type = object_config_file.read_variable(\"object_type\")\n vobject_class = {\"algorithm\":VAlgorithm,\n \"task\":VTask,\n \"data\":VData,\n \"directory\":VDirectory,\n \"project\":VProject}\n return vobject_class[object_type](path)", "def make(self):\n pass", "def New():\n Self = $classname()\n Self._initialize_()\n Self._update_()\n return Self", "def make(self, **kwargs):\n raise NotImplementedError", "def Create(self):\n raise NotImplementedError()", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def create(cls, **dictionary):\n new_inst = cls.__new__(cls)\n if cls.__name__ == \"Rectangle\":\n new_inst.__init__(42, 98)\n elif cls.__name__ == \"Square\":\n new_inst.__init__(42)\n new_inst.update(**dictionary)\n return new_inst", "def create(cls, *args, **kwargs):\r\n return cls(*args, **kwargs).save()", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n object = cls(1, 1)\n object.update(**dictionary)\n return object\n\n if cls.__name__ == \"Square\":\n object = cls(1)\n object.update(**dictionary)\n return object", "def create(cls, **dictionary):\n if cls.__name__ == 'Square':\n object = cls(1)\n object.update(**dictionary)\n return object\n\n if cls.__name__ == 'Rectangle':\n object = cls(1, 2)\n object.update(**dictionary)\n return object", "def create(self, cls, data=None):\n return cls(self, initial_data=data)", "def __init__(self, *args):\n this = _libsbml.new_ModelCreator(*args)\n try: self.this.append(this)\n except: self.this = this", "def create(*args):", "def new(self, obj):\n key = obj.__class__.__name__+'.'+obj.id\n self.__objects[key] = obj", "def new(self, obj):\n if obj:\n key = obj.__class__.__name__ + \".\" + obj.id\n self.__objects[key] = obj", "def createBasicObject(self):\n\n\t\treturn self._createBasicObjFunct(self)", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n instance.save()\n return instance", "def create_instance(self,name):\n print \"INFO : new %s\" % name\n return self.get_class(name)()", "def create(klass, **kw):\n m = klass(**kw)\n m._new = True\n return m", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def createObject(self, *args):\n return _libsbml.Submodel_createObject(self, *args)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create(cls, dictionary):\n return cls(**dictionary)", "def create_instance(self, **kwargs):\r\n create_options = 
self._generate_create_dict(**kwargs)\r\n return self.guest.createObject(create_options)", "def __init__(self, *args):\n this = _libsbml.new_Model(*args)\n try: self.this.append(this)\n except: self.this = this", "def create(self, data):\n raise NotImplementedError", "def create_instance(self, reset_data=None):\n if reset_data is None:\n log.error(f\"Cannot load Object with reset data of None.\")\n return\n\n location = reset_data.target_loc_vnum\n target_location = None\n\n log.debug(f\"Creating object[{self.vnum}] instance. \"\n f\"Target {reset_data.target_loc_is}[{reset_data.target_loc_vnum}]\")\n if reset_data.target_loc_is == \"mobile\":\n target_location = self.area.mobile_inst_by_vnum(location)\n if not target_location:\n return\n elif reset_data.target_loc_is == \"room\":\n if type(location) is int and location in self.area.roomlist:\n target_location = self.area.room_by_vnum(location)\n else:\n return\n\n new_obj = Object(self.area, self.to_json(), load_type=\"instance\")\n new_obj.aid = str(uuid.uuid4())\n\n if target_location is not None and reset_data.target_loc_is == \"room\":\n new_obj.move(target_location)\n elif target_location is not None and reset_data.target_loc_is == \"mobile\":\n target_location.contents[new_obj.aid] = new_obj\n if reset_data.target_mobile_wear:\n if 'hand' in self.default_wear_loc and self.keywords:\n comm_ = f\"hold {self.keywords[0]}\"\n target_location.interp(comm_)\n elif self.keywords:\n comm_ = f\"wear {self.keywords[0]} on {self.default_wear_loc}\"\n target_location.interp(comm_)", "def do_create(self, args):\n args = args.split()\n l = len(args)\n if l < 1:\n print(\"** class name missing **\")\n else:\n if args[0] in HBNBCommand.valid_classes.keys():\n if l == 1:\n new_obj = HBNBCommand.valid_classes[args[0]]()\n else:\n result = self.__create_help(args[1:])\n if result is None:\n print(\"** Object fails **\")\n return\n new_obj = HBNBCommand.valid_classes[args[0]](**result)\n print(new_obj.id)\n new_obj.save()\n else:\n print(\"** class doesn't exist **\")", "def create_object(cls: Type[\"Object\"],\n start_point: Tuple[int, int],\n end_point: Tuple[int, int],\n program: \"Program\",\n canvas: tk.Canvas) -> Type[\"Object\"]:\n pass", "def _create(self, title=''):\n return ContentObject(title)", "def create_ion_object(self, object_params):\n new_obj = IonObject(object_params[\"type_\"])\n\n # Iterate over the parameters to add to object; have to do this instead\n # of passing a dict to get around restrictions in object creation on setting _id, _rev params\n for param in object_params:\n self.set_object_field(new_obj, param, object_params.get(param))\n\n new_obj._validate() # verify that all of the object fields were set with proper types\n return new_obj", "def create(self, class_name, attrs, session):", "def _Create(self):\n pass", "def make_objects(self):\n pass", "def create_object(object_name):\n if object_name == 'deathstar':\n return Deathstar()\n elif object_name == 'mercury':\n return Mercury()\n elif object_name == 'venus':\n return Venus()\n elif object_name == 'mars':\n return Mars()\n elif object_name == 'earth':\n return Earth()\n elif object_name == 'moon':\n return Moon()\n elif object_name == 'tatooine':\n return Tatooine()\n elif object_name == 'mordor':\n return Mordor()\n elif object_name == 'xwing':\n return Xwing()", "def createObject(self, *args):\n return _libsbml.FbcModelPlugin_createObject(self, *args)", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def from_data(cls,data):\n\n new_object = cls() # Only 
this line needs to be updated\n new_object.data = data\n\n return new_object", "def create(cls, data=None):\n # allow create() calls with no input\n if not data:\n data = {}\n\n return cls(**data)", "def new(self, obj):\n\n key = \"{}.{}\".format(obj.__class__.__name__, obj.id)\n self.__objects[key] = obj", "def new(self, _def, _dict=None, **kwargs):\n\n _def = self.get_def(_def)\n obj = AnodeObjectBase(_def, _dict, **kwargs)\n self.instances.add(obj)\n self.instances_by_name[_def.type.name] = obj\n return obj", "def new(name, source):", "def create_instance(self, date):\n raise NotImplementedError", "def make_object(self, data, **kwargs):\n if not data:\n return None\n return Note(\n title=data['title'],\n content=data['content'],\n )", "def new(self, obj):\n key = '{}.{}'.format(obj.__class__.__name__, obj.id)\n self.__objects[key] = obj", "def create(cls, **_params):\n cls_inst = cls()\n cls_inst = cls_inst.set(**_params)\n cls_inst.save()\n return cls_inst", "def _mkObject(self):\n return ImmutableObject(\n store=self.store,\n hash=u'somehash',\n contentDigest=u'quux',\n content=self.store.newFilePath('foo'),\n contentType=u'application/octet-stream')", "def create_individual(self):\n pass", "def create_instance(self, **attrs):\n return self._create(_instance.Instance, **attrs)", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ..." ]
[ "0.8279344", "0.81875837", "0.76169944", "0.7532984", "0.7532984", "0.74903953", "0.7413568", "0.7263086", "0.7263086", "0.7263086", "0.7188198", "0.71668696", "0.7105122", "0.70978415", "0.7032688", "0.70286256", "0.69967264", "0.697575", "0.6942721", "0.6913294", "0.69061947", "0.68732804", "0.68594754", "0.6824122", "0.68045425", "0.6802282", "0.6800301", "0.6800007", "0.67886984", "0.67809767", "0.676915", "0.6762539", "0.6702267", "0.6696043", "0.6672337", "0.66626805", "0.6642999", "0.66215193", "0.66162497", "0.6600577", "0.6599635", "0.65964454", "0.65955216", "0.6589351", "0.6589351", "0.6568672", "0.6548489", "0.6548489", "0.654264", "0.6535689", "0.65198034", "0.6516465", "0.65142226", "0.6495855", "0.64897794", "0.64868647", "0.64791536", "0.64777195", "0.6469894", "0.64573956", "0.64522564", "0.6450248", "0.64496017", "0.6441314", "0.64411676", "0.6440367", "0.6426849", "0.64216626", "0.6420042", "0.64192", "0.641303", "0.64112157", "0.63947606", "0.6372704", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316", "0.63672316" ]
0.0
-1
Used for debugging and displaying in a user-friendly manner.
def __repr__(self):
    s = ""
    for y in range(0,HEIGHT):
        temp=""
        for x in range(0,WIDTH):
            temp = temp+ str(self.gameState[x,y])
        s += temp+"\n"
    return s
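Illustrative output, assuming the `Board` class above is defined with the constants sketched earlier (the exact digits depend on those assumed markers): printing a freshly constructed board yields one digit per square, one row per line.

    b = Board(None)      # assumes Board and the constants above are in scope
    print(repr(b))
    # 00000000
    # 22222222   <- BSTARTROW filled with BLACK
    # 00000000
    # ...
    # 11111111   <- WSTARTROW filled with WHITE
    # 00000000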
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():", "def output_debug_info(self):", "def debug_string(self):\n\n raise NotImplementedError", "def debug(self):\n raise NotImplementedError", "def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"", "def debug(self, message):\r\n pass", "def debug(self) -> str:\n\n return Widget.debug(self)", "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "def debug(msg):", "def _print_custom(self):\n pass", "def debug(self, msg):\n debug(msg)", "def _printable(self):\n pass", "def debug(self, *args, **kwargs):", "def debug(self):\r\n print(\"_,.-^ DEBUG ^-.,_\")\r\n print(\"state = %s %s\"%(self.state, self.error))\r\n print(\"\".join(self.memory))\r\n print ((self.pointer*\" \")+\"^\")\r\n print(\"PROGRAM\")\r\n print(\" {:16}{:7}{:7}{:7}{:16}\".format(\"State\", \"symbol\", \"write\", \"move\", \"new_state\"))\r\n for row in self.program:\r\n if row.state == self.state and row.symbol == self.memory[self.pointer]:\r\n print(\">\", end=\"\")\r\n else:\r\n print(\" \", end=\"\")\r\n print(row)", "def show(self):\n self._logger.debug(\"show\")", "def printMe(self):\n tempDict = self.whoAreYou()\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.getInitParams()\n self.raiseADebug(' Initialization Parameters:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.myCurrentSetting()\n self.raiseADebug(' Current Setting:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))", "def __debug_print__(self):\n print(self.question_data)", "def print_out():\n pass", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def show_debug_msg(self) -> None:\n if self.debug_mode:\n for point in self.points:\n print(point.debug_info())", "def debug(self, msg):\n if self._debug:\n print \"%s\" % (msg)", "def debug(self, text):\n if self.PRINT_DEBUG:\n print('[FileHistory] ' + text)", "def debug(self, msg, *args, **kwargs):\n pass", "def debug() -> bool:", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def debug_print(text):\r\n if settings.debug:\r\n print (text)", "def debug(self, msg=\"\"):\n if self.verbose:\n print(\"Debug: \" + msg)", "def visualizar(self):\n print(self.stack)", "def debug(self):\n print(self.memory)\n print('r0 = %s, ip = %s' % (self.r0, self.ip))", "def __str__(self):\n debug_str = \"%s ::=\" % str(self.head)\n for symbol in self.body:\n debug_str += \" %s\" % str(symbol)\n return debug_str", "def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)", "def fullDebug():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n print(\"___________________________\")\r\n print(\"\\n*Current Input:\", CurrentInput)\r\n print(\"*Current State: \", CurrentState)\r\n print(\"\\n*Response Options: \", RESPONSEOPTIONS)\r\n print(\"___________________________\")", "def print_debug(self):\n print()\n 
print(\"Variable names ({} total):\".format(len(self.variable_names)))\n print()\n for variable in self.variable_names:\n print(variable)\n print()\n\n print(\"Clauses:\")\n print()\n for clause in self.abstract_clauses:\n print(clause)", "def __str__(self): # pragma: no cover\n return self.display()", "def debug(msg):\n #print(msg)\n pass\n #end debug", "def show_info(self):\n print(\"Problem number: \" + str(self.number))\n print(\"Problem name: \" + str(self.name))\n print(\"Problem description: \" + str(self.desc))", "def compute_debug(self):", "def __debug(msg):\n\n pass", "def debug_print(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')", "def print_debug(context: str = \"\") -> None:\r\n print(context)\r\n print(\"This is the current board\")\r\n print(example)\r\n print(\"This is the conflict space\")\r\n print(conflict_space)\r\n print(\"This is the safeboard\")\r\n print(safeboard)", "def display(self):\n print(self)", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def debug_repr(self) -> str:\n repr_string = \"{}(Confi):\\n\".format(self.__class__.__name__)\n items = list(self.entries.items())\n items.sort(key = lambda item: item[0])\n indent = ' ' * 4\n for key, entry in items:\n repr_string += f\"{indent}{key}: {repr(entry.value)}\\n\"\n return repr_string", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def print(self):\r\n self.print_avec_separateur()", "def display(self):\n print(str(self))", "def debug_print(self, *args, **kwargs):\n print(\"APP_DEBUG_PRINT\", args, kwargs)", "def debug():\n def _debug(x):\n return e.String(x.as_source())\n yield (\"(λ any . 
str)\", _debug)", "def debug(self):\n try:\n super(FaucetTopoTestBase, self).debug()\n except Exception:\n pprint.pprint(self.host_information)\n raise", "def debug(self):\n neighbors = len(self.__neighbors)\n string = self.__repr__() + f' neighbors: {self.living_neighbors()}/{neighbors}'\n for neighbor in self.__neighbors:\n string += '\\n ' + neighbor.__repr__()\n print(string)", "def _debuginfo(self,suspect,message):\n suspect.debug(message)\n self.logger.debug(message)", "def print(self):\n print(self.pretty_str())", "def PrettyPrint(self):\r\n print(self.data)\r\n return", "def display(self,message):\r\n \r\n print(message)", "def print_problem(self):\n print('\\n*****************')\n print('PROBLEM: ' + self.problem)\n print('OBJECTS: ' + str(self.objects))\n print('INIT: ' + str(self.init))\n print('GOAL: ' + str(self.goal))\n print('AGENTS: ' + str(self.agents))\n print('****************')", "def debugprint(self, cur_pos): \n print(\"cur_pos = \", cur_pos)\n print(\"Distance map:\")\n print(self.distance_map)\n print(\"Frontier:\")\n print(sorted(self.frontier.items(), key=lambda x:x[1] ))\n print(\"Footprint:\")\n print(self.footprint)\n print(\"--------------\")", "def demo_log(self):\n self.logger.debug('This is a debug')\n self.logger.debug(self.name)\n self.logger.debug(self.doc)", "def debug(self):\n #breakpoint() # infinite loop\n print(self.ttl)", "def show(self):\n pass", "def debug_to_console(self):\n vert = None\n horiz = None\n if self.grid.apple_is_up():\n vert = \"Up \"\n elif self.grid.apple_is_down():\n vert = \"Down\"\n else:\n vert = \"None\"\n if self.grid.apple_is_left():\n horiz = \"Left \"\n elif self.grid.apple_is_right():\n horiz = \"Right\"\n else:\n horiz = \"None \"\n print(\n \"Apple is: (\", vert, \",\", horiz,\n \")\\tProximity: \",\n str(round(self.grid.proximity_to_apple(), 2)), \"\\t[x, y]:\",\n self.grid.snake.head(),\n \" \\tUp: (\", str(round(self.grid.safe_cells_up(), 2)),\n \",\", str(round(self.grid.safe_cells_up_global(), 2)), \")\"\n \" \\tDown: (\", str(round(self.grid.safe_cells_down(), 2)),\n \",\", str(round(self.grid.safe_cells_down_global(), 2)), \")\"\n \" \\tLeft: (\", str(round(self.grid.safe_cells_left(), 2)),\n \",\", str(round(self.grid.safe_cells_left_global(), 2)), \")\"\n \" \\tRight: (\", str(round(self.grid.safe_cells_right(), 2)),\n \",\", str(round(self.grid.safe_cells_right_global(), 2)), \")\"\n )", "def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text", "def write_debug_info(self):\n #path = self.request.uri.split('?')[0]\n #method = path.split('/')[-1]\n \n self.write(\"Handler: \" + str(self.__class__.__name__)+\"<br>\")\n self.write(\"<hr>\")\n self.write(str(dir(self.request)))\n self.write(\"<br><hr>\")\n self.write(\"query_arguments:\" + str(self.request.query_arguments))\n self.write(\"<br>\")\n self.write(\"uri:\" + self.uri)\n self.write(\"<br>\")\n self.write(\"path:\" + self.path)\n self.write(\"<br>\")\n self.write(\"method to call: \" + self.request.method.lower() + \"_\" + self.method)\n self.write(\"<hr>\")\n self.write(\"request method: \" + self.request.method)\n self.write(\"<hr>\")\n self.write(\"request headers: \" + str(self.request.headers))\n self.write(\"<hr>\")\n self.flush()", "def print_me(self):\n return \"ID: %s Title: %s\" % (self.ID, self.title)", "def __str__(self):\n return self.trace", "def debug_print(self, *content):\n if self.debug:\n print(*content)", "def show(self):\n\t\traise NotImplementedError()", "def print(self):\n self.print_avec_separateur(\" \")", "def 
disp(self):\r\n print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self)))", "def disp(self):\r\n print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self)))", "def show():\n info(str(Project))", "def help_dump(self):\n print(DUMP)", "def print(self):\n # Your implementation here", "def debug(self, message):\r\n if self._debug:\r\n print('[Debug] %s' % message)", "def debug(node):\n print \"%r\" % node", "def print(self):\r\n print(\"[DEBUG] STACK: \", self.__memory.__repr__())", "def __window_print(self):\n pass", "def command_show(problem):\r\n print problem.get_html()", "def __str__(self):\n\n desc = self.description\n if desc is not None:\n return str(desc)\n\n desc = self.debugDescription\n if desc is not None:\n return str(desc)\n\n return repr(self)", "def _verbose(self,text):\n if self.verbose:\n print(text)", "def print(self):\n print('Name:', self.name)\n print('Camera:', self.camera)\n print('Memory:', self.memory)\n print('Ram:', self.ram)\n print('Price:', self.price)\n print('Image:', self.image)", "def printdebug(self, msg):\n if self.debug > 0:\n print(msg)", "def info(self):", "def info(self):", "def debug(self, *args: Any, **kwargs) -> None:\n ...", "def show_state(self):\n print \"I don't know how to show_state.\"", "def show(self) -> None:", "def debug():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n print(\"___________________________\")\r\n for state in RESPONSEOPTIONS:\r\n score = calcTotalScore(state, CurrentInput, CurrentState)\r\n print(state.id + \": \" + str(score) + \" ,\", end=\"\")\r\n print(\"\\n___________________________\")", "def print_details(self):\n self.view.print_details()", "def __str__(self):\n #{{{ Nicely print of elements in class\n\n if config.debug:\n\n for orid in self.event_cache:\n print \"\\nEvents(): %s(%s)\" % (orid,self.event_cache[orid])\n\n else: \n\n print \"Events(): %s\" % (self.event_cache.keys())", "def show_data():", "def __debug(self):\n\t\tprint \"Dumping Object Chat\"\n\t\tprint self.userA.username +' + '+ self.userB.username", "def __str__(self):\n return self.printable()", "def print_problem(self) -> None:\n self._assert_problem_is_valid()\n print(f\"{'*'*60}\\n{' Silent Substitution Problem ':*^60s}\\n{'*'*60}\")\n print(f\"Device: {self.name}\")\n print(f\"Observer: {self.observer}\")\n print(f\"Ignoring: {self._ignore}\")\n print(f\"Silencing: {self._silence}\")\n print(f\"Targeting: {self._target}\")\n print(f\"Target contrast: {self._target_contrast}\")\n print(f\"Background: {self._background}\")\n print(\"\\n\")", "def debug(string):\n if verbose:\n print string\n return", "def debug(string):\n if conf.DEBUG:\n outputs.print_debug(string)", "def DebugInfo( self, request_data ):\n pass", "def debug(msg):\n if settings.DEBUG:\n print \"DEBUG: cli.%(msg)s\" % locals()", "def debug(state: bool, /) -> None:", "def debug_print(debug_data):\n if DEBUG_MODE == \"true\":\n pp.pprint(debug_data)", "def dump(self):\n self.logger.debug(self)" ]
[ "0.7927461", "0.77589476", "0.74094605", "0.7343519", "0.73350525", "0.7155053", "0.7092897", "0.70533305", "0.70131016", "0.7006562", "0.69903415", "0.69416094", "0.69380265", "0.68714195", "0.6865168", "0.68511516", "0.6811546", "0.68112475", "0.6776409", "0.67534703", "0.67317986", "0.6659786", "0.6648442", "0.6636242", "0.663607", "0.6628409", "0.66189414", "0.65906453", "0.6590476", "0.65801215", "0.6578925", "0.657702", "0.6565381", "0.6563253", "0.65345895", "0.65272355", "0.6511501", "0.65048903", "0.6504079", "0.6495901", "0.64877975", "0.64829326", "0.6474352", "0.646279", "0.646279", "0.64507574", "0.6447908", "0.64397144", "0.64393646", "0.6439086", "0.64329326", "0.64264077", "0.6412305", "0.6411126", "0.6404614", "0.640377", "0.63864547", "0.6385463", "0.6376414", "0.63757247", "0.6361464", "0.63495445", "0.63433206", "0.6338934", "0.63320047", "0.63183993", "0.63117456", "0.6311198", "0.63081074", "0.63081074", "0.63012904", "0.63004756", "0.6289677", "0.6270353", "0.62696", "0.62692404", "0.62621194", "0.6255398", "0.6244141", "0.6242517", "0.62412685", "0.6236337", "0.62330276", "0.62330276", "0.6230348", "0.62261784", "0.62261266", "0.62198395", "0.62176734", "0.6214959", "0.62142897", "0.61941135", "0.61910516", "0.61906934", "0.6182579", "0.61738515", "0.61710185", "0.61702746", "0.6166147", "0.6165996", "0.61615247" ]
0.0
-1
Translate the board description into a string. Used for a hash table.
def __str__(self):
    s=""
    for y in range(0,HEIGHT):
        for x in range(0,WIDTH):
            s+=str(self.gameState[x,y])
    return s
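A sketch of the hash-table use the query describes (hypothetical names; the source only states the intent): because the string has no separators, it makes a compact dictionary key for memoizing evaluated positions.

    transposition_table = {}   # hypothetical cache, keyed by board string

    def cached_eval(board, evaluate):
        key = str(board)       # e.g. '0000000022222222...1111111100000000'
        if key not in transposition_table:
            transposition_table[key] = evaluate(board)
        return transposition_table[key]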
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def StringFromBoard(board):\n\trows = []\n\tfor row in board:\n\t\trows.append('|'.join([' '+square+' ' for square in row]))\n\treturn '\\n-----------\\n'.join(rows)", "def __str__(self):\n rep = \"\"\n for row in range(self._dim):\n for col in range(self._dim):\n rep += STRMAP[self._board[row][col]]\n if col == self._dim - 1:\n rep += \"\\n\"\n else:\n rep += \" | \"\n if row != self._dim - 1:\n rep += \"-\" * (4 * self._dim - 3)\n rep += \"\\n\"\n return rep", "def uninterpret_board(self):\n\t\tboard_string = ''\n\t\tfor i in range(len(self.board)):\n\t\t\tfor j in range(len(self.board[i])):\n\t\t\t\tboard_string+=self.board[i][j]\n\n\t\treturn board_string", "def board_string(self):\n s = \"\"\n for i, v in enumerate(self.board):\n # if i % 81 == 0:\n # s += \"\\n\"\n if v is None:\n s += \"0\"\n else:\n if v.color == StoneColor.black:\n s += \"1\"\n else:\n s += \"2\"\n return s", "def __str__(self):\n\t\tboardString = \"\\n{0}|{1}|{2}\\n-----\\n{3}|{4}|{5}\\n-----\\n{6}|{7}|{8}\\n\"\n\t\treturn boardString.format(self.board[0], self.board[1], self.board[2], self.board[3],\n\t\t\tself.board[4], self.board[5], self.board[6], self.board[7], self.board[8])", "def __str__(self):\n string = ''\n for row in self.board:\n for item in row:\n if item == None:\n string += \"_ \"\n else:\n string += f\"{item.name:<2}\"\n string += '\\n'\n \n return string", "def __str__(self):\n board_lists = [['_']*self.__width for rows in range(self.__height)]\n for car in self.__cars:\n car_coords = car.car_coordinates()\n for item in car_coords:\n if item == (3,7):\n pass\n else:\n board_lists[item[0]][item[1]] = car.get_name()\n board_str = '\\n'.join(' '.join(sub) for sub in board_lists)\n return board_str", "def convertBoard(self):\n \n board = \"\"\n \n for m in self.squares:\n board += str(convertMarker(m)) + \" \"\n \n return board", "def __str__(self):\n board = ''\n for row in range(self.height):\n if row > 0:\n board += '\\n'\n for col in range(self.width):\n if self.board[row][col] == '':\n board += '| '\n else:\n board += ('|' + self.board[row][col])\n board += '|'\n board += ('\\n' + '-' * 2 * self.width + '\\n')\n for i in range(self.width):\n board += (' ' + str(i))\n return board", "def __str__(self):\n\t\t\n\t\tdef mapping(x):\n\t\t\tif x == 1:\n\t\t\t\t# WHITE\n\t\t\t\treturn 'O'\n\t\t\telif x == -1:\n\t\t\t\t# BLACK\n\t\t\t\treturn 'X'\n\t\t\telse:\n\t\t\t\t# Empty\n\t\t\t\treturn '-'\n\t\t\n\t\ts = 'BLACK - X\\n'\n\t\ts += 'WHITE - O\\n\\n'\n\t\tfor j in self.rows:\n\t\t\ts += j\n\t\t\ts += ' '\n\t\t\ts += ''.join(mapping(self[i+j]) for i in self.columns)\n\t\t\ts += '\\n'\n\t\treturn s + '\\n ' + self.columns + '\\n'", "def __repr__(self):\n W = self.width\n H = self.height\n\n s = '' # the string to return\n for row in range(0, H):\n s += '|'\n for col in range(0, W):\n s += self.data[row][col] + '|'\n s += '\\n'\n\n s += (2 * W + 1) * '-' # bottom of the board\n s += '\\n'\n\n x = -1\n for i in range(W):\n if x == 9:\n x = 0\n s += \" \" + str(x)\n else:\n x += 1\n s += \" \" + str(x)\n\n return s # the board is complete, return it", "def __str__(self):\r\n return str(self.board)", "def __str__(self) -> str:\n not_actual = self.current_board\n representation = self.current_board\n\n for index in range(len(not_actual)):\n if not_actual[index: index + 2] in ['31', '32', '33', '34', '36',\n '37', '38']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n if not_actual[index: index + 2] in ['41', '42', '43', '44', '45',\n '46', '47', '48']:\n representation = 
representation.replace(\n not_actual[index: index + 2], '@')\n if not_actual[index: index + 2] in ['51', '52', '53', '54', '55',\n '56', '57', '58']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n return representation", "def __str__(self):\n board = \"\"\" 0 1 2 3 4 5\\n\"\"\"\n\n for y in range(Board.board_size):\n board += str(y) + \" \"\n for x in range(Board.board_size):\n piece = self.board[x][y] if self.board[x][y] is not None else \".\"\n\n piece = str(piece).lower() if piece in self.player_1_pieces else str(piece)\n\n board += piece\n board += \" \"\n board += \"\\n\"\n return board", "def macro_str(self):\n str = '-' * (2 * self.SIZE + 1) + '\\n'\n for row in self.boards:\n str += ' '\n for board in row:\n str += board.state.value + ' '\n str += '\\n' + '-' * (2 * self.SIZE + 1) + '\\n'\n return str", "def __repr__(self):\n\n # Creates a deep copy of _board and changes all None values to underscores.\n temp_board = copy.deepcopy(self.get_board())\n\n for row_index, row in enumerate(temp_board):\n\n for column_index, column in enumerate(row):\n\n if column is None:\n\n row[column_index] = '____' + self.reverse_position(column_index, row_index) + '_____'\n\n # str() with position is mapped onto each list within _board list,\n # converted to string, and then joined by a nextline character.\n return '\\n\\n'.join(map(str, temp_board))", "def description(self) -> str:\r\n descrip = 'The player must aim to put the most possible units of a ' \\\r\n 'given colour c on the outer perimeter of ' \\\r\n 'the board. The ' \\\r\n 'player’s score is the total number of unit cells ' \\\r\n 'of colour ' \\\r\n 'c that are on the perimeter. There is a ' \\\r\n 'premium on corner ' \\\r\n 'cells: they count twice towards the score. 
'\r\n return descrip", "def __str__(self) -> str:\n return self.board", "def __str__(self):\n if self._active_player:\n def piece_to_index(piece):\n return (piece & 0xF)\n else:\n def piece_to_index(piece):\n return (piece & 0xE) | (0 if piece & 1 else 1)\n\n return '\\n'.join(map(\n lambda posY, row: ''.join(map(\n lambda posX, piece: self.EMOJI[\n piece_to_index(piece)\n if piece else\n 14 + ((posY + posX) % 2)],\n count(), row)),\n count(),\n self.board if self._active_player else reversed(\n [reversed(row) for row in self.board])))", "def _to_string(board: Tuple[Tuple[Optional[int]]], width: int) -> str:\n display = \"\\n\"\n for i in range(width):\n for j in range(width):\n line = board[j][i * width:i * width + width]\n start = j * width ** 2 + i * width\n for k, space in enumerate(line):\n if space == 0:\n space = start + k\n else:\n space = (\"X\" if space == 1\n else \"O\" if space == -1\n else \"-\")\n display += \"{0:>4}\".format(space)\n display += \" \" * width\n display += \"\\n\"\n return display", "def __str__(self):\n puzzle_string = '—' * 13 + '\\n'\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n puzzle_string += '│{0: >2}'.format(str(self.position[i][j]))\n if j == self.PUZZLE_NUM_COLUMNS - 1:\n puzzle_string += '│\\n'\n\n puzzle_string += '—' * 13 + '\\n'\n return puzzle_string", "def __str__(self):\r\n result = \"\"\r\n for line in self.board:\r\n for i in line:\r\n if i is None:\r\n result += \" \"\r\n else:\r\n result += i + \" \"\r\n result += \"\\n\"\r\n\r\n return result", "def __str__(self):\r\n \r\n #return \"The 2048 board is \" + str(self._cells)\r\n string = \"\"\r\n for row in range(self._grid_height):\r\n for column in range(self._grid_width):\r\n if column == self._grid_width -1:\r\n string += str(self._cells[row][column]) + \"\\n\"\r\n else:\r\n string += str(self._cells[row][column]) +\", \"\r\n return \"The 2048 board is \"+ str(self._grid_height) + \"x\" + str(self._grid_width) + \" and contains: \" + \"\\n\" + string", "def board_to_string(board):\n ordered_vals = []\n for r in ROW:\n for c in COL:\n ordered_vals.append(str(board[r + c]))\n return ''.join(ordered_vals)", "def board_to_string(board):\n ordered_vals = []\n for r in ROW:\n for c in COL:\n ordered_vals.append(str(board[r + c]))\n return ''.join(ordered_vals)", "def __str__(self):\n return f\"{self._desc:16s}\"", "def __str__(self):\n return '\\n'.join(str(self._board[j]) for j in range(self._n))", "def humanize(self, well_ref):\n row, col = self.decompose(well_ref)\n return \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"[row] + str(col + 1)", "def boardToString(self, margins={}):\n b = self.board\n rg = range(b.size())\n left = ' '*margins.get('left', 0)\n s = '\\n'.join(\n [left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])\n return s", "def __repr__(self):\n t = ''\n for x in range(len(self.board)):\n for y in range(len(self.board[0])):\n t += str(self.board[x][y]) + ' '\n t += '\\n'\n return t", "def __str__(self):\n\t\tstring = \"\"\n\t\tfor i in self.board:\n\t\t\tfor j in i:\n\t\t\t\tstring += str(j)\n\t\t\tstring += \"\\n\"\n\t\treturn string", "def __str__(self):\n board = ''\n board_2 = ''\n\n for row in self.from_grid:\n for space in row:\n board += ' ' + space\n board += '\\n'\n\n for row in self.to_grid:\n for space in row:\n board_2 += ' ' + space\n board_2 += '\\n'\n\n return 'Current State:\\n' + board + 'Target State:\\n' + board_2", "def __str__(self):\n result = \"\"\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 
5:\n result += \" x\"\n elif self.board[i][j] == 7:\n result += \" о\"\n else:\n result += \" #\"\n result += \"\\n\"\n return result", "def __str__(self):\n return str(self.description)[:10]", "def __repr__(self):\n return f'Board({ self.board !r})'", "def __repr__(self):\n s = dashes = \"\".join([ ' -' for i in range(self.BoardSize) ])\n for row in range( self.BoardSize ):\n sRow = '|'\n for col in range( self.BoardSize ):\n sRow += str(self.CurrentGameboard[row][col]) + '|'\n s += '\\n' + sRow + '\\n' + dashes\n return s", "def __repr__(self) -> str:\n\t\t\n\t\trepr = \"\"\n\t\tfor row in self.board:\n\t\t\tfor element in row:\n\t\t\t\tif element:\n\t\t\t\t\trepr = repr + \"o \"\n\t\t\t\telse:\n\t\t\t\t\trepr = repr + \"@ \"\n\t\t\trepr = repr + \"\\n\"\n\t\treturn repr", "def __str__(self):\n s = \" 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\\n\"\n board = initial_board()\n count = 1\n for i in self.occupied:\n board[i[0]][i[1]] = self.occupied[i]\n space = ''\n for i in range(0, 16):\n space += ' '\n start = '---'.join(space)\n s += start+'\\n|'\n for row in range(1,16):\n for col in range(1,16):\n if use_color and (row, col) == self.action:\n s += '\\033[91m'\n if board[row][col] == 0:\n s += ' |'\n elif board[row][col] == 1:\n s += ' O |'\n else:\n s += ' X |'\n if use_color and (row, col) == self.action:\n s += '\\033[0m'\n s += '\\033[0m'\n s+=str(count)+'\\n'+start+'\\n|'\n count += 1\n\n s = s[:len(s)-1]\n s += \"\\n*****************************************************************************\"\n return s[:len(s)-1]", "def get_description(self):\n return \"-\".join(\n map(str, (self.release, self.chromosome, self.start, self.reference, self.alternative))\n )", "def _cards_to_string(self, cards):\n return repr(self._eval.card2string(map(lambda x: x & 63, cards)))\\\n #.lower().replace(\"h\", u\"\\u2761\").replace(\"s\", u\"\\u2660\").replace(\"c\", u\"\\u2663\").replace(\"d\", u\"\\u2662\")", "def get_description(self, code):\n try:\n return self.message[str(code)]\n except KeyError:\n return \"Unknown (\" + str(code) + \")\"", "def __str__(self):\n #formatting board correctly\n formatted_board = \"\"\n for i in range(self.size):\n formatted_board += str(self.board[i]) + \"\\n\"\n return \"Board size: \" + str(self.size) + \"\\n\" + \"Number of Queens placed: \" + str(self.num_queens_placed) + \"\\n\" + str(formatted_board)", "def transferBoardToString(self,board):\n #int(False) will be 0\n outputString = str()\n for i in board:\n outputString = outputString + str(int(i))\n return outputString", "def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr", "def displayBoard(self):\n res = ''\n for i in range(0, self.size):\n res += '|'\n for j in range(0, self.size):\n res += ' ' + str(self.board[i][j])\n res += '\\n'\n res += '+'\n for i in range(0, self.size * 2):\n res += '-'\n res += '\\n '\n for i in range(1, (self.size + 1)):\n res += (' ' + str(i))\n return res", "def __repr__(self):\n r = \"\"\n for row in self.board:\n for cell in row:\n if cell == \"\":\n cell = color_magenta(\"_\")\n r += cell + \" \" # for all the empty strings, we will replace it with an '_'.\n r += \"\\n\"\n return r", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, 
self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def board(self) -> str:\n divider = \"+\" + \"-\" * 23 + \"+\"\n b = [divider]\n for i in range(9):\n r = []\n for j in range(3):\n s = tool.index_counter(i, j * 3)\n r.append(' '.join(str(i) if i > 0 else ' '\n for i in self.grid[s:s+3]))\n b.append(f\"| {r[0]} | {r[1]} | {r[2]} |\")\n if (i + 1) % 3 == 0:\n b.append(divider)\n return \"\\n\".join(b)", "def describe_to(self, description) -> str:\n return self.matcher.describe_to(description)", "def _board_detail(self, i, j, player=None):\n if self.board[i][j] is not None:\n return self.board[i][j]\n else:\n return ' '", "def __str__(self):\n return \"Description(values={},data_model={})\".format(\n self._values, self.data_model\n )", "def __str__(self):\n # replace with your code\n board = \"\"\n for index in range(self.grid_height):\n board += \"[\"\n for inner_index in range(self.grid_width):\n board += str(self.board[index][inner_index]) + \" \"\n else:\n board += \"]\\n\"\n return board", "def __unicode__(self):\n matrix_ = self._repr_matrix(self.matrix[:20, :5])\n lines = matrix_.split('\\n')\n headers = [repr(self)[1:-1]]\n if self._item_ids.size:\n col_headers = [('%-8s' % unicode(item)[:8]) for item in self._item_ids[:5]]\n headers.append(' ' + (' '.join(col_headers)))\n\n if self._user_ids.size:\n for (i, line) in enumerate(lines):\n lines[i] = ('%-8s' % unicode(self._user_ids[i])[:8]) + line\n for (i, line) in enumerate(headers):\n if i > 0:\n headers[i] = ' ' * 8 + line\n lines = headers + lines\n if self.matrix.shape[1] > 5 and self.matrix.shape[0] > 0:\n lines[1] += ' ...'\n if self.matrix.shape[0] > 20:\n lines.append('...')\n\n return '\\n'.join(line.rstrip() for line in lines)", "def __str__(self):\n \n # top row\n result = ' '\n result = '\\n ' + '-' * (self.DIM*2+5) + '\\n'\n \n # board rows\n for row in range(self.DIM):\n if row is 3 or row is 6:\n result += '|' + '-' * (self.DIM*2+5) + '|' + '\\n'\n # result += '|-------+-------+-------|\\n'\n result += '| '\n for col in range(self.DIM):\n if col is 3 or col is 6:\n result += '| '\n if self.board[row][col] == SudokuConfig.EMPTY:\n result += '.'\n else:\n result += str(str(self.board[row][col]))\n if col != self.DIM-1:\n result += ' '\n result += ' |' + '\\n'\n \n # bottom row\n result += ' ' + '-' * (self.DIM*2+5) + '\\n'\n result += ' '\n result += '\\n'\n \n return result", "def __str__(self):\n def align_column(grid):\n board = \"\"\n for i in range(self.n):\n board += str(grid[i]) + \"\\n\"\n return board.strip()\n return (\"===Current Stage===\\n\"\n \"{}\\n\"\n \"====Goal Board=====\\n\"\n \"{}\".format(align_column(self.from_grid),\n align_column(self.to_grid)))", "def __getAsciiString(self):\n lines = []\n horizontalLine = ('-' * (26))\n lines.append(horizontalLine)\n for row in self.board:\n rowLine = '|'\n for col in row:\n if col == -1:\n col = 'O'\n if col == 0:\n col = '-'\n if col == 1:\n col = 'X'\n rowLine = rowLine + ' ' + col.__str__() + ' |'\n lines.append(rowLine)\n lines.append(horizontalLine)\n return '\\n'.join(lines)", "def board2str(board, end='\\n'):\n s = ''\n for x in range(board.shape[0]):\n for y in range(board.shape[1]):\n s += 
str(board[x][y]) + '\\t'\n s += end\n return s[:-len(end)]", "def description() -> str:\n content = \"Demonstrates usage of blackbord remappings.\\n\"\n content += \"\\n\"\n content += \"Demonstration is via an exemplar behaviour making use of remappings..\\n\"\n\n if py_trees.console.has_colours:\n banner_line = console.green + \"*\" * 79 + \"\\n\" + console.reset\n s = banner_line\n s += console.bold_white + \"Blackboard\".center(79) + \"\\n\" + console.reset\n s += banner_line\n s += \"\\n\"\n s += content\n s += \"\\n\"\n s += banner_line\n else:\n s = content\n return s", "def __str__(self) -> str:\n return self.description", "def get_board_name(self):\n return \"OPP {} Board {}\".format(str(self.led.neoCard.chain_serial), \"0x%02x\" % self.led.neoCard.addr)", "def description():", "def ToMessageDesc(description):\n message_desc = 'The text in the keyboard overlay to explain the shortcut'\n if description:\n message_desc = '%s (%s).' % (message_desc, description)\n else:\n message_desc += '.'\n return message_desc", "def __str__(self):\n str = '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n for row in self.boards:\n for i in range(self.SIZE):\n str += '|'\n for board in row:\n for square in board.export_grid()[i]:\n str += square.value\n str += '|'\n str += '\\n'\n str += '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n return str", "def __repr__(self):\r\n numLetters = self.numLetters\r\n S = ''\r\n S += 3*'\\n'\r\n S += ' '\r\n for i in range(numLetters):\r\n S += self.currentBoard[i] + ' '\r\n\r\n return S", "def __str__(self):\n b = ''\n for i in range(7): # 7 steps in the board\n if i == self.chaser_i: # chaser position\n b += '|' + str(i) + '| chaser |\\n'\n elif i == self.player_i: # player position\n b += '|' + str(i) + '| player |\\n'\n else:\n b += '|' + str(i) + '| |\\n'\n b += '|7| bank |\\n' # bank position\n return b", "def __repr__(self):\n\n return '<Board id={id} title={title}'.format(id=self.id,\n title=self.title,\n )", "def descString(self):\n return \"\".join ([self.Name, \" (\", str(self.RollCount), \"d\"\\\n , str(self.RollMax), \"; \", str(self.CritRollMin), \"-\"\\\n , str(self.CritRollMax), \"x\", str (self.CritRollMult)\\\n , \") - \", str(self.Value), \" gp\"])", "def desc(self):\n return LandCell.desc(self) + \"; plant=\" + str(self.plant)", "def __str__(self):\n string = \"\"\n for row in self.layout:\n for tile in row:\n string+= str(tile) + \" \"\n string+= \"\\n\"\n return string", "def __str__(self):\n return \"%s\\n\" % self.text + \" \" * self.col + \"^\"", "def get_description(self):\n if CONFIG_KEY not in self:\n return\n if hasattr(self[CONFIG_KEY], DESC_KEY):\n desc_str = str(self[CONFIG_KEY][DESC_KEY])\n if not isinstance(desc_str, str):\n try:\n desc_str = str(desc_str)\n except Exception as e:\n raise InvalidConfigFileException(\n \"Could not convert the specified Project description \"\n \"({}) to string. Caught exception: {}\".\n format(desc_str, getattr(e, 'message', repr(e))))\n return desc_str", "def description(self) -> str:\r\n descrip = 'The player must aim for the largest “blob” of a given ' \\\r\n 'colour c. A blob is a group of connected blocks with the ' \\\r\n 'same colour. Two blocks are connected if their sides ' \\\r\n 'touch; touching corners doesn’t count. The player’s score ' \\\r\n 'is the number of unit cells in the largest blob of colour ' \\\r\n 'c. 
'\r\n return descrip", "def __str__(self):\n return self.get_ascii_trunk() + self.get_ascii_leaves()", "def __str__(self):\n\n # Create grid headers for the table\n headers = [letter for letter in string.ascii_uppercase[:self.width]]\n\n board_state = []\n board_state.extend([[value for value in row] for i, row in enumerate(self.board_state)])\n\n for idx, row in enumerate(board_state):\n row.insert(0, idx + 1)\n\n return tabulate(board_state, headers, tablefmt=\"grid\")", "def get_board_name(self):\n pass", "def as_string(self):\n # Remove cards which have no definition\n cards = [card for card in self if card.definition is not None]\n # Remove cards which have no word\n cards = [card for card in cards if card.word]\n return \"\\n\".join([card.get_flashcard() for card in cards])", "def __str__(self) -> str:\n return self._color + str(self._value) + SudokuTile.C_END", "def __str__(self) -> str:\r\n output: str = \"\"\r\n\r\n for row_i in range(Board._NUM_ROWS):\r\n for col_i in range(Board._NUM_COLS):\r\n pos: Pos2D = Pos2D(col_i, row_i)\r\n output += (\"{} \".format(self.squares[pos].get_representation()))\r\n # Finished row, add new line.\r\n output += \"\\n\"\r\n\r\n return output", "def __str__(self):\n line = ''\n line += self.board_state.__str__()\n line += self.move.__str__()\n line += '\\n'\n return line", "def get_descr_full(self):\n desc_text = []\n stack = [(self._desc, 0)]\n while stack:\n dl, di = stack.pop()\n while di < len(dl):\n if dl[di] == 0:\n di += 1\n elif dl[di] > 0 and dl[di] < 100000:\n desc_text.append(str(self._tables.tab_b[dl[di]]))\n di += 1\n elif dl[di] >= 100000 and dl[di] < 200000:\n lm = dl[di] // 1000 - 100\n ln = dl[di] % 1000\n desc_text.append(\"%06d : LOOP, %d desc., %d times\" % (dl[di], lm , ln))\n di += 1\n elif dl[di] >= 200000 and dl[di] < 300000:\n en = self._tables.tab_c.get(dl[di])\n am = dl[di] // 1000 - 200\n an = dl[di] % 1000\n if en is None:\n en = (str(am), \"\")\n if dl[di] < 222000:\n desc_text.append(\"%06d : OPERATOR %s: %d\" % (dl[di], en[0], an))\n else:\n desc_text.append(\"%06d : OPERATOR '%s'\" % (dl[di], en[0]))\n di += 1\n elif dl[di] >= 300000 and dl[di] < 400000:\n stack.append((dl, di + 1))\n da = dl[di]\n dl = self._tables.tab_d[dl[di]]\n di = 0\n desc_text.append(\"%06d : SEQUENCE, %d desc.\" % (da, len(dl)))\n return desc_text", "def __str__(self):\n return self.description", "def __str__(self):\n return self.description", "def Description(self) -> str:", "def Description(self) -> str:", "def __str__(self):\r\n # The full representative string\r\n str_matrix = \"\"\r\n\r\n if self.matrix is not None:\r\n # Save the lenght into a variable\r\n # to send this number to the tiles method\r\n # and calculate the number of spaces\r\n spaces = len(self.matrix)\r\n for i in range(0, spaces):\r\n nums = list(filter(lambda x: x != \"_\", self.matrix[i]))\r\n str_matrix += self.tiles(nums, (i+1), (spaces - i))\r\n\r\n return str_matrix", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def board_label(board):\n # 0 draw for side-to-move, 1 win for side-to-move (more than 50 moves), 2 win for side-to-move\n # -1 loss in more than 50, -2 loss in <50\n with chess.syzygy.open_tablebase(\"./\") as tablebase:\n # board = chess.Board(\"8/2K5/4B3/3N4/8/8/4k3/8 b - - 0 1\")\n wdl = tablebase.probe_wdl(board)\n\n # 0 draw, x win in x, -x loss in x\n # counts may be off by 1\n with chess.syzygy.open_tablebase(\"./\") as tablebase:\n # board = 
chess.Board(\"8/2K5/4B3/3N4/8/8/4k3/8 b - - 0 1\")\n dtz = tablebase.probe_dtz(board)\n if wdl == 0:\n win = 0\n draw = 1\n loss = 0\n elif wdl > 0:\n win = 1\n draw = 0\n loss = 0\n else:\n win = 0\n draw = 0\n loss = 1\n if dtz > 0:\n quality = 2000 - dtz\n elif dtz < 0:\n quality = -2000 - dtz\n else:\n quality = 0\n\n return win, draw, loss, quality", "def __str__(self):\n\n desc = self.description\n if desc is not None:\n return str(desc)\n\n desc = self.debugDescription\n if desc is not None:\n return str(desc)\n\n return repr(self)", "def get_cards_as_string(self):\n return '' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n'.format(*self.get_cards_high_to_low())", "def state_to_string(board_state):\n return str(board_state)", "def board_string(self, players):\n if len(self.user_guess) == 1:\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += \"-------------------\\n\"\n\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0].get_name()}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1].get_name()}: {self.user_guess[1]}, {self.applied_guess[1]}\\n\"\n board += \"-------------------\\n\"\n\n return board", "def __str__(self):\n if self.__description:\n return self.__description\n return repr(self)", "def get_board_as_string(game):\n str_board = \"\\n\" # every board starts with a blank line\n row = 0 # used to print the board\n\n # creates a board of 5 lines. 3 rows, 2 dashed.\n for line in range(1, 6):\n\n # every odd line\n if line % 2 != 0:\n\n # add a row to the string str_board\n str_board += \"{} | {} | {}\".format(game['board'][row][0], game['board'][row][1], game['board'][row][2])\n\n # increment the row\n row += 1\n\n # every even line\n else:\n str_board += \"--------------\"\n\n str_board += \"\\n\" # add line break at the end of every line\n\n return str_board", "def description(self):\n active = np.nonzero([bool(p) for p in self])[0]\n last_active = active[-1] if len(active) else -1\n return ' '.join([p.value_str for p in self][:last_active + 1])", "def __str__(self):\n return (\">%s\\n\" % self.name) + \\\n wrap(self.sequence, self.COLUMNS)", "def __str__(self):\r\n # replace with your code\r\n return str(self._board[0]) + \"\\n\" + str(self._board[1]) + \"\\n\" + str(self._board[2]) + \"\\n\" + str(self._board[3]) + \"\\n\\n\"", "def __str__(self):\n card_str = ['-' * CARD_WIDTH]\n card_str.append('{} (Game {})'.format(self.name, self.game.number).center(CARD_WIDTH))\n card_str.append('-' * CARD_WIDTH)\n card_str.append(''.center(CARD_WIDTH))\n card_str.append('Money: {}'.format(self.money).center(CARD_WIDTH))\n card_str.append('Turnover: {}'.format(self.turnover).center(CARD_WIDTH))\n card_str.append(''.center(CARD_WIDTH))\n card_str.append('-' * CARD_WIDTH)\n return '\\n'.join('|{}|'.format(l) for l in card_str)", "def descString(self):\n return \"\".join ([self.Name, \" (AR \", str(self.AR), \", Max DEX \"\\\n , str(self.MaxDEXMod), \") - \", str(self.Value), \" gp\"])", "def getDescription(self):\n if(self.isInit == True):\n STR=\"Map source : \"+self.source+\"\\n\"\n STR += str(self.rowsNumber) + \" rows, \"+str(self.columnsNumber)+\" columns, router range radius is \"+str(self.routerRangeRadius)+\"\\n\"\n STR +=\"backbone costs \"+str(self.backBoneCosts)+\", router costs \"+str(self.routerCosts)+\", buget is 
\"+str(self.budget)+\"\\n\"\n STR +=\"the initial cell connected to backbone is [\"+str(self.firstCell.row)+\",\"+str(self.firstCell.column)+\"]\"\n return STR\n else:\n return \"MAP NOT INITIALISED\"", "def __str__(self):\n data_string = \"\"\n for i in range(self.height):\n row = \"\"\n for j in range(self.width):\n row += \"|\" + self.data[i][j]\n row += \"|\\n\"\n data_string += row\n data_string += \"-\" * (2 * self.width + 1)\n data_string += \"\\n\"\n\n # Column numbers are labeled modulo 10 to keep the characters\n # aligned correctly\n for i in range(self.width):\n data_string += \" \" + str(i % 10)\n\n return data_string", "def print_board(self):\n board = \"\"\n for i in range(3):#need to change this in the future\n for j in range(3):#need to change this in the future\n board += self.board[i][j]\n if j != 2:#need to change this in the future\n board += \" | \"\n board += \"\\n\"\n return board" ]
[ "0.65844256", "0.65822697", "0.642821", "0.6403273", "0.6371313", "0.6342245", "0.63234556", "0.62662345", "0.62411314", "0.623124", "0.62133116", "0.62067544", "0.6201318", "0.6192333", "0.6180038", "0.61786634", "0.616831", "0.6166834", "0.61507493", "0.6119286", "0.6043222", "0.60417575", "0.60313874", "0.6017458", "0.6017458", "0.6016843", "0.59935737", "0.5989846", "0.59773004", "0.59670496", "0.5964539", "0.5957519", "0.59106284", "0.5910254", "0.59046715", "0.5889854", "0.58877295", "0.58851564", "0.58790296", "0.5878066", "0.5877805", "0.5872732", "0.5870701", "0.5858251", "0.5839793", "0.5833439", "0.5829822", "0.58271646", "0.58122814", "0.58120835", "0.57896686", "0.5784467", "0.57713145", "0.576708", "0.57640743", "0.57607096", "0.5748869", "0.5743802", "0.5742404", "0.57397485", "0.57314897", "0.5729486", "0.57285744", "0.5721781", "0.5710826", "0.5695495", "0.5692412", "0.5692032", "0.5689897", "0.5684616", "0.56731254", "0.56723696", "0.56594765", "0.56424224", "0.5639167", "0.56344044", "0.56163746", "0.5608865", "0.56048447", "0.55988944", "0.5597813", "0.5597813", "0.5588222", "0.5588222", "0.55830926", "0.5577695", "0.5576101", "0.5571726", "0.555814", "0.55524564", "0.5539906", "0.5533482", "0.55284107", "0.55239946", "0.5523124", "0.5518274", "0.55069476", "0.550123", "0.5500214", "0.54984015", "0.54844433" ]
0.0
-1
Gives a pawn at the position x,y or returns empty if none exists
def getPawn(self,x,y):
    if(self.gameState[x,y]==EMPTY):
        return
    return Pawn(x,y,self.gameState[x,y])
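A minimal usage sketch for the document above; the numpy-backed Board harness and the concrete EMPTY/WHITE/BLACK values are illustrative assumptions, not part of the dataset record:

import numpy as np

EMPTY, WHITE, BLACK = 0, 1, 2  # assumed encodings, not from the record

class Pawn:
    def __init__(self, x, y, color):
        self.x, self.y, self.color = x, y, color

class Board:
    def __init__(self, width=8, height=8):
        # gameState[x, y] holds EMPTY or a player constant
        self.gameState = np.full((width, height), EMPTY)

    def getPawn(self, x, y):
        if self.gameState[x, y] == EMPTY:
            return
        return Pawn(x, y, self.gameState[x, y])

board = Board()
board.gameState[3, 4] = WHITE
assert board.getPawn(0, 0) is None          # empty square -> implicit None
assert board.getPawn(3, 4).color == WHITE   # occupied square -> Pawn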
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_a_pawn(self, player, pos_x, pos_y)-> bool:\n if not pos_x in range(0, len(self.grid)) \\\n or not pos_y in range(0, len(self.grid)):\n return False\n if self.grid[pos_x][pos_y].color is None:\n for index_x in range(-1, 2):\n for index_y in range(-1, 2):\n other_pos_x = pos_x + index_x\n other_pos_y = pos_y + index_y\n if other_pos_x < 0 or other_pos_y < 0:\n continue\n if other_pos_x >= self.grid_length or other_pos_y >= self.grid_length:\n continue\n if not self.grid[other_pos_x][other_pos_y].color is None:\n self.grid[other_pos_x][other_pos_y].change_color(player.color)\n self.grid[pos_x][pos_y].change_color(player.color)\n self.last_move.append((pos_x, pos_y))\n return True\n else:\n return False", "def respawn(self, xrespawn, yrespawn):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n #self.center_x = SCREEN_WIDTH / 2\n #self.center_y = SCREEN_HEIGHT / 2\n\n self.center_x = xrespawn\n self.center_y = yrespawn\n\n self.angle = 0\n\n self.cur_health = self.max_health", "def get_piece(self, x, y):\n if self.in_bounds(x, y) and self.piece_at(x, y):\n return self.__board[(x, y)]\n return None", "def move_pawn(pos, game):\n #Convert coordinates to row and column\n row = int(pos[1]//(SQUARESIZE+FENCEWIDTH))\n col = int(pos[0]//(SQUARESIZE+FENCEWIDTH))\n #Make move\n game.move_pawn(game.get_player_turn(), (col,row))", "def select_square(board: Board, n: int, pawn_value: int) -> Tuple[int, int]:\n\n print(\"Select a square where you may want to put a pawn.\")\n x = sanitized_int_input(\"Its x coordinates: \")\n y = sanitized_int_input(\"Its y coordinates: \")\n\n if square_valid(board, n, pawn_value, x, y):\n return x, y\n else:\n print(\"You can't put a pawn here, sorry. Please, retry.\")\n return select_square(board, n, pawn_value)", "def random_position(self):\n while True:\n h = random.randrange(0, self.height)\n w = random.randrange(0, self.width)\n if self.grid[h, w] == 0:\n return (h, w)", "def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)", "def locate(x, y):\n position(x * 6, y)", "def spawn(self):\n (x_coord, y_coord) = (0, 0)\n grid_x = SCREEN_X // self.size\n grid_y = SCREEN_Y // self.size\n while x_coord < EDGE + 5 or x_coord > SCREEN_X - self.size - EDGE - 5:\n x_coord = random.randrange(grid_x) * self.size\n while y_coord < EDGE + 5 or y_coord > SCREEN_Y - self.size - EDGE - 5:\n y_coord = random.randrange(grid_y) * self.size\n return (x_coord, y_coord)", "def respawn_player(self):\n self.rect.x = 50\n self.rect.y = 50\n \n # Specifies the Player's spawnpoint as maze_arrangement[1][1], representing\n # the tile in the top-left corner of the maze\n self.__user_x = 1\n self.__user_y = 1", "def put_pawn_at(board: Board, pawn_value: int, x: int, y: int) -> None:\n\n board[y][x] = 1 if argv[2] is '0' else pawn_value", "def get_clicked_tile(self, x: int, y: int) -> Optional[Point]:\n\t\ttile_x = x//(self.canvas_width//self.board_size)\n\t\ttile_y = y//(self.canvas_height//self.board_size)\n\n\t\tif tile_x < 0 or tile_x >= self.board_size or tile_y < 0 or tile_y >= self.board_size:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn Point(tile_x, tile_y)", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def 
get_random_empty_location(player_x, player_y, min_distance_from_player):\n empty_locations = []\n for row in range(height):\n for column in range(width):\n if False: # TODO: Replace False with condition that checks that location (column, row) is empty and at least min_distance_from_player squares from the player.\n # TODO: Add the empty location tuple to the list of empty locations.\n pass\n # TODO: If the list of empty locations is empty, return None.\n # TODO: Otherwise, return one of the location tuples from the list.", "def get_random_position(self):\n if self._geometry_type in ['area', 'circle']:\n geo = self.get_geometry()\n min_x, min_y, max_x, max_y = geo.bounds\n pnt = Point(\n random.uniform(min_x, max_x), \n random.uniform(min_y, max_y))\n while not geo.contains(pnt):\n pnt = Point(\n random.uniform(min_x, max_x), \n random.uniform(min_y, max_y))\n return pnt\n else:\n return None", "def __get_random_player_position(self) -> Tuple[int, int]:\n no_player_position = True\n while no_player_position:\n for row in range(0, self.__labyrinth.labyrinth_height):\n for col in range(0, self.__labyrinth.labyrinth_width):\n if self.__labyrinth[row][col] == Labyrinth.FLOOR and no_player_position:\n self.__row_position = row\n self.__col_position = col\n\n if len(self.__path_to_end()) > self.__labyrinth.labyrinth_width and \\\n len(self.__path_to_end()) > self.__labyrinth.labyrinth_height:\n self.__labyrinth[row][col] = Labyrinth.START\n no_player_position = False\n\n return self.__row_position, self.__col_position", "def getKickingPosition():\n pass", "def _input_coords(game,player):\r\n print(Player.get_name(player)+'('+Player.get_spec(player)+')'+\" it's your turn\")\r\n coords = input(\"coords of cell? \")\r\n coords = coords.split(',')\r\n try :\r\n x = int(coords[0])\r\n y = int(coords[1])\r\n if game[x][y] == '' : \r\n return (x,y)\r\n except :\r\n return _input_coords(game,player)\r\n print('illegal play, choose an empty cell')\r\n return _input_coords(game,player)", "def get_piece(self, selected_piece_coords):\n for piece in self.game_pieces:\n if selected_piece_coords[0] == piece.x and selected_piece_coords[1] == piece.y:\n return piece.clone()\n return None", "def last_pos(self) -> tuple[int, int]:\n if not self.actions:\n return (self.start_x, self.start_y)\n else:\n box = self.get_hitbox_at(self.time_consumed)\n return box.pos_x, box.pos_y", "def default_location(self, thing):\n if isinstance(thing, Agent):\n return (1, 1)\n else:\n get_rand = lambda arg: random.choice(range(1, arg-1))\n while True:\n xloc, yloc = (get_rand(self.width), get_rand(self.height))\n if (1, 1) == (xloc, yloc):\n continue\n break\n return (xloc, yloc)", "def firstMove(board):\r\n x = board.size / 2\r\n return (x, x)", "def getPoint(self):\n return Point(*self.position)", "def get_absolute_pos(x, y, base):\n\n # give a small deadzone\n new_x = base[0] + (int(x / 2) if abs(x) > 2 else 0)\n new_y = base[1] - (int(y / 2) if abs(y) > 2 else 0)\n\n return (new_x, new_y)", "def respawn(x,y):\n\t\tpos = Board.prev_j+1\n\t\twhile pos<y:\n\t\t\tif (Board.board[x][pos]==' ' or Board.board[x][pos]=='.') and Board.board[x+1][pos]!='-':\n\t\t\t\tBoard.board[x][pos]='M'\n\t\t\t\tbreak\n\t\t\tpos+=1", "def random_position(self):\n\t\treturn (random.randint(1, self.max_x-2), random.randint(1,self.max_y-2))", "def draw_X():\r\n x,y = pygame.mouse.get_pos()\r\n x = 3*x/300\r\n y = 3*y/300\r\n x = approximate(x)\r\n y = approximate(y)\r\n pos_x = 0\r\n pos_y = 0\r\n if x == 50:pos_x = 0\r\n elif x == 150:pos_x=1\r\n elif x 
== 250:pos_x=2\r\n if y == 50:pos_y=0\r\n elif y == 150:pos_y=1\r\n elif y == 250:pos_y=2\r\n if positions[pos_y][pos_x] == 0:\r\n positions[pos_y][pos_x] = -1\r\n pygame.draw.line(screen,(255,255,255),(x-40,y-40),(x+40,y+40),10)\r\n pygame.draw.line(screen,(255,255,255),(x+40,y-40),(x-40,y+40),10)\r\n players.reverse()\r\n else: \r\n print('the spot was occupied')\r\n time.sleep(.25)", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = 600", "def get_pos(self, off_w=0, off_l=0, off_h=0):\n try:\n return self.world_grid[self.w + off_w][self.l + off_l][self.h + off_h]\n except IndexError:\n return blocks['wall']", "def spawn(self, y, x, h, w):\n self.pos = (np.random.randint(y, y + h), np.random.randint(x, x + w))", "def get_piece(self, position):\n return self.board[position[0]][position[1]]", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = SCREEN_HEIGHT / 2\n self.angle = 0", "def innerSpawn( self, tCoords, result, argsList ):\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.isHills() or pCurrent.isFlatlands():\n\t\t\tif pCurrent.getTerrainType() != con.iWetland:\n\t\t\t\tif not pCurrent.isCity() and not pCurrent.isUnit():\n\t\t\t\t\tbClean = True\n\t\t\t\t\tfor x in range(tCoords[0] - 1, tCoords[0] + 2):\t\t# from x-1 to x+1\n\t\t\t\t\t\tfor y in range(tCoords[1] - 1, tCoords[1] + 2):\t# from y-1 to y+1\n\t\t\t\t\t\t\tif gc.getMap().plot(x,y).isUnit():\n\t\t\t\t\t\t\t\tbClean = False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif bClean:\n\t\t\t\t\t\tif pCurrent.getOwner() in argsList:\n\t\t\t\t\t\t\t# this is a good plot, so paint it and continue search\n\t\t\t\t\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)", "def random_position(width, height):\n x = random.randrange(0, width)\n y = random.randrange(0, height)\n return x,y", "def get_center_position( peg, position_on_peg):\n x = Hanoi.FIRST_TOWER_X + peg * Hanoi.DISTANCE_BETWEEN_TOWER\n y = position_on_peg * Hanoi.DISK_HEIGHT + 0.5 * Hanoi.DISK_HEIGHT\n return (x, y)", "def pupil_left_coords(self):\n if self.pupils_located:\n x = self.eye_left.origin[0] + self.eye_left.pupil.x + self.x_add\n y = self.eye_left.origin[1] + self.eye_left.pupil.y + self.y_add\n return (x, y)", "def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]", "def room_xy(room, x, y, value=None):\n return room[x][y]", "def assign_spawn_point(self) -> StaticTile:\n spawn_tile = self.spawn_points[self.active_spawns]\n self.active_spawns += 1\n return spawn_tile", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def outerSpawn( self, tCoords, result, argsList ):\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.isHills() or pCurrent.isFlatlands():\n\t\t\tif pCurrent.getTerrainType() != con.iWetland:\n\t\t\t\tif not pCurrent.isCity() and not pCurrent.isUnit():\n\t\t\t\t\tbClean = True\n\t\t\t\t\tfor x in range(tCoords[0] - 1, tCoords[0] + 2):\t\t# from x-1 to x+1\n\t\t\t\t\t\tfor y in range(tCoords[1] - 1, tCoords[1] + 2):\t# from y-1 to 
y+1\n\t\t\t\t\t\t\tif gc.getMap().plot(x,y).isUnit():\n\t\t\t\t\t\t\t\tbClean = False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif bClean:\n\t\t\t\t\t\tif pCurrent.calculateCulturalOwner() == -1: # edead: bugfix\n\t\t\t\t\t\t\t# this is a good plot, so paint it and continue search\n\t\t\t\t\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)", "def random_empty_position(current_animal, grid_size, all_animals):\n all_neighbors = list_neighbors(current_animal.row, current_animal.col,\n grid_size)\n\n occupied = set()\n for x in all_animals:\n occupied.add((x.row, x.col))\n\n neighbors = []\n for x in all_neighbors:\n if x not in occupied:\n neighbors.append(x)\n\n if len(neighbors) == 0:\n return None\n\n row, col = my_random_choice(neighbors)\n return (row, col)", "def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])", "def _get_piece_at_opening_or_none(self, x, y):\n if self._is_valid_opening(x, y):\n return self._openings[x][y]\n else:\n return Piece.NONE", "def __get_random_hotspot(self):\n x_min = self.occupancy_map.info.origin.position.x\n x_max = x_min + self.occupancy_map.info.width * self.occupancy_map.info.resolution\n y_min = self.occupancy_map.info.origin.position.y\n y_max = y_min + self.occupancy_map.info.height * \\\n self.occupancy_map.info.resolution\n # This might bes a bit strange, but we have the following problem:\n # some simulators need a square version of the same map. A square version\n # will have other x_max or y_max and thus the random hotspots will be different.\n # TO prevent this, we will always take only the max value of either x_max or y_max.\n # This will be the same for the square version and the not-square version (of the same map).\n max_value = max(x_max, y_max)\n\n # search for a not occupied position\n while True:\n # previously: x = random.uniform(x_min, x_max) # see problem description above\n x = random.uniform(x_min, max_value)\n # previously: y = random.uniform(y_min, y_max) # see problem description above\n y = random.uniform(y_min, max_value)\n # due to the workaround for the problem above, it can be that the value is out\n # of map for the not square map version. 
We need to skip this (the square\n # map version will skip it due to occupied cell...):\n if x <= x_max and y <= y_max:\n cell_x = min(int(\n (x - x_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.width - 1)\n cell_y = min(int(\n (y - y_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.height - 1)\n if not self.__cell_is_occupied(cell_x, cell_y):\n break\n spread = random.uniform(0.5, 1.0)\n return (x, y, spread)", "def pickPlace(distx = 10,disty = 10, midDist = 20,defaultHeight= -70,linHeight = 10, robotHeight = -90):\n pickPlacePos = []\n yCount = [0, 1]\n xCount = [-1,0,1]\n for numx in xCount:\n for numy in yCount:\n pickPlacePos.append([numx*distx, numy*disty-midDist, robotHeight+linHeight,0,0,0,'lin'])\n pickPlacePos.append([numx*distx, numy*disty-midDist, robotHeight,0,0,0,'mov'])\n pickPlacePos.append([numx*distx, numy*disty-midDist, robotHeight+linHeight,0,0,0,'lin'])\n\n pickPlacePos.append([numx*distx, 0, defaultHeight,0,0,0,'mov'])\n\n pickPlacePos.append([numx*distx, numy*disty+midDist, robotHeight+linHeight,0,0,0,'lin'])\n pickPlacePos.append([numx*distx, numy*disty+midDist, robotHeight,0,0,0,'mov'])\n pickPlacePos.append([numx*distx, numy*disty+midDist, robotHeight+linHeight,0,0,0,'lin'])\n\n pickPlacePos.append([numx*distx, 0, defaultHeight,0,0,0,'mov'])\n\n pickPlacePos.append([0,0,-127,0,0,0,'mov'])\n return pickPlacePos", "def get_pawn_moves(self, state):\n pawn_moves = []\n\n if self.color == cc.WHITE_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_UP)\n forward_2 = add_vectors(self.coord, cc.V_UP_2)\n attacks = get_crawler_moves(self.coord, cc.W_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_2\n promo_rank = cc.RANK_8\n promo_pieces = cc.WHITE_PROMO\n enemy_set = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_DOWN)\n forward_2 = add_vectors(self.coord, cc.V_DOWN_2)\n attacks = get_crawler_moves(self.coord, cc.B_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_7\n promo_rank = cc.RANK_1\n promo_pieces = cc.BLACK_PROMO\n enemy_set = cc.WHITE_PIECES\n else:\n raise Exception(\"get_pawn_moves: Invalid Piece Color\")\n\n if validate_move(forward_1) and state.board[forward_1] == cc.NO_PIECE:\n if forward_1[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1))\n if self.coord[0] == starting_rank and validate_move(forward_2) and state.board[forward_2] == cc.NO_PIECE:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_2, en_p=forward_1))\n\n for attack in attacks:\n if state.board[attack] in enemy_set:\n if attack[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n # Make sure Pawns can attack en_passant squares\n elif attack == state.en_passant:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n\n return pawn_moves", "def position_to_stone(self, position):\n if len(position) != 2:\n stone = -1\n return stone\n h = position[0]\n w = position[1]\n stone = h * self.width + w\n if stone not in self.blanks:\n stone = -1 # -1 means the current position is blank.\n return stone", "def getRandomPosition(self):\n\n #random.seed(1)#for repeatable results.\n #pos[0] = random.randint(0, self.width-1) # -1 since randint Returns a random integer N such that a 
<= N <= b.\n #pos[1] = random.randint(0, self.height-1)# -1 since randint Returns a random integer N such that a <= N <= b.\n #above solution uses ints and returns random tile positions, not random positions.\n #NB: must use init method of Potion object\n return Position(random.random()*self.width, random.random()*self.height)\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html", "def crearParedX(self, posx, posy):\n\n # Marco el punto como nodo y guardo\n k = (3, self.matrixMAPA[posx][posy][1])\n self.matrixMAPA[posx][posy] = k\n\n\n # Pinto hacia -x hasta que termine o hasta que encuentre un muro\n for i in range(0, posy):\n \n iterator = (posy-1) - i\n # Capturo el punto\n # Si no es pared continue\n k = self.matrixMAPA[posx][iterator]\n # Si el valor esta permitido pinte si no termine\n if k[0] != 1 and k[0] != 2 and k[0] != 3:\n # Marco como pared\n alpha = (2, self.matrixMAPA[posx][iterator][1])\n # Guardo lo marcado\n self.matrixMAPA[posx][iterator] = alpha\n else:\n break\n\n # Pinto hacia +x \n for j in range(posy+1, len(self.matrixMAPA[posx])):\n # Capturo el valor\n k = self.matrixMAPA[posx][j]\n # Si el valor esta permito pinte \n if k[0] != 1 and k[0] != 2 and k[0] != 3:\n # marco como pared\n alpha = (2, self.matrixMAPA[posx][j][1])\n # Guardo\n self.matrixMAPA[posx][j] = alpha\n else:\n break", "def get_pos(self):\n return (self.x, self.y)", "def return_cell(self):\n\n pos = pygame.mouse.get_pos()\n\n x = pos[1] // (self.cell_size+1)\n y = pos[0] // (self.cell_size+1)\n\n return self.grid[x][y]", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 
25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def __occupy(self, x, y, player):\n queue = [(x, y)]\n area = []\n closed = True\n while closed and len(queue) > 0:\n (x, y) = queue.pop(0)\n if (x, y) in area:\n continue\n area.append((x, y))\n #print(\"Q: \", queue)\n #print(\"A: \", area)\n # try to extend in possible directions\n if not self.has_border(x, y, \"top\"):\n if y == 0: # leaving the board\n closed = False\n queue.append((x, y - 1))\n if not self.has_border(x, y, \"right\"):\n if x == self.__size - 1: # leaving the board\n closed = False\n queue.append((x + 1, y))\n if not self.has_border(x, y, \"bottom\"):\n if y == self.__size - 1: # leaving the board\n closed = False\n queue.append((x, y + 1))\n if not self.has_border(x, y, \"left\"):\n if x == 0: # leaving the board\n closed = False\n queue.append((x - 1, y))\n\n if closed: # closed area => occupy it by player\n for (x, y) in area:\n self.__board[y][x] += player * 16\n self.__colored[player - 1] += 1", "def shuffle_pos(self, ):\n x, y = 0, 0\n while self.maze.structure[int(y / 40)][int(x / 40)] != \"0\" \\\n or (x, y) in self.forbiden_tulpes:\n x = random.randint(0, 14) * sprite_size\n y = random.randint(0, 14) * sprite_size\n self.forbiden_tulpes.append((x, y))\n return x, y", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def getpiece(self, x, y, z):\n\n for p in self.pieces:\n if (p.x, p.y, p.z) == (x, y, z):\n return p", "def random_location(self):\r\n\r\n while True:\r\n pt = (random.uniform(self.worldbox.tl[0], self.worldbox.br[0]),\r\n random.uniform(self.worldbox.tl[1], self.worldbox.br[1]))\r\n if not self.is_wall(pt) and not self.is_target(pt):\r\n return pt", "def pixel(self, x, y, color=None):\n if not 0 <= x <= 4:\n return None\n if not 0 <= y <= 6:\n return None\n return super()._pixel(x, y, color)", "def initialCoordinates():\r\n return (-250,-250)", "def put_piece(self, x: int, y: int):\n if self.game_board.board_values[x, y] != 0:\n print(x, y)\n raise exceptions.NonEmptySlotError(\"You must select a empty slot!\")\n\n self.game_board.put_piece(x, y, self.current_piece)", "def firstMove(self):\n return (10, 10)", "def pos_as_point(self) -> Point:\n return Point(self.x, self.y, 0)", "def _get_random_position(self):\n return (random.randrange(0, self.maze.width),\n random.randrange(0, self.maze.height))", "def get_penguin_placement(self, state: FishGameState):\n return self.strategy.place_penguin(state=state)", "def position(x, y):\n command([x + 0x80, y + 0x40])", "def 
get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def get_our_tile(self, x, y):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\treturn self.our_tiles[x][y]\n\t\treturn None", "def is_pawn_move_valid(self, from_row, from_col, to_row, to_col):\n # Setup variables used\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n to_piece = self.board.squares[to_row][to_col]\n row_diff = abs(from_row - to_row)\n col_diff = abs(from_col - to_col)\n dc = 0\n\n # Set flag for first move of pawn\n first_move = True if from_row == 6 or from_row == 1 else False\n\n # If direction is not correct for white, exit\n if to_row - from_row > 0:\n dr = 1\n if self.piece_color(piece) == \"white\":\n return False\n\n # If direction is not correct for black, exit\n if to_row - from_row < 0:\n dr = -1\n if self.piece_color(piece) == \"black\":\n return False\n\n # If moving straight\n if from_col == to_col:\n # if not legal straight move, exit\n if not (row_diff == 1 or (first_move and row_diff == 2)):\n return False\n\n # make sure to move has no pieces on straight path\n dm = row_diff + 1\n\n # return value\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm)\n\n# if retVal and not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n\n return retVal\n\n # WHITE en passant\n # move from moveHistory => (\"piece\", fromRow, fromCol, toRow, toCol)\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♟\" and self.moveHistory[-1][1] == 1 and\\\n self.moveHistory[-1][3] == 3 and piece_color == \"white\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # BLACK en passant\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♙\" and self.moveHistory[-1][1] == 6 and\\\n self.moveHistory[-1][3] == 4 and piece_color == \"black\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # else move must be taking piece directly move\n # if legal taking piece move and (opponent-already check for own piece) piece at to-square\n if col_diff == 1 and row_diff == 1 and to_piece != None:\n\n# if not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n return True\n\n return False", "def outerSeaSpawn( self, tCoords, result, argsList ):\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.isWater() and not pCurrent.isLake(): # edead: no barbs in lakes!\n\t\t\tif not pCurrent.isCity() and not pCurrent.isUnit():\n\t\t\t\tif pCurrent.calculateCulturalOwner() == -1: # edead: 
bugfix\n\t\t\t\t\tbClean = True\n\t\t\t\t\tfor x in range(tCoords[0] - 1, tCoords[0] + 2):\t\t# from x-1 to x+1\n\t\t\t\t\t\tfor y in range(tCoords[1] - 1, tCoords[1] + 2):\t# from y-1 to y+1\n\t\t\t\t\t\t\tif gc.getMap().plot(x,y).isUnit():\n\t\t\t\t\t\t\t\tbClean = False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif bClean:\n\t\t\t\t\t\t# this is a good plot, so paint it and continue search\n\t\t\t\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)", "def xy_occupied(xy, board):\n return True if board[xy[0]][xy[1]] else False", "def update(board: Board, pawn_value: int, x: int, y: int) -> None:\n\n put_pawn_at(board, pawn_value, x, y)\n block_row(board, pawn_value, y)\n block_column(board, pawn_value, x)\n block_diagonals(board, pawn_value, x, y)", "def peek(self, pos_x, pos_y):\n self.validate_position(pos_x, pos_y)\n return self.map[pos_x][pos_y]", "def get(self, x: int, y: int, /) -> int:", "def getRandomPosition(self):\n return Position(random.random()*self.w, random.random()*self.h)", "def get_tile_location(self):\n if self.rect.x == 0:\n tile_x = 0\n elif self.rect.x % 32 == 0:\n tile_x = (self.rect.x / 32)\n else:\n tile_x = 0\n\n if self.rect.y == 0:\n tile_y = 0\n elif self.rect.y % 32 == 0:\n tile_y = (self.rect.y / 32)\n else:\n tile_y = 0\n\n return [tile_x, tile_y]", "def generate_point(width, height):\n x = random.randrange(0 - OFFSET, width + OFFSET, 1)\n y = random.randrange(0 - OFFSET, height + OFFSET, 1)\n return (x, y)", "def _show_piece(self, x_pos, y_pos):\n piece = self.board[x_pos, y_pos]\n if isinstance(piece, Pawn): return '{}P'.format(piece.color[0])\n elif isinstance(piece, Knight): return '{}N'.format(piece.color[0])\n elif isinstance(piece, Bishop): return '{}B'.format(piece.color[0])\n elif isinstance(piece, Rook): return '{}R'.format(piece.color[0])\n elif isinstance(piece, Queen): return '{}Q'.format(piece.color[0])\n elif isinstance(piece, King): return '{}K'.format(piece.color[0])\n else: return ' '", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def guess(self, row, col) -> Tuple[int, Optional[ship.Ship]]:\n my_ship: ship.Ship = self._board_matrix[row][col]\n\n # if my_ship is None the guess is a miss, otherwise its a hit\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.guess, just copy the code over\n\n # --------- END YOUR CODE ----------", "def assgin_pos(self, range_x, range_y, n_p):\n # n_p random integers\n pos_x = random.sample(range(0, int(100*n_p)), n_p)\n # get a random number\n tmp1 = random.uniform(0, 1)\n # keep position in the range of x and looks \"very random\"\n pos_x %= range_x - tmp1\n # same procedure for y\n pos_y = random.sample(range(0, int(100*n_p)), n_p)\n tmp1 = random.uniform(0, 1)\n pos_y %= range_y - tmp1\n return pos_x, pos_y", "def spawn_point(self):\n return self._spawn_point", "def _checkPawn(self):\r\n pawn = self.startCell.getPawn()\r\n if(not pawn.can_go_on(self.endCell)):\r\n message = (\"This pawn (%r) cannot go on this type of cell (%r)\" %\r\n (pawn, self.endCell.type))\r\n raise IllegalMoveException(message)\r\n return True", "def getRandomPosition(self):\n\t\tp = Position(random.randrange(0, self.width), random.randrange(0, self.height))\n\t\treturn p", "def shoot(self, point: Point) -> Tuple[bool, bool, ShipType]:\n\n # Shot off board\n if not self.point_in_board(point):\n raise InvalidShotException(f'{point} is not on the board')\n\n # Point has already been shot\n elif 
self.point_is_shot(point):\n raise InvalidShotException(f'{point} has already been shot')\n\n else:\n self.shot_locations.add(point)\n is_hit = True if point in self.all_ship_locations else False\n is_sunk = False\n ship_sunk = None\n\n if is_hit:\n # find out which one of the ships was shot\n for k, v in self.individual_ship_locations.items():\n # if v was the ship that was shot\n if point in v:\n # remove the point from v\n v.remove(point)\n if len(v) == 0:\n is_sunk = True\n ship_sunk = k\n\n return is_hit, is_sunk, ship_sunk", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def position(self):\n return self.x, self.y", "def get_hit_box(self):\n hit_box = self.player_image.get_rect()\n hit_box[0] = self.player_position[\"x\"]\n hit_box[1] = self.player_position[\"y\"]\n return hit_box", "def getRandomPosition(self):\n x = random.randint(0, self.width - 1)\n y = random.randint(0, self.height - 1)\n return Position(x, y)", "def do_unsafe(self):\r\n if(not self.isLegal()):\r\n raise IllegalMoveException(self.whyNotLegal())\r\n pawn = self.startCell.takePawn()\r\n self.endCell.placePawn(pawn)", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def to_pygame(self, p):\n return int(p.x), int(-p.y+600)", "def _get_piece_center(self, piece_coord, piece, terrain_centers):\r\n if piece.type == PieceType.road:\r\n # these pieces are on edges\r\n tile_id = hexgrid.nearest_tile_to_edge(piece_coord)\r\n tile_coord = hexgrid.tile_id_to_coord(tile_id)\r\n direction = hexgrid.tile_edge_offset_to_direction(piece_coord - tile_coord)\r\n terrain_x, terrain_y = terrain_centers[tile_id]\r\n angle = 60*self._edge_angle_order.index(direction)\r\n dx = math.cos(math.radians(angle)) * self.distance_tile_to_edge()\r\n dy = math.sin(math.radians(angle)) * self.distance_tile_to_edge()\r\n return terrain_x + dx, terrain_y + dy, angle + 90\r\n elif piece.type in [PieceType.settlement, PieceType.city]:\r\n # these pieces are on nodes\r\n tile_id = hexgrid.nearest_tile_to_node(piece_coord)\r\n tile_coord = hexgrid.tile_id_to_coord(tile_id)\r\n direction = hexgrid.tile_node_offset_to_direction(piece_coord - tile_coord)\r\n terrain_x, terrain_y = terrain_centers[tile_id]\r\n angle = 30 + 60*self._node_angle_order.index(direction)\r\n dx = math.cos(math.radians(angle)) * self._tile_radius\r\n dy = math.sin(math.radians(angle)) * self._tile_radius\r\n return terrain_x + dx, terrain_y + dy, 0\r\n elif piece.type == PieceType.robber:\r\n # these pieces are on tiles\r\n tile_id = hexgrid.tile_id_from_coord(piece_coord)\r\n terrain_x, terrain_y = terrain_centers[tile_id]\r\n return terrain_x, terrain_y, 0\r\n else:\r\n logging.warning('Unknown piece={}'.format(piece))", "def setup_player_position(world, empty, player):\n\n position = [0, 0]\n is_valid = False\n\n while not is_valid:\n position[0] = randint(0, len(world) - 1)\n position[1] = randint(0, len(world) - 1)\n\n if world[position[0]][position[1]] == empty:\n is_valid = True\n world[position[0]][position[1]] = player\n\n return position", "def _checkPlayer(self):\r\n pawn = self.startCell.getPawn()\r\n if(not pawn.owner == self.player):\r\n message = (\"Player (%r) is not allowed to move that pawn (%r)\" %\r\n (self.player, pawn))\r\n raise IllegalMoveException(message)", "def get_position(self, position):", "def calculate_entry_exit(self, pos_y, pos_x):\r\n return (pos_y * 70 + 35), (pos_x * 70 + 35)", "def 
coordinate(self):\n\t\tif self.boldness_coord is None and self.price_coord is None and self.hold_coord is None:\n\t\t\treturn None\n\n\t\treturn (self.boldness_coord, self.price_coord, self.hold_coord)", "def room_at(self, x, y):\r\n return self.__maze[x][y]", "def input_stone_position():\n p_1 = input(\"input first co-ordinate, range 0 to 7:\")\n p_2 = input(\"input second co-ordinate, range 0 to 7:\")\n # if input is anything else but 1 2 3 4 5 6 7 8 9 0 ipython shell returns a ValueError\n\n try:\n return (int(p_1), int(p_2))\n except ValueError as val_err:\n print(\"A ValueError occured with message {}\".format(val_err))\n print(\"You should input something like 1 (then press ENTER) 5 (then press ENTER).\")\n repeat = input(\"Do you want to try again [type t] or end the game [type e] or continue [type what you want]?\")\n if repeat == 't':\n return input_stone_position()\n elif repeat == 'e':\n print(\"Press ctrl + c to end the game.\")" ]
[ "0.6292175", "0.628745", "0.61434907", "0.61041254", "0.6023498", "0.60125977", "0.59928966", "0.5978027", "0.5957675", "0.59455156", "0.5943654", "0.5890515", "0.58391374", "0.5811578", "0.5807527", "0.57815", "0.57768774", "0.57580405", "0.5754607", "0.57248765", "0.5722037", "0.5657106", "0.5655582", "0.5655443", "0.56396013", "0.56387126", "0.5634304", "0.5633164", "0.5624075", "0.5611899", "0.56016004", "0.5598816", "0.5589041", "0.5585464", "0.5577566", "0.5570165", "0.5563897", "0.5559242", "0.5553902", "0.55453914", "0.55410933", "0.5539457", "0.5533906", "0.5527768", "0.5515817", "0.55092305", "0.5498963", "0.54977894", "0.5493953", "0.54910046", "0.54866946", "0.5477511", "0.5473517", "0.5464037", "0.54589707", "0.54353225", "0.54235685", "0.54206485", "0.53984547", "0.5386559", "0.5385362", "0.5380641", "0.53703076", "0.5369765", "0.5367784", "0.5361226", "0.5358391", "0.5355636", "0.5351469", "0.5342905", "0.53370976", "0.53355694", "0.5335449", "0.5327614", "0.53263474", "0.5326228", "0.5325885", "0.53230554", "0.5320174", "0.53183126", "0.53181815", "0.5309922", "0.5308639", "0.52985597", "0.52973646", "0.529334", "0.5292807", "0.5291434", "0.5289749", "0.5285891", "0.52831197", "0.5278541", "0.5276351", "0.52757263", "0.5273823", "0.5271783", "0.52709526", "0.52702016", "0.5268249", "0.52671766" ]
0.8193444
0
Check if it's a win for the player.
def winFor(self,player):
    if(self.cachedWin == False):
        won = False
        if(player==WHITE):
            for x in range(0,WIDTH):
                if(self.gameState[x,0]==WHITE):
                    won = True
        elif(player==BLACK):
            for x in range(0,WIDTH):
                if(self.gameState[x,HEIGHT-1]==BLACK):
                    won = True
        if(len(self.successors()) == 0): # if there are no available moves for both players
            bCount = self.count(BLACK) # check who has the most pawns
            wCount = self.count(WHITE) # fixed: the source counted BLACK here too, a likely copy-paste slip
            if(bCount>wCount):
                self.cachedWin = True
                self.cachedWinner = player
                return True
            if(wCount>bCount):
                self.cachedWin = True
                self.cachedWinner = player
                return True
        if(won):
            self.cachedWin = True
            self.cachedWinner = player
            return True
        else:
            return False
    else:
        return player == self.cachedWinner
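A hedged sketch exercising just the back-rank test from winFor above; the standalone helper, board size, and constants are assumptions for illustration, not the record's actual class:

import numpy as np

EMPTY, WHITE, BLACK = 0, 1, 2  # assumed encodings
WIDTH, HEIGHT = 8, 8           # assumed board size

def reached_back_rank(gameState, player):
    # WHITE wins by reaching row 0, BLACK by reaching row HEIGHT-1,
    # mirroring the two scan loops in winFor.
    row = 0 if player == WHITE else HEIGHT - 1
    return any(gameState[x, row] == player for x in range(WIDTH))

state = np.full((WIDTH, HEIGHT), EMPTY)
state[5, 0] = WHITE
assert reached_back_rank(state, WHITE)
assert not reached_back_rank(state, BLACK)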
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_won(board, player):\r\n return False", "def has_won(board, player):\n return False", "def checkForWin(self):\n w = self.getWinner()\n if w == PLAYER or w == AI:\n # self.printBoard()\n # print('%d'%w + ' won!')\n return\n if w == Tie:\n # print('Tie')\n return", "def check_win(self, player):\n def check_row_win(player):\n for row in self.game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_column_win(player):\n # For doing a column check, transpose the grid and do a row check\n trans_game_state = numpy.transpose(self.game_state)\n for row in trans_game_state:\n if player == row[0] == row[1] == row[2]:\n return True\n return False\n\n def check_diag_win(player):\n # Left to right diagonal\n if player == self.game_state[0][0] == self.game_state[1][1] == self.game_state[2][2]:\n return True\n # Right to left diagonal\n if player == self.game_state[0][2] == self.game_state[1][1] == self.game_state[2][0]:\n return True\n return False\n\n if check_column_win(player) or check_diag_win(player) or check_row_win(player):\n return True\n return False", "def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player", "def check_win(self, player):\n for win_pos in TicTacToe.win_pos:\n # for each winning position defined we take the set difference to the positions played be player\n # if there are not elements left after resulting set after difference operator\n # we get False as return. ie he has placed his marker in the winning positions which in turn makes him\n # the winner\n if not win_pos.difference(self.player_played_pos[player]):\n return True\n\n # if after checking for every winning positions if the control still reaches here,\n # the player has not marked the winning positions. 
returns False\n return False", "def check_winner(self):\n pass", "def hasWin(self) :\n comparison = self.compareNumberUser()\n if (comparison == 'equal') :\n return True\n else :\n return False", "def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False", "def check_win_lose(self):\n if self.b.get_player_i() == 7: # player got to the bank\n return 1 # win\n if self.b.get_chaser_i() == self.b.get_player_i(): # chaser catch the player\n return 2 # lose\n return 0 # nothing", "def checkForWin(self, board, player):\n\t\tif ((board[0][0] == player and board[0][1] == player and board[0][2] == player) or\n\t\t\t(board[1][0] == player and board[1][1] == player and board[1][2] == player) or\n\t\t\t(board[2][0] == player and board[2][1] == player and board[2][2] == player) or\n\t\t\t(board[0][0] == player and board[1][1] == player and board[2][2] == player) or\n\t\t\t(board[0][2] == player and board[1][1] == player and board[2][0] == player) or\n\t\t\t(board[0][0] == player and board[1][0] == player and board[2][0] == player) or\n\t\t\t(board[0][1] == player and board[1][1] == player and board[2][1] == player) or\n\t\t\t(board[0][2] == player and board[1][2] == player and board[2][2] == player)):\n\t\t\tprint(\"----------------------------\")\n\t\t\tprint(\"Yay! Player%d is the winner!\" % player)\n\t\t\tprint(\"----------------------------\")\n\t\t\tself.win = player", "def _check_for_win(self):\n slots_available = any(\n [slot.available for slot in self.board.iter_slots() if not slot.mine]\n )\n if not slots_available:\n self.status = GameStatusEnum.won\n self.end_time = datetime.utcnow()", "def determine_win(self):\n if self.match.radiant_win is True and self.player_slot < 5:\n return True\n if self.match.radiant_win is False and self.player_slot > 5:\n return True\n return False", "def is_winner(self, player) -> bool:\n return (self.current_state.get_current_player_name() != player\n and self.is_over(self.current_state))", "def is_winner(self, player):\n return (self.current_state.get_current_player_name() != player\n and self.is_over(self.current_state))", "def is_game_won(self):\n return True", "def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n self.game_x_won()\n return\n\n gameOver = None\n if self.turn > 5:\n gameOver = self.check_o_won()\n if gameOver is True:\n self.game_o_won()\n return\n\n if self.turn >= 9:\n self.game_tie()\n return", "def is_game_won(self) -> int:\n\n b = self.board\n for c1, c2, c3, c4 in _WINDOWS:\n if b[c1] and (b[c1] == b[c2] == b[c3] == b[c4]):\n print(\"win\", c1, c2, c3, c4)\n return b[c1]", "def checkWinner(self, surface):\r\n winner = True\r\n \r\n # Checks for winner\r\n for point in self.points:\r\n if point.getTeam() == self.getTurn():\r\n winner = False\r\n \r\n # Displays winner message if there is a winner\r\n if winner:\r\n self.surface.fill(BLACK)\r\n winText = graphicalObjects.Text(self.getCurrentString() + ' wins!', WIN_CENTER, 20)\r\n winText.draw(self.surface)\r\n pygame.display.flip()\r\n self.won = True", "def has_a_winner(self):\n return self.state in {State.X_WON, State.O_WON}", "def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False", "def checkWin(self, board):\n for w in self.wins:\n if board[w[0]] != ' ' and (board[w[0]] == board[w[1]] == board[w[2]]):\n self.winner = board[w[0]]\n return True", "def check_win_condition(board) -> bool:\n if _check_vertical_win_condition(board) or 
_check_horizontal_win_condition(board) or _check_diagonal_win_condition(\n board):\n return True\n else:\n board.alternate_current_player()\n return False", "def is_winner(self, player: str) -> bool:\n total_result = self.current_state.hori_result + self.current_state.left_result + self.current_state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item == '2':\n p2_taken += 1\n if player == \"p1\":\n return float(p1_taken) >= total_line/2\n return float(p2_taken) >= total_line/2", "def check_win(self):\r\n wins = [self.check_rows(), self.check_cols(), self.check_diag()]\r\n for case, pos in wins:\r\n if case != -1:\r\n print('Game over!')\r\n if self.grid[case][-1] == self.computer:\r\n print('The computer won!')\r\n return (True, pos)\r\n print('The player won!')\r\n return (True, pos)\r\n\r\n return (self.check_draw(), None)", "def checkForWin (self):\r\n\t\tw = self.getWinner()\r\n\t\tif w:\r\n\t\t\tself.printBoard()\r\n\t\t\traise Exception(w + ' won!')", "def check_player_reached():\n global round_start_timer, round_over\n\n if player1.alive and player1.rect.top < (platform_width // 2):\n add_time_points()\n reset_players()\n player1.wins += 1\n return True\n\n elif player2.alive and (player2.rect.top + player2.image.get_height()) > \\\n (SCREEN_HEIGHT - platform_width):\n player2.wins += 1\n round_over = True\n add_time_points()\n reset_players()\n return True", "def is_game_won(board, player):\n\n\tis_won = False\n\n\tif (\n\t\tboard[0] == board[1] == board[2] == player or\n\t\tboard[3] == board[4] == board[5] == player or\n\t\tboard[6] == board[7] == board[8] == player or\n\t\tboard[0] == board[3] == board[6] == player or\n\t\tboard[1] == board[4] == board[7] == player or\n\t\tboard[2] == board[5] == board[8] == player or\n\t\tboard[0] == board[4] == board[8] == player or\n\t\tboard[2] == board[4] == board[6] == player\n\t):\n\t\tis_won = True\n\n\treturn is_won", "def is_game_win(self):\n return not self.deck and not self.hand", "def winner(self):\n if self.__current_player == 1:\n if self.__fields[0].winner():\n print(self.__players[0]._Player__name + \"is winner!\")\n Game.play = False\n elif self.__current_player == 2:\n if self.__fields[1].winner():\n print(self.__players[1]._Player__name + \"is winner!\")\n Game.play = False", "def uber_check_win(self):\n if self.player1.score == self.player2.score:\n print(\"It's a draw!\")\n elif self.player1.score > self.player2.score:\n print(\"Player 1 is a proper bad ass mother fucker\")\n else:\n print(\"Player numma 2 is a proper bad ass mother fucker\")", "def check_if_won(self):\n if self.player_points > self.enemy_points:\n self.bHasWon = True\n else:\n self.bHasWon = False", "def is_winner(self):\n return self._winner != self.NEUTRAL_PLAYER", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def _checkRoundOver(self):\n\n if not any(player.isAlive() for player in self.teams[0].players):\n self.endGame()", "def win(player1, player2):\n if(player1 == 1 and player2 == 3) or (player1 == 2 and player2 == 1) \\\n or (player1 == 3 and player2 == 2):\n return True", "def check_tie(self, player1, player2):\n if self.check_win(player1) or self.check_win(player2):\n return False\n return self.check_grid_full()", "def is_over(self):\n winner = TictactoeMatch.get_winner(self.inputs_)\n if winner:\n self.result_ = winner\n if Config.USER['debug']['enabled']:\n print 
\"It is over! Player \"+str(self.result_)+\" (\"+str(self.player_label_[self.result_])+\") wins!\"\n return True\n for value in self.inputs_:\n if value == TictactoeMatch.EMPTY:\n if Config.USER['debug']['enabled']:\n print \"Go!\"\n return False\n self.result_ = TictactoeMatch.DRAW\n if Config.USER['debug']['enabled']:\n print \"It is over! Draw!\"\n return True", "def check_for_win(self,board, player_id, action):\n\n row = 0\n\n # check which row was inserted last:\n for i in range(ROWS):\n if board[ROWS - 1 - i, action] == EMPTY_VAL:\n row = ROWS - i\n break\n\n # check horizontal:\n vec = board[row, :] == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n # check vertical:\n vec = board[:, action] == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n # check diagonals:\n vec = np.diagonal(board, action - row) == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n vec = np.diagonal(np.fliplr(board), ACTION_DIM - action - 1 - row) == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n return False", "def has_winner(self):\n\n if self.num_black_pieces == 0 or len(self.get_all_valid_moves(Player.black)) == 0:\n return Player.white\n elif self.num_white_pieces == 0 or len(self.get_all_valid_moves(Player.white)) == 0:\n return Player.black\n elif self.repetition_happened() or self.passive_game():\n return \"Tie\"\n else:\n return None", "def winning_game_player(players):\n\n # in order for there to be a winner, the game must\n # be over\n if not game_over(players):\n return None\n\n # if the game is over, it could be that there is no\n # winner\n active_players = players_with_decks(players)\n if not active_players:\n return False\n\n # if the game is over than find the winner\n return players_with_decks(players)[0]", "def is_win(self, roster):\n player = roster.get_current()\n guess = player.get_move().get_guess()\n if guess == self._code:\n return True\n else:\n return False", "def check_if_game_over():\n # Calling check for winners.\n check_for_winner()\n # Calling check it's tie or not.\n check_if_tie()", "def check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None", "def win():\r\n\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9, move1, move2, player1, player2\r\n\r\n\twin1 = tile1==tile2==tile3==1 or tile1==tile2==tile3==2\r\n\twin2 = tile4==tile5==tile6==1 or tile4==tile5==tile6==2\r\n\twin3 = tile7==tile8==tile9==1 or tile7==tile8==tile9==2\r\n\twin4 = tile1==tile4==tile7==1 or tile1==tile4==tile7==2\r\n\twin5 = tile2==tile5==tile8==1 or tile2==tile5==tile8==2\r\n\twin6 = tile3==tile6==tile9==1 or tile3==tile6==tile9==2\r\n\twin7 = tile1==tile5==tile9==1 or tile1==tile5==tile9==2\r\n\twin8 = tile3==tile5==tile7==1 or tile3==tile5==tile7==2\r\n\r\n\twin = win1 or win2 or win3 or win4 or win5 or win6 or win7 or win8\r\n\treturn win", "def is_winner(self):\n return self.winner", "def check_winner(self):\r\n if all(heap == 0 for heap in self.heaps):\r\n if self.misere:\r\n self.winner = self.other_player\r\n self.loser = self.current_player\r\n else:\r\n self.winner = self.current_player\r\n self.loser = self.other_player", "def player_has_won(board, player):\n check = 
0b000000000\n for index in range(9):\n if board[index] == player:\n check |= (1 << index)\n for pattern in config.WINNING_PATTERNS:\n if pattern & check == pattern:\n return True\n return False", "def won_game(self):\n for player in self.players:\n if len(player.cards) == 0:\n\n return True\n return False", "def checkWin(self):\n winstates = [(0, 1, 2),\n (3, 4, 5),\n (6, 7, 8),\n (0, 3, 6),\n (1, 4, 7),\n (2, 5, 8),\n (0, 4, 8),\n (2, 4, 6)]\n win = False\n for state in winstates:\n if (self.gameState[state[0]] + self.gameState[state[1]] + self.gameState[state[2]]) == 3:\n self.handleWin(1)\n win = True\n elif (self.gameState[state[0]] + self.gameState[state[1]] + self.gameState[state[2]]) == -3:\n self.handleWin(-1)\n win = True\n\n if len([i for i in range(9) if self.gameState[i] == 0]) == 0 and not win:\n print(\"Draw yo\")\n self.handleDraw()\n return None", "def check_won(board,player):\n # X axis\n if (\n (len(set(board[1:4])) == 1 and ' ' not in set(board[1:4])) or\n (len(set(board[4:7])) == 1 and ' ' not in set(board[4:7])) or\n (len(set(board[7:10])) == 1 and ' ' not in set(board[7:10]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Y axis\n if (\n (len(set(board[1::3])) == 1 and ' ' not in set(board[1::3])) or\n (len(set(board[2::3])) == 1 and ' ' not in set(board[2::3])) or\n (len(set(board[3::3])) == 1 and ' ' not in set(board[3::3]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n # Diagonals\n if (\n (len(set(board[1::4])) == 1 and ' ' not in set(board[1::4])) or\n (len(set(board[3:9:2])) == 1 and ' ' not in set(board[3:9:2]))\n ):\n print('Player %s, you win!' % player)\n display_board(board)\n return True\n\n return False", "def __check_winner(self):\n for i in range(0, 3):\n col = self.__get_col(i)\n if col.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if col.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n row = self.__get_row(i)\n if row.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if row.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if diag.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if diag.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n if self.state.count(' ') == 0:\n print('\\nDraw!')\n self.game_ended = True", "def verify_winner(self):\r\n return self.count_pegs() == 1", "def check_win(self) -> Union[Player, None]:\n\t\tfor player in self.players:\n\t\t\tif len(player.deck) == 0:\n\t\t\t\treturn player\n\t\t\n\t\treturn None", "def is_game_won(self):\n if self.game_is_tied():\n return False\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n if my_available_steps == 0 or opp_available_steps == 0:\n return True\n else:\n return False", "def winning_event(self, player):\n # vertical check\n for col in range(GameData.columns):\n if self.board[0][col] == player and self.board[1][col] == player and self.board[2][col] == player:\n self.draw_vertical_winning_line(col, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # horizontal check\n for row in range(GameData.rows):\n if self.board[row][0] == player and self.board[row][1] == player and self.board[row][2] == player:\n 
self.draw_horizontal_winning_line(row, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # ascending diagonal heck\n if self.board[2][0] == player and self.board[1][1] == player and self.board[0][2] == player:\n self.draw_asc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # descending diagonal win chek\n if self.board[0][0] == player and self.board[1][1] == player and self.board[2][2] == player:\n self.draw_desc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n return False", "def check_for_tie():\n global ongoing_game\n check_for_winner()\n if \"*\" not in board and winner is None:\n ongoing_game = False\n print(\"Game is a Tie! \\n\")\n play_again()\n return True\n else:\n return False", "def is_over(self):\n alive_players = [1 if p.status == \"alive\" else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False", "def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")", "def win_game(self):\n\n def horizontal_win():\n \"\"\"Return whether there is horizontal win\"\"\"\n\n for i in range(0, board_size):\n if set(self.board[i]) == set([o_symbol]) or set(self.board[i]) == set([x_symbol]):\n print \"horizontal win\"\n return True\n\n def vertical_win():\n \"\"\"Return whether there is vertical win\"\"\"\n\n vert_set = set()\n for i in range(0, board_size):\n for j in range(0, board_size):\n vert_set.add(self.board[j][i])\n if vert_set == set([o_symbol]) or vert_set == set([x_symbol]):\n print \"vertical win\"\n return True \n vert_set = set()\n\n def diagonal_win():\n \"\"\"Return whether there is diagonal win\"\"\"\n\n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True\n\n if horizontal_win() or vertical_win() or diagonal_win():\n print \"You have won.\"\n return True", "def check_for_game_won(self):\n all_moscuvites_captured = True\n king_captured = True\n king_escaped = True\n for piece in self.game_pieces:\n if piece.player == 2:\n all_moscuvites_captured = False\n elif piece.player == 3:\n king_captured = False\n king_coords = (piece.x,piece.y)\n escape_coords = [(0, 0), (0, 8),\n (8, 0), (8, 8)]\n if king_coords not in escape_coords:\n king_escaped = False\n if king_captured:\n return 2\n elif king_escaped or all_moscuvites_captured:\n return 1\n else:\n return 0", "def check_for_win(cls, context={}):\n\t\tplayers = context.get(constants.CONTEXT.PLAYERS, [])\n\t\tfor p in players:\n\t\t\tif len(p.hand) == 0:\n\t\t\t\treturn p\n\t\treturn None", "def check_winner(self):\n\t\tif self.check_diagonals() or self.check_rows() or self.check_columns():\n\t\t\treturn True\n\t\telif self.board_is_full():\n\t\t\tprint(\"There was a draw, everyone lost\")\n\t\t\treturn 
None\n\t\treturn False", "def check_if_game_over():\n check_for_winner()\n check_for_tie()", "def check_game_status(self):\n for player in (\"1\", \"2\"):\n row_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 1, self.board\n ).any()\n col_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 0, self.board\n ).any()\n d1_win = set(self.data[[0, 4, 8]]) == {player}\n d2_win = set(self.data[[2, 4, 6]]) == {player}\n if any([row_win, col_win, d1_win, d2_win]):\n return (\"win\", player)\n\n if self.counter[\"_\"] == 0:\n return (\"tie\", None)\n else:\n return (\"turn\", \"1\" if self.counter[\"1\"] == self.counter[\"2\"] else \"2\")", "def check_win(self):\n win = None\n for pos in self.winning_pos:\n win = self.is_match(set(self.get_cells(pos)))\n if win:\n return win\n if not self.open_tiles():\n return \"Draw\"\n return win", "def check_win(self):\n for pos in self.win_set:\n # s would be all 1 if all positions of a winning move is fulfilled\n # otherwise 1s and 0s\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "def event_player_wins(self) -> None:\n win_amount = self.user.bet\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(self.user.bet)", "def check_opponent_winning(self):\n valid_actions = self.get_valid_actions()\n copy_board = np.copy(self.board)\n for action in list(valid_actions):\n height = self.get_height(action, board=copy_board)\n self.set(action, height=height, value=self.current_player * -1, board=copy_board)\n\n if self.check_winner(copy_board, action, height) != 0:\n return True\n\n self.set(action, height=height, value=0, board=copy_board)\n\n return False", "def winner(self):\n return self._fetch_element('winner') == 'true'", "def _checkRoundOver(self):\n\n # if we already ended it doesn't matter\n if self.hasEnded():\n return\n\n if not any(player.isAlive() for player in self.teams[0].players):\n # allow continuing after wave 1\n if self._wave > 1:\n self.continueOrEndGame()\n else:\n self.endGame()", "def check_winner(self):\n if DotsAndBoxesState.score1 > 4: # Because the total score is fixed at nine, if player's score is greater than four,\n # then the player is the winner.\n return \"A\"\n else:\n return \"B\"", "def check_for_win(self, row, col, player): \n\n count = 0\n for i in range(0, len(self.board[0])):\n # Check vertical\n if self.board[row][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n\n count = 0\n for i in range(0, len(self.board)):\n # Check horisontal\n if self.board[:, col][i] == player:\n count += 1\n else:\n count = 0\n \n if count == self.max_count:\n return True\n \n count = 0\n totoffset = col - row\n for i in np.diagonal(self.board, offset=totoffset):\n # Check diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True\n\n count = 0\n mirrorboard = np.fliplr(self.board)\n col = self.colswitch[col]\n totoffset = col - row\n for i in np.diagonal(mirrorboard, offset=totoffset):\n # Check other diagonal\n if i == player:\n count += 1\n else:\n count = 0\n\n if count == self.max_count:\n return True", "def check_if_user_won(self, board, pos, player_no):\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_horizontal(has_player_got_4, board, pos, player_no)\n\n if len(has_player_got_4) >= 4:\n return True\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_vertical(has_player_got_4, board, pos, player_no)\n\n if 
len(has_player_got_4) >= 4:\n return True\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_diagonal(has_player_got_4, board, pos, player_no)\n\n if len(has_player_got_4) >= 4:\n return True\n\n has_player_got_4 = set()\n has_player_got_4.add(pos)\n\n self.check_inverted_diagonal(has_player_got_4, board, pos, player_no)\n\n if len(has_player_got_4) >= 4:\n return True\n\n if self.check_if_board_full(board):\n self.draw = True\n return True", "def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0", "def game_over(self):\n return bool(self.last_round and self.last_player == self.current_player)", "def win_game(board :list) -> bool:\n if board == win_state:\n return True\n return False", "def player_has_won(self):\n return len(self._words_guessed) == self._num_words", "def is_over(self, board):\n if _winner(board) != 0:\n return True\n return False", "def playerCanPlay(game, situation, player):\r\n return True", "def row_win(board, player):\n for row in board:\n if check_row(row, player):\n return True\n return False", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def is_winner(self, player, cell):\n \n if len(self.moves) > 2:\n # Need at least 3 moves for a player to win. Normally\n # would be 5, but turn enforcement is not implemented here.\n \n column = cell % 3\n row = cell - (cell % 3)\n diagonal = cell % 2 == 0\n \n victory = False\n \n # For these checks, we slice the cells in question out of the\n # board, compare them all to the player, and finally check that\n # each is True.\n if diagonal:\n victory = victory or \\\n all([c == player for c in self.cells[0:9:4]]) or \\\n all([c == player for c in self.cells[2:8:2]])\n \n victory = victory or \\\n all([c == player for c in self.cells[column:9:3]]) or \\\n all([c == player for c in self.cells[row:row+3]])\n \n return victory\n return False", "def check_win(self, board, move):\n for i, j, k in self.winning_cases:\n if board[i] == move and board[j] == move and board[k] == move:\n return True\n return False", "def game_over(self):\n\n if self._number_of_moves == 9:\n return True\n\n return self._number_of_moves == 9 or self.winner_found()", "def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None", "def is_winning_state(self):\n return self.game.is_winning_state()", "def check_for_win(self, index):\n\n\t\tpossible_comb = self.cell_combinations[index]\n\n\t\tfor comb in possible_comb:\n\n\t\t\ttokens = []\n\t\t\ttokens.append(self.player_model.grid[comb[0]].token)\n\t\t\ttokens.append(self.player_model.grid[comb[1]].token)\n\t\t\ttokens.append(self.player_model.grid[comb[2]].token)\n\n\t\t\tif all([token == 
self.player_model.current_player.token for token in tokens]):\n\n\t\t\t\treturn True\n\n\t\treturn False", "def __check_win_condition(self,token,coordinate):\n # Check if it's even possible to win yet\n if self._turn_counter >= 8 and self._turn_counter+1 < self._board_size**2:\n # Disable win\n # return False\n\n # Up and up right vectors\n vec1=(1,0)\n vec2=(1,1)\n\n # Loop both directions of vector\n for _ in range(2):\n if self.__check_direction(vec1,coordinate):\n self.__declare_winner()\n return True\n if self.__check_direction(vec2,coordinate):\n self.__declare_winner()\n return True\n\n # Turn vector directions\n vec1 = -vec1[1], vec1[0]\n vec2 = -vec2[1], vec2[0]\n\n # Check for draw\n elif self._turn_counter+1 >= self._board_size**2:\n if (self._gui):\n self.__turn_counter_label[\"text\"] = \"Draw!\"\n self.__status[\"text\"] = \"\"\n self.__status.update()\n self.__turn_counter_label.update()\n self._winner = 3\n if (self._state == PLAY): showerror(\"Draw\",\"Draw!\")\n return True", "def check_winner(self):\n if self.player1.chips <= BIG_BLIND_BET:\n return 2\n elif self.player2.chips <= BIG_BLIND_BET:\n return 1\n else:\n return 0", "def player_win(self):\r\n\r\n self.summary = (\" \" * 83) + \"YOU WIN\"\r\n print(\"Player wins against opponent.\\n\")\r\n self.player_wins += 1", "def check_for_winner(self):\r\n\r\n # Iterate through the rows\r\n for row in range(self.height):\r\n if self.board[row][0] == self.board[row][1] == self.board[row][2] and self.board[row][0] != None:\r\n return Board.WIN if self.board[row][0] else Board.LOSS\r\n\r\n # Iterate through the columns\r\n for col in range(self.width):\r\n if self.board[0][col] == self.board[1][col] == self.board[2][col] and self.board[0][col] != None:\r\n return Board.WIN if self.board[0][col] else Board.LOSS\r\n\r\n # Diagonals\r\n if self.board[0][0] == self.board[1][1] == self.board[2][2] and self.board[0][0] != None:\r\n return Board.WIN if self.board[0][0] else Board.LOSS\r\n if self.board[0][2] == self.board[1][1] == self.board[2][0] and self.board[0][2] != None:\r\n return Board.WIN if self.board[0][2] else Board.LOSS\r\n\r\n # No winner yet\r\n return 0", "def enough_players():\n return True", "def check_win(self) -> None:\n if self.word == \"\".join(self.word2):\n print(f\"You won! Word: {self.word}\")\n self.play_again()\n elif self.errors == self.attempts:\n print(f\"You lose!\\nThe word was: {self.word}\")\n self.play_again()", "def has_won(self):\n return len(self.hand) == 0", "def is_round_over(whose_turn,players):\n if ((len(players[whose_turn].hand.cards) == 0) and (players[whose_turn].has_discarded == True)):\n round_over = True\n else:\n round_over = False\n return round_over", "def checkForWin(self, tracker):\n\n # Check damage levels for all ships in the tracker\n # 0 = Fully damaged (sunk), Non-zero otherwise\n for i in tracker.values():\n if i != 0:\n break\n else:\n return True\n return False", "def is_over(self):\n winner = self.get_winner()\n status = bool(winner or not self.available_moves)\n return status, winner", "def get_win_condition(self, info: Dict[str, Any]) -> bool:\n if not self.possible_to_win:\n return False\n return not info[\"lost_live\"] and info[\"terminal\"]", "def winner(position, player):\n if position == 100:\n print(\"Congratulations,\",player + \", you have won.\")\n return True\n else:\n return False" ]
[ "0.82665426", "0.8169365", "0.8155496", "0.7858614", "0.78276265", "0.7803459", "0.7798291", "0.77653474", "0.77357554", "0.76523876", "0.7650778", "0.7639162", "0.76185304", "0.7592742", "0.75793433", "0.757934", "0.7569485", "0.75380087", "0.7517126", "0.74795717", "0.7467015", "0.7464727", "0.7422103", "0.73940426", "0.73882407", "0.7370998", "0.73498994", "0.7340067", "0.732755", "0.7325881", "0.732211", "0.7320684", "0.73188543", "0.7317892", "0.7317396", "0.73022884", "0.7297983", "0.72921306", "0.7275481", "0.7269435", "0.72635806", "0.72635365", "0.7253548", "0.72512376", "0.7244091", "0.7216195", "0.7198554", "0.71901447", "0.71795195", "0.7167977", "0.7167264", "0.7160922", "0.7108491", "0.7104755", "0.709937", "0.70861214", "0.7084189", "0.70807725", "0.70732284", "0.7063544", "0.70584935", "0.7050873", "0.7038908", "0.70350367", "0.7033918", "0.7032862", "0.6996194", "0.69948727", "0.6988028", "0.6975339", "0.6972975", "0.6963928", "0.693865", "0.69325095", "0.6923861", "0.6922583", "0.6921967", "0.6915704", "0.6912867", "0.69078374", "0.6907316", "0.6898464", "0.6891929", "0.688989", "0.6883125", "0.68608737", "0.68562895", "0.6855568", "0.6855036", "0.68531066", "0.68487024", "0.684503", "0.6829715", "0.68077207", "0.68017864", "0.67919403", "0.67873746", "0.67869467", "0.67777896", "0.67689145" ]
0.796108
3
needed for search Gets the number of pawns on the board of a color.
def count(self,color):
    count = 0
    for y in range(0,HEIGHT):
        for x in range(0,WIDTH):
            if(self.gameState[x,y]==color):
                count+=1
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_colors(board, color):\n n = 0\n for cell in board:\n if cell == color:\n n += 1\n elif cell == cinv(color):\n n -= 1\n return n", "def sum_color(self, board, color):\n sum_of_color = 0\n for i in range(board.size):\n for j in range(board.size):\n if board.board[i][j].name == color:\n sum_of_color += 1\n return sum_of_color", "def currentScore(self, playerColor):\n total = 0\n for col in range(0, 8):\n for row in range(0, 8):\n if self.board[col][row].color == playerColor:\n total+=1\n return total", "def get_color_count(self, color, filled):\n count = 0\n for cell in filled:\n coord_x = cell[1]\n coord_y = cell[0]\n\n # up\n if coord_y - 1 >= 0:\n new_cell = (coord_y-1, coord_x)\n cell_up_color = self.get_color(new_cell)\n if cell_up_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n # down\n if coord_y + 1 < self.height:\n new_cell = (coord_y+1, coord_x)\n cell_down_color = self.get_color(new_cell)\n if cell_down_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n # left\n if coord_x - 1 >= 0:\n new_cell = (coord_y, coord_x-1)\n cell_left_color = self.get_color(new_cell)\n if cell_left_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n # right\n if coord_x + 1 < self.width:\n new_cell = (coord_y, coord_x+1)\n cell_right_color = self.get_color(new_cell)\n if cell_right_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n return count", "def getNumberPoints(self, move):\r\n (current_point_white, current_point_black) = self._board.get_nb_pieces()\r\n self._board.push(move)\r\n (new_point_white, new_point_black) = self._board.get_nb_pieces()\r\n self._board.pop()\r\n \r\n if(self._mycolor == 1): #black\r\n return (new_point_black-current_point_black) \r\n else:\r\n return (new_point_white-current_point_white)", "def numerocuadrado(posicion_del_mouse):\r\n\r\n for j in range(16):\r\n if Totalcuadrados[j].collidepoint(posicion_del_mouse):\r\n return j+1", "def count_legal_moves(board, color):\n return len(legal_moves(board, color))", "def eval_board(self, board):\n\t\ts = 0\n\t\t\n\t\tfor i in board.columns:\n\t\t\tfor j in board.rows:\n\t\t\t\tif board[i+j] == self.color:\n\t\t\t\t\n\t\t\t\t\tif i in ['A', 'H'] or j in ['1', '8']:\n\t\t\t\t\t\tif i + j in ['A1', 'A8', 'H1', 'H8']:\n\t\t\t\t\t\t\ts += 4\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ts += 2\n\t\t\t\t\telse:\n\t\t\t\t\t\ts += 1\n\t\treturn s", "def count_neighbors(lights, r, c):\n neighbors = 0\n\n if r > 0 and c > 0: # 1\n neighbors += 1 if lights[r - 1][c - 1] == \"#\" else 0\n\n if r > 0: # 2\n neighbors += 1 if lights[r - 1][c] == \"#\" else 0\n\n if r > 0 and c < GRID_SIZE - 1: # 3\n neighbors += 1 if lights[r - 1][c + 1] == \"#\" else 0\n\n if c < GRID_SIZE - 1: # 4\n neighbors += 1 if lights[r][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c < GRID_SIZE - 1: # 5\n neighbors += 1 if lights[r + 1][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1: # 6\n neighbors += 1 if lights[r + 1][c] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c > 0: # 7\n neighbors += 1 if lights[r + 1][c - 1] == \"#\" else 0\n\n if c > 0: # 8\n neighbors += 1 if lights[r][c - 1] == \"#\" else 0\n\n return neighbors", "def countDiff(self, color):\n count = 0\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n count += 1\n if self[x][y]==-color:\n count -= 1\n return count", "def get_disk_count(self, self_color, board):\r\n count = 0\r\n for r in range(8):\r\n for c in range(8):\r\n if 
board[r][c] == self_color:\r\n count += 1\r\n return count", "def get_num_moves(self, player: PlayerColor) -> int:\r\n player_squares: List[Square] = self.get_player_squares(player)\r\n count: int = 0\r\n for player_square in player_squares:\r\n adj_squares: List[Square] = \\\r\n self._get_adjacent_squares(player_square.pos)\r\n for adj_square in adj_squares:\r\n if (adj_square.state == SquareState.OPEN):\r\n count += 1\r\n elif(adj_square.state == SquareState.OCCUPIED):\r\n opposite_square: Square = \\\r\n self.squares.get(\r\n self._get_opposite_pos(player_square.pos,\r\n adj_square.pos))\r\n if (opposite_square is not None\r\n and opposite_square.state == SquareState.OPEN):\r\n count += 1\r\n\r\n return count", "def count_pixels_of_certain_color(\n self, color: Tuple[int, int, int]\n ) -> int:\n image = self.image\n\n color = CVUtils.rgb_to_bgr(color)\n\n mask = cv2.inRange(image, color, color)\n return cv2.countNonZero(mask)", "def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count", "def getAvailableMoves(self, checkColorIndex):\n checkColor = self.grid.REPRESENTATION[checkColorIndex]\n otherColor = self.grid.REPRESENTATION[1-checkColorIndex]\n emptyColor = self.grid.REPRESENTATION[2]\n\n result = 0\n for x in range(1, self.grid.width+1):\n for y in range(1, self.grid.height+1):\n if self.grid[x, y] != checkColor:\n continue\n\n if x - 2 >= 1 and self.grid[x - 1, y] == otherColor \\\n and self.grid[x - 2, y] == emptyColor:\n result += 1\n \n if x + 2 <= self.grid.width and self.grid[x + 1, y] == otherColor \\\n and self.grid[x + 2, y] == emptyColor:\n result += 1\n\n if y - 2 >= 1 and self.grid[x, y - 1] == otherColor \\\n and self.grid[x, y - 2] == emptyColor:\n result += 1\n\n if y + 2 <= self.grid.height and self.grid[x, y + 1] == otherColor \\\n and self.grid[x, y + 2] == emptyColor:\n result += 1\n return result", "def getPieceCount(self, checkColorIndex):\n return self.grid.countPlayerX() if self.grid.REPRESENTATION[checkColorIndex] == 'X' \\\n else self.grid.countPlayerO()", "def score(self, board: Block) -> int:\n grid = _flatten(board)\n left = grid[0]\n right = grid[-1]\n top = [i[0] for i in grid]\n bottom = [i[-1] for i in grid]\n score0 = left.count(self.colour)\n score1 = right.count(self.colour)\n score2 = top.count(self.colour)\n score3 = bottom.count(self.colour)\n return score0 + score1 + score2 + score3", "def count_numbers(board: list, x: int, y: int):\n nums = []\n for i in range(9):\n # Adds number to list if it finds one in the col or row\n if not board[i][x] == 0:\n nums.append(board[i][x])\n if not board[y][i] == 0:\n nums.append(board[y][i])\n # Does the same as the find_group function, finds the group of the cell\n x_group = x - x % 3\n y_group = y - y % 3\n for i in range(3):\n for z in range(3):\n # Adds group numbers to list\n if not board[y_group + i][x_group + z] == 0:\n nums.append(board[y_group + i][x_group + z])\n # returns the length of unrepeated number in the col, row and group\n return len(set(nums))", "def number_at_cell(game, pokemon_locations, grid_size, index):\n num = 0\n # number of Pokemon in neighbouring cells\n neighbours = neighbour_directions(index,grid_size)\n for 
neighbour in neighbours:\n if neighbour in pokemon_locations:\n num += 1\n return num", "def get_player_piece_locs(self, mycolor):\n if mycolor not in (ChessGame.BLACK, ChessGame.WHITE):\n return None\n\n return self.__players[mycolor]", "def neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count", "def _check_row(self, col, board) -> int:\n score = 0\n for row in range(len(board)):\n if board[col][row] == self.colour:\n score += 1\n return score", "def get_carrot_count(matrix, pos, size):\n\n row, col = pos\n\n if on_board(pos, size):\n return (matrix[row][col], pos)\n\n return (0, (-1, -1))", "def number_at_cell(self, pokemon_locations, grid_size, index):\n if self.get_game()[index] != UNEXPOSED:\n return int(self.get_game()[index])\n\n number = 0\n for neighbour in self.neighbour_directions(index, grid_size):\n if neighbour in pokemon_locations:\n number += 1\n\n return number", "def get_neighbors(self, line, col):\n neighbors = 0\n for line_shift in [-1, 0, 1]:\n for col_shift in [-1, 0, 1]:\n if line_shift == 0 and col_shift == 0:\n continue # Do not count given cell\n # % connects left/right and up/down\n i = (line + line_shift) % self.lines\n j = (col + col_shift) % self.cols\n if self[i][j] == self.cell_state['alive']:\n neighbors += 1\n return neighbors", "def count_winning_blocks(self, gameboard):\r\n count = {'red':0.1, 'blue':0.1}\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n h = gameboard.check_horizontal_state(position)\r\n v = gameboard.check_vertical_state(position)\r\n d1 = gameboard.check_diag_1_state(position)\r\n d2 = gameboard.check_diag_2_state(position)\r\n for state in [h, v, d1, d2]:\r\n if ((state.count('red') + state.count('x') == 5)\r\n and (state.count('red') > 0)):\r\n count['red'] += np.power(3, (state.count('red') - 1))\r\n elif ((state.count('blue') + state.count('x') == 5)\r\n and (state.count('blue') > 0)):\r\n count['blue'] += np.power(3, (state.count('blue') - 1))\r\n return count", "def get_num_explored(self):\n return self.__num_explored", "def getEgdePieceCount(self, checkColorIndex):\n return self.grid.countPlayerXEdge() if self.grid.REPRESENTATION[checkColorIndex] == 'X' \\\n else self.grid.countPlayerOEdge()", "def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors", "def get_neighbour(self, y, x):\n if [y, x] in self.mine_locations:\n return Minesweeper.BOMB\n count = 0\n # (x-1, y-1), (x, y-1), (x+1, y-1),\n # (x-1, y), (x, y), (x+1, y),\n # (x-1, y+1), (x, y+1), (x+1, y+1)\n for xe in range(x - 1, x + 2):\n for ye in range(y - 1, y + 2):\n if [ye, xe] in self.mine_locations:\n count += 1\n return str(count)", "def queryNumberOfColors(self):\n self._numColorsInUse = \\\n self._readInt('How many colors are available', 2, len(self._palette))\n return self._numColorsInUse", "def count_neighbors(self, x, y):\n # IMPLEMENT ME\n # HINT: You do not 
have to use a for-loop for this method; just\n # if-statements will suffice. Also, you do not need to indent further\n # than two levels further than this comment.\n neighbours = 0\n if x > 0 and y > 0:\n if self.board[x-1][y-1] == \"x\":\n neighbours += 1\n if x > 0:\n if self.board[x-1][y] == \"x\":\n neighbours += 1\n if x > 0 and y < self.width - 1:\n if self.board[x-1][y+1] == \"x\":\n neighbours += 1\n if y > 0:\n if self.board[x][y-1] == \"x\":\n neighbours += 1\n if y < self.width - 1:\n if self.board[x][y+1] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y > 0:\n if self.board[x+1][y-1] == \"x\":\n neighbours += 1\n if x < self.height - 1:\n if self.board[x+1][y] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y < self.width - 1:\n if self.board[x+1][y+1] == \"x\":\n neighbours += 1\n return neighbours", "def decide_color(tile, board, n, colours):\n list_c = colours.copy()\n origin_color = board[0,0]\n list_c.remove(origin_color)\n colour_2 = list_c[0]\n list_c.remove(colour_2)\n colour_3 = list_c[0]\n c2_count = 0\n c3_count = 0\n for el in tile:\n el_x = el[0]\n el_y = el[1]\n if el_x + 1 < n:\n if board[el_x+1, el[1]] == colour_2:\n c2_count += 1\n if board[el_x+1, el_y] == colour_3:\n c3_count += 1\n if el_y + 1 < n:\n if board[el_x, el_y+1] == colour_2:\n c2_count += 1\n if board[el_x, el_y+1] == colour_3:\n c3_count += 1\n\n if c2_count > c3_count:\n colour = colour_2\n\n if c2_count <c3_count:\n colour = colour_3\n\n if c2_count == c3_count:\n colour = colour_2\n return colour", "def corner_score(self, size: int,\n board: List[List[Tuple[int, int, int]]]) -> int:\n score = 0\n bound = size - 1\n corners = [(0, 0), (0, bound), (bound, 0), (bound, bound)]\n for corner in corners:\n if board[corner[0]][corner[1]] == self.colour:\n score += 1\n return score", "def getPlayer(board):\n count_x, count_o = 0, 0\n for row in board:\n count_x += row.count(X)\n count_o += row.count(O)\n if count_x > count_o:\n return O\n return X", "def get_neighbors(arr, pos, color):\n neighbors = []\n try:\n if arr[pos[0] + 1][pos[1]] == color:\n neighbors.append((pos[0] + 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0] - 1][pos[1]] == color:\n neighbors.append((pos[0] - 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0] + 1][pos[1]] == color:\n neighbors.append((pos[0] + 1, pos[1]))\n except IndexError:\n pass\n try:\n if arr[pos[0]][pos[1] - 1] == color:\n neighbors.append((pos[0], pos[1] - 1))\n except IndexError:\n pass\n return neighbors", "def get_num_black_pieces(self):\n return self.num_black_pieces", "def count_chips(board, player):\n cont = 0\n for row in board:\n for col in row:\n if col == PLAYER_CHIPS[player]:\n cont += 1\n return cont", "def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def get_piece_type_positions_by_color(self, leading_color: chess.Color):\n\n # layers with pieces of current player will be stacked on layers of other player\n if leading_color == chess.WHITE:\n return np.vstack((self.__white, self.__black))\n elif leading_color == chess.BLACK:\n return np.vstack((self.__black, self.__white))", "def checkNumNeighbors():", "def player(board):\n\n if terminal(board):\n return 7\n\n numX = 0\n numO = 0\n\n for i in board:\n for j in i:\n if j == X:\n numX = numX + 1\n elif j == O:\n numO = numO + 1\n\n if numX == numO:\n return X\n else:\n return O", "def _getNumcam( self, bSeed ):\n\n\t\treturn ( ( bSeed >> 20 ) & 
0xF ) + 1", "def h(self, node):\n count_peg = -1\n for line in node.state.board:\n count_peg += line.count(c_peg())\n return count_peg", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def get_palette_index(self, color, level):\n if self.is_leaf():\n return self.palette_index\n index = self.get_color_index_for_level(color, level)\n if self.children[index]:\n return self.children[index].get_palette_index(color, level + 1)\n else:\n # get palette index for a first found child node\n for i in range(8):\n if self.children[i]:\n return self.children[i].get_palette_index(color, level + 1)", "def slot_numbers_for_cars_with_colour(self, colour):\n\n if not self._is_valid():\n return\n\n colour = colour.lower()\n\n slot_nos = []\n\n for pslot in self.slots.values():\n if not pslot.available and pslot.car and pslot.car.colour.lower() == colour:\n slot_nos.append(str(pslot.slot_no))\n\n if slot_nos:\n print(\", \".join(slot_nos))\n else:\n print(\"Not found\")\n\n return slot_nos", "def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])", "def match(head_list_at_crossing, wall_color_at_crossing, p):\n num_index2 = int((p-1)/2)\n indicies_of_heads = []\n # print(num_index2)\n for i in range(1, num_index2+1):\n # print(i)\n # print(indicies_of_heads)\n indicies_of_heads += find(i, head_list_at_crossing)\n # print(indicies_of_heads)\n return indicies_of_heads", "def get_trump_color(self):\n color_counter = collections.Counter()\n for card in self.hand:\n color = card.color\n if color == \"White\":\n continue\n color_counter[color] += 1\n if not color_counter.most_common(1):\n return super().get_trump_color()\n else:\n return color_counter.most_common(1)[0][0]", "def checkWin(color_list):\n startcolor = color_list[0][0] #Saves color of [0][0] to variable for easy access\n for i in range(15):\n for k in range(25):\n if color_list[i][k] != startcolor: #If any color is not same as color on [0][0] stop and return False since game is not won\n return False\n return True #If all colors are the same as [0][0] the game ahs been won and return Tture", "def rec_count(color : str) -> int:\n return sum(\n (1 + rec_count(child)) * count\n for child, count in contents[color].items()\n )", "def get_palette_index(self, color):\n return self.root.get_palette_index(color, 0)", "def countOccupied(data):\n\tcounter = 0\n\n\t# loop through rows and columns and\n\t# count the number of '#'s\n\tfor r in range(len(data)):\n\t\tfor c in range(len(data[r])):\n\t\t\tif data[r][c] == '#':\n\t\t\t\tcounter += 1\n\n\treturn counter", "def color_in_check(self, mycolor):\n\n opponent = self.__players[self.get_opponent_color(mycolor)]\n\n x, y = None, None\n for (u, v) in self.__players[mycolor]:\n piece = self.get_piece(u, v)\n if not piece:\n raise ValueError()\n\n if self.get_piece(u, v).name == 'king':\n x, y = u, v\n break\n\n for (u, v) in 
opponent:\n if (x, y) in self._get_piece_moves(u, v):\n return True\n\n return False", "def __moves_available(self, board: Board):\n player_moves = self.get_num_of_moves(board, self.player_color)\n opponent_moves = self.get_num_of_moves(board, self.opponent_color)\n # print(len(player_moves), len(opponent_moves))\n\n return player_moves - opponent_moves * 3", "def getNumEmptyHouses(self, prop_colour):\n colour_monopolies = database_creator.db.query(\n \"SELECT num_of_houses FROM main_property_deck WHERE property_colour = :prop_colour\", prop_colour=prop_colour)\n self.numUnbuilt = []\n for i in colour_monopolies:\n d = 5-int(i.num_of_houses)\n self.numUnbuilt.append(d)\n #print(self.numUnbuilt)\n return self.numUnbuilt", "def getWinner(self):\n global vertical_win\n global done\n lines = (\n self.board, # columns\n zip(*self.board), # rows\n diagonalsPos(self.board, self.cols, self.rows), # positive diagonals\n diagonalsNeg(self.board, self.cols, self.rows) # negative diagonals\n )\n\n for sublist in self.board:\n if sublist[0] == sublist[1] == sublist[2] == sublist[3] or sublist[1] == sublist[2] == sublist[3] == \\\n sublist[4] or sublist[2] == sublist[3] == sublist[4] == sublist[5]:\n vertical_win = True\n\n for line in chain(*lines):\n for color, group in groupby(line):\n if color != 0 and len(list(group)) >= self.win:\n done = True\n return color\n counter = 0\n for sublist in self.board:\n for i in sublist:\n if i != 0:\n counter += 1\n if counter == 42:\n done = True\n return Tie", "def check_up(self, i, j, board):\r\n color = board.pieceAt(i, j)\r\n\r\n row = True\r\n for k in range(4):\r\n row &= board.pieceAt(i-k, j) is color\r\n\r\n if row:\r\n return color\r\n else:\r\n return Color.EMPTY", "def player(board):\n\n count_x = 0\n count_o = 0\n for i in board:\n for j in i:\n if (j == X):\n count_x += 1\n elif (j == O):\n count_o += 1\n if (count_x <= count_o):\n return X\n else:\n return O", "def get_color_index(position):\n\n\t\tcolors = {\"blue\": 6,\n\t\t\t\t\t\t\t\"red\": 13,\n\t\t\t\t\t\t\t\"yellow\": 17,\n\t\t\t\t\t\t\t\"light_blue\": 18,\n\t\t\t\t\t\t\t\"pink\": 20,\n\t\t\t\t\t\t\t\"purple\": 30}\n\n\t\tpositions = {\"C\": \"yellow\",\n\t\t\t\t\t\t\t\t \"R\": \"red\",\n\t\t\t\t\t\t\t\t \"L\": \"blue\"}\n\n\t\tindex = colors.get(positions.get(position[0], None), None)\n\t\tif not index:\n\t\t\t\traise KeyError(\"Position color index '%s' not recognised.\" % position)\n\n\t\treturn index", "def n_neighbors(self,n):\n return sum(1 for x in self.hex.get_neighbors_ring(n) if x is not None and x.is_occupied == 1)", "def value(self):\n black, white = 0, 0\n for sq in Othello.squares():\n piece = self.__board[sq]\n if piece == BLACK: black += 1\n elif piece == WHITE: white += 1\n if black == white:\n return 0.5\n elif black > white:\n return 1\n else:\n return 0", "def player(board):\n X_count = 0\n O_count = 0\n\n for row in board:\n X_count += row.count(X)\n O_count += row.count(O)\n\n if X_count <= O_count:\n return X\n else:\n return O", "def get_piece_dict(self, mycolor):\n pieces = { (x, y) : self.get_piece(x, y) for (x, y) in\n self.__players[mycolor] }\n if None in pieces.values():\n raise ValueError()\n return pieces", "def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves", "def G_colour_count(self, r, b=-1):\n counts = [0 for _ in range(r)]\n for row in self.G_colour_tableau(r,b):\n for 
cell_colour in row:\n counts[cell_colour] += 1\n assert sum(counts) == self.size()\n return counts", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_pile_size_in_col(self, col, only_faceup=False) -> int:\n cards = 0\n # return -1 if col does not exist\n if col < 0 or col >= 7:\n return 0\n # count the cards. 0 = not a card\n if only_faceup:\n for card in self.solitaire[col]:\n if card != 0 and not None and not card.is_facedown:\n cards += 1\n return cards\n else:\n for card in self.solitaire[col]:\n if card != 0:\n cards += 1\n else:\n break\n return cards", "def get_neighbour_squares_idx(self, pos):\n if pos:\n possible_values = {0, 1, 2}\n col_variation = zip( [pos[0], pos[0]], possible_values - {pos[1]} )\n row_variation = zip( possible_values - {pos[0]}, [pos[1], pos[1]] )\n return list(col_variation), list(row_variation)", "def color_column(self):\n return 8", "def player(board):\n num_x = sum([list.count(X) for list in board])\n num_o = sum([list.count(O) for list in board])\n if num_x == num_o:\n return X\n else:\n return O", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def getPixelColor(self, n):\n self._logger.debug(\"getPixelColor\")", "def heuristic_takeAllPiece(board, player):\n\n if player is board._WHITE:\n return board._nbWHITE - board._nbBLACK\n \n return board._nbBLACK - board._nbWHITE", "def howManyNeigbors(board,row,col):\r\n\tneigbors = 0\r\n\tif board[row-1][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row-1][col] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row-1][col+1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row][col+1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col+1] == 1:\r\n\t\tneigbors += 1\r\n\treturn neigbors", "def score(self, board: Block) -> int:\r\n target_colour = self.colour\r\n score = 0\r\n flat_list = _flatten(board)\r\n for j in range(len(flat_list)):\r\n if flat_list[0][j] == target_colour:\r\n if j in [0, len(flat_list) - 1]:\r\n score += 2\r\n else:\r\n score += 1\r\n for j in range(len(flat_list)):\r\n if flat_list[-1][j] == target_colour:\r\n if j in [0, len(flat_list) - 1]:\r\n score += 2\r\n else:\r\n score += 1\r\n for j in range(2): # first and last row\r\n for i in range(1, len(flat_list) - 1): # deduct corners\r\n if flat_list[i][-j] == target_colour:\r\n score += 1\r\n return score", "def random_neighbors(self) -> int:\n return self.__random_neighbors", "def count_colors(cards: np.ndarray) -> np.ndarray:\n result = np.zeros(4, np.int32)\n cards.sum()\n result[0] = (cards[0:9]).sum()\n result[1] = (cards[9:18]).sum()\n result[2] = (cards[18:27]).sum()\n result[3] = (cards[27:36]).sum()\n return result", "def count_bingo_board(board_num):\n bingo_score = 0\n for row in bingoboards[board_num]:\n bingo_score += sum(list(filter(lambda x: x != \"X\", row)))\n return bingo_score", "def check_neighbours(r, c, board):\n NeighboursSum = 0\n\n for x in range(r - 1, r + 1):\n for y in range(c - 1, c + 1):\n NeighboursSum = NeighboursSum * board[x][y]\n\n return NeighboursSum", "def get_number(self, row, col):\n return 
self._grid[row][col]", "def get_number(self, row, col):\n return self._grid[row][col]", "def get_number(self, row, col):\n return self._grid[row][col]", "def get_number(self, row, col):\n return self._grid[row][col]", "def houses(self):\n num = 0\n points = 0\n # TODO: add pattern matching\n if \"s\" in self.__as_str:\n num += 1\n if \"f\" in self.__as_str:\n num += 1\n if \"1\" in self.__as_str or \"2\" in self.__as_str or \"3\" in self.__as_str or \"4\" in self.__as_str:\n num += 1\n if \"o\" in self.__as_str:\n num += 1\n if \"p\" in self.__as_str:\n num += 1\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'h':\n if 'f' in self.neighbours(i, j):\n points += 1\n else:\n points += num\n return points", "def player(board) -> str:\n x_amount = 0\n o_amount = 0\n for row in board:\n for column in row:\n if column == X:\n x_amount += 1\n elif column == O:\n o_amount += 1\n\n return X if x_amount == o_amount else O", "def get_color_index_for_level(self, color, level):\n index = 0\n mask = 0x80 >> level\n if color.red & mask:\n index |= 4\n if color.green & mask:\n index |= 2\n if color.blue & mask:\n index |= 1\n return index", "def get_counts(self):\n counts = [0, 0]\n for i in range(self._num_rows):\n for j in range(self._num_cols):\n if self._board[i][j] == \"B\":\n counts[0] += 1\n elif self._board[i][j] == \"W\":\n counts[1] += 1\n return counts", "def score(self):\r\n totN = 0\r\n totB = 0\r\n for l in range(SIZE):\r\n for c in range(len(COLONNES)):\r\n if self.jeu[l][c] == NOIR:\r\n totN += 1\r\n elif self.jeu[l][c] == BLANC:\r\n totB += 1\r\n return (totN, totB)", "def get(self):\n # 8 timesteps, 6 piece types per player, 64 squares #FIXME: 1 timestep\n # 1 castling (which rooks can still castle)\n # 1 player color (1 if white, 0 if black)\n # 1 total move count\n # 1 moves without progress\n # TODO: add repetions (2): repetition count for that position (3 repitions is an autmatic draw)\n pieces = np.concatenate(self.boards)[::-1]\n pieces = np.concatenate(pieces)\n if len(pieces) == MAX_PIECE_INDEX:\n return pieces\n else:\n return np.concatenate((pieces, np.zeros(MAX_PIECE_INDEX-len(pieces), )))", "def get_neighbors(self, node):\r\n neighbors = set()\r\n for neighbor in ORTHOGONAL_POSITIONS[(node.pos[0], node.pos[1])]:\r\n if self.board[neighbor[0]][neighbor[1]].color == node.color:\r\n neighbors.add(neighbor)\r\n else:\r\n continue\r\n return neighbors", "def match(self, color: Color) -> ColorPoint:\n results = self.tree.search_nn(color.hsl)\n if not results:\n raise KeyError('No match found for color: {}'.format(color))\n return results[0].data", "def find_possible_moves(self, board, self_color):\r\n possible_moves = []\r\n delta = [(0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1)]\r\n\r\n for r in range(len(board)):\r\n for c in range(len(board[r])):\r\n if board[r][c] == self_color:\r\n for i in range(0, 8):\r\n coords = (r, c)\r\n\r\n found_move = self.check_moves(board, self_color, coords, delta[i])\r\n\r\n if found_move is not None and found_move not in possible_moves:\r\n possible_moves.append(found_move)\r\n return possible_moves", "def nbr_tours(self):\n nbr_tours = 0\n for i in range(3):\n for j in range(3):\n if self.grille[i][j] != 0:\n nbr_tours += 1\n return nbr_tours", "def gen_moves_list(self,color='',dontCallIsAttacked=False):\n \n if(color==''):\n color=self.side2move\n mList=[]\n \n # For each 'piece' on the board (pos1 = 0 to 63)\n for pos1,piece in enumerate(self.cases):\n \n # Piece (or empty square) color is not the wanted ? 
pass\n if piece.couleur!=color:\n continue\n \n if(piece.nom=='ROI'): # KING\n mList+=piece.pos2_roi(pos1,self.oppColor(color),self,dontCallIsAttacked)\n continue\n \n elif(piece.nom=='DAME'): # QUEEN = ROOK + BISHOP moves !\n mList+=piece.pos2_tour(pos1,self.oppColor(color),self)\n mList+=piece.pos2_fou(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='TOUR'): # ROOK\n mList+=piece.pos2_tour(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='CAVALIER'): # KNIGHT\n mList+=piece.pos2_cavalier(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='FOU'): # BISHOP\n mList+=piece.pos2_fou(pos1,self.oppColor(color),self)\n continue\n \n if(piece.nom=='PION'): # PAWN\n mList+=piece.pos2_pion(pos1,piece.couleur,self)\n continue\n \n return mList", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def get_pieces_left(board, piece):\r\n\tpieces = 0\r\n\tfor row in board:\r\n\t\tfor col in row:\r\n\t\t\tif col == piece:\r\n\t\t\t\tpieces += 1\r\n\r\n\treturn pieces" ]
[ "0.71357477", "0.67711115", "0.6714085", "0.6677235", "0.65223455", "0.6421733", "0.63733757", "0.63176304", "0.6308519", "0.6277486", "0.62195385", "0.6203734", "0.6190637", "0.6115216", "0.61102915", "0.6102846", "0.60874075", "0.6083563", "0.60525626", "0.6047749", "0.6038292", "0.6021945", "0.59981954", "0.59945077", "0.5979369", "0.594881", "0.5933531", "0.5902637", "0.5890151", "0.5887287", "0.5873161", "0.58449376", "0.58354115", "0.5830128", "0.5823101", "0.5792508", "0.5790537", "0.5757185", "0.5756808", "0.5727046", "0.57252973", "0.5720263", "0.571608", "0.5700856", "0.56965524", "0.5692262", "0.568991", "0.56806624", "0.56693333", "0.5660685", "0.56460327", "0.5633886", "0.5624084", "0.5615689", "0.5613689", "0.5610581", "0.5606288", "0.56042737", "0.5602917", "0.55947906", "0.55892444", "0.5588708", "0.5584628", "0.558396", "0.5567274", "0.5564481", "0.55609906", "0.5559269", "0.5559269", "0.5559269", "0.5554464", "0.5554361", "0.5553061", "0.5549734", "0.5542581", "0.5539438", "0.5537671", "0.5536205", "0.553154", "0.55302334", "0.55280477", "0.5526254", "0.5517963", "0.5512889", "0.5512889", "0.5512889", "0.5512889", "0.54919964", "0.5486476", "0.54846984", "0.5482698", "0.5481752", "0.5478684", "0.5478381", "0.54781544", "0.54777455", "0.54755133", "0.54744977", "0.54720384", "0.5471559" ]
0.6866677
1
Tells if a position is in the game bounds
def inBounds(self,pos):
    return ((pos.x<WIDTH) & (pos.x>=0) & (pos.y<HEIGHT) & (pos.y>=0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_bounds(self, position):\n row, col = position\n return ((row >= 0 and row < self.height) and\n (col >= 0 and col < self.width))", "def is_in_bounds(pos):\n return PLAYFIELD_PADDING[0] < pos[0] < PLAYFIELD_PADDING[0] +\\\n BLOCK_NUM_WIDTH * Block.WIDTH and PLAYFIELD_PADDING[1] < pos[1] <\\\n PLAYFIELD_PADDING[1] + BLOCK_NUM_HEIGHT * Block.HEIGHT", "def detect_in_bounds(self):\n creature_x, creature_y = self.creature.current_location\n if creature_x < 0 or creature_x >= self.world_width\\\n or creature_y < 0 or creature_y >= self.world_height:\n print('The creature is out of bounds!')\n return False\n return True", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def pos_within_bounds(position):\n if type(position[0]) == int:\n row,col = position\n else:\n col,row = position\n \n if not 1<=row<=8:\n return False\n if not 65<=ord(col)<=72:\n return False\n return True", "def in_bounds(self, location: tuple) -> bool:\n return 0 <= min(location) and max(location) <= 7", "def inBounds(self, px, py):\n return px >= 0 and py >= 0 and px < self.w and py < self.h", "def isPositionInRoom(self, pos):\n\t\tif pos.getX() >= 0.0 and pos.getX() < float(self.width):\n\t\t\tif pos.getY() >= 0.0 and pos.getY() < float(self.height):\n\t\t\t\treturn True\n\t\treturn False", "def isPositionInRoom(self, pos):\n posX = pos.getX()\n posY = pos.getY()\n if posX >= 0 and posX < self.width and posY >= 0 and posY < self.height:\n return True\n return False", "def check_inside(self, pos):\n x,y = pos\n return x >= self.posx and x <= self.posx + self.sizex and y >= self.posy and y <= self.posy + self.sizey", "def isPositionInRoom(self, pos):\n if pos.getY() > self.height or pos.getX() > self.width or pos.getX() < 0 or pos.getY() < 0:\n return False\n else:\n return True", "def bounds(self, pos):", "def isPositionInRoom(self, pos):\n if pos.getX() >= 0 and pos.getX() < self.w:\n if pos.getY() >= 0 and pos.getY() < self.h:\n return True\n return False\n return False", "def boundary_check(limits : tuple, coords : tuple) -> bool:\n xl,xh,yl,yh = limits\n x,y = coords\n bound_x = xl <= x and x < xh\n bound_y = yl <= y and y < yh\n return bound_x and bound_y", "def in_bounds(state, map_shape):\n return 0 <= state[0] < map_shape[0] and 0 <= state[1] < map_shape[1]", "def at_pos(self, pos):\n return pos-self.deadband < self.wm() < pos+self.deadband", "def IsBound(self) -> bool:", "def in_zone(self, bottom_left, top_right):\n return (bottom_left.x <= self.pos.x and self.pos.x <= bottom_left.x and\n top_right.y <= self.pos.y and self.pos.y <= top_right.y)", "def check_pos(self, x, y):\n if x >= WINDOWWIDTH or y >= WINDOWHEIGHT or x <=0 or y <= 0:\n return True", "def out_of_bounds(self):\n return self.rect.right <= 0 or self.rect.left >= self.screen_rect.width", "def isPositionInRoom(self, pos):\n #note to self '&' is bitwise, 'and' is a boolean operation\n #NB: python permits the a<b<c syntax used below, doesn't need verbose 'and' tests.\n #ie: 0 <= pos.getX() and pos.getX() <= self.width and 0 <= pos.getY() and pos.getY() <= self.height\n return 0 <= pos.getX() < self.width and 0 <= pos.getY() < self.height\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html", "def isInRange(self, position) -> bool:\n if position.getX() >= self.node.getX() - self.radius and position.getX() <= self.node.getX() + self.radius:\n if position.getY() >= self.node.getY() - self.radius and position.getY() <= 
self.node.getY() + self.radius:\n return True\n return False", "def check_bounds(self, row: int, col: int) -> bool:\n return 0 <= row < self.row and 0 <= col < self.col", "def validate_position(position: Tuple[int, int], bound: int) -> bool:\n if position[0] < 0 or position[0] >= bound:\n return False\n if position[1] < 0 or position[1] >= bound:\n return False\n return True", "def out_of_bounds(self):\n return self.rect.right <= 0", "def check_position(self, position):\n x_axis, y_axis = position\n try:\n return bool(self.full_map[x_axis][y_axis] not in \"#\"\n and 0 <= x_axis <= self.x_axis\n and 0 <= y_axis <= self.y_axis)\n\n except IndexError:\n return False", "def __cell_is_in_map(self, x, y) -> bool:\n return x >= 0 and y >= 0 and x < self.occupancy_map.info.width and y < self.occupancy_map.info.height", "def hasSpaceAround(self,x,y):\n global gamemap\n c = 0\n for x2 in xrange(-2,2):\n for y2 in xrange(-2,2):\n if self.near(x, y,x + x2,y + y2):\n if not gamemap[x + x2][y + y2].type[0]:\n c += 1\n if c >= 8:\n return False\n else:\n return True", "def isPositionInRoom(self, pos):\n if pos in self.tiles:\n return True\n else:\n return False", "def check_bounds (position, size):\n \n for item in position:\n # checks whether item is out of bounds\n if item < 0 or item >= size:\n return False\n return True", "def validatePosition(boardsize, pos):\n return pos.x in range(0, boardsize) and pos.y in range(0,boardsize)", "def in_bounds(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n for i, coordinate in enumerate(point):\n if coordinate > self.dimensions[i] or coordinate < 0:\n return False\n\n return True", "def isPositionInRoom(self, pos):\n #if key tuple position(x,y) is in dictionary tiles return True else return False\n posx = pos.getX()\n posy = pos.getY()\n posx = math.floor(posx)\n posy = math.floor(posy)\n if (posx >= self.width) or (posy >= self.height):\n return False\n\n if (posx,posy) in self.tiles.keys():\n return True\n else:\n return False", "def has_bounds(self):\r\n bounds = self.bounds\r\n if bounds in (None, [None, None]):\r\n return False\r\n for i in xrange(bounds[0]):\r\n if bounds[0][i] is not None and bounds[0][i] > -np.inf:\r\n return True\r\n for i in xrange(bounds[1]):\r\n if bounds[1][i] is not None and bounds[1][i] < np.inf:\r\n return True\r\n return False", "def is_inside(pos):\r\n\t\trow, col = pos\r\n\t\treturn 0 <= row and row < num_rows and \\\r\n\t\t\t0 <= col and col < num_cols", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def contains ( self, pos ):\n # make sure xmin is minimum etc\n xmin = min(self.x_min,self.x_max)\n xmax = max(self.x_min,self.x_max)\n ymin = min(self.y_min,self.y_max)\n ymax = max(self.y_min,self.y_max)\n \n abs_tol = self.abs_tol\n # get pos indices inside rectangle (* == and)\n if self.include_border:\n inds = (pos[0, :] - xmin > -abs_tol) * \\\n (pos[0, :] - xmax < abs_tol) * \\\n (pos[1, :] - ymin > -abs_tol) * \\\n (pos[1, :] - ymax < abs_tol)\n else:\n inds = (pos[0, :] - xmin > abs_tol) * \\\n (pos[0, :] - xmax < -abs_tol) * \\\n (pos[1, :] - ymin > abs_tol) * \\\n (pos[1, :] - ymax < -abs_tol)\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n x = (xmin + xmax) / 2.0\n y = (ymin + ymax) / 2.0\n dr2 = (pos[0, :] - x)**2 + (pos[1, :] - y)**2\n 
inds[argmin(dr2)] = True\n \n return inds.astype(bool)", "def check_bounds(self):\n\n if self.bounds_action == self.BOUNCE:\n if self.hits_left_or_right():\n self.dx = self.dx * -1\n if self.hits_top_or_bottom():\n self.dy = self.dy * -1\n\n if self.bounds_action == self.STOP:\n if self.hits_left_or_right():\n self.dx = 0\n self.dy = 0\n if self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n\n if self.bounds_action == self.SKID:\n if self.hits_left_or_right():\n self.dx = 0\n if self.hits_top_or_bottom():\n self.dy = 0\n\n if self.bounds_action == self.DIE:\n if self.hits_left_or_right() or self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n self.visible = False", "def isValidPos(self, x, y, wallList, grid):\n if (x, y) not in wallList:\n return x > 0 and x < grid.width and y > 0 and y < grid.height", "def checkWithinBound(rowWithinBound,colWithinBound):\n if(rowWithinBound == 0 and colWithinBound == 0):\n return True\n else:\n return False", "def is_map_obstacle_in_screen_range(self):\n raise NotImplementedError", "def is_position_allowed(new_x, new_y):\n\n return min_x <= new_x <= max_x and min_y <= new_y <= max_y", "def within(self, x, y):\n return x >= self.top_x and x <= self.bottom_x and y >= self.bottom_y and y <= self.top_y", "def inCamp(self):\n return (((self.myTeam==1) and (self.ballPos.x <= self.width/2))\n | ((self.myTeam==2) and (self.ballPos.x >= self.width/2)))", "def inside_rectangle(self, x, y):\n if (self.pos.x - self.width < x < self.pos.x + self.width and\n self.pos.y - self.height < y < self.pos.y + self.height):\n return True", "def is_visible(self, position, size=0):\n # return True\n size /= self.scale # size is in pixel\n in_x = (self.focus.x + self.offset.x / self.scale - size <=\n position.x <=\n self.focus.x - self.offset.x / self.scale + size)\n in_y = (self.focus.y + self.offset.y / self.scale - size <=\n position.y <=\n self.focus.y - self.offset.y / self.scale + size)\n # if name == \"earth\":\n # print(\"{:+e} {:+e} {}\".format(self.focus.y + self.offset2.y\n # , position.y, in_y))\n # print(\"{:+e} {:+e}\".format(self.focus.x, self.focus.y))\n return in_x and in_y", "def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True", "def _isInScreen(self, pos):\n if type(pos) is Vec2:\n return pos.y >= 0 and pos.y <= self.screenSize[1] and pos.x >= 0 \\\n and pos.x <= self.screenSize[0]\n\n return pos[1] >= 0 and pos[1] <= self.screenSize[1] and pos[0] >= 0 \\\n and pos[0] <= self.screenSize[0]", "def can_reach(self, position: tuple) -> bool:\n x = position[0]\n y = position[1]\n dist_to_shoulder = math.sqrt(x**2 + y**2)\n max_dist = self.bicep_length + self.forearm_length\n if dist_to_shoulder > max_dist:\n return False\n return True", "def square_is(position, expected, world):\n\n result = False\n size = len(world) - 1\n\n if size >= position[0] >= 0 and size >= position[1] >= 0:\n result = world[position[0]][position[1]] == expected\n\n return result", "def check_boundaries(self):\n # Checks if the enemy bar has gone of the net\n if self.rect.left <= self.settings.WINDOW_WIDTH / 2:\n self.rect.left = self.settings.WINDOW_WIDTH / 2\n self.isMovingUp = False\n\n # Checks if the enemy bar has gone out of bound to the right\n if self.rect.right >= self.settings.WINDOW_WIDTH:\n self.rect.right = self.settings.WINDOW_WIDTH\n self.isMovingUp = True", "def inside_limits(self, point):\n if not self.regions:\n # Use rectangle check\n lat, lon = point.latitude, point.longitude\n if (lon > self.limits[0] and lat 
> self.limits[1] and\n lon < self.limits[2] and lat < self.limits[3]):\n return True\n else:\n return False\n else:\n # Check inside all possible regions\n p = Point((point.longitude, point.latitude))\n print(p, point)\n # import IPython; IPython.embed()\n for name, poly in self.regions.items():\n # if poly.contains(p):\n if p.intersects(poly):\n return name\n return False", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True", "def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls", "def _check_if_position_on_board(coord: tuple, board_size: int):\n in_row = coord[0] in range(board_size)\n in_col = coord[1] in range(board_size)\n return in_row and in_col", "def in_board(self,pos : np.ndarray) -> bool:\r\n if 0 > pos[0] or pos[0] >= BOARD_SIZE:\r\n return False\r\n if 0 > pos[1] or pos[1] >= BOARD_SIZE:\r\n return False\r\n\r\n return True", "def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False", "def valid(self, pos):\n\t\tpos = Point(pos)\n\t\treturn 0 <= pos.x < self.dims.width and 0 <= pos.y < self.dims.height", "def is_in_collision_point(self, pos):\n x, y = pos\n return sqrt((self.x - x)**2 + (self.y - y)**2) < self.r", "def _point_within_bounds(bounds, p):\n A, B = bounds\n # we have to add epsilon since test against horizontal or vertical\n # lines may fail if the point is off by numerical precision\n eps = 1e-10\n (Ax,Ay), (Bx,By), (px,py)=A,B,p\n return (\n (min((Ax,Bx))-eps<=px<=max((Ax,Bx))+eps) and\n (min((Ay,By))-eps<=py<=max((Ay,By))+eps)\n )", "def if_near_boom(self, player_pos, boom_pos):\n if player_pos[0] == boom_pos[0]:\n if player_pos[1] in range(boom_pos[1]-2*30, boom_pos[1]+3*30, 30):\n return True\n else:\n return False\n elif player_pos[1] == boom_pos[1]:\n if player_pos[0] in range(boom_pos[0]-2*30, boom_pos[0]+3*30, 30):\n return True\n else:\n return False\n else:\n return False", "def checkBounds(x,y,z,center,radius):\n r2 = (x-center[0])**2 + (y-center[1])**2# + (z-center[0])**2\n if r2 < radius**2:\n return True\n else:\n return False", "def is_in_field(self, x, y):\n return (self.origin_x <= x < self.width) and (self.origin_y <= y < self.height)", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def check_boundary(self, width, 
height):\r\n if 0 <= self.head[0] + self.direction[0]*10 <= width - 10 and 0 <= self.head[1] + self.direction[1]*10 <= height - 10:\r\n return True\r\n else:\r\n return False", "def isInside(self, position, maxDimLens):\n dim = len(position);\n deltas = [0] * dim;\n distSqr = 0;\n for i in range(0, dim):\n deltas[i] = math.fabs(position[i] - self.mSample[i]);\n if math.fabs(maxDimLens[i] - deltas[i]) < deltas[i]:\n deltas[i] = math.fabs(maxDimLens[i] - deltas[i]);\n distSqr += deltas[i]**2;\n\n if distSqr < ( self.mRadius**2 ):\n return True;\n else:\n return False;", "def in_box_bounds(self, test_vec):\n above_min = np.greater(test_vec, self.lower_vertex).all()\n below_max = np.greater(self.upper_vertex, test_vec).all()\n return above_min and below_max", "def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines", "def outOfScreen(self):\n x,y = self.currentLevel.transformToScreenCoordinate(self.position)\n w,h = cblocals.GAME_SCREEN_SIZE\n if x<0 or y<0 or x>x or y>h:\n return True\n return False", "def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny", "def in_box(coords, box):\n if box[0][0] < coords[0] < box[1][0] and box[1][1] < coords[1] < box[0][1]:\n return True\n return False", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False", "def check_inside(self, person):\n p_top_x = person[0] + self.padding\n p_left_y = person[1] + self.padding\n p_bottom_x = person[2] - self.padding\n p_right_y = person[3] - self.padding\n\n return p_top_x >= self.top_x and p_left_y >= self.left_y and p_bottom_x <= self.bottom_x \\\n and p_right_y <= self.right_y", "def check_bounds(self, index):\n if index < self.lower_bound or index > self.upper_bound:\n return False\n return True", "def is_spot_possible(left, right, bottom, top):\n return True\n if right < 6 or bottom < 6:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n if left > 18 or top > 18:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n if abs(top - bottom) > 16 or abs(right - left) > 16:\n # print(\"IMPOSSIBLE\", left, right, top, bottom)\n return False\n return True", "def in_box(coords, box):\n\tif box[0][0] < coords[0] < box[1][0] and box[1][1] < coords[1] < box[0][1]:\n\t\treturn True\n\treturn False", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def _is_valid_land(x, y, grid):\n return (x >= 0) and (x < len(grid)) and (y >= 0) and (y < len(grid[0])) and grid[x][y]", "def in_rectangle(x, y):\n return ((self.min_x <= x <= self.max_x) and\n (self.min_y <= y <= self.max_y))", "def coordinates_within_board(n: int, x: int, y: int) -> bool:\n\n return x < n and y < n and x >= 0 and y >= 0", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def in_geo_limits(args: argparse.Namespace, track_data: dict) -> bool:\n return (track_data['boundaries']['north'] <= args.north_lim and\n track_data['boundaries']['south'] >= args.south_lim and\n track_data['boundaries']['east'] <= args.east_lim and\n track_data['boundaries']['west'] >= args.west_lim)", "def __contains__(self, item: 'BoundingBox2D') -> bool:\n top_left_inside = item.xmin >= self.xmin and item.ymin >= self.ymin\n bottom_right_inside = item.xmax <= self.xmax and item.ymax <= self.ymax\n 
return top_left_inside and bottom_right_inside", "def is_pos_valid(pos, shape):\n x, y = pos\n is_valid = x >= 0 and x < shape[0] and y >= 0 and y < shape[1]\n return is_valid", "def is_bound(self, point):\n return self.__begin == point or self.__end == point", "def check_masked (self, pos : list,) :\n count = 0\n total = 0\n for x in range(pos[0],min(pos[0] + AUTO_width1, self.m_x)) :\n for y in range(pos[1], min(pos[1] + AUTO_width1, self.m_y)) :\n total += 1\n if self.current_grid[x][y] :\n count += 1\n if count/total > 0.5 :\n return True\n else :\n return False", "def is_occupied(self, pos):\n return any([p == pos for p in self._workers.values()])", "def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True", "def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True", "def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True", "def __is_valid_position(self, position):\n return (position[0] >= 0\n and position[0] < self.config.arena_size[0]\n and position[1] >= 0\n and position[1] < self.config.arena_size[1]\n and self.arena[position[0]][position[1]] != Tile.TAIL)", "def on_board(pos, size):\n\n row, col = pos\n height, width = size\n\n return 0 <= row <= width and 0 <= col <= height", "def isOutsideBorder(self):\n if (self.posX < -self.myGalaxy.worldWidth or self.posX > self.myGalaxy.worldWidth or\n self.posY < -self.myGalaxy.worldHeight or self.posY > self.myGalaxy.worldHeight):\n return 1\n return 0", "def g_in_bounds(x, lo, hi):\n\n return (x >= lo) and (x <= hi)", "def point_in_rectangle(point: Vector, rect_min: Vector, rect_max: Vector) -> bool:\n return rect_min[0] <= point[0] <= rect_max[0] and rect_min[1] <= point[1] <= rect_max[1]", "def within_boundaries(move):\n if move == ord('w') and ZERO_BASE_PLYR_POS in range(0, 10):\n return False\n elif move == ord('s') and ZERO_BASE_PLYR_POS in range(90, 100):\n return False\n elif move == ord('a') and ZERO_BASE_PLYR_POS in range(0, 91, 10):\n return False\n elif move == ord('d') and ZERO_BASE_PLYR_POS in range(9, 100, 10): \n return False\n else:\n return True" ]
[ "0.8121233", "0.78420454", "0.783356", "0.7815986", "0.77785796", "0.77391034", "0.7499602", "0.74738336", "0.7412133", "0.7395001", "0.7287691", "0.7282496", "0.72665375", "0.72362554", "0.7205017", "0.7162476", "0.71581644", "0.71300095", "0.71250474", "0.7091146", "0.706064", "0.7015256", "0.6961238", "0.6956712", "0.694453", "0.6938326", "0.69321895", "0.6922159", "0.691907", "0.6914208", "0.69104975", "0.688735", "0.68483764", "0.68429667", "0.68360925", "0.68247086", "0.68147", "0.6806319", "0.68061113", "0.6795453", "0.6779978", "0.67799133", "0.676991", "0.67681503", "0.6754985", "0.674448", "0.673811", "0.67067087", "0.670484", "0.6702484", "0.6698071", "0.6678058", "0.6673175", "0.66574454", "0.66574454", "0.6640769", "0.66315186", "0.6626822", "0.662386", "0.6622775", "0.6618873", "0.66111994", "0.6600761", "0.6596996", "0.65873855", "0.65825313", "0.65792567", "0.65788895", "0.6572296", "0.6564709", "0.6538615", "0.6525626", "0.652221", "0.6519175", "0.65089583", "0.65083057", "0.6505923", "0.650339", "0.6495575", "0.6494731", "0.6491702", "0.64915377", "0.6489776", "0.64868444", "0.64819276", "0.6478762", "0.6475844", "0.647382", "0.6471909", "0.6458758", "0.6454542", "0.6452559", "0.6447726", "0.6447524", "0.642878", "0.6421772", "0.6418946", "0.64106095", "0.6409684", "0.6408432" ]
0.8067338
1
Takes a pawn and returns its relative move position
def movePos(self,p,intMove): return pos(p.pos.x-(intMove*self.intPlayer(p.color)),p.pos.y+self.intPlayer(p.color))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_pawn(pos, game):\n #Convert coordinates to row and column\n row = int(pos[1]//(SQUARESIZE+FENCEWIDTH))\n col = int(pos[0]//(SQUARESIZE+FENCEWIDTH))\n #Make move\n game.move_pawn(game.get_player_turn(), (col,row))", "def get_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_move(board, player):\n row, col = 0, 0\n return row, col", "def get_ai_move(board, player):\r\n row, col = 0, 0\r\n return row, col", "def get_ai_move(board, player):\n row, col = 0, 0\n return row, col", "def choose_absolute_move(self):\n move = self.choose_move()\n if self.player_name == 'A':\n return move\n # Player B, revert the IDs\n return (move + 6) % 12", "def _get_player_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_player_move when the game has not started!\")\n current_player = next(player for player in self.player_list if player.name == self.current_turn.name)\n if current_player is None:\n raise RuntimeError(\"Attempted to get player move from a player who does not exist!\")\n return current_player.move()", "def get_piece_jumping_position(self, captured_piece):\n row_diff = captured_piece.row - self.row # Compares the row/column numbers of the two pieces\n col_diff = captured_piece.col - self.col\n opp_row = row_diff + captured_piece.row\n opp_col = col_diff + captured_piece.col\n return {'opp_row': opp_row, 'opp_col': opp_col}", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y", "def DoMove(position, move):\n return position - move", "def get_move(self):\n if self._difficulty == 0:\n return self._get_easy_move()\n else:\n # Different stategies/difficulties can be attached here\n return", "def get_move(self, game):\n return", "def get_move(self, game_state: BotGameState) -> BotMove:\n return", "def get_move(moves):\n pass", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def move_origin(self, x, y):\n return Position(self.x - x, self.y - y)", "def makeMove(self, move, player):", "def is_pawn_move_valid(self, from_row, from_col, to_row, to_col):\n # Setup variables used\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n to_piece = self.board.squares[to_row][to_col]\n row_diff = abs(from_row - to_row)\n col_diff = abs(from_col - to_col)\n dc = 0\n\n # Set flag for first move of pawn\n first_move = True if from_row == 6 or from_row == 1 else False\n\n # If direction is not correct for white, exit\n if to_row - from_row > 0:\n dr = 1\n if self.piece_color(piece) == \"white\":\n return False\n\n # If direction is not correct for black, exit\n if to_row - from_row < 0:\n dr = -1\n if self.piece_color(piece) == \"black\":\n return False\n\n # If moving straight\n if from_col == to_col:\n # if not legal straight move, exit\n if not (row_diff == 1 or (first_move and row_diff == 2)):\n return False\n\n # make sure to move has no pieces on straight path\n dm = row_diff + 1\n\n # return value\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm)\n\n# if retVal and not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# 
self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n\n return retVal\n\n # WHITE en passant\n # move from moveHistory => (\"piece\", fromRow, fromCol, toRow, toCol)\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♟\" and self.moveHistory[-1][1] == 1 and\\\n self.moveHistory[-1][3] == 3 and piece_color == \"white\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # BLACK en passant\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♙\" and self.moveHistory[-1][1] == 6 and\\\n self.moveHistory[-1][3] == 4 and piece_color == \"black\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # else move must be taking piece directly move\n # if legal taking piece move and (opponent-already check for own piece) piece at to-square\n if col_diff == 1 and row_diff == 1 and to_piece != None:\n\n# if not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n return True\n\n return False", "def getKickingPosition():\n pass", "def _acting_player_position(self):\n return self._env.acting_player_position", "def _get_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_move when the game has not started!\")\n if isinstance(self.current_turn, Player):\n return self._get_player_move()\n elif isinstance(self.current_turn, Enemy):\n return self._get_enemy_move()\n else:\n raise TypeError(\"You're trying to move something that isn't a character or an adversary.\")", "def getNextPosition(move, data):\n\tnextPos = {\"x\": data[\"you\"][\"body\"][0]['x'],\n\t\t\t\t\"y\": data[\"you\"][\"body\"][0]['y']}\n\n\tif move == 'up':\n\t\tnextPos[\"y\"] = nextPos[\"y\"] - 1\n\telif move == 'down':\n\t\tnextPos[\"y\"] = nextPos[\"y\"] + 1\n\telif move == 'right':\n\t\tnextPos[\"x\"] = nextPos[\"x\"] + 1\n\telif move == 'left'\t:\n\t\tnextPos[\"x\"] = nextPos[\"x\"] - 1\n\treturn nextPos", "def modpos(pos,L,move):\n pos += move\n if pos == L: #moved off right or bottom\n return(0)\n if pos == -1:#moved off top or left\n return(L-1)\n return(pos) #in the middle", "def get_move(board, player):\n #optimization to always pick the top-left corner on an empty board\n if set(board) == set([config.NO_PLAYER]):\n return 0\n result = minimax(board, player, 2, config.NEG_INF, config.INF)\n return result.index", "def get_player_move(self, roundNum, player):\n return (self.moves[roundNum])[player]", "def get_player_position(self):\n raise NotImplementedError", "def move_wrapper(board, player, trials):\r\n move = mm_move(board, player)\r\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\r\n return move[1]", "def move_wrapper(board, player, trials):\r\n move = mm_move(board, player)\r\n assert move[1] 
!= (-1, -1), \"returned illegal move (-1, -1)\"\r\n return move[1]", "def get_move(state, player, max_time, verbose):\n # We call the player bot in a separate process.This allows us to terminate\n # if the player takes too long.\n manager = Manager()\n result = manager.dict() # result is a variable shared between our process and\n # the player's. This allows it to pass the move to us\n\n # Start a process with the function 'call_player' and the given arguments\n process = Process(target=call_player, args=(player, state, result))\n\n # Start the process\n process.start()\n\n # Rejoin at most max_time miliseconds later\n process.join(max_time / 1000)\n\n # Check if the process terminated in time\n move = None\n if process.is_alive():\n pr('! Player {} took too long, no move made.'.format(state.whose_turn()), verbose)\n\n process.terminate()\n process.join()\n\n else:\n # extract the move\n move = result['move']\n\n return move", "def getOpponentMove(move, playerBoard, oppBoard, playerSeeds, oppSeeds):\r\n pass", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def respawn_player(self):\n self.rect.x = 50\n self.rect.y = 50\n \n # Specifies the Player's spawnpoint as maze_arrangement[1][1], representing\n # the tile in the top-left corner of the maze\n self.__user_x = 1\n self.__user_y = 1", "def compute_new_position_from_dice(self, player_index, thr):\r\n new_position = self.player_positions[player_index] + thr.get_amount()\r\n\r\n if new_position >= len(self.squares):\r\n new_position -= len(self.squares)\r\n\r\n return new_position", "def getPawn(self,x,y):\n if(self.gameState[x,y]==EMPTY):\n return\n return Pawn(x,y,self.gameState[x,y])", "def move_wrapper(board, player, trials):\n move = mm_move(board, player)\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\n return move[1]", "def move_wrapper(board, player, trials):\n move = mm_move(board, player)\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\n return move[1]", "def move_relative(self, delta):\n return self.move(delta, relative=True)", "def getManipulatorPosition(transform):\n pm.select(transform)\n pm.setToolTo('Move')\n position = pm.manipMoveContext('Move', q=1, p=1)\n pm.select(clear=True)\n return position", "def get_ai_move(board):\n return Connect4MiniMax.get_move(board)", "def _AN_to_coords(self, move: str):\n\n orig_move = move\n\n extra_info = \"\"\n\n # remove all characters that don't matter when parsing\n for pointless_char in \"x+#\":\n move = move.replace(pointless_char, \"\")\n\n # Handle castling\n if CASTLE_QUEENSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 2), CASTLE_QUEENSIDE\n elif CASTLE_KINGSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 6), CASTLE_KINGSIDE\n\n # Pawn promotion\n if move[-2] == \"=\":\n extra_info = move[-1] if self.white_to_move else move[-1].lower()\n move = move[:-2]\n\n # Destination of move, this is the only guaranteed substring in the move\n dest_str = move[-2:]\n dest = State._EAN_coords_to_board_coords(dest_str)\n move = move[:-2]\n\n # Deduce what piece actually made the move, if there is no shown there is no pawn\n # Note in AN pieces are always uppercase and location is lowercase,\n # so this makes it simple to 
check if we have a piece or a location\n piece = \"P\"\n if move and move[0].isupper():\n piece = move[0]\n move = move[1:]\n if not self.white_to_move:\n piece = piece.lower()\n\n # At this point the only info the move should contain is a hint on where the piece is coming from\n loc_hint = move\n\n possible_moves = self.get_all_moves()\n possible_moves = filter(lambda x: dest_str in x, possible_moves) # Filter to only moves that land on the right destination\n possible_moves = list(filter(lambda x: loc_hint in x[0:2], possible_moves)) # Filter to only moves that match the hint in the algebraic notation\n for possible_move in possible_moves:\n row, col = State._EAN_coords_to_board_coords(possible_move[0:2])\n if self.board[row][col] == piece:\n return (row, col), dest, extra_info\n\n raise ValueError(\"Algebraic notation parsing failed, no valid move found matching the given move \" + orig_move\n + \" with board state\\n\" + str(self))", "def move_me(self):\r\n\t\t#self.start_pos = self.rect.center\t\t\t\r\n\t\tif self.goal_pos is not None:\r\n\t\t\tprint(f'goal_pos: {self.goal_pos}, start_pos: {self.start_pos}')\r\n\t\t\tdx = self.goal_pos[0] - self.start_pos[0]\r\n\t\t\tdy = self.goal_pos[1] - self.start_pos[1]\r\n\r\n\t\t\tdistance = math.sqrt(dx*dx + dy*dy)\r\n\t\t\tself.shift += self.speed\r\n\r\n\t\ttry:\r\n\t\t\tif self.shift/distance < 0.99:\r\n\t\t\t\tself.rect.center = (self.start_pos[0] + self.shift/distance * dx,\r\n\t\t\t\t\t\t\t\t\t self.start_pos[1] + self.shift/distance * dy)\r\n\t\t\t\tprint(f'going to: {self.goal_pos}')\r\n\t\texcept ZeroDivisionError:\r\n\t\t\t\tpass\t\r\n\t\treturn True", "def get_move(self, board):\n # First, check if we can win in the next move\n winning_move = self.get_winning_move(board, self.letter)\n if winning_move is not None:\n return winning_move\n # Check if the player could win on their next move, and block them.\n blocking_move = self.get_winning_move(board, self.opponent_letter)\n if blocking_move is not None:\n return blocking_move\n # Try to take one of the corners, if they are free.\n corner_move = self.move_in_a_corner(board)\n if corner_move is not None:\n return corner_move\n # Try to take the center, if it is free.\n if board.size % 2 == 1:\n if board.is_position_availible(board.letters[board.size // 2]\n + board.numbers[board.size // 2]):\n return board.letters[board.size // 2] + board.numbers[board.size // 2]\n # Move on one of the sides.\n return self.choose_random_move_from_list(board, list(board.positions.keys()))", "def move(self, row, col, player):", "def player_pos(self) -> Pt:\n return self._player", "def move(self,p,intMove):\n gs = self.gameState.copy() #copy Board\n gs[p.pos.get()] = EMPTY #put position it was at as empty\n gs[self.movePos(p,intMove).get()] = p.color #set new position as filled\n return ((p,intMove),Board(gs,self.togglePlayer(self.whoseTurn)))", "def _player_loc():\n return _to_my_vec3(_get_mc().player.getTilePos())", "def my_location(state):\n return state['gladiators'][state['current_player']]['pos']", "def determine_move_position(self):\n green_probs = []\n net_size = len(self.net)\n adjacents = self.net[self.current_pos].adjacents\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in adjacents:\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i-1, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[0][0]\n elif distance == 1: 
#Probability of measure green at distance 1 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[4][0]\n green_probs.append((i, accum))\n #Returns the position in which the probability of\n #obtaining green when measuring is the lowest.\n return min(green_probs, key=itemgetter(1))[0]", "def getTilePos(self, pos = None):\n\n if not pos:\n pos = self.actor.getPos()\n \n for i in range(len(pos)):\n pos[i] = int(math.floor( (pos[i] + self.dimensions[i]) / 2.0))\n #pos[i] = int(math.floor( pos[i] / 2.0))\n\n return pos", "def player_movement(self):", "def firstMove(self):\n return (10, 10)", "def tile_to_world_loc(tile, player_tile):\n relxyz = tile_to_rel_loc(tile, player_tile)\n\n return (player.X + relxyz[0], player.Y + relxyz[1], player.Z + relxyz[2])", "def move(self):\r\n move = None\r\n if self.last_move is None:\r\n move = rockyman.move(self)\r\n else:\r\n index = the_moves.index(self.last_move) + 1\r\n if index >= len(the_moves):\r\n index = 0\r\n move = the_moves[index]\r\n self.last_move = move\r\n return move", "def getMove(self, board):\n pass", "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def move(self):\r\n his_move = random.randint(0, 2)\r\n return the_moves[his_move]", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def position(self):\n return self.source.position + self.position_relative", "def make_move(self, board: Board) -> int:\n move, evalutation = self.minimax(board, -math.inf, math.inf, self._depth, 1)\n return move", "def follow(self):\n \n # create list to add with moves\n pos_list = [self.position, self.position, self.position, self.position]\n # list of surrounding indices\n moveset = add_lists(self.moves, pos_list)\n \n # checks if there is bot nearby\n for item in moveset:\n if type(item) is Bot:\n return item\n \n # if no bot found, returns original position\n return self.position", "def apply_move(cell, x, y):\r\n x2 = (co_ords[cell])[0] + x\r\n y2 = (co_ords[cell])[1] + y\r\n return (x2, y2)", "def relative_board(self):\n board = self.game.board\n if self.player_name == 
'A':\n return board\n # Revert the board\n return board[6:] + board[:6]", "def best_move(self):\n if self._move is not None:\n return self._move\n else:\n return self.pass_move", "def player_position(self, player_ip, *args):\r\n\t\ttry:\r\n\t\t\tteam_type = self._teammates[player_ip]\r\n\t\texcept KeyError:\r\n\t\t\tself._comm_server.send_message(player_ip, \"position -1 -1\")\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tmaze_pos_finder = self._teams[team_type].maze_pos_finder\r\n\t\t\tplayer_info = self._teams[team_type].get_player_info_by_IP(player_ip)\r\n\r\n\t\t\tpos = maze_pos_finder.get_maze_pos(player_info.color_bgr)\r\n\r\n\t\t\tif pos is not None:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"position {0} {1}\" \\\r\n\t\t\t\t\t.format(*pos.position))\r\n\t\t\telse:\r\n\t\t\t\tself._comm_server.send_message(player_ip, \"position -1 -1\")", "def _get_position_delta(self) -> Tuple2IntType:\n rect = self.get_rect()\n if self._drawing_position == POSITION_NORTHWEST:\n return rect.topleft\n elif self._drawing_position == POSITION_NORTH:\n return rect.midtop\n elif self._drawing_position == POSITION_NORTHEAST:\n return rect.topright\n elif self._drawing_position == POSITION_WEST:\n return rect.midleft\n elif self._drawing_position == POSITION_CENTER:\n return rect.center\n elif self._drawing_position == POSITION_EAST:\n return rect.midright\n elif self._drawing_position == POSITION_SOUTHWEST:\n return rect.bottomleft\n elif self._drawing_position == POSITION_SOUTH:\n return rect.midbottom\n elif self._drawing_position == POSITION_SOUTHEAST:\n return rect.bottomright\n else:\n raise ValueError('unknown drawing position')", "def make_move(self, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n if self.stat_brain.all_valid(tower) == 0 or self.stat_brain.is_valid(height, index, tower):\r\n return height, index\r\n else:\r\n while not self.stat_brain.is_valid(height, index, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n return height, index", "def get_pos(self) -> tuple:\n return self.rect.center", "def get_next_move(self):\n return int(input('Enter your move: '))", "def get_center_position( peg, position_on_peg):\n x = Hanoi.FIRST_TOWER_X + peg * Hanoi.DISTANCE_BETWEEN_TOWER\n y = position_on_peg * Hanoi.DISK_HEIGHT + 0.5 * Hanoi.DISK_HEIGHT\n return (x, y)", "def locate(x, y):\n position(x * 6, y)", "def ChangePlayerPosition(PlayerAbsoluteX, PlayerAbsoluteY, MouseRelativeX, MouseRelativeY, dt) :\r\n MouseAbsoluteX, MouseAbsoluteY = RelativeToAbsolute(MouseRelativeX, MouseRelativeY)\r\n dx, dy = Normalize(PlayerAbsoluteX - MouseAbsoluteX, PlayerAbsoluteY - MouseAbsoluteY)\r\n return PlayerAbsoluteX + dx * dt, PlayerAbsoluteY + dy * dt", "def move_origin(self, x, y):\n return RobotPosition(self.x - x, self.y - y, self.theta)", "def position(self):\n if self.p:\n if self._finished:\n return None\n return self.p.get_position()*10", "def game_move(self):\n\t\t# make a note of the player who isn't playing\n\t\tfor x in self.players.keys():\n\t\t\tif x != self.nextPlayer:\n\t\t\t\totherPlayer = x\n\t\t\t\tbreak\n\t\t\n\t\t\n\t\t# If there are no remaining moves for this player, either the other\n\t\t# player has won or it's a draw\n\t\t# self.expansions = 1\n\t\tself.expansionCounter.count = 1\n\t\tif len(self.state.successors()) == 0:\n\t\t\tif self.state.is_win(otherPlayer):\n\t\t\t\treturn (None, otherPlayer)\n\t\t\telse:\n\t\t\t\t# None, None for a draw\n\t\t\t\treturn (None, None)\n\t\t\t\n\t\t# allow the player max_expansions for this turn\n\t\t# 
self.expansions = self.max_expansions\n\t\tself.expansionCounter.count = self.max_expansions\n\t\t\n\t\tnextPlayer = self.players[self.nextPlayer]\n\t\tlastPlayer = None\n\t\t\n\t\t# player may throw an exception\n\t\ttry:\n\t\t\t# get player's move, make sure we don't modify the current state\n\t\t\tmove = nextPlayer.move(self.state.get_player_state(self.nextPlayer), \n\t\t\t\t\t self.visitedStates)\n\t\t\t# player may give up\n\t\t\tif move.is_forfeit():\n\t\t\t\tprint \"Player\", self.nextPlayer, \"forfeits.\"\n\t\t\t\treturn (None, otherPlayer)\n\t\t\t# player may return illegal move\n\t\t\tif not self.state.is_valid_move(move):\n\t\t\t\tprint \"Illegal move returned by player\", self.nextPlayer, \\\n\t\t\t\t\t\t\"(\", self.players[self.nextPlayer].get_name(), \")\"\n\t\t\t\treturn (move, otherPlayer)\n\t\t\t# this player is now last player\n\t\t\tlastPlayer = self.nextPlayer\n\t\t\t# get the new next player and make the indicated move\n\t\t\tself.nextPlayer, clear = self.state.move(move, True)\n\t\t\tif clear:\n\t\t\t\tself.clear_repeat()\n\t\texcept:\n\t\t\tprint \"Exception thrown by player\", self.nextPlayer, \\\n\t\t\t\t\t\t\"(\", self.players[self.nextPlayer].get_name(), \")\"\n\t\t\tprint\n\t\t\ttraceback.print_exc()\n\t\t\tprint\n\t\t\treturn (None, otherPlayer)\n\t\t\n\t\tos.chdir(self.wd)\n\t\t\n\t\t# may be a repeated state IF the game cycles\n\t\tif self.is_repeat(self.state):\n\t\t\tself.state.handle_cycle()\n\t\t# otherwise, if the game cycles, note that we've been here\n\t\telif self.state.repeats():\n\t\t\tself.visitedStates.add(self.state.repeated_rep())\n\t\t\t\n\t\t# player may have sacrificed the game\n\t\tif self.state.is_win(otherPlayer):\n\t\t\treturn (move, otherPlayer)\n\t\t\n\t\t# player may have won\n\t\tif self.state.is_win(lastPlayer):\n\t\t\treturn (move, lastPlayer)\n\t\t\n\t\t# nobody's won or lost yet\n\t\treturn (move, None)", "def get_position(self, position):", "def get_absolute_pos(x, y, base):\n\n # give a small deadzone\n new_x = base[0] + (int(x / 2) if abs(x) > 2 else 0)\n new_y = base[1] - (int(y / 2) if abs(y) > 2 else 0)\n\n return (new_x, new_y)", "def move_relative(state, location, towards):\n move_options = util.move_options_to_list(state['move_options'])\n\n move_options = [m for m in move_options if m['type'] == 'move']\n\n if len(move_options) == 0:\n return None\n\n my_location_ = my_location(state)\n move_targets = np.array([m['target'] for m in move_options])\n target_locations = move_targets + my_location_\n\n distances_ = distances(location, target_locations)\n\n if towards:\n target_index = np.argmin(distances_)\n else:\n target_index = np.argmax(distances_)\n\n return move_options[target_index]", "def get_penguin_movement(self, state: FishGameState):\n return self.strategy.choose_action_from_state(state=state)", "def move(self):\n \n self.position = self.wander()", "def position_tile(self, target_row, target_col, current_row, current_col):\r\n moves_str = \"\"\r\n # current target is on the upper of 0\r\n if current_col == target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the left of 0\r\n elif current_row == target_row and current_col < target_col:\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n moves_str += 
CIRCLES[\"UP_CIRCLE\"]\r\n # current target is on the upperleft of 0\r\n elif current_row < target_row and current_col < target_col:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n lefts = target_col - current_col\r\n for dummy_l in range(lefts):\r\n moves_str += \"l\"\r\n for dummy_cycle in range(lefts - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_CIRCLE\"]\r\n moves_str += \"dru\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the upperright of 0\r\n elif current_col > target_col and current_row < target_row:\r\n ups = target_row - current_row\r\n for dummy_u in range(ups):\r\n moves_str += \"u\"\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"dlu\"\r\n for dummy_cycle in range(ups - 1):\r\n moves_str += CIRCLES[\"LEFT_CIRCLE\"]\r\n moves_str += \"ld\"\r\n # current target is on the right of 0\r\n elif current_col > target_col and current_row == target_row:\r\n rights = current_col - target_col\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n for dummy_cycle in range(rights - 1):\r\n if current_row <= 0: # can not go up\r\n moves_str += CIRCLES[\"DOWN_LEFT_CIRCLE\"]\r\n else:\r\n moves_str += CIRCLES[\"UP_LEFT_CIRCLE\"] \r\n moves_str += \"ulld\"\r\n return moves_str", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def get_position_on_game(self):\n return (self.peg, self.position_on_peg)", "def GetSnapPosition(self):\r\n\r\n snap, hAlign, vAlign, monitor = self._is_docked\r\n \r\n display = wx.Display(monitor)\r\n area = display.GetClientArea()\r\n size = self.GetManagedWindow().GetSize()\r\n \r\n pos = wx.Point()\r\n if hAlign == wx.LEFT:\r\n pos.x = area.x\r\n elif hAlign == wx.CENTER:\r\n pos.x = area.x + (area.width - size.x)/2\r\n else:\r\n pos.x = area.x + area.width - size.x\r\n\r\n if vAlign == wx.TOP:\r\n pos.y = area.y\r\n elif vAlign == wx.CENTER:\r\n pos.y = area.y + (area.height - size.y)/2\r\n else:\r\n pos.y = area.y + area.height - size.y\r\n\r\n return pos", "def find_closest_move(self, position, valid_moves):\n closest = None\n closest_value = 100\n for move in valid_moves:\n if closest:\n x_dist = abs(position[0] - move[0])\n y_dist = abs(position[1] - move[1])\n if x_dist + y_dist < closest_value:\n closest = move\n closest_value = x_dist + y_dist\n else:\n closest = move\n\n return closest", "def get_move_no(self):\r\n return self.command_manager.get_move_no()", "def get_pos(self):\n return self.rect.midtop", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = 
self.getTop()\n\treturn (left,top)", "def move(self, x, y, xoffset = 0, yoffset = 0):\n return (x + xoffset) % self.width, (y + yoffset) % self.height", "def move(self, board):\n winning_move = self.find_winning_move(board)\n if winning_move != -1:\n return winning_move\n\n blocking_move = self.find_blocking_move(board)\n if blocking_move != -1:\n return blocking_move\n\n if board[4] == \"4\": # center square is open\n return 4\n else:\n return self.prng.choice(board.available())", "def get_position():\n\n return character['Position']", "def mm_move(board, player):\r\n if board.check_win() == provided.PLAYERX:\r\n return SCORES[provided.PLAYERX],(-1,-1)\r\n elif board.check_win() == provided.PLAYERO:\r\n return SCORES[provided.PLAYERO],(-1,-1)\r\n elif board.check_win() == provided.DRAW:\r\n return SCORES[provided.DRAW],(-1,-1)\r\n else:\r\n empty_tuple_list = board.get_empty_squares()\r\n score_pos_tuple_list = []\r\n best_score = None\r\n best_pos = None\r\n for idx1 in range(len(empty_tuple_list)):\r\n empty_tuple = empty_tuple_list[idx1]\r\n board_clone = board.clone()\r\n board_clone.move(empty_tuple[0],empty_tuple[1],player)\r\n score_pos_tuple = mm_move(board_clone,provided.switch_player(player))\r\n score_pos_tuple_list.append(score_pos_tuple)\r\n\r\n #decide best score and pos fast!!!\r\n if score_pos_tuple[0]*SCORES[player] == 1:\r\n return (score_pos_tuple[0],empty_tuple)\r\n\r\n #decide best score and pos\r\n for idx2 in range(len(score_pos_tuple_list)):\r\n if idx2 == 0:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n else:\r\n if score_pos_tuple_list[idx2][0]*SCORES[player] > best_score*SCORES[player]:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n\r\n return (best_score,best_pos)", "def move(source, dest, speed=0):\n norm = normalise(source, dest)\n new_pos = (source[0] + norm[0], source[1] + norm[1])\n return new_pos", "def get_current_move(self):\n x_count = self.game_board.count('X')\n o_count = self.game_board.count('O')\n if x_count <= o_count:\n return 'X'\n return 'O'", "def validate_movement(self, piece, from_col, from_row, to_col, to_row):\n col_diff = abs(ord(from_col) - ord(to_col))\n row_diff = abs(from_row - to_row)\n\n # For any piece, it must actually move...\n if col_diff == 0 and row_diff == 0:\n return False\n # ...and there must be empty spaces in between the from/to squares (when on a column, row, or diagonal)\n if not self.validate_empty_between(from_col, from_row, to_col, to_row):\n return False\n\n # White pawn\n if piece == 'P':\n if col_diff == 1 and (to_row - from_row == 1):\n # Can move diagonally up one square, if taking another piece in that square or by en-passant\n return self.piece_colour(to_col, to_row) == 'B' \\\n or self.is_en_passant(from_col, from_row, to_col, to_row)\n elif col_diff != 0:\n # Otherwise, it can't change columns\n return False\n elif from_row == 2:\n # From initial position, can go up one or two rows (but can't take a piece)\n return (to_row == 3 or to_row == 4) and self.get_square(to_col, to_row) == ' '\n else:\n # Otherwise, can only move up one row (but can't take a piece)\n return to_row - from_row == 1 and self.get_square(to_col, to_row) == ' '\n # Black pawn\n elif piece == 'p':\n if col_diff == 1 and (from_row - to_row == 1):\n # Can move diagonally down one square, if taking another piece in that square or by en-passant\n return self.piece_colour(to_col, to_row) == 'W' \\\n or self.is_en_passant(from_col, from_row, to_col, to_row)\n 
elif col_diff != 0:\n # Otherwise, it can't change columns\n return False\n elif from_row == 7:\n # From initial position, can go down one or two rows (but can't take a piece)\n return (to_row == 6 or to_row == 5) and self.get_square(to_col, to_row) == ' '\n else:\n # Otherwise, can only move down one row (but can't take a piece)\n return from_row - to_row == 1 and self.get_square(to_col, to_row) == ' '\n # Rook\n elif piece.lower() == 'r':\n # Must remain in same column or same row\n return col_diff == 0 or row_diff == 0\n # Knight\n elif piece.lower() == 'n':\n # Jumps in a 2+1 pattern\n return (col_diff == 2 and row_diff == 1) or (col_diff == 1 and row_diff == 2)\n # Bishop\n elif piece.lower() == 'b':\n # Moves along diagonals\n return col_diff == row_diff\n # Queen\n elif piece.lower() == 'q':\n # Can move along columns, rows, or diagonals\n return col_diff == 0 or row_diff == 0 or col_diff == row_diff\n # King\n elif piece.lower() == 'k':\n # Can move a single square in any direction\n if not(0 <= col_diff <= 1) or not(0 <= row_diff <= 1):\n return False\n\n # But not next to the other king\n other_king = 'k' if piece.isupper() else 'K'\n # Get valid border squares\n border_squares = list(filter(\n lambda b_square: 'a' <= b_square[0] <= 'f' and 1 <= b_square[1] <= 8,\n [\n (chr(ord(to_col) - 1), to_row - 1), (to_col, to_row - 1), (chr(ord(to_col) + 1), to_row - 1),\n (chr(ord(to_col) - 1), to_row), (to_col, to_row), (chr(ord(to_col) + 1), to_row),\n (chr(ord(to_col) - 1), to_row + 1), (to_col, to_row + 1), (chr(ord(to_col) + 1), to_row + 1)\n ]\n ))\n # Check for the other king\n for square in border_squares:\n if self.get_square(square[0], square[1]) == other_king:\n return False\n\n return True", "def firstMove(board):\r\n x = board.size / 2\r\n return (x, x)", "def get_move(self):\n if self.__root.children is None:\n raise ValueError('Game has ended')\n\n children = [*sorted(self.__root.children, key=\n lambda x: x.win[0 if self.player == Board.PLAYER_0 else 1])]\n\n self.__root = children[-1]\n self.board[self.__root.position] = self.player\n self.change_player()", "def to_map_pos(self, screen_pos):\n return screen_pos + self.player_pos - SCREEN.size // 2", "def get_move(self, board):\n\n valid_moves = [move for move in board.legal_moves]\n is_valid_move = False\n while not is_valid_move:\n move = input(\"Enter a valid move in uci format: \").lower()\n if len(move) == 4 or len(move) == 5:\n try:\n player_move = chess.Move.from_uci(move)\n\n if board.is_legal(player_move):\n try:\n board.push(player_move)\n return player_move\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")" ]
[ "0.70455617", "0.68026656", "0.6782019", "0.6533234", "0.6489556", "0.64045006", "0.63270336", "0.6322109", "0.6290889", "0.6268482", "0.6268482", "0.62472296", "0.6150574", "0.6125518", "0.61029345", "0.60926723", "0.6087858", "0.60839784", "0.60775626", "0.6045745", "0.60347223", "0.602867", "0.59942394", "0.5988664", "0.59633857", "0.59496516", "0.5937773", "0.5934936", "0.590294", "0.590294", "0.5891601", "0.5888488", "0.58811665", "0.58809173", "0.5879727", "0.5863888", "0.5861964", "0.5861964", "0.5835463", "0.5827348", "0.5824273", "0.58055484", "0.5802537", "0.57986087", "0.57974946", "0.5792622", "0.5780573", "0.5772553", "0.57569313", "0.57501006", "0.5746508", "0.5737757", "0.57338977", "0.57299703", "0.57215184", "0.570349", "0.5698776", "0.5692156", "0.5689439", "0.56857604", "0.56791186", "0.56765133", "0.56758523", "0.5666474", "0.56637067", "0.56594497", "0.56588143", "0.5656915", "0.5656684", "0.5655458", "0.56471556", "0.56471497", "0.56469166", "0.5638722", "0.56352425", "0.5630575", "0.56256837", "0.56254435", "0.561551", "0.56131566", "0.5613041", "0.56048006", "0.55967534", "0.5594468", "0.5592453", "0.5589267", "0.55882066", "0.5584677", "0.5583816", "0.558376", "0.55799466", "0.55717605", "0.5571696", "0.5570361", "0.5554319", "0.55522734", "0.55458087", "0.553823", "0.5538073", "0.553166" ]
0.6875913
1
Tells if a move is legal
def legalMove(self,p,intMove): mPos = self.movePos(p,intMove)#board position of move if(self.inBounds(mPos)!=True):#Can't make move out of board bounds return False #if(p.color != self.whoseTurn):#Can't make move if it's not players pawn # return False if(intMove==0):#to move forward the node must be empty return (self.gameState[mPos.get()] == EMPTY) else:#to attack the node must have an enemy return (self.gameState[mPos.get()] == self.togglePlayer(p.color))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_valid(move):\n return True", "def valid_move(self, player, move):\n return (True)", "def legal_move(self, move, state = None):\n if state is None:\n state = copy(self.state)\n else:\n state = copy(state)\n return state[move // state.shape[0], move % state.shape[0]] == 0", "def validate_move(move):\n if move[0] in cc.VALID_RANKS and move[1] in cc.VALID_RANKS:\n valid = True\n else:\n valid = False\n return valid", "def check_move(self, move):\n\n if str(move) in self.moves_made:\n return False\n return True", "def is_valid(self, move):\r\n return move > 10 and move < 89", "def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True", "def is_legal_move(self, house_num):\n return True", "def move_check(self):\r\n \r\n if not self.run:\r\n return False\r\n \r\n if self.get_num_legal_moves() == 0:\r\n SlTrace.lg(\"NO more legal moves!\", \"nolegalmoves\")\r\n ###return False \r\n \r\n if self.new_move:\r\n self.announce_player(\"start_move\")\r\n if SlTrace.trace(\"selected\"):\r\n self.list_selected(\"After start_move\")\r\n self.new_move = False\r\n player = self.get_player()\r\n if player is None:\r\n return False\r\n \r\n return True", "def _is_valid_move(self, vector, current_piece, other_piece):\n return True", "def is_valid_move(self, move: Any) -> bool:\n return move in self.get_possible_moves()", "def is_legal_move(self, current_player, move):\n\t\tstarting_pos = move[0]\n\t\tending_pos = move[1]\n\t\tif ending_pos[0] not in range(self.board_size) or ending_pos[1] not in range(self.board_size):\t# Discard any generated moves that fall off of the board\n\t\t\treturn False \n\t\tif self.board.repr[starting_pos[0]][starting_pos[1]]!=self.player_symbol[current_player]:\n\t\t\tprint \"this should never trigger and is redundant\"\n\t\t\treturn False\n\t\tif self.board.repr[ending_pos[0]][ending_pos[1]]!= '.':\t# Check that landing spot is empty\n\t\t\treturn False\n\t\tmiddle_pos = (starting_pos[0]-(starting_pos[0]-ending_pos[0])/2,starting_pos[1]-(starting_pos[1]-ending_pos[1])/2)\t# Check the middle spot is the other piece - this should in theory not matter because the pieces alternate\n\t\tother_player = 1 - current_player \n\t\tif self.board.repr[middle_pos[0]][middle_pos[1]] != self.player_symbol[other_player]:\n\t\t\treturn False \n\t\treturn True", "def is_valid_move(self, position, dest_square):\n if self.symbol.isupper() and position.turn != 'w':\n return False\n elif self.symbol.islower() and position.turn != 'b':\n return False\n elif dest_square not in self.calculate_scope(position):\n return False\n else:\n return True", "def is_valid_move(x:int, y:int,board_length) -> bool:\n if x < 0 or y < 0 or x == board_length or y == board_length:\n return False\n return True", "def _isvalidmove(self, from_, to_):\n if self.board[from_].occupant is None:\n print(\"Moving from empty square\")\n return False\n piece = self.board[from_].occupant\n\n if piece.color != self.to_move:\n print(\"Wrong color\")\n return False\n\n if self.is_checked:\n if piece.notation != 'K':\n print(\"King is checked!\")\n return False\n\n diff = (\n to_cartesian(to_)[0] - to_cartesian(from_)[0],\n to_cartesian(to_)[1] - to_cartesian(from_)[1]\n )\n if not piece.hopping:\n if self.board.isblocked(from_, to_):\n print(\"Move blocked by other pieces\")\n return False\n\n if self.board[to_].occupant is not None:\n if piece.color == self.board[to_].occupant.color:\n print(\"Cannot capture friendly\")\n return False\n\n if diff not in 
piece.get_captures():\n print(\"Invalid piece capture\")\n return False\n\n if diff not in piece.get_moves():\n print(\"Invalid piece move\")\n return False\n\n return True", "def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True", "def is_move_valid(self, direction, reference_board=None):\n # Verify a left move does not take you off the board.\n if (direction == \"l\"):\n if (self._current_loc.get_column() == 0):\n return False\n # Verify an up move does not take you off the board.\n elif (direction == \"u\"):\n # Verify the move does not take you off the board.\n if (self._current_loc.get_row() == 0):\n return False\n # Verify a right move does not take you off the board.\n elif (direction == \"r\"):\n current_row = self._current_loc.get_row()\n max_column_number = len(self._untraversed_board[current_row])\n if self._current_loc.get_column() + 1 == max_column_number:\n return False\n # Verify a down move does not take you off the board.\n elif (direction == \"d\"):\n if self._current_loc.get_row() + 1 == len(self._untraversed_board):\n return False\n else:\n assert False, \"Invalid move direction.\"\n\n # Get the new location for a move in the specified direction.\n new_location = self._calculate_move_location(direction)\n new_row = new_location.get_row()\n new_col = new_location.get_column()\n # Verify the space is available\n if(reference_board is None):\n return BoardPath._untraversed_board[new_row][new_col] != \"#\"\n else:\n return reference_board[new_row][new_col] != \"#\"", "def is_legal(self, move, player, board):\r\n if(self.is_valid(move)==False):\r\n return False\r\n if(board[move]!=core.EMPTY):\r\n return False\r\n return True", "def move(self) -> bool:\n pass", "def any_legal_move(self, player, board):\r\n moves = self.legal_moves(player, board)\r\n #print(moves)\r\n return len(moves)!=0", "def valid_move(x, y):\r\n if [x, y] in empty_cells(board):\r\n return True\r\n else:\r\n return False", "def is_valid_move(self, move):\n if type(move) == str:\n move = int(move)\n\n return move in self.get_possible_moves()", "def isValidMove(self, move: Move) -> bool:\n # TODO: How do we determine the move type?\n # Some form of duck typing?\n minigame_move_classes = {\n \"BuyPrivateCompany\": \"BuyPrivateCompanyMove\",\n \"BiddingForPrivateCompany\": \"BuyPrivateCompanyMove\",\n }\n return minigame_move_classes.get(self.minigame_class) == move.__class__.__name__", "def is_valid(move):\n return isinstance(move, int) and move in Othello.squares()", "def op_move_preconditions(self):\n\n if(self.next_move != self.FREE):\n return False\n\n return True", "def valid_move(board, row, col):\n return board[row][col] == '-'", "def is_valid_move(self, position: Point) -> bool:\n\t\tif self.tiles[position.x][position.y] == 0:\n\t\t\treturn True\n\t\treturn False", "def is_valid_move(self, somerow, somecol):\n bool_1 = self.board[somerow][somecol] != 1\n bool_2 = self.num_queens_placed < self.size \n bool_3 = self.attack(somerow, somecol)\n return bool_1 and bool_2 and bool_3", "def valid_move(self, row, col):\n if not self._game_over:\n i_row, i_col = row-1, col-1\n #i_row and i_col wil be used to index the board (hence the i)\n (valid, flip_lst) = self._valid_placement(i_row, i_col)\n #print(\"FOR TESTING. 
Tiles Flipped: \", flip_lst)\n \n if valid:\n #Big Change: You decided to make determining validity\n # and flipping separate operations\n self._flip(i_row, i_col, flip_lst)\n else:\n print(\"\\nPlease enter a valid move!\")\n return False\n\n if self._board_is_full():\n self._game_over = True\n self._set_winner() \n \n self._switch_turn(self._turn)\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"\\nNo valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n self._switch_turn(self._turn) #Switch turn back to player before skip was determined\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"No valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n print(\"No moves exist for either player. GAME OVER\")\n self._game_over = True\n self._set_winner()\n return False\n\n return True\n elif self._game_over:\n print(\"The game is over. No more moves can be made!\")\n #TODO: Replace this^ with an exception later?\n return False", "def validate_move(move, player_board):\n select_row = move.select_row\n select_col = move.select_col\n \n player_board_rows = player_board.shape[0]\n player_board_cols = player_board.shape[1]\n \n if select_row >= player_board_rows or select_row < 0 or \\\n select_col >= player_board_cols or select_col < 0 or \\\n player_board[select_row][select_col] != -1:\n return False\n \n return True", "def validate_move(self, move_from, move_to, board):\n\n pass", "def can_move(self,direction):\r\n if direction in self.current_room.return_directions():\r\n print('move into the next room')\r\n # makes next room \r\n self.next_room(direction)\r\n return True\r\n else:\r\n print(\"Can't move that way\")\r\n return False", "def is_move_valid(move: Move, board: Board, whites_turn: bool) -> bool:\n if out_of_bounds(move[0]) == True or out_of_bounds(move[1]) == True:\n return False\n \n if move[0] == move[1]:\n return False\n\n if is_current_players_piece(piece_at_position(move[0], board), False) and whites_turn == True:\n return False\n elif is_current_players_piece(piece_at_position(move[0], board), True) and whites_turn == False:\n return False\n\n\n if piece_at_position(move[1], board) in WHITE_PIECES and whites_turn == True:\n return False\n elif piece_at_position(move[1], board) in BLACK_PIECES and whites_turn == False:\n return False\n\n\n if move[1] not in get_possible_moves(move[0], board):\n return False\n\n\n test_board = board\n test_board = update_board(test_board, move)\n if is_in_check(test_board, True) and whites_turn == True:\n return False\n elif is_in_check(test_board, False) and whites_turn == False:\n return False\n\n return True", "def valid_move(self, player, move):\n if self.rounds < len(self.players):\n if ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or not (True in [(pt in player.corners) for pt in move])):\n return (False)\n else:\n return (True)\n\n elif ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or (self.board).adj(player, move)\n or not (self.board).corner(player, move)):\n return (False)\n\n else:\n return (True)", "def check_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n # 1. moving out of the bar\n # 2. check if the source is of the valid player\n # 3. 
check if the destination is valid\n\n board.set_player_perspective(player)\n\n # 1.\n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n if board.bar[player] < 1:\n return False\n\n if not board.valid_dest(fields_to_move - 1):\n return False\n\n return True\n\n # 2.\n if not board.valid_source(spike_index):\n return False\n # 3.\n dest_spike_index = spike_index + fields_to_move\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board.all_at_home()\n \n return board.valid_dest(dest_spike_index)", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n\r\n if start_row != end_row and start_col != end_col: # Moving non-orthogonally\r\n return False\r\n\r\n if start_row == end_row: # Moving horizontally\r\n col_difference = end_col - start_col\r\n\r\n if col_difference > 0: # Moving to the right of the board\r\n for col in range(start_col + 1, end_col): # Checks if there is a piece between start_col and end_col\r\n if board[start_row][col].get_piece() is not None:\r\n return False\r\n # When there is no piece to impede path, check if position is empty or piece is enemy piece\r\n if end_piece_player_id is None or start_piece.get_player_id() != end_piece_player_id:\r\n return True\r\n\r\n if col_difference < 0: # Moving to the left of the board\r\n for col in range(start_col - 1, end_col, -1): # Checks to the left of the board\r\n # If there is a piece to block movement to the end_pos, return False\r\n if board[start_row][col].get_piece() is not None:\r\n return False\r\n if end_piece_player_id is None or start_piece.get_player_id() != end_piece_player_id:\r\n return True\r\n\r\n if start_col == end_col: # Moving verticially\r\n row_difference = end_row - start_row\r\n\r\n if row_difference > 0: # Moving down the board\r\n for row in range(start_row + 1, end_row):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n return False\r\n # Checks if end_pos is empty or an enemy piece is on end_pos\r\n if end_piece_player_id is None or start_piece.get_player_id() != end_piece_player_id:\r\n return True\r\n\r\n if row_difference < 0:\r\n for row in range(start_row -1, end_row, -1):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n return False\r\n # Checks if end_pos is empty or an enemy piece is on end_pos\r\n if end_piece_player_id is None or start_piece.get_player_id() != end_piece_player_id:\r\n return True", "def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n # a cannon cannot capture another cannon\n if type(board[to_col][to_row]) == Cannon:\n return False\n\n # if destination within the board and the move is strictly horizontal or vertical\n if to_col in range(9) and to_row in range(10) and (to_col == from_col or to_row == from_row):\n # if move is to the left\n if to_col < from_col:\n # make sure there is exactly one intervening piece that's not a cannon\n piece_count = 0\n for col in range(to_col + 1, from_col):\n if type(board[col][to_row]) == Cannon:\n return False\n if issubclass(type(board[col][to_row]), 
Piece):\n piece_count += 1\n if piece_count == 1:\n return True\n # if move is to the right\n if to_col > from_col:\n # make sure there is exactly one intervening piece that's not a cannon\n piece_count = 0\n for col in range(from_col + 1, to_col):\n if type(board[col][to_row]) == Cannon:\n return False\n if issubclass(type(board[col][to_row]), Piece):\n piece_count += 1\n if piece_count == 1:\n return True\n # if move is upward\n if to_row < from_row:\n # make sure there is exactly one intervening piece that's not a cannon\n piece_count = 0\n for row in range(to_row + 1, from_row):\n if type(board[to_col][row]) == Cannon:\n return False\n if issubclass(type(board[to_col][row]), Piece):\n piece_count += 1\n if piece_count == 1:\n return True\n # if move is downward\n if to_row > from_row:\n # make sure there is exactly one intervening piece that's not a cannon\n piece_count = 0\n for row in range(from_row + 1, to_row):\n if type(board[to_col][row]) == Cannon:\n return False\n if issubclass(type(board[to_col][row]), Piece):\n piece_count += 1\n if piece_count == 1:\n return True\n return False\n\n # for moving diagonally in the red palace\n if (from_coordinates in [[3,0],[3,2],[5,0],[5,2]] and to_coordinates in [[3,0],[3,2],[5,0],[5,2]] and\n type(board[4][1]) != Cannon and issubclass(type(board[4][1]), Piece)):\n return True\n\n # for moving diagonally in the blue palace\n if (from_coordinates in [[3,7],[3,9],[5,7],[5,9]] and to_coordinates in [[3,7],[3,9],[5,7],[5,9]] and\n type(board[4][8]) != Cannon and issubclass(type(board[4][8]), Piece)):\n return True\n\n return False", "def verify_legal_move(self, direction):\n for b_x, b_y in self.get_block_positions(self.active_piece.FIGURE):\n\n if direction == \"LEFT\":\n b_x -= 1\n elif direction == \"RIGHT\":\n b_x += 1\n elif direction == \"DOWN\":\n b_y += 1\n else:\n raise ValueError\n\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def check_illegal_move(self, player, action):\n available_actions = self.filter_actions(player)\n if action not in available_actions:\n print('Illegal move! Please choose another move!')\n return False\n return True", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)", "def test_verify_move(self):\n self._verify([self.applied_commands['move']])", "def legal_move(self, position_to, position_from):\n\n # Returns False if either position_to or position_from is out of bounds.\n if self.is_out_of_bounds(position_to, position_from) is True:\n\n return False\n\n # Return False if position_from does not contain a GamePiece to move.\n game_piece_object = self.get_game_piece_object_at_position(position_from)\n\n if game_piece_object is not None:\n\n game_piece_player_owner = game_piece_object.get_player()\n current_player = self.get_player_turn()\n\n # Returns False if current player does not move out of check on current\n # move or the move causes a check.\n if self.remains_in_check(current_player, position_to, position_from) is True:\n\n return False\n\n # Returns True if current player wishes to pass on their move.\n # Any position on the board will suffice, including a General's position.\n # Tested after simulated move to ensure current_player does not remain\n # in check. 
Bypasses following tests to make allowance for any position.\n if position_to == position_from:\n\n return True\n\n else:\n\n # Returns False if move is out of turn and GamePiece is moved by wrong\n # player.\n if current_player != game_piece_player_owner:\n\n return False\n\n # Returns False if current play is to capture General.\n # Capturing General is not allowed. Goal of game is checkmate.\n if position_to in [self.get_general_position_red(), self.get_general_position_blue()]:\n\n return False\n\n # All JangiGame rules are handled by not allowing potential moves to which\n # a player could move a GamePiece. These rules include elephant and horse\n # being blocked, cannon not having proper screen or jump/capture being another\n # cannon.\n elif position_to not in game_piece_object.get_potential_moves():\n\n return False\n\n return True\n\n else:\n\n if position_to == position_from:\n\n return True\n\n return False", "def is_rook_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same column or row\n if ((from_row != to_row and from_col != to_col) or\n (from_row == to_row and from_col == to_col)):\n return False\n\n # check if any pieces are in the way of destination\n if from_row != to_row:\n dc = 0\n dr = 1 if to_row - from_row > 0 else -1\n if from_col != to_col:\n dr = 0\n dc = 1 if to_col - from_col > 0 else -1\n dm = abs(to_row - from_row)\n\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm, toRow=to_row, toCol=to_col)\n\n # Casting: Rook invalidation\n if retVal and (from_row == 0 or from_row == 7):\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n if piece_color == \"white\":\n if from_col == 0:\n self.whiteCanCastleQside = False\n elif from_col == 7:\n self.whiteCanCastleKside = False\n else:\n if from_col == 0:\n self.blackCanCastleQside = False\n elif from_col == 7:\n self.blackCanCastleKside = False\n\n return retVal", "def is_legal_move(player, row_from, col_from, row_to, col_to):\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n\r\n \"\"\"special moves that are move available according to diagram\r\n List of tuples to and from values that are not possible\"\"\"\r\n moves_not_permitted = [[(0, 2), (1, 1)], [(0, 2), (1, 3)], [(1, 1), (2, 2)], [(1, 3), (2, 2)]]\r\n row_diff = abs(row_from - row_to)\r\n col_diff = abs(col_from - col_to)\r\n\r\n if player == 'hounds':\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\r\n \"\"\"\r\n if (col_to - col_from) < 0: # no moves to the left of the board\r\n return False\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n \"\"\"When player is a hare\"\"\"\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n 
and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\"\"\"\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n\r\n else:\r\n return False", "def canMove(self):\n\n if self.index == len(self.path):\n self.move = False\n return self.move", "def is_legal_move(state, action, player, rewarding_move=False): # TODO: Update this function to an more\n # optimized one.\n action = action.get_action_as_dict()\n if rewarding_move:\n if player == state.get_next_player() == state.get_latest_player():\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND and state.in_hand[player * -1] > 0:\n return True\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n opponent_piece = state.get_board().get_player_pieces_on_board(Color(player * -1))\n if opponent_piece and action['action']['at'] in opponent_piece:\n return True\n return False\n else:\n if state.get_next_player() == player:\n if action['action_type'] == YoteActionType.ADD and state.in_hand[player] > 0:\n empty_cells = state.get_board().get_all_empty_cells()\n if empty_cells and action['action']['to'] in empty_cells:\n return True\n elif action['action_type'] == YoteActionType.MOVE:\n if state.get_board().get_cell_color(action['action']['at']) == Color(player):\n effective_moves = YoteRules.get_effective_cell_moves(state, action['action']['at'], player)\n if effective_moves and action['action']['to'] in effective_moves:\n return True\n return False\n return False", "def check_move(self, x, y):\n try:\n return self.map[self.y+y][self.x+x] == \" \" or [self.x+x, self.y+y] == self.end_pos\n except IndexError:\n return False", "def can_move(self, side, number_of_turns):\n return True", "def any_legal_move(player, board):\n return any(Othello.is_legal(sq, player, board) for sq in Othello.squares())", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n\r\n # Case for Red's side\r\n if start_piece.get_player_id() == 'r':\r\n if not (3 <= end_col <= 5 and 0 <= end_row <= 2): # Returns False when is to move outside the palace\r\n return False\r\n else:\r\n if abs(start_col - end_col) == 1 and abs(start_row - end_row) == 1: # Checks if end_pos forces a move diagonally\r\n return True\r\n else:\r\n return False\r\n\r\n # Case for Black's side\r\n else:\r\n if not (3 <= end_col <= 5 and 7 <= end_row <= 9): # Returns False when is to move outside the palace\r\n return False\r\n else:\r\n if abs(start_col - end_col) == 1 and abs(start_row - end_row) == 1: # Checks if end_pos forces a move diagonally\r\n return True\r\n else:\r\n return False", "def _valid_move_exists(self):\n lst = []\n for i_row in range(self._num_rows):\n for i_col in range(self._num_cols):\n if self._valid_placement(i_row, i_col)[0]:\n lst.append((i_row, i_col))\n\n return lst != [] #If lst != [], then the list has elements 
-> valid move(s) exist", "def validate_move(board: list, character: dict, direction: str) -> bool:\n if direction.strip().upper() == \"N\":\n return (character[\"Position\"][0] - 1, character[\"Position\"][1]) in board\n elif direction.strip().upper() == \"S\":\n return (character[\"Position\"][0] + 1, character[\"Position\"][1]) in board\n elif direction.strip().upper() == \"W\":\n return (character[\"Position\"][0], character[\"Position\"][1] - 1) in board\n elif direction.strip().upper() == \"E\":\n return (character[\"Position\"][0], character[\"Position\"][1] + 1) in board\n else:\n print(\"Please enter only directions shown above\")\n return False", "def is_moving(self) -> bool:\n return self.orders and self.orders[0].ability.id is AbilityId.MOVE", "def valid_move(self, stone_color, index):\n if self.get(index) is not None:\n print(\"Invalid move - Space it occupied\")\n return False\n elif self.is_suicide(stone_color, index):\n print(\"Invalid move - Suicide\")\n return False\n else:\n return True", "def legal_move(marker, x, y, direction):\n # first if statement determines the directions\n # second if statement checks if the \"potential move\" is within the index\n if direction == \"N\":\n if 0 <= y-2 < len(marker):\n return marker[y-2][x] == marker[y-1][x] == '*'\n if direction == \"S\":\n if 0 <= y+2 < len(marker):\n return marker[y+2][x] == marker[y+1][x] == '*'\n if direction == \"W\":\n if 0 <= x-2 < len(marker[0]):\n return marker[y][x-2] == marker[y][x-1] == '*'\n if direction == \"E\":\n if 0 <= x+2 < len(marker[0]):\n return marker[y][x+2] == marker[y][x+1] == '*'\n return False", "def makeMove(self, _pos, _check_legality=True):\n\n # Check legality is an argument mainly for testing purposes; not for live gameplay.\n if _check_legality:\n legality = self.board.getMoveLegality(self, _pos)\n is_legal_move, is_capturing = legality['is_legal_move'], legality['is_capturing']\n if not is_legal_move:\n # Will need to update this line with interface update.\n # exit(\"ILLEGAL MOVE of {} at {}\".format(self.color, _pos))\n return 'illegal'\n else:\n # _is_capturing is necessary because of the not too often situation of a move with\n # no liberties, but is legal because it's capturing.\n if is_capturing: self.board.playerMakesMove(self, _pos, _is_capturing=True)\n else: self.board.playerMakesMove(self, _pos)\n else:\n self.board.playerMakesMove(self, _pos)", "def can_move(self):\n return self.movement", "def _is_valid_move(self, vector, current_piece, other_piece):\n # If direction is forward and the space is non-empty, break\n if vector[0] == 0 and other_piece != \"empty\":\n return False\n # If direction is diagonal and space is empty, break\n if vector[0] != 0 and other_piece == \"empty\":\n return False\n # If moving by 2 spaces, check if in starting row\n if vector[1] == 2 and current_piece.position[1] != 1:\n return False\n if vector[1] == -2 and current_piece.position[1] != 6:\n return False\n\n return True", "def LegalMove(self, pos):\n\n return (0 <= pos <= BOARD_SIZE) and (self.state[pos] == EMPTY)", "def validate_move(coordinates: dict, character_dict: dict, user_input: str) -> bool:\n new_coordinate = get_new_coordinate(x_y_coordinate=character_dict, move_direction=user_input)\n return new_coordinate in coordinates", "def _ispinnedmove(self, from_, to_):\n return False", "def move_is_valid(self, pos):\n\n if (not isinstance(pos, tuple) or len(pos) != 2 or \n not isinstance(pos[0], int) or not isinstance(pos[1], int)):\n return False\n y, x = pos\n if (y >= 0 and y < 
self.size and x >= 0 and x < self.size and \n self.board[pos] == HexBoard.EMPTY):\n return True\n else:\n return False", "def allow_to_move(self, direction, row, column):\n if self.valid_coverage_cell(row, column):\n if self.collision(direction) is False and \\\n self.cov_grid[row][column] == NOT_VISITED:\n return True\n else:\n return False", "def CheckMove(self,move):\n\t\tif(move=='w'):\n\t\t\tif(self.x==0):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='s'):\n\t\t\tif(self.x==15):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='d'):\n\t\t\tif(self.y==35):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='a'):\n\t\t\tif(self.y==0):\n\t\t\t\treturn 0\n\t\t\treturn 1", "def is_move_valid(self, from_row, from_col, to_row, to_col):\n # check is taking own piece?\n if self._is_taking_own_piece(from_row, from_col, to_row, to_col):\n return False\n\n piece = self.board.squares[from_row][from_col]\n if piece == ChessPiece.W_ROOK or piece == ChessPiece.B_ROOK:\n return self.is_rook_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_KNIGHT or piece == ChessPiece.B_KNIGHT:\n return self.is_knight_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_BISHOP or piece == ChessPiece.B_BISHOP:\n return self.is_bishop_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_QUEEN or piece == ChessPiece.B_QUEEN:\n return self.is_queen_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_KING or piece == ChessPiece.B_KING:\n return self.is_king_move_valid(from_row, from_col,\n to_row, to_col)\n if piece == ChessPiece.W_PAWN or piece == ChessPiece.B_PAWN:\n return self.is_pawn_move_valid(from_row, from_col,\n to_row, to_col)", "def is_knight_move_valid(self, from_row, from_col, to_row, to_col):\n # check for valid move\n if ((abs(from_row - to_row) == 1 and abs(from_col - to_col) == 2) or\n (abs(from_row - to_row) == 2 and abs(from_col - to_col) == 1)):\n return True\n return False", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n count = 0 # Count will track how many pieces are between start and end_pos\r\n\r\n if start_row != end_row and start_col != end_col: # Moving diagonally\r\n return False\r\n\r\n # If cannon moves to an empty position\r\n # if end_piece_player_id is None:\r\n\r\n if start_row == end_row: # Moving horizontally\r\n col_difference = end_col - start_col\r\n\r\n if col_difference > 0: # Moving to the right of the board\r\n for col in range(start_col + 1, end_col): # Checks if there is a piece between start_col and end_col\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if col_difference < 0: # Moving to the left of the board\r\n for col in range(start_col - 1, end_col, -1): # Checks to the left of the board\r\n # If there is a piece to block movement to the end_pos, return False\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if start_col == end_col: # Moving vertically\r\n row_difference = end_row - start_row\r\n\r\n if row_difference > 0: # Moving down the board\r\n for row in range(start_row + 1, end_row):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n\r\n if row_difference < 0: # Moving up the board\r\n for row 
in range(start_row -1, end_row, -1):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n # 1 piece between start_pos and end_pos and end_pos contains a chess piece\r\n if count == 1 and end_piece_player_id is not None:\r\n return True\r\n # end_pos has no piece and there are no pieces to impede path\r\n elif end_piece_player_id is None and count == 0:\r\n return True\r\n # Returns False for all other scenarios\r\n else:\r\n return False", "def is_valid_move(self, board, fieldy, fieldx):\n if isinstance(board[fieldy][fieldx], Piece):\n return False\n if self.posy - fieldy == self.direction and abs(self.posx - fieldx) == 1:\n return True\n else:\n return False", "def validate_move(board: list, character: list, direction: str) -> bool:\n max_x_y_coordinates = board[-1]\n valid_options = []\n if character[1] < max_x_y_coordinates[0]:\n valid_options.append(\"d\")\n if character[1] > 0:\n valid_options.append(\"a\")\n if character[0] < max_x_y_coordinates[1]:\n valid_options.append(\"s\")\n if character[0] > 0:\n valid_options.append(\"w\")\n if direction in valid_options:\n return True\n else:\n return False", "def test_legalMoveP_backwards_goose(self):\n rules_obj = rules.Rules(test_mode=True)\n board = gamenode.GameNode()\n startCoordinate = coordinate.Coordinate(3, 1)\n endCoordinate = coordinate.Coordinate(3, 2)\n board.setState(startCoordinate, types.GOOSE)\n actual_result = rules_obj.legalMoveP(board,\n startCoordinate,\n endCoordinate)\n expected_result = False\n self.assertEqual(actual_result, expected_result)", "def can_move(self):\r\n for wall in self.app.walls:\r\n if vec(self.grid_pos+self.direction) == wall:\r\n return False\r\n return True", "def make_move(self, **move_data: typing.Dict[str, typing.Any]) -> bool:\n raise NotImplementedError", "def can_move(self, relative_location: RelativeCoord) -> bool:\n\n return self.moves.can_move(relative_location)", "def move_atoms(self):\n return self.abivars.ionmov != 0", "def test_valid_move():\n\n board = Board()\n\n # a col outside the width of the board should be false\n assert board.valid_move(board.get_grid_size()[1] + 1) is False\n\n # only positive cols should be considered for a move\n assert board.valid_move(-2) is False\n\n # since board is empty all cols should have moves\n for i in range(board.get_grid_size()[1]):\n assert board.valid_move(i) is True\n\n # if a col is full no move can be made\n for i in range(board.get_grid_size()[1]):\n if i % 2 == 0:\n board.move(board.P1, 0)\n else:\n board.move(board.P2, 0)\n\n \"\"\"\n board now looks like this...\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|O|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|X|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|O|-|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|X|-|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|O|-|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|X|-|-|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n\n \"\"\"\n assert board.valid_move(0) is False", "def make_legal_move(move, board, start, end, move_number, en_passant_square, king_w, king_b, rook_w_l, rook_w_r,\n rook_b_l,\n rook_b_r, promotion_piece):\n # this function should have essentially what i had in main at first without making the move\n test_board = board.copy()\n valid_move = False\n piece = find_piece(board, start)\n end_piece = find_piece(board, end)\n\n if switch_player(move_number): # for whites move\n if (65 <= ord(piece) <= 90) and (validate_move(board, end, move_number)):\n if piece == \"P\":\n if pawn(board, start, end, move_number):\n if end[0] == 8:\n 
promotion(test_board, start, end, promotion_piece)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n promotion(board, start, end, promotion_piece)\n return True\n else:\n valid_move = False\n else:\n valid_move = True\n else:\n if can_en_passant(board, en_passant_square, end, start, move_number):\n execute_enpassant(test_board, start, end, move_number)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n execute_enpassant(board, start, end, move_number)\n return True\n else:\n return False\n elif piece == \"K\":\n if king(start, end):\n if controlled_squares(board, move_number, end):\n valid_move = True\n elif piece == \"N\":\n if knight(start, end):\n valid_move = True\n elif piece == \"B\":\n if bishop(board, start, end):\n valid_move = True\n elif piece == \"Q\":\n if queen(board, start, end):\n valid_move = True\n elif piece == \"R\":\n if rook(board, start, end):\n valid_move = True\n else:\n valid_move = False\n if valid_move:\n update_board(test_board, start, end)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n update_board(board, start, end)\n return True\n else:\n retract_move(test_board, start, end, end_piece)\n print(\"Illegal Move\")\n return False\n if not switch_player(move_number):\n if (97 <= ord(piece) <= 122) and validate_move(board, end, move_number):\n if piece == \"p\":\n if pawn(board, start, end, move_number):\n if end[0] == 1:\n promotion(test_board, start, end, promotion_piece)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n promotion(board, start, end, promotion_piece)\n return True\n else:\n valid_move = False\n else:\n valid_move = True\n else:\n if can_en_passant(board, en_passant_square, end, start, move_number):\n execute_enpassant(test_board, start, end, move_number)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n execute_enpassant(board, start, end, move_number)\n return True\n else:\n valid_move = False\n elif piece == \"k\":\n if king(start, end):\n if controlled_squares(board, move_number, end):\n valid_move = True\n elif piece == \"n\":\n if knight(start, end):\n valid_move = True\n elif piece == \"b\":\n if bishop(board, start, end):\n valid_move = True\n elif piece == \"q\":\n if queen(board, start, end):\n valid_move = True\n elif piece == \"r\":\n if rook(board, start, end):\n valid_move = True\n else:\n valid_move = False\n if valid_move:\n update_board(test_board, start, end)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n update_board(board, start, end)\n return True\n else:\n retract_move(test_board, start, end, end_piece)\n print(\"Illegal Move\")\n return False", "def is_valid_move(self, side_color, x, y):\n return self.disc_place(side_color, x, y, check_only=True)", "def test_check_move_with_barely_valid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 5 + [\" \"],\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertTrue(valid)", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n\r\n # Case for Red\r\n if start_piece.get_player_id() == 'r':\r\n\r\n # Red soldier 
hasn't crossed river\r\n if 3 <= start_row <= 4:\r\n if end_row - start_row == 1 and start_col == end_col:\r\n return True\r\n\r\n # Red solider has crossed river\r\n else:\r\n # Checks if movement forces a diagonal\r\n if end_row - start_row == 1 and abs(end_col - start_col) == 1:\r\n return False\r\n # Rules out the diagonal and checks if movement is valid\r\n elif end_row - start_row == 1 or abs(end_col - start_col) == 1:\r\n return True\r\n else:\r\n return False\r\n\r\n # Case for Black\r\n else:\r\n # Black soldier hasn't crossed river\r\n if 5 <= start_row <= 6:\r\n if end_row - start_row == -1 and start_col == end_col:\r\n return True\r\n else:\r\n return False\r\n\r\n # Black soldier has crossed the river\r\n else:\r\n # Checks if movement forces a diagonal\r\n if end_row - start_row == -1 and abs(end_col - start_col) == 1:\r\n return False\r\n # Rules out the diagonal and checks if movement is valid\r\n elif end_row - start_row == -1 or abs(end_col - start_col) == 1:\r\n return True\r\n\r\n else:\r\n return False", "def check_valid_move(grid: np.ndarray, current_position: tuple, move: tuple) -> bool:\n # getting coordinates for moved position\n moved_position = tuple(np.add(current_position, move))\n\n def compare_coordinates(a: tuple, b: tuple) -> bool:\n \"\"\"\n Helper function to compare coordinates\n Checks if a is smaller than b\n \"\"\"\n return all(np.array(a) < np.array(b))\n\n # checking if coordinates are inside the array (between (0,0) and (N,N))\n if compare_coordinates((0, 0), moved_position) and compare_coordinates(moved_position, grid.shape):\n # checking if the coordinates are not on the obstacle\n if grid[moved_position] == 'x':\n return False\n else:\n return True\n else:\n return False", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n\r\n # For horizontal movements for the horse\r\n if abs(end_row - start_row) == 1 and abs(end_col - start_col) == 2:\r\n # For movement going left\r\n if end_col - start_col == -2:\r\n if board[start_row][start_col-1].get_piece() is None: # Checks if horse is blocked\r\n return True\r\n else:\r\n return False\r\n # For movement going right\r\n else:\r\n if board[start_row][start_col + 1].get_piece() is None: # Checks if horse is blocked\r\n return True\r\n else:\r\n return False\r\n\r\n # For vertical movement for the horse\r\n elif abs(end_row - start_row) == 2 and abs(end_col - start_col) == 1:\r\n # For movement going down\r\n if end_row - start_row == 2:\r\n if board[start_row + 1][start_col].get_piece() is None:\r\n return True\r\n else:\r\n return False\r\n # For movement going up\r\n if end_row - start_row == -2:\r\n if board[start_row - 1][start_col].get_piece() is None:\r\n return True\r\n else:\r\n return False\r\n\r\n # Returns False if invalid end_pos for the horse\r\n else:\r\n return False", "def is_legal(move, player, board):\n hasbracket = lambda direction: Othello.find_bracket(move, player, board, direction)\n return board[move] == EMPTY and any(map(hasbracket, DIRECTIONS))", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n \"\"\"General moves should only move +1 or -1 from its starting row and col\"\"\"\r\n\r\n # Checks for legality if end position contains a piece and it does not attack own side's piece\r\n parsed_positions 
= self.parse_positions(start_pos, end_pos)\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n\r\n # Case for red General\r\n if start_piece.get_player_id() == 'r': # Start piece is guaranteed to contain a piece, binds red to the palace\r\n if 3 <= end_col <= 5 and 0 <= end_row <= 2:\r\n if abs(start_col - end_col) >= 1 and abs(start_row - end_row) >= 1:\r\n return False\r\n else:\r\n return True\r\n else:\r\n return False\r\n # Case for black General\r\n else:\r\n if 3 <= end_col <= 5 and 7 <= end_row <= 9: # Binds end_pos to the palace\r\n if abs(start_col - end_col) == 1 and abs(start_row - end_row) == 1: # Guarantees that diagonal movements will return false\r\n return False\r\n else:\r\n return True\r\n else:\r\n return False", "def validate_movement(self, piece, from_col, from_row, to_col, to_row):\n col_diff = abs(ord(from_col) - ord(to_col))\n row_diff = abs(from_row - to_row)\n\n # For any piece, it must actually move...\n if col_diff == 0 and row_diff == 0:\n return False\n # ...and there must be empty spaces in between the from/to squares (when on a column, row, or diagonal)\n if not self.validate_empty_between(from_col, from_row, to_col, to_row):\n return False\n\n # White pawn\n if piece == 'P':\n if col_diff == 1 and (to_row - from_row == 1):\n # Can move diagonally up one square, if taking another piece in that square or by en-passant\n return self.piece_colour(to_col, to_row) == 'B' \\\n or self.is_en_passant(from_col, from_row, to_col, to_row)\n elif col_diff != 0:\n # Otherwise, it can't change columns\n return False\n elif from_row == 2:\n # From initial position, can go up one or two rows (but can't take a piece)\n return (to_row == 3 or to_row == 4) and self.get_square(to_col, to_row) == ' '\n else:\n # Otherwise, can only move up one row (but can't take a piece)\n return to_row - from_row == 1 and self.get_square(to_col, to_row) == ' '\n # Black pawn\n elif piece == 'p':\n if col_diff == 1 and (from_row - to_row == 1):\n # Can move diagonally down one square, if taking another piece in that square or by en-passant\n return self.piece_colour(to_col, to_row) == 'W' \\\n or self.is_en_passant(from_col, from_row, to_col, to_row)\n elif col_diff != 0:\n # Otherwise, it can't change columns\n return False\n elif from_row == 7:\n # From initial position, can go down one or two rows (but can't take a piece)\n return (to_row == 6 or to_row == 5) and self.get_square(to_col, to_row) == ' '\n else:\n # Otherwise, can only move down one row (but can't take a piece)\n return from_row - to_row == 1 and self.get_square(to_col, to_row) == ' '\n # Rook\n elif piece.lower() == 'r':\n # Must remain in same column or same row\n return col_diff == 0 or row_diff == 0\n # Knight\n elif piece.lower() == 'n':\n # Jumps in a 2+1 pattern\n return (col_diff == 2 and row_diff == 1) or (col_diff == 1 and row_diff == 2)\n # Bishop\n elif piece.lower() == 'b':\n # Moves along diagonals\n return col_diff == row_diff\n # Queen\n elif piece.lower() == 'q':\n # Can move along columns, rows, or diagonals\n return col_diff == 0 or row_diff == 0 or col_diff == row_diff\n # King\n elif piece.lower() == 'k':\n # Can move a single square in any direction\n if not(0 <= col_diff <= 1) or not(0 <= row_diff <= 1):\n return False\n\n # But not next to the other king\n other_king = 'k' if piece.isupper() else 'K'\n # Get valid border squares\n border_squares = list(filter(\n lambda b_square: 'a' <= b_square[0] <= 'f' and 1 <= 
b_square[1] <= 8,\n [\n (chr(ord(to_col) - 1), to_row - 1), (to_col, to_row - 1), (chr(ord(to_col) + 1), to_row - 1),\n (chr(ord(to_col) - 1), to_row), (to_col, to_row), (chr(ord(to_col) + 1), to_row),\n (chr(ord(to_col) - 1), to_row + 1), (to_col, to_row + 1), (chr(ord(to_col) + 1), to_row + 1)\n ]\n ))\n # Check for the other king\n for square in border_squares:\n if self.get_square(square[0], square[1]) == other_king:\n return False\n\n return True", "def isLegalMove(self, column, state):\n \n for i in range(6):\n if state[i][column] == ' ':\n # once we find the first empty, we know it's a legal move\n return True\n \n # if we get here, the column is full\n return False", "def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n # if destination within the board\n if (to_col in range(9) and to_row in range(10) and\n # and the move is 1 up/down/left/right (with no other piece here) and then 1 farther out diagonally\n ((to_row - from_row == -2 and abs(to_col - from_col) == 1 and board[from_col][from_row - 1] == '') or\n (to_row - from_row == 2 and abs(to_col - from_col) == 1 and board[from_col][from_row + 1] == '') or\n (to_col - from_col == -2 and abs(to_row - from_row) == 1 and board[from_col - 1][from_row] == '') or\n (to_col - from_col == 2 and abs(to_row - from_row) == 1 and board[from_col + 1][from_row] == '')\n )\n ):\n return True\n else:\n return False", "def no_more_move(self):\n if (self.p_no_move + self.c_no_move == 2):\n return True\n return False", "def check4move(st, selected_unit, direction):\n return 1", "def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n # if destination within the board and the move is strictly horizontal or vertical\n if to_col in range(9) and to_row in range(10) and (to_col == from_col or to_row == from_row):\n # if move is to the left\n if to_col < from_col:\n # make sure no other piece lies between to and from\n for col in range(to_col + 1, from_col):\n if board[col][to_row] != '':\n return False\n return True\n # if move is to the right\n if to_col > from_col:\n # make sure no other piece lies between to and from\n for col in range(from_col + 1, to_col):\n if board[col][to_row] != '':\n return False\n return True\n # if move is upward\n if to_row < from_row:\n # make sure no other piece lies between to and from\n for row in range(to_row + 1, from_row):\n if board[to_col][row] != '':\n return False\n return True\n # if move is downward\n if to_row > from_row:\n # make sure no other piece lies between to and from\n for row in range(from_row + 1, to_row):\n if board[to_col][row] != '':\n return False\n return True\n\n return False\n\n # if moving along the diagonals in the red palace\n if from_coordinates in [[3,0],[3,2],[5,0],[5,2]] and to_coordinates in [[3,0],[3,2],[5,0],[5,2]] and board[4][1] == '':\n return True\n if from_coordinates in [[3,0],[3,2],[5,0],[5,2]] and to_coordinates == [4,1]:\n return True\n if from_coordinates == [4,1] and to_coordinates in [[3,0],[3,2],[5,0],[5,2]]:\n return True\n\n # if moving along the diagonals in the blue palace\n if 
from_coordinates in [[3,7],[3,9],[5,7],[5,9]] and to_coordinates in [[3,7],[3,9],[5,7],[5,9]] and board[4][8] == '':\n return True\n if from_coordinates in [[3,7],[3,9],[5,7],[5,9]] and to_coordinates == [4,8]:\n return True\n if from_coordinates == [4,8] and to_coordinates in [[3,7],[3,9],[5,7],[5,9]]:\n return True\n\n return False", "def checkLegalMove(self, initialPosition, destinationPosition, colorIndex):\n checkColor = self.grid.REPRESENTATION[colorIndex]\n otherColor = self.grid.REPRESENTATION[1-colorIndex]\n emptyColor = self.grid.REPRESENTATION[2]\n if self.grid[initialPosition] != checkColor:\n print 'The piece you are trying to move is not yours! Please reselect your move.'\n return False\n if self.grid[destinationPosition] != emptyColor:\n print 'The destination position of your move is not empty! Please reselect your move.'\n return False\n if initialPosition == destinationPosition:\n print 'The initial and destination position of your move are the same. Please reselect your move.'\n return False\n\n if initialPosition[0] == destinationPosition[0]:\n x = initialPosition[0]\n if (destinationPosition[1] - initialPosition[1]) %2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[1] < destinationPosition[1]:\n for i in range(initialPosition[1]+1, destinationPosition[1], 2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i+1)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[1]-1, destinationPosition[1], -2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i-1)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n elif initialPosition[1] == destinationPosition[1]:\n y = initialPosition[1]\n if (destinationPosition[0] - initialPosition[0])%2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[0] < destinationPosition[0]:\n for i in range(initialPosition[0]+1, destinationPosition[0], 2):\n if self.grid[(i, y)] != otherColor or self.grid[(i+1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[0]-1, destinationPosition[0], -2):\n if self.grid[(i, y)] != otherColor or self.grid[(i-1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n # make turns\n print 'Making turns is invalid move! 
Please reselect your move.'\n return False", "def canMove(self, direction, robot, newPosX, newPosY):\n result = False\n if (newPosY < 0 or newPosY > len(self.map)):\n print (\"Déplacement impossible\")\n elif (newPosX < 0 or newPosX > len(self.map[newPosY])):\n print (\"Déplacement impossible\")\n else:\n if (self.isThereWallInDirection(direction, robot, \\\n newPosX, newPosY)):\n print(\"Déplacement impossible (mur sur le chemin)\")\n result = False\n else:\n car = self.map[newPosY][newPosX]\n logging.info(\"self.map[{}]={}\".format(newPosY, \\\n self.map[newPosY]))\n logging.info(\"new coord X={} : Y={} :: {}\".\\\n format(newPosX, newPosY, car))\n if (car == \"O\"):\n print(\"Déplacement impossible (mur)\")\n else:\n logging.info(\"Déplacement possible\")\n result = True\n return result", "def valid_bool(self):\n return bool(self.piece.validate_move(self.board, self))", "def validBoard():\r\n\r\n\tglobal move1, move2\r\n\r\n\tif move1==move2 or move1-move2==1:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "def has_moved(self):\n return self.move_count > 0", "def check_move(self, y, x):\n return 0 <= y < len(self.maze) \\\n and 0 <= x < len(self.maze[y]) \\\n and self.maze[y][x] != \"#\"", "def test_check_move_with_valid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] + [\" \"] * 5,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 3)\n self.assertTrue(valid)", "def validate_move(self, move_from, move_to, board):\n\n from_coordinates = JanggiGame.translate_to_grid(move_from)\n to_coordinates = JanggiGame.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n\n # for red soldiers (who can only move downward or to the side)\n if self.get_color() == 'red':\n # if destination within the board and the move is strictly one downward or to the side\n if (to_col in range(9) and to_row in range(10) and\n ((abs(to_col - from_col) == 1 and to_row == from_row) or (to_col == from_col and to_row - from_row == 1))):\n return True\n # if moving diagonally within the blue palace\n if from_coordinates in [[3,7],[5,7]] and to_coordinates == [4,8]:\n return True\n if from_coordinates == [4,8] and to_coordinates in [[3,9],[5,9]]:\n return True\n\n return False\n\n # for blue soldiers (who can only move upward or to the side)\n if self.get_color() == 'blue':\n # if destination within the board and the move is strictly one upward or to the side\n if (to_col in range(9) and to_row in range(10) and\n ((abs(to_col - from_col) == 1 and to_row == from_row) or (to_col == from_col and to_row - from_row == -1))):\n return True\n # if moving diagonally within the red palace\n if from_coordinates in [[3, 2], [5, 2]] and to_coordinates == [4, 1]:\n return True\n if from_coordinates == [4, 1] and to_coordinates in [[3, 0], [5, 0]]:\n return True\n\n return False\n\n return False", "def check_legal(self, cur_pos, new_pos, board, state): \n if cur_pos and new_pos in self._special:\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n\n if state == \"UNFINISHED\":\n if new_pos in self._special: # Make sure that the piece it's trying to take isn't it's own\n if board[new_row][new_col] is not None:\n if self.piece_type(new_pos, board).get_color() == self._color:\n return False\n \n if new_pos in self._special: # if its in the palace\n # Checking if the movement is left or right 
(one column apart) from the cur_pos\n if (new_col == cur_col + 1 or new_col == cur_col - 1) and new_row == cur_row:\n return True\n # Checking if forward or backward movement is legal\n elif (new_row == cur_row - 1 or new_row == cur_row + 1) and (new_col == cur_col):\n return True\n # Checking if diagonal lines are possible\n elif cur_pos in self._corners:\n if (new_row == cur_row + 1 or new_row == cur_row - 1) and (new_col == cur_col - 1 or new_col == cur_col + 1):\n return True\n else:\n return False\n else:\n return False", "def validMove(move):\r\n\r\n\tglobal tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9\r\n\r\n\ta=eval(\"tile\"+str(move)+\"==0\")\r\n\treturn a", "def checkMove(move: Card, game) -> bool:\n lastMove = game.lastMove\n\n if move.number == lastMove.number:\n return True\n\n elif move.color == lastMove.color: \n return True\n\n elif move.wild: \n return True\n\n return False" ]
[ "0.86168677", "0.78282213", "0.78129864", "0.7796619", "0.7645074", "0.76316094", "0.762242", "0.75282884", "0.74927366", "0.7453383", "0.7452425", "0.73305404", "0.72937834", "0.7274391", "0.7266397", "0.7234366", "0.71924216", "0.7192144", "0.7192", "0.71551216", "0.714581", "0.7145095", "0.71425706", "0.7142265", "0.7113649", "0.70628214", "0.7060886", "0.7051618", "0.70410365", "0.70318425", "0.70190585", "0.7012234", "0.7002369", "0.6999567", "0.6987857", "0.69853216", "0.6959404", "0.6952135", "0.6945241", "0.69265854", "0.69238746", "0.6916593", "0.6891209", "0.68905425", "0.68864095", "0.68796384", "0.6879628", "0.6876595", "0.6854337", "0.6830468", "0.6827206", "0.68124354", "0.6808859", "0.6806946", "0.6806838", "0.6798952", "0.6782463", "0.6767816", "0.6761351", "0.67592525", "0.67267376", "0.6725823", "0.67161494", "0.671029", "0.6708849", "0.67067766", "0.6701182", "0.67002404", "0.66964984", "0.66812265", "0.66780245", "0.6670656", "0.66637266", "0.6663569", "0.66619027", "0.66614807", "0.66593397", "0.6650084", "0.6647372", "0.66409683", "0.66268647", "0.66137147", "0.66094995", "0.65720373", "0.6569931", "0.6564301", "0.6562264", "0.6555668", "0.65528804", "0.6548294", "0.6547696", "0.65457463", "0.65381235", "0.6535448", "0.65341604", "0.65322506", "0.6526687", "0.65058947", "0.6487632", "0.64755374" ]
0.69171023
41
needed for search Gets all legal available moves including those for the opponent
def openMoves(self):
    arr = []
    for y in range(0, HEIGHT):
        for x in range(0, WIDTH):
            t = self.getPawn(x, y)
            if t is not None:
                for z in range(-1, 2):
                    if self.legalMove(t, z):
                        # move, newState
                        arr.append((t, z))
    return arr
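
The document snippet above references HEIGHT, WIDTH, getPawn(), and legalMove() without defining them, so it cannot run on its own. Below is a minimal, self-contained sketch of the board API it appears to assume; the Pawn class, the 4x4 board size, and the one-step-forward move rule are illustrative assumptions, not part of the original record.

# Hypothetical harness for openMoves(); HEIGHT, WIDTH, Pawn, getPawn(), and
# legalMove() are stand-ins assumed for illustration, not the original code.
HEIGHT, WIDTH = 4, 4  # assumed board dimensions


class Pawn:
    def __init__(self, x, y, owner):
        self.x, self.y, self.owner = x, y, owner


class Board:
    def __init__(self, pawns):
        # Map (x, y) -> Pawn for O(1) occupancy lookups.
        self._cells = {(p.x, p.y): p for p in pawns}

    def getPawn(self, x, y):
        return self._cells.get((x, y))

    def legalMove(self, pawn, z):
        # Assumed rule: a pawn steps one row forward and z in {-1, 0, 1}
        # columns sideways, into an empty on-board square.
        nx, ny = pawn.x + z, pawn.y + 1
        return 0 <= nx < WIDTH and 0 <= ny < HEIGHT and (nx, ny) not in self._cells

    def openMoves(self):
        # Same enumeration as the document snippet: scan every square, and for
        # each pawn found, test the three candidate step directions.
        arr = []
        for y in range(0, HEIGHT):
            for x in range(0, WIDTH):
                t = self.getPawn(x, y)
                if t is not None:
                    for z in range(-1, 2):
                        if self.legalMove(t, z):
                            arr.append((t, z))
        return arr


board = Board([Pawn(1, 0, "white"), Pawn(2, 1, "black")])
print([(m[0].owner, (m[0].x, m[0].y), m[1]) for m in board.openMoves()])

Note that openMoves() applies no owner or turn filter, so a single call yields candidate moves for both sides at once, which matches the query's "including those for the opponent".
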
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_moves(self, board):\n self.available_moves = [move for move in board.legal_moves]", "def get_available_moves(self, board):\n pass", "def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves", "def availablemoves(moves):\n useravailablemoves = []\n for move in moves:\n useravailablemoves.append(move['move'])\n return(useravailablemoves)", "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def get_legal_moves(self):\n\n return self._legal_moves", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def get_legal_moves(self, pos: Position, game_board: GameBoard) -> PossibleMoveSet:\n pass", "def get_available_moves(self):\n available = []\n row, col = tuple(self.current_pos)\n if row - 1 >= 0 and self.maze[row - 1][col] != 'x':\n available.append('n')\n if row + 1 < len(self.maze) and self.maze[row + 1][col] != 'x':\n available.append('s')\n if col - 1 >= 0 and self.maze[row][col - 1] != 'x':\n available.append('w')\n if col + 1 < len(self.maze[row]) and self.maze[row][col + 1] != 'x':\n available.append('e')\n return available", "def get_valid_moves(self):\r\n # castling and en-passant rights are stored, because move affects these values\r\n temp_enpassant_possible = self.enpas_pos\r\n temp_castle = CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs)\r\n\r\n # for validating a possible move\r\n #1 all possibile moves are generated\r\n #2 each pos moves are made\r\n #3 generate opponent move\r\n #4 check if any of those moves let the king attacked\r\n #5 moves which let the king in chess are eliminated\r\n #6 the moves are undone\r\n moves = self.get_all_possible_moves() # 1\r\n\r\n # castle moves are directly introduced in valid moves\r\n if not self.turn_white:\r\n self.get_castle_moves(self.bKingPos[0], self.bKingPos[1], moves)\r\n else:\r\n self.get_castle_moves(self.wKingPos[0], self.wKingPos[1], moves)\r\n\r\n for i in range(len(moves) - 1, -1, -1): # 2\r\n self.make_move(moves[i])\r\n # 3 #4\r\n self.turn_white = not self.turn_white\r\n if self.in_check():\r\n moves.remove(moves[i]) # 5\r\n self.turn_white = not self.turn_white\r\n self.undo_move()\r\n\r\n # game ending possibilities\r\n if len(moves) == 0:\r\n if self.in_check():\r\n self.checkMate = True\r\n print(\"Checkmate !\")\r\n else:\r\n self.staleMate = True\r\n print(\"Stalemate !\")\r\n else:\r\n self.checkMate = False\r\n self.staleMate = False\r\n\r\n # the rigths 
are restored, and the values are not affected\r\n self.enpas_pos = temp_enpassant_possible\r\n self.cr_castle_r = temp_castle\r\n\r\n return moves", "def get_available_moves(self, board):\n available_moves = []\n for fieldx in range(len(board)):\n column = []\n for fieldy in range(len(board)):\n legit_move = board[self.posy][self.posx].is_valid_move(board, fieldx, fieldy)\n column.append(legit_move)\n available_moves.append(column)\n return available_moves", "def get_legal_moves(self, current_player):\n\t\tlegal_moves = []\n\t\tfor row in range(self.board_size):\n\t\t\tfor col in range(self.board_size):\n\t\t\t\tif self.board.repr[row][col] == self.player_symbol[current_player]:\n\t\t\t\t\tposition = (row,col)\n\t\t\t\t\tmove_fn_list = [self.north_move,\n\t\t\t\t\t\t\t\t self.east_move,\n\t\t\t\t\t\t\t\t self.south_move,\n\t\t\t\t\t\t\t\t self.west_move]\n\t\t\t\t\tfor move_fn in move_fn_list:\n\t\t\t\t\t\tmove = move_fn(position)\n\t\t\t\t\t\tif self.is_legal_move(current_player,move):\n\t\t\t\t\t \t\tlegal_moves.append(move)\n\t\t\t\t\t \t\t# now we are going to check for a double jump!\n\t\t\t\t\t \t\tstart = move[0]\n\t\t\t\t\t \t\tcur_end = move[1]\n\t\t\t\t\t \t\tnew_board = copy.deepcopy(self.board)\t# Make a copy of the board, and then make the move on that board\n\t\t\t\t\t \t\tnew_board.movePiece(start,cur_end)\n\t\t\t\t\t \t\tcontinue_move = move_fn(cur_end)\t\t# Try to move again in the same direction\n\t\t\t\t\t \t\tnew_game_state = Game(self.board_size,new_board,current_player)\t\t\t# make a whole new game state and check if our move is legal on that \n\t\t\t\t\t \t\twhile(new_game_state.is_legal_move(current_player, continue_move)):\n\t\t\t\t\t \t\t\tstart_cur = cur_end\n\t\t\t\t\t \t\t\tcur_end = continue_move[1]\n\t\t\t\t\t \t\t\tlegal_moves.append((start,cur_end))\n\t\t\t\t\t\t \t\tnew_board = copy.deepcopy(new_board)\n\t\t\t\t\t \t\t\tnew_board.movePiece(start_cur,cur_end)\n\t\t\t\t\t \t\t\tcontinue_move = move_fn(cur_end)\n\t\t\t\t\t \t\t\tnew_game_state = Game(new_game_state.board_size,new_board,current_player)\n\t\treturn legal_moves", "def get_moves(self):", "def available_moves(self):\n moves = []\n for x, y in self.available_boards:\n moves.extend([self.to_position(x, y, i, j) for (i, j)\n in self.boards[x][y].empty_squares])\n return moves", "def legal_moves(player, board):\n return [sq for sq in Othello.squares() if Othello.is_legal(sq, player, board)]", "def legal_moves_in_uci(self):\n\n # get all legal moves. 
'legal_moves' is inherited attribute from super class that returns all possible moves\n return [m.uci() for m in list(self.legal_moves)]", "def get_all_available_moves(self, board):\n available_attacks = self.get_available_attacks(board)\n if any(True in sublist for sublist in available_attacks):\n attack = True\n return available_attacks, attack\n else:\n available_moves = self.get_available_moves(board)\n attack = False\n return available_moves, attack", "def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def get_goat_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_goat():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if not (target_row > 7 or target_row < 0 or target_col > 7 or target_col < 0):\n if board.status[target_row, target_col] == 0:\n moves.add(move)\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves", "def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n if not (self.field_row*self.color_value == 1 or self.field_row*self.color_value == -6):\n self.pot_moves = {(1*self.color_value, 0)}\n\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n\n for move in self.pot_capture_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def get_legal_moves(self, player):\r\n move_list = []\r\n if self._phase == GamePhase.SETUP:\r\n return 
self._setup_legal_moves(player)\r\n elif self._phase == GamePhase.MOVE:\r\n return self._move_legal_moves(player)\r\n elif self._phase == GamePhase.BUILD:\r\n return self._build_legal_moves(player)\r\n return move_list", "def get_moves(self):\n grid = self.model.grid\n # List of agents we can't overlap with\n no_overlap = [\"wall\", \"human\", \"zombie\"]\n\n if self.agent_type == \"zombie\" or \\\n (\"AvoidingZombie\" not in self.states and os.environ[\"mode\"] == \"5\"):\n no_overlap.append(\"road\")\n\n # Always give the option to stay on your current location(stand still)\n all_cells = self.neighbors()\n free_cells = [self.pos]\n\n # Get rid of cells that we may not move to by iterating through all\n # cells next to the agent, and only adding non-occupied cells\n for cell in all_cells:\n cell_occupied = False\n x, y = cell.pos\n # If there are agents in the current cell, and we are not allowed\n # to overlap with any of those agents, the cell is occupied.\n # Only add cells which are not occupied.\n if not grid.is_cell_empty((x, y)):\n for agent in grid[x][y]:\n if agent.agent_type in no_overlap:\n cell_occupied = True\n break\n if not cell_occupied:\n free_cells.append((x, y))\n return free_cells", "def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves", "def get_all_game_pieces_potential_moves(self):\n\n board = self.get_board()\n\n for row in board:\n\n for column in row:\n\n if column is not None:\n\n print(column.get_label(), ': ' , column.get_potential_moves())", "def get_legal_moves(self, board):\n moves = set()\n capture_moves = set()\n for move in self.pot_moves:\n target_row = self.field_row + move[0]\n target_col = self.field_col + move[1]\n if self.path_clear(board, move, target_row, target_col):\n if board.status[target_row, target_col] * self.color_value == 0:\n moves.add(move)\n if board.status[target_row, target_col] * self.color_value < 0:\n capture_moves.add(move)\n self.legal_moves = moves\n self.legal_capture_moves = capture_moves", "def all_valid_moves(self, cur_board, all_english_words):\r\n rack_to_str = self.convert_rack_to_str()\r\n if len(rack_to_str) == 0:\r\n return [] \r\n \r\n moves_made = cur_board.find_words_on_board()\r\n actual_words = []\r\n\r\n if cur_board.board_empty: #no one has played yet.\r\n return Utils.get_all_legal_combos([\"\"], rack_to_str, all_english_words)\r\n\r\n\r\n\r\n \r\n for move in moves_made:\r\n actual_words.append(move[0]) #the first element is a string, representing the actual move made.\r\n \r\n \r\n return Utils.get_all_legal_combos(actual_words, rack_to_str, all_english_words)", "def available_moves(self):\n available_moves = []\n for i in range(self.quadrants_count):\n quadrant_positions = self.play_area[i].available_positions()\n for p in quadrant_positions:\n position = p + i * 9\n for j in range(self.quadrants_count):\n move1 = [str(position), str(j + 1), \"l\"]\n move2 = [str(position), str(j + 1), \"r\"]\n available_moves.append(\" \".join(move1))\n available_moves.append(\" \".join(move2))\n return available_moves", "def get_all_legal_moves(\n old_board, current_board, player: int\n) -> List[Tuple[Tuple, Tuple]]:\n possible_pieces_to_move, open_position = _check_open_rule(\n old_board=old_board, new_board=current_board, player=player\n )\n if open_position and len(possible_pieces_to_move) > 0:\n return [(pos, open_position) for pos in 
possible_pieces_to_move]\n else:\n # list[tuple[int,int]]\n owned_positions = [\n (r, c) for r in range(5) for c in range(5) if current_board[r][c] == player\n ]\n # list[tuple[tuple,tuple]]\n return [\n (pos, new_pos)\n for pos in owned_positions\n for new_pos in _get_empty_neighbors(pos, current_board)\n ]", "def get_legal_moves(self, i, j):\r\n legal_moves = list()\r\n for action in self.action_dic.keys():\r\n coordinate_change = self.action_dic[action]\r\n new_i = coordinate_change[0] + i\r\n new_j = coordinate_change[1] + j\r\n if (new_i >= 0 and new_i < 3) and (new_j >= 0 and new_j < 3):\r\n legal_moves.append(self.reflection_dic[action])\r\n return legal_moves", "def get_valid_moves(self):\n if self.king:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1],\n [self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n else:\n if self.player == 1:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1]]\n else:\n valid_moves = [[self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n return valid_moves", "def get_legal_nearby_moves(self, nearby_length=1):\n moves = []\n for row, col in self.get_legal_moves():\n if not self._is_nearby_empty(nearby_length, row, col):\n moves.append((row, col))\n\n return moves or None", "def get_next_moves(board, player):\r\n\r\n if player == 'hare':\r\n moves = []\r\n next_moves = []\r\n\r\n (row_from, col_from) = get_hare_positions(board)\r\n moves = possible_moves_list(row_from, col_from)\r\n\r\n for move in moves:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_from, col_from, row_to, col_to):\r\n \"\"\" if move is allowed then add to list of next moves\"\"\"\r\n next_moves.append(move)\r\n\r\n return next_moves\r\n\r\n else:\r\n \"\"\" for individual hounds\r\n get next moves\"\"\"\r\n moves = []\r\n next_moves_hound1 = []\r\n next_moves_hound2 = []\r\n next_moves_hound3 = []\r\n\r\n (row_hound_1, col_hound_1), (row_hound_2, col_hound_2), (row_hound_3, col_hound_3) = get_hound_positions(board)\r\n moves_hound1 = possible_moves_list(row_hound_1, col_hound_1)\r\n moves_hound2 = possible_moves_list(row_hound_2, col_hound_2)\r\n moves_hound3 = possible_moves_list(row_hound_3, col_hound_3)\r\n\r\n for move in moves_hound1:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_1, col_hound_1, row_to, col_to):\r\n next_moves_hound1.append(move)\r\n\r\n for move in moves_hound2:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_2, col_hound_2, row_to, col_to):\r\n next_moves_hound2.append(move)\r\n\r\n for move in moves_hound3:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_3, col_hound_3, row_to, col_to):\r\n next_moves_hound3.append(move)\r\n\r\n return (next_moves_hound1, next_moves_hound2, next_moves_hound3)", "def test_get_legal_moves():\n board = Board(*TEST_AGRU2)\n comp = Computer(board, COMP_DISK, HUMAN_DISK)\n comp.b.columns_list = [\n [HUMAN_DISK],\n [HUMAN_DISK, HUMAN_DISK],\n [HUMAN_DISK, HUMAN_DISK, HUMAN_DISK]\n ]\n comp.b.new_disk = (MOVE2)\n assert set(comp.get_legal_moves(comp.b)) == {(1, 0), (0, 1)}", "def __find_all_moves(self, tower) -> list:\r\n choice = []\r\n for height in range(1,len(tower.tower)-2):\r\n for index in range(1,4):\r\n if self.stat_brain.is_valid(height, index, tower):\r\n choice.append((height, index))\r\n \r\n r.shuffle(choice)\r\n return choice", "def get_possible_moves(self) -> list:\n if self.p1_turn:\n name = '2'\n 
else:\n name = '1'\n\n count = 0\n for i in self.claim:\n if i == name:\n count += 1\n over = count >= 0.5 * len(self.claim)\n\n moves = []\n if not over:\n for i in self.letters:\n if i.isalpha():\n moves.append(i)\n return moves", "def possible_moves(self):\n lst_of_direcs = []\n for a_car in self.__cars:\n good_moves = a_car.possible_moves()\n new = [(a_car.get_name(),dire, des) for dire, des\\\n in good_moves.items()]\n lst_of_direcs.append(new[0])\n lst_of_direcs.append(new[1])\n return lst_of_direcs", "def get_possible_moves(self) -> list:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n if p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2:\n return []\n moves = []\n for letter in self.current_board:\n if letter.isalpha():\n moves.append(letter)\n return moves", "def get_all_possible_moves(self, state):\n move_list = []\n done_finding_moves = False\n any_non_pass_moves = False\n while not done_finding_moves:\n try:\n m = next(self.move_generator) # Gets a (move, state) pair.\n # print(\"next returns: \",m[0]) # Prints out the move. For debugging.\n if m[0] != 'p':\n any_non_pass_moves = True\n move_list.append(m) # Add the move to the list.\n except StopIteration as e:\n done_finding_moves = True\n if not any_non_pass_moves:\n move_list.append(('p',state))\n return move_list", "def possible_moves(self, pos: Point) -> List[Point]:\n # logger.debug(f\"inside possible_moves {pos}\")\n available_squares = []\n for direction in Direction.cardinal():\n # logger.debug(f\"direction = {direction}\")\n neighbor = pos + direction\n # logger.debug(f\"neighbor = {neighbor}\")\n if neighbor.x < 1 or self.width - 2 < neighbor.x or neighbor.y < 1 or self.height - 2 < neighbor.y:\n # logger.debug(f\"{neighbor} not in bounds\")\n continue\n if self.can_carve(pos, direction):\n # logger.debug(f\"can_carve returned True pos={pos}, direction={direction}\")\n available_squares.append(neighbor)\n # logger.debug(f\"available squares:\")\n # for square in available_squares:\n # logger.debug(f\"square={square}\")\n # logger.add(\"debug.log\")\n return available_squares", "def get_all_moves(self, castling_allowed=True):\n\n can_move = str.isupper if self.white_to_move else str.islower\n\n valid_moves = set()\n\n for row_num, row in enumerate(self.board):\n for col_num, piece in enumerate(row):\n if piece != EMPTY_SPACE and can_move(piece):\n\n location = (row_num, col_num)\n\n # Everything except the pawn movement\n if piece.lower() in NAME_TO_PIECE:\n valid_moves = valid_moves.union(self._get_standard_moves_for_piece(location, piece))\n\n # Pawn moves\n if piece.lower() == PAWN:\n valid_moves = valid_moves.union(self._get_pawn_moves(location, piece))\n\n # Castling\n if castling_allowed and piece.lower() == KING:\n valid_moves = valid_moves.union(self._get_possible_castles(piece))\n\n return valid_moves", "def legal_moves(board,player=None):\r\n \r\n possible_moves = []\r\n moves = []\r\n if player == None:\r\n moves += board.white + board.black\r\n elif player == -1:\r\n moves += board.black\r\n elif player == 1:\r\n moves += board.white\r\n \r\n captured = False\r\n for pos in moves:\r\n if pos[0] == 'A':\r\n m = [-8,-7,1,8,9]\r\n elif pos[0] == 'H':\r\n m = [-9,-8,-1,7,8]\r\n else:\r\n m = [-9,-8,-7,-1,1,7,8,9]\r\n loc = decode(pos)\r\n for i in m:\r\n captured = capture(board, player, possible_moves, pos, loc, i)\r\n canter(board, 
player, possible_moves, pos, loc, i)\r\n plain(board, player, possible_moves, pos, loc, i)\r\n \r\n if captured:\r\n enemy_list = []\r\n for capturing_move in possible_moves:\r\n if len(capturing_move) == 3:\r\n enemy_list.append(capturing_move)\r\n possible_moves = list(enemy_list)\r\n\r\n return possible_moves", "def legalMoves(self):\n moves = []\n indexOfZero = self.tiles.index(0)\n \n if indexOfZero == 0:\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 1:\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 2:\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 3:\n moves.append('Up')\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 4:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 5:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 6:\n moves.append('Up')\n moves.append('Right')\n elif indexOfZero == 7:\n moves.append('Up')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 8:\n moves.append('Up')\n moves.append('Left')\n else:\n print('something wrong with board')\n return moves", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n 
except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def get_legal_moves(self, player: int) -> np.ndarray:\n stage2 = self.is_stage2()\n action_mask = np.zeros((24, 5, 25), dtype=bool)\n # if stage 1 add set options\n array_board = np.array(self.board)\n if not stage2:\n legal_pos = np.where(array_board == 0)[0]\n for pos in legal_pos:\n if self.is_mill(player, pos, self.board): # current selection completes a mill\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if\n not self.is_mill(-player, opp_p, self.board)] # can't remove opponent in mill\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[pos, -1, opp_pos] = True\n else:\n action_mask[pos, -1, -1] = True # place piece on board\n else:\n from_pos_cands = np.where(array_board == player)[0]\n for from_pos in from_pos_cands:\n mill_cands = [(orient, adj) for orient, adj in enumerate(self.adjacent[from_pos]) if\n adj is not None and self.board[adj] == 0] # TODO added not, need to validate\n if_played_board = self.board.copy()\n if_played_board[from_pos] = 0\n for (orient, adj) in mill_cands:\n if self.is_mill(player, adj, if_played_board):\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if not self.is_mill(-player, opp_p, if_played_board)]\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[from_pos, orient, opp_pos] = True\n else:\n action_mask[from_pos, orient, -1] = True\n\n return action_mask", "def check_for_moves(self) -> list:\r\n avail_moves = []\r\n for x in range(self.size):\r\n for y in range(self.size):\r\n if self.tags[x][y] is None:\r\n avail_moves.append((x, y))\r\n return avail_moves", "def get_moves(self, board, position):\n current_piece = board.grid[position[1]][position[0]]\n\n moves = []\n\n # For each direction in which the piece can move...\n for direction in self.directions:\n # for each vector in that direction...\n # (once a piece is encountered in a direction,\n # further positions in that direction are unaccessible,\n # therefore break out of inner FOR loop)\n for vector in direction:\n new_position = (position[0] + vector[0], position[1] + vector[1])\n\n # Check if the proposed destination is inbounds\n if board._inbounds(new_position) is False:\n break\n\n other_piece = board.grid[new_position[1]][new_position[0]]\n\n # Check if the proposed destination is occupied by a friendly piece\n if other_piece != \"empty\" and other_piece.player == current_piece.player:\n break\n\n # Check other validity conditions, mainly for pawn\n if self._is_valid_move(vector, current_piece, other_piece) is False:\n break\n\n # The destination is viable, add the move\n moves.append(Move(position, current_piece, new_position, other_piece))\n\n # If 
there was an enemy piece on the square\n if other_piece != \"empty\":\n break\n\n return moves", "def mappossiblemoves(self,nodeStart):\n destinations = [set() for ix in range(game.DICEMAX+1)]\n destinations[0] = set([nodeStart])\n routes = [[nodeStart]]\n #\n # TODO: Not convinced this adds anything more than it harms in this fn.\n #if self.hasEnteredRoomYet:\n # GAMEBOARD = game.TRIMMEDNODES\n #else:\n GAMEBOARD = game.BOARDNODES\n #\n for diceRoll in range(1,game.DICEMAX+1):\n newRoutes = []\n for route in routes:\n for nextLoc in GAMEBOARD[route[-1]]:\n if not nextLoc in route:\n # This location has not yet been visited by this route\n if nextLoc in game.ALLOWEDROOMNODES:\n # Location is a room - can move into it regardless\n # of population, but not move further. We can also\n # use any longer roll to move here by this route\n for thisRoll in range(diceRoll,game.DICEMAX+1):\n destinations[thisRoll].add(nextLoc)\n else:\n # Location is a square - can only move into it if\n # unoccupied, but can move further once there.\n if not nextLoc in self.charLocations:\n # Unoccupied (could check against \n # charLocations[1:] since we're the piece moving\n # but a nextLoc = charLocations[0] would be\n # rejected by being already in the route anyway)\n newRoute = list(route)\n newRoute.append(nextLoc)\n newRoutes.append(newRoute)\n destinations[diceRoll].add(nextLoc)\n routes = newRoutes\n #\n return [list(destSet) for destSet in destinations]", "def get_possible_moves(self):\n moves = []\n for i in range(1, self.current_total + 1):\n if i ** 2 <= self.current_total:\n moves.append(i ** 2)\n\n return moves", "def find_moveable_pieces(self, die, p1): \n moveable = []\n if (p1):\n #must we re-enter?\n if (self.p1vec[0] > 0):\n if (self.free_spot(0, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(0, die, p1)\n moveable.append(b)\n #no? ok then generate the moves\n else:\n for i in range(1, 25):\n if (self.p1vec[i] > 0):\n if (self.free_spot(i, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(i, die, p1)\n moveable.append(b)\n else:\n #must we re-enter?\n if (self.p2vec[0] > 0):\n if (self.free_spot(0, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(0, die, p1)\n moveable.append(b)\n #no? 
ok then generate the moves\n else:\n for i in range(1, 25):\n if (self.p2vec[i] > 0):\n if (self.free_spot(i, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(i, die, p1)\n moveable.append(b)\n return moveable", "def getPossibleMovesOutCheck(player, board):\n possibleMoves = []\n for col in board:\n for piece in col:\n if type(piece) != int and piece.player == player:\n pieceMoves = piece.availableMoves(board)\n piecePosX = piece.posx\n piecePosY = piece.posy\n for move in pieceMoves:\n copyBoard = copy.deepcopy(board)\n #Do the move on the copied board\n copyBoard[piecePosX][piecePosY] = 1\n newPiece = copy.deepcopy(piece)\n newPiece.posx = move[0]\n newPiece.posy = move[1]\n copyBoard[move[0]][move[1]] = newPiece\n #Check if the copied board is now in check...\n #if not then it is a valid move to do to get out of check\n if not isInCheck(player, copyBoard):\n possibleMoves.append(((piecePosX,piecePosY), move))\n\n return possibleMoves", "def possibleMoves(self,i,j):\n piece = self.board[i][j].piece\n if(piece.pieceCode == \"None\"):\n return []\n \n if(piece.name == \"pawn\"):\n return self.pawnMoves(piece,self.board)\n elif(piece.name == \"king\"):\n return self.kingSteps(self.board,piece.color)\n else:\n return self.pieceMoves(piece,self.board)", "def available_moves(board_state):\n for x, y in itertools.product(range(len(board_state)), range(len(board_state[0]))):\n if board_state[x][y] == 0:\n yield (x, y)", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n \n options = game.get_legal_moves()\n assert options == legal_moves, \"Mismatched moves\"\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n score, move = None, random.choice(legal_moves) if len(legal_moves) > 0 else None\n try:\n # Iterative deepening with Quiessance search:\n if self.iterative is True:\n results = deque(maxlen=3)\n for depth in range (self.search_depth, 25):\n score, move = self.dosearch(game, depth)\n results.append((score, move))\n if self.quiessant_search is True:\n if len(results) >=3 and all(x[1] == move for x in results):\n break\n elif score == float('-inf') or score == float ('inf'):\n break\n if self.time_left() < self.TIMER_THRESHOLD:\n break\n else:\n score, move = self.dosearch(game, self.search_depth)\n assert score is not None\n \n if len (options) > 0:\n assert not (move is None or move is (-1,-1)), \"Move ({}, {}) for '{}/{}' cannot be None or (-1,-1) if options ({}) exist\".format(move, score, self.method, self.score, options)\n assert move in options, \"Move ({}, {}) for '{}/{}' not from existing list of moves ({})\".format(move, score, self.method, self.score, options)\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n # Return the best move from the last completed search\n # (or iterative-deepening search iteration)\n return move", "def get_king_moves(self, state):\n #king_moves = []\n possible_moves = []\n if self.color == cc.WHITE_ACTIVE:\n enemy_color = cc.BLACK_ACTIVE\n enemy_pieces = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n enemy_color = cc.WHITE_ACTIVE\n enemy_pieces = cc.WHITE_PIECES\n else:\n raise Exception(\"GameState: Invalid Active Color\")\n\n for vector in cc.KING_VECTORS:\n rank = self.coord[0] + vector[0]\n column = self.coord[1] + vector[1]\n if rank in cc.VALID_RANKS and column in cc.VALID_RANKS:\n if state.board[rank, column] == cc.NO_PIECE:\n 
possible_moves.append(cc.Action(self.string, self.coord, (rank, column)))\n elif state.board[rank, column] in enemy_pieces:\n possible_moves.append(cc.Action(self.string, self.coord, (rank, column), capture=True))\n \n # # Iterate over list of king moves, removing ones that are under attack\n # for move in possible_moves:\n # if not check.space_under_attack(state, move.end, enemy_color):\n # king_moves.append(move)\n\n return possible_moves", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def checkPossibleMoves(self):\n possibleMovesArray = []\n\n for j in range(self.nrOfCars):\n minMaxChange = self.gridSize - self.length[j] + 1\n possibleMoves = []\n\n for i in range(1,minMaxChange):\n if self.checkMove(j, i) == 0:\n possibleMoves.append(i)\n else:\n break\n for i in range(1,minMaxChange):\n if self.checkMove(j, -i) == 0:\n possibleMoves.append(-i)\n else:\n break\n\n possibleMovesArray.append(possibleMoves)\n\n return possibleMovesArray", "def GenerateMoves(position):\n return [move for move in POSSIBLE_MOVES if move <= position]", "def get_legal_moves(self, color):\n moves = set() # stores the legal moves.\n color = max(0, color)\n\n # Get all the squares with pieces of the given color.\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n newmoves = self.get_moves_for_square((x,y))\n moves.update(newmoves)\n return list(moves)", "def get_all_possible_moves(self):\r\n moves = []\r\n for i in range(8):\r\n for j in range(8):\r\n color = self.board[i][j][0]\r\n if (color == 'b' and not self.turn_white) or (color == 'w' and self.turn_white):\r\n p_type = self.board[i][j][1]\r\n if p_type == 'r':\r\n self.get_rook_moves(i, j, moves)\r\n elif p_type == 'k':\r\n self.get_king_moves(i, j, moves)\r\n elif p_type == 'q':\r\n self.get_queen_moves(i, j, moves)\r\n elif p_type == 'p':\r\n self.get_pawn_moves(i, j, moves)\r\n elif p_type == 'b':\r\n self.get_bishop_moves(i, j, moves)\r\n elif p_type == 'n':\r\n self.get_knight_moves(i, j, moves)\r\n return moves", "def legalMoves(self):\n return [c for c in range(self.getWidth()) if len([r for r in range(self.getHeight()) if self.cell[c][r]==EMPTY])>0 ]", "def get_possible_moves(board):\n\tpossible_moves = []\n\n\tfor count, player in enumerate(board):\n\t\tif player is not server_player and player is not user_player:\n\t\t\tpossible_moves.append(count)\n\n\treturn possible_moves", "def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;", "def get_moves(self, x, y):\n\n if not self.piece_at(x, y):\n return set()\n\n moves = self._get_piece_moves(x, y)\n legal = set(moves)\n at = x, y\n for to in moves:\n res, captured = self._make_move(at, to)\n if not res:\n legal.remove(to)\n else:\n self._unmake_move(to, at, captured)\n\n self._check_integrity()\n return legal", "def moveAvailable(self, to):\n\t\tmoves = 
self.getAvailableMoves()\n\t\tprint(moves)\n\t\tif to in moves: return True\n\t\treturn False", "def get_possible_moves(self, current_x: int, current_y: int) -> List[(int, int)]:\n pass", "def get_possible_moves(self, current_x: int, current_y: int) -> List[(int, int)]:\n pass", "def get_possible_moves(self, current_x: int, current_y: int) -> List[(int, int)]:\n pass", "def get_possible_moves(self, current_x: int, current_y: int) -> List[(int, int)]:\n pass", "def get_possible_moves(self, current_x: int, current_y: int) -> List[(int, int)]:\n pass", "def get_possible_moves(self, current_x: int, current_y: int) -> List[(int, int)]:\n pass", "def get_possible_moves(self, current_x: int, current_y: int) -> List[(int, int)]:\n pass", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n move = (-1, -1) #Default\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n max_depth = 0\n try:\n # The search method call (alpha beta or minimax) should happen in\n # here in order to avoid timeout. 
The try/except block will\n # automatically catch the exception raised by the search method\n # when the timer gets close to expiring\n if self.iterative:\n #Perform iterative search\n num_of_remaining_moves = len(game.get_blank_spaces())\n for depth in range(1,num_of_remaining_moves):\n if self.time_left() <= self.TIMER_THRESHOLD:\n return move\n\n if self.method == 'alphabeta':\n iterative_best_score, iterative_best_move = self.alphabeta(game, depth)\n else:\n iterative_best_score, iterative_best_move = self.minimax(game, depth)\n\n #Stores score and move of the deepest search\n score = iterative_best_score\n move = iterative_best_move\n max_depth = depth\n else:\n #Perform fixed-depth search\n if self.method == 'alphabeta':\n score, move = self.alphabeta(game, self.search_depth)\n else:\n score, move = self.minimax(game, self.search_depth)\n\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n return move", "def getLegalMoves(cls, piece, layout):\n color = cls._getColorAt(piece, layout)\n if color == \"w\":\n direction = \"b\"\n else:\n direction = \"f\"\n\n unfiltered_moves = []\n\n if cls._isKing(piece, layout):\n left, right = cls.adjacency_matrix[piece][\"b\"]\n unfiltered_moves.extend([left, right])\n left, right = cls.adjacency_matrix[piece][\"f\"]\n unfiltered_moves.extend([left, right])\n else:\n left, right = cls._getMoves(piece, direction)\n unfiltered_moves.extend([left, right])\n\n # remove None destinations from the list (board edges)\n filtered_moves = list(filter(lambda x: x != None, unfiltered_moves))\n\n # move destination is an opponent\n possible_jumps = list(\n filter(lambda x: cls._isOpponent(color, x, layout), filtered_moves)\n )\n if possible_jumps:\n # process jumps first because you MUST jump\n unfiltered_jumps = list(\n map(lambda x: cls._canJump(x, piece, layout), possible_jumps)\n )\n jumps = list(filter(lambda x: x != None, unfiltered_jumps))\n # we don't care about moves if we have jumps\n if jumps:\n return {\"jumps\": jumps}\n\n # if we got here, process moves\n moves = list(\n filter(\n lambda x: cls.Pieces.NONE == cls._getColorAt(\n x, layout), filtered_moves\n )\n )\n return {\"moves\": list(moves)}", "def available_moves(self):\n\n heaps = range(len(self.heaps))\n return [(h, take) for h in range(len(self.heaps))\n for take in range(1, self.heaps[h] + 1)]", "def GetMoves(self):\n return [(source, target) for source in self.scores.keys() for target in self.fullGraph.neighbors_iter(source) if target not in self.pathes[source].nodes]", "def get_potential_moves(self):\n\n return self._potential_moves", "def _get_piece_moves(self, x, y):\n\n piece = self.get_piece(x, y)\n moves = []\n\n if not piece:\n return moves\n\n if piece.name == 'rook' or piece.name == 'queen':\n direcs = ['up', 'down', 'left', 'right']\n moves = [self._get_moves_indirection(x, y, direc) for direc in\n direcs]\n\n elif piece.name == 'bishop' or piece.name == 'queen':\n direcs = ['d1', 'd2', 'd3', 'd4']\n for direc in direcs:\n moves += self._get_moves_indirection(x, y, direc)\n\n elif piece.name == 'king':\n moves = [(x-1, y-1), (x-1, y), (x-1, y+1), (x, y-1),\n (x, y+1), (x+1, y-1), (x+1, y), (x+1, y+1)]\n\n elif piece.name == 'knight':\n moves = [(x-1, y-2), (x-2, y-1), (x-2, y+1), (x-1, y+2),\n (x+1, y+2), (x+2, y+1), (x+1, y-2), (x+2, y-1)]\n\n elif piece.name == 'pawn':\n if piece.color == ChessGame.BLACK:\n moves = [(x-1, y), (x-1, y-1), (x-1, y+1)]\n else:\n moves = [(x+1, y), (x+1, y-1), (x+1, y+1)]\n\n tmp = list(moves)\n for u, v in tmp:\n 
if v != y and not self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n if v == y and self.is_enemy(u, v, piece.color):\n moves.remove((u, v))\n\n mycolor = piece.color\n valid = set()\n for (u, v) in moves:\n if not self.in_bounds(u, v):\n continue\n\n if not self.get_piece(u, v): # board is blank\n valid.add((u, v))\n\n if self.is_enemy(u, v, mycolor):\n valid.add((u, v))\n\n return valid", "def get_untried_moves(self, legal_moves):\n\n\t\t# Find all moves for which this node *does* have children\n\t\ttried_moves = [child.move for child in self.child_nodes]\n\n\t\t# Return all moves that are legal but have not been tried yet\n\t\treturn [move for move in legal_moves if move not in tried_moves]", "def get_available_moves(self, team_color):\n squares = self.squares_with_pieces_of_color(team_color)\n moves = []\n for square in squares:\n moves.extend(square.get_moves(self))\n return moves", "def get_possible_moves(self) -> list:\n result = []\n for lst in self.hori_lst:\n for item in lst:\n if item.isalpha():\n result.append(item)\n\n # add nodes to result if it's not taken and its line is not taken\n # for i in range(len(self.hori_lst)):\n # if not self.hori_result[i].isdigit():\n # for item in self.hori_lst[i]:\n # if not item.isdigit():\n # result.append(item)\n # # remove the node from result if its line has been taken\n # for i in range(len(self.left_lst)):\n # if self.left_result[i].isdigit():\n # for item in self.left_lst[i]:\n # if item in result:\n # result.remove(item)\n # # remove the node from result if its line has been taken\n # for i in range(len(self.right_lst)):\n # if self.right_result[i].isdigit():\n # for item in self.right_lst[i]:\n # if item in result:\n # result.remove(item)\n return result", "def get_all_moves(board, player):\n moves = []\n if not (player_has_won(board, player) or\n player_has_won(board, utils.get_opponent(player)) or\n (not is_valid_board(board))):\n for index in range(9):\n if board[index] == config.NO_PLAYER:\n moves += [index]\n return moves", "def __moves_available(self, board: Board):\n player_moves = self.get_num_of_moves(board, self.player_color)\n opponent_moves = self.get_num_of_moves(board, self.opponent_color)\n # print(len(player_moves), len(opponent_moves))\n\n return player_moves - opponent_moves * 3", "def get_tiger_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_tiger():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def get_legal(self, player):\n for start in map(tuple, np.argwhere(self._board == player)):\n yield from (\n (start, end)\n for end in self.paths(start)\n if self._check_zone_locks(start, end)\n )", "def ai_move():\n\tinitial_state = map(get_filled_edges, rects)\n\tpossible_moves = []\n\tfor index, filled_edges in enumerate(initial_state):\n\t\tif filled_edges == 0:\n\t\t\tpossible_moves.extend([(index, i) for i in 'ltrb'])\n\t\telif filled_edges == 1:\n\t\t\tpossible_moves.extend(one_filled_edge(index))\n\t\telif filled_edges == 2:\n\t\t\tpossible_moves.extend(two_filled_edge(index))\n\t\telif filled_edges == 3:\n\t\t\tpossible_moves.extend(three_filled_edge(index))\n\tprint possible_moves\n\tpossible_decisions = []\n\tfor move in possible_moves:\n\t\tfinal_state = apply_move(move)\n\t\tpossible_decisions.append(is_feasible(initial_state, final_state))\n\tprint possible_decisions\n\t# randomizing when some decisions have the same weight\n\tmax_weight = max(possible_decisions)\n\t# list 
of indices which have the same weight\n\tmax_indices = []\n\tfor index, weight in enumerate(possible_decisions):\n\t\tif weight == max_weight:\n\t\t\tmax_indices.append(index)\n\tx = choice(max_indices)\n\tprint x\n\treturn possible_moves[x]\n\t# return possible_moves[possible_decisions.index(max(possible_decisions))]", "def get_move(self, game, legal_moves, time_left):\n logging.debug(\"get_move - legal moves: %s\", str(legal_moves))\n \n self.time_left = time_left\n\n\n # Check if we have any legal moves\n if not legal_moves:\n return (-1, -1)\n\n # Let's set best move so far to be the first legal move so we always \n # have something to return in case of timeout\n self.best_move_so_far = legal_moves[0]\n \n \n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n try:\n # The search method call (alpha beta or minimax) should happen in\n # here in order to avoid timeout. The try/except block will\n # automatically catch the exception raised by the search method\n # when the timer gets close to expiring\n if self.iterative:\n it = 1\n while True:\n if self.method == 'minimax':\n _, self.best_move_so_far = self.minimax(game, it)\n else:\n _, self.best_move_so_far = self.alphabeta(game, it)\n it += 1\n else: \n if self.method == 'minimax':\n _, self.best_move_so_far = self.minimax(game, self.search_depth)\n else:\n _, self.best_move_so_far = self.alphabeta(game, self.search_depth)\n\n except Timeout:\n # Handle any actions required at timeout, if necessary\n logging.debug(\"Time is up - get_move returning: %s\", str(self.best_move_so_far))\n return self.best_move_so_far\n\n # Return the best move from the last completed search iteration\n logging.debug(\"get_move returning: %s\", str(self.best_move_so_far))\n\n return self.best_move_so_far", "def get_valid_moves(self):\r\n validMoves = []\r\n\r\n for x in range(BOARD_SIZE):\r\n for y in range(BOARD_SIZE):\r\n pos = np.array([x,y])\r\n if self.board[pos[0],pos[1]] == 0:\r\n if(self.update_board(pos,_testing=True)):\r\n validMoves.append(pos)\r\n\r\n return validMoves", "def possible_moves(self, side: models.Side) -> typing.Iterator[\n typing.Tuple[models.Piece, int, int]]:\n raise NotImplementedError", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n\n # TODO: finish this function!\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n moves = game.get_legal_moves()\n best_move = (-1, -1)\n best_score = float(\"-inf\")\n depth = 1\n if moves:\n best_move = moves[0]\n else :\n return best_move\n \n\n try:\n # The search method call (alpha beta or minimax) should happen in\n # here in order to avoid timeout. 
The try/except block will\n # automatically catch the exception raised by the search method\n # when the timer gets close to expiring\n if not self.iterative:\n if self.method == 'minimax':\n _, best_move = self.minimax(game, self.search_depth)\n elif self.method == 'alphabeta':\n _, best_move = self.alphabeta(game, self.search_depth) \n else:\n while self.time_left() > self.TIMER_THRESHOLD:\n if self.method == 'minimax':\n score, move = self.minimax(game, depth)\n elif self.method == 'alphabeta':\n score, move = self.alphabeta(game, depth)\n if score > best_score:\n best_move = move\n depth += 1 \n\n\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_best_moves(self):\n return self.best_moves", "def checkMoves(self,board):\n possibleMoves = []\n\n for c in xrange(0,8):\n for r in xrange(0,8):\n if board.isValidMove(self.tile,c,r):\n possibleMoves.append(c+r*8)\n\n return possibleMoves", "def get_all_valid_moves(self, player):\n moves = [] # Stores the possible moves\n capture_move_exists = False # Indicates if a capturing move is possible\n\n for piece in self.get_all_pieces(player):\n valid_moves = self._get_valid_moves(piece)\n\n for move, skip in valid_moves.items():\n moves.append([(piece.row, piece.col), move, skip])\n\n if len(skip) > 0:\n # Checks if there is a move that can capture a piece\n capture_move_exists = True\n\n if capture_move_exists:\n # Only gets the capturing moves if there is one\n eating_moves = []\n for move in moves:\n if len(move[2]) != 0:\n eating_moves.append(move)\n\n moves = eating_moves\n\n return moves", "def possible_moves(self):\n states = []\n possible_floors = self.possible_floors()\n possible_items = self.possible_items()\n\n for fl in possible_floors:\n for items in possible_items:\n new_floors = deepcopy(self.floors)\n for item in items:\n new_floors[self.lift_floor].remove(item)\n new_floors[fl].append(item)\n\n if self.validate_floors(new_floors):\n states.append(\n GameState(new_floors, lift_floor=fl, moves=self.moves+1)\n )\n\n return states", "def actions(board):\n avail_moves = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n avail_moves.add((i,j))\n \n if len(avail_moves) == 0:\n return 0\n\n return avail_moves" ]
[ "0.7495228", "0.7211233", "0.7106526", "0.6994656", "0.68828064", "0.6859793", "0.6845441", "0.6823255", "0.68220854", "0.6766067", "0.6720792", "0.6693552", "0.6690474", "0.6682", "0.6669322", "0.66576886", "0.6649769", "0.66284806", "0.6552528", "0.6523163", "0.6483351", "0.64750797", "0.64672947", "0.64568913", "0.6421841", "0.64002156", "0.6379737", "0.63709116", "0.6366173", "0.63627136", "0.6355597", "0.63452965", "0.63386893", "0.63212574", "0.63209224", "0.6315063", "0.63134336", "0.62796193", "0.62717366", "0.626261", "0.6222694", "0.61953133", "0.6194773", "0.61675", "0.61653376", "0.61603606", "0.61440843", "0.6138285", "0.6137138", "0.6132093", "0.61272323", "0.61114275", "0.6110081", "0.60962886", "0.607672", "0.60703653", "0.605995", "0.60588396", "0.6056324", "0.60504067", "0.60247695", "0.6023153", "0.6019059", "0.60031694", "0.60008293", "0.599931", "0.5992458", "0.598784", "0.59823716", "0.59823716", "0.59823716", "0.59823716", "0.59823716", "0.59823716", "0.59823716", "0.5980818", "0.5978216", "0.5973922", "0.5968626", "0.5968436", "0.59676224", "0.59647095", "0.5962298", "0.5959459", "0.59577215", "0.5953524", "0.59372157", "0.59299046", "0.5928245", "0.59205383", "0.59201515", "0.5916984", "0.59127396", "0.5904953", "0.59046066", "0.59043676", "0.5902298", "0.5874908", "0.5864457", "0.58520806" ]
0.6491643
20
needed for search Create a new board state with the given move
def move(self,p,intMove):
        gs = self.gameState.copy() #copy Board
        gs[p.pos.get()] = EMPTY #put position it was at as empty
        gs[self.movePos(p,intMove).get()] = p.color #set new position as filled
        return ((p,intMove),Board(gs,self.togglePlayer(self.whoseTurn)))
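The document above pairs the move with the successor position rather than mutating the board, which is exactly what a game-tree search consumes. Below is a minimal, self-contained sketch of that pattern; `Board`, `EMPTY`, the 1-D state, and the `pos`/`step` arguments are stand-ins for whatever the original class defines, not the original API.

```python
# Hypothetical reconstruction: a tiny immutable-ish board whose move()
# returns a ((piece, step), new_board) pair, mirroring the document's style.
EMPTY = 0

class Board:
    def __init__(self, state, whose_turn):
        self.gameState = list(state)
        self.whoseTurn = whose_turn

    def toggle_player(self, player):
        return 1 - player

    def move(self, pos, step, color):
        gs = self.gameState.copy()   # copy the board so the parent stays valid
        gs[pos] = EMPTY              # vacate the origin square
        gs[pos + step] = color       # occupy the destination square
        return ((pos, step), Board(gs, self.toggle_player(self.whoseTurn)))

board = Board([1, 0, 0], whose_turn=0)
mv, child = board.move(pos=0, step=2, color=1)
assert board.gameState == [1, 0, 0]   # original state is untouched
assert child.gameState == [0, 0, 1]   # successor reflects the move
assert child.whoseTurn == 1           # turn passes to the other player
```

Returning a fresh `Board` instead of editing in place keeps every parent node valid while the search expands children, which is the same reason several of the negatives below (`forecast_move`, `apply_move`) deep-copy the state before applying a move.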
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def apply_move(self, move):\r\n next_board = copy.deepcopy(self.board)\r\n 
next_board.place(self.next_player, move)\r\n return GameState(next_board, self.next_player.other, move)", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_state = StonehengeState(self.p1_turn, self.length,\n self.letters[:], self.claim[:])\n state = new_state\n if new_state.length == 1:\n state = self.move_length_1(move, new_state)\n if new_state.length == 2:\n state = self.move_length_2(move, new_state)\n if new_state.length == 3:\n state = self.move_length_3(move, new_state)\n if new_state.length == 4:\n if move in [\"A\", \"B\", \"J\", \"O\", \"N\", \"R\",\n \"C\", \"F\", \"E\", \"I\", \"P\", \"Q\"]:\n state = self.move_length_4(move, new_state)\n else:\n state = self.move_length_41(move, new_state)\n if new_state.length == 5:\n if move in [\"A\", \"B\", \"U\", \"O\", \"T\", \"Y\",\n \"C\", \"J\", \"E\", \"N\", \"V\", \"X\"]:\n state = self.move_length_5(move, new_state)\n elif move in [\"F\", \"I\", \"W\"]:\n state = self.move_length_51(move, new_state)\n else:\n state = self.move_length_52(move, new_state)\n return state", "def makeMove(self, board, move):\n\t\trotation, this_board = self.__getNormalizedAndRotatedBoard(board)\n\t\tthis_state = TicTacToeHelper.serializeBoard(this_board)\n\n\t\tthis_move = TicTacToeHelper.rotateMove(move, rotation)\n\n\t\tself.__state_history.append((this_state, this_move))", "def make_move(self, move, player, board):\r\n #nBoard = board.copy()\r\n board[move] = player\r\n for d in core.DIRECTIONS:\r\n if self.find_bracket(move, player, board, d)!=None:\r\n self.make_flips(move, player, board, d)\r\n return board", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += 
self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def make_move(self, move: int) -> None:\n if move not in self._valid_moves:\n raise ValueError(f'Move \"{move}\" is not valid')\n\n self._update_board(move)\n\n self._win_state = self._check_winner()\n self._is_red_active = not self._is_red_active\n self.move_number += 1", "def make_move(self, state):\n emptySpaces = 0\n for row in state:\n emptySpaces = emptySpaces + row.count(' ')\n if emptySpaces > 17:\n drop_phase = True\n else:\n drop_phase = False\n\n move = []\n if not drop_phase:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, False, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]!= ' ' and best_state[i][j]== ' ':\n move.append((i,j))\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n # TODO: choose a piece to move and remove it from the board\n # (You may move this condition anywhere, just be sure to handle it)\n #\n # Until this part is implemented and the move list is updated\n # accordingly, the AI will not follow the rules after the drop phase!\n \n\n # select an unoccupied space randomly\n # TODO: implement a minimax algorithm to play better\n \n else:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, True, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n return move", "def make_move(board, move, ch):\n board[move['row']][move['col']] = ch\n \n winner = board_winner(board)\n \n if winner is not None:\n return True, winner\n \n if not board_has_move(board):\n return True, None\n \n return False, None", "def move(state=None, actual_move=None):\n copy = state.copy()\n copy.push(chess.Move.from_uci(uci=actual_move))\n return copy", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def make_move(self, move, player):\n if not self.test_valid_move( move):\n return False\n self.game_state[move[0]][move[1]] = player", "def apply_move(self, start_move, move):\n\t\t# check that the start move and the move are Move objects\n\t\tif not isinstance(move, Move):\n\t\t\tmove = Move(move)\n\t\tif not isinstance(start_move, Move):\n\t\t\tstart_move = Move(start_move)\n\t\t# copy the board\n\t\tnext_board = copy.deepcopy(self.board)\n\t\t# place the move on the next board\n\t\tnext_board.place(self.next_player, start_move.point, move.point)\n\t\treturn GameState(next_board, self.next_player.other, move)", "def make_move(move, player, board):\n board[move] = player\n for d in DIRECTIONS:\n Othello.make_flips(move, player, board, d)\n return board", "def make_move(self, current_state):\n\n\t\tbatch_size = 192\n\n\t\ttest_board = np.zeros((batch_size, 3, 3, 3))\n\t\ttest_cows = np.zeros((batch_size, 2))\n\t\ttest_labels = np.zeros((batch_size, 1)) \n\n\t\tnew_states = current_state.expand_states()\n\n\t\tif len(new_states) == 0:\n\t\t\treturn None\n\n\t\tfor i, state in 
enumerate(new_states):\n\n\t\t\tdesc = self.state_descriptor(state, self.player_index)\n\t\t\ttest_board[i] = np.asarray(desc[0])\n\t\t\ttest_cows[i] = np.asarray([desc[1], desc[2]])\n\n\t\treturn new_states[self.train_value_function_approximation(\n\t\t\tTrue, (test_board, test_cows, test_labels), len(new_states))]", "def forecast_move(self, move):\n if move not in get_legal_moves(self):\n raise RuntimeError(\"Attempted forecast of illegal move\")\n newGameState = deepcopy(self)\n newGameState[move[0]][move[1]] = 1\n newGameState.player_locations[whose_turn] = move\n newGameState.whose_turn ^= 1\n return newGameState", "def add_move(self, move):\n \n self.current_board[move.position[0]][move.position[1]] = move.player.char", "def makeMove(self, move):\n\t\ttry:\n\t\t\tif (self.board[int(move) - 1] is Piece.BLANK):\n\t\t\t\tself.board[int(move) - 1] = self.current\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\texcept:\n\t\t\treturn 0", "def result(self, board_state, move):\n # Create a copy of the current board state\n output_state = BoardState(other_state=board_state)\n # Swap pieces\n output_state.move_piece(move)\n # Eliminate pieces\n output_state.eliminate_piece()\n return output_state", "def make_move(self, move):\n if type(move) == str:\n move = int(move)\n\n new_state = SubtractSquareState(not self.p1_turn,\n self.current_total - move)\n return new_state", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def make_move(self, move):\n # Unpack the information from the move\n piece = self.get_piece(move[0])\n row = move[1][0]\n col = move[1][1]\n eliminated_pieces = move[2]\n\n # Move the piece and eliminate the captured pieces\n self.board[piece.row][piece.col] = None\n self.board[row][col] = piece\n piece.row = row\n piece.col = col\n self.remove(eliminated_pieces)\n\n # Checks if the piece has been promoted\n if piece.get_player() == Player.black and \\\n piece.row == constant.BOARD_DIMENSION - 1 \\\n and not piece.is_king():\n piece.make_king()\n self.num_black_kings += 1\n\n elif piece.get_player() == Player.white and \\\n piece.row == 0 \\\n and not piece.is_king():\n piece.make_king()\n self.num_white_kings += 1\n\n # Add the move performed to the history of the game\n self.moves.append(move)", "def _makeAMove(self, prev_move, board: str) -> int:\n\n (myWins, otherWins, _, _) = self.winsBlocksForks(board)\n move = choice([self.findEmptyCell(board, myWin) for myWin in myWins] if myWins else\n [self.findEmptyCell(board, otherWin) for otherWin in otherWins] if otherWins else\n list(self.otherMove(board, emptyCellsCount(board)))\n )\n return move", "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n 
self.game.mark_complete(winner=self.owner)\n\n        # Switch player turn\n        self.game.switch_turn()\n\n        # Let the game know about the move and result\n        self.send_game_update()", "def __make_move(self, user):\n        if user:\n            pos = int(input('Make your move (1-9): '))\n            print('\\033[2A\\r                              ')\n            while self.state[pos-1] != ' ':\n                pos = int(input('\\rSeat\\'s taken. Make your move (1-9): '))\n                print('\\033[2A\\r                              ')\n            self.state[pos-1] = self.player_char\n        else:\n            time.sleep(.5)\n            pos = random.randint(1, 9)\n            if self.difficulty == 1:\n                best = self.__find_best()\n                pos = best+1 if best else pos\n            while self.state[pos-1] != ' ':\n                pos = random.randint(1, 9)\n            self.state[pos-1] = self.opponent_char\n        self.history.append(self.state[:])", "def move(self, board):\n\n        # We record all game positions to feed them into the NN for training with the corresponding updated Q\n        # values.\n        self.board_position_log.append(board.getState().copy())\n\n        nn_input = self.board_state_to_nn_input(board.getState())\n        probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n        probs = probs[0]\n        # print(probs)\n        # print(type(probs))\n        # print(probs.shape)\n        # input()\n        # print(probs)\n        # Most of the time our next move is the one with the highest probability after removing all illegal ones.\n        # Occasionally, however, we choose a random move to encourage exploration\n        if (self.training is True) and \\\n                ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n            available = []\n            for index in range(6):\n                if probs[index] != -1.0:\n                    available.append(index)\n            randomOne = random.randint(0,len(available)-1)\n            move = available[randomOne]\n        else:\n            move = np.argmax(probs)\n        # We record the action we selected as well as the Q values of the current state for later use when\n        # adjusting NN weights.\n        self.action_log.append(move)\n\n        # We execute the move and return the result\n        board.makeMove(move)\n        return board.getState(), board.isOver()", "def apply_move(board_state, move, side):\n    move_x, move_y = move\n\n    def get_tuples():\n        for x in range(len(board_state)):\n            if move_x == x:\n                temp = list(board_state[x])\n                temp[move_y] = side\n                yield tuple(temp)\n            else:\n                yield board_state[x]\n\n    return tuple(get_tuples())", "def _update_board(self, move: int) -> None:\n        row = self._column_to_row[move]  # Find what row to place the disk in\n        if self._is_red_active:\n            self.board_array[row][move] = 1\n            self.hash = self.hash ^ int(self._red_hash_keys[row][move])  # Update hash\n        else:\n            self.board_array[row][move] = -1\n            self.hash = self.hash ^ int(self._yellow_hash_keys[row][move])  # Update hash\n\n        self._column_to_row[move] += 1\n        if self._column_to_row[move] == 6:\n            self._valid_moves.remove(move)", "def action(new_board, move, player):\r\n    \r\n    global nodes_generated \r\n    global min_prune\r\n    global max_prune\r\n    global max_depth\r\n    \r\n    if player == 1:\r\n        for i in range(0,len(new_board.white)):\r\n            if new_board.white[i] == move[0]:\r\n                new_board.white[i] = move[1]\r\n        if len(move) == 3:\r\n            new_board.black.remove(move[2])\r\n    elif player == -1:\r\n        for i in range(0,len(new_board.black)):\r\n            if new_board.black[i] == move[0]:\r\n                new_board.black[i] = move[1]\r\n        if len(move) == 3:\r\n            new_board.white.remove(move[2])\r\n    return new_board", "def make_move(self):\n\n        # get relevant information\n        affinity = self.get_affinity()\n        sample_space = self.get_game_space()\n        depth_limit = self.__search_depth\n\n        # run a minimax search and get the best value\n        bestval = MinimaxTree.minimax(self, sample_space, 
affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def make_move(state: str, section_num: int, move: str) -> str:\n if move == wf.CHECK:\n check_result = wf.check_section(state, section_num)\n if check_result:\n print('The section is correct')\n else:\n print('The section is incorrect')\n else:\n state = wf.change_state(state, section_num, move) \n return state", "def move(self, state, move_cmd, i, j):\r\n new_state = self.clone_state(state)\r\n coordinate_change = self.action_dic[self.reflection_dic[move_cmd]]\r\n new_state[i][j], new_state[i + coordinate_change[0]][j + coordinate_change[1]] = \\\r\n new_state[i + coordinate_change[0]][j + coordinate_change[1]]\\\r\n , new_state[i][j]\r\n return new_state", "def make_move(self,board, action, player_id):\n row = np.max(np.where(board[:, action] == EMPTY_VAL))\n new_board = np.copy(board)\n new_board[row, action] = player_id\n\n return new_board", "def api_make_move(self, move_input):\n return self.board.attempt_move(move_input)", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the 
other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle 
facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def make_move(self, board: Board) -> int:\n raise NotImplementedError", "def make_move(self, board: Block) -> int:\n raise NotImplementedError", "def board_from_move(self, move, color):\n new_board = KhetBoard(self.color_pieces[TeamColor.silver] + self.color_pieces[TeamColor.red])\n new_board.apply_move(move)\n new_board.apply_laser(color)\n return new_board", "def move(self, action: Action) -> State:\n new_state = State(self.size_h, self.size_v, self.wall_squares, self.boxes, self.storage_locations,\n self.current_location, action)\n\n if action == Action.DOWN:\n down_loc = (new_state.current_location[0] + 1, new_state.current_location[1])\n two_away = (down_loc[0] + 1, down_loc[1])\n new_state.current_location = down_loc\n if down_loc in new_state.boxes:\n new_state.boxes.remove(down_loc)\n new_state.boxes.append(two_away)\n\n elif action == Action.UP:\n up_loc = (new_state.current_location[0] - 1, new_state.current_location[1])\n two_away = (up_loc[0] - 1, up_loc[1])\n new_state.current_location = up_loc\n if up_loc in new_state.boxes:\n new_state.boxes.remove(up_loc)\n new_state.boxes.append(two_away)\n\n elif action == Action.RIGHT:\n right_loc = (new_state.current_location[0], new_state.current_location[1] + 1)\n two_away = (right_loc[0], right_loc[1] + 1)\n new_state.current_location = right_loc\n if right_loc in new_state.boxes:\n new_state.boxes.remove(right_loc)\n new_state.boxes.append(two_away)\n\n elif action == Action.LEFT:\n left_loc = (new_state.current_location[0], new_state.current_location[1] - 1)\n two_away = (left_loc[0], left_loc[1] - 1)\n new_state.current_location = left_loc\n if left_loc in new_state.boxes:\n new_state.boxes.remove(left_loc)\n new_state.boxes.append(two_away)\n\n new_state._validate() # TODO: Remove me for the final product.\n return new_state", "def make_move(state, action, player, rewarding_move=False): # TODO : done and next_is_reward can be removed as\n # they are in the state object\n board = state.get_board()\n json_action = action.get_json_action()\n action = action.get_action_as_dict()\n captured = None\n reward = 0\n next_is_reward = False\n previous_is_reward = False\n if rewarding_move:\n state.boring_moves = 0\n previous_is_reward = True\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND:\n reward += 1\n state.in_hand[player * -1] -= 1\n elif 
action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n            board.empty_cell(action['action']['at'])\n            reward += 1\n        else:\n            if action['action_type'] == YoteActionType.ADD:\n                state.boring_moves += 1\n                state.in_hand[player] -= 1\n                board.fill_cell(action['action']['to'], Color(player))\n            elif action['action_type'] == YoteActionType.MOVE:\n                at = action['action']['at']\n                to = action['action']['to']\n\n                def distance(cell_1, cell_2):\n                    import math\n                    return math.sqrt((cell_1[0] - cell_2[0]) ** 2 + (cell_1[1] - cell_2[1]) ** 2)\n\n                board.empty_cell(at)\n                board.fill_cell(to, Color(player))\n                if int(distance(at, to)) == 1:\n                    state.boring_moves += 1\n                elif int(distance(at, to)) > 1:\n                    state.boring_moves = 0\n                    next_is_reward = True\n                    board.fill_cell(to, Color(player))\n                    if at[0] == to[0] and at[1] < to[1]:\n                        board.empty_cell((at[0], at[1] + 1))\n                        captured = (at[0], at[1] + 1)\n                    elif at[0] == to[0] and at[1] > to[1]:\n                        board.empty_cell((at[0], at[1] - 1))\n                        captured = (at[0], at[1] - 1)\n                    elif at[1] == to[1] and at[0] < to[0]:\n                        board.empty_cell((at[0] + 1, at[1]))\n                        captured = (at[0] + 1, at[1])\n                    elif at[1] == to[1] and at[0] > to[0]:\n                        board.empty_cell((at[0] - 1, at[1]))\n                        captured = (at[0] - 1, at[1])\n                    reward += 1\n\n        state.set_board(board)\n        state.score[player] += reward\n        state.captured = captured\n        state.rewarding_move = next_is_reward\n        state.previous_is_reward = previous_is_reward\n        state.set_latest_player(player)\n        state.set_latest_move(json_action)\n        if next_is_reward:\n            state.set_next_player(player)\n        else:\n            state.set_next_player(player * -1)\n\n        done = YoteRules.is_end_game(state)\n        return state, done, next_is_reward", "def make_move(self):\n\n        # get relevant information\n        affinity = self.get_affinity()\n        sample_space = self.get_game_space()\n        depth_limit = self.__search_depth\n\n        # run a minimax search and get the best value\n        bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n        if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n        # print the number of nodes expanded \n        print(self.nodes_expanded)\n\n        # make the move found by the search \n        self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def request_move(self, board):\n        pass", "def make_move(self, move: Tuple[int, int]) -> MoveError:\n\n        # Make sure our move is going to be valid\n        if self.is_winner():\n            return MoveError.GAME_WON\n\n        elif move[0] >= self._board_size or move[0] < 0 or move[1] >= self._board_size or move[1] < 0:\n            return MoveError.OUT_OF_RANGE\n\n        elif self._board[move[1]][move[0]] != self.NEUTRAL_PLAYER:\n            return MoveError.TAKEN\n\n        # If we make it to here, then it is valid to make the move\n        self._board[move[1]][move[0]] = self._players[self._current_player]\n        self._number_of_moves = self._number_of_moves + 1\n        self._last_move = move\n\n        self._check_for_winner()\n\n        # Only change who the player is if we didn't get a winner,\n        # otherwise the final board's color will be wrong\n        if not self.is_winner():\n            self._current_player = (self._current_player + 1) % len(self._players)\n\n        return MoveError.OKAY", "def move_to(self, board, new_pos, next_turn):\n        board.get_covered_squares(next_turn)\n        self.get_legal_moves(board)\n        legal_pos = {(self.field_row + move[0], self.field_col + move[1])\n                     for move in self.legal_moves | self.legal_capture_moves}\n        if (FIELD_ROW_DICT[new_pos[1]], FIELD_COL_DICT[new_pos[0]]) in legal_pos:\n            for piece in board.figures:\n                if (piece.field_row == FIELD_ROW_DICT[new_pos[1]]\n                        and piece.field_col == FIELD_COL_DICT[new_pos[0]]):\n                    
piece.get_captured(board)\n old_pos_row = copy.deepcopy(self.field_row)\n old_pos_col = copy.deepcopy(self.field_col)\n self.field_row = FIELD_ROW_DICT[new_pos[1]]\n self.field_col = FIELD_COL_DICT[new_pos[0]]\n board.create_board()\n board.king_in_check(self.color)\n if board.in_check == self.color:\n self.field_row = old_pos_row\n self.field_col = old_pos_col\n board.create_board()\n return False, \"illegal move: king in check\"\n else:\n return True, \"legal move carried out\"\n else:\n return False, f\"illegal move: {self.short_name} cannot move to {new_pos}\"", "def make_move(self, move_to_play, color_to_move, return_capture=False):\r\n captures = 0\r\n if move_to_play == 'PASS':\r\n board_copy = Board(self.state, self.previous_state, self.to_move)\r\n if self.to_move == 1:\r\n board_copy.to_move = 2\r\n else:\r\n board_copy.to_move = 1\r\n if return_capture:\r\n return board_copy, captures\r\n else:\r\n return board_copy\r\n\r\n current_state = np.array(self.state)\r\n ptemp_state = np.array(current_state)\r\n\r\n for p in ORTHOGONAL_POSITIONS[move_to_play]:\r\n if self.board[p[0]][p[1]].chain_liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += len(self.chains[(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color)])\r\n current_state = self.remove_chain(self.board[p[0]][p[1]].chain_num, self.board[p[0]][p[1]].color,\r\n current_state)\r\n\r\n elif self.board[p[0]][p[1]].liberty == 1 and self.board[p[0]][p[1]].color != color_to_move:\r\n captures += 1\r\n current_state[p[0]][p[1]] = 0\r\n\r\n current_state[move_to_play[0]][move_to_play[1]] = color_to_move\r\n if color_to_move == 1:\r\n temp_board = Board(current_state, ptemp_state, 2)\r\n else:\r\n temp_board = Board(current_state, ptemp_state, 1)\r\n if return_capture:\r\n return temp_board, captures\r\n else:\r\n return temp_board", "def applyMove(self, (from_row,from_col), (to_row,to_col)):\n newboard = deepcopy(self)\n piece = newboard.board[from_row][from_col]\n newboard.board[from_row][from_col] = None\n newboard.board[to_row][to_col] = piece\n newboard.toplay = 'BLACK' if self.toplay == 'WHITE' else 'WHITE'\n return newboard", "def execute_move(self, move, color):\n\n #Much like move generation, start at the new piece's square and\n #follow it on all 8 directions to look for a piece allowing flipping.\n\n # Add the piece to the empty square.\n # print(move)\n flips = [flip for direction in self.__directions\n for flip in self._get_flips(move, direction, color)]\n assert len(list(flips))>0\n for x, y in flips:\n #print(self[x][y],color)\n self[x][y] = color", "def make_move(self, move, check_valid=True):\r\n self.board[move.sr][move.sc] = \"--\"\r\n self.board[move.er][move.ec] = move.pieceMoved\r\n self.moveLog.append(move)\r\n self.turn_white = not self.turn_white\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.er, move.ec)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.er, move.ec)\r\n\r\n if move.isEnpassantMove:\r\n self.board[move.sr][move.ec] = \"--\"\r\n\r\n if move.pieceMoved[1] == 'p' and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ((move.er + move.sr) // 2, move.ec)\r\n else:\r\n self.enpas_pos = ()\r\n\r\n if move.isPawnPromotion and not check_valid:\r\n promoted_piece = \"a\"\r\n while promoted_piece not in ('q', 'r', 'b', 'n'):\r\n promoted_piece = input(\"Promote to q, r, b, or n: \")\r\n self.board[move.er][move.ec] = move.pieceMoved[0] + promoted_piece\r\n\r\n # castle\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n 
self.board[move.er][move.ec - 1] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'\r\n else:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 2]\r\n self.board[move.er][move.ec - 2] = '--'\r\n\r\n # castle rights on rook, king move\r\n self.update_castle_rights(move)\r\n self.castleRightsLog.append(CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs))", "def make_move(self, px, py):\n if self.state != State.IN_PROGRESS:\n raise GameEndedError('Cannot make move. The game has ended.')\n x, y, i, j = self.to_coords(px, py)\n board = self.boards[x][y]\n if (x, y) not in self.available_boards:\n raise IllegalMoveError('Illegal move. Board is unavailable.')\n board.set_square(i, j, self.__on_turn)\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n self.last_move = (x, y, i, j)\n self.history.append(self.last_move)", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif 
len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def get_move(moves):\n pass", "def make_move(self, board: Board) -> int:\n return random.choice(board.get_valid_moves())", "def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state", "def make_move(board, index, p):\n if not utils.is_valid_player(p) or p == config.NO_PLAYER:\n raise Exception('Input p must = {0} or {1}. p = {3}'.format(config.HUMAN, config.COMPUTER, p))\n result = copy.deepcopy(board)\n result[index] = p\n if not is_valid_board(result):\n raise Exception('Move for {0} at {1} is invalid.'.format(p, index))\n return result", "def make_move(board, position, player):\n # only valid moves are passed in here\n board[position-1] = player", "def makeMove(self, colour, move):\n emptyCellList = [r for r in range(self.getHeight()) if self.cell[move][r]==EMPTY]\n self.cell[move][emptyCellList[0]] = colour # put the stone in the first empty cell of column 'move'", "def make_move(self, board: Board) -> int:\n\n move = input()\n move = int(move)\n\n while move not in board.get_valid_moves():\n print(\"That is not a valid move\")\n move = input()\n move = int(move)\n\n return move", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if 
state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def make_move(self, move_from, move_to):\n\n from_coordinates = self.translate_to_grid(move_from)\n to_coordinates = self.translate_to_grid(move_to)\n from_col = from_coordinates[0]\n from_row = from_coordinates[1]\n to_col = to_coordinates[0]\n to_row = to_coordinates[1]\n value_at_from = self._board[from_col][from_row]\n value_at_to = self._board[to_col][to_row]\n\n # if player passes the same square value for both move_from and move_to, then they are passing their turn\n # which they however cannot do if they are in check\n if not self.is_in_check(self._turn) and move_from == move_to:\n if self._turn == 'red':\n self._turn = 'blue'\n else:\n self._turn = 'red'\n return True\n\n # if game isn't over yet and move_from location holds a piece\n if (self.get_game_state() == 'UNFINISHED' and issubclass(type(value_at_from), Piece) and\n # and that piece belongs to the player whose turn it is\n self._turn == value_at_from.get_color() and\n # and the movement pattern is valid for that piece\n value_at_from.validate_move(move_from, move_to, self._board)):\n # if there's a piece belonging to the player whose turn it is at move_to (attempting to capture own piece)\n if issubclass(type(value_at_to), Piece) and self._turn == value_at_to.get_color():\n return False\n else:\n # Make the move\n # if move_to holds an opposing piece, it gets captured\n if issubclass(type(value_at_to), Piece):\n self.remove_piece(value_at_to)\n value_at_from.set_location(move_to)\n self._board[to_col][to_row] = value_at_from\n self._board[from_col][from_row] = ''\n # whether the player was already in check or this move placed them in check, they cannot now be in check\n if type(self.is_a_player_in_check(self._turn)) is General and self.is_a_player_in_check(self._turn).get_color() == self._turn:\n # Revert the move and return False since the player did not bring themself out of check\n self._board[from_col][from_row] = value_at_from\n self._board[to_col][to_row] = value_at_to\n value_at_from.set_location(move_from)\n if issubclass(type(value_at_to), Piece):\n self._active_pieces[value_at_to.get_color()].append(value_at_to)\n return False\n # the player either already wasn't or now isn't in check, so set its General to not in check\n self.get_general(self._turn).set_in_check(False)\n # if opposing general was put in check, set its in_check status to True and see if checkmate\n if type(self.is_a_player_in_check(self._turn)) is General:\n self.is_a_player_in_check(self._turn).set_in_check(True)\n if self.checkmate(self.is_a_player_in_check(self._turn)):\n if self._turn == 'red':\n self._game_state = 'RED_WON'\n else:\n self._game_state = 'BLUE_WON'\n # turn complete, change to other player's turn\n if self._turn == 'red':\n self._turn = 'blue'\n else:\n self._turn = 'red'\n return True\n return False", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 
'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def make_move(self, position_from, position_to):\n\n # Returns False if position_from or position_to are not an actual position\n # ranging from 'a1' to 'i10' for columns 'a' through 'i' and rows '1' \n # through '10'.\n if not isinstance(position_to, str) or\\\n not isinstance(position_from,str) or\\\n len(position_to) > 3 or len(position_to) < 2 or\\\n len(position_from) > 3 or len(position_from) < 2:\n\n return False \n\n # Returns False if game has been won.\n elif self.get_game_state() != 'UNFINISHED':\n\n return False\n\n position_to = position_to.lower()\n position_from = position_from.lower()\n current_player = self.get_player_turn()\n\n # Returns False if move is not legal based on Janggi game rules.\n if self.legal_move(position_to, position_from) is not True:\n\n return False\n\n # Returns True if current player wishes to pass on their move.\n # Any position on the board will suffice, including a General's position.\n # Tested in legal_move to exit legal_move safely. The second test for pass\n # updates player turn and returns True, so long as the player is not\n # currently in check.\n if position_to == position_from:\n\n if self.is_in_check(current_player) is True:\n \n return False\n\n self.update_player_turn()\n return True\n\n # Move is valid. 
Adjusted board, update potential moves for all GamePieces,\n # test checkmate (change game_state if True), update player turn, and\n # return True.\n else:\n\n game_piece_object = self.get_game_piece_object_at_position(position_from)\n self.adjust_board(game_piece_object, position_to, position_from)\n self.update_potential_moves()\n \n # Sets the General's Board position attribute if the position has\n # changed.\n if isinstance(game_piece_object, General):\n\n if current_player == 'BLUE':\n\n self.set_general_position_blue(position_to)\n\n else:\n\n self.set_general_position_red(position_to)\n\n # Sets game_state to the player who won if checkmate is detected.\n if self.is_checkmate() is True:\n\n if current_player == 'BLUE':\n\n self.set_game_state('BLUE_WON')\n \n else:\n\n self.set_game_state('RED_WON')\n\n self.update_player_turn()\n return True", "def apply_move(self, move):\n if self.check_move(move=move):\n self.board_list[move] = self.current_player.marker # changes value in the board to player which is either X or O\n self.moves_made += str(move) # keeps track of all moves\n return True\n else:\n return False", "def is_new_move(my_board, x, y):\n return my_board[x, y] == CLOSED", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None", "def move(self, movement):\n index = self.state.index(0)\n\n new_state = self.state.copy()\n\n if movement == 'up':\n new_state[index], new_state[index - 3] = new_state[index - 3], new_state[index]\n elif movement == 'down':\n new_state[index], new_state[index + 3] = new_state[index + 3], 
new_state[index]\n elif movement == 'left':\n new_state[index], new_state[index - 1] = new_state[index - 1], new_state[index]\n else:\n # movement == 'right'\n new_state[index], new_state[index + 1] = new_state[index + 1], new_state[index]\n \n return new_state", "def make_move(self, request):\n\n player = Player.query(Player.name == request.player_name).get()\n\n \"\"\"we validate that the player is in the Data Base\"\"\"\n if not player:\n raise endpoints.NotFoundException('player not found')\n\n game = gameutils.get_by_urlsafe(request.urlsafe_key, Game)\n \"\"\"we validate that the game where we want to create the board exists\"\"\"\n if not game:\n raise endpoints.NotFoundException(\n 'Game not found in the DB, please start a new game')\n\n board = Board.query(Board.key == player.board).get()\n \"\"\"we validate that the board where we want to create the board exists\"\"\"\n if not board:\n raise endpoints.NotFoundException('board not found')\n\n \"\"\"we validate that the board of the player is active, the player can't create\n multiple boards for the same Game\"\"\"\n if not player.board and not player.board_active:\n raise endpoints.ConflictException(\n 'This player has already an empty board have already a board')\n\n if player.board != board.key:\n raise endpoints.ConflictException('the board for this player is not the proper')\n\n if not gameutils.valid_target_pointed(request.x_position, request.y_position):\n raise endpoints.ConflictException('the targeted position is not ok')\n\n\n try:\n result = gameutils.search_in_board(board, request.x_position, request.y_position)\n\n if result == \"error\":\n raise endpoints.ConflictException('there is a problem with the BOARD')\n else:\n score = Score.query(Score.player_name == player.name, Score.board == board, Score.game == game)\n board.add_target(request.x_position, request.y_position)\n game.add_move_to_history(request.x_position, request.y_position, player.name)\n message = score.target_hitted(player,request.x_position, request.y_position, board, game)\n if score.check_if_win():\n message = \"You sunk the last Boat, you win!!!\"\n board.deactivate()\n return StringMessage(message=message)\n\n except ValueError:\n raise endpoints.BadRequestException('please verify the information ')", "def makeMove(self, move, player):", "def respond_to_move(self, move):\n\n # this will get the piece at the queried position,\n # will notify user if there is no piece there\n current_algebraic, new_algebraic = move\n row, column = self.algebraic_mapped_to_position[current_algebraic]\n if self.board[row][column] == empty_square:\n print(\"There is no piece at %s\" % (current_algebraic,))\n return\n piece, location = self.board[row][column]\n\n # this will get all possible moves from this position\n # and will make the move if the new position is a\n # valid move\n piece_name = self.piece_names[piece]\n moves = self.moves[piece_name]((row, column))\n \n new_row, new_column = self.algebraic_mapped_to_position[new_algebraic]\n print(\"old position %s, %s\" % (row, column))\n print(\"new algebraic %s\" % new_algebraic)\n print(\"new position %s, %s\" % (new_row, new_column))\n print(\"moves %s\" % moves)\n if (new_row, new_column) in moves:\n # this will change the game board to reflect the move\n self.board[row][column] = empty_square\n self.board[new_row][new_column] = piece+location", "def make_move(self, time_limit, players_score):\n \n start_time = time.time()\n d = 1 \n \n reach_the_end = False\n best_direction = None\n chosen_state = None\n \n 
time_limit = (2 * self.game_time * float(self.player_turns - self.turns + 1)) / ((self.player_turns + 1) * self.player_turns)\n time_limit += self.spaire_time\n\n if time_limit >= 5:\n TIME_ESTIMATION = 0.9 \n else:\n TIME_ESTIMATION = 0.85\n\n while not reach_the_end: \n \n iter_time_limit = TIME_ESTIMATION * ( time_limit - (time.time() - start_time) )\n \n state = State(get_directions(),self.board,self.locations,self.fruits_on_board_dict,PLAYER,players_score,self.penalty_score,self.fruits_ttl,self.turns)\n\n try:\n _, best_direction, reach_the_end,chosen_state = self.alphabeta.search(state,d,True,iter_time_limit,alpha=float('-inf'), beta=float('inf'))\n d += 1\n except Exception as e:\n self.spaire_time = time_limit - (time.time() - start_time)\n break\n \n # Set new location \n if best_direction == None:\n best_direction = self.get_random_move() \n self.set_player_location(best_direction)\n \n self.turns += 1\n return best_direction", "def updated_board(board_w, board_h, piece_list, board, position):\n board_state = board.state\n new_board = Board(board_w, board_h, 1, piece_list, position)\n new_board.state = board_state\n return new_board", "def makeMove(self, state, column, color):\n \n temp = [x[:] for x in state]\n for i in range(6):\n if temp[i][column] == ' ':\n temp[i][column] = color\n return temp", "def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True", "def update_board(board: Board, move: Move) -> Board:\n old_position = move[0]\n new_position = move[1]\n character = board[old_position[0]][old_position[1]]\n board = change_position(board, new_position, character)\n board = clear_position(board, old_position)\n \n return board", "def exploreNext(neighbor, move):\n if (neighbor != None and tuple(neighbor) not in explored):\n nextState = State(neighbor)\n nextState.path = currentState.path.copy()\n nextState.path.append(move)\n stateQueue.append(nextState)", "def select_move(self, game_state, return_visit_counts=False):\n \n # Start with a tree consisting of a root node only. The root node\n # is associated with the given board position.\n root = self.create_node(game_state)\n \n # If no legal moves can be made from the given board position, pass \n # the turn. This happens when all of the players pieces are surrounded,\n # if the player has no pieces left or if the game is over. \n if not root.branches:\n if return_visit_counts:\n return Act.pass_turn(), {}\n return Act.pass_turn()\n \n for i in range(self.num_rounds):\n # On each iteration, walk down the tree to a leaf node and select\n # a move to make from the corresponding leaf game state.\n node = root\n next_move = self.select_branch(node)\n while node.has_child(next_move):\n node = node.get_child(next_move)\n next_move = self.select_branch(node)\n \n # Create a new tree node for the selected move and add it to\n # the tree. 
If the leaf node corresponds to a finished game\n # then don't create a new node and assign a value to the node\n # based on who won.\n if node.state.is_not_over():\n if next_move:\n new_state = copy.deepcopy(node.state)\n new_state.take_turn_with_no_checks(Act.play(next_move))\n child_node = self.create_node(new_state, \n move=next_move, parent=node)\n move = next_move\n value = -1 * child_node.value \n else:\n # If the current player can't make any moves from the\n # selected gamestate then next_move will be 'None' meaning\n # the player passes the turn.\n new_state = copy.deepcopy(node.state)\n new_state.take_turn_with_no_checks(Act.pass_turn())\n child_node = self.create_node(new_state, \n move=next_move, parent=node)\n move = next_move\n value = -1 * child_node.value\n else:\n # If the game in the current state is over, then the last\n # player must have won the game. Thus the value/reward for the\n # other player is 1. The current node is not updated with\n # the new reward as no branches can stem from a finished game\n # state.\n move = node.last_move\n node = node.parent\n value = 1\n \n # Update the nodes traversed to get to the leaf node with the \n # new value for the new move.\n while node is not None:\n node.record_visit(move, value)\n move = node.last_move\n node = node.parent\n value *= -1\n \n # Get the visit counts of the branches if they were requested.\n if return_visit_counts:\n visit_counts = {}\n for move in root.branches.keys():\n visit_counts[move] = root.branches[move].visit_count\n \n # Get a list of possible moves sorted according to visit count,\n # the move with the highest visit count should be first in the list.\n moves = [move for move in root.moves()]\n moves = sorted(moves, key=root.visit_count, reverse=True)\n \n # Loop through the sorted moves and return the first legal one.\n for move in moves:\n if not game_state.is_move_illegal(move):\n if return_visit_counts:\n return Act.play(move), visit_counts\n return Act.play(move)\n \n # If no legal move is found then pass the turn.\n if return_visit_counts:\n return Act.pass_turn(), visit_counts\n return Act.pass_turn()", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] 
== 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def start_state():\n return chess.Board()", "def result(board, action):\n\n # Create completely new board\n temp_board = copy.deepcopy(board)\n # Location of move to be made\n row_index = action[0]\n col_index = action[1]\n\n # Check for valid action\n if not 0 <= row_index <= 2 or not 0 <= col_index <= 2:\n raise Exception(\"Invalid Action\")\n\n # Make move and update board\n if board[row_index][col_index] is None:\n temp_board[row_index][col_index] = player(board)\n else:\n raise Exception(\"Invalid Action\")\n\n return temp_board", "def make_move(self, index):\n if self.board[index] is None and self.get_winner() is None:\n self.board[index] = self.player\n self.player = 'O' if self.player == 'X' else 'X'\n self.winner = self.get_winner()", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def getMove(self, board):\n pass", "def move(self, move):\n out = ''\n for val in self.moves[move]:\n out += self.state[val]\n self.state = out", "def make_move(json):\n res = result(json[\"board\"], tuple(json[\"move\"]))\n emit(\"update\", res)", "def result(self, state, action):\r\n\r\n sc = copy.deepcopy(state)\r\n new_piece, player = self.new_or_old_piece(state)\r\n current_player, to_action, from_action = action\r\n\r\n # Make the move\r\n sc[to_action[0]][to_action[1]] = current_player\r\n\r\n # There can't be more than 6 pieces in any state.\r\n if not new_piece:\r\n # Now making from place as null again\r\n sc[from_action[0]][from_action[1]] = '-'\r\n\r\n return sc", "def makeState(*args,**kwargs):\n \n cells = []\n\n for item in args:\n #print item\n cells.append(item)\n \n newState = State(cells)\n #newState.printBoard()\n return newState", "def next_move(self, board):\n self.num_moves += 1\n \n \n ################### TODO: ######################################\n # Implement your strategy here. \n # Feel free to call as many as helper functions as you want.\n # We only cares the return of this function\n ################################################################", "def result(board, action):\n #we start by creating a deep copy of me board for me not to modify the original\n new_board = copy.deepcopy(board)\n #I get the player's turn in the current board.\n action_token = player(new_board)\n #If I the corresponding spot on my board is available\n if (new_board[action[0]][action[1]] == EMPTY):\n #then I will make that move with the current player\n new_board[action[0]][action[1]] = action_token\n return new_board\n else:\n #else, I raise a not a valid action error because the place is already taken or does not exist.\n raise Exception('Not a valid action')", "def move(self, board):\r\n self.start_time = time.time()\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n if disk_total < 15:\r\n # In early-game, we can allow a deeper minimax search since there's not too many possible moves.\r\n self.minimax_max_depth = 7\r\n\r\n elif disk_total < 45:\r\n # In mid-game, minimax tree has the most branches. 
Therefore, we must give it space to breathe.\r\n self.minimax_max_depth = 5\r\n else:\r\n # In the very end-game, minimax tree has the least branches, so we can allow a full search.\r\n self.minimax_max_depth = 8\r\n\r\n possible_moves = self.find_possible_moves(board, self.my_color)\r\n\r\n # If there's only one move available, return it\r\n if len(possible_moves) == 1:\r\n return possible_moves[0]\r\n\r\n # If we can take a corner, take it and don't consider any other options.\r\n # This rarely backfires and allows to save a tiny bit of time\r\n corners = [(0,0), (0,7), (7,0), (7,7)]\r\n for corner in corners:\r\n if corner in possible_moves:\r\n return corner\r\n\r\n # Grow a minimax tree to find the best available move\r\n alpha_init = -10000000\r\n beta_init = 10000000\r\n\r\n available_moves = self.minimax(board, 0, self.my_color, alpha_init, beta_init)\r\n print(available_moves)\r\n if available_moves != 0:\r\n best_value = max(available_moves.values())\r\n for move in available_moves:\r\n if available_moves[move] == best_value:\r\n return move\r\n\r\n return None", "def make(self,state_board):\n\t\tstate_board[self.column][self.line] = self.couleur #place the piece\n\t\tdrawPiece((self.column,self.line),self.couleur) #draws it on the board\n\t\tfor pos in self.flips: #flips all the pieces in flips\n\t\t\tstate_board[pos[0]][pos[1]] = self.couleur\n\t\t\tdrawPiece(pos,self.couleur) #draws it on the board", "def make_move(self, board: Board) -> int:\n move, evalutation = self.minimax(board, -math.inf, math.inf, self._depth, 1)\n return move", "def find_best_move(state: GameState) -> None:", "def make_move(self, start, end):\r\n start_pos = self.parse_pos(start) # Start and end position are lists that contain column and row\r\n end_pos = self.parse_pos(end)\r\n\r\n start_row = start_pos[0] # Position of row and columns are assigned to variables\r\n start_col = start_pos[1]\r\n end_row = end_pos[0]\r\n end_col = end_pos[1]\r\n\r\n board = self._board.get_board()\r\n start_piece = board[start_row][start_col].get_piece()\r\n end_piece = board[end_row][end_col].get_piece()\r\n\r\n\r\n # If there is no piece to be moved or game is over or piece is to be moved to its original location\r\n if start_piece is None or self._game_state != \"UNFINISHED\"\\\r\n or (start_row == end_row and start_col == end_col):\r\n return False\r\n\r\n start_piece_id = start_piece.get_player_id() # Contains the player id associated with the piece\r\n end_piece_player_id = None\r\n if end_piece is not None: # Executes if end piece contains a piece object\r\n end_piece_player_id = end_piece.get_player_id()\r\n\r\n # If Red's turn\r\n if self._player_turn == 1:\r\n if start_piece_id != 'r': # If red moves a black piece\r\n return False\r\n if start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board) : # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n # Checks if move violates flying general and puts self in check\r\n if self.is_not_flying_general() is True and self.is_in_check(\"red\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates flying general rule\r\n self.reverse_move(start, end, board,end_piece_player_id, end_piece)\r\n return False\r\n\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n # If Black's turn\r\n elif self._player_turn == -1:\r\n if start_piece_id != 'b': # If black moves a red piece\r\n return False\r\n if 
start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board): # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n if self.is_not_flying_general() is True and self.is_in_check(\"black\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates flying general rule\r\n self.reverse_move(start, end, board, end_piece_player_id, end_piece)\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False", "def newGame(self):\n self.last_move = \"go\"\n self.values = [None for i in range(64)]\n for i in range(8):\n self.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, i, 2, \"wpawn\"+str(i)))\n self.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, i, 7, \"bpawn\"+str(i)))\n\n self.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'a', 1, \"wrook0\"))\n self.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'b', 1, \"wknight0\"))\n self.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'c', 1, \"wbishop0\"))\n self.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, 'd', 1, \"wqueen\"))\n self.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, 'e', 1, \"wking\"))\n self.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'f', 1, \"wbishop1\"))\n self.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'g', 1, \"wknight1\"))\n self.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'h', 1, \"wrook1\"))\n\n self.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'a', 8, \"brook0\"))\n self.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'b', 8, \"bknight0\"))\n self.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'c', 8, \"bbishop0\"))\n self.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, 'd', 8, \"bqueen\"))\n self.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, 'e', 8, \"bking\"))\n self.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'f', 8, \"bbishop1\"))\n self.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'g', 8, \"bknight1\"))\n self.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'h', 8, \"brook1\"))", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n 
# 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def next_state(s_curr, action, params):\n P_dist = params['P_dist']\n R = params['R']\n n_rows = params['n_rows']\n n_cols = params['n_cols']\n occ_grid = params['occ_grid']\n\n rnd = np.random.uniform()\n\n s_next = s_curr\n\n # Actions - ['left','right','up','down']\n\n if rnd <= P_dist:\n if action == 0:\n move = 2\n elif action == 1:\n move = 2\n elif action == 2:\n move = 1\n else:\n move = 0\n elif rnd < 2*P_dist:\n if action == 0:\n move = 3\n elif action == 1:\n move = 3\n elif action == 2:\n move = 1\n else:\n move = 1\n else:\n move = action\n\n # Move left\n if move == 0:\n row_next = s_curr[0]\n col_next = s_curr[1] - 1\n if col_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move right\n if move == 1:\n row_next = s_curr[0]\n col_next = s_curr[1] + 1\n if col_next < n_cols and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move up\n if move == 2:\n row_next = s_curr[0] - 1\n col_next = s_curr[1]\n if row_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move down\n if move == 3:\n row_next = s_curr[0] + 1\n col_next = s_curr[1]\n if row_next < n_rows and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n r = R[s_next[0], s_next[1]]\n return s_next, r" ]
[ "0.76256883", "0.7569273", "0.7310993", "0.7211619", "0.72101855", "0.7194801", "0.7156599", "0.7146737", "0.7015209", "0.695439", "0.6947784", "0.69166005", "0.68700886", "0.68499726", "0.68240017", "0.68005484", "0.6775084", "0.677143", "0.6759415", "0.6757112", "0.6747905", "0.67469174", "0.6725215", "0.6722685", "0.6694036", "0.66837233", "0.6666776", "0.66548693", "0.66170526", "0.66025776", "0.6575576", "0.65568334", "0.65539306", "0.6542836", "0.65365756", "0.65276855", "0.6522291", "0.6520904", "0.6507446", "0.6502272", "0.6499487", "0.64907026", "0.6489363", "0.648773", "0.6471291", "0.6453185", "0.64527863", "0.64466745", "0.64246523", "0.641948", "0.64181113", "0.6416431", "0.6409723", "0.63979197", "0.63915026", "0.63831913", "0.63800514", "0.63698554", "0.6364695", "0.63605505", "0.6353994", "0.6347889", "0.63364255", "0.6335118", "0.6308442", "0.63058", "0.630155", "0.6291477", "0.62749046", "0.6268776", "0.62664443", "0.62645197", "0.6260608", "0.62576747", "0.62552875", "0.6255119", "0.6244211", "0.6241727", "0.6236889", "0.62319416", "0.62109816", "0.6209435", "0.6207664", "0.62055564", "0.620226", "0.6200814", "0.6199905", "0.6199102", "0.6198061", "0.61903983", "0.6188637", "0.6185589", "0.6182482", "0.61814076", "0.61750054", "0.6173348", "0.617186", "0.6169439", "0.6166761", "0.61640996" ]
0.6648307
28
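The Othello-style `move` method among the negatives above seeds its search with `alpha_init = -10000000` and `beta_init = 10000000` before growing a minimax tree. For readers comparing these negatives, here is a minimal, game-agnostic sketch of that alpha-beta pruning pattern; the `moves_for`, `apply_move`, and `evaluate` callables are hypothetical stand-ins for the game-specific helpers the snippet assumes (such as `find_possible_moves`), not functions taken from this dataset.

import math

def alphabeta(board, depth, alpha, beta, maximizing,
              moves_for, apply_move, evaluate):
    # Leaf node: search depth exhausted, or the side to move has no legal moves.
    moves = moves_for(board, maximizing)
    if depth == 0 or not moves:
        return evaluate(board)
    if maximizing:
        best = -math.inf
        for m in moves:
            best = max(best, alphabeta(apply_move(board, m), depth - 1,
                                       alpha, beta, False,
                                       moves_for, apply_move, evaluate))
            alpha = max(alpha, best)
            if beta <= alpha:  # opponent already has a better option elsewhere: prune
                break
        return best
    best = math.inf
    for m in moves:
        best = min(best, alphabeta(apply_move(board, m), depth - 1,
                                   alpha, beta, True,
                                   moves_for, apply_move, evaluate))
        beta = min(beta, best)
        if beta <= alpha:
            break
    return best

Pruning pays off most when `moves_for` yields strong moves first, which is why the snippet above short-circuits corner captures before ever starting the tree search.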
A custom Django template tag for encoding URL query string parameters.
def urlparams(*_, **kwargs):
    non_empty = {k: v for k, v in kwargs.items() if v is not None}
    if non_empty:
        return '?{}'.format(urlencode(non_empty))
    return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)", "def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)", "def encoded_query_str(request):\n return updated_query_str(request)", "def queryparams(*args, **kwargs):\n args = dict(args)\n args.update(kwargs)\n return urllib.urlencode(args)", "def _render_tag(self, tag, query_str):\n t = Template('{%% load djblets_utils %%}'\n '{%% autoescape off %%}%s{%% endautoescape %%}'\n % tag)\n\n request = HttpRequest()\n\n if query_str:\n request.GET = QueryDict(query_str)\n\n return t.render(Context({\n 'request': request,\n })).replace('&amp;', '&')", "def querystring_replace(context, **kwargs):\n # borrowed as-is from derrida codebase\n # inspired by https://stackoverflow.com/questions/2047622/how-to-paginate-django-with-other-get-variables\n\n # get a mutable copy of the current request\n querystring = context[\"request\"].GET.copy()\n # update with any parameters passed in\n # NOTE: needs to *set* fields rather than using update,\n # because QueryDict update appends to field rather than replacing\n for key, val in kwargs.items():\n querystring[key] = val\n # return urlencoded query string\n return querystring.urlencode()", "def urlencode(query):\n\n if hasattr(query, 'items'):\n # mapping objects\n query = query.items()\n l = []\n for k, v in query:\n k = quote_plus(k)\n if isinstance(v, basestring):\n v = quote_plus(v)\n l.append(k + '=' + v)\n else:\n v = quote_plus(unicode(v))\n l.append(k + '=' + v)\n return '&'.join(l)", "def url_replace(request, field, value):\n _dict = request.GET.copy()\n _dict[field] = value\n return _dict.urlencode()", "def append_query_param(url: str, key: str, value: str) -> str:\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)", "def this_url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return '{}?{}'.format(\n context['view'].request.META['PATH_INFO'],\n urlencode(query)\n )", "def url_with_querystring(url, **kwargs):\n return url + '?' + urlencode(kwargs)", "def do_urlencode(value):\n return urllib.quote(value.encode('utf8'))", "def _generate_query_string(self):\n \n query_items = {}\n \n for key, val in self.__dict__.iteritems():\n if not key.startswith('_'):\n query_items[key] = val.encode('utf-8')\n \n return urllib.urlencode(query_items)", "def param_replace(context, **kwargs):\n d = context['request'].GET.copy()\n for k,v in kwargs.items():\n d[k] = v\n for k in [k for k,v in d.items() if not v]:\n del d[k]\n return d.urlencode()", "def add_query_param(request, key, val):\n iri = request.get_full_path()\n uri = iri_to_uri(iri)\n return escape(replace_query_param(uri, key, val))", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def query_options_to_url(self):\n return '&'.join(['$%s=%s' % (key, value) for (key, value) in self.query_options.items()])", "def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' 
+ params.urlencode()) if params else ''", "def get_query_string(self):\r\n pass", "def smart_encode(**kwargs):\r\n args = dict(kwargs)\r\n for k, v in args.items():\r\n if v is None:\r\n del args[k]\r\n if not args:\r\n return ''\r\n return urlencode(args)", "def urlencode(self, value):\n if isinstance(value, unicode):\n value = value.encode(\"utf-8\")\n elif value is None:\n return \"\"\n\n assert isinstance(value, str)\n\n return urllib.quote_plus(value)", "def urlencode(query, doseq=0):\r\n if hasattr(query, 'items'):\r\n query = query.items()\r\n\r\n return original_urlencode(\r\n [(force_str(k),\r\n [force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))\r\n for k, v in query],\r\n doseq)", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def __str__(self):\n options = {}\n\n if self.marker:\n options['marker'] = self.marker\n\n if self.direction:\n options['direction'] = self.direction\n\n if self.limit:\n options['limit'] = self.limit\n\n if self.search:\n options['search'] = self.search\n\n return urllib.urlencode(options)", "def _unicode_urlencode(params):\r\n if isinstance(params, dict):\r\n params = params.items()\r\n return urllib.parse.urlencode([(k, v.encode('utf-8') or v) for k, v in params])", "def _url(path, **kwargs):\n if kwargs:\n encoded_parameters = urllib.urlencode(kwargs)\n if path.endswith('?'):\n # Trailing ? on path. Append parameters to end.\n return '%s%s' % (path, encoded_parameters)\n elif '?' in path:\n # Append additional parameters to existing query parameters.\n return '%s&%s' % (path, encoded_parameters)\n else:\n # Add query parameters to path with no query parameters.\n return '%s?%s' % (path, encoded_parameters)\n else:\n return path", "def toURLEncoded(self):\n args = sorted(self.toPostArgs().items())\n return urllib.parse.urlencode(args)", "def query_string(source=None, **kwargs):\n q = QueryDict('', True)\n if source:\n q.update(source)\n for k, v in kwargs.items():\n if v not in ['']:\n q.update({k: v})\n if q:\n return \"{}\".format('?' 
+ q.urlencode())\n return q.urlencode()", "def aws_urlquote(value):\n if isinstance(value, unicode):\n value = value.encode(\"utf-8\")\n return quote(value, \"/\")", "def create_query_sting(param_dict):\n params = \"&\".join([f\"{key}={value}\" for key, value in param_dict.items()])\n return params.replace(\"#\", \"%23\")", "def get_url(**kwargs):\r\n valid_kwargs = {key: value.encode('utf-8') for key, value in kwargs.iteritems() if value is not None}\r\n return u'{0}?{1}'.format(_url, urlencode(valid_kwargs))", "def reverse_add_get_parameters(url_name, *args, **kwargs):\n from django.core.urlresolvers import reverse \n import urllib\n url = reverse(url_name, args = args)\n params = urllib.parse.urlencode(kwargs)\n return url + \"?%s\" % params", "def encode_parameters(self, text):\n return quote_plus(text, safe='=:&\"')", "def test_additional_query_args():\n assert (normalize_url(\"http://example.com?c=d\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b&c=d\")\n assert (normalize_url(\"http://example.com\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b\")\n assert (normalize_url(\"http://example.com\", [(\"résumé\", \"résumé\")]) ==\n \"http://example.com/?r%C3%A9sum%C3%A9=r%C3%A9sum%C3%A9\")", "def url_escape(value, plus=True):\r\n quote = urllib_parse.quote_plus if plus else urllib_parse.quote\r\n return quote(utf8(value))", "def urlencode_s(query_params, allow_func=None):\n return \"&\".join([k + \"=\" + v for k, v in\n urlencode_sl(query_params, allow_func=allow_func)])", "def querystring(data, exclude=(), **kwargs):\n items = reduce(operator.add, (\n list((k, v) for v in values)\n for k, values in data.lists() if k not in exclude\n ), [])\n\n for key, value in kwargs.items():\n items.append((key, force_text(value)))\n\n return urlencode(sorted(items))", "def add_parameters_to_url(path, **kwargs):\n return path + \"?\" + urllib.urlencode(kwargs)", "def urlencode(query, doseq=False):\n if isinstance(query, MultiValueDict):\n query = query.lists()\n elif hasattr(query, \"items\"):\n query = query.items()\n query_params = []\n for key, value in query:\n if value is None:\n raise TypeError(\n \"Cannot encode None for key '%s' in a query string. Did you \"\n \"mean to pass an empty string or omit the value?\" % key\n )\n elif not doseq or isinstance(value, (str, bytes)):\n query_val = value\n else:\n try:\n itr = iter(value)\n except TypeError:\n query_val = value\n else:\n # Consume generators and iterators, when doseq=True, to\n # work around https://bugs.python.org/issue31706.\n query_val = []\n for item in itr:\n if item is None:\n raise TypeError(\n \"Cannot encode None for key '%s' in a query \"\n \"string. 
Did you mean to pass an empty string or \"\n \"omit the value?\" % key\n )\n elif not isinstance(item, bytes):\n item = str(item)\n query_val.append(item)\n query_params.append((key, query_val))\n return original_urlencode(query_params, doseq)", "def _url_encode(self, text):\n try:\n return (urllib.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))\n except:\n print('Using python3')\n return (urllib.parse.quote(text.replace(u'and', u'&'), safe='')\n .replace(u'%20', u'+'))", "def to_url(request):\r\n scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))\r\n query = parse_qs(query)\r\n\r\n for key, value in request.data_and_params.iteritems():\r\n query.setdefault(key, []).append(value)\r\n\r\n query = urllib.urlencode(query, True)\r\n return urlunsplit((scheme, netloc, path, query, fragment))", "def unicode_urlencode(params):\n if isinstance(params, dict):\n params = params.items()\n return urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v)\n for k, v in params])", "def composeQueryUrl(self, params):\n\t\ttextparams = urllib.urlencode(params)\n\t\treturn self.api_url + \"?\" + textparams", "def _other_page_querystring(self, page_number):\n if self.paginator.request:\n self.base_queryset['page'] = page_number\n return self.base_queryset.urlencode()\n\n # raise Warning(\"You must supply Paginator() with the request object for a proper querystring.\")\n return 'page=%s' % page_number", "def bob_export_url(query, value, export_variable_name='export'):\n if not query:\n return '%s=%s' % (export_variable_name, value)\n query = query.copy()\n if value:\n query[export_variable_name] = value\n else:\n try:\n del query[export_variable_name]\n except KeyError:\n pass\n return query.urlencode()", "def fixup_parameters(url, backend):\n result = url\n if backend == \"django\":\n result = url.replace(\"{\", \"(?P<\").replace(\"}\", \">.+)\")\n\n return result", "def _get_query_part(params: dict) -> str:\n params_cleaned = {k: v for k, v in params.items() if v is not None}\n return ('?' + urlencode(params_cleaned, quote_via=quote, safe=\"/,\")) if params_cleaned else \"\"", "def _prepare_params(self, params):\n if isinstance(params, six.string_types):\n return params\n return urllib.urlencode(params)", "def unicode_urlencode(self, params):\n if isinstance(params, dict):\n params = params.items()\n for i, param in enumerate(params):\n if isinstance(param[1], list):\n params[i] = (param[0], json.dumps(param[1]),)\n\n return urllib.urlencode(\n [(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params]\n )", "def bob_sort_url(query, field, sort_variable_name, type):\n query = query.copy()\n if type == 'desc':\n query[sort_variable_name] = '-' + field\n elif type == 'asc':\n query[sort_variable_name] = field\n return query.urlencode()", "def _url(path, **kwargs):\n if kwargs:\n if isinstance(kwargs.get('owner'), users.User):\n kwargs['owner'] = kwargs['owner'].email()\n encoded_parameters = urllib.urlencode(kwargs)\n if path.endswith('?'):\n # Trailing ? on path. Append parameters to end.\n return '%s%s' % (path, encoded_parameters)\n elif '?' 
in path:\n # Append additional parameters to existing query parameters.\n return '%s&%s' % (path, encoded_parameters)\n else:\n # Add query parameters to path with no query parameters.\n return '%s?%s' % (path, encoded_parameters)\n else:\n return path", "def _unicode_urlencode(params):\n if isinstance(params, dict):\n params = list(params.items())\n for i, param in enumerate(params):\n if isinstance(param[1], list):\n params[i] = (param[0], json.dumps(param[1]),)\n\n result = urllib.parse.urlencode([(k, isinstance(v, str) and v.encode('utf-8') or v) for k, v in params])\n return result", "def unicode_urlencode(self, params):\n if isinstance(params, dict):\n params = params.items()\n for i, param in enumerate(params):\n if isinstance(param[1] , list):\n params[i] = (param[0], json.dumps(param[1]),)\n\n if maj_ver == 2:\n return urllib.urlencode(\n [(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params]\n )\n\n if maj_ver == 3:\n return urllib.parse.urlencode(\n [(k, isinstance(v, str) and v.encode('utf-8') or v) for k, v in params]\n )\n\n return", "def test_percent_encode_querystring():\n assert (normalize_url(\"http://example.com/?a=hello{}\") ==\n \"http://example.com/?a=hello%7B%7D\")", "def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)", "def _FormatUrlParams(self, params):\n param_fragments = []\n for param in sorted(params.iteritems(), key=lambda x: x[0]):\n param_fragments.append('%s=%s' % (param[0], self._UrlEscape(param[1])))\n\n return '&'.join(param_fragments)", "def unicode_urlencode(self, params):\n if isinstance(params , dict):\n params = params.items()\n for i, param in enumerate(params):\n if isinstance(param[1] , list):\n params[i] = (param[0], json.dumps(param[1]),)\n\n if maj_ver == 2:\n return urllib.urlencode(\n [(k , isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params]\n )\n\n if maj_ver == 3:\n return urllib.parse.urlencode(\n [(k, isinstance(v, str) and v.encode('utf-8') or v) for k, v in params]\n )\n\n return", "def test_append_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"foo=baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=baz&bar=bar'))", "def toQueryString(self):\n return self.__str__()", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' 
+ urlencode(query)\n\n return url", "def FormatUrl(params):\n input_list = []\n for item in sorted(params.iteritems(), key=lambda x: x[0]):\n input_list.append('%s=%s' % (item[0], UrlEscape(item[1])))\n return '&'.join(input_list)", "def __str__(self):\r\n self.query = urllib.urlencode(self.args)\r\n self.query = urllib.unquote(self.query)\r\n return urlparse.urlunparse((self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))", "def querystring(parser, token):\r\n bits = token.split_contents()\r\n tag = bits.pop(0)\r\n updates = token_kwargs(bits, parser)\r\n # ``bits`` should now be empty of a=b pairs, it should either be empty, or\r\n # have ``without`` arguments.\r\n if bits and bits.pop(0) != \"without\":\r\n raise TemplateSyntaxError(\"Malformed arguments to '%s'\" % tag)\r\n removals = [parser.compile_filter(bit) for bit in bits]\r\n return QuerystringNode(updates, removals)", "def _encode_url(data: str) -> str:\n return urllib.parse.quote(data, safe=\"\")", "def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url", "def redirect_add_get_parameters(url_name, *args, **kwargs):\n from django.urls import reverse \n from django.http import HttpResponseRedirect\n import urllib\n url = reverse(url_name, args = args)\n params = urllib.parse.urlencode(kwargs)\n return HttpResponseRedirect(url + \"?%s\" % params)", "def replace_query_params(cls, url: str, **params: Mapping[str, str]) -> str:\n url, _ = cls.separate_query_params(url, params.keys())\n return cls.add_query_params(url, **params)", "def _encode_query_value(text, maximal=True):\n if maximal:\n bytestr = normalize('NFC', text).encode('utf8')\n return u''.join([_QUERY_VALUE_QUOTE_MAP[b] for b in bytestr])\n return u''.join([_QUERY_VALUE_QUOTE_MAP[t]\n if t in _QUERY_VALUE_DELIMS else t for t in text])", "def urlencode_sl(query_params, allow_func=None):\n query_params = query_params or {}\n encoded_pairs = []\n for k, v in query_params.items():\n # Keys are also percent-encoded according to OAuth spec.\n k = percent_encode(unicode_to_utf8(k))\n if allow_func and not allow_func(k, v):\n continue\n elif is_bytes_or_unicode(v):\n encoded_pairs.append((k, percent_encode(v),))\n else:\n if is_sequence(v):\n # Loop over the sequence.\n if len(v) > 0:\n for i in v:\n encoded_pairs.append((k, percent_encode(i), ))\n # ``urllib.urlencode()`` doesn't preserve blank lists.\n # Therefore, we're discarding them.\n #else:\n # # Preserve blank list values.\n # encoded_pairs.append((k, \"\", ))\n else:\n encoded_pairs.append((k, percent_encode(v),))\n # Sort after encoding according to the OAuth spec.\n return sorted(encoded_pairs)", "def url_with_query_str(url, *args, **kwargs):\n if len(args):\n d = args[0]\n elif len(kwargs):\n d = kwargs\n else:\n raise Exception('not found dict')\n if any(\"%\" in k for k in d.keys()):\n query = \"&\".join([\"{}={}\".format(k, v) for k, v in d.items()])\n else:\n query = urlencode(d)\n url = url.split('?')[0]\n return \"{}?{}\".format(url, query)", "def test_append_with_multiple_values_and_same_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1&a=2&a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def url_filter(val):\n if isinstance(val, Undefined):\n return 
UNDEFINED_LABEL\n return quote(str(val))", "def encode(self, url):\n\n # Escape ()\n return url.replace(\"(\", \"%28\").replace(\")\", \"%29\") if url else url", "def make_url(params, add_plugin_path=True):\r\n pairs = []\r\n for k, v in params.items():\r\n k = urllib.parse.quote_plus(str(k))\r\n v = urllib.parse.quote_plus(str(v))\r\n pairs.append(\"%s=%s\" % (k, v))\r\n params_str = \"&\".join(pairs)\r\n if add_plugin_path:\r\n return \"%s?%s\" % (sys.argv[0], params_str)\r\n return params_str", "def url(self, **kwargs) -> str:\n return self._pattern.format(**kwargs)", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def add_query_params(url: str, additional_params: dict) -> str:\n url_components = urlparse(url)\n original_params = parse_qs(url_components.query)\n # Before Python 3.5 you could update original_params with\n # additional_params, but here all the variables are immutable.\n merged_params = {**original_params, **additional_params}\n updated_query = urlencode(merged_params, doseq=True)\n # _replace() is how you can create a new NamedTuple with a changed field\n return url_components._replace(query=updated_query).geturl()", "def url(self):\n return url_search_posts(self.parameters, url_domain=self.url_domain)", "def get_querystring_for_page(request, page_number, querystring_key,\n default_number=1, prefix=\"?\"):\n querydict = request.GET.copy()\n querydict[querystring_key] = page_number\n # for page number 1 there is no need for querystring\n if page_number == default_number:\n del querydict[querystring_key]\n if \"querystring_key\" in querydict:\n del querydict[\"querystring_key\"]\n if querydict:\n return \"%s%s\" % (prefix, querydict.urlencode())\n return \"\"", "def urlQuote(string):\r\n return quote(string.encode(\"utf-8\"))", "def _create_query_str(data):\n params = []\n for name, value in data.items():\n params.append(name + '=' + str(value))\n\n return '?' 
+ '&'.join(params)", "def safe_urlencode(params, doseq=0):\n if IS_PY3:\n return urlencode(params, doseq)\n\n if hasattr(params, \"items\"):\n params = params.items()\n\n new_params = []\n\n for k, v in params:\n k = k.encode(\"utf-8\")\n\n if isinstance(v, (list, tuple)):\n new_params.append((k, [force_bytes(i) for i in v]))\n else:\n new_params.append((k, force_bytes(v)))\n\n return urlencode(new_params, doseq)", "def url(self, suffix: str, *, version: Optional[str] = None,\n query: Union[None, dict, MultiValueDict] = None,\n **kwargs: Any) -> str:\n name = self.url_name_format.format(\n version=version,\n app_label=self.app_label,\n basename=self.basename,\n suffix=suffix,\n )\n url = reverse(name, kwargs=kwargs)\n\n # support multiple values for same param: \"id=1&id=2&id=3\"\n if isinstance(query, MultiValueDict):\n params = []\n for key, value in query.lists():\n if isinstance(value, list):\n params.append('&'.join([f'{key}={x}' for x in value]))\n else:\n params.append(f'{key}={value}')\n\n url += f\"?{'&'.join(params)}\"\n elif query:\n # simple dict query params\n url += '?%s' % urlencode(query)\n return url", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def test_append_with_multiple_values_and_same_key_seperated(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1\" \"a=2\" \"a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def _build_url_exact(self, q: str, **kwargs: Dict) -> str:\n url = f\"{self._URL}?where=\"\n if kwargs.get('doi'):\n input_doi = kwargs.get('doi')\n url += f'''{{\"doi\":\"{input_doi}\"}}'''\n return url", "def t(param):\n return '<' + param + '>'", "def encodeToURL(self):\n return self.fields.toURL(self.request.return_to)", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def html_quote(v):\n if v is None:\n return ''\n return cgi.escape(str(v), 1)", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = 
base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def make_query_string(query, params):\n query_string = query\n\n index = 1\n for param in params:\n if param:\n to_replace = \"%%param%d%%\" % index\n query_string = query_string.replace(to_replace, param)\n index += 1\n\n return query_string", "def urlsafe(self):\n return urllib.quote(self.normalized())", "def b2_url_encode(s):\n return quote(s.encode('utf-8'))" ]
[ "0.6936687", "0.6936687", "0.65685886", "0.6343588", "0.62395805", "0.6233473", "0.6176143", "0.61670446", "0.6154101", "0.6131022", "0.60958886", "0.60618484", "0.6031017", "0.6029763", "0.5963693", "0.59559804", "0.59069306", "0.57844955", "0.57651424", "0.5763139", "0.5728873", "0.57060605", "0.56999815", "0.56999815", "0.55883867", "0.5585684", "0.5564089", "0.5551775", "0.5533645", "0.55259657", "0.5511073", "0.55026597", "0.5470186", "0.54468286", "0.54097944", "0.5405995", "0.53902274", "0.53782004", "0.53738534", "0.5360476", "0.53586245", "0.53322583", "0.5330078", "0.5312329", "0.53088534", "0.5274674", "0.5261749", "0.5230577", "0.52088964", "0.520818", "0.52006286", "0.5192988", "0.5190362", "0.51561683", "0.5152566", "0.51485443", "0.5147419", "0.5143158", "0.5139034", "0.5137799", "0.5133955", "0.5127543", "0.51150626", "0.5113676", "0.50909173", "0.5085674", "0.5082483", "0.50496", "0.50287384", "0.5015965", "0.5014243", "0.50061566", "0.50029474", "0.4984317", "0.49716085", "0.4964267", "0.4962956", "0.49608755", "0.49554542", "0.4951263", "0.49317554", "0.4925175", "0.4913354", "0.49011457", "0.48946765", "0.48900545", "0.48860005", "0.488376", "0.4875362", "0.4875362", "0.48731586", "0.48718345", "0.48356816", "0.48266312", "0.48220795", "0.48189834", "0.48147604", "0.4811169", "0.48047033", "0.48023114" ]
0.5674797
24
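The `urlparams` helper above drops `None`-valued parameters and URL-encodes the rest, but on its own it is just a function; to become the custom template tag its query describes, it has to be registered with a template `Library`. A minimal sketch of that wiring follows — the `templatetags/urlparams.py` location and the `django.utils.http.urlencode` import are assumptions about the surrounding app, not details recorded in this row.

# templatetags/urlparams.py -- hypothetical module inside any installed app's
# `templatetags` package, so `{% load urlparams %}` can find it.
from django import template
from django.utils.http import urlencode

register = template.Library()

@register.simple_tag
def urlparams(*_, **kwargs):
    # Skip None values so optional filters never appear in the query string.
    non_empty = {k: v for k, v in kwargs.items() if v is not None}
    if non_empty:
        return '?{}'.format(urlencode(non_empty))
    return ''

In a template, `{% load urlparams %}` followed by `{% url 'search' %}{% urlparams q="django" page=2 %}` would have the tag return `?q=django&page=2` (the `search` URL name is illustrative).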
Used by all TalkChannels when logging messages to preserve message order
def _mc_gen():
    n = 1
    while True:
        yield n
        n += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log(self, msg):\n self.telegram_queue.put(f\"{__name__.split('.')[-1]}: {msg}\")", "def log(self, message):", "def _log(self, message):\n pass", "def log_message(self, fmt, *args):\r\n pass\r\n # log_message\r", "def lastMessageReceived():", "def on_message(self, msg):\n self.log.info(msg)", "def on_message(client, userdata, msg):\n sortLight(msg)", "def log_message(self, format, *args):", "def onMessage(self, msg):\n log.msg(str(msg))", "def patchMessages():\n import OnlineEnv as Online\n app=Gaudi.ApplicationMgr()\n Configs.AuditorSvc().Auditors = []\n app.MessageSvcType = 'LHCb::FmcMessageSvc'\n if Gaudi.allConfigurables.has_key('MessageSvc'):\n del Gaudi.allConfigurables['MessageSvc']\n msg = Configs.LHCb__FmcMessageSvc('MessageSvc')\n msg.fifoPath = os.environ['LOGFIFO']\n msg.LoggerOnly = True\n msg.doPrintAlways = False\n# msg.OutputLevel = MSG_WARNING\n# msg.OutputLevel = Online.OutputLevel\n msg.OutputLevel = MSG_INFO", "def do_handle_log(self, workunit, level, *msg_elements):\r\n pass", "def consolidate_messages(self, msg):", "def log(self, msg):\n print(msg)", "def process_messages(self):\n pass", "def privmsg(self, user, channel, msg):\n self.logger.log(msg)", "def log(self, message: str):", "def on_log(self, mqtt_client, userdata, level, buf):\n logging.debug(\"DEBUG - on_log received\")", "def msg_handler(self, msg):\n self.view.frame.log.append(msg)", "def replay_log(self):\n logs = self.logger.read_logs()\n for log in logs:\n self.parse_message(log)", "def on_sync(self):\r\n self.log()", "def log(self, level, msg, *args, **kwargs):\n pass", "def log_message(self, fmt, *args):\n pass", "def SetLogMessages() -> None:\n caller = previousframe(2)\n caller_path = Path(caller.filename)\n caller_name, caller_dir = caller_path.stem, caller_path.parent\n msgs = (caller_dir / \"messages.json\").read_text(\"utf-8\")\n msg = AttrDict(json.loads(msgs)[caller_name])\n for name, obj in caller.frame.f_locals.items():\n if (isfunction(obj) or isclass(obj)) and name in msg:\n if hasattr(obj, \"_log_msg\"):\n obj._log_msg = AttrDict(obj._log_msg)\n obj._log_msg.update(msg[name])\n else:\n obj._log_msg = msg[name]", "def sub_callbackmsg(self, msg):\n\n print (msg.message)\n self.received_msg = self.received_msg + [msg.message]\n print (self.received_msg)", "def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )", "def logline(msg):\n print msg", "def logIt(self, msg):\n\n if (self.logger): self.logger.logIt(msg)", "def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += \"\\r\\n[%s] \" % time.process_time()\n self.parent.log += string + \"\\r\\n\"", "def event_log(self):\n pass", "def handle_message(self, msg):\n self.messages.append({\n 'type': msg.category,\n 'module': msg.module,\n 'obj': msg.obj,\n 'line': msg.line,\n 'column': msg.column,\n 'path': msg.path,\n 'symbol': msg.symbol,\n 'message': msg.msg,\n 'message-id': msg.msg_id,\n })", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def testIgnoreMessage(self):\n\n self.logger.accept('c',self.logger.foo)\n self.logger.accept('c',self.logger.bar)\n self.logger.ignore('c')\n messager.send('c')\n # No methods should have been called.\n 
self.assertEqual(self.logger.log,[])", "def log_message(self, msg):\n\t\tself.logView.log_message(msg)", "def _log_prepend(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart = p.get_start_iter()\n\t\tp.insert(start, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_start_iter(), 0.0)", "def on_message(self, message):\n obj = json_decode(message)\n self.writing_logs(obj)\n return", "def on_received(self, order):\n self.received_messages.append(order)\n self.bm.on_interesting_shit()", "def send_messages(messages, sent_messages):\n while messages:\n current_message = messages.pop()\n print(f\"Sending message: {current_message}\")\n sent_messages.append(current_message)", "def test_order_of_loaded_messages():\n app = HelperApp(server.message_app)\n app.post('/login/', {'username': 'jessie', 'password': 'frog'})\n\n # Add a bunch of messages\n for i, l in enumerate(\"abcd\"):\n # Sleep a second so that our messages have different timestamps\n if i != 0:\n time.sleep(1)\n\n app.get('/compose/')\n app.post('/compose/', {'to': 'james', 'subject': l, 'body': l.upper()})\n\n all_messages = message.load_all_messages()\n sent_messages = message.load_sent_messages('jessie')\n received_messages = message.load_sent_messages('james')\n\n # Check that we're loading messages correctly\n for messages in (all_messages, sent_messages, received_messages):\n for prev, current in zip(messages, messages[1:]):\n assert prev['time'] > current['time']", "def log_message(self, formate, *args):\n return", "def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % (time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)", "def _onLog(self, client:mqtt.Client, userdata:Any, level:int, buf:str) -> None:\n\t\tself.lowLevelLogging and self.messageHandler and self.messageHandler.logging(self, mqtt.LOGGING_LEVEL[level], f'MQTT: {buf}')", "def after_rotate_hindcast_logs(msg, config, checklist):\n return []", "def appendMsg(self, msg):\n # self.message += msg\n theTime = self.logger.mytime()\n # self.message += theTime + \" \" + str( msg )\n self.message = str(self.message) + str(theTime) + \" \" + str(msg)", "def on_message(self, context, payload):\n values = []\n\n if len(payload):\n if self.show_remaining:\n context['remaining-payload'] = self.toHex(payload)\n\n if self.orderedKeyList is not None:\n keyList = self.orderedKeyList\n elif self.sortByKey:\n keyList = sorted(context.keys())\n else:\n keyList = context.keys()\n\n for key in keyList:\n if key[0] == '$' or key[0] == '#':\n continue\n\n excluded = self.excludeAll\n\n if key in self.exclusions:\n excluded = True\n if key in self.inclusions:\n excluded = False\n\n if key not in context:\n excluded = True\n \n if not excluded:\n if not self.squelch_key:\n values.append('{0}={1}'.format(key, context[key]))\n else:\n values.append('{0}'.format(context[key]))\n\n if len(values):\n line = ','.join(values)\n if self.out_file is not None and len(line) > 0:\n self.out_file.write('{0}\\n'.format(line))\n self.out_file.flush()\n if self.verbosity() >= Verbosity.Verbose:\n print line\n else:\n print line\n\n self.dispatch_to_next(context, payload)", "def handleMessage(msg):", "def log(self, _strMessage=\"\"):\n self.edLogging.log(_strMessage)", "def order_log_results(self, log_search_order):\n raise errors.Unimplemented()", "def write(self, msg):\n self._current_stream.write(msg)\n if type(self._log) == list:\n self._log.append(msg)", "def emit(self, record):\n priority = self.getPriority(record.levelname)\n\n for line in 
record.getMessage().split('\\n'):\n line = line.strip()\n if line:\n self.syslog(priority, line)", "def __init__(self):\n self.msg_dict = dict() # msg: last_print_time_stamp", "def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))", "def _msg_cb(self, main_loop, msg):\n self._msg_hdr(msg)", "def log(msg):\n print msg", "def log(self, msg):\n logging.info(\"Logging Message\")\n ml = self.monk_logs\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n ml.insert({today: {\"log\": msg,\n \"sentiment\": self.get_sentiment(msg),\n \"weather\": Weather.get_weather()}})", "def log(self, string):\n if self.PRINT:\n print(string)\n sys.stdout.flush()\n self.message_list.append(string)", "def flag_messages(self, org, messages):\n pass", "def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += f\"\\r\\n[{time.process_time()}] \"\n self.parent.log += string + \"\\r\\n\"", "def handle_message(self, message):", "def log(self, msg):\n\n\t\tself.eyetribe.log_message(msg)", "def show_messages(self):\n self.masterlog.revealme()", "def list_messages(self):", "def logs_add_message(self, level, message):\n pass", "def on_log( client, userdata, level, buf ):\n print( \"MQTTClient log: \", buf )", "def handle_log(self, workunit, level, *msg_elements):\r\n if level <= self.settings.log_level:\r\n self.do_handle_log(workunit, level, *msg_elements)", "def messages(self, messages):\r\n\r\n self._messages = messages", "def _handle_ordered_packet(self, packet):\n pass", "def define_log_post_format_hooks(self):\n # TODO remove this once structlog supports hooks or handlers\n # these hooks accept a 'msg' and do not return anything\n return []", "def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)", "def archive_messages(self, org, messages):\n pass", "def comsume_msg(self, msg_type):", "def tracker_print(msg: Any) -> None:\n collective.communicator_print(msg)", "def log( cls, level, msg, *args, **kwargs ):\n arg_set = ( level, msg, ) # not clear how to get rest, for now discard\n print( f\"{arg_set[0]} >> {arg_set[1]}\" )\n cls.__log_later.append( arg_set )", "def log(self, message):\n #logs.logger.debug(\"asyncore log: %s\" % message)\n pass", "def massage_addinfo(self) -> str:\n self.message_str= \"{}, {}\\n\".format(self.sent_by, self.time)", "def logger(self, message):\n if hasattr(self.log, '__call__'):\n self.log(message.strip())", "def _log_message(self, *msgs,\n sep=' ',\n extra_indent_level=1,\n prefix_with_name=False,\n _prefix='',\n _suffix=''):\n if not msgs:\n return\n\n # do nothing unless enabled! 
if disabled, the other 'stack' accesses will blow up\n if self._enabled_stack[-1] <= 0: # disabled\n return\n\n # 0.3.0\n logging_state = self.logging_state_stack[-1]\n # Write nothing if output is stifled (caller is NOT _deco_base_f_wrapper_)\n # NOTE: only check global_mute() IN REALTIME, like so:\n mute = max(logging_state.mute, self.global_mute())\n if mute == self.MUTE.ALL:\n return\n # adjust for calls not being logged -- don't indent an extra level\n # (no 'log_calls frame', no 'arguments:' to align with),\n # and prefix with display name cuz there's no log_calls \"frame\"\n # NOTE, In this case we force \"prefix_with_name = True\" <<<\n ####if mute == self.MUTE.CALLS:\n if mute >= self.log_message_auto_prefix_threshold():\n extra_indent_level -= 1\n prefix_with_name = True\n\n indent_len = (logging_state.indent_len\n + (extra_indent_level * self.INDENT)\n )\n if indent_len < 0:\n indent_len = 0 # clamp\n\n the_msg = sep.join(map(str, msgs))\n if prefix_with_name:\n the_msg = logging_state.output_fname + ': ' + the_msg\n assert isinstance(_prefix, str) and isinstance(_suffix, str)\n the_msg = _prefix + the_msg + _suffix\n\n logging_state.logging_fn(prefix_multiline_str(' ' * indent_len, the_msg))", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def test_logger(self):\n from workflow import engine\n\n logging.root.handlers = []\n engine.LOG.handlers = []\n\n other_logger = logging.getLogger('other')\n wf_logger = engine.get_logger('workflow.test')\n\n test_io = StringIO()\n root_io = StringIO()\n other_io = StringIO()\n\n logging.root.addHandler(logging.StreamHandler(root_io))\n other_logger.addHandler(logging.StreamHandler(other_io))\n wf_logger.addHandler(logging.StreamHandler(test_io))\n\n # set the root level to WARNING; wf should honour parent level\n logging.root.setLevel(logging.WARNING)\n\n logging.warn('root warn')\n other_logger.warn('other warn')\n wf_logger.warn('wf warn')\n\n logging.info('root info')\n other_logger.info('other info')\n wf_logger.info('wf info')\n\n assert root_io.getvalue() == \"root warn\\nother warn\\n\" # Root logger should have two msgs\n assert other_io.getvalue() == \"other warn\\n\" # Other logger should have one msg\n assert test_io.getvalue() == \"wf warn\\n\" # Wf logger should have one msg\n\n root_io.seek(0)\n other_io.seek(0)\n test_io.seek(0)\n\n # now set too to DEBUG and wf to INFO\n logging.root.setLevel(logging.DEBUG)\n engine.reset_all_loggers(logging.WARNING)\n\n logging.warn('root warn')\n other_logger.warn('other warn')\n wf_logger.warn('wf warn')\n\n logging.info('root info')\n other_logger.info('other info')\n wf_logger.info('wf info')\n\n assert root_io.getvalue() == \"root warn\\nother warn\\n\" \"root info\\nother info\\n\" # Root logger should have four msgs\n assert other_io.getvalue() == \"other warn\\nother info\\n\" # Other logger should have two msg\n assert test_io.getvalue() == \"wf warn\\n\" # Wf logger should have one msg", "def __call__(self,message):\n if self.header != self.prevHeader:\n if self.prevHeader:\n self.writeFooter()\n if self.header:\n self.writeHeader(self.header)\n self.prevHeader = self.header\n self.writeMessage(message)", "def _stdlog(self, msg):\n print msg\n logger.info(msg)", "def 
_log_message(self, message):\n\t\tif message not in self._logged_messages:\n\t\t\twith open(self._logfile, \"a\") as f:\n\t\t\t\tf.write(message + \"\\n\")\n\t\tself._logged_messages.append(message)", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def send_messages(messages):\n while messages:\n msg = messages.pop()\n sent_messages.append(msg)", "def test_hl_message(self):\n utcmock = MagicMock()\n utcmock.utcnow = MagicMock(return_value=datetime.datetime.utcfromtimestamp(0))\n with patch(\"datetime.datetime\", utcmock):\n out = saltsupport.LogCollector()\n out.highlight(\"The {} TTYs became {} TTYs and vice versa\", \"real\", \"pseudo\")\n assert saltsupport.LogCollector.INFO in out.messages\n assert (\n type(out.messages[saltsupport.LogCollector.INFO])\n == saltsupport.LogCollector.MessagesList\n )\n assert out.messages[saltsupport.LogCollector.INFO] == [\n \"00:00:00.000 - The real TTYs became \" \"pseudo TTYs and vice versa\"\n ]", "def log(message):\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))", "def log(self, message):\n if VERBOSE:\n print self, message", "def log(msg):\n print(str(msg))", "def logIt(self, astr, prefix=\" [D] \"):\n self.protocol.logIt(astr, prefix=prefix)", "def write_message(self, payload):\n self.messages.append(payload)", "def after_rotate_logs(msg, config, checklist):\n return []", "def trace(msg):\n import datetime\n print('[{:%Y-%m-%d %H:%M:%S}]: '.format(datetime.datetime.now()) + msg)", "def horde_message(self, message):", "def msg_callback(self, *args, **kwargs):\n log(*args, name=self.idf.name, **kwargs)", "def log(content):\n\n now = datetime.datetime.now().strftime(\"%c\")\n now_time = time.time()\n # msg_last = '{} - {: >5.1f} seconds - {}'.format(now, now_time - TIME_LAST, content)\n\n if Logger._time_last is not None:\n msg_last = Logger.human_seconds(now_time - Logger._time_last)\n else:\n msg_last = ' ' * 13\n\n msgs = [now, msg_last, content]\n\n msg = \" │ \".join(msgs)\n\n msg_lines = [\"─\" * len(content) for content in msgs]\n\n msg_top = \"─┬─\".join(msg_lines)\n msg_lower = \"─┴─\".join(msg_lines)\n\n print(\" ┌─{}─┐\".format(msg_top))\n print(\" │ {} │\".format(msg))\n print(\" └─{}─┘\".format(msg_lower))\n\n Logger._time_last = time.time()", "def log(self, msg):\n self.ansible.log(msg)", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def event_logged(self, event):\n prefix = termcolor.colored(\">>>\", \"yellow\")\n logging.info(\"%s %s\" % (prefix, event.colored_str()))\n if event.to:\n subject = \"%s: %s\" % (self.name, event.phase)\n self.send_message(event.to, subject, event_email(event, parser=self.parser))", "def test_nth_log_post(self):\n self.conveyer.execute(self.conveyer.log(\"{message: \\\"first\\\"}\"))\n commands = self.conveyer.log(\"{message: \\\"second\\\"}\")\n self.assertEquals(len(commands), 1)\n self.assertEquals(type(commands[0]), AppendLogCmd)\n self.assertEquals(commands[0].event, \"{message: \\\"second\\\"}\")\n self.assertEquals(self.events_out.getvalue(), \"{message: \\\"first\\\"}\")", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def print_messages(sent_msgs):\n\tprint(\"\\nHere is the initial list of messages\")\n\tfor message in 
messages:\n\t\tprint(message)\n\tprint(\"\\nHere is a list of printed messages\")\n\tfor sent_msg in sorted(sent_msgs):\n\t\tprint(sent_msg)", "def print_messages(sent_msgs):\n\tprint(\"\\nHere is the initial list of messages\")\n\tfor message in messages:\n\t\tprint(message)\n\tprint(\"\\nHere is a list of printed messages\")\n\tfor sent_msg in sorted(sent_msgs):\n\t\tprint(sent_msg)" ]
[ "0.6550122", "0.63551664", "0.6192797", "0.601406", "0.5984528", "0.5872652", "0.58380395", "0.5811983", "0.5803327", "0.5792493", "0.5775632", "0.57648224", "0.57644266", "0.57573664", "0.57374674", "0.5730767", "0.5729119", "0.5690887", "0.568535", "0.56792635", "0.56616557", "0.5650943", "0.5650082", "0.56198496", "0.56136715", "0.5603306", "0.55897695", "0.5579788", "0.55775046", "0.55666727", "0.5564892", "0.5554616", "0.5538817", "0.55308205", "0.54993165", "0.54963595", "0.54891664", "0.5474408", "0.54649407", "0.54605126", "0.54544234", "0.54529893", "0.5451919", "0.54452515", "0.54406184", "0.54353756", "0.54221064", "0.5411409", "0.54003626", "0.5395716", "0.53917027", "0.5386291", "0.53787076", "0.53775364", "0.537268", "0.53721243", "0.53670955", "0.53669566", "0.5365537", "0.535564", "0.5351785", "0.5351357", "0.5344319", "0.53367233", "0.533458", "0.5331575", "0.53273815", "0.53264534", "0.5322779", "0.531684", "0.5316674", "0.5314442", "0.52963996", "0.529253", "0.5288012", "0.52865064", "0.5272735", "0.5268481", "0.5267604", "0.52671695", "0.52573013", "0.5254527", "0.52511775", "0.5249698", "0.5249687", "0.523774", "0.52374357", "0.5234942", "0.5229257", "0.52291334", "0.5224798", "0.5214193", "0.5212675", "0.52110875", "0.5208654", "0.52052385", "0.5204766", "0.520468", "0.5195922", "0.51942706", "0.51942706" ]
0.0
-1