query (stringlengths 9-3.4k) | document (stringlengths 9-87.4k) | metadata (dict) | negatives (sequencelengths 4-101) | negative_scores (sequencelengths 4-101) | document_score (stringlengths 3-10) | document_rank (stringclasses, 102 values) |
---|---|---|---|---|---|---|
Rotate points by quaternions. | def quat_rotate(X, q):
# repeat q along 2nd dim
ones_x = X[[0], :, :][:, :, [0]] * 0 + 1
q = torch.unsqueeze(q, 1) * ones_x
q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)
X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)
X_rot = hamilton_product(q, hamilton_product(X, q_conj))
return X_rot[:, :, 1:4] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def qrotate(points, axis, theta):\n q = Quaternion.rotator(axis, theta)\n return q.rotate(points)",
"def rotate_points(points,quaternions):\n \n res = np.zeros((quaternions.shape[0],points.shape[0],4)) \n res[:,:,1:] = points \n conjugates = conjugate(quaternions) \n \n for ix in range(len(points)):\n res[:,ix,:] = multiply_quaternions(quaternions,res[:,ix,:])\n res[:,ix,:] = multiply_quaternions(res[:,ix,:],conjugates)\n return res[:,:,1:]",
"def rotate(points, q):\n # Rotation is achieved by a quaternion sandwich: q * p * (q^-1)\n # This implementation computes the two quaternion products one after the other\n res = np.zeros((len(points), 4))\n # q*p\n res[:, 0] = -np.sum(q[1:]*points, axis=1)\n res[:, 1:] = q[0]*points + np.cross(q[1:], points)\n # (q*p) * q^-1\n res2 = np.zeros((len(points), 4))\n res2[:, 0] = res[:, 0]*q[0] - np.dot(res[:, 1:].copy(), -q[1:].copy())\n res2[:, 1:] = res[:, 0].copy().reshape((-1, 1))*(-q[1:]) + (q[0]*res[:, 1:]) + np.cross(res[:, 1:], -q[1:])\n return res2[:, 1:]",
"def point_rotation_by_quaternion(v, q):\r\n r = [0] + v\r\n q_conj = [q[0], -q[1], -q[2], -q[3]]\r\n return quaternion_product(quaternion_product(q, r), q_conj)[1:]",
"def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate",
"def spread_quaternions(points,num=100,quats_per_step=100):\n quats = np.zeros((num,4))\n rot_points = np.zeros((num,points.shape[0],3))\n quats[0,:] = [0,0,0,1]\n rot_points[0,:,:] = points\n for ix in range(1,num):\n rand_quats = random_quaternions(quats_per_step) \n positioned = rotate_points(points,rand_quats) \n dists = point_dists(rot_points[:ix],positioned)\n new_rot = np.argmax(dists) \n quats[ix,:]=rand_quats[new_rot,:]\n rot_points[ix,:,:] = positioned[new_rot,:,:]\n \n return quats,rot_points",
"def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q",
"def rotate(self, quaternion, origin_x = 0, origin_y = 0, origin_z = 0):\n\n for atom in self.get_atoms():\n atom.rotate(quaternion, origin_x, origin_y, origin_z)",
"def quatPassiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q).T @ quatRightMat(q) @ v_q\n\treturn v_qnew[1:]",
"def qrot(q, v):\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n qvec = q[..., 1:]\n uv = torch.cross(qvec.double(), v.double(), dim=len(q.shape) - 1)\n uuv = torch.cross(qvec.double(), uv.double(), dim=len(q.shape) - 1)\n return v + 2 * (q[..., :1] * uv + uuv)",
"def rotate_points(points, rot):\n t0 = time.time()\n points_rot = points.transpose()\n points_rot = rot @ points_rot\n points_rot = points_rot.transpose()\n # print(f\"Rotation Took {(time.time() - t0) * 1000:.1f} ms\")\n return points_rot",
"def quatActiveRot(q, v):\n\tv_q = np.zeros((4, 1))\n\tv_q[1:] = v\n\tv_qnew = quatLeftMat(q) @ quatRightMat(q).T @ v_q\n\treturn v_qnew[1:]",
"def qrotate(self, angle):\n\n q = int(round(angle / 90.0)) % 4\n a = 90.0 * q\n\n if (q == 0):\n pass\n elif (q == 1):\n self.srotate(a)\n self.center = (self.q_size[1] - self.center[1],\n 0 + self.center[0])\n elif (q == 2):\n self.srotate(a)\n self.center = (self.q_size[0] - self.center[0],\n self.q_size[1] - self.center[1])\n elif (q == 3):\n self.srotate(a)\n self.center = (0 + self.center[1],\n self.q_size[0] - self.center[0])",
"def qrot(q, v):\n assert q.shape[-1] == 4\n assert v.shape[-1] == 3\n assert q.shape[:-1] == v.shape[:-1]\n\n original_shape = v.shape\n q = q.view(-1, 4)\n v = v.view(-1, 3)\n\n qvec = q[:, 1:]\n uv = torch.cross(qvec, v, dim=1)\n uuv = torch.cross(qvec, uv, dim=1)\n return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)",
"def rotate(self, quaternion):\n rot3d = quaternion2rot3d(quaternion)\n new_pos = np.dot(self.atom_pos, rot3d.T)\n self.set_atom_pos(new_pos)",
"def test_quaternion_invert():\n q = np.array([0.58183503, -0.75119889, -0.24622332, 0.19116072])\n q_inv = pr.q_conj(q)\n q_q_inv = pr.concatenate_quaternions(q, q_inv)\n assert_array_almost_equal(pr.q_id, q_q_inv)",
"def rotate_points(self, pts, theta):\r\n assert(pts.shape[1] == 3)\r\n\r\n # get the rotation axis\r\n rot = (self.omega.vector())[:3]\r\n\r\n # normalize the rotation axis\r\n rot = rot/np.sqrt(np.sum(np.square(rot)))\r\n\r\n # get the axes\r\n a, b, c = rot\r\n\r\n # compute the quaternion from the rotation axis and angle\r\n rot = np.quaternion(np.cos(theta/2), np.sin(theta/2)*a,\r\n np.sin(theta/2)*b, np.sin(theta/2)*c)\r\n\r\n # normalize the quaternion\r\n rot = rot/np.abs(rot)\r\n\r\n # add a column of 0s to the pts\r\n pts = np.append(np.zeros(pts.shape[0])[:, np.newaxis], pts, axis=1)\r\n\r\n # create quaternion of pts\r\n pts = quaternion.as_quat_array(pts)\r\n\r\n # use quaternions to rotate the points\r\n out = rot*pts*np.conjugate(rot)\r\n\r\n # return the rotated points\r\n return(quaternion.as_float_array(out)[:, 1:])",
"def rotate(self, angle, points=None):\n if not isinstance(angle, torch.Tensor):\n angle = self.tensor.new_tensor(angle)\n\n assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \\\n f'invalid rotation angle shape {angle.shape}'\n\n if angle.numel() == 1:\n self.tensor[:, 0:3], rot_mat_T = rotation_3d_in_axis(\n self.tensor[:, 0:3],\n angle,\n axis=self.YAW_AXIS,\n return_mat=True)\n else:\n rot_mat_T = angle\n rot_sin = rot_mat_T[2, 0]\n rot_cos = rot_mat_T[0, 0]\n angle = np.arctan2(rot_sin, rot_cos)\n self.tensor[:, 0:3] = self.tensor[:, 0:3] @ rot_mat_T\n\n self.tensor[:, 6] += angle\n\n if points is not None:\n if isinstance(points, torch.Tensor):\n points[:, :3] = points[:, :3] @ rot_mat_T\n elif isinstance(points, np.ndarray):\n rot_mat_T = rot_mat_T.cpu().numpy()\n points[:, :3] = np.dot(points[:, :3], rot_mat_T)\n elif isinstance(points, BasePoints):\n points.rotate(rot_mat_T)\n else:\n raise ValueError\n return points, rot_mat_T",
"def add_quaternion(self, px, py, q):\n iy, ix = np.where(\n create_circular_mask(self.height, self.width, py, px, radius=50))\n idices = ix + iy * self.width\n for idx in idices:\n if self.average:\n self._rotations_buffer[idx].append(q.tolist())\n self._rotations[idx] = q.tolist()",
"def multiply_quaternions( qa, qb ):\n combined = Quaternion()\n\n combined.w = (qa.w * qb.w - qa.x * qb.x - qa.y * qb.y - qa.z * qb.z)\n combined.x = (qa.x * qb.w + qa.w * qb.x + qa.y * qb.z - qa.z * qb.y)\n combined.y = (qa.w * qb.y - qa.x * qb.z + qa.y * qb.w + qa.z * qb.x)\n combined.z = (qa.w * qb.z + qa.x * qb.y - qa.y * qb.x + qa.z * qb.w)\n return combined",
"def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. - xx2 - yy2\n\n return rmat",
"def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. - 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R",
"def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot",
"def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')",
"def rotate_points_degrees(points, axis, angle, origin=None):\n return rotate_points(points, axis, radians(angle), origin)",
"def rotate_q_exp(self, qx, qy, qz):\n # qx, qy, qz = self.rotate_q_exp(qx, qy, qz)\n \n q_vector = np.array( [[qx],[qy],[qz]] )\n \n q_rotated = np.dot( self.rotation_matrix_exp, q_vector )\n qx = q_rotated[0,0]\n qy = q_rotated[1,0]\n qz = q_rotated[2,0]\n \n return qx, qy, qz",
"def rotate_quadrant(self, quadrant, direction):\n direction = Direction(direction)\n self.play_area[quadrant].rotate(direction)",
"def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)",
"def turn(self, angle=pi, points=[]):\n for point in points:\n point.rotate(angle, self)",
"def rotate(q, v):\n if v.ndim == 1:\n qv = np.append(v,0)\n else:\n qv = np.hstack([v,np.zeros((len(v),1))])\n out = mult(q,qv)\n out = mult(out, inv(q))\n return out[:,:3]",
"def test_rotate_vec(self):\n\n quat = Quat.from_axis_angle_deg(Vec3(-1, -1, -1), 180.)\n vec = Vec3(1, 0, 0)\n\n rotated_vec = quat.rotate_vec(vec)\n\n self.assertAlmostEqual(-1/3.0, rotated_vec.x)\n self.assertAlmostEqual(2/3.0, rotated_vec.y)\n self.assertAlmostEqual(2/3.0, rotated_vec.z)",
"def rotate(points, rot_vecs):\n theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]\n with np.errstate(invalid='ignore'):\n v = rot_vecs / theta\n v = np.nan_to_num(v)\n dot = np.sum(points * v, axis=1)[:, np.newaxis]\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n\n return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v",
"def rotate(points, rot_vecs):\n theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]\n with np.errstate(invalid='ignore'):\n v = rot_vecs / theta\n v = np.nan_to_num(v)\n dot = np.sum(points * v, axis=1)[:, np.newaxis]\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n\n return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v",
"def rotate(points, rot_vecs):\n theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis]\n with np.errstate(invalid='ignore'):\n v = rot_vecs / theta\n v = np.nan_to_num(v)\n dot = np.sum(points * v, axis=1)[:, np.newaxis]\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n\n return cos_theta * points + sin_theta * np.cross(v, points) + dot * (1 - cos_theta) * v",
"def qwc_rotation(pauli_operators):\n paulis_with_identity = (qml.Identity, qml.PauliX, qml.PauliY, qml.PauliZ)\n if not all(isinstance(element, paulis_with_identity) for element in pauli_operators):\n raise TypeError(\n \"All values of input pauli_operators must be either Identity, PauliX, PauliY, or PauliZ instances,\"\n \" instead got pauli_operators = {}.\".format(pauli_operators)\n )\n\n for pauli in pauli_operators:\n if isinstance(pauli, qml.PauliX):\n qml.RY(-np.pi / 2, wires=pauli.wires)\n\n elif isinstance(pauli, qml.PauliY):\n qml.RX(np.pi / 2, wires=pauli.wires)",
"def write_interpolated_quaternions(interpolated_quaternions, timestamps, path):\n filename = \"quaternions_interpolated.csv\"\n data = [(q[0], q[1], q[2], q[3], t) for q, t in zip(interpolated_quaternions, timestamps)]\n with open(path + filename, \"w+\") as csvfile:\n header = [(\"q_w\", \"q_x\", \"q_y\", \"q_z\", \"t\")]\n content = header + data\n writer = csv.writer(csvfile)\n print(\"Writing: \" + path + filename)\n writer.writerows(content)",
"def test_quaternion_rotation_consistent_with_multiplication():\n random_state = np.random.RandomState(1)\n for _ in range(5):\n v = pr.random_vector(random_state)\n q = pr.random_quaternion(random_state)\n v_im = np.hstack(((0.0,), v))\n qv_mult = pr.concatenate_quaternions(\n q, pr.concatenate_quaternions(v_im, pr.q_conj(q)))[1:]\n qv_rot = pr.q_prod_vector(q, v)\n assert_array_almost_equal(qv_mult, qv_rot)",
"def rotations4(polycube, axis):\r\n for i in range(4):\r\n yield rot90(polycube, i, axis)",
"def test_quaternion_hamilton():\n q_ij = pr.concatenate_quaternions(pr.q_i, pr.q_j)\n assert_array_equal(pr.q_k, q_ij)\n q_ijk = pr.concatenate_quaternions(q_ij, pr.q_k)\n assert_array_equal(-pr.q_id, q_ijk)",
"def rotate(points, angle=0):\n # x = np.cos(R) * X - np.sin(R) * Y\n # y = np.sin(R) * X + np.cos(R) * Y\n X = points[0]\n Y = points[1]\n x = np.cos(angle) * X - np.sin(angle) * Y\n y = np.sin(angle) * X + np.cos(angle) * Y\n return [x, y]",
"def rotate_points(points, axis, angle, origin=None):\n if not origin:\n origin = [0.0, 0.0, 0.0]\n # rotation matrix\n x, y, z = normalize_vector(axis)\n c = cos(angle)\n t = (1 - cos(angle))\n s = sin(angle)\n R = [\n [t * x * x + c , t * x * y - s * z, t * x * z + s * y],\n [t * x * y + s * z, t * y * y + c , t * y * z - s * x],\n [t * x * z - s * y, t * y * z + s * x, t * z * z + c]\n ]\n # translate points\n points = translate_points(points, scale_vector(origin, -1.0))\n # rotate points\n points = [multiply_matrix_vector(R, point) for point in points]\n # translate points back\n points = translate_points(points, origin)\n return points",
"def rotate_vectors(q, vec):\n rot_vec = []\n for i, v in enumerate(vec):\n rot_vec.append(q.rotate(v))\n return rot_vec",
"def rotate_points(self, pointcloud_model, DEGREES = 0, query_points = False, use_rotation_tensor = False, save_rotation_tensor = False):\n ## https://en.wikipedia.org/wiki/Rotation_matrix\n if(use_rotation_tensor != True):\n angle_range = DEGREES\n x_angle = radians(random.random() * angle_range)\n y_angle = radians(random.random() * angle_range)\n z_angle = radians(random.random() * angle_range)\n\n rot_x = torch.Tensor([[1,0,0,0],[0, cos(x_angle),-sin(x_angle),0], [0, sin(x_angle), cos(x_angle),0], [0,0,0,1]])\n rot_y = torch.Tensor([[cos(y_angle),0,sin(y_angle), 0],[0, 1, 0,0], [-sin(y_angle),0,cos(y_angle),0], [0,0,0,1]])\n rot_z = torch.Tensor([[cos(z_angle), -sin(z_angle),0,0],[sin(z_angle), cos(z_angle),0,0],[0,0,1,0], [0,0,0,1]])\n rotation_matrix = torch.mm(rot_y, rot_z)\n rotation_matrix = torch.mm(rot_x,rotation_matrix) \n\n batch_size, point_cloud_size, _ = pointcloud_model.shape\n pointcloud_model = torch.cat([pointcloud_model, torch.ones(batch_size, point_cloud_size,1).to(self.device)], dim = 2)\n \n \n pointcloud_model_rotated = torch.matmul(pointcloud_model, rotation_matrix.to(self.device))\n self.rotation_matrix = rotation_matrix\n \n if(save_rotation_tensor):\n torch.save(rotation_matrix, 'rotation_matrix.pt') #used for plane prediction, change it at your will \n return pointcloud_model_rotated[:,:,0:3], (x_angle, y_angle, z_angle)\n else: \n batch_size, point_cloud_size, _ = pointcloud_model.shape\n pointcloud_model = pointcloud_model / sqrt(0.55**2 + 0.55**2 + 0.55**2)\n pointcloud_model = torch.cat([pointcloud_model, torch.ones(batch_size, point_cloud_size,1).to(self.device)], dim = 2)\n pointcloud_model_rotated =torch.matmul(pointcloud_model, self.rotation_matrix.to(self.device))\n return pointcloud_model_rotated[:,:,0:3]",
"def multiply_quaternions(quats1,quats2):\n w1 = quats1[:,0]\n x1 = quats1[:,1]\n y1 = quats1[:,2]\n z1 = quats1[:,3]\n\n w2 = quats2[:,0]\n x2 = quats2[:,1]\n y2 = quats2[:,2]\n z2 = quats2[:,3]\n\n res = np.zeros((quats1.shape[0],4))\n \n res[:,0] = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2\n res[:,1] = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2\n res[:,2] = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2\n res[:,3] = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 \n return res",
"def rotate(points, angle):\n ca = np.cos(angle*pi/180)\n sa = np.sin(angle*pi/180)\n R = np.array([[ca, -sa], [sa, ca]]) # positive is CCW\n R.shape += (1,) # add dim for broadcasting over n points\n points = points.T\n points.shape = (1,) + points.shape # 1x2xn\n points = (R*points).sum(axis=1).T # do rotation and return original shape\n return points",
"def rotate(self, angle, point=None):\n # Actually not working\n if not point:\n point = self.center\n for i in range(len(self.points)):\n self.points[i].rotate(angle, point)",
"def _quatm(q1, q0):\n w0, x0, y0, z0 = q0\n w1, x1, y1, z1 = q1\n\n return torch.cuda.FloatTensor([\n -x1*x0 - y1*y0 - z1*z0 + w1*w0,\n x1*w0 + y1*z0 - z1*y0 + w1*x0,\n -x1*z0 + y1*w0 + z1*x0 + w1*y0,\n x1*y0 - y1*x0 + z1*w0 + w1*z0,\n ])",
"def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw",
"def to_quaternion(self, roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]",
"def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])",
"def test_x_y_and_z_rot(self):\n\n axis = Vec3(4, 5, 6)\n # Create a Matrix representing a rotation.\n mat = Matrix44.from_axis_angle_deg(axis, 45.0)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure it matches the expected quaternion.\n expected_quat = Quat.from_axis_angle_deg(axis, 45.0)\n self.assertAlmostEqual(quat.x, expected_quat.x)\n self.assertAlmostEqual(quat.y, expected_quat.y)\n self.assertAlmostEqual(quat.z, expected_quat.z)\n self.assertAlmostEqual(quat.w, expected_quat.w)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def test_interpolate_same_quaternion():\n n_steps = 3\n random_state = np.random.RandomState(42)\n a = pr.random_axis_angle(random_state)\n q = pr.quaternion_from_axis_angle(a)\n traj = [pr.quaternion_slerp(q, q, t) for t in np.linspace(0, 1, n_steps)]\n assert_equal(len(traj), n_steps)\n assert_array_almost_equal(traj[0], q)\n assert_array_almost_equal(traj[1], q)\n assert_array_almost_equal(traj[2], q)",
"def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V",
"def to_quaternion(self,roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]",
"def rotate(points, rot_vecs):\n theta = np.linalg.norm(rot_vecs, axis=1)[:, np.newaxis] #np.newaxis converts this into a column vector.\n with np.errstate(invalid='ignore'):\n v = rot_vecs / theta\n v = np.nan_to_num(v)\n \n check = (theta!=0).astype(int)\n dot = np.sum(points * v, axis=1)[:, np.newaxis]\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n return (cos_theta * points) + check*(((1 - cos_theta) * v * dot) + (sin_theta * np.cross(v, points)))",
"def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw",
"def _rotate_points(points, angle = 45, center = (0,0)):\n if angle == 0:\n return points\n angle = angle*np.pi/180\n ca = np.cos(angle)\n sa = np.sin(angle)\n sa = np.array((-sa, sa))\n c0 = np.array(center)\n if np.asarray(points).ndim == 2:\n return (points - c0) * ca + (points - c0)[:,::-1] * sa + c0\n if np.asarray(points).ndim == 1:\n return (points - c0) * ca + (points - c0)[::-1] * sa + c0",
"def rotateZ(self, *args, **kwargs):\n ...",
"def quat_angle(quat):\n return 2 * float(np.arccos(min(1, max(-1, quat[0]))))",
"def rotation_mat_to_quat(R, q):\n q[0] = np.sqrt(R[0] + R[4] + R[8]) / 2\n q[1] = (R[7] - R[5]) / (4. * q[0])\n q[2] = (R[2] - R[6]) / (4. * q[0])\n q[3] = (R[3] - R[1]) / (4. * q[0])",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, kappa, omega) = self.get_phi_kappa_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.kappa_opposite_rotation_matrix(phi, np.deg2rad(self.alpha), kappa, omega)",
"def quaternion(self, name, q):\n R = self.R(name=name, q=q)\n quat = transformations.unit_vector(\n transformations.quaternion_from_matrix(matrix=R))\n return quat",
"def rotate(self, vector):\n if isinstance(vector, Quaternion):\n return self._rotate_quaternion(vector)\n q = Quaternion(vector=vector)\n a = self._rotate_quaternion(q).vector\n if isinstance(vector, list):\n l = [x for x in a]\n return l\n elif isinstance(vector, tuple):\n l = [x for x in a]\n return tuple(l)\n else:\n return a",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def from_quaternion(self, q: np.ndarray) -> np.ndarray:\n if q is None:\n return np.identity(3)\n if q.shape[-1]!=4 or q.ndim>2:\n raise ValueError(\"Quaternion must be of the form (4,) or (N, 4)\")\n if q.ndim>1:\n q /= np.linalg.norm(q, axis=1)[:, None] # Normalize\n R = np.zeros((q.shape[0], 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)\n R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])\n R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])\n R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])\n R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)\n R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])\n R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])\n R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])\n R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)\n return R\n q /= np.linalg.norm(q)\n return np.array([\n [1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],\n [2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],\n [2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])",
"def test_rotate_vec_z(self):\n\n quat = Quat.from_axis_angle_deg(Vec3(0, 0, 1), 90.)\n vec = Vec3(1, 1, 1)\n\n rotated_vec = quat.rotate_vec(vec)\n\n # 90 deg around z moves x from positive to negative\n self.assertAlmostEqual(-1.0, rotated_vec.x)\n self.assertAlmostEqual(1.0, rotated_vec.y)\n self.assertAlmostEqual(1.0, rotated_vec.z)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, omega) = angles[0:2]\n chi = np.deg2rad(self.chi)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def quat_to_yaw_deg(qx,qy,qz,qw):\n degree = pi/180\n sqy = qy*qy\n sqz = qz*qz\n siny = 2 * (qw*qz+qx*qy)\n cosy = 1 - 2*(qy*qy+qz*qz)\n yaw = int(atan2(siny,cosy)/degree)\n return yaw",
"def random_rotate():\n u = np.random.uniform(size=3)\n\n # Random quaternion\n q = np.array([np.sqrt(1-u[0])*np.sin(2*np.pi*u[1]),\n np.sqrt(1-u[0])*np.cos(2*np.pi*u[1]),\n np.sqrt(u[0])*np.sin(2*np.pi*u[2]),\n np.sqrt(u[0])*np.cos(2*np.pi*u[2])])\n \n # Convert the quaternion into a rotation matrix \n rotMat = np.array([[q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3],\n 2*q[1]*q[2] - 2*q[0]*q[3],\n 2*q[1]*q[3] + 2*q[0]*q[2]],\n [2*q[1]*q[2] + 2*q[0]*q[3],\n q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3],\n 2*q[2]*q[3] - 2*q[0]*q[1]],\n [2*q[1]*q[3] - 2*q[0]*q[2],\n 2*q[2]*q[3] + 2*q[0]*q[1],\n q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]]])\n return rotMat",
"def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw",
"def rotation_from_sphere_points_torch(x, y):\n if x.dim() == 1:\n x = x.unsqueeze(-2)\n if y.dim() == 1:\n y = y.unsqueeze(-2)\n\n dim = x.shape[1]\n\n # Compute the inner product\n inner_product = torch.mm(x, y.T)\n # Clamp in case any value is not in the interval [-1,1]\n # A small number is added/substracted to the bounds to avoid NaNs during backward computation.\n inner_product = inner_product.clamp(-1. + 1e-15, 1. - 1e-15)\n\n # Compute intermediate vector\n c_vec = x - y * inner_product\n c_vec = c_vec / torch.norm(c_vec)\n\n R = torch.eye(dim, dim, dtype=inner_product.dtype) + \\\n torch.sin(torch.acos(inner_product)) * (torch.mm(y.T, c_vec) - torch.mm(c_vec.T, y)) + \\\n (inner_product - 1.) * (torch.mm(y.T, y) + torch.mm(c_vec.T, c_vec))\n\n return R",
"def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):\n axis = axis.lower()\n axis_to_vec = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}\n\n if axis not in axis_to_vec:\n raise ValueError('Invalid axis. Must be either \"x\", \"y\", or \"z\"')\n\n rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg)\n return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)",
"def rotate_particle(quaternion, particle):\n rot3d = quaternion2rot3d(quaternion)\n new_pos = np.dot(particle.atom_pos, rot3d.T)\n particle.set_atom_pos(new_pos)",
"def test_rotate_vec_x(self):\n\n quat = Quat.from_axis_angle_deg(Vec3(1, 0, 0), 90.)\n vec = Vec3(1, 1, 1)\n\n rotated_vec = quat.rotate_vec(vec)\n\n # 90 deg around x moves y from positive to negative\n self.assertAlmostEqual(1.0, rotated_vec.x)\n self.assertAlmostEqual(-1.0, rotated_vec.y)\n self.assertAlmostEqual(1.0, rotated_vec.z)",
"def test_conversions_axis_angle_quaternion():\n q = np.array([1, 0, 0, 0])\n a = pr.axis_angle_from_quaternion(q)\n assert_array_almost_equal(a, np.array([1, 0, 0, 0]))\n q2 = pr.quaternion_from_axis_angle(a)\n assert_array_almost_equal(q2, q)\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n q = pr.quaternion_from_axis_angle(a)\n\n a2 = pr.axis_angle_from_quaternion(q)\n assert_array_almost_equal(a, a2)\n\n q2 = pr.quaternion_from_axis_angle(a2)\n pr.assert_quaternion_equal(q, q2)",
"def random_quaternions(count=100):\n rands = np.random.rand(count,3)\n root_1 = np.sqrt(rands[:,0])\n minus_root_1 = np.sqrt(1-rands[:,0])\n two_pi_2 = np.pi*2*rands[:,1]\n two_pi_3 = np.pi*2*rands[:,2]\n \n res = np.zeros((count,4))\n res[:,0] = minus_root_1*np.sin(two_pi_2)\n res[:,1] = minus_root_1*np.cos(two_pi_2)\n res[:,2] = root_1*np.sin(two_pi_3)\n res[:,3] = root_1*np.cos(two_pi_3)\n \n return res",
"def quat_to_rotmat(quat): \n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat",
"def euler_to_quat(roll, pitch, yaw):\n pose = Pose()\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n pose.orientation.x = quaternion[0]\n pose.orientation.y = quaternion[1]\n pose.orientation.z = quaternion[2]\n pose.orientation.w = quaternion[3]\n return pose.orientation",
"def quaternion_to_Rot(q: array):\n\n # Create a vector from the quaternion parameters (and check dimensions)\n q = array(q).reshape(4)\n\n # Normalize the quaternion\n q = divide(q, sqrt(sum(power(q, 2))))\n\n # Auxiliary matrix\n q_hat = zeros((3, 3))\n q_hat[0, 1] = -q[3]\n q_hat[0, 2] = q[2]\n q_hat[1, 2] = -q[1]\n q_hat[1, 0] = q[3]\n q_hat[2, 0] = -q[2]\n q_hat[2, 1] = q[1]\n\n # Return the rotation matrix\n return eye(3) + 2 * dot(q_hat, q_hat) + 2 * dot(q[0], q_hat)",
"def _rotate_points(points, angle=45, center=(0, 0)):\n if angle == 0:\n return points\n angle = angle * pi / 180\n ca = cos(angle)\n sa = sin(angle)\n sa = np.array((-sa, sa))\n c0 = np.array(center)\n if np.asarray(points).ndim == 2:\n return (points - c0) * ca + (points - c0)[:, ::-1] * sa + c0\n if np.asarray(points).ndim == 1:\n return (points - c0) * ca + (points - c0)[::-1] * sa + c0",
"def rotate(origin, point, angle):\n oy, ox = origin\n py, px = point\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qy, qx",
"def test_quaternion_diff():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q1 = pr.random_quaternion(random_state)\n q2 = pr.random_quaternion(random_state)\n a_diff = pr.quaternion_diff(q1, q2) # q1 - q2\n q_diff = pr.quaternion_from_axis_angle(a_diff)\n q3 = pr.concatenate_quaternions(q_diff, q2) # q1 - q2 + q2\n pr.assert_quaternion_equal(q1, q3)",
"def Rot_to_quaternion(r: array):\n\n # Compute the trace of the rotation matrix\n tr = r[0, 0] + r[1, 1] + r[2, 2]\n\n if tr > 0:\n S = sqrt(tr + 1.0) * 2\n qw = 0.25 * S\n qx = (r[2, 1] - r[1, 2]) / S\n qy = (r[0, 2] - r[2, 0]) / S\n qz = (r[1, 0] - r[0, 1]) / S\n elif (r[0, 0] > r[1, 1]) and (r[0, 0] > r[2, 2]):\n S = sqrt(1.0 + r[0, 0] - r[1, 1] - r[2, 2]) * 2\n qw = (r[2, 1] - r[1, 2]) / S\n qx = 0.25 * S\n qy = (r[0, 1] + r[1, 0]) / S\n qz = (r[0, 2] + r[2, 0]) / S\n elif r[1, 1] > r[2, 2]:\n S = sqrt(1.0 + r[1, 1] - r[0, 0] - r[2, 2]) * 2\n qw = (r[0, 2] - r[2, 0]) / S\n qx = (r[0, 1] + r[1, 0]) / S\n qy = 0.25 * S\n qz = (r[1, 2] + r[2, 1]) / S\n else:\n S = sqrt(1.0 + r[2, 2] - r[0, 0] - r[1, 1]) * 2\n qw = (r[1, 0] - r[0, 1]) / S\n qx = (r[0, 2] + r[2, 0]) / S\n qy = (r[1, 2] + r[2, 1]) / S\n qz = 0.25 * S\n\n q = array([qw, qx, qy, qz])\n q = q * sign(qw)\n\n return q",
"def rotate(origin, point, angle): # Library export\r\n ox, oy = origin\r\n px, py = point\r\n\r\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\r\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\r\n return (qx, qy)",
"def rotate(origin, point, angle):\n ox, oy = origin[0],origin[1]\n px, py = point[0],point[1]\n\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return [qx, qy]",
"def test_interpolate_quaternion():\n n_steps = 10\n random_state = np.random.RandomState(0)\n a1 = pr.random_axis_angle(random_state)\n a2 = pr.random_axis_angle(random_state)\n q1 = pr.quaternion_from_axis_angle(a1)\n q2 = pr.quaternion_from_axis_angle(a2)\n\n traj_q = [pr.quaternion_slerp(q1, q2, t)\n for t in np.linspace(0, 1, n_steps)]\n traj_R = [pr.matrix_from_quaternion(q) for q in traj_q]\n R_diff = np.diff(traj_R, axis=0)\n R_diff_norms = [np.linalg.norm(Rd) for Rd in R_diff]\n assert_array_almost_equal(R_diff_norms,\n R_diff_norms[0] * np.ones(n_steps - 1))",
"def rotate_quaternion ( angle, axis, old ):\n\n import numpy as np\n\n # Note that the axis vector should be normalized and we test for this\n # In general, the old quaternion need not be normalized, and the same goes for the result\n # although in our applications we only ever use unit quaternions (to represent orientations)\n assert old.size==4, 'Error in old quaternion dimension'\n assert axis.size==3, 'Error in axis dimension'\n assert np.isclose (np.sum(axis**2),1.0), 'axis normalization error {} {} {}'.format(*axis)\n\n # Standard formula for rotation quaternion, using half angles\n rot = np.sin(0.5*angle) * axis\n rot = np.array([np.cos(0.5*angle),rot[0],rot[1],rot[2]],dtype=np.float_)\n\n e = quatmul ( rot, old ) # Apply rotation to old quaternion\n return e",
"def to_q(self, method: str = 'chiaverini', **kw) -> np.ndarray:\n return self.to_quaternion(method=method, **kw)",
"def rotate(origin, point, angle):\n ox, oy = origin\n px, py = point\n qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)\n qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)\n return qx, qy",
"def quat_to_rotmat(quat):\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n B = quat.size(0)\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat",
"def test_quaternion_conventions():\n q_wxyz = np.array([1.0, 0.0, 0.0, 0.0])\n q_xyzw = pr.quaternion_xyzw_from_wxyz(q_wxyz)\n assert_array_equal(q_xyzw, np.array([0.0, 0.0, 0.0, 1.0]))\n q_wxyz2 = pr.quaternion_wxyz_from_xyzw(q_xyzw)\n assert_array_equal(q_wxyz, q_wxyz2)\n\n random_state = np.random.RandomState(42)\n q_wxyz_random = pr.random_quaternion(random_state)\n q_xyzw_random = pr.quaternion_xyzw_from_wxyz(q_wxyz_random)\n assert_array_equal(q_xyzw_random[:3], q_wxyz_random[1:])\n assert_equal(q_xyzw_random[3], q_wxyz_random[0])\n q_wxyz_random2 = pr.quaternion_wxyz_from_xyzw(q_xyzw_random)\n assert_array_equal(q_wxyz_random, q_wxyz_random2)",
"def rotate_points(points, a, b):\n if points.ndim == 1:\n points = points[None, :]\n\n a = normalize_vector(a)\n b = normalize_vector(b)\n k = normalize_vector(np.cross(a, b))\n theta = angle_between_vectors(a, b, normalize=False)\n\n points_rot = points * np.cos(theta) \\\n + np.cross(k, points) * np.sin(theta) \\\n + k * np.dot(k, points.T).reshape(-1, 1) * (1 - np.cos(theta))\n return points_rot",
"def rotation(self, *args, **kwargs) -> Any:\n pass",
"def euler_from_quaternion(x, y, z, w):\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return roll_x, pitch_y, yaw_z # in radians"
] | [
"0.7982522",
"0.7720816",
"0.76980317",
"0.7503948",
"0.71261555",
"0.67684865",
"0.66305953",
"0.66130453",
"0.65267867",
"0.6443186",
"0.64176416",
"0.64084005",
"0.63606745",
"0.633426",
"0.6299331",
"0.62791884",
"0.6247232",
"0.6116867",
"0.61149526",
"0.608029",
"0.60714394",
"0.6052814",
"0.6051079",
"0.60110676",
"0.6009486",
"0.6009138",
"0.6005317",
"0.59892696",
"0.5981084",
"0.59687",
"0.5949173",
"0.59448624",
"0.59448624",
"0.59448624",
"0.5928245",
"0.5914801",
"0.59128773",
"0.5912674",
"0.589625",
"0.5893935",
"0.588985",
"0.58744645",
"0.5869657",
"0.5869088",
"0.58470744",
"0.58285874",
"0.5818727",
"0.58120626",
"0.5810329",
"0.58026487",
"0.5794535",
"0.5787848",
"0.57683265",
"0.5763524",
"0.57569665",
"0.57538414",
"0.5750544",
"0.5734629",
"0.57280844",
"0.5722019",
"0.5719371",
"0.5718425",
"0.5718425",
"0.571428",
"0.57126313",
"0.57116973",
"0.5703747",
"0.57020825",
"0.5700782",
"0.5696124",
"0.5692763",
"0.5692763",
"0.5692645",
"0.5679335",
"0.5677076",
"0.5665164",
"0.56526184",
"0.5651864",
"0.56486386",
"0.5634201",
"0.5627337",
"0.56266177",
"0.5616632",
"0.56050205",
"0.55951005",
"0.5585532",
"0.55841994",
"0.5582086",
"0.5570331",
"0.5569869",
"0.55634254",
"0.5558872",
"0.5546873",
"0.5544687",
"0.5539757",
"0.55391484",
"0.553899",
"0.55386454",
"0.55280447"
] | 0.6787191 | 6 |
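
The positive document of the row above rotates points with a quaternion sandwich, q · X · q*, and calls `hamilton_product`, which is itself the positive document of the next row. Below is a minimal usage sketch of how the two compose; everything in it is an assumption read off the code rather than stated by the dataset: PyTorch is available, quaternions use (w, x, y, z) order (the real part sits at index 0, where `quat_rotate` pads points with a zero), points have shape (batch, n_points, 3), quaternions shape (batch, 4), and both functions are in scope.

```python
import math
import torch

# Sketch only: quat_rotate and hamilton_product are the two positive
# documents in this table; their wxyz layout and shapes are assumptions.
B, N = 2, 5
X = torch.randn(B, N, 3)  # batch of point clouds

# Unit quaternion for a 90-degree rotation about z: (cos(a/2), 0, 0, sin(a/2))
half = math.pi / 4
q = torch.tensor([math.cos(half), 0.0, 0.0, math.sin(half)]).repeat(B, 1)

X_rot = quat_rotate(X, q)  # shape (B, N, 3)

# Sanity check against the equivalent rotation matrix R_z(90 deg):
# each rotated point should equal R @ x, i.e. X_rot == X @ R.T.
R = torch.tensor([[0.0, -1.0, 0.0],
                  [1.0,  0.0, 0.0],
                  [0.0,  0.0, 1.0]])
assert torch.allclose(X_rot, X @ R.T, atol=1e-5)
```
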
Multiply qa by qb. | def hamilton_product(qa, qb):
qa_0 = qa[:, :, 0]
qa_1 = qa[:, :, 1]
qa_2 = qa[:, :, 2]
qa_3 = qa[:, :, 3]
qb_0 = qb[:, :, 0]
qb_1 = qb[:, :, 1]
qb_2 = qb[:, :, 2]
qb_3 = qb[:, :, 3]
# See https://en.wikipedia.org/wiki/Quaternion#Hamilton_product
q_mult_0 = qa_0 * qb_0 - qa_1 * qb_1 - qa_2 * qb_2 - qa_3 * qb_3
q_mult_1 = qa_0 * qb_1 + qa_1 * qb_0 + qa_2 * qb_3 - qa_3 * qb_2
q_mult_2 = qa_0 * qb_2 - qa_1 * qb_3 + qa_2 * qb_0 + qa_3 * qb_1
q_mult_3 = qa_0 * qb_3 + qa_1 * qb_2 - qa_2 * qb_1 + qa_3 * qb_0
return torch.stack([q_mult_0, q_mult_1, q_mult_2, q_mult_3], dim=-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quatMultiply(q1, q2):\n\tq1 = q1.flatten()\n\tq2 = q2.flatten()\n\tq3 = np.zeros(4)\n\tq3[0] = q1[0] * q2[0] - np.dot(q1[1:], q2[1:])\n\tq3[1:] = (q1[0] * q2[1:] + q2[0] * q1[1:] + np.cross(q1[1:], q2[1:]))\n\treturn (q3 / np.linalg.norm(q3)).reshape(-1, 1)",
"def multiply_quaternions( qa, qb ):\n combined = Quaternion()\n\n combined.w = (qa.w * qb.w - qa.x * qb.x - qa.y * qb.y - qa.z * qb.z)\n combined.x = (qa.x * qb.w + qa.w * qb.x + qa.y * qb.z - qa.z * qb.y)\n combined.y = (qa.w * qb.y - qa.x * qb.z + qa.y * qb.w + qa.z * qb.x)\n combined.z = (qa.w * qb.z + qa.x * qb.y - qa.y * qb.x + qa.z * qb.w)\n return combined",
"def quat_multiply(q1, q2):\n q = np.array([0.0, 0.0, 0.0, 0.0])\n q[0] = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]\n q[1] = q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3] - q1[3]*q2[2]\n q[2] = q1[0]*q2[2] - q1[1]*q2[3] + q1[2]*q2[0] + q1[3]*q2[1]\n q[3] = q1[0]*q2[3] + q1[1]*q2[2] - q1[2]*q2[1] + q1[3]*q2[0]\n return q",
"def __mul__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Multiply, value)\n return out",
"def quatmul ( a, b ):\n\n import numpy as np\n\n assert a.size==4, 'Error in a dimension'\n assert b.size==4, 'Error in b dimension'\n\n return np.array ( [ a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3],\n a[1]*b[0] + a[0]*b[1] - a[3]*b[2] + a[2]*b[3],\n a[2]*b[0] + a[3]*b[1] + a[0]*b[2] - a[1]*b[3],\n a[3]*b[0] - a[2]*b[1] + a[1]*b[2] + a[0]*b[3] ], dtype=np.float_ )",
"def square(q_1: Q) -> Q:\n\n end_q_type = f\"{q_1.q_type}²\"\n\n qxq = _commuting_products(q_1, q_1)\n\n sq_q = Q(q_type=end_q_type, representation=q_1.representation)\n sq_q.t = qxq[\"tt\"] - qxq[\"xx+yy+zz\"]\n sq_q.x = qxq[\"tx+xt\"]\n sq_q.y = qxq[\"ty+yt\"]\n sq_q.z = qxq[\"tz+zt\"]\n\n return sq_q",
"def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)",
"def multiplier(self) -> global___Expression:",
"def qmult(q1, q2):\n q1i = np.array(q1)\n q2i = np.array(q2)\n\n if q1i.ndim != q2i.ndim:\n logging.error('Number of dimensions in quaternion q1 and quaternion q2 do not match')\n return -1\n\n # check to make sure input has the correct dimensions\n q1i = qvalidate(q1i, 'q1', 'qmult')\n q2i = qvalidate(q2i, 'q2', 'qmult')\n\n if isinstance(q1i, int):\n return q1i\n\n if isinstance(q2i, int):\n return q2i\n\n # make sure elements match\n if q1i.size != q2i.size:\n logging.error('Number of elements in quaternion q1 and quaternion q2 do not match')\n return -1\n\n # now the actual dirty work\n qtmp0 = q1i[:, 0] * q2i[:, 0] - q1i[:, 1] * q2i[:, 1] - q1i[:, 2] * q2i[:, 2] - q1i[:, 3] * q2i[:, 3]\n qtmp1 = q1i[:, 1] * q2i[:, 0] + q1i[:, 0] * q2i[:, 1] - q1i[:, 3] * q2i[:, 2] + q1i[:, 2] * q2i[:, 3]\n qtmp2 = q1i[:, 2] * q2i[:, 0] + q1i[:, 3] * q2i[:, 1] + q1i[:, 0] * q2i[:, 2] - q1i[:, 1] * q2i[:, 3]\n qtmp3 = q1i[:, 3] * q2i[:, 0] - q1i[:, 2] * q2i[:, 1] + q1i[:, 1] * q2i[:, 2] + q1i[:, 0] * q2i[:, 3]\n\n qout = np.array([qtmp0, qtmp1, qtmp2, qtmp3]).T\n\n return qout",
"def Add(p, q):\n return p*q",
"def mul(self, a, b):\n return a * b",
"def update_Q(self, Qsa, Qsa_next, reward, alpha, gamma):\r\n return Qsa + (alpha * (reward + (gamma * Qsa_next) - Qsa))",
"def mul(a, b):\n c = Calculator()\n result = c.mul(a, b)\n click.echo('{} * {} = {}'.format(a, b, result))",
"def quatmul_torch(q1, q2):\n # RoI dimension. Unsqueeze if not fitting.\n a = q1.unsqueeze(0) if q1.dim() == 1 else q1\n b = q2.unsqueeze(0) if q2.dim() == 1 else q2\n\n # Corner dimension. Unsequeeze if not fitting.\n a = a.unsqueeze(1) if a.dim() == 2 else a\n b = b.unsqueeze(1) if b.dim() == 2 else b\n\n # Quaternion product\n x = a[:, :, 1] * b[:, :, 0] + a[:, :, 2] * b[:, :, 3] - a[:, :, 3] * b[:, :, 2] + a[:, :, 0] * b[:, :, 1]\n y = -a[:, :, 1] * b[:, :, 3] + a[:, :, 2] * b[:, :, 0] + a[:, :, 3] * b[:, :, 1] + a[:, :, 0] * b[:, :, 2]\n z = a[:, :, 1] * b[:, :, 2] - a[:, :, 2] * b[:, :, 1] + a[:, :, 3] * b[:, :, 0] + a[:, :, 0] * b[:, :, 3]\n w = -a[:, :, 1] * b[:, :, 1] - a[:, :, 2] * b[:, :, 2] - a[:, :, 3] * b[:, :, 3] + a[:, :, 0] * b[:, :, 0]\n\n return torch.stack((w, x, y, z), dim=2)",
"def multiply(self, a, b):\n return a * b",
"def _mul(self, other):\n if isinstance(other, SeqFormula):\n form1, v1 = self.formula, self.variables[0]\n form2, v2 = other.formula, other.variables[0]\n formula = form1 * form2.subs(v2, v1)\n start, stop = self._intersect_interval(other)\n return SeqFormula(formula, (v1, start, stop))",
"def product(self, q: np.ndarray) -> np.ndarray:\n if isinstance(q, Quaternion):\n qw, qx, qy, qz = q\n elif isinstance(q, (np.ndarray, list, tuple)):\n qw, qx, qy, qz = Quaternion(q)\n else:\n raise TypeError(f\"q must be a Quaternion or an array, not {type(q)}\")\n pq = np.array([\n self.w*qw - self.x*qx - self.y*qy - self.z*qz,\n self.w*qx + self.x*qw + self.y*qz - self.z*qy,\n self.w*qy - self.x*qz + self.y*qw + self.z*qx,\n self.w*qz + self.x*qy - self.y*qx + self.z*qw])\n return pq / np.linalg.norm(pq)",
"def qmult(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:\n w1, x1, y1, z1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3]\n w2, x2, y2, z2 = q2[:, 0], q2[:, 1], q2[:, 2], q2[:, 3]\n w = w1*w2 - x1*x2 - y1*y2 - z1*z2\n x = w1*x2 + x1*w2 + y1*z2 - z1*y2\n y = w1*y2 + y1*w2 + z1*x2 - x1*z2\n z = w1*z2 + z1*w2 + x1*y2 - y1*x2\n return torch.stack((w, x, y, z), dim=1)",
"def __matmul__(self, qubit):\n if isinstance(qubit, str):\n qubit = self.get_index(qubit)\n return self.compiled[qubit].y",
"def qUpdate(self,s,a,r,sPrime):\r\n #get max_a' Q(s',a')\r\n \"\"\"\r\n maxA = 0\r\n maxQ = float(\"-inf\")\r\n for aCurr in actions:\r\n qCurr = Q[(sPrime,aCurr)]\r\n if qCurr > maxQ:\r\n maxA = aCurr\r\n maxQ = qCurr\r\n \"\"\"\r\n maxQ = self.maxQ(sPrime)[0]\r\n #update Q and return it\r\n self.Q[(s,a)] = (1 - self.alpha) * self.Q[(s,a)] + self.alpha * (r + self.gamma * maxQ)",
"def ea_from_q(p, q):\n return p * q / (0.622 + 0.378 * q)",
"def __mul__(self, quat2):\n p4=quat2.w\n p = quat2.imaginary\n p_cross = skew_symmetric(p)\n A=np.zeros((4,4))\n A[:3,:3]=p4*np.eye(3)+p_cross\n A[3,0:3] = -p.T\n A[:3,3] = p\n A[3,3] = p4\n quat_as_vector = dot(A,self.asColVector(\"xyzw\"))\n return Quat(quat_as_vector)",
"def mult(p, q):\n if p.ndim == 1 and q.ndim > 1:\n p = np.tile(p,(q.shape[0],1))\n if q.ndim == 1 and p.ndim > 1:\n q = np.tile(q,(p.shape[0],1))\n if q.ndim == 1 and p.ndim == 1:\n p = p.reshape((1,4))\n q = q.reshape((1,4))\n\n ps = p[:,3]\n qs = q[:,3]\n pv = p[:,:3]\n qv = q[:,:3]\n\n pq = np.empty_like(p)\n pq[:,3] = ps * qs \n pq[:,3] -= arraylist_dot(pv, qv).flatten()\n pq[:,:3] = ps[:,np.newaxis] * qv \n pq[:,:3] += pv * qs[:,np.newaxis] \n pq[:,:3] += np.cross(pv , qv)\n\n #opposite sign due to different convention on the basis vectors\n #pq *= -1\n return pq",
"def __mul__(self,y): \n\n # BZO mulitplication\n if type(y)==type(self):\n Out = self._CreateSameType()\n \n for Ind1 in self.IndList():\n Obj1=self[Ind1]\n for Ind2 in y.IndList():\n Obj2=y[Ind2]\n \n Ind3 = tuple(add(Ind1,Ind2))\n \n Out[Ind3] += Obj1*Obj2\n \n # Scalar multiplicatin\n else:\n\n Out = self._CreateSameType()\n\n Out.SetLists(self.IndList(),[y*x for x in self.__ObjList])\n\n # Multiplication with item of its own type\n \n \n \n \n \n return Out",
"def _mul(a, b):\n return a * b",
"def __mul__(self, other):\n if is_unit(other):\n # print \"quantity * unit\"\n # Many other mul/div operations delegate to here because I was debugging\n # a dimensionless unit conversion problem, which I ended up fixing within\n # the reduce_unit() method.\n unit = self.unit * other\n return Quantity(self._value, unit).reduce_unit(self.unit)\n elif is_quantity(other):\n # print \"quantity * quantity\"\n # Situations where the units cancel can result in scale factors from the unit cancellation.\n # To simplify things, delegate Quantity * Quantity to (Quantity * scalar) * unit\n return (self * other._value) * other.unit\n else:\n # print \"quantity * scalar\"\n return self._change_units_with_factor(self.unit, other, post_multiply=False)",
"def update_Qsa(self,Qsa,Q_next_sa, next_reward, alpha, gamma):\n old_Q = Qsa\n Qsa = old_Q + alpha*(next_reward + gamma*Q_next_sa - old_Q)\n return Qsa",
"def __mul__(self, A):\n pass",
"def multiplication(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a * b",
"def calc_quad(self,mw,A0,A1,A2): \n return (A0 + A1 * mw + A2 * mw**2)",
"def mul(self):\n a = self.pop()\n b = self.pop()\n c= a*b\n self.push(c)",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps * other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.amps * other.amps\n return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit)",
"def quatdot(q_1,q_2):\n dot = np.zeros((1,4))\n dot = q_1[0,0]*q_2[0,0] + q_1[0,1]*q_2[0,1] + q_1[0,2]*q_2[0,2] + q_1[0,3]*q_2[0,3]\n return dot",
"def mul(a: Decimal, b: Decimal) -> Decimal:\n return a * b",
"def quaternion_multiplication(self, q1, q2):\n\n # Unpack these quaternions\n a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1,\n dim=-1)\n b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2,\n dim=-1)\n\n r_scalar = a_scalar * b_scalar - a_vecx * b_vecx - a_vecy * b_vecy - a_vecz * b_vecz\n r_vecx = a_scalar * b_vecx + a_vecx * b_scalar + a_vecy * b_vecz - a_vecz * b_vecy\n r_vecy = a_scalar * b_vecy + a_vecy * b_scalar + a_vecz * b_vecx - a_vecx * b_vecz\n r_vecz = a_scalar * b_vecz + a_vecz * b_scalar + a_vecx * b_vecy - a_vecy * b_vecx\n\n \"\"\"\n a = torch.randn([2, 3, 4])\n b = torch.randn([2, 3, 4])\n print(a) # 2 matrices of size 3 x 4\n print(b) # 2 matrices of size 3 x 4\n print(torch.stack([a, b])) # 4 matrices of size 3 x 4, first a, then b\n \"\"\"\n return torch.stack(\n [r_scalar, r_vecx, r_vecy, r_vecz],\n dim=-1\n )",
"def mul(a,b):\r\n return a*b",
"def mul(self, b):\n self.a *= float(b)",
"def create_q(size, b, order, m):\n s = np.matrix(np.zeros(size))\n for i in order:\n s[0,i] = 1\n a = np.dot(np.dot(s, b), s.T)\n a = a * (1. / (2. * m))\n return a.item(0)",
"def sq(self, x):\n\t\treturn x * x",
"def scalar_q(q_1: Q) -> Q:\n\n end_q_type = f\"scalar_q({q_1.q_type})\"\n s = Q([q_1.t, 0, 0, 0], q_type=end_q_type, representation=q_1.representation)\n return s",
"def _mul(self, other):\n return None",
"def quatreal(q):\n a = q[0,0]\n b = q[0,1]\n c = q[0,2]\n d = q[0,3]\n amat = a*np.identity(4)\n bmat = b*np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,-1],[0,0,1,0]])\n cmat = c*np.array([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])\n dmat = d*np.array([[0,0,0,1],[0,0,-1,0],[0,1,0,0],[-1,0,0,0]])\n return amat+bmat+cmat+dmat",
"def multiplicar(self):\n self.resultado = self.valor_1 * self.valor_2",
"def __mul__(self, other):\n if not isinstance(other, SeqBase):\n raise TypeError('cannot multiply sequence and %s' % type(other))\n return SeqMul(self, other)",
"def q1_prod_q2(self, q1, q2):\n\n q3 = np.array([\n (q1[0]*q2[0])-(q2[1]*q1[1])-(q2[2]*q1[2])-(q2[3]*q1[3]),\n (q2[0]*q1[1])+(q2[1]*q1[0])+(q2[2]*q1[3])-(q2[3]*q1[2]),\n (q2[0]*q1[2])+(q2[2]*q1[0])-(q2[1]*q1[3])+(q2[3]*q1[1]),\n (q2[0]*q1[3])+(q2[3]*q1[0])+(q2[1]*q1[2])-(q2[2]*q1[1])\n ])\n\n return q3",
"def __mul__(self, other):\r\n return self.prod(other)",
"def qMethod(g_b, g_n, m_b, m_n):\n\tB = g_b @ g_n.T + m_b @ m_n.T\n\tZ = (np.cross(g_b.flatten(), g_n.flatten()) + np.cross(m_b.flatten(), m_n.flatten())).reshape(-1, 1)\n\tK = np.block([[B + B.T - np.trace(B) * np.eye(3), Z], # quadratic cost max qTKq\n\t\t\t\t [Z.T, np.trace(B)]])\n\tw, v = np.linalg.eig(K)\n\tq_ = v[:, np.argmax(w), np.newaxis] # maximum eigenvector\n\tq_ /= np.linalg.norm(q_)\n\tq_b2n = np.zeros((4, 1)) # convert unit quat from [v s] to [s v]\n\tq_b2n[0, 0] = q_[-1, 0]\n\tq_b2n[1:, 0] = q_[:-1, 0]\n\treturn q_b2n",
"def multiplication(a, b):\n pass",
"def product1(a, b, c) :\n return a * b * c",
"def multiply(a, b):\n return a * b",
"def multiply(a, b):\n return a * b",
"def multiply(a, b):\n return a * b",
"def multiply(a, b):\n return a * b",
"def multiply(a, b):\n return a * b",
"def mult(a, b):\n return a * b",
"def __mul__(self,l):\r\n\t\t\r\n\t\t# multiply\r\n\t\tm = self.multiply(l)\r\n\t\t\r\n\t\treturn m",
"def update_Qi(Qval0, Qval1, reward1, alpha, gamma):\n return Qval0 + alpha * (reward1 + gamma*Qval1 - Qval0)",
"def multiplication(a, b):\n return a * b",
"def submitPirQuery(self,q,base):\n x,omega = self.db.shape\n print ('OMEGA IS ',omega)\n results = np.zeros(omega,dtype=np.uint64) \n for bit_idx in range(len(q)):\n if q[bit_idx]==0:\n continue\n results = (utilities.scaleArrayGF(self.db[bit_idx],q[bit_idx],base) + results) % base\n \n return results",
"def expand_q(self) -> Q:\n \"\"\"Expand each term.\"\"\"\n\n self.t = sp.expand(self.t)\n self.x = sp.expand(self.x)\n self.y = sp.expand(self.y)\n self.z = sp.expand(self.z)\n return self",
"def q(self, q: ComType):\n if isinstance(q, complex):\n self._pwr = q\n else:\n self._pwr = complex(0, q)",
"def __mul__(self, o): \n return MoebGen(self._a * o.a + self._b * o.c, self._a * o.b + self._b * o.d, \n self._c * o.a + self._d * o.c, self._c * o.b + self._d * o.d)",
"def __call__(self, q):\n # SASCalculator ignores the scale, so we add it in here\n yout = BasePDFGenerator.__call__(self, q)\n yout *= self.scale.value\n return yout",
"def quaterion_product(q, p):\n q0 = q[3]\n p0 = p[3]\n\n return [q0*p[0:3] + p0*q[0:3] + mtimes(skew(q[0:3]), p[0:3]), q0*p0 - mtimes(q[0:3].T, p[0:3])]",
"def __mul__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'mul')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(\n tf.multiply(self.tf, other.tf), self.type_name, provenance)\n else:\n provenance = NQExprProvenance(\n operation='mul',\n inner=self.provenance,\n other=NQExprProvenance(operation='constant', args=(None, other)))\n return self.context.as_nql(\n tf.multiply(self.tf, other), self.type_name, provenance)",
"def Mq_inv(self, q):\n\t\traise NotImplementedError",
"def multiply(a, b):\n return a*b",
"def addmul(a,b):\n return a*b+a*b",
"def products(q_1: Qs, q_2: Qs, kind: str = \"\", reverse: bool = False) -> Qs:\n\n q_1_copy = deepcopy(q_1)\n q_2_copy = deepcopy(q_2)\n qs_left, qs_right = Qs(), Qs()\n\n # Diagonalize if need be.\n if ((q_1.rows == q_2.rows) and (q_1.columns == q_2.columns)) or (\n \"scalar_q\" in [q_1.qs_type, q_2.qs_type]\n ):\n\n if q_1.columns == 1:\n qs_right = q_2_copy\n qs_left = diagonal(q_1_copy, qs_right.rows)\n\n elif q_2.rows == 1:\n qs_left = q_1_copy\n qs_right = diagonal(q_2_copy, qs_left.columns)\n\n else:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n # Typical matrix multiplication criteria.\n elif q_1.columns == q_2.rows:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n else:\n print(\n \"Oops, cannot multiply series with row/column dimensions of {}/{} to {}/{}\".format(\n q_1.rows, q_1.columns, q_2.rows, q_2.columns\n )\n )\n\n # Operator products need to be transposed.\n operator_flag = False\n if qs_left in [\"op\", \"operator\"] and qs_right in [\"op\", \"operator\"]:\n operator_flag = True\n\n outer_row_max = qs_left.rows\n outer_column_max = qs_right.columns\n shared_inner_max = qs_left.columns\n projector_flag = (\n (shared_inner_max == 1) and (outer_row_max > 1) and (outer_column_max > 1)\n )\n\n result = [\n [q0(q_type=\"\") for _i in range(outer_column_max)]\n for _j in range(outer_row_max)\n ]\n\n for outer_row in range(outer_row_max):\n for outer_column in range(outer_column_max):\n for shared_inner in range(shared_inner_max):\n\n # For projection operators.\n left_index = outer_row\n right_index = outer_column\n\n if outer_row_max >= 1 and shared_inner_max > 1:\n left_index = outer_row + shared_inner * outer_row_max\n\n if outer_column_max >= 1 and shared_inner_max > 1:\n right_index = shared_inner + outer_column * shared_inner_max\n\n result[outer_row][outer_column] = add(result[outer_row][outer_column],\n product(qs_left.qs[left_index],\n qs_right.qs[right_index], kind=kind, reverse=reverse\n )\n )\n\n # Flatten the list.\n new_qs = [item for sublist in result for item in sublist]\n new_states = Qs(new_qs, rows=outer_row_max, columns=outer_column_max)\n\n if projector_flag or operator_flag:\n return transpose(new_states)\n\n else:\n return new_states",
"def multiply(a,b):\n return a*b",
"def cross_product(qa, qb):\n qa_0 = qa[:, :, 0]\n qa_1 = qa[:, :, 1]\n qa_2 = qa[:, :, 2]\n\n qb_0 = qb[:, :, 0]\n qb_1 = qb[:, :, 1]\n qb_2 = qb[:, :, 2]\n\n # See https://en.wikipedia.org/wiki/Cross_product\n q_mult_0 = qa_1 * qb_2 - qa_2 * qb_1\n q_mult_1 = qa_2 * qb_0 - qa_0 * qb_2\n q_mult_2 = qa_0 * qb_1 - qa_1 * qb_0\n\n return torch.stack([q_mult_0, q_mult_1, q_mult_2], dim=-1)",
"def mul(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_mul(a.value, b.value, self.multiplicative_group))",
"def multiply_quaternions(quats1,quats2):\n w1 = quats1[:,0]\n x1 = quats1[:,1]\n y1 = quats1[:,2]\n z1 = quats1[:,3]\n\n w2 = quats2[:,0]\n x2 = quats2[:,1]\n y2 = quats2[:,2]\n z2 = quats2[:,3]\n\n res = np.zeros((quats1.shape[0],4))\n \n res[:,0] = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2\n res[:,1] = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2\n res[:,2] = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2\n res[:,3] = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 \n return res",
"def __mul__(self, i):\n return asarray(multiply(self, i))",
"def __mul__(self, other):\n\n newlist = [v for v in self.args]\n for i, v in enumerate(newlist):\n newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1],\n newlist[i][2])\n return Dyadic(newlist)",
"def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)",
"def convert_to_q(self):\n if self.measure == 'Q':\n warnings.warn('Parameters are already converted to Q!')\n else:\n kappa_sp = self.kappa_s\n kappa_yp = self.kappa_y\n self.kappa_s = self.kappa_s - self.lmbd_s * self.eta_s\n self.kappa_y = self.kappa_y - self.lmbd_y * self.eta_y\n self.scale = kappa_sp / self.kappa_s\n self.mean_v *= (kappa_yp / self.kappa_y * self.scale)\n self.lmbd = 0\n self.eta_y *= (self.scale**.5)\n self.measure = 'Q'\n self.update_ajd()",
"def math_use(a, b):\n res_product1 = a * b\n print(\"{} * {} = {}\".format(a, b, res_product1))\n if isinstance(a, int):\n res_product2 = a ** b\n print(\"{0} ** {1} = {2}\".format(a, b, res_product2))",
"def q(self, s, a):\n # The Q value of the current state is based on the max Q value of the next state.\n next_state_max_q = max([self.qtable[s[0]+x][s[1]+y] for (x,y) in self.maze.moves()])\n self.qtable[s[0]+a[0]][s[1]+a[1]] = (self.qtable[s[0]+a[0]][s[1]+a[1]]\n + self.alpha * (self.r(s,a) + self.gamma * next_state_max_q\n - self.qtable[s[0]+a[0]][s[1]+a[1]]))\n\n return self.qtable[s[0]+a[0]][s[1]+a[1]]",
"def multiplier(*args):\n if not args:\n return 0\n product = args[0]\n for a in args[1:]:\n print('a is: ',a)\n product *= a\n return product,args",
"def second_q_ops(self):\n raise NotImplementedError()",
"def mul(a,b):\n return [a[0]*b[0],a[1]*b[1],a[2]*b[2],1.0]",
"def math_mult():\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(mult(a, b))",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def T1q(request):\n T = _get_test_table()\n if request.param:\n T[\"a\"] = T[\"a\"] * u.m\n return T",
"def mult_operation(self):\n n1 = self.memory[self.memory[self._cursor + 1]]\n n2 = self.memory[self.memory[self._cursor + 2]]\n position = self.memory[self._cursor + 3]\n self.memory[position] = n1 * n2\n # print(f'Cursor: {self._cursor}\\tAssigning position {position} with value {n1} * {n2} = {n1 * n2}')\n return",
"def multiply(self):\n return self._do_calc(self.multiplier)",
"def multiply(self):\n return self._do_calc(self.multiplier)",
"def multiply(self):\n return self._do_calc(self.multiplier)",
"def multiply_by_expression(self, expr):\n graded_dict = {}\n for expr in self.graded_dict:\n graded_dict[expr * expr] = self.graded_dict[expr]\n return ANCOVA(graded_dict)",
"def act(self, q_values, *args, **kwargs):\n pass",
"def product(a, b):\n return a * b",
"def mul(self, left: int, right: int, put_idx: int) -> None:\n self.write(left * right, put_idx)",
"def sqr(a):\n return a * a",
"def __mul__(self, _scalar):\n\t\tans = copy.deepcopy(self)\n\t\tfor i in range(0, self.n):\n\t\t\t\tans[i] *= _scalar\n\t\treturn ans",
"def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2",
"def __mul__(self, other, nested=False):\n\n other = formula(other, namespace=self.namespace)\n\n selftermnames = self.termnames()\n othertermnames = other.termnames()\n\n I = len(selftermnames)\n J = len(othertermnames)\n\n terms = []\n termnames = []\n\n for i in range(I):\n for j in range(J):\n termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))\n pieces = termname.split('*')\n pieces.sort()\n termname = '*'.join(pieces)\n termnames.append(termname)\n\n selfnames = self.terms[i].names()\n othernames = other.terms[j].names()\n\n if self.terms[i].name is 'intercept':\n _term = other.terms[j]\n _term.namespace = other.namespace\n\n elif other.terms[j].name is 'intercept':\n _term = self.terms[i]\n _term.namespace = self.namespace\n else:\n names = []\n\n d1 = len(selfnames) \n d2 = len(othernames)\n\n for r in range(d1):\n for s in range(d2):\n name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))\n pieces = name.split('*')\n pieces.sort()\n name = '*'.join(pieces)\n names.append(name)\n\n def product_func(value, d1=d1, d2=d2):\n\n out = []\n for r in range(d1):\n for s in range(d2):\n out.append(value[r] * value[d1+s])\n return N.array(out)\n\n sumterms = self + other\n sumterms.terms = [self, other] # enforce the order we want\n sumterms.namespace = self.namespace\n\n _term = quantitative(names, func=sumterms, termname=termname,\n transform=product_func)\n _term.namespace = self.namespace\n\n\n terms.append(_term)\n\n return formula(terms, namespace=self.namespace)",
"def a_q(self, phi, ci, tl):\n\t return (self.j(phi, tl)*(ci - self.gamma(tl)))/(4.*(ci + 2.*self.gamma(tl)))",
"def product(self):\n raise NotImplementedError"
] | [
"0.6547377",
"0.6411355",
"0.64028084",
"0.62495494",
"0.62204504",
"0.6107999",
"0.606811",
"0.605762",
"0.6046647",
"0.60203433",
"0.5900787",
"0.58968675",
"0.5880514",
"0.5878625",
"0.58512473",
"0.5835514",
"0.57973033",
"0.57964456",
"0.5778016",
"0.57761025",
"0.57125384",
"0.5705553",
"0.5688845",
"0.5683221",
"0.56560856",
"0.5648899",
"0.56422406",
"0.5615325",
"0.55983293",
"0.55809397",
"0.55803376",
"0.55666685",
"0.5554949",
"0.5525627",
"0.5522744",
"0.5512599",
"0.55036837",
"0.54933685",
"0.5491981",
"0.5482044",
"0.5470703",
"0.5468698",
"0.5466589",
"0.5463462",
"0.54559964",
"0.5450712",
"0.5442747",
"0.5438984",
"0.54365927",
"0.5430494",
"0.5430494",
"0.5430494",
"0.5430494",
"0.5430494",
"0.54285115",
"0.54278153",
"0.54173505",
"0.5412311",
"0.5411559",
"0.5410864",
"0.5409871",
"0.54079723",
"0.540633",
"0.54026055",
"0.54009974",
"0.5391443",
"0.53819174",
"0.5378759",
"0.5368025",
"0.53568214",
"0.53541464",
"0.53460366",
"0.53418165",
"0.53335273",
"0.5332858",
"0.53302747",
"0.53255254",
"0.5317846",
"0.53128755",
"0.53099924",
"0.5306177",
"0.5304148",
"0.5302133",
"0.5273326",
"0.526277",
"0.52576834",
"0.5247893",
"0.5247893",
"0.5247893",
"0.5243895",
"0.523816",
"0.5237014",
"0.5233794",
"0.5228137",
"0.5213381",
"0.5211781",
"0.52113426",
"0.5210853",
"0.52063984"
] | 0.6089931 | 7 |
Check that matrix type is preserved. | def test_matrix_b_only(self):
a = array([[1., 2.], [3., 4.]])
b = matrix([2., 1.]).T
self.do(a, b) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True",
"def verify_numpy_type(self, matrix):\n if type(matrix) != np.ndarray and matrix != None:\n return np.asfarray(matrix)\n elif type(matrix) == np.ndarray and matrix != None:\n return matrix",
"def isMatrix(M):\r\n if type(M) == matrix:\r\n return M\r\n elif type(M) == np.ndarray:\r\n return matrix(M)\r\n else:\r\n raise Exception('Unknown input format. Should be matrix or numpy array')",
"def _validate_dtype():\n\n test_array = _spsparse.random(5, 5, density=0.5, format=\"csc\", dtype=np.float32, random_state=50)\n test_comparison = test_array.A\n\n csc_ref, precision_flag = _create_mkl_sparse(test_array)\n\n try:\n csr_ref = _convert_to_csr(csc_ref)\n final_array = _export_mkl(csr_ref, precision_flag)\n if not np.allclose(test_comparison, final_array.A):\n raise ValueError(\"Match failed after matrix conversion\")\n _destroy_mkl_handle(csr_ref)\n finally:\n _destroy_mkl_handle(csc_ref)",
"def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two",
"def is_MatrixMorphism(x):\n return isinstance(x, MatrixMorphism_abstract)",
"def is_integer(matrix):\n return numpy.issubdtype(matrix.dtype, numpy.integer)",
"def test_check_matrix():\n R_list = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n R = pr.check_matrix(R_list)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_int_array = np.eye(3, dtype=int)\n R = pr.check_matrix(R_int_array)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_array = np.eye(3)\n R = pr.check_matrix(R_array)\n assert_array_equal(R_array, R)\n\n R = np.eye(4)\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix with shape\",\n pr.check_matrix, R)\n\n R = np.array([[1, 0, 0], [0, 1, 0], [0, 0.1, 1]])\n assert_raises_regexp(\n ValueError, \"inversion by transposition\", pr.check_matrix, R)\n\n R = np.array([[1, 0, 1e-16], [0, 1, 0], [0, 0, 1]])\n R2 = pr.check_matrix(R)\n assert_array_equal(R, R2)\n\n R = -np.eye(3)\n assert_raises_regexp(ValueError, \"determinant\", pr.check_matrix, R)",
"def _values_of_same_type(self, val1, val2):\n if self._is_supported_matrix(val1) and self._is_supported_matrix(val2):\n return True\n else:\n return super(SparseParameter, self)._values_of_same_type(val1, val2)",
"def testMatrix(m):\n print \"Testing the spread matrix:\"\n for i in m.matrix:\n if float('%.3g' % sum(i)) != 1.000 and sum(i) != 0:\n print \"The spread is not as expected\", sum(i)\n return\n print \"Matrix is acceptable\"",
"def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')",
"def _check_transformation_matrix_homogeneity(self):\n transformation_matrices_similar = True # assume they are all similar\n first = True\n rows = None\n cols = None\n for transform in self:\n if first:\n rows = transform.rows\n cols = transform.cols\n first = False\n else:\n if transform.rows != rows or transform.cols != cols:\n transformation_matrices_similar = False\n break\n return transformation_matrices_similar, rows, cols",
"def _need_transpose(expr_matrix, adj_matrix):\n return expr_matrix.shape[1] != adj_matrix.shape[0]",
"def _is_allowed_sparse_format(matrix):\n if _spsparse.isspmatrix(matrix):\n return _spsparse.isspmatrix_csr(matrix) or _spsparse.isspmatrix_csc(matrix) or _spsparse.isspmatrix_bsr(matrix)\n else:\n return True",
"def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)",
"def __type_of_elements_correct_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int, float))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int, float))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_np_matrix():\n X = np.arange(12).reshape(3, 4)\n\n assert not isinstance(as_float_array(X), np.matrix)\n assert not isinstance(as_float_array(np.matrix(X)), np.matrix)\n assert not isinstance(as_float_array(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(atleast2d_or_csr(X), np.matrix)\n assert not isinstance(atleast2d_or_csr(np.matrix(X)), np.matrix)\n assert not isinstance(atleast2d_or_csr(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(safe_asarray(X), np.matrix)\n assert not isinstance(safe_asarray(np.matrix(X)), np.matrix)\n assert not isinstance(safe_asarray(sp.lil_matrix(X)), np.matrix)",
"def _validate_X(X):\n return X if not isinstance(X, pd.DataFrame) else X.as_matrix()",
"def __size_restriction_correct_matrix_matrix(self):\n\n strTestName = 'Matrix size equal to the size of a matrix (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizEq('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True",
"def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")",
"def _is_supported_matrix(data):\n return (\n spsp.isspmatrix_csc(data)\n or spsp.isspmatrix_csr(data)\n or spsp.isspmatrix_bsr(data)\n or spsp.isspmatrix_dia(data)\n )",
"def hastype_helper(t, model):\n if t == model:\n return True\n elif isinstance(model, type) and issubclass(model, types.Type):\n return isinstance(t, model)\n else:\n return False",
"def check_type(self):\n return True",
"def _type_check(matrix_a, matrix_b=None, cast=False):\n\n if matrix_b is None and matrix_a.dtype in NUMPY_FLOAT_DTYPES:\n return matrix_a\n elif matrix_b is None and cast:\n return _cast_to_float64(matrix_a)\n elif matrix_b is None:\n err_msg = \"Matrix data type must be float32 or float64; {a} provided\".format(a=matrix_a.dtype)\n raise ValueError(err_msg)\n\n # Check dtypes\n if matrix_a.dtype == np.float32 and matrix_b.dtype == np.float32:\n return matrix_a, matrix_b\n\n elif matrix_a.dtype == np.float64 and matrix_b.dtype == np.float64:\n return matrix_a, matrix_b\n\n elif (matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64) and cast:\n debug_print(\"Recasting matrix data types {a} and {b} to np.float64\".format(a=matrix_a.dtype, b=matrix_b.dtype))\n return _cast_to_float64(matrix_a), _cast_to_float64(matrix_b)\n\n elif matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64:\n err_msg = \"Matrix data types must be in concordance; {a} and {b} provided\".format(a=matrix_a.dtype,\n b=matrix_b.dtype)\n raise ValueError(err_msg)",
"def test_types(self):\n \n self.assertIsInstance(self.tx_data_in, numpy.ndarray)\n self.assertIsInstance(self.circuit_simulation, bool)\n self.assertIsInstance(self.bypass, bool)\n \n pass",
"def _check_eigenmatrices(self):\n if self._has(\"P\") and self._has(\"Q\") and \\\n _simplify(_expand(self._.P * self._.Q)) \\\n != self.order(expand=True, simplify=True) \\\n * identity_matrix(SR, self._.d + 1):\n warn(Warning(\"the eigenmatrices do not multiply \"\n \"into a multiple of the identity matrix\"))",
"def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))",
"def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def is_compat_col(self, col):\n return isinstance(col, DomainMatrix) and col.shape == (self.n, 1) and col.domain.is_ZZ",
"def test_import_dense_type_mat():\n x = np.random.rand(3, 2)\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def test_type_equality(self):\r\n #list of matrices\r\n myType1 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of matrices\r\n myType2 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of scalars\r\n myType3 = TypedListType(T.TensorType(theano.config.floatX,\r\n ()))\r\n\r\n self.assertTrue(myType2 == myType1)\r\n self.assertFalse(myType3 == myType1)",
"def is_matrix(self, a_list):\n if type(a_list) != list:\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for index in range(len(a_list)):\n if type(a_list[index]) != list or \\\n len(a_list[index]) != len(a_list[(index - 1)]):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for value in a_list[index]:\n if not isinstance(value, (int, float)):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n return a_list",
"def test_partition(self):\n mat = self.mat\n self.assertSequenceEqual(\n [mat.m, mat.n, mat.shape[2], mat.shape[3], mat.dtype],\n [self.m, self.n, self.p, self.q, self.dtype]\n )\n if not mat.is_active:\n self.assertSequenceEqual(\n [mat.mloc, mat.mstart, mat.mend, mat.nloc, mat.nstart, mat.nend],\n [0, 0, 0, 0, 0, 0]\n )\n else:\n pass",
"def _validate_matrix_shape(matrix: FieldMatrix, shape: Tuple[int, int]):\n if len(matrix) != shape[0]:\n raise ValueError(\n 'Invalid matrix row len = %d: not consistent with expected shape: %s.' %\n (len(matrix), shape))\n\n for m in matrix:\n if len(m) != shape[1]:\n raise ValueError(\n 'Invalid matrix col len = %d: not consistent with expected shape: %s.'\n % (len(m), shape))",
"def test3(self):\r\n a = T.matrix()\r\n self.assertTrue(None == _as_scalar(a))\r\n self.assertTrue(None == _as_scalar(T.DimShuffle([False, False],\r\n [0, 'x', 1])(a)))",
"def __type_of_elements_incorrect_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, ElementTypeError, strTestName)",
"def check_type(df: pd.DataFrame, input_output=\"\") -> Tuple[bool, str]:\n\n error_string = (\n \"should be DataFrame: The input should be a Pandas DataFrame\"\n \" representing a matrix, where every cell is one entry of the matrix.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n if not isinstance(df, pd.DataFrame):\n return False, error_string\n else:\n return True, \"\"",
"def is_rowvector(matrix):\n return is_matrix(matrix) and matrix.shape[0] == 1",
"def __verifyMatrixProperties(self, matrix, order):\n \n # Get the shape and number of dimentions of the matrix\n shape = np.shape(matrix)\n size = np.size(matrix)\n dims = len(shape)\n \n # Verify that the matrix has two dimensions\n if dims != 2:\n errmsg = (f'Invalid number of dimensions ({dims}) of {order} matrix. '\n 'Must be exactly 2.')\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix is not empty\n if size == 0:\n errmsg = f'Input for {order} matrix is empty.'\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix does not have a dimension greater than 10\n for i, dim in enumerate(shape):\n if dim > 10:\n errmsg = (f'Invalid dimension size of {dim} for dimension {i} '\n f'of {order} matrix. Must be <= 10.')\n raise MatrixOperationError(errmsg)",
"def _is_rotation_matrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6",
"def matrix_type(self, matrix_type):\n allowed_values = [\"ASYMMETRIC\", \"AUTOMATIC_DETECTION\", \"SYMMETRIC_POSITIVE_INDEFINITE\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and matrix_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `matrix_type` ({0}), must be one of {1}\" # noqa: E501\n .format(matrix_type, allowed_values)\n )\n\n self._matrix_type = matrix_type",
"def is_mat_list(list_matrices):\n flag = True\n if isinstance(list_matrices, list):\n for matrix in list_matrices:\n if not isinstance(matrix, np.matrix):\n flag = False\n # TODO Check for matrix dimensions?\n else:\n flag = False\n return flag",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, theano.tensor.TensorType):\r\n raise NotImplementedError()",
"def square_check(self):\n return len(self.matrix) == len(self.matrix[0])",
"def check_matrix(self, mat: Matrix) -> bool:\n matrix_expected_row_len = len(\n max([self.left_to_right_regexes, self.right_to_left_regexes], key=len)\n )\n matrix_row_strings = [\n ''.join(mat[i][j] for j in range(mat.columns)) for i in range(mat.rows)\n ]\n if matrix_expected_row_len != len(matrix_row_strings):\n raise ValueError(\n f'Matrix with {len(matrix_row_strings)} rows is incompatible with level of {matrix_expected_row_len} rows.'\n )\n\n matrix_expected_column_len = len(\n max([self.up_to_down_regexes, self.down_to_up_regexes], key=len)\n )\n matrix_column_strings = [\n ''.join(mat[j][i] for j in range(mat.rows)) for i in range(mat.columns)\n ]\n if matrix_expected_column_len != len(matrix_column_strings):\n raise ValueError(\n f'Matrix with {len(matrix_column_strings)} columns is incompatible with level of {matrix_expected_column_len} columns.'\n )\n\n for row, utd_regex, dtu_regex in itertools.zip_longest(\n matrix_column_strings,\n self.up_to_down_regexes,\n self.down_to_up_regexes,\n fillvalue=re.compile(''),\n ):\n if (utd_regex.pattern and re.fullmatch(utd_regex, row) is None) or (\n dtu_regex.pattern and re.fullmatch(dtu_regex, row) is None\n ):\n return False\n\n for row, ltr_regex, rtl_regex in itertools.zip_longest(\n matrix_row_strings,\n self.left_to_right_regexes,\n self.right_to_left_regexes,\n fillvalue=re.compile(''),\n ):\n if (ltr_regex.pattern and re.fullmatch(ltr_regex, row) is None) or (\n rtl_regex.pattern and re.fullmatch(rtl_regex, row) is None\n ):\n return False\n\n return True",
"def test_matrix_kv(matrix):\n assert isinstance(matrix.kv, unitdata.Storage)",
"def is_symmetric(mat):\n return np.allclose(mat.T, mat)",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, GpuArrayType):\r\n raise NotImplementedError()",
"def _supports(self, item):\n if SparseParameter._is_supported_matrix(item):\n return True\n else:\n return super(SparseResult, self)._supports(item)",
"def _check_sparse_format(spmatrix, accept_sparse=True, dtype=None,\n force_all_finite=True, context=\"\"):\n if accept_sparse in [None, False]:\n raise TypeError('%sA sparse matrix was passed, but dense '\n 'data is required. Use X.toarray() to '\n 'convert to a dense numpy array.' % context)\n if dtype is None:\n dtype = spmatrix.dtype\n\n CHANGE_FORMAT = False\n if (isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in\n accept_sparse):\n CHANGE_FORMAT = True\n\n if CHANGE_FORMAT:\n msg = (\"%sSparse format not one of recommended [format: %s]. \"\n \"Consider changing one of %r\")\n warnings.warn(msg % (context, spmatrix.format, accept_sparse),\n InputDataWarning)\n\n CHANGE_DTYPE = False\n if dtype != spmatrix.dtype:\n # convert dtype\n CHANGE_DTYPE = True\n\n if CHANGE_DTYPE:\n msg = (\"%sDtype of sparse array not the expected type [dtype: %s]. \"\n \"Consider changing to %r\")\n warnings.warn(msg % (context, spmatrix.dtype, dtype), InputDataWarning)\n\n ALL_FINITE = True\n if force_all_finite:\n if not hasattr(spmatrix, \"data\"):\n msg = \"%sCan't check %s sparse matrix for nan or inf.\"\n warnings.warn(msg % (context, spmatrix.format))\n else:\n ALL_FINITE = check_all_finite(spmatrix.data)\n\n if not ALL_FINITE:\n msg = (\"%sNot all elements in array are finite. This may cause \"\n \"estimation problems. Consider nan conversion and replacing \"\n \"infinite values.\")\n warnings.warn(msg % context, InputDataWarning)\n\n return CHANGE_DTYPE or CHANGE_FORMAT or not ALL_FINITE",
"def verify_sub_matrixes(self, matrix=None):\n local_matrix = matrix if matrix else self.matrix\n\n for i in range(len(local_matrix.matrix)):\n temp_matrix = [[]]\n for j in range(i + 1):\n for k in range(i + 1):\n temp_matrix[j].append(local_matrix.matrix[j][k])\n temp_matrix.append([])\n \n temp_matrix.remove([])\n submatrix = Matrix(temp_matrix)\n print(f\"Submatriz de {i + 1}x{i + 1}\")\n det = submatrix.get_determinant()\n print(f\"Determinante = {det}\")\n submatrix.print_matrix()\n if det == 0:\n return False\n \n return True",
"def test_to_from_matrix(self):\n # The equality is only guaranteed up to a sign\n converted = rowan.from_matrix(rowan.to_matrix(input1))\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(input1 - converted, 0),\n np.isclose(input1 + converted, 0),\n )\n )\n )",
"def f_supports(self, data):\n if self._is_supported_matrix(data):\n return True\n else:\n return super(SparseParameter, self).f_supports(data)",
"def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]",
"def _is_equal_to_atom(self, atom):\n\n return (self.type == atom.type and self.shape == atom.shape\n and self.itemsize == atom.itemsize\n and np.all(self.dflt == atom.dflt))",
"def __size_restriction_correct_matrix_number(self):\n\n strTestName = 'Matrix size higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizHE('parameter1', 13)\n\n RxCSObject.parameter1 = np.random.randn(3, 5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)",
"def __size_restriction_incorrect_matrix_matrix(self):\n\n strTestName = 'Matrix size lower than the size of a matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizL('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def test_import_type_sparsetxt():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.sparsetxt', x)\n assert x.dtype == import_data('/tmp/test.sparsetxt').dtype",
"def check_if_vec(a):\n isRowvec = False\n isNmat = False\n if not isinstance(a, FrovedisBlockcyclicMatrix):\n a = np.asarray(a)\n if a.ndim == 1:\n isRowvec = True\n else:\n isNmat = True\n return (isRowvec, isNmat)",
"def __relational_restriction_correct_NumpyMatrix_vs_number(self):\n strTestName = 'Numpy matrix higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mParameter1', 'Matrix parameter')\n RxCSObject.paramType('mParameter1', np.ndarray)\n RxCSObject.paramH('mParameter1', 0)\n\n RxCSObject.mParameter1 = np.random.randint(1, 10, (2, 2))\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_conversions_to_matrix():\n R = np.eye(3)\n R2R = pr.matrix_from(R=R)\n assert_array_almost_equal(R2R, R)\n\n a = np.array([1, 0, 0, 0])\n a2R = pr.matrix_from(a=a)\n assert_array_almost_equal(a2R, R)\n\n q = np.array([1, 0, 0, 0])\n q2R = pr.matrix_from(q=q)\n assert_array_almost_equal(q2R, R)\n\n e_xyz = np.array([0, 0, 0])\n e_xyz2R = pr.matrix_from(e_xyz=e_xyz)\n assert_array_almost_equal(e_xyz2R, R)\n\n e_zyx = np.array([0, 0, 0])\n e_zyx2R = pr.matrix_from(e_zyx=e_zyx)\n assert_array_almost_equal(e_zyx2R, R)\n\n assert_raises_regexp(ValueError, \"no rotation\", pr.matrix_from)",
"def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)",
"def is_numpy(obj):\n return 'numpy' in str(type(obj))",
"def isarray(a):\n try:\n validity=isinstance(a,ndarray)\n except:\n validity=False\n\n return validity",
"def test_transpose_mat(self):\n self.init()\n assert np.all(transpose_mat(self.i64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.fi64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.f64_2) == self.f64_2.T)\n assert np.all(transpose_mat(self.ff64_2) == self.ff64_2.T)\n assert transpose_mat(self.i64_2).dtype == 'float64'\n assert transpose_mat(self.fi64_2).dtype == 'float64'\n assert transpose_mat(self.f64_2).dtype == 'float64'\n assert transpose_mat(self.ff64_2).dtype == 'float64'\n assert transpose_mat(self.i64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.fi64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.f64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.ff64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.i64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.fi64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.f64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.ff64_2).flags['C_CONTIGUOUS'] == False",
"def only_matrices(must_print):\n\n #Extracting input.\n input = find_input()\n\n #Generates matrices. matrices = [p_matrix, reduced p_matrix]\n matrices = M.compute(input, must_print)\n\n #Storing output.\n store_output(matrices)",
"def test_format_matrix(self):\r\n a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n row_labels = ['a', 'b', 'c']\r\n col_labels = [11, 22, 33]\r\n res = format_matrix(a, row_labels, col_labels)\r\n\r\n # test as list\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)\r\n\r\n # tes as array\r\n a = array(a)\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)",
"def symmetric(matrix):\n return sp.allclose(matrix, matrix.T)",
"def _check_real_symmetric(A: np.array) -> bool:\n return np.allclose(A, A.T, atol=1e-9)",
"def check_squareness(self, Am):\r\n if len(Am) != len(Am[0]):\r\n raise ArithmeticError(\"Matrix must be square to inverse.\")",
"def test_to_matrix(self):\n v = np.copy(zero)\n with self.assertRaises(ZeroDivisionError):\n rowan.to_matrix(v)\n\n v = 2 * np.ones(4)\n with self.assertRaises(ValueError):\n rowan.to_matrix(v)\n\n v = np.copy(one)\n self.assertTrue(np.all(rowan.to_matrix(v) == np.eye(3)))\n\n v = np.copy(half)\n self.assertTrue(\n np.allclose(rowan.to_matrix(v), np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]))\n )\n\n v[3] *= -1\n self.assertTrue(\n np.allclose(\n rowan.to_matrix(v), np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n )\n )",
"def isdense(qob):\n return isinstance(qob, np.ndarray)",
"def issparse(qob):\n return isinstance(qob, sp.spmatrix)",
"def checkInput(Matrix,List):\r\n \r\n if type(Matrix) != list or type(List) != list:\r\n \r\n raise RuntimeError('malformed')\r\n for k in Matrix:\r\n if type(k) != list:\r\n \r\n raise RuntimeError('malformed')\r\n if len(k) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n for j in k:\r\n if type(j) != int and type(j) != float:\r\n \r\n raise RuntimeError('malformed')\r\n if j > 30:\r\n \r\n raise RuntimeError('malformed')\r\n for p in List:\r\n if type(p) != str:\r\n \r\n raise RuntimeError('malformed')\r\n\r\n if len(Matrix) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n return",
"def check_k_matrix_stability(self):\r\n K = self.make_k_matrix()\r\n vals, vects = scipy_sparse_eigens(K)\r\n principal_val = vals.max()\r\n print(\"ht3_solver:\\t'Stiffness' matrix principal eigenvalue was \"\r\n + str(principal_val))\r\n if principal_val > 1:\r\n print(\"##########################################################\")\r\n print(\"ht3_solver:\\tWARNING\")\r\n print(\"ht3_solver:\\tPrincipal eigenvalue is more than one.\")\r\n print(\"ht3_solver:\\tThe analysis will be unstable.\")\r\n print(\"ht3_solver:\\tIf this is OK, just go and modify the code \"\r\n + \"or something.\")\r\n print(\"##########################################################\")\r\n raise(AssertionError)",
"def check_double_matrix(mat):\n if len(mat.shape) != 2:\n sys.stderr.write(\"Invalid matrix: dimension {} not {}\\n\"\n .format(len(mat.shape), 2))\n return False\n\n # If the strides hasn't got the same number of elements, really weird\n # things happened... Let's abort in such case\n assert len(mat.strides) == len(mat.shape)\n\n if mat.itemsize != SIZEOF_DOUBLE:\n sys.stderr.write(\"Invalid matrix: item size {} not {}\\n\"\n .format(mat.itemsize, SIZEOF_DOUBLE))\n return False\n\n if mat.strides[0] < mat.strides[1] or mat.strides[1] != mat.itemsize:\n sys.stderr.write(\"Invalid strides for a C matrix: {}\\n\"\n .format(mat.strides))\n return False\n\n # If itemsize couldn't divide the stride, nothing would work...\n assert (mat.strides[0] % mat.itemsize) == 0\n\n if mat.strides[0] < mat.shape[1] * mat.strides[1]:\n sys.stderr.write(\"Too small strides for shape: {} < {}\\n\"\n .format(mat.strides[0], mat.shape[1] * mat.strides[1]))\n return False\n return True",
"def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype",
"def confirm_symmetry(mat: numpy.ndarray, symmetry: List[Any]) -> None:\n is_unity = validate_unity(symmetry[0])\n if len(symmetry) == 1 and is_unity:\n return\n build_symmetry_operations(symmetry)\n validate_matrix_symmetry(mat, symmetry)",
"def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))",
"def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric",
"def __check_signal(self, signal):\n if not(isinstance(signal, np.ndarray)):\n raise TypeError()\n if len(signal.shape) != 1:\n raise TypeError()\n if not(hasattr(self, 'dim')):\n self.dim = signal.shape[0]\n else:\n if signal.shape[0] != self.dim:\n raise TypeError()",
"def _check_column_or_1d(y, context=\"\"):\n CHANGE = False\n try:\n s = tuple(np.shape(y))\n except Exception as e:\n raise ValueError(\"%sCould not get shape of y. \"\n \"y should be an ndarray or scipy sparse csr \"\n \"/csc matrix of shape (n_samples, ). Got %s.\"\n \"Details:\\n%r\" % (context, type(y), e))\n\n if len(s) == 0:\n raise ValueError(\"%sy is empty: y = %r.\" % (context, y))\n\n if len(s) == 2 and s[1] == 1:\n CHANGE = True\n warnings.warn(\"%sA column-vector y was passed when a 1d array was\"\n \" expected. Change the shape of y to \"\n \"(n_samples, ), for example using ravel().\" % context,\n InputDataWarning)\n\n if len(s) == 2 and s[1] > 1:\n CHANGE = True\n warnings.warn(\"%sA matrix y was passed for as for labels. \"\n \"Most estimators expect a one dimensional label vector.\"\n \"Consider changing the shape of y to (n_samples, ).\" %\n context, InputDataWarning)\n\n return CHANGE",
"def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])",
"def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')",
"def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)",
"def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])",
"def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)",
"def check_design_matrix(design_matrix):\n names = [name for name in design_matrix.keys()]\n frame_times = design_matrix.index\n matrix = design_matrix.values\n return frame_times, matrix, names",
"def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")",
"def is_identity(mat, eps=None):\n if eps is None:\n eps = np.finfo(mat.dtype).eps\n\n assert mat.ndim == 2\n if mat.shape[0] != mat.shape[1]:\n return False\n\n return np.allclose(mat, np.eye(mat.shape[0]), atol=eps)",
"def test_sparsity(self):\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )",
"def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False",
"def isarray(a):\r\n try:\r\n validity = isinstance(a, ndarray)\r\n except:\r\n validity = False\r\n\r\n return validity",
"def is_invertible(matrix: np.ndarray) -> bool:\n return matrix.shape[0] == matrix.shape[1] and np.linalg.det(matrix) != 0",
"def _validate_connectivity_matrix_shape(self, connectivity_matrix):\n validate_array_ndim('connectivity matrix', connectivity_matrix, 2)\n\n for attr, axis in zip(['no_ser_neurons', 'no_gaba_neurons'], [0, 1]):\n if (\n hasattr(self.attrs, attr)\n and self.attrs[attr] != np.shape(connectivity_matrix)[axis]\n ):\n raise ValueError(\n 'Instance `no_ser_neurons`={nser} and `no_gaba_neurons`='\n '{ngaba} imply connectivity matrix of size '\n '({nser}, {ngaba}), got {cm_shape} instead.'.format(\n nser=getattr(self.attrs, 'no_ser_neurons', 'any'),\n ngaba=getattr(self.attrs, 'no_gaba_neurons', 'any'),\n cm_shape=np.shape(connectivity_matrix),\n )\n )",
"def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))",
"def assert_is_rotmat(rotmat):\n\n assert rotmat.shape == (3,3)\n np.testing.assert_array_almost_equal(np.linalg.det(rotmat), 1.0)\n np.testing.assert_array_almost_equal(rotmat.transpose(), np.linalg.inv(rotmat))",
"def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, CudaNdarrayType):\r\n raise NotImplementedError()"
] | [
"0.75150174",
"0.67877185",
"0.6370516",
"0.62093437",
"0.61971074",
"0.61142915",
"0.6034021",
"0.60186875",
"0.59572864",
"0.5932115",
"0.5802232",
"0.57511467",
"0.57066",
"0.5695186",
"0.5665314",
"0.56607133",
"0.5646371",
"0.56367624",
"0.5622803",
"0.5620641",
"0.5593088",
"0.5587037",
"0.55837977",
"0.55747634",
"0.55531716",
"0.5548002",
"0.55292165",
"0.55196416",
"0.5517354",
"0.5508062",
"0.550424",
"0.5498901",
"0.54783785",
"0.5475085",
"0.5463449",
"0.5456224",
"0.5450043",
"0.5442099",
"0.5435904",
"0.54162514",
"0.5400875",
"0.54002726",
"0.53687006",
"0.53647286",
"0.5359484",
"0.5348523",
"0.5339522",
"0.53363925",
"0.53341246",
"0.53290695",
"0.53287137",
"0.53252906",
"0.5290343",
"0.52777433",
"0.52698237",
"0.52560157",
"0.52490276",
"0.52473843",
"0.52417713",
"0.5236918",
"0.5232117",
"0.52219164",
"0.52171504",
"0.5216801",
"0.51913345",
"0.5178073",
"0.5167911",
"0.51665926",
"0.5157032",
"0.51531214",
"0.5149281",
"0.51467043",
"0.5139498",
"0.513897",
"0.513387",
"0.5130205",
"0.51221913",
"0.51214",
"0.5098216",
"0.50747937",
"0.50722885",
"0.50708866",
"0.50677526",
"0.5062261",
"0.5051118",
"0.50506145",
"0.50494045",
"0.50494015",
"0.5046994",
"0.50371206",
"0.5032708",
"0.5023289",
"0.50208586",
"0.5018706",
"0.5014096",
"0.50114495",
"0.50087655",
"0.5005096",
"0.50020325",
"0.4994158",
"0.4993709"
] | 0.0 | -1 |
Check that matrix type is preserved. | def test_matrix_a_and_b(self):
a = matrix([[1., 2.], [3., 4.]])
b = matrix([2., 1.]).T
self.do(a, b) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True",
"def verify_numpy_type(self, matrix):\n if type(matrix) != np.ndarray and matrix != None:\n return np.asfarray(matrix)\n elif type(matrix) == np.ndarray and matrix != None:\n return matrix",
"def isMatrix(M):\r\n if type(M) == matrix:\r\n return M\r\n elif type(M) == np.ndarray:\r\n return matrix(M)\r\n else:\r\n raise Exception('Unknown input format. Should be matrix or numpy array')",
"def _validate_dtype():\n\n test_array = _spsparse.random(5, 5, density=0.5, format=\"csc\", dtype=np.float32, random_state=50)\n test_comparison = test_array.A\n\n csc_ref, precision_flag = _create_mkl_sparse(test_array)\n\n try:\n csr_ref = _convert_to_csr(csc_ref)\n final_array = _export_mkl(csr_ref, precision_flag)\n if not np.allclose(test_comparison, final_array.A):\n raise ValueError(\"Match failed after matrix conversion\")\n _destroy_mkl_handle(csr_ref)\n finally:\n _destroy_mkl_handle(csc_ref)",
"def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two",
"def is_MatrixMorphism(x):\n return isinstance(x, MatrixMorphism_abstract)",
"def is_integer(matrix):\n return numpy.issubdtype(matrix.dtype, numpy.integer)",
"def test_check_matrix():\n R_list = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n R = pr.check_matrix(R_list)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_int_array = np.eye(3, dtype=int)\n R = pr.check_matrix(R_int_array)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_array = np.eye(3)\n R = pr.check_matrix(R_array)\n assert_array_equal(R_array, R)\n\n R = np.eye(4)\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix with shape\",\n pr.check_matrix, R)\n\n R = np.array([[1, 0, 0], [0, 1, 0], [0, 0.1, 1]])\n assert_raises_regexp(\n ValueError, \"inversion by transposition\", pr.check_matrix, R)\n\n R = np.array([[1, 0, 1e-16], [0, 1, 0], [0, 0, 1]])\n R2 = pr.check_matrix(R)\n assert_array_equal(R, R2)\n\n R = -np.eye(3)\n assert_raises_regexp(ValueError, \"determinant\", pr.check_matrix, R)",
"def _values_of_same_type(self, val1, val2):\n if self._is_supported_matrix(val1) and self._is_supported_matrix(val2):\n return True\n else:\n return super(SparseParameter, self)._values_of_same_type(val1, val2)",
"def testMatrix(m):\n print \"Testing the spread matrix:\"\n for i in m.matrix:\n if float('%.3g' % sum(i)) != 1.000 and sum(i) != 0:\n print \"The spread is not as expected\", sum(i)\n return\n print \"Matrix is acceptable\"",
"def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')",
"def _check_transformation_matrix_homogeneity(self):\n transformation_matrices_similar = True # assume they are all similar\n first = True\n rows = None\n cols = None\n for transform in self:\n if first:\n rows = transform.rows\n cols = transform.cols\n first = False\n else:\n if transform.rows != rows or transform.cols != cols:\n transformation_matrices_similar = False\n break\n return transformation_matrices_similar, rows, cols",
"def _need_transpose(expr_matrix, adj_matrix):\n return expr_matrix.shape[1] != adj_matrix.shape[0]",
"def _is_allowed_sparse_format(matrix):\n if _spsparse.isspmatrix(matrix):\n return _spsparse.isspmatrix_csr(matrix) or _spsparse.isspmatrix_csc(matrix) or _spsparse.isspmatrix_bsr(matrix)\n else:\n return True",
"def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)",
"def __type_of_elements_correct_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int, float))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int, float))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_np_matrix():\n X = np.arange(12).reshape(3, 4)\n\n assert not isinstance(as_float_array(X), np.matrix)\n assert not isinstance(as_float_array(np.matrix(X)), np.matrix)\n assert not isinstance(as_float_array(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(atleast2d_or_csr(X), np.matrix)\n assert not isinstance(atleast2d_or_csr(np.matrix(X)), np.matrix)\n assert not isinstance(atleast2d_or_csr(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(safe_asarray(X), np.matrix)\n assert not isinstance(safe_asarray(np.matrix(X)), np.matrix)\n assert not isinstance(safe_asarray(sp.lil_matrix(X)), np.matrix)",
"def _validate_X(X):\n return X if not isinstance(X, pd.DataFrame) else X.as_matrix()",
"def __size_restriction_correct_matrix_matrix(self):\n\n strTestName = 'Matrix size equal to the size of a matrix (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizEq('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True",
"def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")",
"def _is_supported_matrix(data):\n return (\n spsp.isspmatrix_csc(data)\n or spsp.isspmatrix_csr(data)\n or spsp.isspmatrix_bsr(data)\n or spsp.isspmatrix_dia(data)\n )",
"def hastype_helper(t, model):\n if t == model:\n return True\n elif isinstance(model, type) and issubclass(model, types.Type):\n return isinstance(t, model)\n else:\n return False",
"def check_type(self):\n return True",
"def _type_check(matrix_a, matrix_b=None, cast=False):\n\n if matrix_b is None and matrix_a.dtype in NUMPY_FLOAT_DTYPES:\n return matrix_a\n elif matrix_b is None and cast:\n return _cast_to_float64(matrix_a)\n elif matrix_b is None:\n err_msg = \"Matrix data type must be float32 or float64; {a} provided\".format(a=matrix_a.dtype)\n raise ValueError(err_msg)\n\n # Check dtypes\n if matrix_a.dtype == np.float32 and matrix_b.dtype == np.float32:\n return matrix_a, matrix_b\n\n elif matrix_a.dtype == np.float64 and matrix_b.dtype == np.float64:\n return matrix_a, matrix_b\n\n elif (matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64) and cast:\n debug_print(\"Recasting matrix data types {a} and {b} to np.float64\".format(a=matrix_a.dtype, b=matrix_b.dtype))\n return _cast_to_float64(matrix_a), _cast_to_float64(matrix_b)\n\n elif matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64:\n err_msg = \"Matrix data types must be in concordance; {a} and {b} provided\".format(a=matrix_a.dtype,\n b=matrix_b.dtype)\n raise ValueError(err_msg)",
"def test_types(self):\n \n self.assertIsInstance(self.tx_data_in, numpy.ndarray)\n self.assertIsInstance(self.circuit_simulation, bool)\n self.assertIsInstance(self.bypass, bool)\n \n pass",
"def _check_eigenmatrices(self):\n if self._has(\"P\") and self._has(\"Q\") and \\\n _simplify(_expand(self._.P * self._.Q)) \\\n != self.order(expand=True, simplify=True) \\\n * identity_matrix(SR, self._.d + 1):\n warn(Warning(\"the eigenmatrices do not multiply \"\n \"into a multiple of the identity matrix\"))",
"def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))",
"def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def is_compat_col(self, col):\n return isinstance(col, DomainMatrix) and col.shape == (self.n, 1) and col.domain.is_ZZ",
"def test_import_dense_type_mat():\n x = np.random.rand(3, 2)\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def test_type_equality(self):\r\n #list of matrices\r\n myType1 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of matrices\r\n myType2 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of scalars\r\n myType3 = TypedListType(T.TensorType(theano.config.floatX,\r\n ()))\r\n\r\n self.assertTrue(myType2 == myType1)\r\n self.assertFalse(myType3 == myType1)",
"def is_matrix(self, a_list):\n if type(a_list) != list:\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for index in range(len(a_list)):\n if type(a_list[index]) != list or \\\n len(a_list[index]) != len(a_list[(index - 1)]):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for value in a_list[index]:\n if not isinstance(value, (int, float)):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n return a_list",
"def test_partition(self):\n mat = self.mat\n self.assertSequenceEqual(\n [mat.m, mat.n, mat.shape[2], mat.shape[3], mat.dtype],\n [self.m, self.n, self.p, self.q, self.dtype]\n )\n if not mat.is_active:\n self.assertSequenceEqual(\n [mat.mloc, mat.mstart, mat.mend, mat.nloc, mat.nstart, mat.nend],\n [0, 0, 0, 0, 0, 0]\n )\n else:\n pass",
"def _validate_matrix_shape(matrix: FieldMatrix, shape: Tuple[int, int]):\n if len(matrix) != shape[0]:\n raise ValueError(\n 'Invalid matrix row len = %d: not consistent with expected shape: %s.' %\n (len(matrix), shape))\n\n for m in matrix:\n if len(m) != shape[1]:\n raise ValueError(\n 'Invalid matrix col len = %d: not consistent with expected shape: %s.'\n % (len(m), shape))",
"def test3(self):\r\n a = T.matrix()\r\n self.assertTrue(None == _as_scalar(a))\r\n self.assertTrue(None == _as_scalar(T.DimShuffle([False, False],\r\n [0, 'x', 1])(a)))",
"def __type_of_elements_incorrect_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, ElementTypeError, strTestName)",
"def check_type(df: pd.DataFrame, input_output=\"\") -> Tuple[bool, str]:\n\n error_string = (\n \"should be DataFrame: The input should be a Pandas DataFrame\"\n \" representing a matrix, where every cell is one entry of the matrix.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n if not isinstance(df, pd.DataFrame):\n return False, error_string\n else:\n return True, \"\"",
"def is_rowvector(matrix):\n return is_matrix(matrix) and matrix.shape[0] == 1",
"def __verifyMatrixProperties(self, matrix, order):\n \n # Get the shape and number of dimentions of the matrix\n shape = np.shape(matrix)\n size = np.size(matrix)\n dims = len(shape)\n \n # Verify that the matrix has two dimensions\n if dims != 2:\n errmsg = (f'Invalid number of dimensions ({dims}) of {order} matrix. '\n 'Must be exactly 2.')\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix is not empty\n if size == 0:\n errmsg = f'Input for {order} matrix is empty.'\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix does not have a dimension greater than 10\n for i, dim in enumerate(shape):\n if dim > 10:\n errmsg = (f'Invalid dimension size of {dim} for dimension {i} '\n f'of {order} matrix. Must be <= 10.')\n raise MatrixOperationError(errmsg)",
"def _is_rotation_matrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6",
"def matrix_type(self, matrix_type):\n allowed_values = [\"ASYMMETRIC\", \"AUTOMATIC_DETECTION\", \"SYMMETRIC_POSITIVE_INDEFINITE\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and matrix_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `matrix_type` ({0}), must be one of {1}\" # noqa: E501\n .format(matrix_type, allowed_values)\n )\n\n self._matrix_type = matrix_type",
"def is_mat_list(list_matrices):\n flag = True\n if isinstance(list_matrices, list):\n for matrix in list_matrices:\n if not isinstance(matrix, np.matrix):\n flag = False\n # TODO Check for matrix dimensions?\n else:\n flag = False\n return flag",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, theano.tensor.TensorType):\r\n raise NotImplementedError()",
"def square_check(self):\n return len(self.matrix) == len(self.matrix[0])",
"def check_matrix(self, mat: Matrix) -> bool:\n matrix_expected_row_len = len(\n max([self.left_to_right_regexes, self.right_to_left_regexes], key=len)\n )\n matrix_row_strings = [\n ''.join(mat[i][j] for j in range(mat.columns)) for i in range(mat.rows)\n ]\n if matrix_expected_row_len != len(matrix_row_strings):\n raise ValueError(\n f'Matrix with {len(matrix_row_strings)} rows is incompatible with level of {matrix_expected_row_len} rows.'\n )\n\n matrix_expected_column_len = len(\n max([self.up_to_down_regexes, self.down_to_up_regexes], key=len)\n )\n matrix_column_strings = [\n ''.join(mat[j][i] for j in range(mat.rows)) for i in range(mat.columns)\n ]\n if matrix_expected_column_len != len(matrix_column_strings):\n raise ValueError(\n f'Matrix with {len(matrix_column_strings)} columns is incompatible with level of {matrix_expected_column_len} columns.'\n )\n\n for row, utd_regex, dtu_regex in itertools.zip_longest(\n matrix_column_strings,\n self.up_to_down_regexes,\n self.down_to_up_regexes,\n fillvalue=re.compile(''),\n ):\n if (utd_regex.pattern and re.fullmatch(utd_regex, row) is None) or (\n dtu_regex.pattern and re.fullmatch(dtu_regex, row) is None\n ):\n return False\n\n for row, ltr_regex, rtl_regex in itertools.zip_longest(\n matrix_row_strings,\n self.left_to_right_regexes,\n self.right_to_left_regexes,\n fillvalue=re.compile(''),\n ):\n if (ltr_regex.pattern and re.fullmatch(ltr_regex, row) is None) or (\n rtl_regex.pattern and re.fullmatch(rtl_regex, row) is None\n ):\n return False\n\n return True",
"def test_matrix_kv(matrix):\n assert isinstance(matrix.kv, unitdata.Storage)",
"def is_symmetric(mat):\n return np.allclose(mat.T, mat)",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, GpuArrayType):\r\n raise NotImplementedError()",
"def _check_sparse_format(spmatrix, accept_sparse=True, dtype=None,\n force_all_finite=True, context=\"\"):\n if accept_sparse in [None, False]:\n raise TypeError('%sA sparse matrix was passed, but dense '\n 'data is required. Use X.toarray() to '\n 'convert to a dense numpy array.' % context)\n if dtype is None:\n dtype = spmatrix.dtype\n\n CHANGE_FORMAT = False\n if (isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in\n accept_sparse):\n CHANGE_FORMAT = True\n\n if CHANGE_FORMAT:\n msg = (\"%sSparse format not one of recommended [format: %s]. \"\n \"Consider changing one of %r\")\n warnings.warn(msg % (context, spmatrix.format, accept_sparse),\n InputDataWarning)\n\n CHANGE_DTYPE = False\n if dtype != spmatrix.dtype:\n # convert dtype\n CHANGE_DTYPE = True\n\n if CHANGE_DTYPE:\n msg = (\"%sDtype of sparse array not the expected type [dtype: %s]. \"\n \"Consider changing to %r\")\n warnings.warn(msg % (context, spmatrix.dtype, dtype), InputDataWarning)\n\n ALL_FINITE = True\n if force_all_finite:\n if not hasattr(spmatrix, \"data\"):\n msg = \"%sCan't check %s sparse matrix for nan or inf.\"\n warnings.warn(msg % (context, spmatrix.format))\n else:\n ALL_FINITE = check_all_finite(spmatrix.data)\n\n if not ALL_FINITE:\n msg = (\"%sNot all elements in array are finite. This may cause \"\n \"estimation problems. Consider nan conversion and replacing \"\n \"infinite values.\")\n warnings.warn(msg % context, InputDataWarning)\n\n return CHANGE_DTYPE or CHANGE_FORMAT or not ALL_FINITE",
"def _supports(self, item):\n if SparseParameter._is_supported_matrix(item):\n return True\n else:\n return super(SparseResult, self)._supports(item)",
"def verify_sub_matrixes(self, matrix=None):\n local_matrix = matrix if matrix else self.matrix\n\n for i in range(len(local_matrix.matrix)):\n temp_matrix = [[]]\n for j in range(i + 1):\n for k in range(i + 1):\n temp_matrix[j].append(local_matrix.matrix[j][k])\n temp_matrix.append([])\n \n temp_matrix.remove([])\n submatrix = Matrix(temp_matrix)\n print(f\"Submatriz de {i + 1}x{i + 1}\")\n det = submatrix.get_determinant()\n print(f\"Determinante = {det}\")\n submatrix.print_matrix()\n if det == 0:\n return False\n \n return True",
"def test_to_from_matrix(self):\n # The equality is only guaranteed up to a sign\n converted = rowan.from_matrix(rowan.to_matrix(input1))\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(input1 - converted, 0),\n np.isclose(input1 + converted, 0),\n )\n )\n )",
"def f_supports(self, data):\n if self._is_supported_matrix(data):\n return True\n else:\n return super(SparseParameter, self).f_supports(data)",
"def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]",
"def _is_equal_to_atom(self, atom):\n\n return (self.type == atom.type and self.shape == atom.shape\n and self.itemsize == atom.itemsize\n and np.all(self.dflt == atom.dflt))",
"def __size_restriction_correct_matrix_number(self):\n\n strTestName = 'Matrix size higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizHE('parameter1', 13)\n\n RxCSObject.parameter1 = np.random.randn(3, 5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)",
"def __size_restriction_incorrect_matrix_matrix(self):\n\n strTestName = 'Matrix size lower than the size of a matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizL('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def test_import_type_sparsetxt():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.sparsetxt', x)\n assert x.dtype == import_data('/tmp/test.sparsetxt').dtype",
"def check_if_vec(a):\n isRowvec = False\n isNmat = False\n if not isinstance(a, FrovedisBlockcyclicMatrix):\n a = np.asarray(a)\n if a.ndim == 1:\n isRowvec = True\n else:\n isNmat = True\n return (isRowvec, isNmat)",
"def __relational_restriction_correct_NumpyMatrix_vs_number(self):\n strTestName = 'Numpy matrix higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mParameter1', 'Matrix parameter')\n RxCSObject.paramType('mParameter1', np.ndarray)\n RxCSObject.paramH('mParameter1', 0)\n\n RxCSObject.mParameter1 = np.random.randint(1, 10, (2, 2))\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)",
"def test_conversions_to_matrix():\n R = np.eye(3)\n R2R = pr.matrix_from(R=R)\n assert_array_almost_equal(R2R, R)\n\n a = np.array([1, 0, 0, 0])\n a2R = pr.matrix_from(a=a)\n assert_array_almost_equal(a2R, R)\n\n q = np.array([1, 0, 0, 0])\n q2R = pr.matrix_from(q=q)\n assert_array_almost_equal(q2R, R)\n\n e_xyz = np.array([0, 0, 0])\n e_xyz2R = pr.matrix_from(e_xyz=e_xyz)\n assert_array_almost_equal(e_xyz2R, R)\n\n e_zyx = np.array([0, 0, 0])\n e_zyx2R = pr.matrix_from(e_zyx=e_zyx)\n assert_array_almost_equal(e_zyx2R, R)\n\n assert_raises_regexp(ValueError, \"no rotation\", pr.matrix_from)",
"def is_numpy(obj):\n return 'numpy' in str(type(obj))",
"def isarray(a):\n try:\n validity=isinstance(a,ndarray)\n except:\n validity=False\n\n return validity",
"def only_matrices(must_print):\n\n #Extracting input.\n input = find_input()\n\n #Generates matrices. matrices = [p_matrix, reduced p_matrix]\n matrices = M.compute(input, must_print)\n\n #Storing output.\n store_output(matrices)",
"def test_transpose_mat(self):\n self.init()\n assert np.all(transpose_mat(self.i64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.fi64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.f64_2) == self.f64_2.T)\n assert np.all(transpose_mat(self.ff64_2) == self.ff64_2.T)\n assert transpose_mat(self.i64_2).dtype == 'float64'\n assert transpose_mat(self.fi64_2).dtype == 'float64'\n assert transpose_mat(self.f64_2).dtype == 'float64'\n assert transpose_mat(self.ff64_2).dtype == 'float64'\n assert transpose_mat(self.i64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.fi64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.f64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.ff64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.i64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.fi64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.f64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.ff64_2).flags['C_CONTIGUOUS'] == False",
"def test_format_matrix(self):\r\n a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n row_labels = ['a', 'b', 'c']\r\n col_labels = [11, 22, 33]\r\n res = format_matrix(a, row_labels, col_labels)\r\n\r\n # test as list\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)\r\n\r\n # tes as array\r\n a = array(a)\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)",
"def symmetric(matrix):\n return sp.allclose(matrix, matrix.T)",
"def _check_real_symmetric(A: np.array) -> bool:\n return np.allclose(A, A.T, atol=1e-9)",
"def check_squareness(self, Am):\r\n if len(Am) != len(Am[0]):\r\n raise ArithmeticError(\"Matrix must be square to inverse.\")",
"def test_to_matrix(self):\n v = np.copy(zero)\n with self.assertRaises(ZeroDivisionError):\n rowan.to_matrix(v)\n\n v = 2 * np.ones(4)\n with self.assertRaises(ValueError):\n rowan.to_matrix(v)\n\n v = np.copy(one)\n self.assertTrue(np.all(rowan.to_matrix(v) == np.eye(3)))\n\n v = np.copy(half)\n self.assertTrue(\n np.allclose(rowan.to_matrix(v), np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]))\n )\n\n v[3] *= -1\n self.assertTrue(\n np.allclose(\n rowan.to_matrix(v), np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n )\n )",
"def isdense(qob):\n return isinstance(qob, np.ndarray)",
"def issparse(qob):\n return isinstance(qob, sp.spmatrix)",
"def checkInput(Matrix,List):\r\n \r\n if type(Matrix) != list or type(List) != list:\r\n \r\n raise RuntimeError('malformed')\r\n for k in Matrix:\r\n if type(k) != list:\r\n \r\n raise RuntimeError('malformed')\r\n if len(k) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n for j in k:\r\n if type(j) != int and type(j) != float:\r\n \r\n raise RuntimeError('malformed')\r\n if j > 30:\r\n \r\n raise RuntimeError('malformed')\r\n for p in List:\r\n if type(p) != str:\r\n \r\n raise RuntimeError('malformed')\r\n\r\n if len(Matrix) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n return",
"def check_double_matrix(mat):\n if len(mat.shape) != 2:\n sys.stderr.write(\"Invalid matrix: dimension {} not {}\\n\"\n .format(len(mat.shape), 2))\n return False\n\n # If the strides hasn't got the same number of elements, really weird\n # things happened... Let's abort in such case\n assert len(mat.strides) == len(mat.shape)\n\n if mat.itemsize != SIZEOF_DOUBLE:\n sys.stderr.write(\"Invalid matrix: item size {} not {}\\n\"\n .format(mat.itemsize, SIZEOF_DOUBLE))\n return False\n\n if mat.strides[0] < mat.strides[1] or mat.strides[1] != mat.itemsize:\n sys.stderr.write(\"Invalid strides for a C matrix: {}\\n\"\n .format(mat.strides))\n return False\n\n # If itemsize couldn't divide the stride, nothing would work...\n assert (mat.strides[0] % mat.itemsize) == 0\n\n if mat.strides[0] < mat.shape[1] * mat.strides[1]:\n sys.stderr.write(\"Too small strides for shape: {} < {}\\n\"\n .format(mat.strides[0], mat.shape[1] * mat.strides[1]))\n return False\n return True",
"def check_k_matrix_stability(self):\r\n K = self.make_k_matrix()\r\n vals, vects = scipy_sparse_eigens(K)\r\n principal_val = vals.max()\r\n print(\"ht3_solver:\\t'Stiffness' matrix principal eigenvalue was \"\r\n + str(principal_val))\r\n if principal_val > 1:\r\n print(\"##########################################################\")\r\n print(\"ht3_solver:\\tWARNING\")\r\n print(\"ht3_solver:\\tPrincipal eigenvalue is more than one.\")\r\n print(\"ht3_solver:\\tThe analysis will be unstable.\")\r\n print(\"ht3_solver:\\tIf this is OK, just go and modify the code \"\r\n + \"or something.\")\r\n print(\"##########################################################\")\r\n raise(AssertionError)",
"def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype",
"def confirm_symmetry(mat: numpy.ndarray, symmetry: List[Any]) -> None:\n is_unity = validate_unity(symmetry[0])\n if len(symmetry) == 1 and is_unity:\n return\n build_symmetry_operations(symmetry)\n validate_matrix_symmetry(mat, symmetry)",
"def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))",
"def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric",
"def __check_signal(self, signal):\n if not(isinstance(signal, np.ndarray)):\n raise TypeError()\n if len(signal.shape) != 1:\n raise TypeError()\n if not(hasattr(self, 'dim')):\n self.dim = signal.shape[0]\n else:\n if signal.shape[0] != self.dim:\n raise TypeError()",
"def _check_column_or_1d(y, context=\"\"):\n CHANGE = False\n try:\n s = tuple(np.shape(y))\n except Exception as e:\n raise ValueError(\"%sCould not get shape of y. \"\n \"y should be an ndarray or scipy sparse csr \"\n \"/csc matrix of shape (n_samples, ). Got %s.\"\n \"Details:\\n%r\" % (context, type(y), e))\n\n if len(s) == 0:\n raise ValueError(\"%sy is empty: y = %r.\" % (context, y))\n\n if len(s) == 2 and s[1] == 1:\n CHANGE = True\n warnings.warn(\"%sA column-vector y was passed when a 1d array was\"\n \" expected. Change the shape of y to \"\n \"(n_samples, ), for example using ravel().\" % context,\n InputDataWarning)\n\n if len(s) == 2 and s[1] > 1:\n CHANGE = True\n warnings.warn(\"%sA matrix y was passed for as for labels. \"\n \"Most estimators expect a one dimensional label vector.\"\n \"Consider changing the shape of y to (n_samples, ).\" %\n context, InputDataWarning)\n\n return CHANGE",
"def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')",
"def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])",
"def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)",
"def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])",
"def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)",
"def check_design_matrix(design_matrix):\n names = [name for name in design_matrix.keys()]\n frame_times = design_matrix.index\n matrix = design_matrix.values\n return frame_times, matrix, names",
"def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")",
"def is_identity(mat, eps=None):\n if eps is None:\n eps = np.finfo(mat.dtype).eps\n\n assert mat.ndim == 2\n if mat.shape[0] != mat.shape[1]:\n return False\n\n return np.allclose(mat, np.eye(mat.shape[0]), atol=eps)",
"def test_sparsity(self):\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )",
"def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False",
"def isarray(a):\r\n try:\r\n validity = isinstance(a, ndarray)\r\n except:\r\n validity = False\r\n\r\n return validity",
"def is_invertible(matrix: np.ndarray) -> bool:\n return matrix.shape[0] == matrix.shape[1] and np.linalg.det(matrix) != 0",
"def _validate_connectivity_matrix_shape(self, connectivity_matrix):\n validate_array_ndim('connectivity matrix', connectivity_matrix, 2)\n\n for attr, axis in zip(['no_ser_neurons', 'no_gaba_neurons'], [0, 1]):\n if (\n hasattr(self.attrs, attr)\n and self.attrs[attr] != np.shape(connectivity_matrix)[axis]\n ):\n raise ValueError(\n 'Instance `no_ser_neurons`={nser} and `no_gaba_neurons`='\n '{ngaba} imply connectivity matrix of size '\n '({nser}, {ngaba}), got {cm_shape} instead.'.format(\n nser=getattr(self.attrs, 'no_ser_neurons', 'any'),\n ngaba=getattr(self.attrs, 'no_gaba_neurons', 'any'),\n cm_shape=np.shape(connectivity_matrix),\n )\n )",
"def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))",
"def assert_is_rotmat(rotmat):\n\n assert rotmat.shape == (3,3)\n np.testing.assert_array_almost_equal(np.linalg.det(rotmat), 1.0)\n np.testing.assert_array_almost_equal(rotmat.transpose(), np.linalg.inv(rotmat))",
"def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, CudaNdarrayType):\r\n raise NotImplementedError()"
] | [
"0.7511979",
"0.6786339",
"0.6370353",
"0.6207709",
"0.6195359",
"0.6113633",
"0.60332847",
"0.60168874",
"0.5954919",
"0.593091",
"0.5800703",
"0.5748301",
"0.57073087",
"0.56955373",
"0.5663681",
"0.5658096",
"0.5645039",
"0.56355584",
"0.56208754",
"0.5619118",
"0.55896413",
"0.55861485",
"0.55827206",
"0.55718523",
"0.5551271",
"0.55450845",
"0.55267197",
"0.5517317",
"0.5516156",
"0.5505818",
"0.5503123",
"0.54962087",
"0.54768413",
"0.54730165",
"0.5462204",
"0.5453964",
"0.5447164",
"0.5440565",
"0.54355425",
"0.541412",
"0.53993154",
"0.5396825",
"0.5367669",
"0.53618807",
"0.5356726",
"0.5347793",
"0.53371394",
"0.5335615",
"0.533031",
"0.5328866",
"0.5328039",
"0.5323676",
"0.5288904",
"0.52770895",
"0.5268447",
"0.5254202",
"0.52473557",
"0.52449125",
"0.5239463",
"0.5237024",
"0.52302474",
"0.5220615",
"0.5215354",
"0.521518",
"0.5190707",
"0.5178269",
"0.516642",
"0.5166052",
"0.5156337",
"0.5153266",
"0.51482975",
"0.5145041",
"0.5137847",
"0.5137343",
"0.5133654",
"0.5128586",
"0.51209766",
"0.5120919",
"0.5097296",
"0.50750613",
"0.5070846",
"0.50691867",
"0.50646365",
"0.50611806",
"0.5049473",
"0.5049251",
"0.5048314",
"0.5047829",
"0.50466263",
"0.5033994",
"0.5031608",
"0.5022509",
"0.5020492",
"0.50169104",
"0.501446",
"0.50118107",
"0.50069803",
"0.5003245",
"0.5001395",
"0.4991792",
"0.49901626"
] | 0.0 | -1 |
Check that matrix type is preserved. | def test_matrix_b_only(self):
a = array([[1., 2.], [2., 1.]])
self.do(a, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True",
"def verify_numpy_type(self, matrix):\n if type(matrix) != np.ndarray and matrix != None:\n return np.asfarray(matrix)\n elif type(matrix) == np.ndarray and matrix != None:\n return matrix",
"def isMatrix(M):\r\n if type(M) == matrix:\r\n return M\r\n elif type(M) == np.ndarray:\r\n return matrix(M)\r\n else:\r\n raise Exception('Unknown input format. Should be matrix or numpy array')",
"def _validate_dtype():\n\n test_array = _spsparse.random(5, 5, density=0.5, format=\"csc\", dtype=np.float32, random_state=50)\n test_comparison = test_array.A\n\n csc_ref, precision_flag = _create_mkl_sparse(test_array)\n\n try:\n csr_ref = _convert_to_csr(csc_ref)\n final_array = _export_mkl(csr_ref, precision_flag)\n if not np.allclose(test_comparison, final_array.A):\n raise ValueError(\"Match failed after matrix conversion\")\n _destroy_mkl_handle(csr_ref)\n finally:\n _destroy_mkl_handle(csc_ref)",
"def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two",
"def is_MatrixMorphism(x):\n return isinstance(x, MatrixMorphism_abstract)",
"def is_integer(matrix):\n return numpy.issubdtype(matrix.dtype, numpy.integer)",
"def test_check_matrix():\n R_list = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n R = pr.check_matrix(R_list)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_int_array = np.eye(3, dtype=int)\n R = pr.check_matrix(R_int_array)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_array = np.eye(3)\n R = pr.check_matrix(R_array)\n assert_array_equal(R_array, R)\n\n R = np.eye(4)\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix with shape\",\n pr.check_matrix, R)\n\n R = np.array([[1, 0, 0], [0, 1, 0], [0, 0.1, 1]])\n assert_raises_regexp(\n ValueError, \"inversion by transposition\", pr.check_matrix, R)\n\n R = np.array([[1, 0, 1e-16], [0, 1, 0], [0, 0, 1]])\n R2 = pr.check_matrix(R)\n assert_array_equal(R, R2)\n\n R = -np.eye(3)\n assert_raises_regexp(ValueError, \"determinant\", pr.check_matrix, R)",
"def _values_of_same_type(self, val1, val2):\n if self._is_supported_matrix(val1) and self._is_supported_matrix(val2):\n return True\n else:\n return super(SparseParameter, self)._values_of_same_type(val1, val2)",
"def testMatrix(m):\n print \"Testing the spread matrix:\"\n for i in m.matrix:\n if float('%.3g' % sum(i)) != 1.000 and sum(i) != 0:\n print \"The spread is not as expected\", sum(i)\n return\n print \"Matrix is acceptable\"",
"def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')",
"def _check_transformation_matrix_homogeneity(self):\n transformation_matrices_similar = True # assume they are all similar\n first = True\n rows = None\n cols = None\n for transform in self:\n if first:\n rows = transform.rows\n cols = transform.cols\n first = False\n else:\n if transform.rows != rows or transform.cols != cols:\n transformation_matrices_similar = False\n break\n return transformation_matrices_similar, rows, cols",
"def _need_transpose(expr_matrix, adj_matrix):\n return expr_matrix.shape[1] != adj_matrix.shape[0]",
"def _is_allowed_sparse_format(matrix):\n if _spsparse.isspmatrix(matrix):\n return _spsparse.isspmatrix_csr(matrix) or _spsparse.isspmatrix_csc(matrix) or _spsparse.isspmatrix_bsr(matrix)\n else:\n return True",
"def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)",
"def __type_of_elements_correct_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int, float))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int, float))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_np_matrix():\n X = np.arange(12).reshape(3, 4)\n\n assert not isinstance(as_float_array(X), np.matrix)\n assert not isinstance(as_float_array(np.matrix(X)), np.matrix)\n assert not isinstance(as_float_array(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(atleast2d_or_csr(X), np.matrix)\n assert not isinstance(atleast2d_or_csr(np.matrix(X)), np.matrix)\n assert not isinstance(atleast2d_or_csr(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(safe_asarray(X), np.matrix)\n assert not isinstance(safe_asarray(np.matrix(X)), np.matrix)\n assert not isinstance(safe_asarray(sp.lil_matrix(X)), np.matrix)",
"def _validate_X(X):\n return X if not isinstance(X, pd.DataFrame) else X.as_matrix()",
"def __size_restriction_correct_matrix_matrix(self):\n\n strTestName = 'Matrix size equal to the size of a matrix (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizEq('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True",
"def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")",
"def _is_supported_matrix(data):\n return (\n spsp.isspmatrix_csc(data)\n or spsp.isspmatrix_csr(data)\n or spsp.isspmatrix_bsr(data)\n or spsp.isspmatrix_dia(data)\n )",
"def hastype_helper(t, model):\n if t == model:\n return True\n elif isinstance(model, type) and issubclass(model, types.Type):\n return isinstance(t, model)\n else:\n return False",
"def check_type(self):\n return True",
"def _type_check(matrix_a, matrix_b=None, cast=False):\n\n if matrix_b is None and matrix_a.dtype in NUMPY_FLOAT_DTYPES:\n return matrix_a\n elif matrix_b is None and cast:\n return _cast_to_float64(matrix_a)\n elif matrix_b is None:\n err_msg = \"Matrix data type must be float32 or float64; {a} provided\".format(a=matrix_a.dtype)\n raise ValueError(err_msg)\n\n # Check dtypes\n if matrix_a.dtype == np.float32 and matrix_b.dtype == np.float32:\n return matrix_a, matrix_b\n\n elif matrix_a.dtype == np.float64 and matrix_b.dtype == np.float64:\n return matrix_a, matrix_b\n\n elif (matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64) and cast:\n debug_print(\"Recasting matrix data types {a} and {b} to np.float64\".format(a=matrix_a.dtype, b=matrix_b.dtype))\n return _cast_to_float64(matrix_a), _cast_to_float64(matrix_b)\n\n elif matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64:\n err_msg = \"Matrix data types must be in concordance; {a} and {b} provided\".format(a=matrix_a.dtype,\n b=matrix_b.dtype)\n raise ValueError(err_msg)",
"def test_types(self):\n \n self.assertIsInstance(self.tx_data_in, numpy.ndarray)\n self.assertIsInstance(self.circuit_simulation, bool)\n self.assertIsInstance(self.bypass, bool)\n \n pass",
"def _check_eigenmatrices(self):\n if self._has(\"P\") and self._has(\"Q\") and \\\n _simplify(_expand(self._.P * self._.Q)) \\\n != self.order(expand=True, simplify=True) \\\n * identity_matrix(SR, self._.d + 1):\n warn(Warning(\"the eigenmatrices do not multiply \"\n \"into a multiple of the identity matrix\"))",
"def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))",
"def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def is_compat_col(self, col):\n return isinstance(col, DomainMatrix) and col.shape == (self.n, 1) and col.domain.is_ZZ",
"def test_import_dense_type_mat():\n x = np.random.rand(3, 2)\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def test_type_equality(self):\r\n #list of matrices\r\n myType1 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of matrices\r\n myType2 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of scalars\r\n myType3 = TypedListType(T.TensorType(theano.config.floatX,\r\n ()))\r\n\r\n self.assertTrue(myType2 == myType1)\r\n self.assertFalse(myType3 == myType1)",
"def is_matrix(self, a_list):\n if type(a_list) != list:\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for index in range(len(a_list)):\n if type(a_list[index]) != list or \\\n len(a_list[index]) != len(a_list[(index - 1)]):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for value in a_list[index]:\n if not isinstance(value, (int, float)):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n return a_list",
"def test_partition(self):\n mat = self.mat\n self.assertSequenceEqual(\n [mat.m, mat.n, mat.shape[2], mat.shape[3], mat.dtype],\n [self.m, self.n, self.p, self.q, self.dtype]\n )\n if not mat.is_active:\n self.assertSequenceEqual(\n [mat.mloc, mat.mstart, mat.mend, mat.nloc, mat.nstart, mat.nend],\n [0, 0, 0, 0, 0, 0]\n )\n else:\n pass",
"def _validate_matrix_shape(matrix: FieldMatrix, shape: Tuple[int, int]):\n if len(matrix) != shape[0]:\n raise ValueError(\n 'Invalid matrix row len = %d: not consistent with expected shape: %s.' %\n (len(matrix), shape))\n\n for m in matrix:\n if len(m) != shape[1]:\n raise ValueError(\n 'Invalid matrix col len = %d: not consistent with expected shape: %s.'\n % (len(m), shape))",
"def test3(self):\r\n a = T.matrix()\r\n self.assertTrue(None == _as_scalar(a))\r\n self.assertTrue(None == _as_scalar(T.DimShuffle([False, False],\r\n [0, 'x', 1])(a)))",
"def __type_of_elements_incorrect_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, ElementTypeError, strTestName)",
"def check_type(df: pd.DataFrame, input_output=\"\") -> Tuple[bool, str]:\n\n error_string = (\n \"should be DataFrame: The input should be a Pandas DataFrame\"\n \" representing a matrix, where every cell is one entry of the matrix.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n if not isinstance(df, pd.DataFrame):\n return False, error_string\n else:\n return True, \"\"",
"def is_rowvector(matrix):\n return is_matrix(matrix) and matrix.shape[0] == 1",
"def __verifyMatrixProperties(self, matrix, order):\n \n # Get the shape and number of dimentions of the matrix\n shape = np.shape(matrix)\n size = np.size(matrix)\n dims = len(shape)\n \n # Verify that the matrix has two dimensions\n if dims != 2:\n errmsg = (f'Invalid number of dimensions ({dims}) of {order} matrix. '\n 'Must be exactly 2.')\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix is not empty\n if size == 0:\n errmsg = f'Input for {order} matrix is empty.'\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix does not have a dimension greater than 10\n for i, dim in enumerate(shape):\n if dim > 10:\n errmsg = (f'Invalid dimension size of {dim} for dimension {i} '\n f'of {order} matrix. Must be <= 10.')\n raise MatrixOperationError(errmsg)",
"def _is_rotation_matrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6",
"def matrix_type(self, matrix_type):\n allowed_values = [\"ASYMMETRIC\", \"AUTOMATIC_DETECTION\", \"SYMMETRIC_POSITIVE_INDEFINITE\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and matrix_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `matrix_type` ({0}), must be one of {1}\" # noqa: E501\n .format(matrix_type, allowed_values)\n )\n\n self._matrix_type = matrix_type",
"def is_mat_list(list_matrices):\n flag = True\n if isinstance(list_matrices, list):\n for matrix in list_matrices:\n if not isinstance(matrix, np.matrix):\n flag = False\n # TODO Check for matrix dimensions?\n else:\n flag = False\n return flag",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, theano.tensor.TensorType):\r\n raise NotImplementedError()",
"def square_check(self):\n return len(self.matrix) == len(self.matrix[0])",
"def check_matrix(self, mat: Matrix) -> bool:\n matrix_expected_row_len = len(\n max([self.left_to_right_regexes, self.right_to_left_regexes], key=len)\n )\n matrix_row_strings = [\n ''.join(mat[i][j] for j in range(mat.columns)) for i in range(mat.rows)\n ]\n if matrix_expected_row_len != len(matrix_row_strings):\n raise ValueError(\n f'Matrix with {len(matrix_row_strings)} rows is incompatible with level of {matrix_expected_row_len} rows.'\n )\n\n matrix_expected_column_len = len(\n max([self.up_to_down_regexes, self.down_to_up_regexes], key=len)\n )\n matrix_column_strings = [\n ''.join(mat[j][i] for j in range(mat.rows)) for i in range(mat.columns)\n ]\n if matrix_expected_column_len != len(matrix_column_strings):\n raise ValueError(\n f'Matrix with {len(matrix_column_strings)} columns is incompatible with level of {matrix_expected_column_len} columns.'\n )\n\n for row, utd_regex, dtu_regex in itertools.zip_longest(\n matrix_column_strings,\n self.up_to_down_regexes,\n self.down_to_up_regexes,\n fillvalue=re.compile(''),\n ):\n if (utd_regex.pattern and re.fullmatch(utd_regex, row) is None) or (\n dtu_regex.pattern and re.fullmatch(dtu_regex, row) is None\n ):\n return False\n\n for row, ltr_regex, rtl_regex in itertools.zip_longest(\n matrix_row_strings,\n self.left_to_right_regexes,\n self.right_to_left_regexes,\n fillvalue=re.compile(''),\n ):\n if (ltr_regex.pattern and re.fullmatch(ltr_regex, row) is None) or (\n rtl_regex.pattern and re.fullmatch(rtl_regex, row) is None\n ):\n return False\n\n return True",
"def test_matrix_kv(matrix):\n assert isinstance(matrix.kv, unitdata.Storage)",
"def is_symmetric(mat):\n return np.allclose(mat.T, mat)",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, GpuArrayType):\r\n raise NotImplementedError()",
"def _check_sparse_format(spmatrix, accept_sparse=True, dtype=None,\n force_all_finite=True, context=\"\"):\n if accept_sparse in [None, False]:\n raise TypeError('%sA sparse matrix was passed, but dense '\n 'data is required. Use X.toarray() to '\n 'convert to a dense numpy array.' % context)\n if dtype is None:\n dtype = spmatrix.dtype\n\n CHANGE_FORMAT = False\n if (isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in\n accept_sparse):\n CHANGE_FORMAT = True\n\n if CHANGE_FORMAT:\n msg = (\"%sSparse format not one of recommended [format: %s]. \"\n \"Consider changing one of %r\")\n warnings.warn(msg % (context, spmatrix.format, accept_sparse),\n InputDataWarning)\n\n CHANGE_DTYPE = False\n if dtype != spmatrix.dtype:\n # convert dtype\n CHANGE_DTYPE = True\n\n if CHANGE_DTYPE:\n msg = (\"%sDtype of sparse array not the expected type [dtype: %s]. \"\n \"Consider changing to %r\")\n warnings.warn(msg % (context, spmatrix.dtype, dtype), InputDataWarning)\n\n ALL_FINITE = True\n if force_all_finite:\n if not hasattr(spmatrix, \"data\"):\n msg = \"%sCan't check %s sparse matrix for nan or inf.\"\n warnings.warn(msg % (context, spmatrix.format))\n else:\n ALL_FINITE = check_all_finite(spmatrix.data)\n\n if not ALL_FINITE:\n msg = (\"%sNot all elements in array are finite. This may cause \"\n \"estimation problems. Consider nan conversion and replacing \"\n \"infinite values.\")\n warnings.warn(msg % context, InputDataWarning)\n\n return CHANGE_DTYPE or CHANGE_FORMAT or not ALL_FINITE",
"def _supports(self, item):\n if SparseParameter._is_supported_matrix(item):\n return True\n else:\n return super(SparseResult, self)._supports(item)",
"def verify_sub_matrixes(self, matrix=None):\n local_matrix = matrix if matrix else self.matrix\n\n for i in range(len(local_matrix.matrix)):\n temp_matrix = [[]]\n for j in range(i + 1):\n for k in range(i + 1):\n temp_matrix[j].append(local_matrix.matrix[j][k])\n temp_matrix.append([])\n \n temp_matrix.remove([])\n submatrix = Matrix(temp_matrix)\n print(f\"Submatriz de {i + 1}x{i + 1}\")\n det = submatrix.get_determinant()\n print(f\"Determinante = {det}\")\n submatrix.print_matrix()\n if det == 0:\n return False\n \n return True",
"def test_to_from_matrix(self):\n # The equality is only guaranteed up to a sign\n converted = rowan.from_matrix(rowan.to_matrix(input1))\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(input1 - converted, 0),\n np.isclose(input1 + converted, 0),\n )\n )\n )",
"def f_supports(self, data):\n if self._is_supported_matrix(data):\n return True\n else:\n return super(SparseParameter, self).f_supports(data)",
"def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]",
"def _is_equal_to_atom(self, atom):\n\n return (self.type == atom.type and self.shape == atom.shape\n and self.itemsize == atom.itemsize\n and np.all(self.dflt == atom.dflt))",
"def __size_restriction_correct_matrix_number(self):\n\n strTestName = 'Matrix size higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizHE('parameter1', 13)\n\n RxCSObject.parameter1 = np.random.randn(3, 5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)",
"def __size_restriction_incorrect_matrix_matrix(self):\n\n strTestName = 'Matrix size lower than the size of a matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizL('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def test_import_type_sparsetxt():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.sparsetxt', x)\n assert x.dtype == import_data('/tmp/test.sparsetxt').dtype",
"def check_if_vec(a):\n isRowvec = False\n isNmat = False\n if not isinstance(a, FrovedisBlockcyclicMatrix):\n a = np.asarray(a)\n if a.ndim == 1:\n isRowvec = True\n else:\n isNmat = True\n return (isRowvec, isNmat)",
"def __relational_restriction_correct_NumpyMatrix_vs_number(self):\n strTestName = 'Numpy matrix higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mParameter1', 'Matrix parameter')\n RxCSObject.paramType('mParameter1', np.ndarray)\n RxCSObject.paramH('mParameter1', 0)\n\n RxCSObject.mParameter1 = np.random.randint(1, 10, (2, 2))\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)",
"def test_conversions_to_matrix():\n R = np.eye(3)\n R2R = pr.matrix_from(R=R)\n assert_array_almost_equal(R2R, R)\n\n a = np.array([1, 0, 0, 0])\n a2R = pr.matrix_from(a=a)\n assert_array_almost_equal(a2R, R)\n\n q = np.array([1, 0, 0, 0])\n q2R = pr.matrix_from(q=q)\n assert_array_almost_equal(q2R, R)\n\n e_xyz = np.array([0, 0, 0])\n e_xyz2R = pr.matrix_from(e_xyz=e_xyz)\n assert_array_almost_equal(e_xyz2R, R)\n\n e_zyx = np.array([0, 0, 0])\n e_zyx2R = pr.matrix_from(e_zyx=e_zyx)\n assert_array_almost_equal(e_zyx2R, R)\n\n assert_raises_regexp(ValueError, \"no rotation\", pr.matrix_from)",
"def is_numpy(obj):\n return 'numpy' in str(type(obj))",
"def isarray(a):\n try:\n validity=isinstance(a,ndarray)\n except:\n validity=False\n\n return validity",
"def only_matrices(must_print):\n\n #Extracting input.\n input = find_input()\n\n #Generates matrices. matrices = [p_matrix, reduced p_matrix]\n matrices = M.compute(input, must_print)\n\n #Storing output.\n store_output(matrices)",
"def test_transpose_mat(self):\n self.init()\n assert np.all(transpose_mat(self.i64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.fi64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.f64_2) == self.f64_2.T)\n assert np.all(transpose_mat(self.ff64_2) == self.ff64_2.T)\n assert transpose_mat(self.i64_2).dtype == 'float64'\n assert transpose_mat(self.fi64_2).dtype == 'float64'\n assert transpose_mat(self.f64_2).dtype == 'float64'\n assert transpose_mat(self.ff64_2).dtype == 'float64'\n assert transpose_mat(self.i64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.fi64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.f64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.ff64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.i64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.fi64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.f64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.ff64_2).flags['C_CONTIGUOUS'] == False",
"def test_format_matrix(self):\r\n a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n row_labels = ['a', 'b', 'c']\r\n col_labels = [11, 22, 33]\r\n res = format_matrix(a, row_labels, col_labels)\r\n\r\n # test as list\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)\r\n\r\n # tes as array\r\n a = array(a)\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)",
"def symmetric(matrix):\n return sp.allclose(matrix, matrix.T)",
"def _check_real_symmetric(A: np.array) -> bool:\n return np.allclose(A, A.T, atol=1e-9)",
"def check_squareness(self, Am):\r\n if len(Am) != len(Am[0]):\r\n raise ArithmeticError(\"Matrix must be square to inverse.\")",
"def test_to_matrix(self):\n v = np.copy(zero)\n with self.assertRaises(ZeroDivisionError):\n rowan.to_matrix(v)\n\n v = 2 * np.ones(4)\n with self.assertRaises(ValueError):\n rowan.to_matrix(v)\n\n v = np.copy(one)\n self.assertTrue(np.all(rowan.to_matrix(v) == np.eye(3)))\n\n v = np.copy(half)\n self.assertTrue(\n np.allclose(rowan.to_matrix(v), np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]))\n )\n\n v[3] *= -1\n self.assertTrue(\n np.allclose(\n rowan.to_matrix(v), np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n )\n )",
"def isdense(qob):\n return isinstance(qob, np.ndarray)",
"def issparse(qob):\n return isinstance(qob, sp.spmatrix)",
"def checkInput(Matrix,List):\r\n \r\n if type(Matrix) != list or type(List) != list:\r\n \r\n raise RuntimeError('malformed')\r\n for k in Matrix:\r\n if type(k) != list:\r\n \r\n raise RuntimeError('malformed')\r\n if len(k) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n for j in k:\r\n if type(j) != int and type(j) != float:\r\n \r\n raise RuntimeError('malformed')\r\n if j > 30:\r\n \r\n raise RuntimeError('malformed')\r\n for p in List:\r\n if type(p) != str:\r\n \r\n raise RuntimeError('malformed')\r\n\r\n if len(Matrix) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n return",
"def check_double_matrix(mat):\n if len(mat.shape) != 2:\n sys.stderr.write(\"Invalid matrix: dimension {} not {}\\n\"\n .format(len(mat.shape), 2))\n return False\n\n # If the strides hasn't got the same number of elements, really weird\n # things happened... Let's abort in such case\n assert len(mat.strides) == len(mat.shape)\n\n if mat.itemsize != SIZEOF_DOUBLE:\n sys.stderr.write(\"Invalid matrix: item size {} not {}\\n\"\n .format(mat.itemsize, SIZEOF_DOUBLE))\n return False\n\n if mat.strides[0] < mat.strides[1] or mat.strides[1] != mat.itemsize:\n sys.stderr.write(\"Invalid strides for a C matrix: {}\\n\"\n .format(mat.strides))\n return False\n\n # If itemsize couldn't divide the stride, nothing would work...\n assert (mat.strides[0] % mat.itemsize) == 0\n\n if mat.strides[0] < mat.shape[1] * mat.strides[1]:\n sys.stderr.write(\"Too small strides for shape: {} < {}\\n\"\n .format(mat.strides[0], mat.shape[1] * mat.strides[1]))\n return False\n return True",
"def check_k_matrix_stability(self):\r\n K = self.make_k_matrix()\r\n vals, vects = scipy_sparse_eigens(K)\r\n principal_val = vals.max()\r\n print(\"ht3_solver:\\t'Stiffness' matrix principal eigenvalue was \"\r\n + str(principal_val))\r\n if principal_val > 1:\r\n print(\"##########################################################\")\r\n print(\"ht3_solver:\\tWARNING\")\r\n print(\"ht3_solver:\\tPrincipal eigenvalue is more than one.\")\r\n print(\"ht3_solver:\\tThe analysis will be unstable.\")\r\n print(\"ht3_solver:\\tIf this is OK, just go and modify the code \"\r\n + \"or something.\")\r\n print(\"##########################################################\")\r\n raise(AssertionError)",
"def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype",
"def confirm_symmetry(mat: numpy.ndarray, symmetry: List[Any]) -> None:\n is_unity = validate_unity(symmetry[0])\n if len(symmetry) == 1 and is_unity:\n return\n build_symmetry_operations(symmetry)\n validate_matrix_symmetry(mat, symmetry)",
"def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))",
"def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric",
"def __check_signal(self, signal):\n if not(isinstance(signal, np.ndarray)):\n raise TypeError()\n if len(signal.shape) != 1:\n raise TypeError()\n if not(hasattr(self, 'dim')):\n self.dim = signal.shape[0]\n else:\n if signal.shape[0] != self.dim:\n raise TypeError()",
"def _check_column_or_1d(y, context=\"\"):\n CHANGE = False\n try:\n s = tuple(np.shape(y))\n except Exception as e:\n raise ValueError(\"%sCould not get shape of y. \"\n \"y should be an ndarray or scipy sparse csr \"\n \"/csc matrix of shape (n_samples, ). Got %s.\"\n \"Details:\\n%r\" % (context, type(y), e))\n\n if len(s) == 0:\n raise ValueError(\"%sy is empty: y = %r.\" % (context, y))\n\n if len(s) == 2 and s[1] == 1:\n CHANGE = True\n warnings.warn(\"%sA column-vector y was passed when a 1d array was\"\n \" expected. Change the shape of y to \"\n \"(n_samples, ), for example using ravel().\" % context,\n InputDataWarning)\n\n if len(s) == 2 and s[1] > 1:\n CHANGE = True\n warnings.warn(\"%sA matrix y was passed for as for labels. \"\n \"Most estimators expect a one dimensional label vector.\"\n \"Consider changing the shape of y to (n_samples, ).\" %\n context, InputDataWarning)\n\n return CHANGE",
"def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')",
"def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])",
"def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)",
"def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])",
"def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)",
"def check_design_matrix(design_matrix):\n names = [name for name in design_matrix.keys()]\n frame_times = design_matrix.index\n matrix = design_matrix.values\n return frame_times, matrix, names",
"def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")",
"def is_identity(mat, eps=None):\n if eps is None:\n eps = np.finfo(mat.dtype).eps\n\n assert mat.ndim == 2\n if mat.shape[0] != mat.shape[1]:\n return False\n\n return np.allclose(mat, np.eye(mat.shape[0]), atol=eps)",
"def test_sparsity(self):\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )",
"def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False",
"def isarray(a):\r\n try:\r\n validity = isinstance(a, ndarray)\r\n except:\r\n validity = False\r\n\r\n return validity",
"def is_invertible(matrix: np.ndarray) -> bool:\n return matrix.shape[0] == matrix.shape[1] and np.linalg.det(matrix) != 0",
"def _validate_connectivity_matrix_shape(self, connectivity_matrix):\n validate_array_ndim('connectivity matrix', connectivity_matrix, 2)\n\n for attr, axis in zip(['no_ser_neurons', 'no_gaba_neurons'], [0, 1]):\n if (\n hasattr(self.attrs, attr)\n and self.attrs[attr] != np.shape(connectivity_matrix)[axis]\n ):\n raise ValueError(\n 'Instance `no_ser_neurons`={nser} and `no_gaba_neurons`='\n '{ngaba} imply connectivity matrix of size '\n '({nser}, {ngaba}), got {cm_shape} instead.'.format(\n nser=getattr(self.attrs, 'no_ser_neurons', 'any'),\n ngaba=getattr(self.attrs, 'no_gaba_neurons', 'any'),\n cm_shape=np.shape(connectivity_matrix),\n )\n )",
"def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))",
"def assert_is_rotmat(rotmat):\n\n assert rotmat.shape == (3,3)\n np.testing.assert_array_almost_equal(np.linalg.det(rotmat), 1.0)\n np.testing.assert_array_almost_equal(rotmat.transpose(), np.linalg.inv(rotmat))",
"def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, CudaNdarrayType):\r\n raise NotImplementedError()"
] | [
"0.7511979",
"0.6786339",
"0.6370353",
"0.6207709",
"0.6195359",
"0.6113633",
"0.60332847",
"0.60168874",
"0.5954919",
"0.593091",
"0.5800703",
"0.5748301",
"0.57073087",
"0.56955373",
"0.5663681",
"0.5658096",
"0.5645039",
"0.56355584",
"0.56208754",
"0.5619118",
"0.55896413",
"0.55861485",
"0.55827206",
"0.55718523",
"0.5551271",
"0.55450845",
"0.55267197",
"0.5517317",
"0.5516156",
"0.5505818",
"0.5503123",
"0.54962087",
"0.54768413",
"0.54730165",
"0.5462204",
"0.5453964",
"0.5447164",
"0.5440565",
"0.54355425",
"0.541412",
"0.53993154",
"0.5396825",
"0.5367669",
"0.53618807",
"0.5356726",
"0.5347793",
"0.53371394",
"0.5335615",
"0.533031",
"0.5328866",
"0.5328039",
"0.5323676",
"0.5288904",
"0.52770895",
"0.5268447",
"0.5254202",
"0.52473557",
"0.52449125",
"0.5239463",
"0.5237024",
"0.52302474",
"0.5220615",
"0.5215354",
"0.521518",
"0.5190707",
"0.5178269",
"0.516642",
"0.5166052",
"0.5156337",
"0.5153266",
"0.51482975",
"0.5145041",
"0.5137847",
"0.5137343",
"0.5133654",
"0.5128586",
"0.51209766",
"0.5120919",
"0.5097296",
"0.50750613",
"0.5070846",
"0.50691867",
"0.50646365",
"0.50611806",
"0.5049473",
"0.5049251",
"0.5048314",
"0.5047829",
"0.50466263",
"0.5033994",
"0.5031608",
"0.5022509",
"0.5020492",
"0.50169104",
"0.501446",
"0.50118107",
"0.50069803",
"0.5003245",
"0.5001395",
"0.4991792",
"0.49901626"
] | 0.0 | -1 |
Check that matrix type is preserved. | def test_matrix_a_and_b(self):
a = matrix([[1., 2.], [2., 1.]])
self.do(a, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True",
"def verify_numpy_type(self, matrix):\n if type(matrix) != np.ndarray and matrix != None:\n return np.asfarray(matrix)\n elif type(matrix) == np.ndarray and matrix != None:\n return matrix",
"def isMatrix(M):\r\n if type(M) == matrix:\r\n return M\r\n elif type(M) == np.ndarray:\r\n return matrix(M)\r\n else:\r\n raise Exception('Unknown input format. Should be matrix or numpy array')",
"def _validate_dtype():\n\n test_array = _spsparse.random(5, 5, density=0.5, format=\"csc\", dtype=np.float32, random_state=50)\n test_comparison = test_array.A\n\n csc_ref, precision_flag = _create_mkl_sparse(test_array)\n\n try:\n csr_ref = _convert_to_csr(csc_ref)\n final_array = _export_mkl(csr_ref, precision_flag)\n if not np.allclose(test_comparison, final_array.A):\n raise ValueError(\"Match failed after matrix conversion\")\n _destroy_mkl_handle(csr_ref)\n finally:\n _destroy_mkl_handle(csc_ref)",
"def _check_matrix(self, x, *args):\n if self._special and x.determinant() != 1:\n raise TypeError('matrix must have determinant one')\n F = self.invariant_bilinear_form()\n if x * F * x.transpose() != F:\n raise TypeError('matrix must be orthogonal with respect to the invariant form')\n # TODO: check that quadratic form is preserved in characteristic two",
"def is_MatrixMorphism(x):\n return isinstance(x, MatrixMorphism_abstract)",
"def is_integer(matrix):\n return numpy.issubdtype(matrix.dtype, numpy.integer)",
"def test_check_matrix():\n R_list = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n R = pr.check_matrix(R_list)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_int_array = np.eye(3, dtype=int)\n R = pr.check_matrix(R_int_array)\n assert_equal(type(R), np.ndarray)\n assert_equal(R.dtype, np.float64)\n\n R_array = np.eye(3)\n R = pr.check_matrix(R_array)\n assert_array_equal(R_array, R)\n\n R = np.eye(4)\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix with shape\",\n pr.check_matrix, R)\n\n R = np.array([[1, 0, 0], [0, 1, 0], [0, 0.1, 1]])\n assert_raises_regexp(\n ValueError, \"inversion by transposition\", pr.check_matrix, R)\n\n R = np.array([[1, 0, 1e-16], [0, 1, 0], [0, 0, 1]])\n R2 = pr.check_matrix(R)\n assert_array_equal(R, R2)\n\n R = -np.eye(3)\n assert_raises_regexp(ValueError, \"determinant\", pr.check_matrix, R)",
"def _values_of_same_type(self, val1, val2):\n if self._is_supported_matrix(val1) and self._is_supported_matrix(val2):\n return True\n else:\n return super(SparseParameter, self)._values_of_same_type(val1, val2)",
"def testMatrix(m):\n print \"Testing the spread matrix:\"\n for i in m.matrix:\n if float('%.3g' % sum(i)) != 1.000 and sum(i) != 0:\n print \"The spread is not as expected\", sum(i)\n return\n print \"Matrix is acceptable\"",
"def correct_type(data):\n\n if all(data[col].dtypes == data.dtypes[0] for col in data.columns):\n if all(data[col].isnull().sum() == 0 for col in data.columns):\n print('All columns have values of the correct type.')\n else:\n print('Bad result.')",
"def _check_transformation_matrix_homogeneity(self):\n transformation_matrices_similar = True # assume they are all similar\n first = True\n rows = None\n cols = None\n for transform in self:\n if first:\n rows = transform.rows\n cols = transform.cols\n first = False\n else:\n if transform.rows != rows or transform.cols != cols:\n transformation_matrices_similar = False\n break\n return transformation_matrices_similar, rows, cols",
"def _need_transpose(expr_matrix, adj_matrix):\n return expr_matrix.shape[1] != adj_matrix.shape[0]",
"def _is_allowed_sparse_format(matrix):\n if _spsparse.isspmatrix(matrix):\n return _spsparse.isspmatrix_csr(matrix) or _spsparse.isspmatrix_csc(matrix) or _spsparse.isspmatrix_bsr(matrix)\n else:\n return True",
"def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)",
"def __type_of_elements_correct_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int, float))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int, float))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_np_matrix():\n X = np.arange(12).reshape(3, 4)\n\n assert not isinstance(as_float_array(X), np.matrix)\n assert not isinstance(as_float_array(np.matrix(X)), np.matrix)\n assert not isinstance(as_float_array(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(atleast2d_or_csr(X), np.matrix)\n assert not isinstance(atleast2d_or_csr(np.matrix(X)), np.matrix)\n assert not isinstance(atleast2d_or_csr(sp.csc_matrix(X)), np.matrix)\n\n assert not isinstance(safe_asarray(X), np.matrix)\n assert not isinstance(safe_asarray(np.matrix(X)), np.matrix)\n assert not isinstance(safe_asarray(sp.lil_matrix(X)), np.matrix)",
"def _validate_X(X):\n return X if not isinstance(X, pd.DataFrame) else X.as_matrix()",
"def __size_restriction_correct_matrix_matrix(self):\n\n strTestName = 'Matrix size equal to the size of a matrix (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizEq('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True",
"def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")",
"def _is_supported_matrix(data):\n return (\n spsp.isspmatrix_csc(data)\n or spsp.isspmatrix_csr(data)\n or spsp.isspmatrix_bsr(data)\n or spsp.isspmatrix_dia(data)\n )",
"def hastype_helper(t, model):\n if t == model:\n return True\n elif isinstance(model, type) and issubclass(model, types.Type):\n return isinstance(t, model)\n else:\n return False",
"def check_type(self):\n return True",
"def _type_check(matrix_a, matrix_b=None, cast=False):\n\n if matrix_b is None and matrix_a.dtype in NUMPY_FLOAT_DTYPES:\n return matrix_a\n elif matrix_b is None and cast:\n return _cast_to_float64(matrix_a)\n elif matrix_b is None:\n err_msg = \"Matrix data type must be float32 or float64; {a} provided\".format(a=matrix_a.dtype)\n raise ValueError(err_msg)\n\n # Check dtypes\n if matrix_a.dtype == np.float32 and matrix_b.dtype == np.float32:\n return matrix_a, matrix_b\n\n elif matrix_a.dtype == np.float64 and matrix_b.dtype == np.float64:\n return matrix_a, matrix_b\n\n elif (matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64) and cast:\n debug_print(\"Recasting matrix data types {a} and {b} to np.float64\".format(a=matrix_a.dtype, b=matrix_b.dtype))\n return _cast_to_float64(matrix_a), _cast_to_float64(matrix_b)\n\n elif matrix_a.dtype != np.float64 or matrix_b.dtype != np.float64:\n err_msg = \"Matrix data types must be in concordance; {a} and {b} provided\".format(a=matrix_a.dtype,\n b=matrix_b.dtype)\n raise ValueError(err_msg)",
"def test_types(self):\n \n self.assertIsInstance(self.tx_data_in, numpy.ndarray)\n self.assertIsInstance(self.circuit_simulation, bool)\n self.assertIsInstance(self.bypass, bool)\n \n pass",
"def _check_eigenmatrices(self):\n if self._has(\"P\") and self._has(\"Q\") and \\\n _simplify(_expand(self._.P * self._.Q)) \\\n != self.order(expand=True, simplify=True) \\\n * identity_matrix(SR, self._.d + 1):\n warn(Warning(\"the eigenmatrices do not multiply \"\n \"into a multiple of the identity matrix\"))",
"def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))",
"def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def is_compat_col(self, col):\n return isinstance(col, DomainMatrix) and col.shape == (self.n, 1) and col.domain.is_ZZ",
"def test_import_dense_type_mat():\n x = np.random.rand(3, 2)\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype",
"def test_type_equality(self):\r\n #list of matrices\r\n myType1 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of matrices\r\n myType2 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of scalars\r\n myType3 = TypedListType(T.TensorType(theano.config.floatX,\r\n ()))\r\n\r\n self.assertTrue(myType2 == myType1)\r\n self.assertFalse(myType3 == myType1)",
"def is_matrix(self, a_list):\n if type(a_list) != list:\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for index in range(len(a_list)):\n if type(a_list[index]) != list or \\\n len(a_list[index]) != len(a_list[(index - 1)]):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n else:\n for value in a_list[index]:\n if not isinstance(value, (int, float)):\n raise ValueError(\n \"Must make Matrix w/list of numerical lists\")\n return a_list",
"def test_partition(self):\n mat = self.mat\n self.assertSequenceEqual(\n [mat.m, mat.n, mat.shape[2], mat.shape[3], mat.dtype],\n [self.m, self.n, self.p, self.q, self.dtype]\n )\n if not mat.is_active:\n self.assertSequenceEqual(\n [mat.mloc, mat.mstart, mat.mend, mat.nloc, mat.nstart, mat.nend],\n [0, 0, 0, 0, 0, 0]\n )\n else:\n pass",
"def _validate_matrix_shape(matrix: FieldMatrix, shape: Tuple[int, int]):\n if len(matrix) != shape[0]:\n raise ValueError(\n 'Invalid matrix row len = %d: not consistent with expected shape: %s.' %\n (len(matrix), shape))\n\n for m in matrix:\n if len(m) != shape[1]:\n raise ValueError(\n 'Invalid matrix col len = %d: not consistent with expected shape: %s.'\n % (len(m), shape))",
"def test3(self):\r\n a = T.matrix()\r\n self.assertTrue(None == _as_scalar(a))\r\n self.assertTrue(None == _as_scalar(T.DimShuffle([False, False],\r\n [0, 'x', 1])(a)))",
"def __type_of_elements_incorrect_long_Numpy_matrix(self):\n strTestName = 'Float elements in a Numpy matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy matrix #1')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramTypeEl('parameter1', (int))\n\n RxCSObject.paramAddMan('parameter2', 'Numpy vector #2')\n RxCSObject.paramType('parameter2', np.ndarray)\n RxCSObject.paramTypeEl('parameter2', (int))\n\n RxCSObject.parameter1 = np.random.randn(1e2,1e2)\n RxCSObject.parameter2 = np.random.randn(1e2,1e2)\n\n self.__parametersCheck_error(RxCSObject, ElementTypeError, strTestName)",
"def check_type(df: pd.DataFrame, input_output=\"\") -> Tuple[bool, str]:\n\n error_string = (\n \"should be DataFrame: The input should be a Pandas DataFrame\"\n \" representing a matrix, where every cell is one entry of the matrix.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n if not isinstance(df, pd.DataFrame):\n return False, error_string\n else:\n return True, \"\"",
"def is_rowvector(matrix):\n return is_matrix(matrix) and matrix.shape[0] == 1",
"def __verifyMatrixProperties(self, matrix, order):\n \n # Get the shape and number of dimentions of the matrix\n shape = np.shape(matrix)\n size = np.size(matrix)\n dims = len(shape)\n \n # Verify that the matrix has two dimensions\n if dims != 2:\n errmsg = (f'Invalid number of dimensions ({dims}) of {order} matrix. '\n 'Must be exactly 2.')\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix is not empty\n if size == 0:\n errmsg = f'Input for {order} matrix is empty.'\n raise MatrixOperationError(errmsg)\n\n # Verify that the matrix does not have a dimension greater than 10\n for i, dim in enumerate(shape):\n if dim > 10:\n errmsg = (f'Invalid dimension size of {dim} for dimension {i} '\n f'of {order} matrix. Must be <= 10.')\n raise MatrixOperationError(errmsg)",
"def _is_rotation_matrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6",
"def matrix_type(self, matrix_type):\n allowed_values = [\"ASYMMETRIC\", \"AUTOMATIC_DETECTION\", \"SYMMETRIC_POSITIVE_INDEFINITE\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and matrix_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `matrix_type` ({0}), must be one of {1}\" # noqa: E501\n .format(matrix_type, allowed_values)\n )\n\n self._matrix_type = matrix_type",
"def is_mat_list(list_matrices):\n flag = True\n if isinstance(list_matrices, list):\n for matrix in list_matrices:\n if not isinstance(matrix, np.matrix):\n flag = False\n # TODO Check for matrix dimensions?\n else:\n flag = False\n return flag",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, theano.tensor.TensorType):\r\n raise NotImplementedError()",
"def square_check(self):\n return len(self.matrix) == len(self.matrix[0])",
"def check_matrix(self, mat: Matrix) -> bool:\n matrix_expected_row_len = len(\n max([self.left_to_right_regexes, self.right_to_left_regexes], key=len)\n )\n matrix_row_strings = [\n ''.join(mat[i][j] for j in range(mat.columns)) for i in range(mat.rows)\n ]\n if matrix_expected_row_len != len(matrix_row_strings):\n raise ValueError(\n f'Matrix with {len(matrix_row_strings)} rows is incompatible with level of {matrix_expected_row_len} rows.'\n )\n\n matrix_expected_column_len = len(\n max([self.up_to_down_regexes, self.down_to_up_regexes], key=len)\n )\n matrix_column_strings = [\n ''.join(mat[j][i] for j in range(mat.rows)) for i in range(mat.columns)\n ]\n if matrix_expected_column_len != len(matrix_column_strings):\n raise ValueError(\n f'Matrix with {len(matrix_column_strings)} columns is incompatible with level of {matrix_expected_column_len} columns.'\n )\n\n for row, utd_regex, dtu_regex in itertools.zip_longest(\n matrix_column_strings,\n self.up_to_down_regexes,\n self.down_to_up_regexes,\n fillvalue=re.compile(''),\n ):\n if (utd_regex.pattern and re.fullmatch(utd_regex, row) is None) or (\n dtu_regex.pattern and re.fullmatch(dtu_regex, row) is None\n ):\n return False\n\n for row, ltr_regex, rtl_regex in itertools.zip_longest(\n matrix_row_strings,\n self.left_to_right_regexes,\n self.right_to_left_regexes,\n fillvalue=re.compile(''),\n ):\n if (ltr_regex.pattern and re.fullmatch(ltr_regex, row) is None) or (\n rtl_regex.pattern and re.fullmatch(rtl_regex, row) is None\n ):\n return False\n\n return True",
"def test_matrix_kv(matrix):\n assert isinstance(matrix.kv, unitdata.Storage)",
"def is_symmetric(mat):\n return np.allclose(mat.T, mat)",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, GpuArrayType):\r\n raise NotImplementedError()",
"def _check_sparse_format(spmatrix, accept_sparse=True, dtype=None,\n force_all_finite=True, context=\"\"):\n if accept_sparse in [None, False]:\n raise TypeError('%sA sparse matrix was passed, but dense '\n 'data is required. Use X.toarray() to '\n 'convert to a dense numpy array.' % context)\n if dtype is None:\n dtype = spmatrix.dtype\n\n CHANGE_FORMAT = False\n if (isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in\n accept_sparse):\n CHANGE_FORMAT = True\n\n if CHANGE_FORMAT:\n msg = (\"%sSparse format not one of recommended [format: %s]. \"\n \"Consider changing one of %r\")\n warnings.warn(msg % (context, spmatrix.format, accept_sparse),\n InputDataWarning)\n\n CHANGE_DTYPE = False\n if dtype != spmatrix.dtype:\n # convert dtype\n CHANGE_DTYPE = True\n\n if CHANGE_DTYPE:\n msg = (\"%sDtype of sparse array not the expected type [dtype: %s]. \"\n \"Consider changing to %r\")\n warnings.warn(msg % (context, spmatrix.dtype, dtype), InputDataWarning)\n\n ALL_FINITE = True\n if force_all_finite:\n if not hasattr(spmatrix, \"data\"):\n msg = \"%sCan't check %s sparse matrix for nan or inf.\"\n warnings.warn(msg % (context, spmatrix.format))\n else:\n ALL_FINITE = check_all_finite(spmatrix.data)\n\n if not ALL_FINITE:\n msg = (\"%sNot all elements in array are finite. This may cause \"\n \"estimation problems. Consider nan conversion and replacing \"\n \"infinite values.\")\n warnings.warn(msg % context, InputDataWarning)\n\n return CHANGE_DTYPE or CHANGE_FORMAT or not ALL_FINITE",
"def _supports(self, item):\n if SparseParameter._is_supported_matrix(item):\n return True\n else:\n return super(SparseResult, self)._supports(item)",
"def verify_sub_matrixes(self, matrix=None):\n local_matrix = matrix if matrix else self.matrix\n\n for i in range(len(local_matrix.matrix)):\n temp_matrix = [[]]\n for j in range(i + 1):\n for k in range(i + 1):\n temp_matrix[j].append(local_matrix.matrix[j][k])\n temp_matrix.append([])\n \n temp_matrix.remove([])\n submatrix = Matrix(temp_matrix)\n print(f\"Submatriz de {i + 1}x{i + 1}\")\n det = submatrix.get_determinant()\n print(f\"Determinante = {det}\")\n submatrix.print_matrix()\n if det == 0:\n return False\n \n return True",
"def test_to_from_matrix(self):\n # The equality is only guaranteed up to a sign\n converted = rowan.from_matrix(rowan.to_matrix(input1))\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(input1 - converted, 0),\n np.isclose(input1 + converted, 0),\n )\n )\n )",
"def f_supports(self, data):\n if self._is_supported_matrix(data):\n return True\n else:\n return super(SparseParameter, self).f_supports(data)",
"def is_square(matrix):\n return is_matrix(matrix) and matrix.shape[0] == matrix.shape[1]",
"def _is_equal_to_atom(self, atom):\n\n return (self.type == atom.type and self.shape == atom.shape\n and self.itemsize == atom.itemsize\n and np.all(self.dflt == atom.dflt))",
"def __size_restriction_correct_matrix_number(self):\n\n strTestName = 'Matrix size higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizHE('parameter1', 13)\n\n RxCSObject.parameter1 = np.random.randn(3, 5)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)",
"def __size_restriction_incorrect_matrix_matrix(self):\n\n strTestName = 'Matrix size lower than the size of a matrix (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('mRefParameter1', 'String ref. parameter')\n RxCSObject.paramType('mRefParameter1', np.ndarray)\n\n # Now, let us define a matrix\n RxCSObject.paramAddMan('parameter1', 'Numpy array 2D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizL('parameter1', 'mRefParameter1')\n\n RxCSObject.mRefParameter1 = np.random.randn(2, 2)\n RxCSObject.parameter1 = np.random.randn(2, 2)\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def test_import_type_sparsetxt():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.sparsetxt', x)\n assert x.dtype == import_data('/tmp/test.sparsetxt').dtype",
"def check_if_vec(a):\n isRowvec = False\n isNmat = False\n if not isinstance(a, FrovedisBlockcyclicMatrix):\n a = np.asarray(a)\n if a.ndim == 1:\n isRowvec = True\n else:\n isNmat = True\n return (isRowvec, isNmat)",
"def __relational_restriction_correct_NumpyMatrix_vs_number(self):\n strTestName = 'Numpy matrix higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mParameter1', 'Matrix parameter')\n RxCSObject.paramType('mParameter1', np.ndarray)\n RxCSObject.paramH('mParameter1', 0)\n\n RxCSObject.mParameter1 = np.random.randint(1, 10, (2, 2))\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_conversions_to_matrix():\n R = np.eye(3)\n R2R = pr.matrix_from(R=R)\n assert_array_almost_equal(R2R, R)\n\n a = np.array([1, 0, 0, 0])\n a2R = pr.matrix_from(a=a)\n assert_array_almost_equal(a2R, R)\n\n q = np.array([1, 0, 0, 0])\n q2R = pr.matrix_from(q=q)\n assert_array_almost_equal(q2R, R)\n\n e_xyz = np.array([0, 0, 0])\n e_xyz2R = pr.matrix_from(e_xyz=e_xyz)\n assert_array_almost_equal(e_xyz2R, R)\n\n e_zyx = np.array([0, 0, 0])\n e_zyx2R = pr.matrix_from(e_zyx=e_zyx)\n assert_array_almost_equal(e_zyx2R, R)\n\n assert_raises_regexp(ValueError, \"no rotation\", pr.matrix_from)",
"def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)",
"def is_numpy(obj):\n return 'numpy' in str(type(obj))",
"def isarray(a):\n try:\n validity=isinstance(a,ndarray)\n except:\n validity=False\n\n return validity",
"def test_transpose_mat(self):\n self.init()\n assert np.all(transpose_mat(self.i64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.fi64_2) == self.i64_2.T)\n assert np.all(transpose_mat(self.f64_2) == self.f64_2.T)\n assert np.all(transpose_mat(self.ff64_2) == self.ff64_2.T)\n assert transpose_mat(self.i64_2).dtype == 'float64'\n assert transpose_mat(self.fi64_2).dtype == 'float64'\n assert transpose_mat(self.f64_2).dtype == 'float64'\n assert transpose_mat(self.ff64_2).dtype == 'float64'\n assert transpose_mat(self.i64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.fi64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.f64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.ff64_2).flags['F_CONTIGUOUS'] == True\n assert transpose_mat(self.i64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.fi64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.f64_2).flags['C_CONTIGUOUS'] == False\n assert transpose_mat(self.ff64_2).flags['C_CONTIGUOUS'] == False",
"def only_matrices(must_print):\n\n #Extracting input.\n input = find_input()\n\n #Generates matrices. matrices = [p_matrix, reduced p_matrix]\n matrices = M.compute(input, must_print)\n\n #Storing output.\n store_output(matrices)",
"def test_format_matrix(self):\r\n a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n row_labels = ['a', 'b', 'c']\r\n col_labels = [11, 22, 33]\r\n res = format_matrix(a, row_labels, col_labels)\r\n\r\n # test as list\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)\r\n\r\n # tes as array\r\n a = array(a)\r\n self.assertEqual(res,\r\n '\\t11\\t22\\t33\\na\\t1\\t2\\t3\\nb\\t4\\t5\\t6\\nc\\t7\\t8\\t9')\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n a,\r\n row_labels[:2],\r\n col_labels)\r\n self.assertRaises(\r\n ValueError,\r\n format_matrix,\r\n None,\r\n row_labels,\r\n col_labels)",
"def symmetric(matrix):\n return sp.allclose(matrix, matrix.T)",
"def _check_real_symmetric(A: np.array) -> bool:\n return np.allclose(A, A.T, atol=1e-9)",
"def check_squareness(self, Am):\r\n if len(Am) != len(Am[0]):\r\n raise ArithmeticError(\"Matrix must be square to inverse.\")",
"def test_to_matrix(self):\n v = np.copy(zero)\n with self.assertRaises(ZeroDivisionError):\n rowan.to_matrix(v)\n\n v = 2 * np.ones(4)\n with self.assertRaises(ValueError):\n rowan.to_matrix(v)\n\n v = np.copy(one)\n self.assertTrue(np.all(rowan.to_matrix(v) == np.eye(3)))\n\n v = np.copy(half)\n self.assertTrue(\n np.allclose(rowan.to_matrix(v), np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]]))\n )\n\n v[3] *= -1\n self.assertTrue(\n np.allclose(\n rowan.to_matrix(v), np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n )\n )",
"def isdense(qob):\n return isinstance(qob, np.ndarray)",
"def issparse(qob):\n return isinstance(qob, sp.spmatrix)",
"def checkInput(Matrix,List):\r\n \r\n if type(Matrix) != list or type(List) != list:\r\n \r\n raise RuntimeError('malformed')\r\n for k in Matrix:\r\n if type(k) != list:\r\n \r\n raise RuntimeError('malformed')\r\n if len(k) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n for j in k:\r\n if type(j) != int and type(j) != float:\r\n \r\n raise RuntimeError('malformed')\r\n if j > 30:\r\n \r\n raise RuntimeError('malformed')\r\n for p in List:\r\n if type(p) != str:\r\n \r\n raise RuntimeError('malformed')\r\n\r\n if len(Matrix) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n return",
"def check_double_matrix(mat):\n if len(mat.shape) != 2:\n sys.stderr.write(\"Invalid matrix: dimension {} not {}\\n\"\n .format(len(mat.shape), 2))\n return False\n\n # If the strides hasn't got the same number of elements, really weird\n # things happened... Let's abort in such case\n assert len(mat.strides) == len(mat.shape)\n\n if mat.itemsize != SIZEOF_DOUBLE:\n sys.stderr.write(\"Invalid matrix: item size {} not {}\\n\"\n .format(mat.itemsize, SIZEOF_DOUBLE))\n return False\n\n if mat.strides[0] < mat.strides[1] or mat.strides[1] != mat.itemsize:\n sys.stderr.write(\"Invalid strides for a C matrix: {}\\n\"\n .format(mat.strides))\n return False\n\n # If itemsize couldn't divide the stride, nothing would work...\n assert (mat.strides[0] % mat.itemsize) == 0\n\n if mat.strides[0] < mat.shape[1] * mat.strides[1]:\n sys.stderr.write(\"Too small strides for shape: {} < {}\\n\"\n .format(mat.strides[0], mat.shape[1] * mat.strides[1]))\n return False\n return True",
"def check_k_matrix_stability(self):\r\n K = self.make_k_matrix()\r\n vals, vects = scipy_sparse_eigens(K)\r\n principal_val = vals.max()\r\n print(\"ht3_solver:\\t'Stiffness' matrix principal eigenvalue was \"\r\n + str(principal_val))\r\n if principal_val > 1:\r\n print(\"##########################################################\")\r\n print(\"ht3_solver:\\tWARNING\")\r\n print(\"ht3_solver:\\tPrincipal eigenvalue is more than one.\")\r\n print(\"ht3_solver:\\tThe analysis will be unstable.\")\r\n print(\"ht3_solver:\\tIf this is OK, just go and modify the code \"\r\n + \"or something.\")\r\n print(\"##########################################################\")\r\n raise(AssertionError)",
"def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype",
"def confirm_symmetry(mat: numpy.ndarray, symmetry: List[Any]) -> None:\n is_unity = validate_unity(symmetry[0])\n if len(symmetry) == 1 and is_unity:\n return\n build_symmetry_operations(symmetry)\n validate_matrix_symmetry(mat, symmetry)",
"def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric",
"def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))",
"def __check_signal(self, signal):\n if not(isinstance(signal, np.ndarray)):\n raise TypeError()\n if len(signal.shape) != 1:\n raise TypeError()\n if not(hasattr(self, 'dim')):\n self.dim = signal.shape[0]\n else:\n if signal.shape[0] != self.dim:\n raise TypeError()",
"def _check_column_or_1d(y, context=\"\"):\n CHANGE = False\n try:\n s = tuple(np.shape(y))\n except Exception as e:\n raise ValueError(\"%sCould not get shape of y. \"\n \"y should be an ndarray or scipy sparse csr \"\n \"/csc matrix of shape (n_samples, ). Got %s.\"\n \"Details:\\n%r\" % (context, type(y), e))\n\n if len(s) == 0:\n raise ValueError(\"%sy is empty: y = %r.\" % (context, y))\n\n if len(s) == 2 and s[1] == 1:\n CHANGE = True\n warnings.warn(\"%sA column-vector y was passed when a 1d array was\"\n \" expected. Change the shape of y to \"\n \"(n_samples, ), for example using ravel().\" % context,\n InputDataWarning)\n\n if len(s) == 2 and s[1] > 1:\n CHANGE = True\n warnings.warn(\"%sA matrix y was passed for as for labels. \"\n \"Most estimators expect a one dimensional label vector.\"\n \"Consider changing the shape of y to (n_samples, ).\" %\n context, InputDataWarning)\n\n return CHANGE",
"def checktypestest(chosen_df):\n for i in chosen_df:\n if not chosen_df.dtypes[1] == chosen_df.dtypes[i]:\n raise ValueError('Types do not match')",
"def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])",
"def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)",
"def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])",
"def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)",
"def check_design_matrix(design_matrix):\n names = [name for name in design_matrix.keys()]\n frame_times = design_matrix.index\n matrix = design_matrix.values\n return frame_times, matrix, names",
"def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")",
"def is_identity(mat, eps=None):\n if eps is None:\n eps = np.finfo(mat.dtype).eps\n\n assert mat.ndim == 2\n if mat.shape[0] != mat.shape[1]:\n return False\n\n return np.allclose(mat, np.eye(mat.shape[0]), atol=eps)",
"def test_sparsity(self):\n\n self.validator.adata.X = self.validator.adata.X.toarray()\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: Sparsity of 'X' is 0.875 which is greater than 0.5, \"\n \"and it is not a 'scipy.sparse.csr_matrix'. It is \"\n \"STRONGLY RECOMMENDED to use this type of matrix for \"\n \"the given sparsity.\"\n ],\n )",
"def is_valid(self) -> bool:\r\n try:\r\n self.shape\r\n return True\r\n except ValueError:\r\n return False",
"def isarray(a):\r\n try:\r\n validity = isinstance(a, ndarray)\r\n except:\r\n validity = False\r\n\r\n return validity",
"def is_invertible(matrix: np.ndarray) -> bool:\n return matrix.shape[0] == matrix.shape[1] and np.linalg.det(matrix) != 0",
"def _validate_connectivity_matrix_shape(self, connectivity_matrix):\n validate_array_ndim('connectivity matrix', connectivity_matrix, 2)\n\n for attr, axis in zip(['no_ser_neurons', 'no_gaba_neurons'], [0, 1]):\n if (\n hasattr(self.attrs, attr)\n and self.attrs[attr] != np.shape(connectivity_matrix)[axis]\n ):\n raise ValueError(\n 'Instance `no_ser_neurons`={nser} and `no_gaba_neurons`='\n '{ngaba} imply connectivity matrix of size '\n '({nser}, {ngaba}), got {cm_shape} instead.'.format(\n nser=getattr(self.attrs, 'no_ser_neurons', 'any'),\n ngaba=getattr(self.attrs, 'no_gaba_neurons', 'any'),\n cm_shape=np.shape(connectivity_matrix),\n )\n )",
"def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))",
"def assert_is_rotmat(rotmat):\n\n assert rotmat.shape == (3,3)\n np.testing.assert_array_almost_equal(np.linalg.det(rotmat), 1.0)\n np.testing.assert_array_almost_equal(rotmat.transpose(), np.linalg.inv(rotmat))",
"def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, CudaNdarrayType):\r\n raise NotImplementedError()",
"def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True"
] | [
"0.7513969",
"0.6788523",
"0.6372158",
"0.6208938",
"0.6196592",
"0.6115402",
"0.60343564",
"0.6018366",
"0.5956586",
"0.5932612",
"0.58029556",
"0.57486594",
"0.570607",
"0.5696255",
"0.5663594",
"0.56593513",
"0.564664",
"0.5636785",
"0.5621079",
"0.5619576",
"0.5590659",
"0.5587903",
"0.5583932",
"0.55744684",
"0.5553423",
"0.5547999",
"0.5527378",
"0.55190825",
"0.55174947",
"0.550662",
"0.5504467",
"0.5497828",
"0.5478532",
"0.5473597",
"0.54627687",
"0.54541516",
"0.54490227",
"0.54423296",
"0.5436204",
"0.54151386",
"0.5399243",
"0.5398737",
"0.53689075",
"0.53642064",
"0.53569466",
"0.5348835",
"0.5338463",
"0.5335733",
"0.5333373",
"0.53302675",
"0.53299683",
"0.5323965",
"0.5289715",
"0.52785534",
"0.5268619",
"0.52555025",
"0.5246927",
"0.5246348",
"0.524021",
"0.5237892",
"0.5231285",
"0.5220822",
"0.5217007",
"0.52166516",
"0.5191682",
"0.517918",
"0.5166032",
"0.516541",
"0.5157465",
"0.5153048",
"0.5148005",
"0.51457703",
"0.5138726",
"0.5137814",
"0.5135138",
"0.51305175",
"0.5120691",
"0.51202905",
"0.50983465",
"0.50754136",
"0.5070791",
"0.50705427",
"0.5066336",
"0.5060914",
"0.5051628",
"0.50498706",
"0.50497186",
"0.50491",
"0.50482553",
"0.5036309",
"0.50329655",
"0.5023079",
"0.5020586",
"0.50173634",
"0.5015172",
"0.50117874",
"0.5007597",
"0.50047284",
"0.5001705",
"0.4993182",
"0.49919426"
] | 0.0 | -1 |
Loops through each page within a single PDB and sums up the stats of each page to arrive at the overall total. | def analyze(directory, pdf_file, doc_type):
total_redaction_count = 0
total_redacted_text_area = 0
total_estimated_text_area = 0
total_estimated_num_words_redacted = 0
# Split the pdb (which is a pdf file) into individual jpgs.
redaction_module.pdf_to_jpg(directory, pdf_file)
os.chdir(directory)
for jpg_file in os.listdir(directory):
# Iterating through each page of the PDB
if jpg_file.endswith(".jpg"):
[redaction_count, redacted_text_area, estimated_text_area, estimated_num_words_redacted, potential, text_potential, type1, type2, type3] = redaction_module.image_processing(jpg_file, doc_type)
total_redaction_count += redaction_count
total_redacted_text_area += redacted_text_area
total_estimated_text_area += estimated_text_area
total_estimated_num_words_redacted += estimated_num_words_redacted
# Crucial clean-up of jpg files (Note: If files are not removed, code will NOT work properly).
os.remove(jpg_file)
# Now that we've gone through each page, we need to calculate the stats for the document.
if total_estimated_text_area != 0:
total_percent_text_redacted = float(total_redacted_text_area / total_estimated_text_area)
else:
total_percent_text_redacted = 0
data = []
# open csv file and write the stats in a single row representing the document.
with open('output.csv', mode='a+') as output:
output_writer = csv.writer(output, delimiter=',')
row = [pdf_file, total_redaction_count, total_percent_text_redacted, total_estimated_num_words_redacted]
data.append(row)
print(tabulate(data, headers=[" ", " ", " ", " ", " "]))
output_writer.writerow(row)
output.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loop_example():\n\n totals = []\n\n for row in poke_stats:\n totals.append(sum(row))\n \n return(totals)",
"def stats_page():\n import alltheitems.stats\n return alltheitems.stats.index()",
"def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)",
"def scrape_central(page):\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find(\"table\", {\"class\" : \"ez1\"})\n rows = table.findAll('tr')\n page = int(table.find('tr', {'class': 'black'}).span.text)\n\n data_page = []\n for row in rows[1:]:\n item = {}\n cols = row.findAll('td')\n\n if len(cols) == 38:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[35].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[34].text.strip()\n item['date_tor_apply'] = cols[24].text.strip()\n item['date_tor_granted'] = cols[27].text.strip()\n item['date_ec_receipt'] = cols[24].text.strip()\n item['date_ec_granted'] = cols[33].text.strip()\n clearance = cols[37].findAll('img', {'src': 'images/ec.png'})\n tor = cols[37].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[37].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[37].findAll('img', {'src': 'images/forms.png'})\n com = cols[37].findAll('img', {'src': 'images/com.png'})\n mon = cols[37].findAll('img', {'src': 'images/mon.png'})\n add = cols[37].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n\n if len(cols) == 29:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[26].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[25].text.strip()\n item['date_tor_apply'] = None\n item['date_tor_granted'] = None\n item['date_ec_receipt'] = None\n item['date_ec_granted'] = cols[24].text.strip()\n clearance = cols[28].findAll('img', {'src': 'images/ec.png'})\n tor = cols[28].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[28].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[28].findAll('img', {'src': 'images/forms.png'})\n com = cols[28].findAll('img', {'src': 'images/com.png'})\n mon = cols[28].findAll('img', {'src': 'images/mon.png'})\n add = cols[28].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n return data_page",
"def main():\n href_list = fundlist.get_fund_list()\n\n single_values = None\n asset_allocations = None\n geo_allocations = None\n sector_allocations = None\n top10_holdings = None\n\n for href in href_list:\n url = 'http://idata.fundata.com' + href\n fund_profile = FundProfileScraper(url)\n\n value_dict = fund_profile.scrape_all_single_value()\n if single_values is None:\n single_values = pd.DataFrame([value_dict.values()],\n columns=value_dict.keys())\n else:\n temp_df = pd.DataFrame(value_dict.values(),\n columns=value_dict.keys())\n single_values.append(temp_df)\n\n asset_allocation_list = fund_profile.scrape_asset_allocation()\n allocations_with_href = [[href, asset_class]\n for asset_class in asset_allocation_list]\n if asset_allocations is None:\n asset_allocations = pd.DataFrame(\n allocations_with_href,\n columns=['href', 'asset_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n allocations_with_href,\n columns=['href', 'asset_allocation']\n )\n asset_allocations.append(temp_df)\n\n\n geo_allocations_list = fund_profile.scrape_geo_allocation()\n geo_allocations_href = [[href, geo_class]\n for geo_class in geo_allocations_list]\n if geo_allocations is None:\n geo_allocations = pd.DataFrame(\n geo_allocations_href,\n columns=['href', 'geo_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n geo_allocations_href,\n columns=['href', 'geo_allocation']\n )\n geo_allocations.append(temp_df)\n\n sector_allocations_list = fund_profile.scrape_sector_allocation()\n sector_allocations_href = [[href, sector_class]\n for sector_class in sector_allocations_list]\n if sector_allocations is None:\n sector_allocations = pd.DataFrame(\n sector_allocations_href,\n columns=['href', 'sector_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n sector_allocations_href,\n columns=['href', 'sector_allocation']\n )\n sector_allocations.append(temp_df)\n\n top10_holding_list = fund_profile.scrape_top10_holdings()\n top10_holding_href = [[href, holding]\n for holding in top10_holding_list]\n if top10_holdings is None:\n top10_holdings = pd.DataFrame(\n top10_holding_href,\n columns=['href', 'holding']\n )\n else:\n temp_df = pd.DataFrame(\n top10_holding_href,\n columns=['href', 'holding']\n )\n top10_holdings.append(temp_df)\n\n time.sleep(randint(1, 5))\n\n single_values.to_pickle('./single_values.pkl')\n asset_allocations.to_pickle('./asset_allocations.pkl')\n geo_allocations.to_pickle('./geo_allocations.pkl')\n sector_allocations.to_pickle('sector_allocations.pkl')\n top10_holdings.to_pickle('top10_holdings.pkl')",
"def dataExtract(queryResults):\n days = ['MondayCollect',\n 'TuesdayCollect',\n 'WednesdayCollect',\n 'ThursdayCollect',\n 'FridayCollect',\n 'SaturdayCollect',\n 'SundayCollect']\n\n #counting the instances of bin collections\n parkCount = 0\n roadingCount = 0\n otherCount = 0\n\n #output totals of bin collections\n parkOutput = []\n roadingOutput = []\n otherOutput = []\n \n #iterate over each day\n for day in days:\n \n #iterate over the number of bins\n for i in range(len(queryResults)):\n \n #check if the bin was collected on the day...\n if str(queryResults[i]['attributes'][day]).strip().lower() == 'yes':\n \n #unknown formatting issue with the data, these lines fix it\n strResult = str(queryResults[i]['attributes']['Owner'])\n strResultForm = strResult.lower().strip()\n \n #update the counts if True\n if strResultForm == 'roading':\n roadingCount += 1\n elif strResultForm == 'parks':\n parkCount += 1\n elif strResultForm == 'private':\n otherCount += 1\n else:\n otherCount +=1\n\n #print \"Day: {} \\nparkCount: {} \\nroadingCount: {} \\notherCount: {} \\n\\n\".format(day,parkCount,roadingCount,otherCount)\n \n parkOutput.append(parkCount)\n roadingOutput.append(roadingCount)\n otherOutput.append(otherCount)\n \n parkCount = 0\n roadingCount =0\n otherCount =0\n \n return parkOutput,roadingOutput,otherOutput",
"def assemble_stats(lma_sum, mma_sum, hma_sum, peer_lma_sum, peer_mma_sum, peer_hma_sum):\n lma_pct = 0.0\n mma_pct = 0.0\n hma_pct = 0.0\n\n peer_lma_pct = 0.0\n peer_mma_pct = 0.0\n peer_hma_pct = 0.0\n\n stats = {}\n\n target_lar_total = lma_sum + mma_sum + hma_sum\n if target_lar_total:\n lma_pct = round(1.0 * lma_sum / target_lar_total, 3)\n mma_pct = round(1.0 * mma_sum / target_lar_total, 3)\n hma_pct = round(1.0 * hma_sum / target_lar_total, 3)\n maj_pct = round(mma_pct + hma_pct, 3)\n stats.update({\n 'lma': lma_sum, \n 'lma_pct': lma_pct, \n 'mma': mma_sum,\n 'mma_pct': mma_pct,\n 'hma': hma_sum,\n 'hma_pct': hma_pct,\n 'maj_pct': maj_pct,\n 'lar_total': target_lar_total\n })\n else:\n stats.update({\n 'lar_total': 0,\n 'lma': 0, \n 'lma_pct': 0, \n 'mma': 0,\n 'mma_pct': 0,\n 'hma': 0,\n 'hma_pct': 0\n })\n #assemble peer data\n peer_lar_total = peer_lma_sum + peer_mma_sum + peer_hma_sum\n if peer_lar_total:\n peer_lma_pct = round(1.0 * peer_lma_sum / peer_lar_total, 3)\n peer_mma_pct = round(1.0 * peer_mma_sum / peer_lar_total, 3)\n peer_hma_pct = round(1.0 * peer_hma_sum / peer_lar_total, 3)\n peer_maj_pct = round(peer_mma_pct + peer_hma_pct, 3)\n stats.update({\n 'peer_lma': peer_lma_sum, \n 'peer_lma_pct': peer_lma_pct, \n 'peer_mma': peer_mma_sum,\n 'peer_mma_pct': peer_mma_pct,\n 'peer_hma': peer_hma_sum,\n 'peer_hma_pct': peer_hma_pct,\n 'peer_maj_pct': peer_maj_pct,\n 'peer_lar_total': peer_lar_total\n })\n else:\n stats.update({\n 'peer_lma': 0,\n 'peer_lma_pct': 0, \n 'peer_mma': 0, \n 'peer_mma_pct': 0,\n 'peer_hma': 0,\n 'peer_hma_pct': 0,\n 'peer_lar_total': 0\n })\n odds_lma = odds_ratio(lma_pct, peer_lma_pct)\n odds_mma = odds_ratio(mma_pct, peer_mma_pct)\n odds_hma = odds_ratio(hma_pct, peer_hma_pct)\n odds_maj = odds_ratio(mma_pct+hma_pct, peer_mma_pct+peer_hma_pct)\n stats.update({\n 'odds_lma':odds_lma,\n 'odds_mma':odds_mma,\n 'odds_hma':odds_hma,\n 'odds_maj':odds_maj\n })\n return stats",
"def getAllPageNumbers(self):\n\t\tfor subpage in self.subpages:\n\t\t\thtmlcontent = self.HttpHandler.getHtmlContentFromLink(subpage.link)\n\t\t\tsoupPage = BeautifulSoup(htmlcontent, \"html.parser\")\n\t\t\tsubpage.setNbrPages( self.getNbrPages(soupPage) )",
"def ExamineAllEvents(self, do_print):\n total = 0.0\n for purno in self.data:\n event = self.data[purno]\n randomcountry = event.keys()[0]\n randomrow = event[randomcountry]\n total += self.GetTotal(randomrow)\n if do_print:\n print purno, randomrow[0], randomrow[2], randomrow[6]\n for country in event:\n print \" %s: %.2f%%\" % (\n country, self.GetCountryPercentage(event[country], country) * 100)\n return total",
"def yield_stats(go_analysis):\n for i in xrange(go_analysis.nrow()):\n yield go_analysis[0][i], go_analysis[1][i], go_analysis[2][i], go_analysis[3][i], p_value_from_r(go_analysis[4][i]), p_value_from_r(go_analysis[5][i])",
"def print_numa_stats(numafiles):\n for numafile in numafiles:\n numafile.seek(0)\n node_id = int(numafile.name[numafile.name.find(\"/node/node\")+10:-9])\n ts = int(time.time())\n stats = dict(line.split() for line in numafile.read().splitlines())\n for stat, tag in (# hit: process wanted memory from this node and got it\n (\"numa_hit\", \"hit\"),\n # miss: process wanted another node and got it from\n # this one instead.\n (\"numa_miss\", \"miss\")):\n print (\"sys.numa.zoneallocs %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Count this one as a separate metric because we can't sum up hit +\n # miss + foreign, this would result in double-counting of all misses.\n # See `zone_statistics' in the code of the kernel.\n # foreign: process wanted memory from this node but got it from\n # another node. So maybe this node is out of free pages.\n print (\"sys.numa.foreign_allocs %d %s node=%d\"\n % (ts, stats[\"numa_foreign\"], node_id))\n # When is memory allocated to a node that's local or remote to where\n # the process is running.\n for stat, tag in ((\"local_node\", \"local\"),\n (\"other_node\", \"remote\")):\n print (\"sys.numa.allocation %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Pages successfully allocated with the interleave policy.\n print (\"sys.numa.interleave %d %s node=%d type=hit\"\n % (ts, stats[\"interleave_hit\"], node_id))",
"def eliminating_loop_example():\n\n totals_comp = [sum(row) for row in poke_stats]\n\n return(totals_comp)",
"def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1",
"def total(self):\n return self._evaluate()['hits']['total']",
"def SumaryPagos(vj):\n\n vj.PagosVenta = {}\n vj.MontoCobros = 0.0 # Sumatoria de todos los pagos reallizados\n\n for idPago, row in vj.tbPagos.rows.items(): # Recorre todos los pagos\n vj.MontoCobros += row.cuc # Acumula los pago en cuc\n vj.MontoCobros +=vj.Cnv( row.cup, MD.Cup, MD.Cuc ) # Acumula los pago en cup (convertido a cuc)\n\n idVent = row.idVent # Id de la venta a la que pertenece el pago\n if idVent not in vj.PagosVenta: # Si no hay pago para la venta \n vj.PagosVenta[idVent] = [] # Crea una lista vacia\n\n vj.PagosVenta[idVent].append(idPago) # Agrega el pago a la venta",
"def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)",
"def compute_statistics(self):",
"def gather_all_profiles(year, month):\n page = 1\n urls = []\n\n print(\"{}-{} : Begin indexing.\".format(year, month))\n\n while (page > 0):\n urlstring = \"http://scamdigger.com/{}/{}/page/{}\".format(year,month,page) \n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(urlstring)\n urls += enumerate_profiles(urlhandle, page)\n # time.sleep(1+jitter)\n page += 1\n except:\n page = 0\n\n print(\"{}-{} : {} profiles\".format(year,month,len(urls)))\n\n for url in urls:\n uid = url[30:-1]\n outfile=PROFILES+os.sep+uid+'.json'\n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(url)\n scrape_profile(urlhandle, outfile, year, month)\n # time.sleep(1+jitter)\n except Exception as e:\n print(\"Exception when handling {}\".format(url))\n print(e)\n \n print(\"{}-{} : complete.\".format(year,month))",
"def totals_map():\n totals_map = [*map(sum,poke_stats)]\n\n return(totals_map)",
"def getStats(population, masterList):\n for team in population:\n for i in range(13): #13 are the number of roster spots?\n team.totHr += masterList[team.roster[i]].hr\n team.totAvg += masterList[team.roster[i]].avg\n team.totRuns += masterList[team.roster[i]].runs\n team.totSb += masterList[team.roster[i]].sb\n team.totRbi += masterList[team.roster[i]].rbi\n if i == 12:\n team.totAvg = team.totAvg / 13\n return population",
"def core_stats():\n data = get_tsv_dataset(os.path.join(DATA_DIR, TOTALS_FILE))\n if data is None:\n return make_response(jsonify({'error': 'Data could not be read'}), 500)\n # parse up so we can manipulate things.\n dataset = [int(x) for x in data]\n annual_sightings = sum(dataset)\n # for each 'month' (selection of x4\n monthly_sightings = []\n max_sightings = 0\n max_month = 0\n\n # grab each month's data into its own list for post processing.\n # also calculate some other numbers as we go.\n for i in range(0, len(dataset), 4):\n # select 4x data points.\n this_month = dataset[i:i + 4]\n total_sightings_this_month = sum(this_month)\n monthly_sightings.append(total_sightings_this_month)\n old_max = max_sightings\n max_sightings = max(max_sightings, total_sightings_this_month)\n if old_max < max_sightings:\n # it could be the 0th month.\n max_month = len(monthly_sightings)\n\n mean_monthly_sightings = mean(monthly_sightings)\n month_name = list(calendar.month_name)[max_month]\n return make_response(jsonify({'annual_sightings': annual_sightings,\n 'max_sightings': max_sightings,\n 'max_sighting_month': month_name,\n 'mean_monthly_sightings': mean_monthly_sightings}), 200)",
"def legacy_pagecounts(project, start, end,\n access_site='all-sites', granularity='daily'):\n project_arg = 'all-projects'\n if project != 'all-projects':\n project_arg = '{}.org'.format(project)\n args = PC_ARGS.format(project=project_arg,\n start=start,\n end=end,\n access_site=access_site,\n granularity=granularity)\n return __api__(PC_ENDPOINT, args)",
"def get_statistics(self):\n statistics = {\n 'entry': 0,\n 'bandwidth': 0,\n 'exit': 0,\n 'pages': 0\n }\n downloads = statistics.copy()\n \n portal_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_portal_state'\n )\n context_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_context_state'\n )\n site = portal_state.portal()\n \n url = self.context.absolute_url().replace(site.absolute_url(), '')\n urls = []\n if url == '':\n url = '/'\n quoted_url = urllib.quote(url)\n \n urls.append(quoted_url)\n urls.append(quoted_url + '/view')\n canonical_url = urllib.quote(context_state.canonical_object_url())\n if canonical_url not in urls:\n urls.append(canonical_url)\n urls.append(canonical_url + '/view')\n\n query = 'SELECT * FROM statistics WHERE url IN %s' % str(tuple(urls))\n results = Session.execute(query).fetchall()\n if results:\n for row in results:\n for key in statistics.keys():\n statistics[key] = statistics[key] + int(row[key])\n\n results_dw = Session.execute(\n 'SELECT * FROM statistics WHERE url=\"%s/at_download%%\"' % quoted_url).fetchall()\n if results_dw:\n for row in rows_stat:\n for key in statistics.keys():\n downloads[key] = downloads[key] + int(row[key])\n statistics['downloads'] = downloads['pages']\n return statistics",
"def parsing_all_page(url):\n html_doc = get_html(url)\n# html_doc = get_html_local()\n page_count = get_html_count(html_doc)\n print 'All have find pages %d' % page_count\n\n projects = []\n\n for page in range(1, page_count + 1):\n print 'Parsing %d%%' % (page*100/page_count)\n\n url = BASE_URL + '?page=%d' % page\n projects.extend(process_page(url))\n\n return projects",
"def patrimony_total(self):\n pass",
"def run(self):\n with open(self.source_file) as file:\n for index, mem_access in enumerate(file):\n access_type = mem_access.split(' ')[0]\n address = int(mem_access.split(' ')[1], 16)\n self.page_table.query(address, access_type, index)\n return {\"memory_accesses\": self.mem_accesses,\n \"page_faults\": self.page_table.page_faults,\n \"writes_to_disk\": self.page_table.writes_to_disk}",
"def advancedStats():",
"def parse_pdfs():\n # get all of the pdf files in the dir\n pahopdffiles = [f for f in listdir(paho_raw_reports_dir) if isfile(join(paho_raw_reports_dir, f))]\n # set up a list to hold the data for all pdf files\n all_pdf_data = []\n # read in each pdf file\n for pahopdffile in pahopdffiles:\n try:\n logging.info(\"Now attempting to read in: \"+pahopdffile)\n fullfilepath = os.path.join(paho_raw_reports_dir, pahopdffile)\n tables = camelot.read_pdf(fullfilepath)\n # get the pandas dataframe from each pdf\n pdfdataframe = tables[0].df\n # ensure that this is a valid PAHO COVID19 report\n report_keywords = ['Cumulative','COVID-19','Americas'] \n if not all(x in pdfdataframe[0].iloc[0] for x in report_keywords):\n logging.error(pahopdffile+\" was not recognised as a normal PAHO pdf file. Skipping.\")\n continue\n # set up the list to hold the data for this file\n reportdata = []\n # create a variable to store the date of this report\n date = None\n # create a variable to store the last subregion seen\n subregion = None\n # PAHO has different formats for their tables, so we need to check the number of columns in the pdf\n numcolumns = len(pdfdataframe.columns)\n # get the row index for the last country\n lastcountryrowindex = pdfdataframe[1][pdfdataframe[1] == 'Total'].index[0]-1\n for rowindex,rowdata in pdfdataframe.iterrows():\n # set up variables to hold the data for the dict\n country_or_territory_name = None\n confirmed_cases = None\n probable_cases = None\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n if numcolumns == 6:\n # this is the old format that they started with\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].replace('Cumulative suspected and confirmed COVID-19 cases reported by \\ncountries and territories in the Americas, as of ','')\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n if not date:\n raise RuntimeError(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[5]\n # store null data for all other fields that were not present in the old reports\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n elif numcolumns == 9:\n # PAHO added in probable cases\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[2]:\n split_numbers = rowdata[2].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n transmission_type = rowdata[8]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[5]\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[6].replace(\",\",\"\"))\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[8]\n elif numcolumns == 10:\n # PAHO added in country ISO codes and special characters\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(3,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[2] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[2] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[2]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[3]:\n split_numbers = rowdata[3].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n transmission_type = rowdata[9]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[5].replace(\",\",\"\"))\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[6]\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[7].replace(\",\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n if rowdata[9] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[9]\n else:\n logging.error(\"Unrecognised number of columns in the pdf file. Skipping for now.\"+\n \"Check if the report format changed from PAHO.\")\n # if we were at least able to scrape the country or territory name, create a dict and add it to the list\n if country_or_territory_name is not None:\n # set up the dict to store each row of data\n reportdict = collections.OrderedDict()\n # add the values to the dict in the order that we want for the report\n reportdict['date'] = date\n reportdict['country_or_territory_name'] = country_or_territory_name\n reportdict['confirmed_cases'] = confirmed_cases\n reportdict['probable_cases'] = probable_cases\n reportdict['confirmed_deaths'] = confirmed_deaths\n reportdict['probable_deaths'] = probable_deaths\n reportdict['recovered'] = recovered\n reportdict['percentage_increase_confirmed'] = percentage_increase_confirmed\n reportdict['transmission_type'] = transmission_type\n # now add this dict to our list for this report/pdf\n reportdata.append(reportdict)\n # once we are done adding all data for this pdf, add this pdf report to the list of all reports\n # if the reportdata list is not empty\n if reportdata:\n all_pdf_data.append(reportdata)\n logging.info(\"Successfully parsed \"+pahopdffile)\n except Exception as exc:\n logging.exception(\"Problem found while parsing \"+pahopdffile)\n raise\n logging.info(\"Completed parsing all pdfs in folder.\")\n return all_pdf_data",
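A standalone sketch of the camelot pattern parse_pdfs relies on, assuming a local file named report.pdf: read_pdf returns a TableList whose entries expose the parsed table as a pandas DataFrame, and the keyword test mirrors the validity check above.

import camelot

tables = camelot.read_pdf("report.pdf")   # TableList of detected tables
df = tables[0].df                         # first table as a pandas DataFrame
keywords = ['Cumulative', 'COVID-19', 'Americas']
print("recognised PAHO report:", all(k in df[0].iloc[0] for k in keywords))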
"def _addPageRatio(self, outlines, pageLabels):\n for i in range(0, len(outlines)):\n outline = outlines[i]\n if type(outline) == list:\n self._addPageRatio(outlines[i], pageLabels)\n continue\n elif not outline.has_key('/Page'):\n print (\"Error: outline has no key '/Page'\")\n sys.exit(-1)\n pageHeight = outline['/Page']['/MediaBox'][-1]\n idIndirect = outline.page.idnum\n if pageLabels.has_key(idIndirect):\n pageNum = pageLabels[idIndirect]\n else:\n print ('Error: Page corresponds to IndirectObject %d not Found' % idIndirect)\n sys.exit(-1)\n if outline.has_key('/Top'):\n top = outline['/Top']\n else:\n top = pageHeight\n if outline.has_key('/Zoom'):\n zoom = outline['/Zoom']\n else:\n zoom = 1\n outline = dict(outline)\n try:\n outline['/Ratio'] = pageNum + (1 - top / zoom / pageHeight)\n except:\n pass\n outlines[i] = outline",
"def _count_pages_pdf(self, bin_pdf):\n pages = 0\n for match in re.compile(r\"/Count\\s+(\\d+)\").finditer(bin_pdf):\n pages = int(match.group(1))\n return pages",
"def get_totals(data, cols):\n\n spots = [len(data[data.Phase == p]) for p in pd.unique(data.Phase)]\n j = 0\n added_rows = 0\n for i in range(len(spots)):\n spots[i] += j + added_rows\n j = spots[i]\n added_rows = 1\n spots = [0] + spots\n\n end = len(cols) - 1\n\n final = pd.DataFrame(columns = data.columns)\n blank = pd.DataFrame({c:'' for c in data.columns}, index = [-1])\n\n for ind, p in enumerate(pd.unique(data.Phase)):\n plu = 4 if ind else 3\n section = data.loc[data.Phase == p]\n sums = blank.copy()\n\n sums.loc[-1, 'Deleted'] = 'Total'\n\n for u in data.columns:\n if '#' in u:\n lett = alpha[list(data.columns).index(u)]\n if 'CO' not in u:\n sums.loc[-1, u] = '=SUMIF(' + lett + str(spots[ind] + plu) + ':' + lett + str(spots[ind + 1] + 2) + ',\">0\")'\n else:\n sums.loc[-1, u] = '=SUM(' + lett + str(spots[ind] + plu) + ':' + lett + str(spots[ind + 1] + 2) + ')'\n if 'Unit_Total' in cols:\n sums.loc[-1, 'M/M_Total'] = '=SUM(' + alpha[end -2] + str(spots[ind] + plu) + ':' + alpha[end -2] + str(spots[ind + 1] + 2) + ')'\n sums.loc[-1, 'Unit_Total'] = '=SUM(' + alpha[end -1] + str(spots[ind] + plu) + ':' + alpha[end -1] + str(spots[ind + 1] + 2) + ')'\n sums.loc[-1, 'Line_Total'] = '=SUM(' + alpha[end] + str(spots[ind] + plu) + ':' + alpha[end] + str(spots[ind + 1] + 2) + ')'\n\n section = pd.concat([section, sums])\n final = pd.concat([final, section], ignore_index = True)\n\n final = final[cols]\n\n spots = [t + 1 for t in spots[1:]]\n\n return final, spots",
"def computePValues(options,whole_mapped_data,mapped_data_per_size_per_register,phase,cycle):\n min_reads_mapped_to_a_phased_register=3\n min_reads_in_a_window=10\n chromosome_hits=[]\n for chromosome in sorted(mapped_data_per_size_per_register):\n chromosome_hits.append(chromosome)\n fhr=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\",\"r\")\n fhw=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\",\"w\")\n for line in fhr:\n register,start,end=line.strip().split()\n register=int(register)\n start=int(start)\n end=int(end)\n \n begin=start\n #print(chromosome,register,start,end)\n sys.stdout.flush()\n while begin+(phase*min_reads_mapped_to_a_phased_register) <= end+1:\n finish=begin+(phase*cycle)-1\n \n k=0\n for i in range(begin,finish+1):\n #print(chromosome,register,i,phase,start,end)\n try:\n k+=mapped_data_per_size_per_register[chromosome][register][i]\n except KeyError:\n pass\n #print(\"Next\")\n if k<min_reads_mapped_to_a_phased_register: \n begin+=phase\n continue\n \n num_all_reads=0\n for i in range(begin,finish+1):\n try:\n num_all_reads+=whole_mapped_data[chromosome][i]\n except KeyError:\n pass\n if num_all_reads<min_reads_in_a_window:\n begin+=phase\n continue\n \n n=0\n \"\"\"print(\"reached here\")\n sys.stdout.flush()\"\"\"\n # register_i is an iterator different from register\n for register_i in sorted(mapped_data_per_size_per_register[chromosome]):\n for i in range(begin,finish+1):\n try:\n n+=mapped_data_per_size_per_register[chromosome][register_i][i]\n except KeyError:\n pass\n \"\"\"if chromosome==\"Chr1\":\n print(str(n)+\" \"+str(num_all_reads)+\"\\n\")\"\"\"\n if n/num_all_reads<0.3:\n begin+=phase\n continue\n m=cycle*2\n pvalue=0\n for x in range(k,m+1):\n numerator=nCr((phase-1)*m,n-x)*nCr(m,x)\n pvalue+=numerator\n denominator=nCr(phase*m,n)\n pvalue=pvalue/denominator\n #print(chromosome,begin,finish,k,n,m,num_all_reads,pvalue,n/num_all_reads)\n if pvalue>=options.pvalue_cutoff:\n begin+=phase\n continue\n stuffs_to_be_printed_to_file=[register,begin,finish,k,n,m,num_all_reads,n/num_all_reads,pvalue]\n fhw.write(\"\\t\".join(map(str,stuffs_to_be_printed_to_file))+\"\\n\")\n sys.stdout.flush()\n begin+=phase",
"def total(evictiondata):\r\n total = 0\r\n for index, row in evictiondata.iterrows():\r\n total += row['filings_2020']",
"def read_page(bs, adj):\n paragraphs = bs.find('div',{'id':'bodyContent'}).find_all('p')\n for p in paragraphs:\n EntryParser.count_name(p.text, adj)\n return adj",
"def task7(self, doc_uuid):\n page_totalRead ={}\n page_reads = {} \n for entry in self.records:\n if((entry['event_type'] == 'pagereadtime') and (entry['subject_doc_id'] == doc_uuid)):\n if (entry['subject_page'] in page_totalRead):\n page_totalRead[entry['subject_page']] += int(entry['event_readtime'])\n else:\n page_totalRead[entry['subject_page']] = int(entry['event_readtime'])\n if(entry['subject_page'] in page_reads):\n page_reads[entry['subject_page']] += 1\n else:\n page_reads[entry['subject_page']] = 1\n page_meanRead = {}\n for page in page_totalRead:\n if (page in page_reads):\n page_meanRead[page] = page_totalRead[page]/page_reads[page]\n GUI.show_line(page_meanRead, \"Page\", \"Mean Read Time\", \"Retention\")",
"def mor_prepare_data():\n prices, locations, areas, links = [], [], [], []\n for i in range(START_PAGE, SEARCHING_DEPTH+1):\n handler = requests.get(main_url, params={\"page\": str(i)})\n soup = bs4.BeautifulSoup(handler.text, 'lxml')\n heads = soup.find_all(\"header\")\n once = True\n for head in heads:\n if head.find(\"meta\", {\"itemprop\": \"category\"}) and once:\n\n raw_price = head.find(\"meta\", {\"itemprop\": \"price\"})\n price = int(float(raw_price[\"content\"]) if raw_price else \"\")\n\n raw_loc_list = head.find(\"h2\",\n {\"class\": \"single-result__title\"}).getText().strip().split(\n \", \")\n found = False\n for loc in raw_loc_list:\n if location_mapper[CITY].get(loc.lower(), 0):\n location = location_mapper[CITY][loc.lower()]\n\n found = True\n break\n if not found:\n location = \"\"\n if DEBUG_MODE:\n print(raw_loc_list)\n\n raw_area = head.find(\"p\", {\n \"class\": \"single-result__price single-result__price--currency\"}).getText().strip().split()\n if price and location:\n square_price = raw_area[0] if len(raw_area) == 2 else \"\".join(\n (raw_area[0], raw_area[1]))\n\n area = int(price / float(square_price.replace(\",\", \".\")))\n link_url = head.find('a')['href']\n\n if location and area and link_url:\n prices.append(price) if price < PRICE_UPPER_LIMIT else prices.append(\n PRICE_UPPER_LIMIT)\n locations.append(location)\n areas.append(area) if area < AREA_UPPER_LIMIT else areas.append(\n AREA_UPPER_LIMIT)\n links.append(link_url)\n\n return prices, locations, areas, links",
"def update_totals(self):\n # Reset counts to 0\n self.total_f = self.total_s = self.total_intra = self.total_mac_regular = self.total_mac_infected = \\\n self.total_mac_activated = self.total_regular_fast = self.total_regular_slow = self.total_infected_fast = \\\n self.total_infected_slow = self.total_activated_fast = self.total_activated_slow = self.total_f_degree = \\\n self.total_s_degree = self.total_activation = 0\n self.total_f_o2 = self.total_s_o2 = 0.0\n\n for node in self.node_list.values():\n # Get values from node\n fast_in_node = node.subpopulations[BACTERIA_FAST]\n slow_in_node = node.subpopulations[BACTERIA_SLOW]\n intra_in_node = node.subpopulations[BACTERIA_INTRACELLULAR]\n reg_mac_in_node = node.subpopulations[MACROPHAGE_REGULAR]\n inf_mac_in_node = node.subpopulations[MACROPHAGE_INFECTED]\n act_mac_in_node = node.subpopulations[MACROPHAGE_ACTIVATED]\n degree = node.degree\n o2_tens = node.oxygen_tension\n # Update relevant totals\n self.total_f += fast_in_node\n self.total_s += slow_in_node\n self.total_intra += intra_in_node\n self.total_mac_regular += reg_mac_in_node\n self.total_mac_infected += inf_mac_in_node\n self.total_mac_activated += act_mac_in_node\n self.total_regular_fast += fast_in_node * reg_mac_in_node\n self.total_regular_slow += slow_in_node * reg_mac_in_node\n self.total_infected_fast += fast_in_node * inf_mac_in_node\n self.total_infected_slow += slow_in_node * inf_mac_in_node\n self.total_activated_fast += fast_in_node * act_mac_in_node\n self.total_activated_slow += slow_in_node * act_mac_in_node\n # TODO - check usage of degree\n self.total_f_degree += fast_in_node * degree\n self.total_s_degree += slow_in_node * degree\n self.total_f_o2 += fast_in_node * (1/o2_tens)\n self.total_s_o2 += slow_in_node * o2_tens\n self.total_activation += reg_mac_in_node * inf_mac_in_node",
"def total(h):\r\n\treturn sum(i.points() for i in h)",
"def calculate_num_scrape_pages(h, r):\n p = (int(search_hits)/res_per_page) + 1\n return p",
"def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)",
"def get_num_of_pages(self):",
"def pobj_counts(pcode_obj):\n pcode = (pcode_obj.asDict())['pcode'][0] # no multiple pcode blocks - no delimiter\n counts = {'galleries': 0, 'spreads': 0, 'layouts': 0, 'panelgroups': 0}\n # , 'panels': 0, 'skips': 0 }\n galleries = pcode.pop('gallery', '')\n counts['galleries'] = len(galleries)\n for gallery in galleries:\n spreads = gallery.pop('spread', '')\n counts['spreads'] += len(spreads)\n for spread in spreads:\n layouts = spread.pop('layout', '')\n counts['layouts'] += len(layouts)\n for layout in layouts:\n panelgroups = layout.pop('panelgroup', '')\n counts['panelgroups'] += len(panelgroups)\n return counts",
"def __calculate_statistics(self, candidates):\n pdf = {}\n for candidate in candidates:\n neighbors = list(self.G.neighbors(candidate))\n capacity = sum([self.G.get_edge_data(candidate, n)[\"satoshis\"] for n in neighbors])\n average = capacity / len(neighbors)\n pdf[candidate] = average\n cumsum = sum(pdf.values())\n pdf = {k:v/cumsum for k,v in pdf.items()}\n w = 0.7\n print(\"percentage smoothed percentage capacity numchannels alias\")\n print(\"----------------------------------------------------------------------\")\n res_pdf = {}\n for k,v in pdf.items():\n neighbors = list(self.G.neighbors(k))\n capacity = sum([self.G.get_edge_data(k, n)[\"satoshis\"] for n in neighbors])\n name = k\n if \"alias\" in self.G.node[k]:\n name = self.G.node[k][\"alias\"]\n print(\"{:12.2f} \".format(100*v), \"{:12.2f} \".format(100*(w * v + (1-w)/len(candidates))) ,\"{:10} {:10} \".format( capacity, len(neighbors)), name)\n res_pdf[k] = (w * v + (1-w)/len(candidates))\n return res_pdf",
"def showStats(population, masterList, index):\n count = 0\n if index == \"all\":\n for team in population:\n print (\"Team at index\", count)\n print(\"Tot Avg\", team.totAvg)\n print(\"Tot Runs\", team.totRuns)\n print(\"Tot HRs\", team.totHr)\n print(\"Tot RBIs\", team.totRbi)\n print(\"Tot SB\", team.totSb)\n print(\"Tot points\", team.points, '\\n')\n count += 1\n else:\n print(\"Team at index\", index)\n print(\"Tot Avg\", population[index].totAvg)\n print(\"Tot Runs\", population[index].totRuns)\n print(\"Tot HRs\", population[index].totHr)\n print(\"Tot RBIs\", population[0].totRbi)\n print(\"Tot SB\", population[0].totSb)\n print(\"Tot points\", population[0].points, '\\n')",
"def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)",
"def test_getTotalIndividualCount(self):\r\n # Verified with iNEXT.\r\n self.assertEqual(self.est1.getTotalIndividualCount(), 15)\r\n\r\n # Verified against results in Colwell 2012 paper.\r\n self.assertEqual(self.est2.getTotalIndividualCount(), 976)\r\n self.assertEqual(self.est3.getTotalIndividualCount(), 237)",
"def measurements(self, minPag = 4, maxPag = 50, nPagesRacc = 100, percentageTest = 0.20):\n allPages = self.db.getPages()\n test = random.sample(allPages, int(len(allPages) * percentageTest))\n avg = []\n centroids = self.getAllCentroids(inferior_limit = minPag, withPrint = False, saveFile = False, test = test)\n for nPage in tqdm(range(minPag, maxPag+1)):\n c = self.db.db.cursor()\n c.execute('SELECT cat_name FROM catpage GROUP BY cat_name HAVING COUNT(*)<?', [nPage]) \n for ex in {c[0] for c in c.fetchall()}.intersection(centroids.keys()):\n centroids.pop(ex,None)\n m1, m2, m3 = [], [], []\n start_time = time.time()\n for page in random.sample(test, nPagesRacc):\n m1p, m2p, m3p = self.recommendCategory(page = page, centroids = centroids, randomWeb = False, printRes = False)\n m1.append(m1p)\n m2.append(m2p)\n m3.append(m3p)\n elapsed_time = time.time() - start_time\n tuple = (nPage, len(centroids), elapsed_time, np.mean(m1), np.std(m1), np.mean(m2), np.std(m2), np.mean(m3), np.std(m3))\n print(\"\\n\", tuple)\n avg.append(tuple)\n self.writeFile(avg, \"avg.pickle\")\n return avg",
"def sumPoints(popSize, population, x):\n skipCount = 0\n for num in range(popSize):\n passParam = 0\n # print \"pre-skip check; skip count is: \", skipCount\n if skipCount != 0:\n skipCount -= 1\n # print \"skip detected\"\n continue\n if num > 0: # done this way because: if num = 0 population[num-1] will fail\n if x == \"avg\": # each category evaluates a different attribute of the team\n if population[num - 1].totAvg == population[num].totAvg:\n count, sentinel = 1, 1 # count is 1 because 1 data at that value has been detected already\n while sentinel == 1:\n if num + count <= popSize - 1:\n if population[num + count - 1].totAvg == population[num + count].totAvg:\n count += 1\n else:\n sentinel = 0\n else:\n sentinel = 0\n for i in range(\n count): # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n population[num + i].points += popSize - num + 1\n # print \"points changed!\"\n # print num+i, population[num+i].points\n skipCount += count - 1\n # print \"num\",num\n passParam = 1\n elif x == \"sb\": # each category evaluates a different attribute of the team\n if population[num - 1].totSb == population[num].totSb:\n count, sentinel = 1, 1 # count is 1 because 1 data at that value has been detected already\n while sentinel == 1:\n if num + count <= popSize - 1:\n if population[num + count - 1].totSb == population[num + count].totSb:\n count += 1\n else:\n sentinel = 0\n else:\n sentinel = 0\n for i in range(\n count): # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n population[num + i].points += popSize - num + 1\n # print \"points changed!\"\n # print num+i, population[num+i].points\n skipCount += count - 1\n # print \"num\",num\n passParam = 1\n elif x == \"hr\":\n if population[num - 1].totHr == population[num].totHr:\n count, sentinel = 1, 1 # count is 1 because 1 data at that value has been detected already\n while sentinel == 1:\n if num + count <= popSize - 1:\n if population[num + count - 1].totHr == population[num + count].totHr:\n count += 1\n else:\n sentinel = 0\n else:\n sentinel = 0\n for i in range(\n count): # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n population[num + i].points += popSize - num + 1\n # print \"points changed!\"\n # print num+i, population[num+i].points\n skipCount += count - 1\n # print \"num\",num\n passParam = 1\n elif x == \"rbi\":\n if population[num - 1].totRbi == population[num].totRbi:\n count, sentinel = 1, 1 # count is 1 because 1 data at that value has been detected already\n while sentinel == 1:\n if num + count <= popSize - 1:\n if population[num + count - 1].totRbi == population[num + count].totRbi:\n count += 1\n else:\n sentinel = 0\n else:\n sentinel = 0\n for i in range(\n count): # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n population[num + i].points += popSize - num + 1\n # print \"points changed!\"\n # print num+i, population[num+i].points\n skipCount += count - 1\n # print \"num\",num\n passParam = 1\n elif x == \"runs\":\n if population[num - 1].totRuns == population[num].totRuns:\n count, sentinel = 1, 1 # count is 1 because 1 data at that value has been detected already\n while sentinel == 1:\n if num + count <= popSize - 1:\n if population[num + count - 1].totRuns == population[num + 
count].totRuns:\n count += 1\n else:\n sentinel = 0\n else:\n sentinel = 0\n for i in range(\n count): # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n population[num + i].points += popSize - num + 1\n # print \"points changed!\"\n # print num+i, population[num+i].points\n skipCount += count - 1\n # print \"num\",num\n passParam = 1\n if passParam == 0: # will only be 0 if no repeats are detected; alterative to putting an else statement in each category if clause\n population[num].points += popSize - num # in which case: business as usual\n # print \"more points yo\"\n else:\n population[num].points += popSize - num # business as usual\n # print \"getting them points\"\n # print num, population[num].points",
"def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table",
"def fetch_counts(datestruct):\n response = call_responder('elasticsearch', 'query/daily_proofreader_hits')\n for rec in response['result']['hits']['hits']:\n data = rec['_source']\n if data['user'] not in datestruct:\n datestruct[data['user']] = {\"cleave\": 0, \"merge\": 0,\n \"split-supervoxel\": 0}\n if '/cleave/' in data['uri']:\n datestruct[data['user']]['cleave'] += 1\n elif '/merge' in data['uri']:\n datestruct[data['user']]['merge'] += 1\n elif '/split-supervoxel' in data['uri']:\n datestruct[data['user']]['split-supervoxel'] += 1",
"def stats(self):",
"def dataStats(reportsDir = \"./reports/\"):\n legMulti = glob.glob(reportsDir+\"/leg/*.json\")\n legOne = glob.glob(reportsDir+\"/leg/oneproc/*.json\")\n legBroken = glob.glob(reportsDir+\"/leg/broken/*.json\")\n \n malMulti = glob.glob(reportsDir+\"/mal/*.json\")\n malOne = glob.glob(reportsDir+\"/mal/oneproc/*.json\")\n malBroken = glob.glob(reportsDir+\"/mal/broken/*.json\")\n \n print(\"\"\"Legal files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(legBroken+legMulti+legOne), len(legOne), len(legMulti), len(legBroken)))\n print(\"\"\"Malicious files:\n Total: {0}, One-proc: {1}, Multi-proc: {2}, Broken: {3} \"\"\"\n .format(len(malBroken+malMulti+malOne), len(malOne), len(malMulti), len(malBroken)))\n print(\"Working samples: {0}\".format(len(malMulti+malOne+legMulti+legOne)))",
"def report(self, stream):\n from collections import OrderedDict\n self.stats['total'] = sum(self.stats.values())\n for group in self.report_data.values():\n group.stats['total'] = sum(group.stats.values())\n self.report_file.write(self.jinja.get_template('report.html').render(\n report=OrderedDict(sorted(self.report_data.items())),\n stats=self.stats,\n ))\n self.report_file.close()\n if self.config.verbosity > 1:\n stream.writeln(\"-\" * 70)\n stream.writeln(\"HTML: %s\" % self.report_file.name)",
"def calDominationCount(p,visitedPoints):\n isDominated = utils.MultiThread(utils.dominating, zip([visitedPoints[k].mean for k in visitedPoints],repeat(p.mean)))\n dominationCount = sum(isDominated)\n print('Please _cutils.calDominantionCount(). This method is too slow.')\n return dominationCount",
"def sum_stats(input_stats, out, name):\n ss(input_stats, out, name)",
"def extract_competencies(pdf: PDFQuery) -> List[Dict]:\n\n page_count = get_page_count(pdf)\n results: List[Dict] = []\n\n for i in range(page_count - 1):\n # Limit the extraction to the current page and only extract text\n selectors = [\n ('with_parent', 'LTPage[page_index=\"%s\"]' % (i)),\n ('with_formatter', 'text'),\n ]\n\n # Try to find a \"Modulnummer\" on that page. If there is none, then it's\n # not a module-description page.\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf, i, (\"Modulnummer\",), (\"Titel\",), (Point(\n 120, 0), Point(\n 490, 1)), \"id\"))\n except ValueError as err:\n eprint(\n \"No \\\"Modulnummer\\\" found on page %s, skipping...\" %\n (i + 1))\n continue\n\n # Find the module title\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf, i, (\"Titel\",), (\"Leistungspunkte\", \"Credits\"), (Point(\n 120, 0), Point(\n 490, 1)), \"name\"))\n except ValueError as err:\n eprint(\"Error parsing \\\"Titel\\\": %s\" % (err))\n\n # Find the module competencies\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf,\n i,\n (\"Lernziele / Kompetenzen\",\n \"Lernziele/Kompetenzen\"),\n (\"Voraussetzungen\",\n ),\n (Point(\n 120,\n 0),\n Point(\n 490,\n 1)),\n \"competencies\"))\n except ValueError as err:\n eprint(\"Error parsing \\\"Lernziele / Kompetenzen\\\": %s\" % (err))\n\n # Find the module requirements\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf, i, (\"Voraussetzungen\",), (\"Niveaustufe\",), (Point(\n 120, 0), Point(\n 490, 1)), \"requirements\"))\n except ValueError as err:\n eprint(\"Error parsing \\\"Voraussetzungen\\\": %s\" % (err))\n\n # Do the extraction\n page_results: Dict = pdf.extract(selectors)\n\n # Add the pagenumber for convenience reasons\n page_results['page'] = i + 1\n\n # Trim extrated text\n page_results['id'] = page_results['id'].strip()\n page_results['name'] = page_results['name'].strip()\n\n # Split the extracted sentences (which also does a trim to each\n # sentence)\n page_results['competencies'] = split_sentences(\n page_results['competencies'])\n page_results['requirements'] = split_sentences(\n page_results['requirements'])\n\n results.append(page_results)\n\n return results",
"def fetch_pages(query_val, page_num):\n \n for page_id in range(1 + page_num + 1):\n try:\n output = fetch_data(query_val, page_id)\n for j in output:\n print(str(j))\n \n except Exception as e:\n print(e)",
"def _run(offset_start: int, offset_end: int, pages: int, symbol, stock_name):\n timer = Timings()\n tsver = TSVer(symbol)\n timings_offset = TimingsOffset(stock_name)\n if not isinstance(pages, type(int)):\n pages = int(pages)\n timer.estimate_duration(offset_end - offset_start, pages)\n timer.start_logged()\n current_runs = 0\n counter_items = 0\n sleep_time = timer.SLEEP_TIME\n final_results = []\n for _i in range(offset_start, offset_end):\n i = _i + 1\n timings_offset.update_num(_i)\n for j in range(0, pages):\n current_runs += 1\n timer.operation_logged()\n time.sleep(sleep_time)\n get_url = timings_offset.url_with_first_offset(j)\n res = requests.get(get_url)\n text = res.text\n\n # requests_html = pathlib.Path(os.path.join(Statics.WORK_DIR, 'requests_bing.html'))\n # requests_html.write_text(text, encoding=Statics.UTF8)\n\n b_results = SoupTags.using_type_and_id(text, 'ol', 'b_results')\n b_algo = SoupTags.using_type_and_class(\n str(b_results), 'li', 'b_algo')\n\n for item in b_algo:\n _desc = _get_stripped_item(item)\n item_str = str(item)\n item_str = item_str.replace(' H=', ' h=')\n if run_item(item_str, _desc, timings_offset):\n try:\n title = SoupTags.using_h_re_compile(\n item_str, 'ID=SERP,')\n _text, _desc, _href = OutputFormats.get_result(\n title.text,\n _desc.replace(timings_offset.bdy, ''),\n title.get('href')\n )\n final_results.append(\n f'{timings_offset.ymd}|{_href}|{_text}|{_desc}')\n counter_items += 1\n except Exception as error:\n print(error)\n\n if counter_items > 50:\n tsver.append_current(final_results)\n counter_items = 0\n\n if len(final_results) > 0:\n tsver.append_current(final_results)\n\n timer.stop_logged()",
"def sum_stats(stats_data):\n t_bounces = 0\n t_complaints = 0\n t_delivery_attempts = 0\n t_rejects = 0\n for dp in stats_data:\n t_bounces += int(dp['Bounces'])\n t_complaints += int(dp['Complaints'])\n t_delivery_attempts += int(dp['DeliveryAttempts'])\n t_rejects += int(dp['Rejects'])\n\n return {\n 'Bounces': t_bounces,\n 'Complaints': t_complaints,\n 'DeliveryAttempts': t_delivery_attempts,\n 'Rejects': t_rejects,\n }",
"def per_page():\n return 100",
"def test_total_totals_index():\n pressure = np.array([1008., 1000., 947., 925., 921., 896., 891., 889., 866.,\n 858., 850., 835., 820., 803., 733., 730., 700., 645.,\n 579., 500., 494., 466., 455., 441., 433., 410., 409.,\n 402., 400., 390., 388., 384., 381., 349., 330., 320.,\n 306., 300., 278., 273., 250., 243., 208., 200., 196.,\n 190., 179., 159., 151., 150., 139.]) * units.hPa\n temperature = np.array([27.4, 26.4, 22.9, 21.4, 21.2, 20.7, 20.6, 21.2, 19.4,\n 19.1, 18.8, 17.8, 17.4, 16.3, 11.4, 11.2, 10.2, 6.1,\n 0.6, -4.9, -5.5, -8.5, -9.9, -11.7, -12.3, -13.7, -13.8,\n -14.9, -14.9, -16.1, -16.1, -16.9, -17.3, -21.7, -24.5, -26.1,\n -28.3, -29.5, -33.1, -34.2, -39.3, -41., -50.2, -52.5, -53.5,\n -55.2, -58.6, -65.2, -68.1, -68.5, -72.5]) * units.degC\n dewpoint = np.array([24.9, 24.6, 22., 20.9, 20.7, 14.8, 13.6, 12.2, 16.8,\n 16.6, 16.5, 15.9, 13.6, 13.2, 11.3, 11.2, 8.6, 4.5,\n -0.8, -8.1, -9.5, -12.7, -12.7, -12.8, -13.1, -24.7, -24.4,\n -21.9, -24.9, -36.1, -31.1, -26.9, -27.4, -33., -36.5, -47.1,\n -31.4, -33.5, -40.1, -40.8, -44.1, -45.6, -54., -56.1, -56.9,\n -58.6, -61.9, -68.4, -71.2, -71.6, -77.2]) * units.degC\n\n tt = total_totals_index(pressure, temperature, dewpoint)\n assert_almost_equal(tt, 45.10 * units.delta_degC, 2)",
"def processReports(self):\n count = 0\n for r in self.reports:\n #need to change the next two lines so that the fields are not hard-coded\n self.currentCase = r.id\n self.currentText = r.impression.lower()\n self.analyzeReport(self.currentText,\n \"disease\",\n modFilters=['indication','probable_existence',\n 'definite_existence',\n 'historical','future','pseudoneg',\n 'definite_negated_existence',\n 'probable_negated_existence'])\n\n self.recordResults()",
"def per_page_statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:",
"def integrateStats(self, data):\n\n typeTags = ('AA', 'UU', 'UUE', 'UUD', 'UX', 'UnM', 'UrM',\n 'nMnM', 'rMrM', 'nMrM', 'nMA', 'rMA', 'XX',\n 'fr', 'rf', 'ff', 'rr')\n\n for pDict in data:\n for i in xrange(len(self.orderedStreams)):\n if self.orderedStreams[i].name == pDict['name']:\n stream = self.orderedStreams[i]\n # insert size stats\n for sizePair in pDict['stats']['sizes']:\n stream.stats['sizes'].append(sizePair)\n # Rname stats\n for statsKey in ('rnames', 'RNsingle', 'RNpairs'):\n for key,value in pDict['stats'][statsKey].items():\n try:\n stream.stats[statsKey][key] += value\n except KeyError:\n stream.stats[statsKey][key] = value\n # LL and ss match stats\n for tag in typeTags:\n try:\n stream.stats[tag] += pDict['stats'][tag]\n except KeyError:\n stream.stats[tag] = pDict['stats'][tag]\n # General counts and output files (var: fns)\n stream.count += 1\n fns = []\n for filename in pDict['files']:\n if stream.op(OP_SAM):\n fns.append('%s.%s' % (filename, OP_SAM))\n elif stream.op(OP_SAMPP):\n fns.append('%s.pp.%s' % (filename, OP_SAM))\n if stream.op(OP_FASTQPP):\n if stream.op(OP_SH):\n fns.append('%s.pp.sh.%s' % (filename, OP_FASTQ))\n else:\n fns.append('%s.pp.1.%s' % (filename, OP_FASTQ))\n fns.append('%s.pp.2.%s' % (filename, OP_FASTQ))\n elif stream.op(OP_FASTQ):\n if stream.op(OP_SH):\n fns.append('%s.sh.%s' % (filename, OP_FASTQ))\n else:\n fns.append('%s.1.%s' % (filename, OP_FASTQ))\n fns.append('%s.2.%s' % (filename, OP_FASTQ))\n for fname in fns:\n stream.fileswritten.add(fname)\n stream.outputfilenames.add(fname)\n break\n\n # Now add the so-called global stats from each dict - we only need\n # update the globalstats list in the first stream object\n for pDict in data:\n stream = self.orderedStreams[0]\n for tag in typeTags:\n try:\n stream.globalstats[tag] += pDict['globalstats'][tag]\n except KeyError:\n stream.globalstats[tag] = pDict['globalstats'][tag]\n\n # Update the heartbeat object with the number of record pairs\n # written. This will trigger a console update automatically when\n # needed.\n self.heartbeat.count = stream.globalstats['AA']\n self.heartbeat.update()",
"def index():\n rows=db.execute(\"SELECT * FROM portofolio WHERE user_id=:s\",s=session[\"user_id\"])\n row=db.execute(\"SELECT * FROM users WHERE id=:s\",s=session[\"user_id\"])\n overall=0\n for line in rows:\n overall+=line[\"total\"]\n overall+=row[0][\"cash\"]\n return render_template(\"portofolio.html\",rows=rows,cash=usd(row[0][\"cash\"]),overall=usd(overall))",
"def countProcesses(reportsDir = \"./reports/\", printProcNum = 4, skipLegal=False):\n \n legalProcNum = {}\n if not skipLegal:\n legalFiles = glob.glob(reportsDir+\"/leg/*.json\")\n for i, r in enumerate(legalFiles):\n cls()\n print(\"Legal progress: {0}/{1}\".format(i, len(legalFiles)))\n print(legalProcNum)\n num = numProcs(r)\n try:\n \n legalProcNum[num] += 1\n except KeyError:\n \n legalProcNum[num] = 1\n if num == printProcNum:\n print(\"{0}: {1}\".format(r, num))\n\n maliciousFiles = glob.glob(reportsDir+\"/mal/*.json\")\n malProcNum = {}\n procsToMalFiles = {}\n for i, r in enumerate(maliciousFiles):\n cls()\n print(\"Malicious progress: {0}/{1}\".format(i, len(maliciousFiles)))\n print(malProcNum)\n num = numProcs(r)\n try:\n procsToMalFiles[num].append(r)\n malProcNum[num] += 1\n except KeyError:\n procsToMalFiles[num] = []\n procsToMalFiles[num].append(r)\n malProcNum[num] = 1\n if num == printProcNum:\n print(\"{0}: {1}\".format(r, num))\n \n with open(\"./reports/malprocs.json\", \"w+\") as f:\n json.dump(procsToMalFiles, f, indent=4)\n\n if not skipLegal: \n print(\"In Legal reports: {0}\".format(legalProcNum))\n print(\"In Malicious reports: {0}\".format(malProcNum))\n print(\"Total: {0}\".format({ k: legalProcNum.get(k, 0) + malProcNum.get(k, 0) for k in set(legalProcNum) | set(malProcNum) }))",
"def get_total_pages() -> int:\n items_per_page = 4\n return math.ceil(len(database)/items_per_page)",
"def _DoPageProcessing(self, mr, nonce):\n with mr.profiler.Phase('common request data'):\n self._DoCommonRequestProcessing(self.request, mr)\n self._MaybeRedirectToBrandedDomain(self.request, mr.project_name)\n page_data = self.GatherBaseData(mr, nonce)\n\n with mr.profiler.Phase('page processing'):\n page_data.update(self.GatherPageData(mr))\n page_data.update(mr.form_overrides)\n template_helpers.ExpandLabels(page_data)\n self._RecordVisitTime(mr)\n\n return page_data",
"def iterate_all_reports(title, hikeurl):\n # lists how many reports are on the page\n r = requests.get(hikeurl + '/@@related_tripreport_listing').text\n soup = BeautifulSoup(r, 'lxml')\n numit = math.ceil(float(soup.find('div', {'id': 'count-data'}).text) / 5)\n for i in range(int(numit)):\n get_trail_report(title, hikeurl, params={'b_start:int': str(i * 5)})\n return None",
"def result_page():\n results = session.get('results')\n points = 0\n if results:\n for k,p in results.items():\n points += p['points']\n return render_template('results.html', results=results, points=points),",
"def get_all_data_from_main_table(soup_list):\n year_growth_list_all_pages = []\n\n for i in soup_list:\n year_growth_list_all_pages.append(get_data_from_main_table(i))\n return year_growth_list_all_pages",
"def stats_process():\n nonlocal d_stats, b_status\n log = slog()\n d_stats = self.stats_compute()\n if self.toConsole() or self.args['duf'] or self.args['du']:\n self.dp.qprint(d_stats['report'], level = self.debugLevel)\n slog_filter = filters_show()\n log.title_set('Size statistics')\n if self.args['table3D']: log.render3D()\n log('Total size (raw): %d\\n' % d_stats['totalSize'] )\n log('Total size (friendly): {:,}\\n'.format(d_stats['totalSize']) )\n log('Total size (human): %s\\n' % d_stats['totalSize_human'] )\n log('Total files: %s\\n' % d_stats['files'] )\n log('Total dirs: %s\\n' % d_stats['dirs'] )\n log('Total runtime: %5.3f s' % other.toc() )\n b_status = b_status and d_stats['status']\n return {\n 'status': b_status,\n 'filterLog': slog_filter,\n 'bodyLog': log\n }",
"def GOAL_TOTAL() -> int:\n return 21",
"def compute_bounce_rate(store_pages):\n d = dict()\n for sp in store_pages.iteritems():\n domain = sp[0]\n visitors_list = sp[1]\n \n # count page visited for each visitor\n visit_counts = dict()\n for v in visitors_list:\n if visit_counts.has_key(v):\n visit_counts[v] += 1\n else:\n visit_counts[v] = 1\n\n # count visitors who viewed only one page\n total_single_page_viewers = sum(\n 1 for vc in visit_counts.itervalues() if vc == 1)\n\n total_visits = len(visitors_list)\n\n # calculate bounce rate\n bounce_rate = total_single_page_viewers / float(total_visits)\n d[domain] = (bounce_rate, total_single_page_viewers, total_visits)\n return d",
"def apply_huge_pages(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n hpg = VppHugePageUtil(node)\n hpg.hugepages_dryrun_apply()",
"def utilization(allocs, n):\n assert n <= len(allocs)\n pages = defaultdict(int)\n in_use = 0\n mallocs = set()\n for (index, i) in enumerate(allocs):\n if index > n:\n break\n # Ignore objects larger than 1/2 the page size.\n if i[\"size\"] > Cheaper.__pagesize / 2:\n continue\n pageno = Cheaper.__pagesize * (i[\"address\"] // Cheaper.__pagesize)\n if i[\"action\"] == \"M\":\n mallocs.add(i[\"address\"])\n pages[pageno] += 1\n in_use += i[\"size\"]\n elif i[\"action\"] == \"F\":\n in_use -= i[\"size\"]\n if i[\"address\"] in mallocs:\n mallocs.remove(i[\"address\"])\n pages[pageno] -= 1\n if pages[pageno] == 0:\n del pages[pageno]\n if len(pages) > 0:\n return in_use / (Cheaper.__pagesize * len(pages))\n else:\n return 0",
"def get_pr_totals(articles, ambiguous_forms, uri_pr, skip_nils, ambiguous_only):\n total_per_form=get_freq_totals(articles, ambiguous_forms, skip_nils, ambiguous_only)\n form_pageranks=defaultdict(dict)\n for form, meanings in total_per_form.items():\n if ambiguous_only and form not in ambiguous_forms:\n continue\n #for uri, total in meanings.items():\n #acc_per_form_meaning[system][form][uri]=correct_per_form[form][uri]/total\n for uri in meanings.keys():\n if uri in uri_pr:\n form_pageranks[form][uri]=uri_pr[uri]\n return form_pageranks",
"def collector_numbers(db):\n\n # create indicies to make the important queries much faster.\n db.dbase.execute('CREATE INDEX IF NOT EXISTS cnames ON cards (cardname);')\n db.dbase.execute('CREATE INDEX IF NOT EXISTS pubexp ON published (expansion);')\n db.dbase.execute('CREATE INDEX IF NOT EXISTS pubnames ON published (name);')\n\n # get a cursor of the list of sets in the db\n setlist = db.dbase.execute('SELECT abbreviation FROM sets ORDER BY released')\n\n # HACK -- planar chaos split cards\n for scard in ['Boom // Bust', 'Dead // Gone', 'Rough // Tumble']:\n db.dbase.execute('UPDATE cards SET cn_position = 54 WHERE cards.cardname = ?', (scard,))\n\n for expansion in setlist:\n # special case for time spiral\n if expansion[0] == 'TSP':\n cardlist = db.dbase.execute('SELECT DISTINCT published.name,published.rarity FROM published JOIN cards ON cards.cardname = published.name WHERE published.expansion = ? AND cards.virtual = ? ORDER BY CASE published.rarity WHEN ? THEN 2 ELSE 1 END,cards.cn_position, published.name', (expansion[0], 'No', 'S'))\n\n # other sets\n else:\n cardlist = db.dbase.execute('SELECT DISTINCT published.name FROM published JOIN cards ON cards.cardname = published.name WHERE published.expansion = ? AND cards.virtual = ? ORDER BY cards.cn_position, published.name', (expansion[0], 'No'))\n\n cardnumber = 1\n for card in cardlist:\n db.dbase.execute('UPDATE published SET cnum = ? WHERE published.name = ? AND published.expansion = ?', (cardnumber, card[0], expansion[0]) )\n cardnumber += 1",
"def get_overview_pages(self):\n self.load_website()\n maxNumber = 1\n for pageIndex in self.soup.find_all('div', {'class':'paginate bg-muted'}):\n for link in pageIndex.find_all('a'):\n # try to convert string to number; if error it's not a number\n try:\n number = int(link.text)\n if number > maxNumber:\n maxNumber = number \n except ValueError:\n pass\n print('Screening complete: %d pages found - accessing first %s pages' % (maxNumber, self.maxPages))\n self.pages = [np.arange(1, maxNumber, 1)]",
"def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)",
"def performStats(dataArray):\n yearArray = [[0,0] for i in range(20)]\n for entry in dataArray:\n oSum = 0\n nSum = 0\n for k, v in entry.old.items():\n # print(k,v)\n oSum += v\n for k,v in entry.new.items():\n # print(k,v)\n nSum += v\n entry.oldSum = oSum\n entry.newSum = nSum\n idx = int(entry.year)%20 #0-19 index\n yearArray[idx][0] += entry.oldSum\n yearArray[idx][1] += entry.newSum\n return yearArray",
"def compileQcewStats(lid):\n\n\tregion = re.match('(^[a-zA-Z]+)', basename(lid)).group(1)\n\tlid_rows = selectQcewNearLid(lid, region)\n\n\tlists = defaultdict(list)\n\tcounts = defaultdict(float)\n\tprf_keys = ('NAME', 'NAICS')\n\tagg_keys = ('NAME', 'AVGEMP', 'TOTPAY', 'MEEI')\n\t\n\tfor lr in lid_rows:\n\t\t# non-profit\n\t\tif re.match('82.*|813.*', lr['NAICS']):\n\t\t\tcounts['np_emp'] += lr['AVGEMP']\n\t\t\tcounts['np_pay'] += lr['TOTPAY']\n\n\t\t\tnp_row = OrderedDict((k, lr[k]) for k in prf_keys)\n\t\t\tlists['np'].append(np_row)\n\n\t\t\t# reporting is for multiple locations (aggregated)\n\t\t\tif re.match('.*2.*|.*4.*', lr['MEEI']):\n\t\t\t\tagg_row = OrderedDict((k, lr[k]) for k in agg_keys)\n\t\t\t\tlists['agg'].append(agg_row)\n\t\t# for-profit\n\t\telse:\n\t\t\tcounts['fp_emp'] += lr['AVGEMP']\n\t\t\tcounts['fp_pay'] += lr['TOTPAY']\n\t\t\tcounts['fp_bus'] += 1\n\n\t\t\tp_row = OrderedDict((k, lr[k]) for k in prf_keys)\n\t\t\tlists['fp'].append(p_row)\n\n\t\t\t# reporting is for multiple locations (aggregated)\n\t\t\tif re.match('.*2.*|.*4.*', lr['MEEI']):\n\t\t\t\tcounts['agg_fp_emp'] += lr['AVGEMP']\n\t\t\t\tcounts['agg_fp_pay'] += lr['TOTPAY']\n\t\t\t\tcounts['agg_fp_bus'] += 1\n\n\t\t\t\tagg_row = OrderedDict((k, lr[k]) for k in agg_keys)\n\t\t\t\tlists['agg'].append(agg_row)\n\t\t\t# reporting address used is not physical address\n\t\t\tif lr['ATYPE'] != 'P':\n\t\t\t\tcounts['nph_fp_emp'] += lr['AVGEMP']\n\t\t\t\tcounts['nph_fp_pay'] += lr['TOTPAY']\n\t\t\t\tcounts['nph_fp_bus'] += 1\n\n\t\tcounts['emp'] += lr['AVGEMP']\n\t\tcounts['pay'] += lr['TOTPAY']\n\n\t# write the lists to csv\n\tcsv_template = '{0}_{1}_employers_{2}.csv'\n\tfor grp, l in lists.iteritems():\n\t\tcsv_name = csv_template.format(region, grp, yr)\n\t\tcsv_path = join(csv_dir, csv_name)\n\n\t\twith open(csv_path, 'wb') as csv_file:\n\t\t\tfields = [k for k in l[0]]\n\t\t\twriter = csv.DictWriter(csv_file, fields)\n\t\t\twriter.writeheader()\n\n\t\t\t# sort list (in some cases by multiple fields)\n\t\t\trsort = {('NAICS', 'NAME'): ['fp', 'np'], ('NAME',): ['agg']}\n\t\t\tsort = {i:k for k,v in rsort.iteritems() for i in v}\n\t\t\tfor row in sorted(l, key=lambda d: [d[k] for k in sort[grp]]):\n\t\t\t\twriter.writerow(row)\n\n\t# prep 'opverview' and 'for-profit' stats for writing\n\tstats = {}\n\tov_keys = ('fp_emp', 'np_emp', 'emp', 'fp_pay', 'np_pay', 'pay')\n\toverview = {k: int(counts[k]) for k in ov_keys}\n\tstats['overview'] = {'stats': overview, 'keys': ov_keys}\n\n\tp_emp = counts['fp_emp']\n\tp_pay = counts['fp_pay']\n\tp_bus = counts['fp_bus']\n\tfor_profit = OrderedDict([\n\t\t('for-prof emps', p_emp),\n\t\t('agg for-prof emp pct', \n\t\t\tround(counts['agg_fp_emp'] / p_emp, 4)),\n\t\t('non-phys addr for-prof emp pct', \n\t\t\tround(counts['nph_fp_emp'] / p_emp, 4)),\n\t\t\n\t\t('prof pay', p_pay),\n\t\t('agg for-prof pay pct', \n\t\t\tround(counts['agg_fp_pay'] / p_pay, 4)),\n\t\t('non-phys addr for-prof pay pct', \n\t\t\tround(counts['nph_fp_pay'] / p_pay, 4)),\n\t\t\n\t\t('agg for-prof bus pct', \n\t\t\tround(counts['agg_fp_bus'] / p_bus, 4)), \n\t\t('non-phys addr for-prof bus pct', \n\t\t\tround(counts['nph_fp_bus'] / p_bus, 4))])\n\tfp_keys = [k for k in for_profit]\n\tstats['for_profit'] = {'stats': for_profit, 'keys': fp_keys}\n\n\t# write stats to csv\n\tfor grp, d in stats.iteritems():\n\t\tcsv_name = '{0}_{1}_stats_{2}.csv'.format(region, grp, yr)\n\t\tcsv_path = join(csv_dir, csv_name)\n\t\twith open(csv_path, 'wb') as csv_file:\n\t\t\twriter = csv.DictWriter(csv_file, 
d['keys'])\n\t\t\twriter.writeheader()\n\t\t\twriter.writerow(d['stats'])",
"def main(plot):\n plot_coverage(plot)\n return 0",
"def __update_page_results(self):\n \n pages = []\n\n # Request id for pages associated to search term \n page_fields='page&fields=id,name,username,link'\n term = self.track[self.track_index]\n self.track_index += 1\n \n # Define url for http request to get pages id associated to search term \n page_request_url = 'https://graph.facebook.com/search?q=%s&type=%s&limit=%d&access_token=%s'%(term,page_fields,self.page_lim,self.access_token)\n \n while(True):\n # Try 100 times\n for i in range(100):\n \n page_response = requests.get(page_request_url)\n \n if 'error' in page_response.json() or page_response.status_code <> 200:\n print \"\\n !---- ERROR IN SEARCH REQUEST ----!\"\n print time.ctime()\n print \"Status Code: \", page_response.status_code\n print page_response.json()\n #raise StopIteration()\n time.sleep(1800) # Wait 30 minutes\n else:\n break\n \n page_json = page_response.json()\n pages = pages + page_json['data']\n time.sleep(5)\n \n if 'next' in page_json['paging']:\n page_request_url = page_json['paging']['next']\n else:\n break\n \n print \"Term: %s, Pages: %d\"%(term, len(pages))\n return pages",
"def _populate_totals(t_sheet, n_sheet):\n\n # Perform some trickery to work out overlapping datasets\n # First get the title row from the networks sheet.\n title_row = n_sheet[1]\n # Find out which columns contain WPAv1 and WPAv2 data\n for cell in title_row:\n if cell.value == 'WPA':\n wpa_col = cell.column\n continue\n if cell.value == 'WPA2':\n wpa2_col = cell.column\n # Create a slices containing the data from both columns\n wpa_cells = n_sheet[f'{wpa_col}']\n wpa2_cells = n_sheet[f'{wpa2_col}']\n # zip those slices to have the cells side by side (excluding the first row)\n wpa_wpa2_cells = zip(wpa_cells[1:], wpa2_cells[1:])\n wpa1_only = 0\n wpa2_only = 0\n wpa_and_wpa2 = 0\n for cells in wpa_wpa2_cells:\n if cells[0].value == 1 and cells[1].value == 0:\n wpa1_only += 1\n elif cells[0].value == 0 and cells[1].value == 1:\n wpa2_only += 1\n elif cells[0].value == 1 and cells[1].value == 1:\n wpa_and_wpa2 += 1\n\n data = [\n ['Data Set', 'Totals'],\n ['Hidden Networks', '=SUM(Networks[Hidden])'],\n ['Open Networks', '=SUM(Networks[Open])'],\n ['WEP Networks', '=SUM(Networks[WEP])'],\n ['WPAv1 Only', wpa1_only],\n ['WPAv1 And WPAv2', wpa_and_wpa2],\n ['WPAv2 Only', wpa2_only],\n ['Total WPAv1', '=SUM(Networks[WPA])'],\n ['Total WPAv2', '=SUM(Networks[WPA2])'],\n ['WPS Enabled', '=COUNTIF(Networks[WPS], \"Configured\")'],\n ['TKIP Encryption', '=SUM(Networks[TKIP])'],\n ['AES Encryption', '=SUM(Networks[AES])'],\n ['Total Networks', '=COUNTIF(Networks[BSSID], \"*\")'],\n # ['Total Client Count', '=SUM(Networks[No. Clients])']\n ]\n\n for row in data:\n t_sheet.append(row)",
"def test_total(self):\n # a, fpr, n, y\n args_tuples = [\n (100, 0.01, 10000, 30),\n (20, 0.02, 10000, 20),\n (40, 0.03, 200000, 10),\n (3, 0.3, 1000, 7),\n (9, 0.05, 5000, 399),\n (57, 0.09, 4500, 13),\n ]\n\n for a, fpr, n, y in args_tuples:\n py_tot, py_rows = self.py_search.total(a, fpr, n, y)\n cpp_tot, cpp_rows = params.total(self.cpp_search_ptr, a, fpr, n, y)\n print(f\"py: {py_tot}, {py_rows} | cpp: {cpp_tot}, {cpp_rows}\")\n self.assertAlmostEqual(py_tot, cpp_tot)\n self.assertEqual(py_rows, cpp_rows)",
"def runStats(df):\n\tpass",
"def get_summary_stats(self, output_csv=None):\n\n contig_size_list = []\n\n self.summary_info[\"ncontigs\"] = len(self.contigs)\n\n for contig_id, sequence in self.contigs.items():\n\n logger.debug(\"Processing contig: {}\".format(contig_id))\n\n # Get contig sequence size\n contig_len = len(sequence)\n\n # Add size for average contig size\n contig_size_list.append(contig_len)\n\n # Add to total assembly length\n self.summary_info[\"total_len\"] += contig_len\n\n # Add to average gc\n self.summary_info[\"avg_gc\"].append(\n sum(map(sequence.count, [\"G\", \"C\"])) / contig_len\n )\n\n # Add to missing data\n self.summary_info[\"missing_data\"] += sequence.count(\"N\")\n\n # Get average contig size\n logger.debug(\"Getting average contig size\")\n self.summary_info[\"avg_contig_size\"] = \\\n sum(contig_size_list) / len(contig_size_list)\n\n # Get average gc content\n logger.debug(\"Getting average GC content\")\n self.summary_info[\"avg_gc\"] = \\\n sum(self.summary_info[\"avg_gc\"]) / len(self.summary_info[\"avg_gc\"])\n\n # Get N50\n logger.debug(\"Getting N50\")\n cum_size = 0\n for l in sorted(contig_size_list, reverse=True):\n cum_size += l\n if cum_size >= self.summary_info[\"total_len\"] / 2:\n self.summary_info[\"n50\"] = l\n break\n\n if output_csv:\n logger.debug(\"Writing report to csv\")\n # Write summary info to CSV\n with open(output_csv, \"w\") as fh:\n summary_line = \"{}, {}\\\\n\".format(\n self.sample, \",\".join(\n [str(x) for x in self.summary_info.values()]))\n fh.write(summary_line)",
"def _add_values(self, pre_report, report):\n for f in FIELDS:\n pre_report[f] += report[f]\n\n clats = report.get(\"clats\")\n for i in range(len(clats)):\n value = float(clats[i][1])\n pre_report[\"clats\"][i][1] += value",
"def data_collect(gamertag):\n k_d = []\n for page in range(0, 2):\n # Used an f string literal to apply the input argument and for loop to the weblink which can be used by\n # any gamertag and page number\n page_url = f'http://halotracker.com/h5/games/{gamertag}?page={page}&mode=custom'\n res = requests.get(page_url)\n soup = bs4.BeautifulSoup(res.text, 'lxml')\n for value in soup.select('.game-stat-value'):\n k_d.append(float(value.text.strip()))\n k_d = k_d[1::2]\n return k_d",
"def page_counts(self):\n return 1 + (self.total_count - 1) / self.page_size",
"def scrape_state(page): \n\n top = 'http://environmentclearance.nic.in/'\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find(\"table\", {\"class\" : \"ez1\"})\n rows = table.findAll('tr')\n page = int(table.find('tr', {'class': 'black'}).span.text)\n\n data_page = []\n for row in rows[1:]:\n item = {}\n cols = row.findAll('td')\n\n if len(cols) == 33:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['teshil'] = cols[20].text.strip()\n item['proponent'] = cols[29].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[28].text.strip()\n item['date_tor_submit'] = cols[24].text.strip()\n item['date_ec_submit'] = cols[27].text.strip()\n item['status'] = cols[30].text.strip()\n eia = cols[31].findAll('img', {'src': 'images/eia.png'})\n ph = cols[31].findAll('img', {'src': 'images/pub.png'})\n risk = cols[31].findAll('img', {'src': 'images/Risk.gif'})\n add = cols[31].findAll('img', {'src': 'images/add.png'})\n cl = cols[31].findAll('img', {'src': 'images/coverletter1.jpg'})\n clearance = cols[31].findAll('img', {'src': 'images/ec.png'})\n item['eia_report'] = len(eia)\n item['pub_hearing_report'] = len(ph)\n item['risk_report'] = len(risk)\n item['additiona_report'] = len(add)\n item['cover_letter'] = len(cl)\n item['clearance_report'] = len(clearance)\n\n time_relative_url = cols[32].findAll('a', href = True)[0]['href']\n newurl = str(top + time_relative_url)\n timeline = requests.get(newurl)\n soup_time = BeautifulSoup(timeline.content, 'html.parser')\n table_time = soup_time.findAll('table')[1]\n rows_time = table_time.findAll('tr')\n \n for row_time in rows_time:\n cols_time = row_time.findAll('td')\n \n if len(cols_time) == 9:\n item['timeline_submitted1'] = unidecode(cols_time[0].text.strip())\n item['timeline_query_seiaa2'] = unidecode(cols_time[1].text.strip())\n item['timeline_resubmission3'] = unidecode(cols_time[2].text.strip())\n item['timeline_accept_seiaa4'] = unidecode(cols_time[3].text.strip())\n item['timeline_query_seac5'] = unidecode(cols_time[4].text.strip())\n item['timeline_resubmission6'] = unidecode(cols_time[5].text.strip())\n item['timeline_accept_seac7'] = unidecode(cols_time[6].text.strip())\n item['timeline_forward_seiaa8'] = unidecode(cols_time[7].text.strip())\n item['timeline_ec_granted9'] = unidecode(cols_time[8].text.strip())\n\n data_page.append(item)\n\n return data_page",
"def total_module(level):\n global total_modules\n global total_rods\n for levels in range(1, level+1):\n module_calc(levels)\n total_modules += modules\n total_rods += rods\n pass\n return total_modules, total_rods",
"def calculateTotalPerInterface(self):\n for intf, intfDict in self.activeNow.items():\n for _, bwDict in intfDict.items():\n self.params.setdefault(bwDict['master_intf'], {})\n self.params[bwDict['master_intf']].setdefault('total_allocated', 0)\n self.params[bwDict['master_intf']].setdefault('total_requests', 0)\n rate = self.convertToRate(bwDict['rules'])\n self.params[bwDict['master_intf']]['total_allocated'] += rate[0]\n self.params[bwDict['master_intf']]['total_requests'] += 1\n self.params[bwDict['master_intf']].setdefault(intf, 0)\n self.params[bwDict['master_intf']][intf] += rate[0]",
"def page(self):\n data = super(RunningCountPaginator, self).page()\n try:\n obj_count = len(data[self.collection_name])\n if obj_count:\n obj_count += self.get_offset()\n else:\n obj_count = -1\n data['meta']['running_count'] = obj_count\n del data['meta']['total_count']\n except KeyError:\n pass\n return data",
"def globalpopulation():\n startdate = datetime.datetime.now()\n enddate = startdate - datetime.timedelta(days=30)\n q = db.session.query(\n func.avg(PopSnap.popcount),\n func.strftime('%H', PopSnap.time)\n ).filter(\n PopSnap.time <= startdate,\n PopSnap.time >= enddate\n ).group_by(func.strftime('%H', PopSnap.time)).all()\n counts = [el[0] for el in q] # first piece of each grouped result\n hours = [el[1] for el in q] # second piece of each grouped result\n return render_template('populationstats.html', counts=counts, hours=hours)",
"def total(proportions):\n final = {}\n for i in proportions:\n if i in running_total:\n final[i] = proportions[i] * running_total[i]\n print(final)\n else:\n final[i] = 0\n print(final)\n\n total_sum = sum(final.values())\n return total_sum",
"def compute_daily_pageview_stats(timestamp):\n start, end = get_daily_iterval(timestamp)\n for adomain in AccountDomain.objects.all():\n page_views = list(PageView.objects.filter(adomain=adomain, timestamp__gte=start, timestamp__lt=end, visitor_session__end__isnull=False))\n visitor_sessions = set([p.visitor_session for p in page_views])\n VisitorPageviewDailyStats(**get_domain_page_view_stats(adomain, start, end)).save()\n return start, end",
"def _calc_pages(self, instance, block_size=4096):\n instance_type = instance_types.get_instance_type(\n instance['instance_type_id'])\n return (((int(instance_type['memory_mb']) * 1024) * 1024) / block_size)",
"def get_statistics_percentile(self,table,field):\n dict = {}\n for x in xrange(1,11):\n dict[x] = db.session.execute(\"select statistics_viewCount as percentile from meta order by percentile asc limit 1 OFFSET 19346*\"+str(x)+\"/10-1\").first().percentile"
] | [
"0.5614058",
"0.5522709",
"0.5506824",
"0.5450965",
"0.5391519",
"0.5355411",
"0.531937",
"0.5312551",
"0.5290875",
"0.5287495",
"0.52719283",
"0.5268888",
"0.5259223",
"0.5246753",
"0.5223558",
"0.52189976",
"0.5212078",
"0.51717114",
"0.5152129",
"0.5145579",
"0.51427794",
"0.5131529",
"0.510768",
"0.5098244",
"0.5095091",
"0.5072014",
"0.5052193",
"0.50403297",
"0.5038493",
"0.5036697",
"0.5029198",
"0.50228995",
"0.5014394",
"0.50138676",
"0.50097615",
"0.50070775",
"0.4998505",
"0.49955493",
"0.49903062",
"0.49901482",
"0.49858034",
"0.49772438",
"0.49764922",
"0.49713066",
"0.49662155",
"0.49520877",
"0.4949054",
"0.49411675",
"0.49390844",
"0.49388787",
"0.4935689",
"0.4933276",
"0.49325997",
"0.49314952",
"0.4922445",
"0.4914407",
"0.4913994",
"0.49135277",
"0.49061492",
"0.48967478",
"0.4890915",
"0.4885535",
"0.48831564",
"0.48826608",
"0.48741275",
"0.48713505",
"0.4864973",
"0.4862017",
"0.48599577",
"0.48591605",
"0.4851852",
"0.4848014",
"0.48299238",
"0.48237005",
"0.48163253",
"0.48146862",
"0.4811441",
"0.47996274",
"0.47989774",
"0.47958037",
"0.47945026",
"0.47943214",
"0.47937372",
"0.47929406",
"0.4792101",
"0.47917554",
"0.47885305",
"0.47823697",
"0.47818357",
"0.47771347",
"0.47767946",
"0.477402",
"0.47680417",
"0.4767693",
"0.47632962",
"0.47598603",
"0.47520941",
"0.47488675",
"0.4748018",
"0.47438535"
] | 0.56087923 | 1 |
Iterates through all the PDFs in the given 'from' directory, and moves them to the 'to' directory when they are finished. | def test_batch(from_dir, to_dir, doc_type):
if from_dir[-1] != "/":
from_dir = from_dir + "/"
if to_dir[-1] != "/":
to_dir = to_dir + "/"
os.chdir(from_dir)
for pdf_file in os.listdir(from_dir):
if pdf_file.endswith(".pdf"):
# Appends a row to the csv file "output.csv" with the stats from that particular document
analyze(from_dir, pdf_file, doc_type)
# Moving to the 'to' directory since we're done analyzing it.
destination = to_dir + pdf_file
shutil.move(from_dir + pdf_file, destination) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_dir(self, src_dir, dst_dir):\n self.logger.tree(src_dir)\n for srcpath in self.list_all_files(src_dir):\n dstpath = srcpath.replace(src_dir, dst_dir)\n # TODO: Can we clean up the way we handle relative_path?\n # Relative path is here so that when we print files in the log it\n # shows only the file's path. Should we just pass it to the logger\n # when we create it? Or let the logger figure it out?\n # relative_path = srcpath.replace(src_dir + '/', '')\n self.cur_file = File(srcpath, dstpath, self.logger)\n self.process_file(self.cur_file)",
"def move_files(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n data_files = project_obj.files.all()\n\n for data in data_files:\n working_dir = get_sequencedir(project_obj)\n create_dir(working_dir)\n path = data.file.name.split('/')[-1]\n end_path = os.path.join(working_dir, path)\n\n if file_exists(end_path):\n print(\"File: \", end_path, \" already found. No need to copy.\")\n else:\n try:\n print(\"Copying from %s to %s\" % (data.file.name, end_path))\n shutil.copyfile(data.file.name, end_path)\n # if somehow the user deleted the database files, they are told to restart the database\n except FileNotFoundError:\n print(\"Protected database files have been deleted by the user. Restart the database to continue.\")",
"def from_dir_changed(self):\n text = self.from_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set source_path = (?) where id is 1', text)\n all_files = self.get_all_files_from_path(text, extension='PDF')\n self.pdf_files = self.make_all_files_dictionary(all_files)\n\n if not self.pdf_files:\n return\n\n self.reset_widgets(all=True)\n self.draw_pdf_files()",
"def start():\r\n\r\n total_files = sum([len(files) for r, d, files in os.walk(abs_source_directory)])\r\n total_files_down = total_files\r\n for i in range(total_files, 0, -1):\r\n if i % 10 == 0:\r\n total_files_down = i\r\n break\r\n current_iteration = 0\r\n last_factor = 0\r\n position = 1\r\n print(\"[{0}] {1}/{2}\".format(\" \" * 10, 0, total_files))\r\n for path, dirs, files in os.walk(abs_source_directory):\r\n for file_name in list(filter(lambda x: x.endswith(\".pdf\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(normal_regex, file_source_path)\r\n # Handles normal past-papers\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, matched_groups=found_groups)\r\n except AttributeError:\r\n # Handles music past-papers\r\n if \"Music_\" in file_source_path:\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, music_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n elif \"Exam Pack list of omitted papers and markschemes\" in file_name:\r\n pass\r\n else:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n # Handles mp3 files\r\n for file_name in list(filter(lambda x: x.endswith(\".mp3\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, audio_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n print(\"[{0}] {1}/{2}\".format(\"-\" * 10, total_files, total_files))",
"def do_preprocess(pdf_files):\n\n for pdf_file in pdf_files:\n\n base, ext = os.path.splitext(pdf_file)\n \n create_intermediate_files()\n \n # 1) split a pdf file, a page a pdf\n num_pages = pdfutil.split(os.path.join(cwd, pdf_file), DIR_PAGE)\n\n for i in xrange(1, num_pages + 1):\n\n file = '%04d.pdf' % i\n page_pdf = os.path.join(DIR_PAGE, file)\n \n pdfutil.convert_srgb(page_pdf, DIR_SRGB)\n srgb_pdf = os.path.join(DIR_SRGB, file)\n \n pdfutil.convert_vti(srgb_pdf, DIR_VTI)\n vti_pdf = os.path.join(DIR_VTI, file)\n\n pdfutil.convert_tiff(vti_pdf, DIR_TIFF)\n pdfutil.convert_text(vti_pdf, DIR_TEXT)\n\n # merge background pdf files\n pdfutil.merge_to_single_pdf(DIR_TIFF, DIR_BACK, 'back')\n background_pdf = os.path.join(DIR_BACK, 'back.pdf')\n\n # merge foreground pdf files\n output_text_pdf = '%s_text' % base\n pdfutil.merge_to_single_pdf(DIR_TEXT, DIR_TEXT, output_text_pdf)\n foreground_pdf = os.path.join(DIR_TEXT, output_text_pdf + '.pdf')\n pdfutil.export_by_preview(foreground_pdf)\n\n # merge background and foreground\n merged_pdf = os.path.join(cwd, '%s_merge.pdf' % base)\n pdfutil.merge_text_and_back(foreground_pdf, background_pdf, merged_pdf)\n\n final_pdf = '%s_final' % base\n pdfutil.optimize(merged_pdf, final_pdf)\n final_pdf = os.path.join(cwd, final_pdf + '.pdf')\n\n # aggregate what we want\n for f in (foreground_pdf, final_pdf):\n shutil.move(f, DIR_FINAL)\n \n # clean up unused\n os.unlink(merged_pdf) \n cleanup_intermediate_files()",
"def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")",
"def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def pushDocsFromDir(docDir):\n\tfor i in os.listdir(docDir):\n\t\tif not(i.endswith(\".DS_Store\")):\n\t\t\tif not(docDir.endswith(\"/\")):\n\t\t\t\tfilename = (docDir+\"/\"+i)\n\t\t\telse:\n\t\t\t\tfilename = docDir + i\n\t\t\tpushDocumentToPhone(filename)\n\n\tprint \"Finished pushing files.\"",
"def process_directory(browser, directory):\n\n count = 0\n for pdb in find_pdb_files(directory):\n if run_wrappa(browser, pdb):\n logging.info(\"Processed %s\", pdb)\n count += 1\n logging.info(\"Fully processed %d pdb files\", count)",
"def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()",
"def go(self):\n self.status_label.set('Working')\n self.filelist = []\n numfiles = int(self.spinbox.get())\n for root, dirs, files in os.walk(self.fromdir):\n for name in files:\n #pokud by bylo treba delat filtr na typ souboru tak ho dat sem\n if name[-3:] == \"psd\":\n self.filelist.append(os.path.join(root, name))\n\n self.status_label.set(\"Found %s files\" % len(self.filelist))\n self.tarfilelist = []\n\n if len(self.filelist) < numfiles:\n self.tarfilelist = self.filelist\n else:\n for num in range(numfiles):\n work_item = self.filelist[random.randint(0,\n len(self.filelist) - 1)]\n self.tarfilelist.append(work_item)\n self.filelist.remove(work_item)\n\n for work_file in self.tarfilelist:\n dest_file_name = work_file[len(self.fromdir) + 1:].replace(\n os.path.sep, \"_\")\n print dest_file_name\n shutil.copy(work_file, os.path.join(self.todir, dest_file_name))\n\n return True",
"def map(self, clean=False):\n self.files = {}\n curs = DatabaseManager.Instance().cursor\n if clean:\n curs.execute(\"DELETE FROM DestinationsFilesList WHERE `destinationName`=%s;\", (self.name, ))\n DatabaseManager.Instance().connector.commit()\n for root, directory, files in os.walk(self.path):\n\n for file_ in files:\n path = os.path.join(root, file_)\n relative_path = self.get_relative_path(path)\n if self.filter.test(File(path)):\n fwh = None\n if not clean:\n sql = \"SELECT * FROM DestinationsFilesList WHERE `path`=%s AND `destinationName`=%s LIMIT 1;\"\n curs.execute(sql, (relative_path, self.name))\n res = curs.fetchone()\n # curs.fetchall()\n if res is not None:\n # file already in DB, so use it\n fwh = FileWithHash.from_sql_query(res)\n if fwh is None:\n fwh = FileWithHash(path, self.name, None, relative_path)\n sql2 = \"INSERT INTO DestinationsFilesList (`hash`, `path`, `destinationName`) VALUES(%s, %s, %s);\"\n # self.logger.info(\"%s add: %s\", [self.name, relative_path]\n curs.execute(sql2, (fwh.hash, relative_path, fwh.destination_name))\n DatabaseManager.Instance().connector.commit()\n self.files[fwh.hash] = fwh",
"def backup_parrec():\n \n backup_dir = os.getcwd() + '/rawdata_backup'\n\n if not os.path.exists(backup_dir):\n os.mkdir(backup_dir)\n\n PAR_files = glob.glob('*.PAR')\n REC_files = glob.glob('*.REC')\n\n to_move = zip(PAR_files, REC_files)\n \n for PAR,REC in to_move:\n shutil.move(PAR, backup_dir)\n shutil.move(REC, backup_dir)\n \n print \"Back-up completed for %i files\" % (len(to_move))",
"def cleanup(self):\n\tprint \"clean up on \" + self.dest\n for root, folders, files in os.walk(self.dest):\n for ignore_dir in self.ignore_dirs:\n if ignore_dir in folders:\n folders.remove(ignore_dir)\n\t\t \n for folder in folders:\n backupdir = os.path.join(root,folders)\n sourcedir = bakupdir.replace(destination,source) \n if not os.path.exists(sourcedir):\n trash = backupdir.replace(destination,trash_dir)\n # shutil.move(backupdir, trash)\n print(\"move\",backupdir,\"to\",trash)\n # os.utime(trash, None)\n \n for filename in files:\n checkfile = root + \"/\" + filename\n checkfile = checkfile.replace(self.dest, self.source)\n print(\"checking if \", checkfile, \"exists\")\n if not os.path.exists(checkfile): \n print os.path.join(root,filename)\n\t\t backupfile = checkfile.replace(self.source,self.dest)\n trash = self.trash + checkfile.replace(self.source, \"\")\n # shutil.move(backupfile, trash)\n print(\"move\",backupfile,\"to\",trash)\n # os.utime(trash, None)",
"def prepare_final(fastq_in, refs, aligndir, outdir):\n base = os.path.join(aligndir, os.path.splitext(os.path.basename(fastq_in))[0])\n for ref in refs:\n base_ref = os.path.basename(ref[\"file\"])\n if ref.get(\"chr_dist\", False):\n _copy_galaxy_files(base, base_ref, outdir)\n if ref.get(\"feature_prep\", False):\n _copy_igv_files(base, base_ref, ref[\"file\"], outdir)",
"def movefiles_subjectdirs(sub_dirs, ToProcess):\n \n \n # Create subdirectories\n for subjectDir in sub_dirs:\n os.chdir(subjectDir)\n \n mri_files = glob.glob('*.nii.gz')\n mri_dir_names = []\n \n for mriFile in mri_files:\n split_file = mriFile.split('_')\n from_idx = split_file.index('WIP')\n to_idx = split_file.index('SENSE')\n toAppend = \"_\".join(split_file[from_idx+1:to_idx]) \n mri_dir_names.append(toAppend)\n \n os.mkdir(toAppend)\n shutil.move(mriFile, toAppend)\n \n print \"Created the following subdirs for {0}: \".format(os.path.basename(subjectDir))\n for d in mri_dir_names:\n print d\n print \"\\n\"",
"def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)",
"def process_all_files(src_directory, dst_directory, simon_sez=None):\n error = False\n\n if not os.path.exists(src_directory):\n logger.error(\n \"Directory {0} does not exist. Exiting.\".format(\n src_directory))\n error = True\n\n if not os.access(dst_directory, os.W_OK):\n logger.error(\n \"Destination directory {0} is not writable. Exiting.\".format(\n dst_directory))\n error = True\n\n if error:\n logger.warn(\"Exiting due to errors.\")\n sys.exit(1)\n\n harvester = Harvester(src_directory, metadata_dst_directory=dst_directory)\n filemaps = harvester[\"filemaps\"]\n\n count = 0\n for fm in filemaps.get():\n count += 1\n src_fmd = FileMetadata(os.path.join(src_directory, fm.src_fn))\n if simon_sez:\n logger.info(\n \"Copying metadata from {} ==> {}\".format(\n fm.src_fn, fm.dst_fn))\n src_fmd.copy_metadata(os.path.join(dst_directory, fm.dst_fn))\n else:\n logger.info(\n \"DRY RUN: Copying metadata from {} ==> {}\".format(\n fm.src_fn, fm.dst_fn))\n if count == 0:\n logger.warn(\"No matching files found. Check src and dst.\")",
"def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)",
"def convert_sdf_to_pdbs(vars, gen_folder_path, sdfs_folder_path):\n\n files = []\n\n if os.path.isdir(sdfs_folder_path):\n # so it's a directory, go through the directory and find all the sdf files\n if sdfs_folder_path[-1:] != os.sep:\n sdfs_folder_path = (\n sdfs_folder_path + os.sep\n ) # so add a / to the end of the directory\n\n files.extend(glob.glob(sdfs_folder_path + \"*.sdf\"))\n files.extend(glob.glob(sdfs_folder_path + \"*.SDF\"))\n files = list(set(files))\n if len(files) == 0:\n printout = \"\\nThere are no sdf's to convert to PDB's. There may be an issue with Gypsum.\\n\"\n print(printout)\n raise Exception(printout)\n\n # create a new subfolder if one doesn't already exist. folder will be with\n # the generation and will be titled PDBs pdb_subfolder_path will become\n # the the output folder\n pdb_subfolder_path = gen_folder_path + \"PDBs\" + os.sep\n if not os.path.isdir(pdb_subfolder_path):\n os.makedirs(pdb_subfolder_path)\n\n job_inputs = []\n for file_path in files:\n if \"params\" in file_path:\n continue\n job_inputs.append(tuple([pdb_subfolder_path, file_path]))\n job_inputs = tuple(job_inputs)\n\n # Check that there are .sdf files to test. If not raise Exception\n if len(job_inputs) == 0:\n printout = \"\\n\\nThere are no SDF files were found to convert to PDB. \"\n printout = printout + \"This may be a problem with the Gypsum-DL \"\n printout = printout + \"settings.\\nPlease check that the `--gypsum_timeout_limit` \"\n printout = printout + \"is appropriate relative to the `--gypsum_thoroughness` \"\n printout = printout + \"and `--max_variants_per_compound` parameters.\\n\"\n raise Exception(printout)\n\n # Convert sdf files to pdbs in multithread\n vars[\"parallelizer\"].run(job_inputs, convert_single_sdf_to_pdb)",
"def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)",
"def parse_pdfs():\n # get all of the pdf files in the dir\n pahopdffiles = [f for f in listdir(paho_raw_reports_dir) if isfile(join(paho_raw_reports_dir, f))]\n # set up a list to hold the data for all pdf files\n all_pdf_data = []\n # read in each pdf file\n for pahopdffile in pahopdffiles:\n try:\n logging.info(\"Now attempting to read in: \"+pahopdffile)\n fullfilepath = os.path.join(paho_raw_reports_dir, pahopdffile)\n tables = camelot.read_pdf(fullfilepath)\n # get the pandas dataframe from each pdf\n pdfdataframe = tables[0].df\n # ensure that this is a valid PAHO COVID19 report\n report_keywords = ['Cumulative','COVID-19','Americas'] \n if not all(x in pdfdataframe[0].iloc[0] for x in report_keywords):\n logging.error(pahopdffile+\" was not recognised as a normal PAHO pdf file. Skipping.\")\n continue\n # set up the list to hold the data for this file\n reportdata = []\n # create a variable to store the date of this report\n date = None\n # create a variable to store the last subregion seen\n subregion = None\n # PAHO has different formats for their tables, so we need to check the number of columns in the pdf\n numcolumns = len(pdfdataframe.columns)\n # get the row index for the last country\n lastcountryrowindex = pdfdataframe[1][pdfdataframe[1] == 'Total'].index[0]-1\n for rowindex,rowdata in pdfdataframe.iterrows():\n # set up variables to hold the data for the dict\n country_or_territory_name = None\n confirmed_cases = None\n probable_cases = None\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n if numcolumns == 6:\n # this is the old format that they started with\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].replace('Cumulative suspected and confirmed COVID-19 cases reported by \\ncountries and territories in the Americas, as of ','')\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n if not date:\n raise RuntimeError(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[5]\n # store null data for all other fields that were not present in the old reports\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n elif numcolumns == 9:\n # PAHO added in probable cases\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[2]:\n split_numbers = rowdata[2].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n transmission_type = rowdata[8]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[5]\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[6].replace(\",\",\"\"))\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[8]\n elif numcolumns == 10:\n # PAHO added in country ISO codes and special characters\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(3,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[2] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[2] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[2]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[3]:\n split_numbers = rowdata[3].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n transmission_type = rowdata[9]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[5].replace(\",\",\"\"))\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[6]\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[7].replace(\",\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n if rowdata[9] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[9]\n else:\n logging.error(\"Unrecognised number of columns in the pdf file. Skipping for now.\"+\n \"Check if the report format changed from PAHO.\")\n # if we were at least able to scrape the country or territory name, create a dict and add it to the list\n if country_or_territory_name is not None:\n # set up the dict to store each row of data\n reportdict = collections.OrderedDict()\n # add the values to the dict in the order that we want for the report\n reportdict['date'] = date\n reportdict['country_or_territory_name'] = country_or_territory_name\n reportdict['confirmed_cases'] = confirmed_cases\n reportdict['probable_cases'] = probable_cases\n reportdict['confirmed_deaths'] = confirmed_deaths\n reportdict['probable_deaths'] = probable_deaths\n reportdict['recovered'] = recovered\n reportdict['percentage_increase_confirmed'] = percentage_increase_confirmed\n reportdict['transmission_type'] = transmission_type\n # now add this dict to our list for this report/pdf\n reportdata.append(reportdict)\n # once we are done adding all data for this pdf, add this pdf report to the list of all reports\n # if the reportdata list is not empty\n if reportdata:\n all_pdf_data.append(reportdata)\n logging.info(\"Successfully parsed \"+pahopdffile)\n except Exception as exc:\n logging.exception(\"Problem found while parsing \"+pahopdffile)\n raise\n logging.info(\"Completed parsing all pdfs in folder.\")\n return all_pdf_data",
"def populate(self, db, n_procs=1, continuing=False):\n archives = self.get_file_list()\n\n sf_list = db.select_all(\n 'source_file',\n db.SourceFile.source == self.my_source\n )\n existing_arcs = [sf.name for sf in sf_list]\n\n q = mp.Queue(len(archives))\n wait_list = []\n for archive in archives:\n if continuing and archive in existing_arcs:\n logger.info(\"Skipping %s. Already uploaded.\" % archive)\n continue\n p = mp.Process(\n target=self.process_archive,\n args=(archive, ),\n kwargs={'q': q, },\n daemon=True\n )\n wait_list.append((archive, p))\n\n active_list = []\n\n def start_next_proc():\n if len(wait_list) is not 0:\n archive, proc = wait_list.pop(0)\n proc.start()\n active_list.append((archive, proc))\n\n # Start the processes running\n for _ in range(min(n_procs, len(archives))):\n start_next_proc()\n\n # Monitor the processes while any are still active.\n while len(active_list) is not 0:\n for a, p in [(a, p) for a, p in active_list if not p.is_alive()]:\n if a not in existing_arcs:\n db.insert('source_file', source=self.my_source, name=a)\n active_list.remove((a, p))\n start_next_proc()\n try:\n # This will not block until at least one is done\n label, tr_data, tc_data = q.get_nowait()\n except Exception:\n continue\n logger.info(\"Beginning to upload %s from %s...\" % label)\n self.upload_batch(db, tr_data, tc_data)\n logger.info(\"Finished %s from %s...\" % label)\n time.sleep(0.1)\n\n # Empty the queue.\n while not q.empty():\n try:\n tr_data, tc_data = q.get(timeout=1)\n except Exception:\n break\n self.upload_batch(db, tr_data, tc_data)\n\n return",
"def clean_pdf_dir():\n # Create the pdf directory if it does not exist\n if not os.path.isdir(pdf_dir):\n os.makedirs(pdf_dir)\n return\n\n # Get the pdf files list and remove them\n pdf_files = [f for f in os.listdir(pdf_dir) if f.lower().endswith('pdf')]\n for pdf_name in pdf_files:\n os.remove(os.path.join(pdf_dir, pdf_name))",
"def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)",
"def _sync_directories(from_directory, to_directory):\n if not os.path.exists(to_directory):\n os.mkdir(to_directory)\n for root, dirs, files in os.walk(from_directory):\n to_root = root.replace(from_directory, to_directory)\n for directory in dirs:\n to_child_dir = os.path.join(to_root, directory)\n if not os.path.exists(to_child_dir):\n os.mkdir(to_child_dir)\n for fname in files:\n from_file = os.path.join(root, fname)\n to_file = os.path.join(to_root, fname)\n with open(from_file, 'rb') as a, open(to_file, 'wb') as b:\n b.write(a.read())",
"def MovieScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in movtypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(moviePath, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For Movies'",
"def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)",
"def process_lucidchart_allpages_pdf(pdf_path, outdir, overwrite=False):\n\n if not Path(pdf_path).is_file():\n raise Exception(f\"{pdf_path} must be a regular file\")\n\n if outdir.exists():\n if overwrite:\n shutil.rmtree(outdir)\n else:\n raise Exception(f\"outdir={outdir} must not exist\")\n\n outdir.mkdir()\n\n pdfpages_dir = outdir / \"tmp\"\n\n pdfpages_dir.mkdir()\n\n subprocess.run([\"pdfseparate\", os.path.relpath(pdf_path, pdfpages_dir), 'Page %d.pdf'], cwd=pdfpages_dir, check=True)\n\n zip_path = outdir / \"tmp.zip\"\n with ZipFile(zip_path, mode='w') as pdfpages_zip:\n for page in pdfpages_dir.iterdir():\n pdfpages_zip.write(page, arcname=page.name)\n\n tmpout = outdir / \"tmp_out\"\n tmppaths = process_lucidchart_zip(zip_path, 'pdf', tmpout, overwrite=True)\n\n shutil.rmtree(pdfpages_dir)\n zip_path.unlink()\n\n paths = []\n for p in tmppaths:\n dstpath = outdir / p.name\n p.rename(dstpath)\n paths += [dstpath]\n\n tmpout.rmdir() # should be empty by now\n\n return paths",
"def cleanPDFs(self):\n to_delete = []\n pdfs = self.getPDFList()\n now = datetime.now()\n\n # First we compute the list of files to delete.\n for filename in pdfs:\n filedate = pdfs[filename]\n delta = now - filedate\n\n if delta.seconds > 7200:\n to_delete.append(filename)\n\n existing_files = os.listdir(self.tempdir)\n for filename in to_delete:\n del pdfs[filename]\n if filename in existing_files:\n os.remove('%s/%s' % (self.tempdir,\n filename))\n self.setPDFList(pdfs)\n metadata = self._getMetadata()\n metadata['last_clean'] = now",
"def copy_source_files(self):\n\n LOGGER.info(f'start copying source files')\n count = 0\n for sfp in tqdm(sorted(self.source_fps), disable=self.disable_tqdm):\n try:\n meta = extract_law_meta(sfp)\n nodes = parse_xml_fp(sfp)\n tfp = self.stot(sfp)\n tfp.parent.mkdir(parents=True, exist_ok=True)\n save_law_tree(meta['LawTitle'], nodes, tfp)\n except Exception as e:\n LOGGER.error(f'failed to copy {sfp}: {e}')\n continue\n self.target_fps.add(tfp)\n LOGGER.debug(f'copied {sfp} to {tfp}')\n count += 1\n LOGGER.info(f'copied total {count} source files, now total {len(self.target_fps)} target files exist')",
"def walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n for filename in filenames:\n shutil.move(os.path.join(directory_name, filename),\n os.path.join(directory_name) + '/' + get_fixed_filename(filename))",
"def clean(cls, pdb_object):\n if not cls.computed(pdb_object):\n return\n for successor in cls.successors:\n successor.clean(pdb_object)\n pdb_object.uncomplete(cls.name)\n for file in cls.files(pdb_object):\n file.delete()",
"def moveFiles(self, fids, pid):\n\n f = self.getFileInfo(fids[0])\n if not f or f.package == pid:\n return False\n if not self.getPackageInfo(pid):\n raise PackageDoesNotExists(pid)\n\n # TODO move real files\n\n self.db.moveFiles(f.package, fids, pid)\n\n return True",
"def _processNewDirectory(self, dirpath):\n self._parent.processDirectory(dirpath)",
"def separate(in_file, orig_dir, dest_dir):\n files = set()\n with open(in_file, encoding=\"utf8\") as f:\n for l in f:\n files.add(l.split()[0])\n \n dest = pathlib.Path(dest_dir)\n if not dest.exists():\n dest.mkdir()\n \n for p in pathlib.Path(orig_dir).iterdir():\n if p.stem in files:\n print(\"Moviendo\", p.name)\n p.rename(dest / p.name)",
"def cleanup_cbs(dest_folder):\n backup_folder = r'{dest_folder}\\CbsFix'.format(dest_folder=dest_folder)\n temp_folder = r'{backup_folder}\\Temp'.format(backup_folder=backup_folder)\n os.makedirs(backup_folder, exist_ok=True)\n os.makedirs(temp_folder, exist_ok=True)\n\n # Move files into temp folder\n cbs_path = r'{SYSTEMROOT}\\Logs\\CBS'.format(**global_vars['Env'])\n for entry in os.scandir(cbs_path):\n # CbsPersist files\n if entry.name.lower().startswith('cbspersist'):\n dest_name = r'{}\\{}'.format(temp_folder, entry.name)\n dest_name = non_clobber_rename(dest_name)\n shutil.move(entry.path, dest_name)\n temp_path = r'{SYSTEMROOT}\\Temp'.format(**global_vars['Env'])\n for entry in os.scandir(temp_path):\n # cab_ files\n if entry.name.lower().startswith('cab_'):\n dest_name = r'{}\\{}'.format(temp_folder, entry.name)\n dest_name = non_clobber_rename(dest_name)\n shutil.move(entry.path, dest_name)\n\n # Compress CbsPersist files with 7-Zip\n cmd = [\n global_vars['Tools']['SevenZip'],\n 'a', '-t7z', '-mx=3', '-bso0', '-bse0',\n r'{}\\CbsPersists.7z'.format(backup_folder),\n r'{}\\CbsPersist*'.format(temp_folder)]\n run_program(cmd)",
"def putDir(self, inlocaldir, inirodsdir):\n num=0\n utilities.log.info('putDir: Local tree {} into iRODS tree {}'.format(inlocaldir, inirodsdir))\n for root, dirnames, filenames in os.walk(inlocaldir):\n irodsdir = self.assembleIRODScollectionName(root, inlocaldir, inirodsdir)\n irodsColl = self.createSubCollection(newcollection=irodsdir)\n num += self.putFile(root, irodsColl, filenames)\n utilities.log.info('Copied a total of {} files to iRODS'.format(num))\n utilities.log.info('Finished copying dir {} to {} '.format(inlocaldir,inirodsdir))\n return num",
"def MusicScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in mustypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(musicPath, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For Music'",
"def process_patients(self):\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n for patient in os.listdir(self.root_dir):\n if \".csv\" in patient or \".md\" in patient:\n continue\n patient_pth = os.path.join(self.root_dir, patient)\n out_patient_pth = os.path.join(self.out_dir, patient)\n num_imgs = len(os.listdir(patient_pth)) // 2 # Half the length to exclude mask counts\n img_stack, msk_stack = self._stack_images_masks_flair(patient_pth, patient, num_imgs)\n if not os.path.exists(out_patient_pth):\n os.mkdir(out_patient_pth)\n self._make_slices(img_stack, msk_stack, patient, out_patient_pth)",
"def scrape_pdfs(db):\n process = CrawlerProcess()\n process.crawl(PdfSpider, db=db)\n process.start()",
"def _copy_metadata(from_dir, to_dir):\n if not FLAGS.dry_run:\n tf.io.gfile.makedirs(to_dir)\n for fname in tfds.core.utils.list_info_files(from_dir):\n from_path = os.path.join(from_dir, fname)\n to_path = os.path.join(to_dir, fname)\n logging.info('cp %s %s', from_path, to_path)\n if not FLAGS.dry_run:\n tf.io.gfile.copy(from_path, to_path, overwrite=True)",
"def copy(from_dir: tfds.typing.PathLike, to_dir: tfds.typing.PathLike) -> None:\n for full_name in tfds.core.load.list_full_names():\n from_full_name_dir = os.path.join(from_dir, full_name)\n to_full_name_dir = os.path.join(to_dir, full_name)\n\n # Skip if the dataset isn't generated or that metadata are already copied\n if not tf.io.gfile.exists(from_full_name_dir):\n logging.info('Skipping %s (not found)', from_full_name_dir)\n continue\n if tf.io.gfile.exists(to_full_name_dir) and not FLAGS.overwrite:\n logging.info('Skipping %s (already exists)', to_full_name_dir)\n continue\n\n _copy_metadata(from_dir=from_full_name_dir, to_dir=to_full_name_dir)",
"def poretools_fastq():\n dirs = os.listdir(my_dir)\n for folder in dirs:\n path_to_folder = os.path.join(my_dir, folder)\n subprocess.check_output(\"poretools fastq --type fwd {}//*.fast5 > {}_poretools.fq\"\n .format(path_to_folder, path_to_folder), shell=True)\n print(\"Finished folder {}\".format(folder))\n print(\"Finished extractions of FASTQs.\")",
"def move_files(probs):\r\n path = '../brain_tiny_dataset_class/png/'\r\n for _, _, files in os.walk(path):\r\n for file in files:\r\n # Reads the ID\r\n id = file[3:-4]\r\n try:\r\n # Reads dictionary of probabilities\r\n result = probs[id]\r\n # Moves pictures in 2 folders\r\n if result['epidural'] > 0 or result['intraparenchymal'] > 0 \\\r\n or result['intraventricular'] > 0 or result['subarachnoid'] > 0 \\\r\n or result['subdural'] > 0:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/hemorrhage/' + file)\r\n else:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/healthy/' + file)\r\n except KeyError:\r\n continue",
"def move_from_temp_directory(self):",
"def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths",
"def batchProcessDirectory(self,baseDir,startTeam=1):\n\n import fnmatch\n\n # find all directories containing the target pattern\n resultDirs = {}\n patientNumbers = {}\n for root, dirnames, filenames in os.walk(baseDir):\n resultDirs[root] = []\n for filename in filenames:\n if fnmatch.fnmatch(filename, 'patient*tract_team*.vtk'):\n resultDirs[root].append(os.path.join(root, filename))\n patientNumbers[root] = filename[len('patient'):filename.index('_')]\n\n distanceMatrix = {}\n # calculate results for each pair of files in each directory\n for dir,files in resultDirs.items():\n if len(files) > 0:\n teamCount = len(files) / 2 # left and right per team\n teamRange = range(startTeam,startTeam+teamCount)\n for side in ('left','right'):\n for teamA in teamRange:\n for teamB in teamRange:\n fmt = 'patient%(patient)s_%(side)s_tract_team%(team)d.vtk'\n fileA = fmt % {'patient': patientNumbers[dir], 'side': side, 'team': teamA}\n fileB = fmt % {'patient': patientNumbers[dir], 'side': side, 'team': teamB}\n print (\"Compare %s with %s\" % (fileA, fileB))\n print((os.path.join(dir,fileA),os.path.join(dir,fileB)))\n\n # close the scene and calculate the distance\n slicer.mrmlScene.Clear(0) \n pathA, pathB = os.path.join(dir,fileA),os.path.join(dir,fileB)\n distanceMatrix[dir,side,teamA,teamB] = self.loadAndCalculate(pathA,pathB)\n print('\\n\\n' + str(distanceMatrix.keys()) + '\\n\\n')\n print(distanceMatrix)\n\n # write csv files\n import csv\n header = ['team',]\n for team in teamRange:\n header.append('team_%d' % team)\n for dir in resultDirs.keys():\n print ('checking %s' % dir)\n print (len(resultDirs[dir]))\n if len(resultDirs[dir]) > 0:\n for side in ('left','right'):\n fp = open(os.path.join(dir,\"../distanceMatrix-%s.csv\"%side),'w')\n csvWriter = csv.writer(fp, dialect='excel', quotechar='\"', quoting=csv.QUOTE_ALL)\n csvWriter.writerow(header)\n for teamA in teamRange:\n teamARow = ['team_%d' % teamA,]\n for teamB in teamRange:\n teamARow.append(distanceMatrix[dir,side,teamA,teamB])\n csvWriter.writerow(teamARow)\n fp.close()\n\n return(distanceMatrix)",
"def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0",
"def extract_embeddings_recursive_from_dir(self, dir_from: PathLike, dir_to: PathLike) -> PathLike:\n pass",
"def concat_pdf_pages(files):\n for input_file in files:\n for page in PdfFileReader(input_file).pages:\n yield page",
"def clean(self):\n if self.verbosity:\n self.header(\"Cleaning data files\")\n\n tsv_list = os.listdir(self.tsv_dir)\n\n if self.resume_mode:\n # get finished clean command logs of last update\n prev_cleaned = [\n x.file_name + '.TSV'\n for x in self.log_record.called.filter(\n command='cleancalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} files already cleaned.\".format(len(prev_cleaned)))\n # remove these from tsv_list\n tsv_list = [x for x in tsv_list if x not in prev_cleaned]\n\n # Loop through all the files in the source directory\n if self.verbosity:\n tsv_list = progress.bar(tsv_list)\n for name in tsv_list:\n call_command(\n \"cleancalaccessrawfile\",\n name,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n )",
"def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)",
"def process( self ):\n\t\t\n\t\tprint( self._query[\"header\"], file = self._file )\n\t\tself._file.flush()\n\n\t\tfor root, dirs, files in os.walk(self._directory):\n\t\t\tpath = root.split(os.sep)\n\n\t\t\tif( root.endswith(\"logFiles\") and ( root.find(\"template\") == -1 ) ):\n\t\t\t\tLogProcessor._process_dir(root, self._file_list, self._columns, self._file, self._meta)",
"def _process_relative_to(unpack_root, relative_to):\n if relative_to is None:\n return\n relative_root = unpack_root / relative_to\n if not relative_root.is_dir():\n get_logger().error('Could not find relative_to directory in extracted files: %s',\n relative_to)\n raise ExtractionError()\n for src_path in relative_root.iterdir():\n dest_path = unpack_root / src_path.name\n src_path.rename(dest_path)\n relative_root.rmdir()",
"def clean_files_from_dir(input_dir, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir) #Tạo thư mục\n input_files = os.listdir(input_dir) #trả về một danh sách chứa tên của các mục trong thư mục được cung cấp bởi đường dẫn. Danh sách theo thứ tự tùy ý. Nó không bao gồm các mục đặc biệt '.' và '..' ngay cả khi chúng có trong thư mục.\n for input_file in input_files:\n input_file_path = os.path.join(input_dir, input_file)\n if input_file.startswith('.') or os.path.isdir(input_file_path):\n continue\n output_file_path = os.path.join(output_dir, input_file)\n clean_html_file(input_file_path, output_file_path)",
"def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)",
"def copy_files(dic_param,src_dir,dest_dir):\n \n ### Make subdirectory\n if os.path.exists(dest_dir):\n answer=input('%s exists, want to remove what is inside? (y):\\n'%(dest_dir))\n if answer=='y':\n os.system('rm -rf %s/*pdf'%dest_dir)\n os.system('rm -rf %s/*log'%dest_dir)\n else:\n return\n else:\n os.mkdir(dest_dir)\n \n for id_key in dic_param['MODEL_KEY']:\n list_file=glob.glob(src_dir+'/*%s*'%id_key)\n for file in list_file:\n copy2(file,dest_dir)",
"def __get_files_to_rename(self, directory):\n return [file for file in self.__get_files(directory, \"pdf\") if \"_compress_\" in file]",
"def perform_parse(self):\n # get folder of pdf files\n folder = QFileDialog.getExistingDirectory(\n parent=self.parent(),\n caption='Get folder with PDF documents to parse'\n )\n if folder:\n # get list of fields and patterns\n field_list = self._get_fields()\n # performing parse\n results = make_parse(folder, field_list)\n self.open_result(results)",
"def collect_csv(source_dir, dest_dir):\n source_dir = Path(source_dir)\n dest_dir = Path(dest_dir)\n for csvfile in source_dir.rglob(\"*.csv\"):\n species = normalized_species(csvfile)\n species_dir = dest_dir / species\n species_dir.mkdir(exist_ok=True, parents=True)\n date_time = normalized_datetime(csvfile)\n print(f\"Renaming {csvfile} to {species_dir / (date_time + '.csv')}\")\n csvfile.rename(species_dir / (date_time + \".csv\"))",
"def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)",
"def moveBigFiles(self):\n if not self.bigFilesArea:\n self.logger.info('Moving of big files to a separate volume has not been requested.')\n return\n\n self.logger.info('Moving of big files to a separate volume is requested. Scanning...')\n \n if not os.path.exists(self.bigFilesArea):\n m = 'Cannot shift big files onto inexistent volume: %s' % self.bigFilesArea\n self.logger.error(m)\n return\n \n bigFiles = self.getBigFiles()\n\n if not [val for val in bigFiles.values() if val]:\n self.logger.info('No big files were found, returning.')\n return\n \n placeToDump = createBigFileIO(self.site, self.bigFilesArea, self.workDirs, self.isNightly).getJobDumpLocation(self)\n if not placeToDump:\n m = 'Unable to retrieve location of big files volume. Not moving big files.'\n self.logger.warning(m)\n return\n\n # We have files to move, let's move them\n for bigFileBaseDir, bigFiles in bigFiles.items():\n for bigFile in bigFiles:\n src = bigFile # file\n dst = placeToDump # directory\n self.moveBigFile(src, dst)\n # If big file origin is results path, replace with a soft link\n # to separate big file volume.\n if bigFileBaseDir == self.resPath:\n self.makeReplacementKeepFile(bigFile, placeToDump)",
"def read_files_as_soup(dir):\n for filename in os.listdir(dir):\n print \"processing\", filename,\n # convert local HTML page to a BS object:\n yield BeautifulSoup( open( dir + filename ).read() )",
"def sync_dir(self):\n\n # mark the trajectories that we have seen\n trajectories = os.listdir(self.trajectory_dir)\n \n for trajectory_file in trajectories:\n\n if trajectory_file not in self.seen_trajectories:\n\n created = self.upload_trajectory(trajectory_file)\n self.seen_trajectories.add(trajectory_file)\n\n if created is True:\n print \"Total of %s solved trajectories\" % \\\n SolvedTrajectory.objects.count(), created",
"def __concatonate_files(self, new_file_name, parent_folder):\n\n # make the output directory\n output_file = self.save_directory + \"/\" + new_file_name\n\n # check if save_directory exists\n if not os.path.exists(self.save_directory):\n try:\n # make the directory\n os.makedirs(self.save_directory)\n except PermissionError:\n # if the user is unable to write to this directory, we should not continue\n print(\"You do not have the correct permissions for creating a directory here. Please try again.\")\n exit(-1)\n\n barcode_files = []\n for root, directory, files in os.walk(parent_folder):\n # we need to know where each file is in the barcode folder so we can read data from it\n for name in files:\n barcode_files.append( os.path.join(root, name) )\n\n with open(output_file, 'w') as writer:\n for name in barcode_files:\n with open(name, 'r') as reader:\n for line in reader:\n writer.write(line)",
"def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()",
"def harvest(path):\n harvest_path = os.path.join(path, 'harvested')\n if not os.path.exists(harvest_path):\n os.mkdir(harvest_path)\n\n for name in os.listdir(path):\n if 'harvested' in name:\n continue\n if 'pdfs' in name:\n continue\n try:\n float(name) # should work for 000001111.123123 and 000001111\n except ValueError:\n continue\n\n dst_name = name.split('.')[0] if '.' in name else name\n\n src = os.path.join(path, name, \"%s.pdf\" % name)\n dst = os.path.join(harvest_path, \"%s.pdf\" % dst_name)\n\n if os.path.exists(src):\n yield \"mv %s %s\" % (src, dst)\n\n src = os.path.join(path, name, \"%s_taxa.txt\" % name)\n dst = os.path.join(harvest_path, \"%s.txt\" % dst_name)\n\n if os.path.exists(src):\n yield \"mv %s %s\" % (src, dst)",
"def batchupload(dir_path, upload_type=UPLOAD_TYPE):\n\n # get all pdf files\n for root, dirs, files in os.walk(dir_path.decode('cp950')):\n def is_processed(name):\n result = name.endswith('.pdf')\n postfix_titles = ['done', 'failed']\n\n for postfix_title in postfix_titles:\n result = result and not name.endswith('-%s.pdf' % postfix_title)\n return result\n pdf_files = filter(is_processed, files)\n\n for pdf_file in pdf_files:\n fullpath = os.path.join(root, pdf_file)\n try:\n upload(fullpath, upload_type=upload_type, close_after=True)\n except Exception as e:\n # when looking up FCODE failed, skip to the next case.\n print ('uploading %s failed.' % pdf_file), \n print 'Reason: %s' % e if DEBUG else ''\n if DEBUG:\n raise\n update_filename(fullpath, postfix='failed')\n continue\n update_filename(fullpath)",
"def move_files(origin=''):\n\tpng_file_list = glob.glob(origin+'*png')\n\tif png_file_list != []:\n\t\tif not os.path.exists(origin+'positions-histograms'):\n\t\t\tos.makedirs(origin+'positions-histograms')\n\t\tfor png in png_file_list:\n\t\t\tshutil.move(str(png), origin+'positions-histograms')",
"def add_from_proc(self, proc_dir):\n for dir_name in sorted(os.listdir(proc_dir)):\n if re.match(r'^[0-9]+$', dir_name):\n self.add_from_piddir(os.path.join(proc_dir, dir_name))",
"def wingrep(self):\n for folder, files_ in self.walk():\n listed_files = self.list_appro_files(folder, files_)\n for file_o in self.open_files(listed_files=listed_files):\n self.search_in(file_o)",
"def batch_mover(pattern, directory=None):\n if directory is None:\n directory = Path().cwd()\n\n for i in os.scandir(directory):\n if file_check(pattern, i.name):\n pass\n # shutil.move(i.name, yeah we gotta change a lot here",
"def convert_all_in_bmp(self, path, new_path):\n DbWorker.mkdir(new_path)\n for i in os.listdir(path):\n self.convert_and_save_image(path+'/'+i, new_path)",
"def main():\n\n #Getthefiles\n all_fna_file_path = []\n path_to_all_info = '/Users/gustavotamasco/Google Drive/Shared drives/Projeto MDR KRP/Dados_Sequenciamento/'\n dirpath=os.getcwd()\n os.chdir(path_to_all_info)\n directories = list_directories(path_to_all_info)\n\n '''Genomes'''\n genomes_path = \"{}{}\".format(path_to_all_info,directories[0])\n os.chdir(genomes_path)\n genome_dir = list_directories(genomes_path)\n for organism in genome_dir:\n fna_files = list_files(all_fna_file_path,genomes_path,organism)\n print_status(fna_files)\n\n '''Building a dir of fna files'''\n genomes_fna_path = \"{}genomes_parsnp\".format(dirpath)\n create_genomes_dir(genomes_fna_path)\n os.chdir(genomes_fna_path)\n for file in fna_files:\n move_file(file, genomes_fna_path)\n\n '''Adding extra organism from a different source'''\n klebs = \"/Users/gustavotamasco/mdrkrp/klebs\"\n k_files = list_files_new_source(klebs)\n for k_file in k_files:\n if \".fna\" in k_file:\n final_k_file = \"{}/{}\".format(klebs,k_file)\n move_file(final_k_file, genomes_fna_path)\n\n\n '''Run parsnp'''\n run_parsnp(dirpath, genomes_fna_path)",
"def compile_pdf(directory, filename, move_result_to):\n cmd = ['latexmk', '-pdf', '-dvi-', '-interaction=nonstopmode', filename]\n proc = subprocess.Popen(cmd, cwd=directory)\n proc.communicate()\n\n retcode = proc.returncode\n if not retcode == 0:\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n pdf_name = filename.replace('.tex', '.pdf')\n from_file = os.path.join(directory, pdf_name)\n to_file = os.path.join(move_result_to, pdf_name)\n \n if not os.path.exists(move_result_to):\n os.makedirs(move_result_to)\n \n os.rename(from_file, to_file)",
"def move_gcov(source_dir: str, target_dir: str) -> None:\n for fn in listdir(source_dir):\n if splitext(fn)[1] == '.gcov':\n move(join(source_dir, fn), target_dir)",
"def re_process(self):\n rmtree(self.processed_dir)\n os.makedirs(self.processed_dir)\n self.process()\n\n print('Done!')",
"def update_ccd_dir(self, components: str):\n\n for f in os.listdir(components):\n c = ccd_reader.read_pdb_cif_file(os.path.join(components, f)).component\n self.process_template(c)",
"def move_tracks_to_music_folder(self):\n home = os.path.expanduser(\"~\")\n dest = home + \"/Music/\"\n for each_file, artist in self.past_songs_db_data:\n sub_folder = artist + \"/\" if artist != \"\" else \"\" \n # possible race condition\n if not os.path.exists(dest + sub_folder):\n os.makedirs(dest + sub_folder)\n\n if os.path.isfile(each_file) and \\\n not os.path.isfile(dest + each_file): \n shutil.move(each_file, dest + sub_folder)",
"def _anonymize_files(dicom_directory_in, dicom_directory_out, fields_to_keep):\n\n # Make sure we have absolute paths\n dicom_directory_in = os.path.abspath(dicom_directory_in)\n dicom_directory_out = os.path.abspath(dicom_directory_out)\n\n # looping over all files\n for root, _, file_names in os.walk(dicom_directory_in):\n # New directory\n\n for file_name in file_names:\n # Create instance_UID\n fields_to_keep['SOPInstanceUID'] = pydicom.uid.generate_uid()\n\n dicom_file_in = os.path.join(root, file_name)\n current_dir = root[len(dicom_directory_in) + 1:]\n dicom_file_out = os.path.join(dicom_directory_out, current_dir, file_name)\n if common.is_dicom_file(dicom_file_in):\n logging.info(\"Processing \" + dicom_file_in)\n _anonymize_file(dicom_file_in, dicom_file_out, fields_to_keep)\n else:\n logging.info(\"Skipping \" + dicom_file_in + \", no dicom file\")",
"def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)",
"def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)",
"def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)",
"def process_files(compress, files):\n [compress.add_file(file) for file in files]\n\n compress.execute() # upload files to iLovePDF\n compress.download() # download resultant file\n print(\"Compression saved {}% of disk space.\".format(\n PDFWorkshop.__percentage_storage_saved(compress))\n )\n compress.delete_current_task()",
"def file_move(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path)}\n\n url, params, headers = self.request(\"/fileops/move\", params)\n\n return self.rest_client.POST(url, params, headers)",
"def file_src_dest(self):\n yielded_dests = []\n for mgr_file in reversed(self.manager.contents):\n path = Path(mgr_file)\n for from_path in self.maybe_add_one_path(path):\n stem = from_path.relative_to(path) if path.is_dir() else path.name\n to_path = self.output_files_dir / stem\n resolved = str(to_path.resolve())\n if resolved in yielded_dests: # pragma: no cover\n self.log.debug(\"Already populated\", resolved)\n continue\n yielded_dests += [resolved]\n yield from_path, to_path",
"def open_pdf(directory):\n for sub_folder in os.listdir(directory):\n sub_directory = os.path.join(directory,sub_folder)\n for pdf_file in os.listdir(sub_directory):\n full_path = os.path.join(sub_directory,pdf_file)\n try:\n pdf_content = pdf_to_txt(full_path)\n if isinstance(pdf_content, str) and len(pdf_content) > 1000:\n yield full_path, pdf_content\n else:\n print('No text found, skipping \"{}\"..'.format(pdf_file))\n continue\n except Exception as e:\n print(e)\n print('Failed to parse \"%s\"' % pdf_file)",
"def copy_new_files(self, out_dir, answer_dir, filenames):\n if not answer_dir.exists():\n answer_dir.mkdir(parents=True)\n for filename in filenames:\n fromfile = out_dir / filename\n tofile = answer_dir / filename\n shutil.copyfile(fromfile, tofile)",
"def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)",
"def get_all_pdfs():\n\n return filter(lambda f: fnmatch.fnmatch(f, '*.pdf'), os.listdir(cwd))",
"def run(self):\n for filepage in self.generator:\n print (filepage)\n filepage.touch()",
"def mass_extract(source_directory, target_directory):\n\n import os\n import ZipFile\n\n source_directory = raw_input(\"Where are the zips? \")\n target_directory = raw_input(\"To where do you want to extract the files? \")\n \n if not os.path.exists(source_directory):\n print \"Sorry, that folder doesn't seem to exist.\"\n source_directory = raw_input(\"Where are the zips? \")\n\n if not os.path.exists(target_directory):\n os.mkdir(target_directory)\n \n for path, directory, filename in os.walk(source_directory):\n zip_file = ZipFile.ZipFile(filenames)\n ZipFile.extract(zip_file, target_directory)\n zip_file.close()\n\n print \"Done.\"",
"def process_dir(pool, topdir):\n for root, dirs, files in os.walk(topdir):\n # Not really needed, but makes things consistent.\n dirs.sort()\n files.sort()\n\n for path in files:\n process_file(pool, os.path.join(root, path))",
"def all_pdf_files_in_directory(path):\n return sorted([filename for filename in os.listdir(path) if pdf_file(filename)])",
"def process_m4(args, dirname, names):\n\n global processed_count\n global nonprocessed_count\n\n if len(args) < 2:\n raise Exception(\"in or out path not configured, see example in main()\")\n\n if not args[0] or not args[1]:\n raise Exception(\"in or out path not configured, see example in main()\")\n\n inputdir = args[0]\n outputdir = args[1]\n\n #print \"dir: \" + dirname\n if dirname[-3:] == \"CVS\":\n return\n \n regex = re.compile(\"(.*)(%s)(.*)\" % inputdir)\n mobj = regex.search(dirname)\n if mobj:\n outputdir = outputdir + mobj.group(3)\n else:\n raise Exception(\"no mobj?\")\n \n if not os.path.exists(outputdir):\n os.mkdir(outputdir)\n if verbose_mode:\n print \"Created directory %s\" % outputdir\n \n for name in names:\n path = os.path.join(dirname, name)\n outpath = os.path.join(outputdir, name)\n if os.path.isdir(path):\n continue\n \n if name[-5:] != \".html\":\n cmd = \"%s %s %s\" % (CPPATH, path, outpath)\n ret = os.system(cmd)\n if ret:\n print \"cmd failed: %s\" % cmd\n else:\n nonprocessed_count += 1\n if verbose_mode:\n print \"Added %s\" % outpath\n else:\n cmd = \"%s -P <%s >%s\" % (M4PATH, path, outpath)\n ret = os.system(cmd)\n if ret:\n print \"cmd failed: %s\" % cmd\n else:\n processed_count += 1\n if verbose_mode:\n print \"Processed %s\" % outpath",
"def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))",
"def write_upload_files(self, appstruct):\n \n # Create the directory if it does not exist\n final_dir = \"thumbnails/%s\" % slugify(appstruct[\"serial\"])\n if not os.path.exists(final_dir):\n log.info(\"Make directory: %s\", final_dir)\n os.makedirs(final_dir)\n\n final_file = \"%s/uploaded.pdf\" % final_dir\n file_pointer = appstruct[\"pdf_upload\"][\"fp\"]\n self.single_file_write(file_pointer, final_file)",
"def moveFiles(rootDir):\n\n homedir = os.environ['HOME']\n albumDirec = 'AlbumCoverImages'\n #Check if a directory exists\n if not os.path.isdir(os.path.join(homedir, 'Pictures', albumDirec)):\n print('AlbumCoverImages not found, trying to make...')\n os.makedirs(os.path.join(homedir, 'Pictures', albumDirec))\n \n for root, dirs, files in os.walk(rootDir, topdown=False):\n #print('testtest')\n for name in files:\n \n\n #Find image files, and move them to albumCoverImages\n #For some bullshit reason or statments won't work here, have to\n # parse this out to elif statements, ughhhh...\n \n if '.jpg' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec)))\n \n elif '.png' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.gif' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.pdf' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n\n else:\n try:\n #Use tinytag to get file metadata\n tag = TinyTag.get(os.path.join(root, name))\n artistName = tag.artist\n albumName = tag.album\n \n #TODO: Need to add more conditions\n if isinstance(artistName, str):\n artistName = artistName.replace('/', '_')\n\n elif isinstance(albumName, str):\n albumName.replace('/', '_')\n \n\n #Check if the artists directory exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName)):\n os.makedirs(os.path.join(rootDir, artistName))\n print('{0} directory made!'.format(artistName))\n \n except ValueError:\n print('ValueError with {0}'.format(root+'/'+name))\n continue\n\n except TypeError:\n print('TypeError with {0}'.format(root+'/'+name))\n continue\n\n #Check if the songs album exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName, albumName)):\n os.makedirs(os.path.join(rootDir, artistName, albumName))\n print('{0} directory made!'.format(albumName))\n \n except TypeError:\n print('TypeError with {0}! Look at album directory making.'.format(root+'/'+name))\n continue\n\n #TODO: Check if album is in artist direc, if not, move it\n\n #Check if song is in album, if not move it \n try:\n if os.path.isfile(os.path.join(rootDir, artistName, albumName, name)) == False:\n os.rename(os.path.join(root, name), os.path.join(rootDir, artistName, albumName, name))\n print('{0} moved to {1}!'.format(name, albumName))\n \n except TypeError:\n print('TypeError with file {0}! Look at line song moving'.format(root+'/'+name))\n continue\n \n #TODO: Check if this part works\n except LookupError:\n if (\".jpg\") or (\".png\") or (\".7z\") or (\"README\") or (\".zip\") in name:\n continue\n \n else:\n print('No reader support for {0}'.format(name))\n continue",
"def process_pdf(pdf):\n\n if os.path.exists(legend_images_dir):\n subprocess.call([\"rm\", \"-rf\", legend_images_dir])\n os.makedirs(legend_images_dir)\n\n if os.path.exists(plot_images_dir):\n subprocess.call([\"rm\", \"-rf\", plot_images_dir])\n os.makedirs(plot_images_dir)\n\n if os.path.exists(csv_output_dir):\n subprocess.call([\"rm\", \"-rf\", csv_output_dir])\n os.makedirs(csv_output_dir)\n\n if os.path.exists(pdf_output_dir):\n subprocess.call([\"rm\", \"-rf\", pdf_output_dir])\n os.makedirs(pdf_output_dir)\n\n genImages(pdf)"
] | [
"0.565745",
"0.5612268",
"0.559464",
"0.54912335",
"0.54782933",
"0.5257978",
"0.5249441",
"0.5214788",
"0.5211769",
"0.51883787",
"0.51731247",
"0.5166325",
"0.51459086",
"0.5122646",
"0.51139665",
"0.51016736",
"0.5081973",
"0.50667685",
"0.5055424",
"0.5041548",
"0.5037065",
"0.50297254",
"0.5020584",
"0.5004428",
"0.49841562",
"0.4978255",
"0.49731952",
"0.49707142",
"0.49591392",
"0.49500313",
"0.49466005",
"0.49317536",
"0.49158728",
"0.4912131",
"0.4911828",
"0.4904949",
"0.49031395",
"0.49028885",
"0.4901128",
"0.48870465",
"0.4885047",
"0.48766008",
"0.48589537",
"0.4844475",
"0.48224762",
"0.4810116",
"0.48051316",
"0.47976786",
"0.47888067",
"0.47804308",
"0.47787818",
"0.47771764",
"0.4775708",
"0.47719145",
"0.47714043",
"0.47651777",
"0.47616565",
"0.4760326",
"0.47597092",
"0.47583762",
"0.4756119",
"0.47470847",
"0.47469652",
"0.47341216",
"0.47302967",
"0.47265542",
"0.47242627",
"0.47234407",
"0.47179225",
"0.47163218",
"0.4709229",
"0.46954483",
"0.46944863",
"0.46933466",
"0.46902993",
"0.46800056",
"0.46757606",
"0.46751213",
"0.46681908",
"0.4662189",
"0.4658935",
"0.46577278",
"0.46511176",
"0.46499765",
"0.4644805",
"0.46415854",
"0.46385375",
"0.46380055",
"0.46353146",
"0.4633266",
"0.46270272",
"0.4623616",
"0.46228114",
"0.46166325",
"0.46096906",
"0.4608845",
"0.46022576",
"0.4590636",
"0.4590101",
"0.45861492"
] | 0.5560684 | 3 |
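
The directory-shuffling snippets above keep repeating one idiom: ensure the destination exists, walk or list the source, and move the matching files. As a minimal, self-contained sketch of that idiom (for illustration only; the function name collect_by_suffix and the example paths are hypothetical, not taken from any entry above):

import shutil
from pathlib import Path

def collect_by_suffix(source_dir, dest_dir, suffix=".pdf"):
    """Move every file with the given suffix from source_dir into dest_dir."""
    source, dest = Path(source_dir), Path(dest_dir)
    dest.mkdir(parents=True, exist_ok=True)  # create the destination tree if missing
    for path in source.rglob("*" + suffix):
        if path.is_file():
            shutil.move(str(path), str(dest / path.name))  # last move wins on name clashes

# Usage (hypothetical paths): collect_by_suffix("downloads", "archive/pdfs")
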
Create a new text input instance. colorNames a sequence of strings (each color must start with a different letter) | def __init__(self, colorNames):
self._lengthOfPattern = 0 # will later be queried from the user
self._palette = '' # initials for color choices, e.g., R for red
for color in colorNames:
self._palette += color[0].upper() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, colorNames):\n self._colorOptions = '' # initials for color choices\n for color in colorNames:\n self._colorOptions += color[0].upper()\n # following will be reset when startGame is called\n self._currentTurnNum = self._lengthOfPattern = self._maxNumberOfTurns = 0",
"def mkColor(self, name):\n known_attrs = [ 'font-family', 'font-style', 'font-weight', 'font-size', 'text-decoration', 'color', 'background-color' ]\n stack = []\n color = Color(name)\n for token in self.tokenizer:\n if token.text == \";\":\n stack[0].assert_symbol_name\n if stack[0].text not in known_attrs: raise Exception(\"%d:%d: Unknown color attribute %s\" % (stack[0].line, stack[0].col, stack[0].text))\n stack[1].must_be(\":\")\n stack[2].must_match(\"^\\w\", \"%d:%d: Expected a color attribute value instead of %s\" % (stack[2].line, stack[2].col, stack[2].text))\n color.attrs[stack[0].text] = stack[2].text\n stack = []\n elif token.text == \"}\":\n return color\n else:\n stack += [token]\n raise Exception(\"%d:%d: End-of-file reached while scanning color %s defined here.\" % (name.line, name.col, name.text))",
"def from_string(cls, text_color):\n\n a = 255\n try:\n r, g, b, a = text_color.replace('rgb(', '').replace(')', '').split(',')\n except ValueError:\n r, g, b = text_color.replace('rgb(', '').replace(')', '').split(',')\n\n return cls(int(r), int(g), int(b), int(a))",
"def test_color__name_str_arg(self):\n for name in (\"aquamarine3\", \"AQUAMARINE3\", \"AqUAmArIne3\"):\n color = pygame.Color(name)\n\n self.assertEqual(color.r, 102)\n self.assertEqual(color.g, 205)\n self.assertEqual(color.b, 170)\n self.assertEqual(color.a, 255)",
"def __init__(self, name, color):\n self.name = name\n self.color = color",
"def __init__(self, text, start, end, color, alpha=1):\n self.text = text\n self.start = start\n self.end = end\n self.color = color\n self.alpha = alpha",
"def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))",
"def from_name (name_str):\n if name_str in colour_names:\n return Colour(*colour_names[name_str])\n raise KeyError(\"'%s' is not a recognized colour name\"%name_str)",
"def __init__(self, input_color, location, white_symbol, black_symbol):\n assert isinstance(input_color, Color)\n assert isinstance(location, Location)\n assert isinstance(white_symbol, str)\n assert isinstance(black_symbol, str)\n\n self.color = input_color\n self.location = location\n\n if self.color == color.white:\n self.symbol = white_symbol\n else:\n self.symbol = black_symbol",
"def __init__(self, red=Black.red, green=Black.green, blue=Black.blue):\n self.color = Color(red, green, blue)\n\n self.template = '\\ttextcolor = {textcolor};\\n'",
"def create(data):\n \n # init color\n color = Color(\n color_id = data.get('id'),\n name = data['name'],\n rgb = data['rgb'],\n is_trans = data['is_trans'])\n \n # get external names and IDs\n if 'external_ids' in data:\n for name, value in data['external_ids'].items():\n color.external_names[name] = [n for l in value['ext_descrs'] for n in l]\n color.external_ids[name] = value['ext_ids']\n \n return color",
"def create_color(cls, text_color: int, background_color: int) -> int:\n global _COLOR_COUNTER\n unicurses.init_pair(_COLOR_COUNTER, text_color, background_color)\n color = unicurses.color_pair(_COLOR_COUNTER)\n _COLOR_COUNTER += 1\n return color",
"def _colorstr(self, args):",
"def create_label(self, name, color):\n self.color = color #remove this line not needed\n json = None\n if name and color:\n data = {'name': name, 'color': color.strip('#')}\n # post url data\n # json = json resp\n return json",
"def test__TextInputStyle__name():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)",
"def _makeColor(self, renderer, name, space, color):\n # assemble the arguments\n args = (renderer.literal(value) for value in color)\n # build and return the expression\n return renderer.set(name=name, value=renderer.call(func=space, args=args))",
"def color_text(text, color_name):\n\n if use_color():\n return colorama.Fore.__dict__[color_name.upper()] + text + colorama.Style.RESET_ALL\n else:\n return text",
"def from_str (s):\n try: \n return from_csv(s)\n except Exception: \n pass\n \n try: \n return from_hex(s)\n except Exception: \n pass\n\n try:\n return from_name(s)\n except Exception: \n pass\n\n raise ColourFormatError(\"'%s' is not a recognized colour string\"%s)",
"def _color(self, text, color_name=None, bold=False):\n\n if self.disable_color == True:\n return text\n \n if color_name == None:\n color_name = 'YELLOW'\n\n if color_name in self.colors:\n return '\\033[{0};{1}m{2}\\033[0m'.format(\n int(bold), self.colors.index(color_name) + 30, text)\n\n raise Exception('ERROR: \"{0}\" is not a valid color.\\n'.format(color_name))\n raise Exception('VALID COLORS: {0}.\\n'.format(', '.join(self.colors)))",
"def __selectColorName(self):\n editor = e5App().getObject(\"ViewManager\").activeWindow()\n if editor is None:\n return\n \n if editor.hasSelectedText():\n currColor = editor.selectedText()\n if currColor not in QColor.colorNames():\n E5MessageBox.critical(\n self.__ui,\n self.tr(\"Color String\"),\n self.tr(\n \"\"\"<p>The selected string <b>{0}</b> is not a\"\"\"\n \"\"\" valid color name. Aborting!</p>\"\"\")\n .format(currColor))\n return\n else:\n currColor = \"\"\n \n from ColorString.ColorSelectionDialog import ColorSelectionDialog\n dlg = ColorSelectionDialog(currColor, self.__ui)\n if dlg.exec_() == QDialog.Accepted:\n colorStr = dlg.getColor()\n editor.beginUndoAction()\n if editor.hasSelectedText():\n editor.replaceSelectedText(colorStr)\n else:\n line, index = editor.getCursorPosition()\n editor.insert(colorStr)\n editor.setCursorPosition(line, index + len(colorStr))\n editor.endUndoAction()",
"def NamedColour(*args, **kwargs):\n val = _gdi_.new_NamedColour(*args, **kwargs)\n return val",
"def importColors(colorlist):\n colordict=getColorDict()\n scribus.statusMessage(\"Defining new colors...\")\n scribus.progressTotal(len(colorlist))\n i=0\n for color in colorlist:\n name=color[0]\n c=color[1]\n m=color[2]\n y=color[3]\n k=color[4]\n while colordict.has_key(name):# check if color already exists - then add PREFIX to name\n name = PREFIX+name\n \n scribus.defineColor(name, c, m, y, k)\n i=i+1\n scribus.progressSet(i)",
"def __init__(self, name, hunger, color=\"Green\"):\r\n super().__init__(name, hunger)\r\n self._color = color",
"def text(self, str: str, x: int, y: int, colour: int, /) -> None:",
"def setColors(self, colors, indexes=None):\n colors = np.array(colors, np.float32)\n if indexes is None:\n # Change colors to the whole string\n self.allVertices['rgba'][:] = glm.vec4(colors)\n for item in self.colors:\n item[-1] = colors\n else:\n indexes = np.array(indexes, np.int32)\n assert len(colors) == len(indexes)\n # Adjust indexes\n off = 0\n j = 0\n for i, c in enumerate(self.text):\n if c in self.NO_GLYPH_CHARS:\n off += 1\n if i == indexes[j]:\n if j < len(indexes) - 1:\n j += 1\n break\n continue\n elif i < indexes[j]:\n continue\n else:\n self.allVertices['rgba'][\n 4 * (i - off):4 * (i - off + 1)] = colors[j]\n self.colors[i][-1] = colors[j]\n if j < len(indexes) - 1:\n j += 1\n else:\n break\n self.mesh.update()",
"def color(name):\n\tif name not in colors:\n\t\traise ValueError('Bad color %s' % repr(name))\n\treturn u'§' + colors[name]",
"def create_color(colorstr):\n\ttry:\n\t\treturn pygame.Color(colorstr[:7])\n\texcept TypeError:\n\t\tprint >>sys.stderr, \"Invalid color: \", colorstr[:7]\n\t\treturn pygame.Color(0,0,0,255)",
"def colored (string_, color, attrs):\n return string_",
"def __init__(self,name,value,*args,**kargs):\n color = colors.colorName(value)\n self.input = QtGui.QPushButton(color)\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(color)\n self.connect(self.input,QtCore.SIGNAL(\"clicked()\"),self.setColor)\n self.layout().insertWidget(1,self.input)",
"def class_colors(names):\r\n return {name: (\r\n random.randint(0, 255),\r\n random.randint(0, 255),\r\n random.randint(0, 255)) for name in names}",
"def color(color_name, alpha=1.0):\n global _cmds\n _cmds = (f'color(\"{color_name}\",'\n f'{alpha})\\n') + _cmds",
"def get_color_words():\n color_word_dict = {}\n color_data = csv.reader(open('./color_names.csv'), delimiter=\",\", quotechar='\"')\n\n for row in color_data:\n if row[0] != \"Colour Name\":\n name = row[0].lower()\n family = row[2].lower()\n hex_value = row[3].lower()\n color_word_dict[name] = (hex_value, family)\n return color_word_dict",
"def __init__(self, name, rect, **kwargs):\n self.name = name\n self.rect = pg.Rect(rect)\n self.color = (128, 128, 128)\n self.font = pg.font.SysFont(\"arial\", 12)\n self.text = self.font.render(name, False, pg.Color(\"white\"))\n self.selected_text = self.font.render(name, False, pg.Color(\"black\"))\n self.text_rect = self.text.get_rect(center=self.rect.center)\n self.set_kwargs(kwargs)",
"def create_color_id(cls, text_color: int, background_color: int) -> int:\n global _COLOR_COUNTER\n unicurses.init_pair(_COLOR_COUNTER, text_color, background_color)\n color_id = _COLOR_COUNTER\n _COLOR_COUNTER += 1\n return color_id",
"def colorize(text, color):\n return COLOR_DICT[color] + str(text) + COLOR_DICT['end']",
"def main():\n color_name = input(\"Enter the name of color: \").strip().upper() # strip white spaces. lowercase inputs also work\n max_key_length = max([len(key) for key in NAME_TO_CODE.keys()])\n while color_name != \"\":\n if color_name in NAME_TO_CODE:\n print(\"{:{}} is {}\".format(color_name, max_key_length, NAME_TO_CODE[color_name],))\n else:\n print(\"Invalid color name\")\n color_name = input(\"Enter the name of color: \").strip().upper()",
"def __init__(self, colors=('red', 'blue'), **kwargs):\n self._colors = list(map(mcolors.to_rgba, colors))\n super().__init__(color=self._colors[0], **kwargs)",
"def __init__(self, colors=('red', 'blue'), **kwargs):\n self._colors = list(map(mcolors.to_rgba, colors))\n super().__init__(color=self._colors[0], **kwargs)",
"def __init__(self, color: str, smell: str):\n self.color = color\n self.smell = smell",
"def test_color_str_to_trio(self):\n for case in self.__class__.TEXTS:\n with self.subTest(case=case):\n self.assertEqual(colors.color_str_to_trio(case[0]), case[1])",
"def create_new_text(self, *args, **kw):\n shape_id = self._create('text', args, kw)\n self.variables.shape_ids.append(shape_id)\n canvas_coords = args[0]\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.TEXT, None)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)\n self.variables.current_shape_id = shape_id\n return shape_id",
"def create_color_and_id(cls, text_color: int, background_color: int) -> Tuple[int, int]:\n global _COLOR_COUNTER\n unicurses.init_pair(_COLOR_COUNTER, text_color, background_color)\n color_id = _COLOR_COUNTER\n color = unicurses.color_pair(color_id)\n _COLOR_COUNTER += 1\n return color, color_id",
"def from_text(name, ttl, rdclass, rdtype, *text_rdatas):\n\n return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)",
"def colorText(s, c):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n HEAD = \"\\033[\"\n TAIL = \"m\"\n\n color = \"39;49\"\n lastDifference = 800\n\n for i in COLORS:\n diff = abs(i[0] - c[0]) + abs(i[1] - c[1]) + abs(i[2] - c[2]) #calculates difference to stock color\n if diff < lastDifference:\n lastDifference = diff #chooses closest match\n color = i[3]\n\n return HEAD+color+TAIL+s+COLOR_RESET #color code + string + reset code",
"def parse_color(self):\n begin = self.tokenizer.next()\n begin.must_be('{')\n for name in self.tokenizer:\n if name.text == '}': return\n name.must_match(\"^[A-Za-z]\", \"%d:%d: Expected a color name, got %s instead.\" % (name.line, name.col, name.text))\n midpunct = self.tokenizer.next()\n if midpunct.text == \"{\":\n color = self.mkColor(name)\n if color in self.ColorDefinitions:\n raise Exception(\"%d:%d: Color %s has already been defined.\" % (name.line, name.col, name.text))\n self.ColorDefinitions[name.text] = color\n elif midpunct.text == ':':\n stack = []\n for token in self.tokenizer:\n if token.text == \".\":\n self.OrderedColorMappings += [Mapping(name,stack)]\n break\n elif token.text == \"}\": raise Exception(\"%d:%d: Color section ended while defining mapping for color %s\" % (name.line, name.col, name.text))\n try:\n stack += [ self.GlobalSymbolDict[token.text] ]\n except:\n raise Exception(\"%d:%d: Literal %s does not occur in the grammar\" % (token.line, token.col, token.text))\n \n elif midpunct.text == '}': raise Exception(\"%d:%d: Coloring section ended unexpectedly here.\" % (token.line, token.col))\n else: raise Exception(\"%d:%d: Expected : or {, not %s\" % (midpunct.line, midpunct.col, midpunct.text))\n raise Exception(\"%d:%d: Unexpected end-of-file while scanning color definition section beginning here.\" % (begin.line, begin.col))",
"def _colored(self, text, *color_args):\n if self.allow_colors and color_args:\n return termcolor.colored(text, *color_args)\n return text",
"def label_rgb(colors):\n return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))",
"def __init__(self, label, LEDStrips, colors):\n\n self._label = label\n self._LEDStrips = LEDStrips\n self._colors = colors",
"def ColorsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ColorsLabel(*args)",
"def plt_color_text(colors):\n num_vals = len(colors)\n xvals = [10]*num_vals\n yvals = range(num_vals)\n for idx, (xval, yval, color) in enumerate(zip(xvals, yvals, colors)):\n plt.scatter(xval, yval, s=1000, marker='s', color=color)\n plt.text(xval+.004, yval, color, fontsize=20, va='center')",
"def color_letter(self, letter, lst_labels, plain_text_widget, encrypted_text_widget):\r\n new_letter, txt_encryption = self.simulator_enigma.encrypt_letter(letter)\r\n lst_encryption_letter_stages = [i[-1] for i in txt_encryption.split(\"\\n\")]\r\n lst_encryption_letter_stages.remove(')')\r\n self.simulator_encryption.append((txt_encryption, lst_encryption_letter_stages))\r\n lst_labels[ord(new_letter) - 65].config(bg=\"yellow\")\r\n lst_labels[ord(new_letter) - 65].after(300, lambda: lst_labels[ord(new_letter) -\r\n 65].config(bg=\"khaki\"))\r\n\r\n plain_text_widget.config(state=NORMAL)\r\n plain_text_widget.insert(END, letter)\r\n plain_text_widget.config(state=DISABLED)\r\n encrypted_text_widget.config(state=NORMAL)\r\n encrypted_text_widget.insert(END, new_letter)\r\n encrypted_text_widget.config(state=DISABLED)",
"def __init__(self, text: str) -> None:\n\n super().__init__()\n\n self._width = 0\n self._opacity = 255\n self._sprites = []\n self._text = text\n for index, c in enumerate(text):\n y_offset = 0\n if c in Text.characters:\n if Text.characters[c][1]:\n y_offset = 2\n c = Text.characters[c][0]\n elif c.isupper():\n c = c.lower() + \"_maj\"\n self._sprites.append(\n cocos.sprite.Sprite(pyglet.image.load(PATH + '/assets/img/common/font/{0}.png'.format(c))))\n self._sprites[index].position = self._width, (self._sprites[index].height - 11) / 2 - y_offset\n self._width += self._sprites[index].width\n self.add(self._sprites[index])",
"def test_color__name_str_arg_from_colordict(self):\n for name, values in THECOLORS.items():\n color = pygame.Color(name)\n\n self.assertEqual(color.r, values[0])\n self.assertEqual(color.g, values[1])\n self.assertEqual(color.b, values[2])\n self.assertEqual(color.a, values[3])",
"def __init__(self, name, **properties):\n\n # Initialize the base class.\n apply(qm.fields.TextField.__init__, (self, name, \";\"), properties)",
"def fork_string(name):\n\n return colored(name, 'cyan')",
"def single_color_func(word=None, font_size=None, position=None,\n orientation=None, font_path=None, random_state=None):\n if random_state is None:\n random_state = Random()\n r, g, b = colorsys.hsv_to_rgb(h, s, random_state.uniform(0.2, 1))\n return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max,\n b * rgb_max)",
"def text_to_qcolor(text):\r\n color = QColor()\r\n text = str(text)\r\n if not is_text_string(text):\r\n return color\r\n if text.startswith('#') and len(text)==7:\r\n correct = '#0123456789abcdef'\r\n for char in text:\r\n if char.lower() not in correct:\r\n return color\r\n elif text not in list(QColor.colorNames()):\r\n return color\r\n color.setNamedColor(text)\r\n return color",
"def __init__(self, *rgb):\n self.alpha = 255\n if len(rgb) == 1:\n\t #Accept a string in the hext fromat made by color_rgb func.\n\t if isinstance(rgb[0],str):\n self.rgb = rgb_color(rgb[0])\n\t else:\n self.rgb=rgb[0]\n elif len(rgb) == 3:\n self.rgb = rgb\n elif len(rgb) == 4:\n self.rgb = rgb[:-1]\n self.alpha = rgb[-1]\n else:\n raise AttributeError, \"invalid arguments to Color(); needs at least 3 integers: red, green, blue (transparency optional)\"\n self.rgb = map(lambda v: int(max(min(v,255),0)), self.rgb)",
"def anyTextToColor(self, mystr, r=None):\n\n if len(mystr) < 3:\n # pad up with zeros\n while len(mystr) % 3 != 0:\n mystr += \"0\"\n\n i = 0\n sum1 = 0\n sum2 = 0\n sum3 = 0\n for c in mystr:\n if i % 3 == 0:\n sum1 += int( str(ord(c)) + str(i)[::-1])\n if i % 3 == 1:\n sum2 += int(str(ord(c)) + str(i)[::-1])\n if i % 3 == 2:\n sum3 += int(str(ord(c)) + str(i)[::-1])\n i += 1\n\n x1 = sum1 % 255\n x2 = sum2 % 255\n x3 = sum3 % 255\n\n if r is not None:\n x1 = r\n\n # if we wants to force a shade of green\n # x2 = 255\n\n outstr = \"%x%x%x\" % (x1, x2, x3)\n\n while len(outstr) < 6:\n outstr += \"a\"\n\n return outstr",
"def __init__(self, text, font, pos, color=(255, 255, 255)):\r\n self.pos = pos\r\n self.label = font.render(text, 1, color)",
"def __createStyleFromString(self, string):\n\n matches = re.findall(r\"([^=]+)=([^;]+)(;|$)\", str(string).lower());\n if not matches :\n return False;\n\n\n style = OutputFormatterStyle();\n for match in matches:\n if ('fg' == match[0]) :\n style.setForeground(match[1]);\n elif ('bg' == match[0]) :\n style.setBackground(match[1]);\n else :\n style.setOption(match[1]);\n\n\n\n return style;",
"def test_color__html_str_arg(self):\n # See test_webstyle() for related tests.\n color = pygame.Color(\"#a1B2c3D4\")\n\n self.assertEqual(color.r, 0xA1)\n self.assertEqual(color.g, 0xB2)\n self.assertEqual(color.b, 0xC3)\n self.assertEqual(color.a, 0xD4)",
"def __init__(self, name, colour, mass, system):\n self.name = name\n self.colour = colour\n self.mass = mass\n self.system = system",
"def colorize(**kwargs) -> str:\r\n # Get text\r\n try:\r\n text = kwargs[\"text\"]\r\n if not isinstance(text, str):\r\n raise TypeError(\"'text' has to be of type 'str'\")\r\n except KeyError:\r\n raise TypeError(\"You need to give a text!\")\r\n\r\n # Get color\r\n try:\r\n color = kwargs[\"color\"]\r\n except KeyError:\r\n color = None\r\n\r\n # Get background or bg (short for background)\r\n try:\r\n bg = kwargs[\"background\"]\r\n except KeyError:\r\n try:\r\n bg = kwargs[\"bg\"]\r\n except KeyError:\r\n bg = None\r\n\r\n # Get bold\r\n try:\r\n bold = kwargs[\"bold\"]\r\n if not isinstance(bold, bool):\r\n raise TypeError(\"'bold' has to be of type 'bool'!\")\r\n except KeyError:\r\n bold = False\r\n\r\n # Get underline\r\n try:\r\n underline = kwargs[\"underline\"]\r\n if not isinstance(underline, bool):\r\n raise TypeError(\"'underline' has to be of type 'bool'!\")\r\n except KeyError:\r\n underline = False\r\n\r\n # Get start\r\n try:\r\n start = int(kwargs[\"start\"])\r\n except KeyError:\r\n start = 0\r\n except TypeError:\r\n raise TypeError(\"'start' has to be of type 'int'!\") from None\r\n # Get end\r\n try:\r\n end = int(kwargs[\"end\"])\r\n except KeyError:\r\n end = len(text)\r\n except TypeError:\r\n raise TypeError(\"'end' has to be of type 'int'!\") from None\r\n\r\n ret = text[:start]\r\n if color:\r\n try:\r\n ret += colors.__getattribute__(color.upper())\r\n except AttributeError:\r\n raise ColorNotFoundError(color.lower()) from None\r\n\r\n if bg:\r\n try:\r\n ret += background_colors.__getattribute__(bg.upper())\r\n except AttributeError:\r\n raise BackgroundColorNotFound(bg.lower()) from None\r\n\r\n if bold:\r\n ret += Special.BOLD\r\n if underline:\r\n ret += Special.UNDERLINE\r\n\r\n ret += text[start:end]\r\n ret += Special.ENDC\r\n ret += text[end:]\r\n\r\n return ret",
"def color_from(color_value, name=None):\n default_error_message = 'A color should be given as a tuple or string.'\n\n if not isinstance(color_value, (str, tuple)):\n raise InvalidColorRepresentationError(color_value,\n default_error_message)\n\n if isinstance(color_value, tuple):\n return _color_from_rgb(color_value, name)\n\n return _color_from_str(color_value, name)",
"def _create_color_lot(color_names, color_subnames, color_dict_rgb):\n lot = {}\n i = 0\n for sn in np.arange(len(color_subnames)):\n for n in np.arange(len(color_names)):\n lot[i] = color_dict_rgb[color_names[n]][color_subnames[sn]]\n i += 1\n\n return lot",
"def __init__(self, edit: QtWidgets.QTextEdit, out=None, color=None):\n self.edit = edit\n self.out = out\n self.color = color",
"def __init__(self, center, text, color_map=None, width=None, height=50, font=None):\n self.name = text\n if color_map is None:\n color_map = [colors['BGCOLOR'], colors['DARKGREEN']]\n if font is None:\n font = pygame_font.Font('src/Cubellan.ttf', 16)\n if width is None:\n width = int((len(text) ** 0.8) * FONT_TO_PIXEL_FACTOR * 1.5)\n PygameObj.__init__(self, center, width, height, color_map,\n [{'type': 'rect',\n 'color': None,\n 'settings': {\n 'center': [0, 0],\n 'width': width,\n 'height': height}}],\n text, Font=font)",
"def group_name(name):\n\n return colored(name, attrs=['bold', 'underline'])",
"def get_color_names(self, format_string):\n names = set()\n # Tokenize the format string and process them\n for token in self.tokens(format_string):\n if token.group(\"command\"):\n name = dict(parse_qsl(token.group(\"command\"))).get(\"color\")\n if (\n not name\n or name in COLOR_NAMES_EXCLUDED\n or name in COLOR_NAMES\n or name[0] == \"#\"\n ):\n continue\n names.add(name)\n return names",
"def __init__(self, name, color, rider):\n self.name = name\n self.color = color\n self.rider = rider\n self.eat_count = 0",
"def __init__(self, font, color, text=\"\", top=0, left=0, bottom=None, right=None):\n self.text = text\n self.font = font\n self.color = color\n self.top = top\n self.left = left\n self.bottom = bottom\n self.right = right\n self.renderLabel()",
"def __init__(self, diffuse=RGB(1,1,1), Kd=1.0, specular=RGB(1,1,1), Ks=0.0, \n shininess=8.0, Kt=0.0, ior=1.0, name=None):\n \n if name is None:\n name = \"Material %d\" % Material._num_materials\n \n Material._num_materials += 1\n \n self.name = name\n self.diffuse = diffuse\n self.Kd = Kd\n self.specular = specular\n self.Ks = Ks\n self.shininess = shininess\n self.Kt = Kt\n self.ior = ior",
"def color_text(txt, foreground=PALETTE['white'], background=PALETTE['black']):\n if isinstance(foreground, str) and foreground.startswith('#'):\n foreground = hex_to_rgb(foreground)\n if isinstance(background, str) and background.startswith('#'):\n background = hex_to_rgb(background)\n return '{}{}{}{}'.format(_f(*foreground), _b(*background), txt, _r())",
"def _color(self, args):",
"def __init__(self,name,value,*args,**kargs):\n self._is_string_ = type(value) == str\n self._plain = kargs.get('plain',False)\n self.input = QtGui.QTextEdit()\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(value)\n self.layout().insertWidget(1,self.input)",
"def colour_titles(self):\n self.processed = []\n for colour_cnt, title_colour in enumerate(self.title_colours):\n self.row_cnt = 1\n self.word = self.titles[colour_cnt]\n self.colour = title_colour\n self.colour_specific_word()",
"def colorful_text(text, color=Fore.RESET):\n return color + text + Fore.RESET",
"def __init__(self, month, day, name, color_code):\n self.color_code = color_code\n self.day = day\n self.month = month\n self.name = name",
"def colorname(line):\n strline = line.split('\\t')\n\n # get color name and hex\n clname = unidecode.unidecode(strline[0])\n clname = re.sub(BAD_CHARS, '', clname, 0, re.MULTILINE | re.IGNORECASE)\n clname = clname.lower()\n\n hexcol = strline[1].replace('#', '')\n return (clname, hexcol.upper(), strline[0])",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)",
"def __init__(self, colour):\n self.colour = colour\n self.name = \"Player\"",
"def __init__(self, text, start, end, color, start_marker=None, end_marker=None):\n super().__init__(text, start, end, color)\n self.start_marker = start_marker\n self.end_marker = end_marker",
"def __init__(self, name, color, size = 1):\r\n self._name = name\r\n self._color = color\r\n self._size = size\r\n if size == 1:\r\n self._money = 10\r\n elif size == 2:\r\n self._money = 40\r\n elif size == 3:\r\n self._money = 100",
"def load_colors():\n\n print \"Color\"\n\n for key, value in css3_hex_to_names.items():\n color_hex, color_name = key, value\n color = Color(color_hex=color_hex,\n color_name=color_name)\n\n db.session.add(color)\n\n # Once we're done, we should commit our work\n db.session.commit()",
"def XCAFDoc_DocumentTool_ColorsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ColorsLabel(*args)",
"def _proc_color(self, tokens):\n\n keys = tokens.keys()\n if \"red\" in keys: # RGB(A)\n rr, gg, bb = tokens[\"red\"], tokens[\"green\"], tokens[\"blue\"]\n hex2int = lambda h: int(h, 16)\n if \"alpha\" in keys:\n a = tokens[\"alpha\"]\n c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))\n else:\n c = str((hex2int(rr), hex2int(gg), hex2int(bb)))\n elif \"hue\" in keys: # HSV\n r, g, b = hsv_to_rgb(tokens[\"hue\"],\n tokens[\"saturation\"],\n tokens[\"value\"])\n c = str((int(r*255), int(g*255), int(b*255)))\n else:\n c = tokens[\"color\"]\n\n return c",
"def from_array(img):\n img = np.array(img)\n return Technicolor(img)",
"def add_labels(\n self,\n data,\n *,\n num_colors=50,\n properties=None,\n color=None,\n seed=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n multiscale=None,\n ) -> layers.Labels:\n layer = layers.Labels(\n data,\n num_colors=num_colors,\n properties=properties,\n color=color,\n seed=seed,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n multiscale=multiscale,\n )\n self.add_layer(layer)\n return layer",
"def __init__(self, name, coords, colorspace='rgb'):\r\n self.Name = name\r\n\r\n if isinstance(coords, str): # assume is hex format\r\n self.Coords = rgb_tuple_to_hsv(string_to_rgb(coords))\r\n elif colorspace == 'rgb':\r\n self.Coords = rgb_tuple_to_hsv(tuple(coords))\r\n elif colorspace == 'hsv':\r\n self.Coords = tuple(coords)\r\n else:\r\n raise ValueError(\r\n \"Unknown colorspace %s: valid values are rgb, hsv\" %\r\n colorspace)",
"def textCurves(*args, font: AnyStr=\"\", name: AnyStr=\"\", object: bool=True, text: AnyStr=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def set_color(objname, rgb):\r\n return f'\\ncmd.set_color(\"{objname}\", {(rgb[0], rgb[1], rgb[2])})'"
] | [
"0.6373945",
"0.61426556",
"0.6063771",
"0.60201144",
"0.5951335",
"0.5880087",
"0.58047354",
"0.5626059",
"0.56203663",
"0.55802214",
"0.55743384",
"0.556991",
"0.5532483",
"0.5503226",
"0.5380365",
"0.5372865",
"0.53544044",
"0.53481925",
"0.5348108",
"0.5277353",
"0.5235065",
"0.5209213",
"0.51882446",
"0.51632047",
"0.51484644",
"0.5142133",
"0.51390916",
"0.51094925",
"0.5097588",
"0.50815237",
"0.5077487",
"0.50600386",
"0.50434697",
"0.50086576",
"0.5007014",
"0.49954554",
"0.49930087",
"0.49930087",
"0.49734285",
"0.49631175",
"0.49557757",
"0.49461302",
"0.4931178",
"0.4930089",
"0.49274653",
"0.49079818",
"0.49017742",
"0.48951975",
"0.48849258",
"0.48781538",
"0.48773092",
"0.4874601",
"0.4874408",
"0.48630285",
"0.4860634",
"0.48573834",
"0.48540726",
"0.4851724",
"0.48498607",
"0.4841804",
"0.48118675",
"0.48087332",
"0.48081237",
"0.48062563",
"0.48045716",
"0.48036802",
"0.47967896",
"0.4796172",
"0.47953302",
"0.4789068",
"0.47758982",
"0.4767785",
"0.47673693",
"0.47666746",
"0.4763528",
"0.4757063",
"0.47509053",
"0.4746012",
"0.47328418",
"0.47252202",
"0.47229335",
"0.47229335",
"0.47229335",
"0.47229335",
"0.47229335",
"0.47229335",
"0.47229335",
"0.47229335",
"0.47229335",
"0.47228718",
"0.47110653",
"0.47033784",
"0.47017893",
"0.4700554",
"0.46929738",
"0.46860987",
"0.46676674",
"0.46673158",
"0.46607396",
"0.4656761"
] | 0.7074272 | 0 |
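
For reference, the palette construction in the record above can be exercised on its own. The sketch below restates it as a standalone function and adds the uniqueness check implied by the docstring ("each color must start with a different letter"); the function name build_palette and the example color list are assumptions for illustration, not part of the original class.

def build_palette(color_names):
    """Return the uppercase initials of the given color names, e.g. 'RGBY'."""
    initials = ''.join(name[0].upper() for name in color_names)
    if len(set(initials)) != len(initials):
        # the docstring's precondition: each initial must be unambiguous
        raise ValueError('each color must start with a different letter')
    return initials

# Usage: build_palette(['Red', 'Green', 'Blue', 'Yellow']) returns 'RGBY'
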
Robustly prompt the user for an integer from small to large. | def _readInt(self, prompt, small, large):
prompt = prompt + ' (from ' + str(small) + ' to ' + str(large) + ')? '
answer = small - 1 # intentionally invalid
while not small <= answer <= large:
try:
answer = int(raw_input(prompt))
if not small <= answer <= large:
print 'Integer must be from '+str(small)+' to '+str(large)+'.'
except ValueError:
print 'That is not a valid integer.'
return answer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')",
"def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. Try again.\")\n continue\n else:\n break\n\n return value",
"def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))",
"def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)",
"def ask_number (question,low,high):\n response = None\n while response not in range(low,high):\n response = int(input(question))\n return response",
"def get_integer_entry(prompt=\"0\", text=\"Input integer value\"):\n while True:\n data = input(\"{} [{}]:\".format(text, prompt))\n if data == \"\":\n data = prompt\n try:\n return abs(int(data))\n except ValueError as e:\n if debug: print(\"Value Error: {}\".format(e))\n continue",
"def enterInteger(CustomMessage=\"Please enter an integer: \",\r\n CustomErrorMessage=\"The input is not an integer, please try again...\",\r\n min=None, max=None):\r\n \r\n isInteger = False\r\n while not isInteger:\r\n try:\r\n number = int(input(CustomMessage))\r\n isInteger = True\r\n except ValueError:\r\n print(CustomErrorMessage)\r\n\r\n # range parameter\r\n if type(min) is int and type(max) is int:\r\n if min > max:\r\n raise ValueError(\"parameter 'min' is larger than 'max'\")\r\n else:\r\n while min > number or number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number within \"+str(min)+\" to \"+str(max)+\": \")\r\n elif type(min) is int:\r\n while min > number:\r\n number = enterInteger(CustomMessage=\"Please input a number larger than \" + str(min) + \": \")\r\n elif type(max) is int:\r\n while number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number smaller than \" + str(max) + \": \")\r\n\r\n return number",
"def get_raw_input() -> int:\n return int(input(\"> \"))",
"def get_positive_int(prompt):\n while True:\n n = int(input(prompt), 10)\n if n > 0:\n break\n \n return n",
"def Demo():\n print(\"Users input:\", GetInteger())\n print(\"Users input:\", GetInteger(lowerbound=-3, upperbound=10))\n input(\"Please press <Enter> to exit the demo.\")",
"def ask_number(question, low, high):\n response = None\n while response not in range (low, high):\n response = int(input(question))\n return response",
"def prompt_number(prompt, low_limit = 1, high_limit = 65535):\n while True:\n try:\n response = int(prompt_base(prompt))\n if low_limit <= response <= high_limit:\n return response\n except:\n pass",
"def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response",
"def _int_input_in_range(self, print_out, range_):\n try:\n i = int(input(print_out))\n assert range_[0] <= i <= range_[1]\n return i\n except AssertionError:\n print('Please, enter a vaild number')\n return None\n except ValueError:\n print('Please, enter a number not a string')\n return None",
"def secure_input(self, minimum, maximum):\n wrong_input = True\n while wrong_input:\n while True:\n try:\n choice = int(input())\n break\n except ValueError:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n if choice < minimum or choice > maximum:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n else:\n wrong_input = False\n return choice",
"def get_num(*, prompt='Number? '):\n num = 0\n while True:\n try:\n num = int(input(prompt))\n except ValueError:\n print('Was that a number? Try again!')\n continue\n else:\n break\n return num",
"def useti(self, prompt=None, default=None):\n \n i = 0\n abak = copy(default) # Backup our default value\n\n a = abak\n while(i<self.maxTries):\n tmp = self.uset(prompt,default)\n try:\n a = float(tmp)\n a = int(a)\n i = self.maxTries # preload failure\n except:\n # Print warning\n print\n print \" WARNING: Invalid Entry. Please enter an integer!!\"\n print \n # reload the default\n a = abak\n i = i+1\n \n return(a)",
"def get_int(self):\n while True:\n try:\n choice = int(input(\"Choose: \"))\n if 1 <= choice <= len(self.menu):\n return choice\n print(\"Invalid choice.\")\n except (NameError,ValueError, TypeError,SyntaxError):\n print(\"That was not a number, genious.... :(\")",
"def get_user_input(prompt):\n while True:\n user_input = input(prompt)\n try:\n tmp = int(user_input)\n return tmp\n except ValueError:\n print('Not a number')",
"def get_num(prompt='Number? '):\n _num = 0\n while True:\n try:\n _num = int(input(prompt))\n except ValueError:\n print('Was that a number? Try again!')\n continue\n else:\n break\n return _num",
"def ask_number(low, high, tries):\n the_number = None\n while the_number not in range(low, high):\n the_number = int(input(\"Enter a number between 1-100: \"))\n return the_number\n print(\"The computer has\", tries, \"tries to guess your number\\n\")",
"def sanitized_int_input(s: str) -> int:\n\n v = input(s)\n if is_convertible_to_int(v):\n return int(v)\n else:\n print(\"There was an error, please enter a number.\")\n return sanitized_int_input(s)",
"def getNumber(prompt):\n output = input(prompt)\n if output.lower() == 'exit':\n return -1\n while output.isdigit() == False or int(output) > 9 or int(output) < 1:\n output = input(prompt)\n return int(output)",
"def check_user_input_if_integer(user_input):\n integer_input = ''\n while not integer_input:\n try:\n integer_input = int(user_input)\n except ValueError:\n logging.warn('only integer number accepted')\n user_input = input('enter a number: ')\n\n return integer_input",
"def user_selection(num, text):\n lst = list(range(1,num+1))\n answer= 0\n while answer not in lst:\n try:\n answer = int(input(text))\n \n if answer not in range(1,num+1):\n raise ValueError\n break\n except ValueError:\n print('Select a valid Number')\n\n return answer",
"def get_int():\n\twhile True:\n\t\ttry:\n\t\t\tX = int(raw_input())\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint \"Could not convert input to integer\"\n\t\t\tcontinue\n\treturn X",
"def get_employee_input_int(message):\n while True:\n user_input = input('{}: '.format(message))\n\n # Type validation\n try:\n number = int(user_input)\n break\n except ValueError:\n print('You must enter a whole number.')\n continue\n\n #Range Validation\n # if valid_range and number not in valid_range:\n # _min = min(valid_range)\n # _max = max(valid_range)\n # print('You must enter a number from {} to {}.'.format(_min, _max))\n # continue\n return number",
"def get_number_input(msg=\"Provide a number: \", num_type=int):\n while True:\n try:\n num = num_type(input(msg))\n except ValueError:\n print(f\"Whoops!! Please enter a correct number of {num_type}!!\")\n continue\n else:\n print(\"Number accepted!!\")\n return num",
"def user_choice():\n number_choice=50 #for enter in a loop\n while number_choice < 0 or number_choice > 49:\n try:\n number_choice=int(input(\"enter number between 0 and 49 :\")) #ask user a number and convert it in integer\n except ValueError: # if number_choice not a number\n print(\"your enter is not a number\") #display error message\n number_choice = 50 #return in a loop\n if number_choice < 0 or number_choice >49:\n print(\"your enter is not included in range\") #display error message if number is out of range\n return number_choice",
"def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pick a number within the range\", lower_range, \"and\", upper_range, \".\")\n elif number > upper_range:\n print(\"Please pick a number between\", lower_range, \"and\", upper_range, \".\")\n else:\n stop_condition2: bool = True\n except ValueError as ve:\n print(\"This is not a number.\")\n return number",
"def query_number(question, default=1):\n if default is None:\n prompt = \" [] \"\n else:\n prompt = \" [%d] \" % default\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return int(default)\n elif choice.isdigit():\n return int(choice)\n else:\n sys.stdout.write(\"Please respond with a number\\n\")",
"def user_input():\n user_number = input(\"Guess a number: \")\n try:\n user_number = int(user_number)\n except:\n print(\"Please ender a valid digit!\")\n return user_input()\n else:\n if 1 <= user_number <= 25:\n return user_number\n else:\n print(\"You need to enter a digit between 0 and 50\")\n return user_input()",
"def read_integer():\n while True:\n valor = input(\"Choose an option: \")\n try:\n valor = int(valor)\n return valor\n except ValueError:\n print(\"\")\n print(\"¡Haa haaaa! ¡¿Didn't explode right?! ¡Try again! xp\")\n print(\"\")",
"def inputNumber(parameter_name):\n\torder = 'Write the %s paramter value as a positive integer: ' %(parameter_name)\n\twhile True:\n\t\ttry:\n\t\t\tuserInput = int(raw_input(order))\n\t\t\tif userInput < 0:\n\t\t\t\tprint(\"Not a positive integer! Try again.\")\n\t\t\t\tcontinue\n\t\texcept ValueError:\n\t\t\tprint(\"Write postive integer in numerical form! Try again.\")\n\t\t\tcontinue\n\t\telse:\n\t\t\treturn userInput\n\t\t\tbreak",
"def q2(question):\r\n while True:\r\n try:\r\n q2 = int(input(\"2. What year did Disneyland open? \"))\r\n except ValueError:\r\n print(\"Not an integer. Try again.\")\r\n else:\r\n return q2",
"def stubborn_asker(low, high):\n import random\n a=random.randint(1,100)\n for i in range(1,10):\n n=input('enter the number: ')\n if n.isdigit():\n n=int(n)\n if n==a:\n return('Correct')\n break\n elif n>a:\n return('The number is bigger.')\n elif n<a:\n return('The number is smaller.')\n else:\n return('please enter an integer.')\n i+=1",
"def get_int(lo, hi):\n while True:\n n = input(f\"Please enter an integer from {lo} to {hi}: \")\n try:\n n = int(n) \n except ValueError: \n print(\"It must be an integer!\") \n continue\n if n < lo: \n print(\"You can't use negative numbers...\")\n continue # needed, otherwise enters the else statement.\n if n > hi: \n print(\"Think smaller\")\n else:\n break # exit to return if meets conditions.\n return n",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high, 1):\n response = input(question)\n return response",
"def ask_numbers(question, error):\n while True:\n value = 0\n try:\n value = int(input(question))\n except ValueError:\n print(error)\n except UnboundLocalError:\n print(error)\n except Exception:\n print(error)\n if value <= 0:\n print(\"Syötä positiivinen luku, joka on suurempi kuin 0\\n->\")\n else:\n break\n return value",
"def GetInteger(prompt=\"Please enter a number:\",\n lowerbound=0, upperbound=99,\n smaller_prompt=\"It's Smaller, please re-enter:\",\n bigger_prompt=\"It's Bigger, please re-enter:\",\n not_int_prompt=\"You did not enter a number, please re-enter:\"):\n user_input = input(prompt)\n\n def InternalFunc1(num):\n while True:\n try:\n return int(num)\n except ValueError:\n num = input(not_int_prompt)\n result = InternalFunc1(user_input)\n\n while not lowerbound <= result <= upperbound:\n if result < lowerbound:\n user_input = input(smaller_prompt)\n result = InternalFunc1(user_input)\n if upperbound < result:\n user_input = input(bigger_prompt)\n result = InternalFunc1(user_input)\n return result",
"def get_positive_int(prompt):\n while True:\n n = get_int(prompt)\n if n > 0 and n < 9 :\n break\n return n",
"def get_input():\n numb = int(input(\"Enter a number 1-10 \"))\n while True:\n if numb > 0 and numb < 10:\n return(numb)\n else:\n return(\"Please enter a value 1-10\")",
"def request_input(self, possibles=[]):\n answer = self.console.input('Type your request here:')\n if len(possibles) > 0 and self.numeric:\n invalid = True\n while invalid:\n try:\n answer = int(answer)\n invalid = False\n break\n except:\n answer = self.console.input('Type your request here (numbers only):')\n\n answer = possibles[answer - 1]\n else:\n if answer.find('quit') != -1:\n self.running = False\n else:\n if answer.find('quit') != -1:\n self.running = False\n return answer",
"def get_integer_input(message):\n\n value_as_string = input(message)\n while not value_as_string.isnumeric():\n print('The input must be an integer')\n value_as_string = input(message)\n\n return int(value_as_string)",
"def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()",
"def q3(question):\r\n while True:\r\n try:\r\n q3 = int(input(\"3. What is Space Mountain's top speed? \"))\r\n except ValueError:\r\n print(\"Not an integer. Try again.\")\r\n else:\r\n return q3",
"def validate_num(number):\n\n if number <= 0:\n new_num = int(raw_input(\"Oops, your number has to be greater than 0. Please pick again: \"))\n return validate_num(new_num)\n\n else:\n return number",
"def _ask_user_range(question, first, last, default):\n\n while True:\n answer = input(question)\n if answer == \"\":\n answer = default\n break\n if re.findall(r\"[0-9+]\", answer):\n if int(answer) in range(first, last + 1):\n break\n else:\n print(\n \"Please a value between {} and {} or Return.\".format(\n first, last\n )\n )\n else:\n print(\n \"Please a number between {} and {} or Return.\".format(first, last)\n )\n\n return int(answer)",
"def get_user_input(arg_pair: EviPair):\n global HUMAN_CORRECT_PRED\n\n while True:\n try:\n choice = int(raw_input())\n\n if choice in [1,2]:\n\n if choice == arg_pair.label:\n HUMAN_CORRECT_PRED += 1\n\n break\n else:\n print(WRONG_INPUT)\n except ValueError:\n print(WRONG_INPUT)\n\n return choice",
"def get_int(message, high, low=0):\r\n intValue = 1\r\n while True:\r\n try:\r\n intValue = int(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if intValue <= low or intValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". Please try again.\")\r\n continue\r\n break\r\n return intValue",
"def prompt():\r\n inpt = -1\r\n valid_choices = ['1','2','3','4','5']\r\n while inpt not in valid_choices:\r\n inpt = input(\"\\nPlease select the number of the operation you wish \"\r\n \"to complete:\\n\" +\r\n \"1. Run file mover\\n2. Add directories\"\r\n \"\\n3. Remove directory\\n4. View saved directories\\n5. Quit\\n\").strip()\r\n if inpt not in valid_choices:\r\n print(\"\\n*** Invalid choice ***\")\r\n return inpt",
"def get_positive_int(prompt):\n while True:\n n = get_int(prompt)\n if 0 <= n and n < 24:\n break\n return n",
"def PickNumber(lenList, message = ' To select the correct option pick a number in range ',min = 1, typeInput = int):\n while True:\n try:\n input1 = typeInput(input('\\n'+message+str(min)+'-'+str(lenList)+': \\t'))\n except ValueError:\n print( 'That\\'s not a number!')\n else:\n if min <= input1 <= lenList:\n return input1\n else:\n print( 'Number out of range. Try again!')",
"def pedir_entero(msg, min, max):\n while True:\n n = str(raw_input(msg))\n if not n.isdigit() :\n show_msg(\"Oops! Parece que eso no era un numero entero\")\n continue\n n = int(n)\n if n <= max and n >= min :\n return n\n else:\n show_msg(\"Numero fuera de rango\")\n continue",
"def numeric_input(input_value: str) -> int:\n try:\n input_value = int(input_value)\n except ValueError:\n pass\n if not isinstance(input_value, int):\n return ArgumentTypeError(\"Please specify number\")\n if input_value < 1 or input_value > 4:\n return ArgumentTypeError(\"Value should be in range from 1 to 4\")\n return input_value",
"def get_input():\n number = input(\"Enter a number: \")\n return number",
"def getAnswer():\n return int(input(\"What is your answer:\"))",
"def enter_score(self):\n int_invalid = True # Initialise to this as no int entered yet\n # To ensure that an 0<=integer>=10, and an integer only, is enetered\n while int_invalid:\n try:\n score = int(raw_input(\"Please only enter a number and \" +\n \"confirm with <ENTER>\\n\"))\n if (score <= 10 and score >=0): # possible range\n int_invalid = False\n else:\n int_invalid = True\n except ValueError: # entered value not int\n int_invalid = True\n return score",
"def gen_input_check(self, n):\r\n assert (\r\n isinstance(n, numbers.Number) and float(n).is_integer()\r\n ), \"Input must be an integer value.\"\r\n assert n >= 0, \"Input must be nonnegative\"\r\n\r\n return int(n)",
"def int_input():\n while True:\n try:\n n = int(input(\"Enter amount of cubes(n): \"))\n if n < 1 or n > 100:\n print(\"Input must be a positive integer [1, 100]!\")\n continue\n except ValueError:\n print(\"Not an integer!\")\n continue\n\n print(\"There are %d different stairs that can be build from %d cubes.\" % (amount_of_stairs(n), n))\n break",
"def guess_number(min_guess_range, max_guess_range):\n\tprint(f'guess the number between {min_guess_range} and {max_guess_range}!')\n\treturn check_input(min_guess_range, max_guess_range)",
"def input_loop(menu_range):\n def check(inp, rng):\n\n try:\n chk = int(inp)\n except ValueError:\n return False\n\n if chk in range(0, rng):\n return True\n else:\n return False\n\n print('-' * 20) # spacer\n\n inpu = input('choose option: ')\n\n while not check(inpu, menu_range):\n inpu = input('try again: ')\n\n return int(inpu)",
"def prompt_with_limits(prompt, default=None, low_limit=None, high_limit=None):\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n try:\n v = float(value)\n if (low_limit is not None and v < low_limit) or \\\n (high_limit is not None and v > high_limit):\n value = None\n except (ValueError, TypeError):\n value = None\n elif default is not None:\n value = default\n\n return value",
"def inputZip() -> int:\n while True:\n try:\n return int(input(\"Enter your zipcode for concerts near you: \"))\n except ValueError:\n print(\"Input only accepts numbers.\")",
"def QueryInt(cls, varName: str) -> str:\n\n # userInput needs to be raised to global so that it can be updated\n # while recursively calling this function in the except clause.\n global userInput\n\n try:\n userInput = input(\"{}: \".format(varName.capitalize()))\n\n # Raises a ValueError if userInput cannot be recast as integer.\n if not userInput.isdigit():\n raise ValueError\n\n except ValueError:\n # Reprompt user for valid entry.\n print(\"\\nPlease enter a valid {}.\".format(varName))\n cls.QueryInt(varName)\n\n # If input somehow causes an error which is not a ValueError, this catches it,\n # and prints the error message \"Oops something is buggy\".\n except Exception:\n # Only runs for errors which are not ValueErrors.\n # Assignment says to catch all input errors like this.\n # Don't blame me if it's silly!\n print(\"\\nOops something is buggy\")\n\n return userInput",
"def get_number():\n n = input(\"Enter value for n: \")\n try:\n return int(n)\n except ValueError:\n return",
"def _validate_input_integer(display_name, value):\n\n if isinstance(value, int) is False:\n raise ValueError(display_name + \" must be integer.\")",
"def users_chosen_number():\n number_chosen = int(input(\"Please select a number: \")\n return (number_chosen)\n\nusers_chosen_number()",
"def input_to_int(value):\n \n if value == \"1\" or value == \"2\" or value == \"3\" or value == \"4\" or value == \"5\" or value == \"6\":\n\n value = int(value)\n\n return value\n else:\n\n print(\"Your input was invalid. Please choose from one of the options next time.\")\n\n return False",
"def prompt_user_money_to_deposit():\n print('What amount of money do you want to deposit?:')\n return input()",
"def getHenhouseDisplayMenuChoice ():\r\n while True :\r\n try :\r\n choice = int(input('Select an option: '))\r\n if 0 <= choice <= 2 :\r\n break \r\n else :\r\n print('Please enter a valid option')\r\n except ValueError :\r\n print('Please enter a valid option')\r\n return(choice)",
"def get_int() -> int:\n line = input().strip()\n return int(line)",
"def get_int_input(prompt, invalid_prompt):\n\n input_value = 0\n is_input_valid = False\n while not is_input_valid:\n txt = input(prompt)\n\n if len(txt) == 0:\n break\n\n try:\n input_value = int(txt)\n is_input_valid = True\n except ValueError:\n if invalid_prompt != None:\n print(invalid_prompt.format(input_value))\n else:\n break\n\n return (is_input_valid, input_value)",
"def valid(question, first, last):\n\n while 1:\n try:\n choice = input(question)\n if choice < first or choice > last or not isinstance(choice, int):\n print \"\\nInvalid input, please try again.\"\n else:\n return choice\n except Exception:\n print \"\\nInvalid input, please try again.\"",
"def get_input(self):\n option = input(\"Enter the number of your choice: \")\n return option",
"def int_acceptor(question):\n #Keeps asking the question until user\n #inputs an integer.\n ender = False\n while not ender:\n try:\n reply = int(input(question+' (Enter an integer): '))\n except:\n reply = 'x'\n if type(reply) == int:\n return reply\n else:\n print(error_msg)",
"def get_number():\n\n while True:\n user_number_str = input('Digite um número para saber o seu fatorial: ').strip()\n\n if user_number_str.isnumeric():\n return int(user_number_str)\n else:\n print('Valor inválido.')",
"def check_input(min_guess_range, max_guess_range):\n\twhile True:\n\t\ttry:\n\t\t\tplayerGuess = int(input('enter your guess: '))\n\t\t\tassert min_guess_range <= playerGuess <= max_guess_range\n\n\t\texcept AssertionError:\n\t\t\tprint('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))\n\t\texcept ValueError:\n\t\t\tprint('numbers only!')\n\t\telse:\n\t\t\treturn playerGuess",
"def amount_entered():\n while True: #Run until a suitable input is passed.\n try:\n amt = int(input(\"Enter value you wish to trade >>> \"))\n if amt <= 0:\n raise Exception\n return amt\n except ValueError: #if a string is entered\n print(\"Please enter an integer\")\n except Exception: #if a negative digit is entered\n print(\"Value cannot be less than or equal to 0\")",
"def guess_number():\n searched_number = random.randint(1, 10)\n while True:\n try:\n users_number = int(input(\"Guess the number: \"))\n except ValueError:\n print(\"It's not a number!\")\n continue\n if users_number > searched_number:\n print(\"Too big!\")\n elif users_number < searched_number:\n print(\"Too small!\")\n else:\n return \"You win!\"",
"def prompt_user_money_to_withdrawl():\n print('What amount of money do you want to withdrawl?:')\n return input()",
"def get_int_input(prompt: str, x: int, y: int) -> int:\n print(rpipes.terminal.clear, end=\"\")\n draw_boundary()\n previous_input = \"\"\n while True:\n print(rpipes.terminal.move_xy(x, y) + \" \" * len(prompt + previous_input), end=\"\")\n previous_input = input(rpipes.terminal.move_xy(x, y) + prompt)\n try:\n return int(previous_input)\n\n except ValueError:\n print(rpipes.terminal.move_xy(x, y + 1) + \"Invalid input!\")",
"def posint_acceptor(question):\n #Keeps asking the question until user\n #inputs a positive integer.\n ender = False\n while not ender:\n try:\n reply = int(input(question+' (Enter a positive integer): '))\n except:\n reply = 0\n if reply > 0:\n return reply\n else:\n print(error_msg)",
"def __user_input(self, str_message):\n\n label = \"\"\n while label not in [\"0\", \"1\"]:\n label = input(str_message + \"\\n \")\n return int(label)",
"def prompt_number(self):\r\n self.area_code = int(input(\"Area Code: \"))\r\n self.prefix = int(input(\"Prefix: \"))\r\n self.suffix = int(input(\"Suffix: \"))",
"def _int_validator(arg):\n if arg is None or type(arg) != int:\n raise ValueError('Incorrect value: input should be an int')",
"def get_guess_from_user(self):\n self.guess_number = input(f\"please guess a number between 1 to {self.difficulty}: \\n\")\n while True:\n if not self.guess_number.isnumeric() or \\\n not int(self.guess_number) <= self.difficulty or \\\n not int(self.guess_number) >= 0:\n self.guess_number = input(f\"you input is invalid!! please guess a number between 1 to {self.difficulty}: \\n\")\n else:\n self.guess_number = int(self.guess_number)\n break\n return self.guess_number",
"def get_number():\n valid_input = False\n while not valid_input:\n try:\n user_num = int(input(\"Enter a number between {} and {}: \".format(LOWER_BOUND, UPPER_BOUND)))\n if LOWER_BOUND <= user_num <= UPPER_BOUND:\n return user_num\n except ValueError:\n pass\n print(\"That is not a valid number !\")",
"def getNumFromUser(valueType, prompt, enforcePositiveValue=False, specifyDefaultValue=False, defaultValue=0):\n while True:\n try:\n if valueType == \"integer\" or valueType == \"int\":\n\n userInput = input(prompt + \"\\n> \")\n\n if defaultValue and userInput == \"\":\n try:\n userInput = int(defaultValue)\n except ValueError:\n raise RuntimeError(\"The specified default value ({}) is invalid.\".format(defaultValue))\n else:\n try:\n userInput = int(userInput)\n except (ValueError, TypeError) as e:\n print(\"Unable to cast value ({}) as an integer. Please try again.\\n\".format(userInput))\n continue\n\n if valueType == \"float\":\n\n userInput = input(prompt + \"\\n> \")\n\n if defaultValue and userInput == \"\":\n try:\n userInput = float(defaultValue)\n except ValueError:\n raise RuntimeError(\"The specified default value ({}) is invalid.\".format(defaultValue))\n else:\n try:\n userInput = float(userInput)\n except (ValueError, TypeError) as e:\n print(\"Unable to cast value ({}) as a float. Please try again.\\n\".format(userInput))\n continue\n\n if enforcePositiveValue:\n if userInput > 0:\n break\n else:\n print(\"Number was specified as positive, but is not positive ({}). Please try again.\\n\".format(userInput))\n else:\n break\n except:\n raise RuntimeError(\"Invalid data type passed to getNumFromUser under the set conditions ({}).\".format(valueType))\n return userInput",
"def input_grid_size(n: int = None) -> int:\n if n is None:\n print(\"Please enter grid size as integer\")\n in_size = input(\"Grid size: \")\n try:\n processed_size = int(in_size)\n if processed_size <= 0:\n raise GridError(\"Incorrect grid size, should be more than 0\")\n return processed_size\n except ValueError:\n print(\"Not a correct integer number\")\n else:\n try:\n processed_size = int(n)\n if processed_size <= 0:\n raise GridError(\"Incorrect grid size, should be more than 0\")\n return processed_size\n except ValueError:\n print(\"Not a correct integer number\")",
"def lentero():\r\n\twhile True:\r\n\t\tn = raw_input(\"Ingrese el valor deseado: \")\r\n\t\ttry:\r\n\t\t\tn_1 = int(n)\r\n\t\t\treturn n_1\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"POR FAVOR: Ingrese un valor numerico y entero\")",
"def voteInput(number):\n\n counter = False\n while counter == False:\n\n if isInteger(number) == True:\n number = int(number)\n if voteCheck(number) == True:\n counter = True\n else:\n print(\"\\n\\t\\tPlease enter an integer between {} and {}\"\n .format(MIN_VOTES, MAX_VOTES))\n number = input(\"\\n\\tEnter votes: \")\n\n else:\n print(\"\\n\\t\\tPlease enter an integer between {} and {}\"\n .format(MIN_VOTES, MAX_VOTES))\n number = input(\"\\n\\tEnter votes: \")\n\n return number",
"def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True",
"def age_input(message):\n try:\n age = int(raw_input(message))\n return age\n except:\n return age_input(\"Enter a number: \")",
"def get_inp_factorial():\n print(\"Please, type n for factorial: \")\n try:\n n = int(input())\n return n if n >= 0 else get_inp_factorial()\n except:\n print(\"Number must be integer and not negative\")\n return get_inp_factorial()",
"def user_prompt(prompt, default=None):\n prompt = f\"\\n {prompt} [{default}] runs or type an amount: \"\n response = input(prompt)\n if not response and default:\n return default\n else:\n return response",
"def get_choice():\n response = raw_input().rstrip(\"\\n\")\n\n if response == 'exit':\n #this doesn't work\n raise SystemExit()\n\n if not response.isdigit():\n get_choice()\n\n if not 0 <= int(response) < MATCH_LIMIT+2:\n get_choice()\n\n return int(response)",
"def ask_with_input(string, range_param: int, str_choices: tuple,\n custom_validation: (callable, None) = None):\n while True:\n reply = input(string)\n try:\n if reply not in str_choices and not (\n custom_validation is not None and custom_validation(\n reply)):\n if range_param <= 0:\n continue\n elif int(reply) not in range(1, range_param + 1):\n continue\n except ValueError:\n continue\n break\n\n return reply"
] | [
"0.7585431",
"0.74412924",
"0.7407625",
"0.7393844",
"0.7116075",
"0.7100335",
"0.7029069",
"0.7021394",
"0.69807756",
"0.69662374",
"0.69629014",
"0.69590944",
"0.6943949",
"0.69393766",
"0.69393766",
"0.690722",
"0.68896306",
"0.6840757",
"0.6829219",
"0.6797315",
"0.6749956",
"0.671389",
"0.6706469",
"0.67045593",
"0.67034775",
"0.6695541",
"0.66953796",
"0.668686",
"0.66528195",
"0.6650516",
"0.6642516",
"0.6603826",
"0.6572528",
"0.6570548",
"0.65528035",
"0.65519303",
"0.65432835",
"0.6539283",
"0.65384597",
"0.653633",
"0.65324384",
"0.6525527",
"0.6505812",
"0.6503727",
"0.6501822",
"0.65003985",
"0.6498765",
"0.6485093",
"0.6455822",
"0.6449246",
"0.6413162",
"0.6412274",
"0.6406646",
"0.63909686",
"0.6382523",
"0.6367486",
"0.634047",
"0.63285905",
"0.6324314",
"0.63202167",
"0.6318988",
"0.63109803",
"0.6306929",
"0.6306495",
"0.62905246",
"0.6267714",
"0.6259592",
"0.62489855",
"0.6220801",
"0.6212602",
"0.6191567",
"0.61899644",
"0.61768144",
"0.6169081",
"0.61687505",
"0.61658204",
"0.615698",
"0.61518604",
"0.6150044",
"0.61443216",
"0.6141983",
"0.6139115",
"0.6131785",
"0.6125745",
"0.6123442",
"0.6119217",
"0.61176515",
"0.6109928",
"0.60933006",
"0.6089801",
"0.60733426",
"0.60657907",
"0.60508615",
"0.60504967",
"0.604852",
"0.6035426",
"0.60079885",
"0.59746367",
"0.5962533",
"0.5962223"
] | 0.77045965 | 0 |
Ask the user how many pegs are in the secret pattern. | def queryLengthOfPattern(self):
self._lengthOfPattern = \
self._readInt('How many pegs are in the secret', 1, 10)
return self._lengthOfPattern | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def guessTheSecret():\n\tguess = int(input('Guess the number > '))\n\tglobal attempts\n\tcheck = False\n\twhile guess != secret_num:\n\t\tif guess < secret_num:\n\t\t\tprint('Your guess is too low')\n\t\telif guess > secret_num:\n\t\t\tprint('You guess to too high')\n\t\tguess = int(input('Guess again > '))\n\t\tattempts += 1\n\t\tif attempts >= 4:\n\t\t\tbreak\n\tif guess == secret_num:\n\t\treturn True",
"def main():\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n while not is_valid_password(password):\n print(\"Invalid password!\")\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n print(\"*\" * len(password))",
"def user_avoid_count():\n\tforbidden = input('Enter a string of forbidden letters.\\n> ')\n\tprint(len({w for w in word_set if avoids(w, forbidden)}))",
"def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue",
"def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length",
"def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length",
"def determine_attempts():\r\n #Inputs: # of attempts requested by user\r\n #Outputs: game gives number of attempts user selected before ending \r\n how_many_tries = int(raw_input(\"How many attempts do you want to answer a blank correctly before the answer is provided to you? Please provide a number, such as 2.\\n\"))\r\n attempts = how_many_tries\r\n number_of_tries = 5\r\n while how_many_tries < 1:\r\n print \"Please try again.\"\r\n determine_attempts\r\n attempts = attempts + 1\r\n if attempts == number_of_tries:\r\n break \r\n else:\r\n print \"Please read the paragraph below and provide the answers to fill in the numbered blanks.\\nYou will be given \" + str(attempts) + \" chances to enter the correct answer before it is provided to you.\\n\"\r\n return how_many_tries",
"def guesses():\n tries = 3\n print (\" You may choose your maximum number of tries per question.\"\n \"The default is 3.\")\n player_prompt = \" Please type in your preferred number: \"\n while tries > 0:\n user_choice = raw_input(player_prompt)\n if user_choice.isdigit():\n print \"\\n OK, {} {} allowed per blank. Here we go!\\n\".format(user_choice, how_many(user_choice))\n return int(user_choice)\n tries -= 1\n player_prompt = (\" Silly, that's not a valid number of guesses! {} more {}. \\n\"\n \" Try again: \").format(tries, how_many(tries))\n if tries == 0:\n print \" You defaulted your number of guesses, so 3 it is!\"\n return 3",
"def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass",
"def find_max_guesses():\n print(\"You'll get 5 guesses per problem!\")\n return 5",
"def countGuesses(hidden):\r\n guess = random.choice(range(0, 100)) # 0 to 99, inclusive\r\n numguesses = 1 # we just made one guess, above\r\n while guess != hidden:\r\n guess = random.choice(range(0, 100)) # guess again!\r\n numguesses += 1 # add one to our number of guesses\r\n return numguesses",
"async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))",
"def user_pick(self):\n player_taking = True\n while player_taking:\n play_take = int(input(\"How many dots would you like to remove?(1-4)\"))\n if not 1 <= play_take <= 4:\n print(\"You may only take between 1 and 4 balls\")\n else:\n player_taking = False\n return play_take",
"def get_puzzle_no():\r\n \r\n puzzle_no = int(input(\"Enter the number of the puzzle to print the trace of (1-25): \"))\r\n while puzzle_no < 1 or puzzle_no > 25:\r\n print(\"Choice is invalid! Try again\")\r\n puzzle_no = int(input(\"Enter the number of the puzzle to print solution of (1-25): \"))\r\n \r\n return puzzle_no",
"def get_user_input(arg_pair: EviPair):\n global HUMAN_CORRECT_PRED\n\n while True:\n try:\n choice = int(raw_input())\n\n if choice in [1,2]:\n\n if choice == arg_pair.label:\n HUMAN_CORRECT_PRED += 1\n\n break\n else:\n print(WRONG_INPUT)\n except ValueError:\n print(WRONG_INPUT)\n\n return choice",
"def test_pick():\r\n global user_pick\r\n while user_pick > pickno or user_pick <= 0 or type(user_pick):\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n #Keeps the number of balls picked by user to be between 0 and 4\r",
"def user_picks():\r\n print (\"Enter the second to last posted Fantasy 5 lotto numbers from 1 to 42:\")\r\n ui = []\r\n while len(ui) < 5:\r\n print (len(ui) + 1,)\r\n try:\r\n i = int(input(\"--> \" ))\r\n # check if i is unique and has a value from 1 to 42\r\n # and is an integer, otherwise don't append\r\n if (i not in ui) and (1 <= i <= 42): \r\n ui.append(i)\r\n except:\r\n print (\"Enter an integer number!\")\r\n return ui",
"def number_len(password_length):\r\n while True:\r\n numb_length = input('How much numbers you want in password? At least 1 : ')\r\n try:\r\n numb_length = int(numb_length)\r\n if 1 <= numb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(numb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(numb_length))\r\n return numb_length",
"def exercise4():\n rolls = easygui.integerbox('How many 7s:', 'Input', '', 0, 2 ** 31)\n total = count_sevens( rolls )\n percent = rolls * 100 / total\n easygui.msgbox(\"{} out of {} rolls ({:.2f}%) were 7.\".format(rolls, total, percent))",
"def take_user_input():\n window = turtle.Screen()\n window.bgcolor(\"black\")\n size = int(window.textinput(\n \"Maze Creation\", \"Size of the maze:\"))\n if size % 2 == 0:\n size += 1\n return size",
"def passwd_prompt():\n\n print(\"Passwords MUST contain AT LEAST: one lower-case letter,\" \n \"one number, one symbol, and be a MINIMUM of 8 characters in length,\"\n \"e.g. r!ght2oE\")\n\n while True:\n\n passy = getpass.getpass(prompt=\"Enter password for user: \")\n confirm_passy = getpass.getpass(prompt=\"To confirm, \" \\\n \"re-enter password: \")\n\n # check for the following conditions: \n # user input matches\n # length of input is at least 8 characters\n # input contains at least 1 number \n # input contains at least 1 letter \n # input contains at least 1 symbol \n \n if passy != confirm_passy \\\n or len(passy) <8 \\\n or not re.search('\\d', passy) \\\n or not re.search(r\"[a-z]\",passy) \\\n or not re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', passy): \n \n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Password meets complexity requirement. Continuing...\") \n return passy",
"def setup_number_of_faces():\n \n while True:\n faces = int(input(\"Geben Sie die Seitenanzahl der Würfel an (2 - 100) oder tippe '0' zum A\\\nbbruch: \"))\n if 2 <= faces <= 100:\n break\n elif faces == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine Zahl zwischen 2 und 100 eingeben!\")\n print()\n print()\n return faces",
"def get_dungeon_size():\n size = input(\"Choose the size of the dungeon... (4 - 24)\\n>\")\n size = int(size)\n while size < 4 or size > 24:\n print(\"Pick a number between four and 24.\")\n size = input(\"Choose the size of the dungeon... (4 - 24)\\n>\")\n size = int(size)\n return size",
"def enterGuess(self):\n validPattern = False\n while not validPattern:\n print # intentional blank line\n prompt = 'Enter a guess (colors are '\n prompt += self._palette[:self._numColorsInUse] + '): '\n patternString = raw_input(prompt)\n \n validPattern = True\n if len(patternString) != self._lengthOfPattern:\n print 'The pattern must have', self._lengthOfPattern, 'pegs'\n validPattern = False\n else:\n for i in range(self._lengthOfPattern):\n if patternString[i].upper() not in self._palette[:self._numColorsInUse]:\n validPattern = False\n if not validPattern:\n print 'The color options are', self._palette[:self._numColorsInUse]\n \n if validPattern:\n pattern = Pattern(self._lengthOfPattern)\n for i in range(self._lengthOfPattern):\n pattern.setPegColor(i, self._palette.index(patternString[i].upper()))\n\n return pattern",
"def how_many(number):\n if int(number) == 1:\n return \"guess\"\n return \"guesses\"",
"def get_num_hexagons():\n num_hexagons = float(input('Пожалуйста, введите количество шестиугольников, располагаемых в ряд: '))\n while not (4 <= num_hexagons <= 20):\n num_hexagons = float(input('Оно должно быть от 4 до 20. Пожалуйста, повторите попытку: '))\n return num_hexagons",
"def getSecretMessage(limit):\n\n\tsecret = None\n\twhile secret == None or len(secret) not in range(1, limit+1):\n\t\tsecret = raw_input(\"Enter the secret message (Max length %d): \" % limit)\n\t\tif len(secret) > limit:\n\t\t\tprint \"Invalid message: too long!\"\n\t\telif len(secret) < 1:\n\t\t\tprint \"Invalid message: empty input!\"\n\n\treturn secret",
"def process_player_input(self,guess):\r\n # Step 1 - Catch faulty input, this is not topic of week 2\r\n\r\n # Tell the player the secret number :-)\r\n if (guess == \"Cheat\"):\r\n return \"Secret number = %d\" % (self.secret_number)\r\n \r\n # Step 2 - Verify player's input.\r\n user_input = self.verify_input(guess, self.num_range)\r\n if (type(user_input) != type(0)):\r\n # Verify_input() detected faulty input\r\n # Let's leave here with the error message\r\n return user_input\r\n\r\n # Decrease the number of still available tries\r\n if (self.remaining_guesses>0):\r\n self.remaining_guesses -= 1\r\n print \"Remaining number of tries = \", self.remaining_guesses\r\n \r\n # Step 3 - Give the player a hint for next guess\r\n if ((user_input > self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Lower!\"\r\n elif ((user_input < self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Higher!\"\r\n elif (user_input == self.secret_number):\r\n result_message = self.correctguess_message\r\n else:\r\n # As the guess was wrong and there is no further try anymore,\r\n # tell the player that he/she lost\r\n result_message = \"You tried too often than necessary, You lost!\"\r\n return result_message",
"def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()",
"def get_dimension():\n\n dimension = 0\n while (dimension != '2') and (dimension != '3'):\n dimension = input(\"Which dimension do you want (2/3)? \")\n if (dimension != '2') and (dimension != '3'):\n print(\"This program doesn't support that dimension, please input again\")\n dimension = int(dimension)\n return dimension",
"def guess_option(self,secret,dashes):\n\n\n\n\n try1 = input(\"enter the word:\")\n if (try1!=secret):\n self.badGuess = self.badGuess + 1\n g = (list(try1))\n\n for i in range(len(secret)):\n if secret[i] == g[i]:\n dashes = self.update_dashes(secret, dashes, secret[i])\n\n # guess_calculation(dashes)\n print(dashes)\n self.count=self.count+1\n for i in range(len(dashes)):\n if secret[i] != dashes[i]:\n self.total = self.total + String_Database.frequency.get(secret[i])\n\n print(\"Your guess is wrong: \", \"no of wrong guesses \", self.badGuess)\n return dashes\n\n\n else:\n print(\"Success\")\n self.status=\"Success\"\n\n\n dashes=secret\n if self.total==0:\n for i in dashes:\n self.total=self.total+String_Database.frequency.get(i)\n\n\n\n\n self.calculate_finalscore()\n print(\"final_score for guess-otpion\",self.finalScore)\n return dashes",
"def get_number_of_decks():\n number_of_decks = None\n while not(type(number_of_decks)) == int:\n try:\n number_of_decks = int(input(\"How many decks would you like in the shoe? \"))\n if number_of_decks == 0:\n raise zeroDecksError\n elif number_of_decks > 6:\n raise tooManyDecksError\n except zeroDecksError:\n print(\"The game needs at least 1 player\")\n number_of_decks = None\n except tooManyDecksError:\n print(\"Sorry you can't have more than 6 players\")\n number_of_decks= None\n except:\n number_of_decks = None\n return number_of_decks",
"def exercise3():\n # You _DO_NOT_ need to modify this code for Lab 10.\n start = easygui.integerbox( \"Enter start value:\", \"Input\", \"\", -2 ** 31, 2 ** 31 )\n stop = easygui.integerbox( \"Enter stop value:\", \"Input\", \"\", -2 ** 31, 2 ** 31 )\n step = easygui.integerbox( \"Enter divisor value:\", \"Input\", \"\", -2 ** 31, 2 ** 31 )\n easygui.msgbox( \"There are {} multiples of {} in the range [{},{}].\".format(\n count_multiples( start, stop, step ), step, start, stop ), \"Result\" )",
"def problem(args:int) -> int:\r\n\ts = 0\r\n\tfor i in range(1, args + 1):\r\n\t\ts += letter_count(i)\r\n\treturn s",
"def validate_profile_choice(dims):\n\n if dims[0] > 1:\n profile_choice = int(input(\"Multiple profiles detected.\\nplease choose which profile to use.\\n\"))\n while profile_choice not in range(dims[0]):\n profile_choice = int(input(\"Incorrect selection.\\nplease choose {}.\\n\".format(range(dims[0])))) \n else:\n profile_choice = 0\n\n\n return profile_choice",
"def prompt():\r\n inpt = -1\r\n valid_choices = ['1','2','3','4','5']\r\n while inpt not in valid_choices:\r\n inpt = input(\"\\nPlease select the number of the operation you wish \"\r\n \"to complete:\\n\" +\r\n \"1. Run file mover\\n2. Add directories\"\r\n \"\\n3. Remove directory\\n4. View saved directories\\n5. Quit\\n\").strip()\r\n if inpt not in valid_choices:\r\n print(\"\\n*** Invalid choice ***\")\r\n return inpt",
"def max_guesses():\n\tprint \"\\n\" + \"How many guesses would you like per problem?\" + \"\\n\"\n\tmax_guesses = None\n\twhile max_guesses is None:\n\t\ttry:\n\t\t\tmax_input = int(raw_input('Please enter a positive integer number: '))\n\t\t\tif max_input < 1:\n\t\t\t\tprint \"\\n\" + \"You need at least one guess!\" + \"\\n\"\n\t\t\telse:\n\t\t\t\tmax_guesses = max_input\n\t\t\t\tprint \"\\n\" + \"OK, you'll have %d try(ies) for each question. Good luck!\" % max_guesses\n\t\t\t\treturn max_guesses\n\t\t\t# else:\n\t\t\t# \tprint \"\\n\" + \"You need at least one guess!\" + \"\\n\"\n\t\texcept ValueError:\n\t\t\tprint \"\\n\" + \"You entered a non-integer. Please enter a positive integer.\" + \"\\n\"",
"def ex9():\n\n length = prompt_int('What is the length of the room? ')\n width = prompt_int('What is the width of the room? ')\n\n total_sq_ft = length * width\n total_paint_cans = ceil(total_sq_ft / GALLON_SQUARE_FEET)\n\n message = (\n 'You will need to purchase {} cans of paint to cover {} square feet'\n .format(total_paint_cans, total_sq_ft))\n print(message)",
"def AskHowManyPlayers():\n\n\t# Loop forever until the user enters an integer between 1 and 10, inclusive.\n\twhile True:\n\t\tprint \"How many players? Enter a number between 1 and 10, or press enter for default 2:\"\n\t\tnum_players = SolicitInteger( lobound=1, hibound=10, default_return=2 )\n\t\tif num_players != None:\n\t\t\tprint \"Ok, {} players.\".format( num_players )\n\t\t\treturn num_players",
"def get_number_of_players():\n number_of_players = None\n while not(type(number_of_players)) == int:\n try:\n number_of_players = int(input(\"How many players are there? \"))\n if number_of_players == 0:\n raise zeroPlayersError\n elif number_of_players > 6:\n raise tooManyPlayersError\n except zeroPlayersError:\n print(\"The game needs at least 1 player\")\n number_of_players = None\n except tooManyPlayersError:\n print(\"Sorry you can't have more than 6 players\")\n number_of_players = None\n except:\n number_of_players = None\n return number_of_players",
"def validate(n = 5):",
"def host():\n\n print(\"\"\" Bienvenue sur l'application Pur Beurre\n --------------------------------------------\n 1: Quel aliment souhaitez-vous remplacer ?\n 2: Retrouver mes aliments substitués\n 3: Quitter\"\"\")\n\n while True:\n try:\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice in range(1, 4):\n break\n except ValueError:\n continue\n\n return choice",
"def create_number_of_players(self):\n self.number_of_players = pyip.inputInt(\n prompt='\\nEnter number of players (1 to 4):\\n', min=1, max=4)",
"def plants(ground, grain):\n print(SOWING_OF_GRAIN)\n answer_2 = input()\n while answer_2.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer_2 = input()\n answer_2 = int(answer_2)\n variants = [0, 1, 2]\n plant = random.choice(variants)\n phrase = ''\n if plant == 0:\n grain += answer_2 * 0.5\n phrase = NOT_HARVEST_YEAR\n elif plant == 1:\n grain += answer_2 * 1.5\n else:\n grain += answer_2 * 2\n phrase = HARVEST_YEAR\n ground -= answer_2 // 10\n return int(grain), int(ground), phrase",
"def human_input(marbles_left):\n\twhile True:\n\t\ttry:\n\t\t\thuman_choice = int(input('Your turn: How many marbles will you remove (1-3)? '))\n\t\texcept:\n\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\treturn 0\n\t\telse:\n\t\t\tif human_choice not in range(1, 4):\n\t\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\t\treturn 0\n\t\t\telif human_choice > marbles_left:\n\t\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\tprint('You removed {} marbles.'.format(human_choice))\n\t\t\t\treturn human_choice",
"def tell_me(self,secret,dashes):\n\n\n result=\"\"\n self.total=0\n for i in range(len(secret)):\n if secret[i] != dashes[i]:\n result=result+secret[i]\n\n\n self.total=self.total+String_Database.frequency.get(secret[i])\n\n self.status=\"Gave up\"\n\n print(\"The corect word is : \", secret,\", you missed :\",\"letters: \",result)\n self.finalScore=0-self.total\n\n\n return 1",
"def get_num_of_tries(return_show_hidden_word, old_letters_guessed):\r\n wrong_tries = []\r\n for letter in old_letters_guessed:\r\n if letter not in return_show_hidden_word:\r\n wrong_tries.append(letter)\r\n num_of_tries = len(wrong_tries)\r\n return num_of_tries",
"def test_integration():\n input_values = read_in_range('day04/input.txt')\n n_valid = count_valid_passwords(input_values)\n assert n_valid == 511",
"def input_pomos():\n active = True\n daily_pomos = \"\"\n while active:\n daily_pomos = input('\\nEnter the number of completed Pomodoros: ')\n try: # check if the input is an integer\n if float(daily_pomos) in range(0, 20):\n active = False\n except ValueError:\n print('ERROR: Please enter an integer as the number of Pomodoros.')\n daily_hours = float(daily_pomos) / 2 # two pomodoros are equal to one hour\n return daily_hours",
"def new_game(range):\n global secret_number\n global counter\n global n\n n = range\n \n secret_number = random.randrange(0, n)\n counter = int(math.ceil(math.log(n + 1)/math.log(2)))\n \n print \"New Game. Range is from 0 to\", n\n print \"Number of remaining guesses is\",counter\n print \"\"",
"def escoge_numero(a,b,count_clue):\n count = 0\n numero_aleatorio = random.randint(a,b)\n while count == 0:\n user_input = int(input(f'Ingresa un numero entre {a} y {b}'))\n while user_input.type() != int:\n user_input = input('Ingresa un numero entero entre {a} y {b}:\\n==> ') \n if user_input == numero_aleatorio:\n count += 1\n else:\n print('Numero incorrecto')\n while True:\n try:\n user_pista = input('Desear utilizar una pista Si(s) o No(n): {}').lower()\n while user_pista != 's' and user_input != 'n':\n useruser_pista_pista = input('Ingresa Si(s) o No (n):\\n==> ') \n raise Exception\n break\n except:\n print('Ingreso un valor erroneo')\n if user_pista == 's':\n if count_clue > 0:\n if numero_aleatorio - user_input > 0 and numero_aleatorio - user_input < 2:\n print('Esta muy cerca por arriba')\n count_clue -=1\n elif numero_aleatorio - user_input > 2 and numero_aleatorio - user_input > 5:\n print('Estas cerca por arriba')\n count_clue -=1 \n elif numero_aleatorio - user_input > 5:\n print('Estas muy lejos por arriba')\n count_clue -=1\n elif numero_aleatorio - user_input < 0 and numero_aleatorio - user_input > -2:\n print('Estas muy cerca por abajo')\n count_clue -=1\n elif numero_aleatorio - user_input < -2 and numero_aleatorio - user_input > -5:\n print('Estas cerca por abajo')\n count_clue -=1\n else:\n print('Estas muy lejos por abajo') \n count_clue -=1\n else:\n print('No tienes mas pistas')\n else:\n print('Vuelve a intentarlo')\n \n return True, count_clue",
"def get_number_of_instances_from_user(self):\n\t\ttry:\n\t\t\t# The int interpretation of the user input\n\t\t\ttemp = int(raw_input(\"Enter the number of car instances: \"))\n\n\t\t\t# If input is > 0\n\t\t\tif temp > 0:\n\t\t\t\tself.number_of_instances = temp\n\n\t\t\t# If input is <= 0\n\t\t\telse:\n\t\t\t\tprint \"Number has to be an int greater than 0. Please enter a number greater than 0.\"\n\n\t\t\t\tself.get_number_of_instances_from_user()\n\n\t\t# If user doesnt enter an int\n\t\texcept Exception:\n\t\t\tprint \"Number was invalid. Please enter a positive number.\"\n\n\t\t\tself.get_number_of_instances_from_user()",
"def attempts(difficulty):\n if difficulty == 'easy':\n attempts = 5\n else:\n attempts = 10\n \n return attempts",
"def day25():\n parse_args()\n lines = read_input(25)\n points = next(parse_points(lines))\n print(\"Part 1\")\n print(count_constellations(points))",
"def option_to_change_word(secret_word, count):\n if count > 8:\n if change_word() is True:\n print '####################'\n secret_word = return_word().lower()\n show_size_secret_word(secret_word)\n guess_the_word(secret_word)\n exit()\n else:\n pass",
"def int_input():\n while True:\n try:\n n = int(input(\"Enter amount of cubes(n): \"))\n if n < 1 or n > 100:\n print(\"Input must be a positive integer [1, 100]!\")\n continue\n except ValueError:\n print(\"Not an integer!\")\n continue\n\n print(\"There are %d different stairs that can be build from %d cubes.\" % (amount_of_stairs(n), n))\n break",
"def get_new_user_pword():\n pword = input(\"Password (3–20 alphanumeric characters): \")\n menu.option_to_exit(pword)\n try:\n if len(pword) < 3 or len(pword) > 20 or not pword.isalnum():\n raise ValueError\n except ValueError:\n print(\"Password must be 3–20 alphanumeric characters.\\n\"\n \"Please try again.\\n\")\n return get_new_user_pword()\n else:\n return pword",
"def get_wager_input(max_chips, min_chips=1):\n chips = 0\n while chips < min_chips or chips > max_chips:\n chips = input('How many chips do you wager? (min %d, max %d): ' % (min_chips, max_chips))\n try:\n chips = int(chips)\n except:\n chips = 0\n return chips",
"def get_help_text(self):\n msg = (\n ungettext(\n \"Your password must contain less than %d occurrences of the same letter.\",\n \"Your password must contain less than %d occurrences of the same letter.\",\n self.get_max_count(),\n )\n % self.get_max_count()\n )\n return msg",
"def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid",
"def process():\r\n\t### INPUT ####\r\n\tpq = eat(str)\r\n\tp, q = map(int, pq.split('/'))\r\n\t\r\n\t### COMPUT ####\r\n\td = gcd(p, q)\r\n\tp, q = p//d, q//d\r\n\teprint(bin(q), bin(p))\r\n\tif len(bin(q).strip('0')) != 2:\r\n\t\treturn IMPOSSIBLE\r\n\t#length of p\r\n\tbinp = bin(p)\r\n\tbinq = bin(q)\r\n\treturn len(binq) - len(binp)\r\n\t\r\n\t### OUTPUT ####\r\n\treturn solve()",
"def main_method():\r\n choice = 0\r\n precision = 0\r\n # loop to display menu and validate user's input\r\n while choice != 6:\r\n display_menu()\r\n choice = input(\"Enter choice(1-6):\")\r\n print(\"\\n\")\r\n\r\n # validate choice before casting to integer\r\n if choice.isdigit():\r\n choice = int(choice)\r\n\r\n if choice == 1:\r\n length, has_upper, has_lower, has_numbers, has_special_char, \\\r\n is_all_no = 0, \" \", \" \", \" \", \" \", False\r\n\r\n print(\"-- Generating Password --\")\r\n\r\n # Prompt user for password attribute's\r\n # And validate input\r\n while length < 10 or has_upper not in valid_statement or \\\r\n has_lower not in valid_statement or \\\r\n has_numbers not in valid_statement or \\\r\n has_special_char not in valid_statement or is_all_no:\r\n\r\n print(\r\n \"Length MUST be a number 10 or greater | ALL questions are \"\r\n \"'yes' or 'no' | At LEAST 1 yes required:\")\r\n length = input(\"Enter length of password (minimum 10):\")\r\n\r\n # Validate length is digit before casting to int\r\n if length.isdigit():\r\n length = int(length)\r\n else:\r\n length = 0\r\n\r\n # Prompt user for password complexity\r\n has_upper = input(\"Should password contain uppercase?\")\r\n has_lower = input(\"Should password contain lowercase?\")\r\n has_numbers = input(\"Should password contain numbers?\")\r\n has_special_char = input(\"Should password contain special characters?\")\r\n print(\"\\n\")\r\n\r\n # Boolean check if all answers are no\r\n # This would mean no characters to make password\r\n is_all_no = has_upper in no and has_lower in no and has_numbers in no \\\r\n and has_special_char in no\r\n\r\n # Data is valid so generate password\r\n choice_1(length, has_upper, has_lower, has_numbers, has_special_char)\r\n elif choice == 2:\r\n print(\"-- Calculate a Percentage --\")\r\n\r\n # Prompt user for numerator, denominator and decimal precision\r\n # NOTE: Validate numerator and denominator and precision are integers\r\n # NOTE: Validate denominator is NOT 0\r\n\r\n numerator, denominator, precision = 0, 0, 0\r\n while True:\r\n print(\"Only whole numbers accepted! 
| decimal precision must be positive!\")\r\n numerator = input(\"What is the numerator?\")\r\n denominator = input(\"What is the denominator?\")\r\n precision = input(\"How many decimal precision needed?\")\r\n print(\"\\n\")\r\n\r\n if numerator[0] == \"-\":\r\n numerator_sign = -1\r\n numerator = numerator[1:]\r\n else:\r\n numerator_sign = 1\r\n\r\n if denominator[0] == \"-\":\r\n denominator_sign = -1\r\n denominator = denominator[1:]\r\n else:\r\n denominator_sign = 1\r\n\r\n if numerator.isdigit() and denominator.isdigit() and \\\r\n precision.isdigit() and denominator != \"0\":\r\n numerator = int(numerator) * numerator_sign\r\n denominator = int(denominator) * denominator_sign\r\n precision = int(precision)\r\n break\r\n\r\n choice_2(numerator, denominator, precision)\r\n elif choice == 3:\r\n choice_3()\r\n elif choice == 4:\r\n print(\"-- Calculate Leg of a Triangle --\")\r\n\r\n side_ac, side_cb, angle_acb, precision = 0, 0, 0, 0\r\n # Prompt user for side AC\r\n # Prompt user for side CB\r\n # Prompt user for angle <ACB\r\n\r\n while True:\r\n print(\"All input must be a positive whole number!\")\r\n side_ac = input(\"Enter length for side AC:\")\r\n side_cb = input(\"Enter length for side CB:\")\r\n angle_acb = input(\"Enter angle for <ACB:\")\r\n precision = input(\"How many decimal precision needed?\")\r\n\r\n # Validate data entered are integers\r\n if side_ac.isdigit() and side_cb.isdigit() and angle_acb.isdigit() \\\r\n and precision.isdigit():\r\n side_ac = int(side_ac)\r\n side_cb = int(side_cb)\r\n angle_acb = int(angle_acb)\r\n precision = int(precision)\r\n break\r\n choice_4(side_ac, side_cb, angle_acb, precision)\r\n elif choice == 5:\r\n print(\"-- Volume of Right Circular Cylinder --\")\r\n\r\n radius, height, precision = 0, 0, 0\r\n\r\n while True:\r\n radius = input(\"Enter radius of cylinder:\")\r\n height = input(\"Enter height of cylinder:\")\r\n precision = input(\"Enter decimal precision for answer:\")\r\n\r\n if radius.isdigit() and height.isdigit() and precision.isdigit():\r\n radius = int(radius)\r\n height = int(height)\r\n precision = int(precision)\r\n break\r\n\r\n choice_5(radius, height, precision)\r\n elif choice == 6:\r\n print(\"Exiting program.\")\r\n else:\r\n print(\"Invalid choice. Must be a number (1 to 6)\")",
"def cnt_points(self, field:str, all_ghost_out:bool):\r\n\r\n eat_dot = False\r\n\r\n # When field is 'e' that means Pac-Man ate an energizer, so he can now eat ghosts and +50 will be added to his point counter\r\n if field == 'e':\r\n self.play_powerpellet()\r\n eat_dot = True\r\n self.eat_ghost = True\r\n self.energizer_flag = True\r\n self.point_counter += 50\r\n self.dot_counter += 1\r\n if self.first_eaten:\r\n self.global_counter += 1\r\n\r\n # When field is 'p' Pac-Man ate a normal point, which adds +10 to his point counter\r\n elif field != None and field[0] == 'p': \r\n self.play_chomp()\r\n eat_dot = True\r\n self.point_counter += 10\r\n self.dot_counter += 1\r\n if self.first_eaten:\r\n self.global_counter += 1\r\n\r\n # If not all ghosts are out of the ghost house the hourglass will be reset\r\n if eat_dot and not all_ghost_out:\r\n self.hourglass_counter = 0",
"def new_game(secret_words):\n\n\tattempts=0\n\tword_index = random.randint(0,5)\n\tword_to_guess = secret_words[word_index]\n\tglobal mask\n\tmask = \" _ \" * len(secret_words[word_index])\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn",
"def user_input(self, screen_height, screen_width):\n global num_dots\n num_dots = \"x\"\n print(\"Welcome to the game of Nim!\")\n while not num_dots.isnumeric():\n num_dots = input(\"How many dots do you want to play with? \")\n num_dots = int(num_dots)\n (width, height) = self.calculate_size(num_dots)\n dot_distance = screen_width\n first = False\n while not self.is_valid_size(width, height, dot_distance, screen_width, screen_height):\n if first:\n print(\"That won't fit on the screen; pick a smaller number\")\n dot_distance = input(\"How far apart are the dots? \")\n while not dot_distance.isnumeric():\n dot_distance = input(\"Let's try an integer instead. \\nHow far apart are the dots? \")\n first = True\n dot_distance = int(dot_distance)\n return dot_distance, height, width",
"def main():\n number = int(input())\n count = 0\n for i in range(1, number+1):\n if i%3 == 0 or i%5 == 0:\n count += i\n print(count)",
"async def pizza(ctx):\r\n author = ctx.message.author\r\n await ctx.send(author.mention + \" has eaten \" + str(randint(2, 120)) + \" slices of pizza today.\")\r\n ctx.counter(n)",
"def test_remain():\r\n global pickno\r\n #Change pick number to the total amount of balls\r\n # Ex. If we have 3 balls remaining the user cannot pick 4\r\n if total <= 4:\r\n pickno = total",
"def get_num_algo(algos: tuple):\n length = len(algos)\n try:\n n = int(input())\n if n not in range(1, length+1):\n print_rules(length)\n return algo_selection(algos)\n else:\n return n\n except:\n print_rules(length)\n return algo_selection(algos)",
"def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int:\n\n allowed_count = total_ips_count - len(ranges)\n print(f\"part 2: there are total {allowed_count} allowed IPs\")\n return allowed_count",
"def number_pick():\n secret_number = randint(1, 100)\n\n return secret_number",
"def num_selection(message):\n return int(input(f\"{message}\\n\"))",
"def pickSecretNumber(): \n return random.randrange(1, 11)",
"def setNumSamples(self):\n while True:\n try:\n self.numberOfSamples = int(input(\n \" How many samples? [%s]: \" % self.numberOfSamples) or self.numberOfSamples)\n if self.numberOfSamples >= 1:\n break\n else:\n print (\" Integer >= 1 needed!\")\n except ValueError:\n print (\" Integer >= 1 needed!\")",
"def get_inp_permutations():\n print(\"Please, type number of elements: \")\n try:\n n = int(input())\n return n if n >= 0 else get_inp_permutations()\n except:\n print(\"Number must be integer and not negative\")\n return get_inp_permutations()",
"def setup_number_of_dices():\n \n while True:\n number = int(input(\"Geben Sie die Anzahl Würfel an (1 - 10) oder tippe '0' zum Abbruch: \"))\n if 1 <= number <= 10:\n break\n elif number == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine Zahl zwischen 1 und 10 eingeben!\")\n print()\n print()\n return number",
"def get_user_text() -> str:\n validinput = False\n while not validinput:\n intext = input(\"Which of your most favorite quotes can Polly cook up for you?\")\n if len(intext) > POLLY_CHAR_LIMIT:\n print(\"You have entered in more text that Polly can support in one call.\")\n validinput = False\n else:\n validinput = True\n return intext",
"def _process_user_choice(self):\n verifying_choice = True\n idx = 0\n print(\"Current occupants: %s\" % self.get_occupants())\n while verifying_choice:\n user_choice = raw_input(\"Choose a hut number to enter (1-5): \")\n # --------------------------------------------------------------\n # try...except illustration for chapter on exception handling.\n # (Attack Of The Orcs v1.1.0)\n # --------------------------------------------------------------\n try:\n idx = int(user_choice)\n except ValueError as e:\n print(\"Invalid input, args: %s \\n\" % e.args)\n continue\n\n try:\n if self.huts[idx-1].is_acquired:\n print(\"You have already acquired this hut. Try again.\"\n \"<INFO: You can NOT get healed in already acquired hut.>\")\n else:\n verifying_choice = False\n except IndexError:\n print(\"Invalid input : \", idx)\n print(\"Number should be in the range 1-5. Try again\")\n continue\n\n return idx",
"def exercise_b2_24():\r\n number = input(\"Insert the number: \")\r\n flag = 0\r\n count = 0\r\n divisors_list =[]\r\n while flag <= int(number):\r\n flag +=1\r\n if (int(number) % flag) == 0:\r\n count += 1\r\n divisors_list.append(flag)\r\n print(\"\"\"\\nThe amount of divisors are: %s\"\"\"\r\n \"\"\"\\nThe numbers are: %s\\n\"\"\" % (count, divisors_list))\r\n return",
"def get_number(pnum):\n global piles\n \n while True:\n userInput = int(input(\"How many? \"))\n if userInput >= 1 and userInput <= piles[pnum]:\n return userInput\n break",
"def secure_input(self, minimum, maximum):\n wrong_input = True\n while wrong_input:\n while True:\n try:\n choice = int(input())\n break\n except ValueError:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n if choice < minimum or choice > maximum:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n else:\n wrong_input = False\n return choice",
"async def character_popularity(\n position: P('number', 'Please select a number between 1 and 20', min_value = 1, max_value = 20)\n):\n return MOST_POPULAR_TOUHOU_CHARACTERS[position - 1]",
"def dogs():\r\n print(\"Please give 3 dogs you want to going hunting with names\")\r\n for x in range(3):\r\n d = str(input())\r\n x += 1\r\n print(\"dog name:\" + d)\r\n print(\"What wonderful choices of dogs\")",
"def main():\n pokedict = dict()\n inputn = int(input())\n for _ in range(inputn):\n temp = input().split(\" \")\n pokedict[temp[0]] = int(temp[1])\n check = input()\n if check.isnumeric():\n for i in pokedict:\n if pokedict[i] == int(check):\n print(i)\n else:\n print(pokedict[check])",
"def count_sheep(n):\t\n\treturn ''.join([str(x) + ' sheep...' for x in range(1, n + 1)])",
"def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length",
"def validate_puzzle_string(self):\n is_puzzle_string_valid = False\n while is_puzzle_string_valid is False:\n question = \"Enter a valid puzzle. (81 inline digits where zeros \" +\\\n \"represent empty spots) E.g. 01040506.... and so on\\npuzzle\"\n puzzle_parameter = self.ask_user_input(question)\n if not puzzle_parameter.isdigit():\n print(\"The puzzle should contain only digits, please try again\")\n elif len(puzzle_parameter) == 81:\n is_puzzle_string_valid = True\n self.current_response = puzzle_parameter\n else:\n print(\"The puzzle should contain exactly 81 digits, please try again\")\n return is_puzzle_string_valid",
"def puzzle_02() -> None:\n\n containers = load_containers()\n combinations_lengths = tuple(map(\n lambda combination: len(combination),\n filter(lambda combination: sum(combination) == EGGNOG_LITRES,\n [combination\n for i in range(len(containers))\n for combination in combinations(containers, i)])))\n print_puzzle_solution(combinations_lengths.count(min(combinations_lengths)))",
"def limit_number_prompts(state: SessionState):\n if state.prompts is not None and len(state.prompts) > 1:\n state.prompts = [state.prompts[0]]",
"def get_num_names_from_user():\n valid = False\n num_names = 1 #default value\n\n # user info\n print(\"\\nType number of names to generate.\")\n print(\"(You can directly Enter to generate 1 name)\")\n\n while not valid:\n num_names = input(\"Your input: \")\n\n if num_names.strip() == \"\":\n num_names = 1\n break\n\n try:\n num_names = max(int(num_names), 1)\n valid = True\n except:\n print(\"\\nPlease type an integer number.\\n\")\n\n return num_names",
"def validate_puzzle_param(self, name):\n is_puzzle_parameter_valid = False\n while is_puzzle_parameter_valid is False:\n puzzle_parameter = self.ask_user_input(\"Enter a valid '\" + name + \"'\")\n if not puzzle_parameter.isdigit():\n print(\"Not a number, please try again\")\n elif 1 <= int(puzzle_parameter) <= 9:\n is_puzzle_parameter_valid = True\n self.current_response = puzzle_parameter\n else:\n print(\"Number is out of the valid range (1 to 9), please try again\")\n return is_puzzle_parameter_valid",
"def get_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words):\n\n\tprint \"\\n The word to guess is: \", mask\t\n\tprint \"\\n # of attempts: \", attempts\n\tprint \"\\n Insert a letter or a number \\n\"\n\tthe_guess = raw_input()\n\tthe_guess = the_guess.lower()\n\t# Check if the input is a valid character\n\tvalidity = check_validity(the_guess, valid_characters, user_guesses)\n\tif (validity is True):\n\t\t# CHeck if the user has guessed the letter\n\t\tif (check_if_guessed(the_guess, word_to_guess) >= 0):\n\t\t\tprint \"\\n Great! your choosed the correct letter!\"\n\t\t\tuser_guesses += the_guess\n\t\t\tmask = calculate_mask(user_guesses, word_to_guess)\n\t\t\tyou_won = check_if_won(user_guesses, word_to_guess, secret_words)\n\t\t\tif you_won is True:\n\t\t\t\t# If the user has won it stop the game\n\t\t\t\treturn\n\t\telse:\n\t\t\tattempts = attempts + 1\n\t\t\tprint \"\\n Sorry! the letter is not present in the word! you have now %d guess left\" % (6 - attempts)\n\t\t\tyou_lost = check_if_lost(attempts, secret_words)\n\t\t\tif you_lost is True:\n\t\t\t\t# If he user has lost it stop the game\n\t\t\t\treturn\n\telse:\n\t\tprint \"\\n The input is not valid! Insert a valid input\"\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn",
"def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def play_game(n):\n tries = 0\n magic_number = generate_random(n)\n print(\"Let's play the mimsmind0 game.\")\n # Get and validate user's first guess\n while True:\n try:\n guess = int(input(\"Guess a {}-digit number: \".format(n)))\n tries += 1\n break\n except:\n print(\"That is not a valid number, try again.\") \n while True:\n # Check guess against magic number and give directional guidance if incorrect\n try:\n if magic_number > guess:\n guess = int(input(\"Try again. Guess a higher number: \"))\n tries += 1\n elif magic_number < guess:\n guess = int(input(\"Try again. Guess a lower number: \"))\n tries += 1\n else:\n print(\"Congratulations. You guessed the correct number in {} tries.\".format(tries))\n break\n except:\n print(\"That's not a valid number.\")",
"def hotp(secret, count, digits=None):\n if not digits:\n digits = 6\n\n count_hex = '%x' % count\n\n count_hex = '0' * (16-len(count_hex)) + count_hex\n\n result = \"\"\n for i in xrange(0, 8):\n result += count_hex[i*2:i*2+2].decode('hex')\n\n hash = hmac.new(secret, result, digestmod=sha1).hexdigest()\n\n offset = int(hash[-1], 16)\n\n part = hash[(offset*2):(offset*2)+8]\n\n part_int = int(part, 16) & int(\"7fffffff\", 16)\n\n return part_int % 10**digits",
"def nb_elephants(self, x):\n print('Warning ! Changing the number of Elephant is not possible!')",
"def input_guess(guess):\n global counter\n global secret_number\n \n guess_int = int(guess)\n counter = counter - 1\n \n print \"Guess was\", guess_int\n if guess_int == secret_number:\n print \"Correct!\"\n print \"\"\n new_game(n)\n return\n elif guess_int > secret_number and counter != 0:\n print \"Number of remaining guesses is\", counter\n print \"Lower!\"\n print \"\"\n elif guess_int < secret_number and counter != 0:\n print \"Number of remaining guesses is\", counter\n print \"Higher!\"\n print \"\"\n else:\n print \"You ran out of guesses. The number was\",secret_number\n print \"\"\n new_game(n)",
"def process_user_choice():\n msg = \"\\033[1m\" + \"Odaberi kućicu za ulaz (1-5): \" + \"\\033[0m\"\n user_choice = input(\"\\n\" + msg)\n idx = int(user_choice)\n return idx",
"def _is_user_wants_to_continue(self):\n\n # dummy value to get in while\n user_input = -1\n while user_input != 1 and user_input != 2:\n\n try:\n # convert the string into int\n user_input = int(input())\n except ValueError:\n print(\"Please enter a number\")\n continue\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n continue\n\n # check if the user_input was one of the options\n # if not present a error massage and try again\n if user_input != 1 and user_input != 2:\n print(\"Please enter a valid number(1-2)\")\n continue\n\n return user_input == 1"
] | [
"0.6076266",
"0.60246533",
"0.6022768",
"0.5874914",
"0.57561094",
"0.5753638",
"0.57490975",
"0.5721011",
"0.5687204",
"0.56637275",
"0.5620408",
"0.56060576",
"0.559462",
"0.5586774",
"0.5576528",
"0.5574137",
"0.5513518",
"0.5511759",
"0.5488277",
"0.54585576",
"0.5408952",
"0.54017985",
"0.5360931",
"0.5356373",
"0.53487307",
"0.5335186",
"0.5326323",
"0.532297",
"0.53083026",
"0.52886444",
"0.527687",
"0.52761906",
"0.5274057",
"0.5269359",
"0.5266782",
"0.5252991",
"0.52478474",
"0.52441555",
"0.5242501",
"0.52348495",
"0.52194303",
"0.5215026",
"0.5207199",
"0.5201218",
"0.51897377",
"0.5179408",
"0.5168417",
"0.5165746",
"0.51595855",
"0.5142092",
"0.5135239",
"0.51351005",
"0.51314193",
"0.51225173",
"0.51211095",
"0.511327",
"0.5102669",
"0.50803584",
"0.50767004",
"0.5075714",
"0.5060826",
"0.5053172",
"0.50523347",
"0.5041185",
"0.50411093",
"0.50357556",
"0.5030283",
"0.50215024",
"0.5019385",
"0.50163645",
"0.50038284",
"0.50028676",
"0.4997707",
"0.4992909",
"0.49918696",
"0.49890316",
"0.49871844",
"0.4985701",
"0.4981745",
"0.49814856",
"0.4981147",
"0.4980219",
"0.49737656",
"0.49676484",
"0.49484515",
"0.4943961",
"0.4940921",
"0.49406597",
"0.4940411",
"0.49402916",
"0.4935906",
"0.493228",
"0.4930638",
"0.49299324",
"0.49281943",
"0.492092",
"0.49202442",
"0.49196452",
"0.4919394",
"0.49183124"
] | 0.661876 | 0 |
Ask the user how many colors to use for the secret pattern. | def queryNumberOfColors(self):
self._numColorsInUse = \
self._readInt('How many colors are available', 2, len(self._palette))
return self._numColorsInUse | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unique_color(n=1):\n \n if n == 1:\n c='xkcd:red'\n elif n== 2:\n c='xkcd:green'\n elif n== 3:\n c='xkcd:yellow'\n elif n== 4:\n c='xkcd:blue'\n elif n== 5:\n c='xkcd:orange'\n elif n== 6:\n c='xkcd:purple'\n elif n== 7:\n c='xkcd:cyan'\n elif n== 8:\n c='xkcd:magenta'\n elif n== 9:\n c='xkcd:lime green'\n elif n== 10:\n c='xkcd:candy pink'\n elif n== 11:\n c='xkcd:teal'\n elif n== 12:\n c='xkcd:lavender'\n elif n== 13:\n c='xkcd:brown'\n else:\n c='xkcd:mint'\n\n return c",
"def enterGuess(self):\n validPattern = False\n while not validPattern:\n print # intentional blank line\n prompt = 'Enter a guess (colors are '\n prompt += self._palette[:self._numColorsInUse] + '): '\n patternString = raw_input(prompt)\n \n validPattern = True\n if len(patternString) != self._lengthOfPattern:\n print 'The pattern must have', self._lengthOfPattern, 'pegs'\n validPattern = False\n else:\n for i in range(self._lengthOfPattern):\n if patternString[i].upper() not in self._palette[:self._numColorsInUse]:\n validPattern = False\n if not validPattern:\n print 'The color options are', self._palette[:self._numColorsInUse]\n \n if validPattern:\n pattern = Pattern(self._lengthOfPattern)\n for i in range(self._lengthOfPattern):\n pattern.setPegColor(i, self._palette.index(patternString[i].upper()))\n\n return pattern",
"def main():\n color_name = input(\"Enter the name of color: \").strip().upper() # strip white spaces. lowercase inputs also work\n max_key_length = max([len(key) for key in NAME_TO_CODE.keys()])\n while color_name != \"\":\n if color_name in NAME_TO_CODE:\n print(\"{:{}} is {}\".format(color_name, max_key_length, NAME_TO_CODE[color_name],))\n else:\n print(\"Invalid color name\")\n color_name = input(\"Enter the name of color: \").strip().upper()",
"def cli():\n palette = \"Set1\"\n num_vals = 10\n for arg in sys.argv[1:]:\n if arg.isdigit():\n num_vals = int(arg)\n else:\n palette = arg\n run(palette, num_vals)",
"def getPetalColor():\n return input(\"What color do you want the petals to be?\")",
"def __init__(self, colorNames):\n self._lengthOfPattern = 0 # will later be queried from the user\n self._palette = '' # initials for color choices, e.g., R for red\n for color in colorNames:\n self._palette += color[0].upper()",
"def quiz1_q3(colors):\n for index, value in enumerate(colors):\n print(f\"{index + 1}: My favourite color is {value}.\")",
"def showColors(self):\n\t\tcolors = ['white', 'red', 'green', 'orange', 'blue', 'purple', 'cyan', 'lightgrey',\n\t\t\t\t 'darkgrey', 'light red', 'light green', 'yellow', 'light blue', 'purple', 'cyan', 'dark white']\n\t\tmax = curses.COLORS if curses.COLORS <= 16 else 16\n\t\tself.screen.clear()\n\t\tfor c in range(0, max):\n\t\t\tself.wts(c + 2, 1, \"color \" + str(c) + ' : ' + colors[c], c)\n\t\tself.wts(18, 1, \"color 16 : red on white\", 16)\n\t\tself.wts(20, 1, 'Color demo, displaying ' + str(max) + ' colors + 1 special')\n\t\tself.screen.refresh()\n\t\tch = False\n\t\twhile not ch:\n\t\t\tch = self.screen.getch()\n\t\tself.exit('Color demo complete')",
"async def color(self, ctx, *, value = None):\n if not value:\n await ctx.send(\"Usage: `{}color [value]`\".format(ctx.prefix))\n return\n\n value = value.lower()\n\n if not any(value.startswith(x) for x in [\"#\", \"rgb\", \"cmyk\"]):\n await ctx.send(\"Invalid value color format, please choose from rgb, cmyk or hex\")\n return\n\n error = False\n\n if value.startswith('rgb'):\n count = value.count('(') + value.count(')') + value.count(',')\n if count != 4:\n error = True\n\n number_list = value.lower().replace(\"rgb\", \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\" \", \"\")\n try:\n r, g, b = map(int, number_list.split(','))\n\n if (r < 0 or r > 255) or (g < 0 or g > 255) or (b < 0 or b > 255):\n error = True\n\n except:\n error = True\n\n if error:\n await ctx.send(\"Invalid RGB color format!\")\n return\n\n _hex = self._rgb_to_hex(r,g,b)\n c, m, y, k = self._rgb_to_cmyk(r, g, b)\n\n embed_color = int(\"0x{}\".format(_hex.replace(\"#\", '')), 16)\n embed = discord.Embed(color=embed_color)\n\n embed.title = \"Color {}\".format(value.replace(\" \", \"\"))\n embed.add_field(name=\"Hex\", value=_hex)\n embed.add_field(name=\"CMYK\", value=\"cmyk({}, {}, {}, {})\".format(c, m, y, k))\n\n elif value.startswith('#'):\n match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', value)\n if not match:\n await ctx.send(\"Invalid Hex color format!\")\n return\n\n embed_color = int(\"0x{}\".format(value.replace('#', '')), 16)\n embed = discord.Embed(color=embed_color)\n r, g, b = self._hex_to_rgb(value)\n c, m, y, k = self._rgb_to_cmyk(r, g, b)\n\n embed.title = \"Color {}\".format(value.replace(\" \", \"\"))\n embed.add_field(name=\"RGB\", value=\"rgb({}, {}, {})\".format(r, g, b))\n embed.add_field(name=\"CMYK\", value=\"cmyk({}, {}, {}, {})\".format(c, m, y, k))\n\n elif value.startswith('cmyk'):\n count = value.count('(') + value.count(')') + value.count(',')\n if count != 5:\n error = True\n\n number_list = value.lower().replace(\"cmyk\", \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\" \", \"\")\n\n try:\n c, m, y, k = map(int, number_list.split(','))\n\n if (c < 0 or c > 255) or (m < 0 or m > 255) or (y < 0 or y > 255) or (k < 0 or k > 255):\n error = True\n\n except:\n error = True\n\n if error:\n await ctx.send(\"Invalid CMYK color format!\")\n return\n\n r, g, b = self._cmyk_to_rgb(c, m, y, k)\n _hex = self._rgb_to_hex(r, g, b)\n\n embed_color = int(\"0x{}\".format(_hex.replace(\"#\", '')), 16)\n embed = discord.Embed(color=embed_color)\n\n embed.title = \"Color {}\".format(value.replace(\" \", \"\"))\n embed.add_field(name=\"Hex\", value=_hex)\n embed.add_field(name=\"RGB\", value=\"rgb({}, {}, {})\".format(r, g, b))\n\n await ctx.send(embed=embed)",
"def same_color_distribution():\n \n \n return 0.03, \"Fail to Reject\"",
"def test_colors_number(self):\n self.assertEqual(len(COLORS), 4)",
"def getCenterColor():\n return input(\"What color do you want the center color to be?\")",
"def color_creator(number_of_films):\n if 0 < number_of_films < 100:\n return 'green'\n elif 101 < number_of_films < 200:\n return 'orange'\n elif 201 < number_of_films < 500:\n return 'red'\n else:\n return 'purple'",
"def get_petal_color():\n petal_color = input(\"What color should the flower petals be?\")\n return petal_color",
"def set_pattern(colors=('green', 'blue', 'red')): # (10)\n for i in range(0, int(ceil(float(NUM_LEDS)/float(len(colors))))):\n for color in colors:\n push_color(color)",
"def setLeds(number: int, red: int, green: int, blue: int):\n pass",
"def clean_colors(self):\n err = _(\"Color must be a valid hex triplet.\")\n colors = ['background_color_custom', 'font_color_custom']\n colors2 = colors + ['background_color', 'font_color']\n # If there are custom colors specified in settings, length of\n # self.COLORS will be > 6, so check for validity\n if len(self.COLORS) > 6:\n colors = colors2\n for color in colors:\n c = getattr(self, color)\n l = len(c)\n if l:\n if l != 6:\n raise ValidationError(err)\n else:\n try:\n int(c, 16)\n except ValueError:\n raise ValidationError(err)",
"def create_random_color(self):\n # Create a list of n colors.\n n = 4\n dc = 1.0 / (n-1)\n color_list = [i*dc for i in range(n)]\n\n if self.is_scaffold:\n rgb = [1.0, 1.0, 1.0]\n else:\n rgb = [random.choice(color_list) for i in range(3)]\n # Don't generate blue (that's for a scaffold in cadnano) or black.\n if (rgb[0] == 0.0) and (rgb[1] == 0.0):\n rgb[0] = random.choice(color_list[1:])\n if rgb[2] == 0.0: \n rgb[2] = random.choice(color_list[1:]) \n #__if (rgb[0] == 0) and (rgb[1] == 0)\n #__if self.is_scaffold\n return rgb",
"def CreateCode():\n\n global Code\n\n colors = [\"black\", \"white\", \"red\", \"green\", \"yellow\", \"blue\"]\n codepicked = False\n\n print(\"good to see you\")\n print(\"want to test the limits of my knowledge?\")\n\n while not codepicked:\n #print(\"The possible colors for a code are: Red, Blue, Yellow, Green, Black and White\")\n Code = (str(input(\"Choose the colors (letters) for your code: \")))\n\n return Allcombos()",
"def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret",
"def getDistinguishableColors(numColors, bgColors = [(1, 1, 1)]):\n\n\t# Start out by generating a sizeable number of RGB triples. This represents our space \n\t# of possible choices. By starting out in the RGB space, we ensure that all of the colors \n\t# can be generated by the monitor.\n\n\t# Number of grid divisions along each axis in RGB space\n\tnumGrid = 30\n\tx = np.linspace(0, 1, numGrid)\n\t[R, G, B] = np.meshgrid(x, x, x)\n\trgb = np.concatenate((R.T.reshape((numGrid*numGrid*numGrid, 1)), \\\n\t\tG.T.reshape((numGrid*numGrid*numGrid, 1)), \\\n\t\tB.T.reshape((numGrid*numGrid*numGrid, 1))), axis = 1)\n\tif numColors > rgb.shape[0] / 3:\n\t\traise ValueError('You cannot really distinguish that many colors! At most 9000 colors')\n\n\t# If the user specified multiple bgColors, compute distance from the candidate colors\n\t# to the background colors.\n\tmindist = np.full(rgb.shape[0], np.inf)\n\tfor c in bgColors:\n\t\tcol = np.full(rgb.shape, 1)\n\t\tcol[:,0] = c[0]\n\t\tcol[:,1] = c[1]\n\t\tcol[:,2] = c[2]\n\t\tdx = np.sum(np.abs(rgb - col), axis = 1)\n\t\tmindist = np.minimum(mindist, dx)\n\n\t# Initialize a list of colors\n\tcolors = []\n\tlastColor = bgColors[-1]\n\tfor i in range(numColors):\n\t\tcol = np.full(rgb.shape, 1)\n\t\tcol[:,0] = lastColor[0]\n\t\tcol[:,1] = lastColor[1]\n\t\tcol[:,2] = lastColor[2]\n\t\tdx = np.sum(np.abs(rgb - lastColor), axis = 1)\n\t\tmindist = np.minimum(mindist, dx)\n\t\tindex = np.argmax(mindist)\n\t\tchosenColor = (rgb[index,0], rgb[index,1], rgb[index,2])\n\t\tcolors.append(chosenColor)\n\t\tlastColor = chosenColor\n\n\treturn colors",
"def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"",
"def random_colors(N,bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i/N,1,brightness)for i in range(N)]\n colors = list(map(lambda c: clolorsys.hsv_to_rgb(*c),hsv))\n random.shuffle(colors)\n return colors",
"def user(num):\n while True:\n print(\"Option: {}\".format(num))\n\n line = input()\n\n try:\n if line[0] == 'h':\n print(help_message)\n else:\n white, black = map(int, line.split())\n return white, black\n except:\n print(invalid_option.format(line))",
"def quiz1_q5(colors, punctuations):\n for index, value in enumerate(punctuations):\n print(f\"{index + 1}: My favourite color is {colors[index]}{value}\")",
"def random_color():\n return random.choice(colors)",
"def _random_color_picker(self, num_of_categories: int) -> List[str]:\n color_list = list()\n color_picker = Tab10()\n\n if num_of_categories > len(color_picker):\n raise IndexError(\"Requested number of colors {} > {}. Please use dataclass that has more colors available.\")\n\n i = 0\n while i < num_of_categories:\n ran_color = color_picker.getrandomcolor()\n if ran_color not in color_list:\n color_list.append(ran_color); i += 1\n\n return color_list",
"def drawColorPick(colorPick, colorWindow):\n for i in range(4): #Draw the 4 colors\n colorPick[i].draw(colorWindow)",
"def get_good_colors(N):\n HSV_tuples = [(x*1.0/N, 0.5, 1) for x in range(N)]\n return(255 * np.array(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)))",
"def random_color(num):\n # 为每个类别的边界框随机匹配相应颜色\n np.random.seed(80)\n COLORS = np.random.randint(0, 256, size=(num, 3), dtype='uint8') #\n return COLORS",
"def setColorConf(colors,ngroups)->list:\n if colors == \"hcl\":\n try:\n from colorspace import sequential_hcl\n color_repo = sequential_hcl(h=[15,375],l=65,c=70)\n colors_list = color_repo.colors(ngroups + 1)\n except ImportError:\n print('hcl colorspace package has not being installed.')\n print('please try the following command:')\n print('pip install git+https://github.com/retostauffer/python-colorspace')\n else:\n colors = list(plt.get_cmap(colors).colors)\n colors_list = [to_hex(color) for color in colors]\n colors_list = colors_list[:ngroups]\n\n return colors_list",
"def get_center_color():\n center_color = input(\"What color should the center of the flower be?\")\n return center_color",
"def quiz1_q2(color, punctuation, loops):\n for i in range(1, loops + 1):\n print(f\"{i}: My favourite color is {color}{punctuation}\")",
"def get_color(tense, seq=0):\n if tense in ['Perfekt', 'present perfect', 'pretérito perfecto compuesto', 'passé composé', 'vtt',\n 'passato prossimo', 'PresPerf']:\n return '#1f77b4'\n elif tense in ['Präsens', 'simple present', 'presente', 'présent', 'ott', 'Present', 'present imperfective', 'present']:\n return '#ff7f0e'\n elif tense in ['Präteritum', 'simple past', 'pretérito perfecto simple', 'indefinido', 'passé simple', 'ovt', 'Past', 'past perfective', 'past']:\n return '#2ca02c'\n elif tense in ['Plusquamperfekt', 'past perfect', 'pretérito pluscuamperfecto', 'plus-que-parfait', 'vvt',\n 'trapassato prossimo', 'PastPerf', 'past+infinitive']:\n return '#d62728'\n elif tense in ['Futur I', 'simple future', 'futur', 'futuro', 'ottt', 'future']:\n return '#9467bd'\n elif tense in ['Futur II', 'future perfect', 'futur antérieur', 'futuro perfecto', 'ovtt', 'future past']:\n return '#8c564b'\n elif tense in ['present perfect continuous', 'Cont', 'present/adjective']:\n return '#e377c2'\n elif tense in ['pasado reciente', 'passé récent', 'RecentPast', 'copular']:\n return '#7f7f7f'\n elif tense in ['pretérito imperfecto', 'imparfait', 'Imperfecto', 'past imperfective', 'past+present']:\n return '#bcbd22'\n elif tense in ['present participle', 'participio', 'Gerund', 'gerund', 'gerund perfective']:\n return '#17becf'\n elif tense in ['Infinitiv', 'infinitief', 'infinitif', 'infinitivo', 'infinitive']:\n return '#aec7e8'\n elif tense in ['present continuous', 'PresGer', 'existential']:\n return '#ffbb78'\n elif tense in ['condicional', 'conditionnel', 'Rep']:\n return '#98df8a'\n elif tense in ['past continuous']:\n return '#ff9896'\n elif tense in ['past perfect continuous']:\n return '#c5b0d5'\n elif tense in ['future continuous']:\n return '#c49c94'\n elif tense in ['future in the past', 'futuro perfecto']:\n return '#f7b6d2'\n elif tense in ['future in the past continuous']:\n return '#c7c7c7'\n elif tense in ['infinitivo perfecto']:\n return '#dbdb8d'\n elif tense in ['futur proche', 'futuro próximo']:\n return '#9edae5'\n elif tense in ['futur proche du passé', 'futuro próximo en imperfecto']:\n return '#393b79'\n elif tense in ['conditionnel passé']:\n return '#5254a3'\n elif tense in ['subjuntivo presente']:\n return '#e7cb94'\n elif tense in ['subjuntivo pretérito imperfecto']:\n return '#8c6d31'\n elif tense in ['participle past perfective active']:\n return '#843c39'\n elif tense in ['gerund imperfective']:\n return '#393b79'\n\n # Mandarin\n elif tense in ['unmarked']:\n return '#1f77b4'\n elif tense in ['rvc']:\n return '#ff7f0e'\n elif tense in ['le1', 'le']:\n return '#2ca02c'\n elif tense in ['le12']:\n return '#d62728'\n elif tense in ['guo']:\n return '#9467bd'\n elif tense in ['zhe']:\n return '#8c564b'\n elif tense in ['zai']:\n return '#e377c2'\n elif tense in ['unmarked duplication']:\n return '#7f7f7f'\n elif tense in ['adv']:\n return '#bcbd22'\n elif tense in ['adj']:\n return '#17becf'\n elif tense in ['conj']:\n return '#aec7e8'\n elif tense in ['mood']:\n return '#ffbb78'\n elif tense in ['noun']:\n return '#98df8a'\n elif tense in ['non-verb', 'other']:\n return '#ff9896'\n\n # ViB\n elif tense in ['adjectif']:\n return '#e6194b'\n elif tense in ['adverbe']:\n return '#3cb44b'\n elif tense in ['article défini']:\n return '#ff0000'\n elif tense in ['article défini pluriel']:\n return '#bf0000'\n elif tense in ['article défini singulier']:\n return '#ff0051'\n elif tense in ['article indéfini']:\n return '#ff8400'\n elif tense in ['article indéfini pluriel']:\n 
return '#8c4800'\n elif tense in ['article indéfini singulier']:\n return '#4c2800'\n elif tense in ['déterminant défini pluriel']:\n return '#adb300'\n elif tense in ['déterminant démonstratif']:\n return '#56bf00'\n elif tense in ['déterminant indéfini']:\n return '#285900'\n elif tense in ['déterminant possessif']:\n return '#00e686'\n elif tense in ['expression']:\n return '#e377c2'\n elif tense in ['nom commun']:\n return '#7f7f7f'\n elif tense in ['nom propre']:\n return '#bcbd22'\n elif tense in ['nom propre gén']:\n return '#dbdb8d'\n elif tense in ['numéral']:\n return '#17becf'\n elif tense in ['pronom démonstratif']:\n return '#5b008c'\n elif tense in ['pronom indéfini']:\n return '#2200ff'\n elif tense in ['pronom interrogatif']:\n return '#0058e6'\n elif tense in ['pronom personnel']:\n return '#006773'\n elif tense in ['pronom personnel adverbial']:\n return '#00331e'\n elif tense in ['pronom relatif']:\n return '#285900'\n elif tense in ['pronom réfléchi']:\n return '#00e686'\n\n # Contraction\n elif tense in ['contracted', 'bare noun']:\n return '#2f5597'\n elif tense in ['uncontracted', 'demonstrative']:\n return '#fd8f8e'\n\n else:\n return COLOR_LIST[seq % len(COLOR_LIST)]",
"def __init__(self, colorNames):\n self._colorOptions = '' # initials for color choices\n for color in colorNames:\n self._colorOptions += color[0].upper()\n # following will be reset when startGame is called\n self._currentTurnNum = self._lengthOfPattern = self._maxNumberOfTurns = 0",
"def validate_colors(colors):\n colors_list = []\n\n if isinstance(colors, str):\n if colors in PLOTLY_SCALES:\n return\n elif 'rgb' in colors or '#' in colors:\n colors_list = [colors]\n else:\n raise exceptions.PlotlyError(\n \"If your colors variable is a string, it must be a \"\n \"Plotly scale, an rgb color or a hex color.\"\n )\n\n elif isinstance(colors, tuple):\n if isinstance(colors[0], Number):\n colors_list = [colors]\n else:\n colors_list = list(colors)\n\n if isinstance(colors, dict):\n colors_list.extend(colors.values())\n\n elif isinstance(colors, list):\n colors_list = colors\n\n # Validate colors in colors_list\n for j, each_color in enumerate(colors_list):\n if 'rgb' in each_color:\n each_color = color_parser(\n each_color, unlabel_rgb\n )\n for value in each_color:\n if value > 255.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your rgb colors \"\n \"tuples cannot exceed 255.0.\"\n )\n\n elif '#' in each_color:\n each_color = color_parser(\n each_color, hex_to_rgb\n )\n\n elif isinstance(each_color, tuple):\n for value in each_color:\n if value > 1.0:\n raise exceptions.PlotlyError(\n \"Whoops! The elements in your colors tuples \"\n \"cannot exceed 1.0.\"\n )\n return colors",
"def _validate_color(color):\n if not isinstance(color, (list, tuple)):\n raise ValueError(\"Color has to be list, or tuple\")\n if len(color) != 3:\n raise ValueError(\"Color have to contain exactly 3 values: [R, G, B]\")\n for channel in color:\n validate_channel_value(channel)",
"def _confirm_color(self, event = None):\n color = self._entry.get().strip()\n if color != \"\":\n self._color = color\n self._window.destroy()",
"def uniquecolors(n):\n hues = [360.0 / n * i for i in range(n)]\n hs = [math.floor(hue / 60) % 6 for hue in hues]\n fs = [hue / 60 - math.floor(hue / 60) for hue in hues]\n return [('rgb({}%, {}%, {}%)'.format(*tuple(a * 100 for a in rgbcolor(h, f)))) for h, f in zip(hs, fs)]",
"def magic_colors(self,parameter_s = ''):\n \n new_scheme = parameter_s.strip()\n if not new_scheme:\n print 'You must specify a color scheme.'\n return\n try:\n self.shell.outputcache.set_colors(new_scheme)\n except:\n warn('Error changing prompt color schemes.\\n'\n + str(sys.exc_info()[1]))\n else:\n self.shell.rc.colors = \\\n self.shell.outputcache.color_table.active_scheme_name\n try:\n self.shell.InteractiveTB.set_colors(scheme = new_scheme)\n self.shell.SyntaxTB.set_colors(scheme = new_scheme)\n except:\n warn('Error changing exception color schemes.\\n'\n + str(sys.exc_info()[1]))\n if self.shell.rc.color_info:\n try:\n self.shell.inspector.set_active_scheme(new_scheme)\n except:\n warn('Error changing object inspector color schemes.\\n'\n + str(sys.exc_info()[1]))\n else:\n self.shell.inspector.set_active_scheme('NoColor')",
"def some_colors(number = 5):\n import colorsys\n N = number\n HSV_tuples = [(x*1.0/N, 1.0, 1.0) for x in range(N)]\n RGB_tuples = map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)\n\n # if only one color is required don't put in in the list\n if number == 1:\n RGB_tuples = RGB_tuples\n return RGB_tuples",
"def pickColor(colorWindow, colorPick):\n picked = False #False until choice is made\n while not picked:\n inputClick = colorWindow.getMouse() #gets input on where click has been made\n for i in range(4): #Runs all 4 choices to see if any match\n sqr_to_check = colorPick[i] #Compares to new square\n #Compares X and Y of square to see if click matches. If True square i has been clicked\n if sqr_to_check.p1.x < inputClick.x < sqr_to_check.p2.x and sqr_to_check.p1.y < inputClick.y < sqr_to_check.p2.y: \n if i == 0: \n return \"Blue\"\n if i == 1:\n return \"Green\"\n if i == 2:\n return \"Yellow\"\n if i == 3:\n return \"Red\"",
"def color_negative_red(val):\n if val == 'k':\n color = 'red' \n else:\n color = 'yellow'\n return ['color: %s' % color]*3",
"def generateColor(text):\n random.seed(text)\n return ('#%06X' % random.randint(0,0xFFFFFF))",
"def quiz1_q4(color, punctuations):\n for index, value in enumerate(punctuations):\n print(f\"{index + 1}: My favourite color is {color}{value}\")",
"def valid_color(self, color):\n valid = False\n if (isinstance(color, list) and len(color) == 3):\n valid = True\n for chan in color:\n valid = valid and (0 <= chan <= 15)\n if not valid:\n _LOGGER.warn(\"{0} was not a valid color\".format(color))\n return valid",
"def get_colors(n):\n color = cm.rainbow(np.linspace(0, 1, n))\n return color",
"def c_prnt(self, text, color):\n if color == 'pink':\n a = self.pink\n elif color == 'blue':\n a = self.blue\n elif color == 'green':\n a = self.green\n elif color == 'dgrn':\n a = self.dgrn\n elif color == 'yel':\n a = self.yellow\n elif color == 'amber':\n a = self.amber\n else:\n raise Exception('The color you selected is not acceptable')\n print(a + text + self.ENDC)",
"def run(palette='hsv', num_vals=12):\n fout_png = 'colors_{P}_{N}.png'.format(P=palette, N=num_vals)\n _, axis = plt.subplots(1, 1, figsize=(6, 6))\n colobj = MplColorHelper(palette, 0, num_vals-1)\n colors = [colobj.get_hexstr(yval) for yval in range(num_vals)]\n plt_color_text(colors)\n for idx, color in enumerate(reversed(colors)):\n print('{N:2} {COLOR}'.format(N=idx, COLOR=color))\n axis.set_title('{N} Discrete Colors from {MAP}'.format(N=num_vals, MAP=palette))\n plt.show()\n plt.savefig(fout_png)\n print(' WROTE: {PNG}'.format(PNG=fout_png))",
"def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))",
"def interface_scramble(max_moves, puzzle, command_color=\"#ff8800\", arg_color=\"#5588ff\", error_color=\"#ff0000\"):\n try:\n puzzle.scramble(max_moves=int(max_moves), arg_color=arg_color)\n except:\n puzzle.scramble(arg_color=arg_color)",
"def randcolour():\n colour = [0,0,0]\n while sum(colour)<450:\n for i in range(3):\n colour[i] = int(random.random()*255)\n return(tuple(colour))",
"def random_colors(self, N, bright=True):\r\n brightness = 1.0 if bright else 0.7\r\n hsv = [(i / N, 1, brightness) for i in range(N)]\r\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\r\n random.shuffle(colors)\r\n return colors",
"def EyeColorTest(str):\n\n\tvalidcolors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\treturn str in validcolors",
"def unique_colors_rgb(n):\r\n hues = []\r\n # i is in the range 0, 1, ..., n - 1\r\n for i in range(1, n + 1):\r\n hues.append(360.0 / i)\r\n\r\n hs = []\r\n for hue in hues:\r\n h = math.floor(hue / 60) % 6\r\n hs.append(h)\r\n\r\n fs = []\r\n for hue in hues:\r\n f = hue / 60 - math.floor(hue / 60)\r\n fs.append(f)\r\n\r\n rgbcolors = []\r\n for h, f in zip(hs, fs):\r\n v = 1\r\n p = 0\r\n q = 1 - f\r\n t = f\r\n if h == 0:\r\n color = v, t, p\r\n elif h == 1:\r\n color = q, v, p\r\n elif h == 2:\r\n color = p, v, t\r\n elif h == 3:\r\n color = p, q, v\r\n elif h == 4:\r\n color = t, p, v\r\n elif h == 5:\r\n color = v, p, q\r\n rgbcolors.append(color)\r\n\r\n return rgbcolors",
"def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def random_colors(N, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def createColors():\n\n colors = \"Blue\", \"Green\", \"Yellow\", \"Red\"\n color_list = []\n color_colum = []\n\n for i in range(15): #Create 2D list of 15*25 with colors\n color_colum = []\n for k in range(25):\n color_colum.append(random.choice(colors))\n color_list.append(color_colum)\n \n return color_list",
"async def showcolor(self, ctx: discord.ext.commands.Context, *args):\n message_channel: discord.abc.Messageable = ctx.message.channel\n if len(args) == 1:\n argstring = str(args[0]).strip()\n # request the color informations to the api\n if argstring.startswith(\"(\") and argstring.endswith(\")\"):\n url = \"http://www.thecolorapi.com/id?rgb=rgb(\"\n rgblist = argstring[1:-1].split(',')\n for color in rgblist:\n url += color.strip() + \",\"\n url = url[:-1] + \")\"\n elif argstring.startswith(\"#\"):\n url = \"http://www.thecolorapi.com/id?hex=\" + argstring[1:]\n else:\n await message_channel.send(\n \"Color format non valid, for more see \" + self.command_prefix + \"help showcolor\")\n return\n reply_error = False\n request_result = None\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp: # the website use get\n if not str(resp.status) == \"200\":\n reply_error = True\n else:\n request_result = await resp.json()\n if reply_error:\n await message_channel.send(\"*An error occurred requesting the color... is your color code valid?*\")\n else:\n embed = discord.Embed(title=\"Color Display\", url=request_result[\"image\"][\"bare\"],\n color=(request_result[\"rgb\"][\"r\"] << 16) + (request_result[\"rgb\"][\"g\"] << 8) +\n request_result[\"rgb\"][\"b\"])\n embed.set_author(name=\"Color asked by by \" + ctx.message.author.name,\n icon_url=ctx.message.author.avatar_url)\n embed.add_field(name=\"Color Hex Value:\", value=request_result[\"hex\"][\"value\"], inline=False)\n embed.add_field(name=\"Color RGB Value:\", value=request_result[\"rgb\"][\"value\"], inline=False)\n embed.set_footer(text=self.botVariables.get_description(),\n icon_url=self.botVariables.get_bot_icon())\n await message_channel.send(embed=embed)\n else:\n await message_channel.send(\n \"**Usage:** \" + self.command_prefix + \"showcolor #COLORHEX/\\\"(R,G,B)\\\", for more see \"\n + self.command_prefix + \"help showcolor\")",
"def get_solution(player_mode):\n from random import sample as randomizer\n\n # Red | Green | Blue | Yellow | Orange | Purple\n possible_inputs = \"R G B Y O P\".split()\n\n if player_mode == '2':\n solution = input(\"MasterMind, please enter a solution of 4 colors: \").split()\n # Check length\n if len(solution) != 4:\n print('Your input is invalid. Please enter a solution of 4 colors.\\n')\n return get_solution('2')\n\n # Length's good, check whether the input's valid\n else:\n for i in solution:\n if i not in possible_inputs:\n print('Your input is invalid. Please select from', possible_inputs)\n print()\n return get_solution('2')\n return solution\n\n else:\n return randomizer(possible_inputs, 4)",
"def my_color_handler(handler_input):\n # type: (HandlerInput) -> Response\n slots = handler_input.request_envelope.request.intent.slots\n\n if color_slot in slots:\n fav_color = slots[color_slot].value\n handler_input.attributes_manager.session_attributes[\n color_slot_key] = fav_color\n speech = (\"Now I know that your favorite color is {}. \"\n \"You can ask me your favorite color by saying, \"\n \"what's my favorite color ?\".format(fav_color))\n reprompt = (\"You can ask me your favorite color by saying, \"\n \"what's my favorite color ?\")\n else:\n speech = \"I'm not sure what your favorite color is, please try again\"\n reprompt = (\"I'm not sure what your favorite color is. \"\n \"You can tell me your favorite color by saying, \"\n \"my favorite color is red\")\n\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response",
"def _color(self, args):",
"def prompt_style():\r\n font_numbers = {'0', '1', '2', '3', '4', '5', '6'}\r\n print(\"Background Color\")\r\n background_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(background_color) != 7 or background_color[0] != '#':\r\n while background_color not in COLORS:\r\n print(\"Illegal format\")\r\n background_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(background_color) == 7 and background_color[0] == '#':\r\n break\r\n see_font = str.lower(input(\"Do you want to see what the fonts look like? [yes]\\t\"))\r\n if see_font == \"yes\" or see_font == \"\":\r\n print(\"Close the window when you have made your choice\")\r\n turtle_fonts()\r\n print(\"Choose a font by its number\",\r\n \"0: Arial, size 14\",\r\n \"1: Comic Sans MS, size 14\",\r\n \"2: Lucida Grande, size 14\",\r\n \"3: Tahoma, size 14\",\r\n \"4: Verdana, size 14\",\r\n \"5: Helvetica, size 14\",\r\n \"6: Times New Roman, size 14\", sep='\\n')\r\n font = input(\">> \")\r\n while font not in font_numbers:\r\n font = input(\"Invalid font number, enter from 0 - 6\\t\")\r\n if font == \"0\":\r\n font = \"Arial\"\r\n elif font == \"1\":\r\n font = \"Comic Sans MS\"\r\n elif font == \"2\":\r\n font = \"Lucida Grande\"\r\n elif font == \"3\":\r\n font = \"Tahoma\"\r\n elif font == \"4\":\r\n font = \"Verdana\"\r\n elif font == \"5\":\r\n font = \"Helvetica\"\r\n elif font == \"6\":\r\n font = \"Times New Roman\"\r\n print(\"Paragraph Text Color\")\r\n paragraph_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(paragraph_color) != 7 or paragraph_color[0] != '#':\r\n while paragraph_color not in COLORS:\r\n print(\"Illegal format\")\r\n paragraph_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(paragraph_color) == 7 and paragraph_color[0] == '#':\r\n break\r\n print(\"Heading Color\")\r\n head_color = str.lower(input(\"Choose the name of a color, or in format '#XXXXXX':\\t\"))\r\n if len(head_color) != 7 or head_color[0] != '#':\r\n while head_color not in COLORS:\r\n print(\"Illegal format\")\r\n head_color = str.lower(input(\"Choose the color name or #XXXXXX\\t\"))\r\n if len(head_color) == 7 and head_color[0] == '#':\r\n break\r\n return background_color, font, paragraph_color, head_color",
"def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)",
"def pretty_colours(how_many):\r\n golden_ratio_conjugate = (1 + math.sqrt(5)) / 2\r\n hue = random.random() # use random start value\r\n final_colours = []\r\n for tmp in range(how_many):\r\n hue += golden_ratio_conjugate * (tmp / (5 * random.random()))\r\n hue = hue % 1\r\n temp_c = [x for x in hsv_to_rgb(hue, 0.5, 0.95)]\r\n final_colours.append(temp_c)\r\n # originally returned ['rgb(123,123,123)', 'rgb(123,123,123)']\r\n # now [[0.123,0.123,0.123],[0.123,0.123,0.123]]\r\n return final_colours",
"def main():\n print('Generating random color')\n c = create_color()\n attr = input('Specify an attribute: ')\n if hasattr(c,attr):\n print('The '+attr+' attribute is '+str(getattr(c,attr)))\n else:\n print('There is no attribute '+attr+'.')",
"def random_colors(N, bright=True):\n import random\n import colorsys\n\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def mutate_color(color):\n color[random.randrange(0, 3)] = random.random() % 1\n return color",
"def mutate_color(mutated_genome):\n seed = random.randint(0,2)\n if seed == 0:\n new_color(mutated_genome)\n elif seed == 1:\n change_color(mutated_genome)\n else: #seed == 2:\n switch_colors(mutated_genome)\n #else: seed == 3: # depricated\n # shuffle_colors(mutated_genome)",
"def noisy_color(col, noise, amount) :\n if random.random() < noise :\n red = (col[0] + random.randrange(-amount,amount))\n green = (col[1] + random.randrange(-amount,amount))\n blue = (col[2] + random.randrange(-amount,amount))\n red = clamp(red,0,255)\n green = clamp(green,0,255)\n blue = clamp(blue,0,255)\n return (red,green,blue)\n else :\n return col",
"def color_from_seed(seed):\n supported_colors = []\n for name, hex in matplotlib.colors.cnames.items():\n supported_colors.append(hex)\n ascii_character_sum = sum(bytearray(seed, \"utf8\")) # Sums the ASCII values of every character\n selection = ascii_character_sum % len(supported_colors)\n \n return supported_colors[selection]",
"def main():\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n while not is_valid_password(password):\n print(\"Invalid password!\")\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n print(\"*\" * len(password))",
"def brut_mind(code = Code(), display=False):\n\tfrom itertools import permutations\n\tprint(f\"code is {code}\")\n\n\tturn = 0\n\tcompleted = False\n\tused_colors = []\n\twhile not completed:\n\t\t# 1st phase\n\t\tfor color in range(9):\n\t\t\tif display:\n\t\t\t\tprint(f'trying {color}')\n\t\t\tscore = code.score([color,color,color,color,color])\n\t\t\tused_colors+= [color for i in range(score[0])]\n\t\t\tif len(used_colors) == 5:\n\t\t\t\tbreak\n\t\tif display:\n\t\t\tprint(f\"colors found are {used_colors}\")\n\n\t\t# 2nd phase\n\t\tfor attempt in list(permutations(used_colors)):\n\t\t\tif display:\n\t\t\t\tprint(f\"attempt {attempt}\")\n\t\t\tscore = code.score(list(attempt))\n\t\t\tcompleted = score == (5,0)\n\t\t\tif completed:\n\t\t\t\tbreak\n\t\t\tturn +=1\n\tprint(f\"the code was {attempt}\")\n\n\tprint(f\"completed in {turn} turn(s)\")",
"def check_color(i, j, k):\n img.show()\n image = Image.new(\"RGB\", (200, 200), (int(Y), int(Y), int(Y)))\n image.show()\n image = Image.new(\"RGB\", (200, 200), (int(i), int(j), int(k)))\n image.show()",
"def colortopalette(color, palette):\n for a,b in palette:\n if color >= a and color < b:\n return b",
"def random_colors(n, bright=True):\n brightness = 1.0 if bright else 0.7\n hsv = [(i / n, 1, brightness) for i in range(n)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def count_colors(board, color):\n n = 0\n for cell in board:\n if cell == color:\n n += 1\n elif cell == cinv(color):\n n -= 1\n return n",
"def color_picker(self):\n\n\t\tnum_to_select = 4\n\t\tpeg_color_list = [] #creates the list to store the peg object\n\n\t\t#write a for loop to set a loop to select 4 colors from SOLUTION in mm_model\n\t\tfor i in range(num_to_select): #use i just to run the loop, variable is not used elsewhere \n\t\t\t# print(i)\n\t\t\tcolor = random.choice(MasterModel.COLORS)\n\t\t\t# print(color)\n\t\t\t#associate color with peg objects\n\t\t\tpeg = ColorPeg(color)\n\n\t\t\t#append the peg_color list to make a list of peg objects \n\t\t\tpeg_color_list.append(peg)\n\t\t\t# print (peg_color_list)\n\t\t\n\t\t#create object for solution so it can be stored in model.py\n\t\tsolution = Guess(peg_color_list)\n\n\t\t#put solution into the self.guesses dictionary in the model\n\t\tself.model.guesses[\"solution\"] = solution\n\n\n\t\t#Testing Stuff:\n\t\t# for peg in peg_color_list:\n\t\t# \tprint(peg.peg_color)\n\n\t\t# print(self.model.guesses[\"solution\"])",
"def color_sample():\r\n env = dict()\r\n setup_quiet_build(env)\r\n for item in env.iteritems():\r\n print item[0],item[1]",
"def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))",
"def shuffle_colors(mutated_genome):\n mutated_genome",
"def default_render_color_maker(num:int):\n if num < 7:\n log.warn('Fewer than 7 rendering colors are being generated. This may cause issues if a URDF with a 6+ axis robot is loaded.')\n\n b = np.linspace(0,255,num).astype(int) # Blue values are always unique\n\n g = [0] * b.size\n r = np.abs(255 - 2*b)\n\n colors = []\n for idx in range(num):\n colors.append([b[idx],g[idx],r[idx]])\n return colors",
"def change(widget, colors): \n\t\n new_val = '#'\n for name in ('red', 'green', 'blue'):\n new_val += colors[name].get()\n widget['bg'] = new_val",
"def changecolor (color):\n valid_colors = (\"red\", \"grey\", \"yellow\", \"green\")\n if color in valid_colors:\n if changecolor.times:\n print(\"The color was last changed at \", changecolor.times[-1])\n print (color)\n changecolor.times.append(time.asctime())\n else:\n n = valid_colors.__len__()\n not_last = valid_colors[:n-1]\n last = valid_colors[-1]\n\n message = ', '.join(not_last) + ' and ' + last\n print (\"sorry, a color can only be\", message)",
"def random_colors(N, bright=True):\n import random\n import colorsys\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors",
"def test_color__sequence_arg(self):\n color_values = (33, 44, 55, 66)\n for seq_type in (tuple, list):\n color = pygame.Color(seq_type(color_values))\n\n self.assertEqual(color.r, color_values[0])\n self.assertEqual(color.g, color_values[1])\n self.assertEqual(color.b, color_values[2])\n self.assertEqual(color.a, color_values[3])",
"def draw_multicolor_square(t,sz):\r\n for i in [\"red\", \"purple\", \"hotpink\", \"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)",
"def random_color(search=None): \r\n if search: c = choice(search_color(search))\r\n else: c = choice(THECOLORS.values())\r\n \r\n #debug: print type(c), c # returns Color()\r\n return c \r\n #todo: exception on color search fail? OR just default to white.\r",
"def test_color__int_arg(self):\n for value in (0x0, 0xFFFFFFFF, 0xAABBCCDD):\n color = pygame.Color(value)\n\n self.assertEqual(color.r, (value >> 24) & 0xFF)\n self.assertEqual(color.g, (value >> 16) & 0xFF)\n self.assertEqual(color.b, (value >> 8) & 0xFF)\n self.assertEqual(color.a, value & 0xFF)",
"def get_color(activePerMillion):\n activePer100k = activePerMillion / 10.0\n if activePer100k < 100:\n return \"#aaf0d1\"\n elif activePer100k < 500:\n return \"#a3f7bf\"\n elif activePer100k < 1000:\n return \"#90EE90\"\n elif activePer100k < 1500:\n return \"#00ff7f\"\n elif activePer100k < 2000:\n return \"#77dd77\"\n elif activePer100k < 2500:\n return \"#32cd32\"\n elif activePer100k < 3000:\n return \"#4cbb17\"\n elif activePer100k < 3500:\n return \"#228b22\"\n elif activePer100k < 4000:\n return \"#355e3b \"\n else:\n return \"#006400\"",
"def display_usable_colors(num_colors, radius, thickness):\n\n global screen\n\n # The list of colors to be displayed\n colors = list(CRAYONBOX.keys())[3:num_colors]\n\n # How much to offset the display by\n offset = 50\n\n for i in range(0, len(colors)):\n\n x = int(i * offset + offset / 2)\n y = offset\n\n rad = int(radius / 2)\n thick = int(thickness / 2)\n\n color = colors[i]\n\n pygame.draw.circle(screen, CRAYONBOX[color], (x, y), rad - thick)\n pygame.draw.circle(screen, CRAYONBOX[\"BLACK\"], (x, y), rad, thick)",
"def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))",
"def message_colour_tick():\n global colour_count\n colour_count += 1",
"def get_random_color(color_list, **kwargs):\n color = None\n n_color_candidates = kwargs.get('n_color_candidates', 10)\n color_candidates_matrix = np.random.rand(n_color_candidates, 3) # creating matrix of candidate rgb values\n norm = 0.\n for i in range(n_color_candidates):\n candidate_color = color_candidates_matrix[i]\n candidate_norm = np.min([np.linalg.norm(existing_color-candidate_color) for existing_color in color_list])\n if candidate_norm > norm:\n norm = candidate_norm\n color = candidate_color\n color_list.append(color)\n return color",
"def nthColor(i):\n if i < len(colors):\n return colors[i]\n\n c1 = colors[i % len(colors)]\n c2 = nthColor(i // len(colors))\n\n return \"#\" + hex((int(c1[1:],16) + int(c2[1:],16)) // 2)[2:]",
"def _random_color(self):\n levels = range(0, 256)\n return tuple(random.choice(levels) for _ in range(3))",
"def get_colors(num_colors):\n import colorsys\n colors = []\n for i in np.arange(0., 360., 360. / num_colors):\n hue = i/360.\n lightness = (50 + np.random.rand() * 10)/100.\n saturation = (90 + np.random.rand() * 10)/100.\n colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))\n return colors",
"def is_colorstr(arg):\n try:\n assert len(arg) == 6\n for c in arg:\n assert c in COLORMAP\n except AssertionError:\n raise argparse.ArgumentTypeError('%s is not a valid color string' % arg)\n return arg",
"def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))"
] | [
"0.63058114",
"0.6133298",
"0.6033995",
"0.5919279",
"0.589637",
"0.58485484",
"0.5842015",
"0.5833451",
"0.58159053",
"0.57597435",
"0.57432437",
"0.5722553",
"0.5701675",
"0.5683642",
"0.5675337",
"0.56218904",
"0.5573346",
"0.5539116",
"0.5536698",
"0.55081004",
"0.5491249",
"0.5488135",
"0.5433827",
"0.54330397",
"0.5410673",
"0.53964627",
"0.5383462",
"0.5371267",
"0.5362221",
"0.5356136",
"0.5307256",
"0.5280523",
"0.52789503",
"0.52762455",
"0.5243587",
"0.52355325",
"0.5230908",
"0.5221442",
"0.5218153",
"0.520475",
"0.52022874",
"0.52016807",
"0.51956135",
"0.518468",
"0.5177751",
"0.5161222",
"0.51574713",
"0.5156494",
"0.5145449",
"0.5142925",
"0.51369643",
"0.51295424",
"0.51237833",
"0.5122092",
"0.5112383",
"0.51018375",
"0.51018375",
"0.51018375",
"0.51003975",
"0.509787",
"0.50972944",
"0.50852156",
"0.5084719",
"0.5083993",
"0.50734556",
"0.50589144",
"0.50575686",
"0.50566226",
"0.505415",
"0.5048199",
"0.5045791",
"0.5045528",
"0.5040529",
"0.50359297",
"0.5027064",
"0.5026354",
"0.5022997",
"0.5019778",
"0.50194687",
"0.50181204",
"0.50174594",
"0.5009382",
"0.5008546",
"0.50041795",
"0.49952713",
"0.4991446",
"0.4991203",
"0.49863812",
"0.4984878",
"0.49768737",
"0.49698797",
"0.4969341",
"0.49552125",
"0.49540123",
"0.49523553",
"0.49519783",
"0.49516034",
"0.49490297",
"0.49483657",
"0.4947016"
] | 0.6033873 | 3 |
Ask the user for the maximum number of guesses to be allowed. | def queryNumberOfTurns(self):
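# delegate to the _readInt helper (defined elsewhere in the class); 1 and 20 appear to be the accepted min/max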
return self._readInt('How many turns are allowed', 1, 20) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_max_guesses():\n print(\"You'll get 5 guesses per problem!\")\n return 5",
"def max_guesses():\n\tprint \"\\n\" + \"How many guesses would you like per problem?\" + \"\\n\"\n\tmax_guesses = None\n\twhile max_guesses is None:\n\t\ttry:\n\t\t\tmax_input = int(raw_input('Please enter a positive integer number: '))\n\t\t\tif max_input < 1:\n\t\t\t\tprint \"\\n\" + \"You need at least one guess!\" + \"\\n\"\n\t\t\telse:\n\t\t\t\tmax_guesses = max_input\n\t\t\t\tprint \"\\n\" + \"OK, you'll have %d try(ies) for each question. Good luck!\" % max_guesses\n\t\t\t\treturn max_guesses\n\t\t\t# else:\n\t\t\t# \tprint \"\\n\" + \"You need at least one guess!\" + \"\\n\"\n\t\texcept ValueError:\n\t\t\tprint \"\\n\" + \"You entered a non-integer. Please enter a positive integer.\" + \"\\n\"",
"def guesses():\n tries = 3\n print (\" You may choose your maximum number of tries per question.\"\n \"The default is 3.\")\n player_prompt = \" Please type in your preferred number: \"\n while tries > 0:\n user_choice = raw_input(player_prompt)\n if user_choice.isdigit():\n print \"\\n OK, {} {} allowed per blank. Here we go!\\n\".format(user_choice, how_many(user_choice))\n return int(user_choice)\n tries -= 1\n player_prompt = (\" Silly, that's not a valid number of guesses! {} more {}. \\n\"\n \" Try again: \").format(tries, how_many(tries))\n if tries == 0:\n print \" You defaulted your number of guesses, so 3 it is!\"\n return 3",
"def play_again(max_guesses):\n user_choice = raw_input(\" Would you like to play again? (Y/N)\").lower()\n if user_choice == \"y\" or user_choice == \"yes\":\n user_choice = raw_input(\" Would you like the same number of guesses per question? (Y/N)\").lower()\n if user_choice == \"n\" or user_choice == \"no\":\n max_guesses = None\n launch_quiz(max_guesses)\n else:\n print \"\\n Thank you for playing Shauna's Fill-in-the-Blanks Quiz!\"\n print \" Goodbye!\"\n return None",
"def limit_number_prompts(state: SessionState):\n if state.prompts is not None and len(state.prompts) > 1:\n state.prompts = [state.prompts[0]]",
"def determine_attempts():\r\n #Inputs: # of attempts requested by user\r\n #Outputs: game gives number of attempts user selected before ending \r\n how_many_tries = int(raw_input(\"How many attempts do you want to answer a blank correctly before the answer is provided to you? Please provide a number, such as 2.\\n\"))\r\n attempts = how_many_tries\r\n number_of_tries = 5\r\n while how_many_tries < 1:\r\n print \"Please try again.\"\r\n determine_attempts\r\n attempts = attempts + 1\r\n if attempts == number_of_tries:\r\n break \r\n else:\r\n print \"Please read the paragraph below and provide the answers to fill in the numbered blanks.\\nYou will be given \" + str(attempts) + \" chances to enter the correct answer before it is provided to you.\\n\"\r\n return how_many_tries",
"def guest_num(max=20):\n rand_num = random.randint(1, 101)\n retries = 0\n while retries <= max:\n try:\n n = int(input('Input a number: '))\n if n == rand_num:\n print('YOU WIN!')\n break\n elif n > rand_num:\n print('Iputed number is great than result number. Just retry!')\n retries += 1\n else:\n print('Iputed number is less than result number. Just retry!')\n retries += 1\n except ValueError:\n print('Only can input a number!')\n except:\n print('Only can input a number!')\n else:\n print('YOU LOST!')",
"def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass",
"def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid",
"def maximum():\n if len(a_variable.get()) > MAX_CHARACTERS:\n messagebox.showwarning(title=\"Max Characters Exceeded!\",\n message=\"Please enter no more than 25\\n\"\n \"characters, thanks.\")\n clear_box() # Clears the entry field",
"def AskHowManyPlayers():\n\n\t# Loop forever until the user enters an integer between 1 and 10, inclusive.\n\twhile True:\n\t\tprint \"How many players? Enter a number between 1 and 10, or press enter for default 2:\"\n\t\tnum_players = SolicitInteger( lobound=1, hibound=10, default_return=2 )\n\t\tif num_players != None:\n\t\t\tprint \"Ok, {} players.\".format( num_players )\n\t\t\treturn num_players",
"def test_pick():\r\n global user_pick\r\n while user_pick > pickno or user_pick <= 0 or type(user_pick):\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n #Keeps the number of balls picked by user to be between 0 and 4\r",
"def countGuesses(hidden):\r\n guess = random.choice(range(0, 100)) # 0 to 99, inclusive\r\n numguesses = 1 # we just made one guess, above\r\n while guess != hidden:\r\n guess = random.choice(range(0, 100)) # guess again!\r\n numguesses += 1 # add one to our number of guesses\r\n return numguesses",
"def guess_number(min_guess_range, max_guess_range):\n\tprint(f'guess the number between {min_guess_range} and {max_guess_range}!')\n\treturn check_input(min_guess_range, max_guess_range)",
"async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))",
"def ask_number(low, high, tries):\n the_number = None\n while the_number not in range(low, high):\n the_number = int(input(\"Enter a number between 1-100: \"))\n return the_number\n print(\"The computer has\", tries, \"tries to guess your number\\n\")",
"def get_number_of_players():\n number_of_players = None\n while not(type(number_of_players)) == int:\n try:\n number_of_players = int(input(\"How many players are there? \"))\n if number_of_players == 0:\n raise zeroPlayersError\n elif number_of_players > 6:\n raise tooManyPlayersError\n except zeroPlayersError:\n print(\"The game needs at least 1 player\")\n number_of_players = None\n except tooManyPlayersError:\n print(\"Sorry you can't have more than 6 players\")\n number_of_players = None\n except:\n number_of_players = None\n return number_of_players",
"def guess_a_number():\n\n # TODO:\n # generate a random number (uniformly distributed between 0 and 100)\n # read input from the user and validate that the input is numeric (use the function check_raw)\n # check whether the number was guessed \n # implement the functions evaluate_my_number, which checks whether the number is too high or too low\n # and print this information to the user\n # let the computer guess, therefore implement the demo_a_number function\n random_number=randint(0,100)\n \n '''versuche=0\n max_versuche=5\n guess=-1\n test= False\n while guess != random_number:\n while test == False:\n guess= input('Gib eine Zahl zwischen 0 und 100 ein: ')\n try:\n guess= int(guess)\n test=True\n except ValueError:\n print('Try Again')\n \n if guess == random_number:\n print('Du hast die Zahl erraten!')\n elif guess > random_number:\n print('Die Zahl ist zu gross')\n versuche=versuche+1\n else:\n print('Die Zahl ist zu klein')\n versuche=versuche+1'''",
"def LimitedInput(message, limit, isNumber=False):\n keepAsking = True\n while keepAsking:\n answer = input(message)\n if len(answer) > limit:\n print(\"The input must be\", limit, \"characters or less.\")\n else:\n keepAsking = False\n if isNumber is True and CheckNumber(answer) is False:\n print(\"The input must be a number.\")\n keepAsking = True\n return answer",
"def guess_number():\n searched_number = random.randint(1, 10)\n while True:\n try:\n users_number = int(input(\"Guess the number: \"))\n except ValueError:\n print(\"It's not a number!\")\n continue\n if users_number > searched_number:\n print(\"Too big!\")\n elif users_number < searched_number:\n print(\"Too small!\")\n else:\n return \"You win!\"",
"def maximum_retry_attempts(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"maximum_retry_attempts\")",
"def play_game(n):\n tries = 0\n magic_number = generate_random(n)\n print(\"Let's play the mimsmind0 game.\")\n # Get and validate user's first guess\n while True:\n try:\n guess = int(input(\"Guess a {}-digit number: \".format(n)))\n tries += 1\n break\n except:\n print(\"That is not a valid number, try again.\") \n while True:\n # Check guess against magic number and give directional guidance if incorrect\n try:\n if magic_number > guess:\n guess = int(input(\"Try again. Guess a higher number: \"))\n tries += 1\n elif magic_number < guess:\n guess = int(input(\"Try again. Guess a lower number: \"))\n tries += 1\n else:\n print(\"Congratulations. You guessed the correct number in {} tries.\".format(tries))\n break\n except:\n print(\"That's not a valid number.\")",
"def attempt_limit(self) -> int:\n return self._attempt_limit",
"def guessTheSecret():\n\tguess = int(input('Guess the number > '))\n\tglobal attempts\n\tcheck = False\n\twhile guess != secret_num:\n\t\tif guess < secret_num:\n\t\t\tprint('Your guess is too low')\n\t\telif guess > secret_num:\n\t\t\tprint('You guess to too high')\n\t\tguess = int(input('Guess again > '))\n\t\tattempts += 1\n\t\tif attempts >= 4:\n\t\t\tbreak\n\tif guess == secret_num:\n\t\treturn True",
"def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)",
"def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()",
"def check_input(min_guess_range, max_guess_range):\n\twhile True:\n\t\ttry:\n\t\t\tplayerGuess = int(input('enter your guess: '))\n\t\t\tassert min_guess_range <= playerGuess <= max_guess_range\n\n\t\texcept AssertionError:\n\t\t\tprint('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))\n\t\texcept ValueError:\n\t\t\tprint('numbers only!')\n\t\telse:\n\t\t\treturn playerGuess",
"def getMaxNumber():\n maxNumber = int(input(\"what is the maximum number that you want:\"))\n return maxNumber",
"def test_limit_num_users(self):\n survey = self._create_test_survey()\n\n survey.save_user_answers(self.student, self.student_answers, self.course_id)\n survey.save_user_answers(self.student2, self.student2_answers, self.course_id)\n\n # even though we have 2 users submitted answers\n # limit the result set to just 1\n all_answers = survey.get_answers(limit_num_users=1)\n assert len(list(all_answers.keys())) == 1",
"def get_guess_from_user(self):\n self.guess_number = input(f\"please guess a number between 1 to {self.difficulty}: \\n\")\n while True:\n if not self.guess_number.isnumeric() or \\\n not int(self.guess_number) <= self.difficulty or \\\n not int(self.guess_number) >= 0:\n self.guess_number = input(f\"you input is invalid!! please guess a number between 1 to {self.difficulty}: \\n\")\n else:\n self.guess_number = int(self.guess_number)\n break\n return self.guess_number",
"def get_max_number():\n max_number = float(input(\"What is the max number you want?\"))\n return max_number",
"def not_a_number(update: Update, context: CallbackContext):\n text = update.message.text\n update.message.reply_text(f'{text} is not an available number of questions, please use the keyboard provided!')\n\n return HOW_MANY_QUESTIONS",
"def setup_number_of_faces():\n \n while True:\n faces = int(input(\"Geben Sie die Seitenanzahl der Würfel an (2 - 100) oder tippe '0' zum A\\\nbbruch: \"))\n if 2 <= faces <= 100:\n break\n elif faces == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine Zahl zwischen 2 und 100 eingeben!\")\n print()\n print()\n return faces",
"def launch_quiz(max_guesses=None):\n print \"\\n Yahoo!\\n You're playing Shauna's Fill-in-the-Blanks Quiz!\"\n level = choose_level()\n revealed_answers = [\"__1__\", \"__2__\", \"__3__\", \"__4__\", \"__5__\", \"__6__\", \"__7__\"]\n answer_key, text = text_to_display(level)\n if max_guesses is None:\n print text.format(*revealed_answers)\n max_guesses = guesses()\n for index in range(len(answer_key)):\n if not current_question(level, revealed_answers, index, max_guesses):\n return\n print \" Congratulations!!! You won the {} quiz!\\n\".format(level)\n play_again(max_guesses)",
"def secure_input(self, minimum, maximum):\n wrong_input = True\n while wrong_input:\n while True:\n try:\n choice = int(input())\n break\n except ValueError:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n if choice < minimum or choice > maximum:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n else:\n wrong_input = False\n return choice",
"def advancedGuessingGame():\n\n print(\"\\nWelcome to the guessing game!\")\n print(\"A number between _ and _ ?\")\n\n lowerBound = not_number_rejector(\"Enter Lower Bound: \")\n\n higher_number = False # we need to set an upper and lowerbound for game\n\n while not higher_number:\n upperBound = not_number_rejector(\"Enter Upper Bound: \")\n if upperBound > lowerBound:\n higher_number = True\n else:\n print(\"The upperbound is lower than you lowerbound: TRY AGAIN\")\n\n # above code ensures upper > lower, see stubbon_asker in EX1\n\n print(\"OK then, guess a number between {} and {} ?\".format(lowerBound, upperBound))\n lowerBound = int(lowerBound) # ensures integer is give (Not a letter)\n upperBound = int(lowerBound)\n\n actualNumber = random.randint(lowerBound, upperBound)\n\n guessed = False\n\n while not guessed:\n guessedNumber = not_number_rejector(\"Make a guess: \")\n print(\"You guessed {},\".format(guessedNumber),)\n if guessedNumber == actualNumber:\n print(\"HOW DID YOU GET THAT! It was {}\".format(actualNumber))\n guessed = True\n elif guessedNumber > upperBound:\n print(\"This is higher than the upperbound! Try again!\")\n elif guessedNumber < lowerBound:\n print(\"This is lower than the lowerbound! Try again!\")\n elif guessedNumber < actualNumber:\n print(\"{} is too small, try again\".format(actualNumber))\n else:\n print(\"{} is too big, try again \".format(actualNumber))\n return \"You got it!\"\n # the tests are looking for the exact string \"You got it!\". Don't modify that!",
"def allowedLimit(self, number, msg=None):\n return allowed_limit(number, msg)",
"def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue",
"def max_unavailable(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_unavailable\")",
"def guess_the_number():\n # get a random number from 1 to 1000\n number = random.randrange(1, 1000)\n\n guess = 0\n gcounter = 0\n # compare guess and selected number\n while guess != number:\n # get user input\n guess = int(input('Guess my number between 1 to 1000: '))\n # compare with number\n if guess > number:\n print('Too high. Try again')\n gcounter += 1\n elif guess < number:\n print('Too low. Try again')\n gcounter += 1\n else:\n # if equal, congratulate the user\n print('Congratulations, you guessed the number!')\n print(f'You used {gcounter} guesses')\n # check the number of guesses and provide feedback\n if gcounter > 10:\n print('You should be able to do better')\n else:\n print('Either you know the secret or you got lucky.')\n # give the option to restart the game or quit.\n response = input((\"Would you like to play it again? \"\n \"('yes' or 'no'): \"))\n # check user response\n if response == 'yes':\n number = random.randrange(1, 100)\n guess = 0\n gcounter = 0\n elif response == 'no':\n print('Bye.')\n break\n else:\n print('Invalid response. Quitting...')\n break",
"def game():\n\n secret_number = random.randint(1, 10)\n\n guess_list = []\n\n while len(guess_list) < 5:\n guess = input(\"Guess a number between 1 and 10: \")\n\n try:\n guess = int(guess)\n except ValueError:\n print(\"{} is not a number!\".format(guess))\n else:\n if guess == secret_number:\n print(\"You guessed it! My number was {}\".format(secret_number))\n play_again()\n break\n elif guess > secret_number:\n print(\"{} is too high!\".format(guess))\n else:\n print(\"{} is too low!\".format(guess))\n\n guess_list.append(guess)\n else:\n print(\"You ran out of guesses!\")\n play_again()",
"def guess_a_number():\n x = check_raw()\n random_number=randint(0,100)\n count_tries = 0\n\n while x != random_number:\n count_tries = count_tries + 1\n if count_tries == 10:\n print ('GAME OVER! You failed too many times!')\n break\n x = evaluate_my_number(x,random_number)\n if x == random_number:\n print ('Your number is correct! You needed {} tries.'.format(count_tries))\n break\n\n new_game = str(input(\"Do you want to play again? If so, say 'yes'! If not, say 'no' \"))\n if new_game == 'yes':\n guess_a_number()\n else:\n print('Goodbye!')\n\n # TODO:\n # generate a random number (uniformly distributed between 0 and 100)\n # read input from the user and validate that the input is numeric (use the function check_raw)\n # check whether the number was guessed \n # implement the functions evaluate_my_number, which checks whether the number is too high or too low\n # and print this information to the user\n # let the computer guess, therefore implement the demo_a_number function",
"def get_num_names_from_user():\n valid = False\n num_names = 1 #default value\n\n # user info\n print(\"\\nType number of names to generate.\")\n print(\"(You can directly Enter to generate 1 name)\")\n\n while not valid:\n num_names = input(\"Your input: \")\n\n if num_names.strip() == \"\":\n num_names = 1\n break\n\n try:\n num_names = max(int(num_names), 1)\n valid = True\n except:\n print(\"\\nPlease type an integer number.\\n\")\n\n return num_names",
"def check_limit(limit_value):\n try:\n limit = int(limit_value)\n except ValueError:\n raise SystemExit('The argument \"limit\" should be a positive number')\n else:\n if limit < 1:\n raise SystemExit('The argument \"limit\" should be greater than 0')\n else:\n return limit",
"def how_many(number):\n if int(number) == 1:\n return \"guess\"\n return \"guesses\"",
"def max_tokens_for_prompt(self, prompt: str) -> int:\n num_tokens = self.get_num_tokens(prompt)\n\n # get max context size for model by name\n max_size = self.modelname_to_contextsize(self.model_name)\n return max_size - num_tokens",
"def part2():\n random_number = random.randrange(1,10,1)\n user_input = input(\"Guess the number: \")\n while(user_input != \"exit\"):\n if(int(user_input) > random_number):\n print(\"Too high\")\n elif(int(user_input) < random_number):\n print(\"Too low\")\n else:\n print(\"Exactly right\")\n user_input = input(\"Guess the number: \")",
"def human_input(marbles_left):\n\twhile True:\n\t\ttry:\n\t\t\thuman_choice = int(input('Your turn: How many marbles will you remove (1-3)? '))\n\t\texcept:\n\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\treturn 0\n\t\telse:\n\t\t\tif human_choice not in range(1, 4):\n\t\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\t\treturn 0\n\t\t\telif human_choice > marbles_left:\n\t\t\t\tprint('Sorry, that is not a valid option. Try again!')\n\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\tprint('You removed {} marbles.'.format(human_choice))\n\t\t\t\treturn human_choice",
"def game_number():\n \n total_guesses = 0 # Initializes total number of guesses as 0 when game starts\n rand_number = randint(1,20) # Creates a random number between 1 and 20\n print(\"\\nThe number you shall guess is between 1 and 20.\" \n \" You have 3 guesses.\")\n\n while total_guesses < 3: # Ensures user only recieves 3 attempts\n\n print(\"Enter your guess below.\") # Prompts user to enter guess\n\n # Notifies user which attempt they are on\n if total_guesses == 0:\n print(\"This is your first attempt. \\t\") \n if total_guesses == 1:\n print(\"This is your second attempt. \\t\") \n if total_guesses == 2:\n print(\"This is your final attempt. \\t\") \n \n # Assigns guess to be the input as well as an \n # integer value for guessing the random number\n guess = input() \n guess = int(guess)\n \n total_guesses = total_guesses + 1 # Tracks number of total guesses used\n\n # Helps user confine their guesses based on clues given by the game\n if guess < rand_number:\n print(\"\\nYour guess is below the value of the random number!\")\n if guess > rand_number:\n print(\"\\nYour guess is above the value of the random number!\")\n if guess == rand_number:\n correct_guess(total_guesses)\n if guess != rand_number and total_guesses == 3:\n incorrect_guess(rand_number)",
"def prompt_number(prompt, low_limit = 1, high_limit = 65535):\n while True:\n try:\n response = int(prompt_base(prompt))\n if low_limit <= response <= high_limit:\n return response\n except:\n pass",
"def get_choice(attempt):\n try:\n user_text=''\n\n if attempt ==1:\n user_text ='Guess a number between 0 and 99:'\n \n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice",
"def findMax(self, questions, guessed, actual):\n return min(guessed, actual) + questions - max(guessed, actual)",
"def get_puzzle_no():\r\n \r\n puzzle_no = int(input(\"Enter the number of the puzzle to print the trace of (1-25): \"))\r\n while puzzle_no < 1 or puzzle_no > 25:\r\n print(\"Choice is invalid! Try again\")\r\n puzzle_no = int(input(\"Enter the number of the puzzle to print solution of (1-25): \"))\r\n \r\n return puzzle_no",
"def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")",
"def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")",
"def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")",
"def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")",
"def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")",
"def user_pick(self):\n player_taking = True\n while player_taking:\n play_take = int(input(\"How many dots would you like to remove?(1-4)\"))\n if not 1 <= play_take <= 4:\n print(\"You may only take between 1 and 4 balls\")\n else:\n player_taking = False\n return play_take",
"def get_wager_input(max_chips, min_chips=1):\n chips = 0\n while chips < min_chips or chips > max_chips:\n chips = input('How many chips do you wager? (min %d, max %d): ' % (min_chips, max_chips))\n try:\n chips = int(chips)\n except:\n chips = 0\n return chips",
"def new_game(range):\n global secret_number\n global counter\n global n\n n = range\n \n secret_number = random.randrange(0, n)\n counter = int(math.ceil(math.log(n + 1)/math.log(2)))\n \n print \"New Game. Range is from 0 to\", n\n print \"Number of remaining guesses is\",counter\n print \"\"",
"def suggested(max: int = None):\n for user_dict in client.suggested(max=max):\n print(json.dumps(user_dict))",
"def get_number_of_decks():\n number_of_decks = None\n while not(type(number_of_decks)) == int:\n try:\n number_of_decks = int(input(\"How many decks would you like in the shoe? \"))\n if number_of_decks == 0:\n raise zeroDecksError\n elif number_of_decks > 6:\n raise tooManyDecksError\n except zeroDecksError:\n print(\"The game needs at least 1 player\")\n number_of_decks = None\n except tooManyDecksError:\n print(\"Sorry you can't have more than 6 players\")\n number_of_decks= None\n except:\n number_of_decks = None\n return number_of_decks",
"def part2():\n randomNum = random.randint(1,9)\n guess = input('Please guess a number:')\n while (guess != randomNum) and (guess != \"exist\"):\n if randomNum > guess:\n print('too low')\n elif randomNum < guess:\n print('too high')\n guess = input('Please guess another number!:')",
"def create_number_of_players(self):\n self.number_of_players = pyip.inputInt(\n prompt='\\nEnter number of players (1 to 4):\\n', min=1, max=4)",
"def maximum_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"maximum_count\")",
"def range1000():\n global range, guesses_made, guesses_remaining, correct_num, victory_condition\n\n range = 1000\n guesses_made = 0\n guesses_remaining = 10#calculate_remaining_guesses(range)\n correct_num = random.randrange(range)\n victory_condition = False\n\n print \"New Game! Guess between 1 and \", range\n print \"Remaining guesses: \", guesses_remaining",
"def check_resource_limit(self, selection_count, population_count):\n p = self.ctx.policy\n max_resource_limits = MaxResourceLimit(p, selection_count, population_count)\n return max_resource_limits.check_resource_limits()",
"def choose_number(maximum):\n while True:\n try:\n skip_lines(1)\n index = int(input(f\"Choose a number between 1 and {int(maximum)}: \"))\n if index <= 0 or index > maximum:\n raise IndexError\n break\n except ValueError:\n print(\"Oops! That's not a valid number. Try again...\")\n except IndexError:\n print(f\"Oops! That number is not possible. It has to be between 1 and {int(maximum)}. Try again...\")\n return index",
"def max_delivery_attempts(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_delivery_attempts\")",
"def maximum_retry_attempts(self) -> Optional[int]:\n return pulumi.get(self, \"maximum_retry_attempts\")",
"def check_answer(challenge: int, answer: int) -> None:\n for index in reversed(range(challenge)):\n print(f\"You have {index} attempt(s) remaining to guess the number.\")\n guess = int(input(\"Make a guess: \"))\n if guess == answer:\n print(f\"You won! It took you {index * -1 + challenge} guesses.\")\n return\n elif guess > answer:\n print(\"Too high.\")\n elif guess < answer:\n print(\"Too low.\")\n if index == 1:\n print(\"You lose...\")\n return\n print(\"Guess again.\")",
"def get_max_cleverbot_requests(self):\n return int(self.bot_data_file[\"maxCleverbotRequests\"])",
"def setNumSamples(self):\n while True:\n try:\n self.numberOfSamples = int(input(\n \" How many samples? [%s]: \" % self.numberOfSamples) or self.numberOfSamples)\n if self.numberOfSamples >= 1:\n break\n else:\n print (\" Integer >= 1 needed!\")\n except ValueError:\n print (\" Integer >= 1 needed!\")",
"def ask_for_threshold():\n threshold_question = [\n {\n 'type': 'input',\n 'message': 'Enter the threshold value that you want to consider (similarities below that value will not be considered):',\n 'name': 'threshold',\n 'validate': NumberValidator\n }\n ]\n threshold_answer = prompt(threshold_question, style=style)\n return threshold_answer.get(\"threshold\")",
"def max_surge(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_surge\")",
"def _determine_limit(self, limit):\n\n # Note: +1 is allowed here because it allows\n # the user to fetch one beyond to see if they\n # are at the end of the list\n if not limit:\n res = conf.api_configuration.max_returned_num + 1\n else:\n res = min(conf.api_configuration.max_returned_num + 1, limit)\n\n return res",
"def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pick a number within the range\", lower_range, \"and\", upper_range, \".\")\n elif number > upper_range:\n print(\"Please pick a number between\", lower_range, \"and\", upper_range, \".\")\n else:\n stop_condition2: bool = True\n except ValueError as ve:\n print(\"This is not a number.\")\n return number",
"async def _check_n_entries(self, ctx: commands.Context, number_of_people_to_display: int) -> int:\n max_entries = AocConfig.leaderboard_max_displayed_members\n author = ctx.message.author\n if not 0 <= number_of_people_to_display <= max_entries:\n log.debug(\n f\"{author.name} ({author.id}) attempted to fetch an invalid number \"\n f\" of entries from the AoC leaderboard ({number_of_people_to_display})\"\n )\n await ctx.send(\n f\":x: {author.mention}, number of entries to display must be a positive \"\n f\"integer less than or equal to {max_entries}\\n\\n\"\n f\"Head to {self.private_leaderboard_url} to view the entire leaderboard\"\n )\n number_of_people_to_display = max_entries\n\n return number_of_people_to_display",
"def test_remain():\r\n global pickno\r\n #Change pick number to the total amount of balls\r\n # Ex. If we have 3 balls remaining the user cannot pick 4\r\n if total <= 4:\r\n pickno = total",
"def max_unavailable(self) -> Optional[pulumi.Input[Union[int, str]]]:\n return pulumi.get(self, \"max_unavailable\")",
"def max_unavailable(self) -> Optional[pulumi.Input[Union[int, str]]]:\n return pulumi.get(self, \"max_unavailable\")",
"async def max(self, ctx, limit: int):\n self.data_check(ctx)\n server = ctx.message.server\n\n self.riceCog2[server.id][\"max\"] = limit\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Warn limit is now: \\n{}\".format(limit))",
"def maximum_retry_attempts(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_retry_attempts\")",
"def max_count(self):\n return self.config.get('max_count', 500)",
"def user_input():\n user_number = input(\"Guess a number: \")\n try:\n user_number = int(user_number)\n except:\n print(\"Please ender a valid digit!\")\n return user_input()\n else:\n if 1 <= user_number <= 25:\n return user_number\n else:\n print(\"You need to enter a digit between 0 and 50\")\n return user_input()",
"def _validate_clear_args(limit):\n min_limit = 1\n max_limit = 20\n default_error = f\"[Limit] The `limit` argument must be a number between {min_limit} and {max_limit}\"\n try:\n limit = int(limit)\n except (ValueError, TypeError):\n return default_error\n if not (min_limit <= limit <= max_limit):\n return default_error\n return None",
"def get_answers(limit: int = -1,\n prompt: str = '') -> Generator[str, None, None]:\n count = 0\n answer = 'init'\n while (limit < 0 or count < limit) and answer:\n answer = input(prompt)\n count += 1\n if answer:\n yield answer",
"def guess_number():\n guess = 0\n while guess < 1:\n guess = int(input(\"Your guess: \"))\n return guess",
"def card_info_attempts(entered, stored):\r\n attempts = 3\r\n # Starts the countdown of tries\r\n while entered != stored:\r\n if attempts != 0:\r\n attempts -= 1\r\n print(\"Invalid card information. \\nAttempts remaining: \", attempts)\r\n print(\"Please try again.\")\r\n entered = input(\"\")\r\n else:\r\n print(\"Attempt maximum exceeded\")\r\n quit()",
"def ask_question(mad_lib, blank_num, answers, max_attempts=4):\n limit_attempts = 1\n attempts_left = max_attempts\n to_replace = \"___\" + str(blank_num) + \"___\"\n prompt = make_display(mad_lib, to_replace, attempts_left, max_attempts)\n user_guess = input(prompt).lower()\n while user_guess != answers.lower() and attempts_left > limit_attempts:\n attempts_left -= 1\n prompt = make_display(mad_lib, to_replace, attempts_left, max_attempts)\n user_guess = input(prompt).lower()\n if attempts_left > limit_attempts:\n print(\"\\nCorrect!\\n\")\n return (mad_lib.replace(to_replace, answers), blank_num + 1)\n return (None, blank_num + 1)",
"def backoff_limit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"backoff_limit\")",
"def guessnum3(num):\n low = 1 # lowest number we could guess\n high = 101 # highest number plus 1\n tries = 0\n\n # use a for loop instead of a while\n # guarantees we won't get stuck\n for _ in range(100): # we can replace the i with an '_' because we don't care about using the index\n my_guess = (low+high) // 2 # this is the mean rounded down\n tries += 1\n if my_guess == num:\n return tries # breaks loop\n elif my_guess > num:\n high = my_guess # this readjusts the higher portion of the halving algorithm\n else: # when your guess is lower than the number\n low = my_guess + 1 # readjusts the lower portion of the halving algorithm",
"def range100():\n global range, guesses_made, guesses_remaining, correct_num, victory_condition\n \n range = 100\n guesses_made = 0\n guesses_remaining = 7 #calculate_remaining_guesses(range)\n correct_num = random.randrange(range)\n victory_condition = False\n\n print \"New Game! Guess between 1 and \", range\n print \"Remaining guesses: \", guesses_remaining",
"def prompt_user():\n print()\n while True:\n print('Please choose one of the following options:')\n print(\"1: Send a Thank You\")\n print(\"2: Create a report\")\n print(\"3: Send letters to everyone\")\n print(\"4: Match donations\")\n print(\"5: Quit\")\n try:\n return int(input(\"Option: \"))\n except ValueError as e:\n print(\"***INVALID Option Selected***\")",
"def go(self, comp):\n self.attempt = 1\n number = random.randint(1, 20)\n\n comp.call(util.Confirm('I choose a number between 1 and 20. Try to guess it'))\n\n while True:\n x = comp.call(util.Ask('Try #%d: ' % self.attempt))\n if not x.isdigit():\n continue\n\n x = int(x)\n\n if x > number:\n comp.call(util.Confirm('Choose a lower number'))\n\n if x < number:\n comp.call(util.Confirm('Choose a greater number'))\n\n if x == number:\n comp.call(util.Confirm(self.final_text % self.attempt))\n break\n\n self.attempt += 1",
"def exceeded_max(self):\n return self.total_max is not None and self.counter > self.total_max",
"def get_number_of_instances_from_user(self):\n\t\ttry:\n\t\t\t# The int interpretation of the user input\n\t\t\ttemp = int(raw_input(\"Enter the number of car instances: \"))\n\n\t\t\t# If input is > 0\n\t\t\tif temp > 0:\n\t\t\t\tself.number_of_instances = temp\n\n\t\t\t# If input is <= 0\n\t\t\telse:\n\t\t\t\tprint \"Number has to be an int greater than 0. Please enter a number greater than 0.\"\n\n\t\t\t\tself.get_number_of_instances_from_user()\n\n\t\t# If user doesnt enter an int\n\t\texcept Exception:\n\t\t\tprint \"Number was invalid. Please enter a positive number.\"\n\n\t\t\tself.get_number_of_instances_from_user()",
"def user_selection(num, text):\n lst = list(range(1,num+1))\n answer= 0\n while answer not in lst:\n try:\n answer = int(input(text))\n \n if answer not in range(1,num+1):\n raise ValueError\n break\n except ValueError:\n print('Select a valid Number')\n\n return answer",
"def validate(n = 5):"
] | [
"0.77235156",
"0.7366993",
"0.7222083",
"0.6790302",
"0.65760106",
"0.65610594",
"0.65526646",
"0.6540081",
"0.6443365",
"0.62116224",
"0.6182453",
"0.61225075",
"0.6075408",
"0.6065941",
"0.60258347",
"0.6022477",
"0.5970776",
"0.5969266",
"0.5950388",
"0.59448254",
"0.5898527",
"0.5882177",
"0.5871567",
"0.5834399",
"0.5822063",
"0.58146465",
"0.58065313",
"0.57578206",
"0.57227814",
"0.5702767",
"0.56941706",
"0.5691441",
"0.5690692",
"0.5686034",
"0.56855637",
"0.56822944",
"0.5671408",
"0.56523377",
"0.563113",
"0.5616135",
"0.5615277",
"0.5602812",
"0.5594446",
"0.5578845",
"0.5575925",
"0.5566028",
"0.5558641",
"0.55551076",
"0.5553777",
"0.55372214",
"0.5523558",
"0.5517535",
"0.551425",
"0.5513806",
"0.5513806",
"0.5513806",
"0.5513806",
"0.5513806",
"0.5493861",
"0.5487605",
"0.5475315",
"0.54747075",
"0.5473112",
"0.54704136",
"0.54676795",
"0.5446073",
"0.5442973",
"0.54419506",
"0.5441754",
"0.5430103",
"0.54295474",
"0.54267156",
"0.5421995",
"0.5410329",
"0.5406316",
"0.5402604",
"0.5401028",
"0.53927815",
"0.5388491",
"0.5380639",
"0.53760767",
"0.53760767",
"0.53737",
"0.5369093",
"0.5364551",
"0.5358931",
"0.53561324",
"0.5345048",
"0.5344199",
"0.53414834",
"0.5329423",
"0.53284514",
"0.53235626",
"0.5311489",
"0.53083444",
"0.5305487",
"0.5305444",
"0.5301818",
"0.52989966",
"0.5295723"
] | 0.56145835 | 41 |
Offer the user a new game. Return True if accepted, False otherwise. | def queryNewGame(self):
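# Python 2 style: the bare print emits a blank line before the prompt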
print
response = raw_input('Would you like to play again? ')
return response.lower() in ('y', 'yes') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def new_game():\n if enough_players():\n GAME.new_game()\n await update_players()",
"def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True",
"def add_game(self, game: OverwatchGameSummary) -> bool:\n if self.start < game.time:\n raise ValueError(f'Cannot add a game to the middle/beginning of a session')\n elif self.account != game.player_name:\n return False\n elif self.game_mode != game.game_type:\n return False\n elif self.start - (game.time + game.duration) > SESSION_MAX_TIME_BETWEEN_GAMES * 60:\n return False\n else:\n self.games.append(game)\n return True",
"def isValid(self, game):\n return True",
"async def check_in_game(user_id, ctx): # this is meant for when it is accessed by commands outside of BlackJack.\n check = ex.first_result(await ex.conn.fetchrow(\"SELECT COUNT(*) From blackjack.games WHERE player1 = $1 OR player2 = $1\", user_id))\n if check:\n await ctx.send(f\"> **{ctx.author}, you are already in a pending/active game. Please type {await ex.get_server_prefix_by_context(ctx)}endgame.**\")\n return True",
"def ask_if_new_game(secret_words):\n\tprint \"Do you want to play another game? [y/n]\"\n\tanswer = raw_input()\n\tif answer not in [\"y\",\"n\"]:\n\t\tprint \"Invalid input, press y or n!\"\n\t\task_if_new_game(secret_words)\n\tif answer == \"y\":\n\t\tnew_game(secret_words)\n\telse:\n\t\treturn",
"def can_add_player(self, user):\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n return False\n if self.is_user_playing(user):\n return False\n return True",
"def add_player(self, user):\n # Make sure the user can play\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n raise ValueError(\"Not enough credits to pay entrance fee.\")\n if self.is_user_playing(user):\n raise ValueError(\"User already in tournament.\")\n \n # Handle the money transfer to join the tournament\n user_profile.credits = user_profile.credits - self.entrance_fee\n user_profile.save()\n self.prize_pool = self.prize_pool + self.entrance_fee\n self.save()\n \n # Join the tournament\n new_player = Player(user=user,\n tournament=self,\n credits=self.starting_credits)\n new_player.save()\n return True",
"def is_game_won(self):\n return True",
"def create_new_game (self, game_name = None, ai_game = False):\n if (game_name == None):\n game_name = \"default_game_name\" + str (time.time ())\n self.cur_game_name = game_name\n data =\\\n {\n \"new_game\": True,\n \"player_secret\": self.secret,\n \"game_name\": self.cur_game_name\n }\n if (ai_game):\n data['ai_game'] = True\n r = requests.post (self.url_endpoint, data)\n if (r.status_code != 201):\n print (\"Failed to create game:\\n\", r.text)\n return r\n # Not sure if there is any need for this--editing perhaps? Unimplemented.\n game_data = json.loads (r.text)\n self.cur_game_secret = game_data['game_secret']\n return self.cur_game_name",
"async def add(self, ctx, game):\n\n user = ctx.message.author\n\n if add(game, user.id):\n await self.bot.say(\"{}, {} was added to your library.\".format(user.mention, game))\n else:\n await self.bot.say(\"{}, you already have this game in your library.\".format(user.mention))",
"def NewGame():\n\twhile(True):\n\t\tos.system('cls')\n\t\tprint ('\\n')\n\t\tprint ( ' Muy bien, ¡un nuevo juego!')\n\t\tprint ( ' Vamos a comenzar \\n')\n\t\tsleep(1)\n\t\tans = input(' ¿Piedra, Papel o tijera? >> ')\n\t\tcpu = random.choice(outcomes)\n\t\tsleep(1)\n\t\tprint (' CPU: '+cpu)\n\t\tPaperRockScissor(ans,cpu)\n\t\tsleep(1)\n\t\t\n\t\taksAgain = input(' Deseas seguir jugando?: Y/N ')\n\t\t\"\"\" Verificamos que su respuesta no sea un no, en caso de serlo rompremos el ciclo \"\"\"\n\t\tif aksAgain in [\"N\",\"NO\",\"n\",\"no\"]:\n\t\t\tbreak",
"async def add_bj_game(self, user_id, bid, ctx, mode):\n await ex.conn.execute(\"INSERT INTO blackjack.games (player1, bid1, channelid) VALUES ($1, $2, $3)\", user_id, str(bid), ctx.channel.id)\n game_id = await self.get_game_by_player(user_id)\n if mode != \"bot\":\n await ctx.send(f\"> **There are currently 1/2 members signed up for BlackJack. To join the game, please type {await ex.get_server_prefix_by_context(ctx)}joingame {game_id} (bid)** \")",
"def new_game(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist.')\n game = Game.new_game(user.key)\n print game.target\n return game.to_form('Good luck playing Silicon Valley Hangman!!')",
"def do_start_new_game(request_json, this_player_user_id):\n game = Game(this_player_user_id)\n logger.debug(f\"do_start_new_game with {request_json}\")\n parsed_values, message = game.parse_requested_config(request_json)\n if parsed_values:\n # initalise a session\n logger.debug(f\"do_start_new_game parsed values, creating session to commit save.\")\n c = common_db.Common_DB()\n this_session = c.common_Sessionmaker()\n result, message = game.save(this_session)\n if not result:\n logger.error(\"do_start_new_game save failed, rolling back\")\n this_session.rollback()\n response = {\"startnewgame\": False,\n \"message\": message}\n else:\n logger.info(\"do_start_new_game save ok, committing\")\n this_session.commit()\n msg = quote_plus(f\"Game created successfully with ID {game.state.game_id}\"\n \". Now let's wait for some other players to join.\")\n response = {\"startnewgame\": True,\n \"new_game_id\": game.state.game_id,\n \"message\": message,\n \"redirect_querystring\": f\"?msg={msg}\"}\n this_session.close()\n logger.debug(\"do_start_new_game completed successfully - returning: %s\",\n jsonpickle.encode(response, unpicklable=False))\n return response, game\n else:\n logger.error(\"do_start_new_game unable to parse values: %s\", message)\n response = {\"startnewgame\": False,\n \"message\": message}\n return response, None",
"def game_new():\n posted = request.get_json()\n\n # Parse out the settings from the post\n settings = {}\n possible_settings = Game.DEFAULT_SETTINGS.keys()\n for setting in possible_settings:\n if setting in posted:\n settings[setting] = int(posted[setting])\n\n # Create and start the game -- the start is called immediately for now\n # as we do not have a way to change settings or add players in the UI\n game = Game({\"settings\": settings})\n game.start()\n\n # Save the game to the database\n queries.insert_game(game)\n queries.insert_game_event(game.game_id, {\"type\": \"start\"})\n\n response = json.jsonify(game=game.get_game_state())\n return response",
"def new_game(self):\n dialog = CreateGameDialog(self.root, \"New Game\")\n self.create_player_grid(dialog.grid_size)\n players_dialog = PlaceShipsDialog(\n self.root, title=\"Add Player\", game=self.game, grid_size=dialog.grid_size)",
"def game_allowed(self, uid=0):\n return True",
"def join_game(game):\n game = int(game)\n if 0 > game or game > len(games):\n return \"Not a valid gameBike\"\n if games.join_game(game):\n return \"Registration done\"\n else:\n return \"Not valid registration\"",
"def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)",
"def new_game(blank_game, user_id=None):\n if user_id:\n g.db.remove({'_id': user_id}, justOne=True)\n new_id = g.db.insert({'game': blank_game.serialise()})\n flash('New user successfully created')\n return new_id",
"def new_game(cls, user):\n game = Game(user=user,\n game_state=\".........\",\n game_over=False)\n game.put()\n return game",
"def _check_for_win(self):\n slots_available = any(\n [slot.available for slot in self.board.iter_slots() if not slot.mine]\n )\n if not slots_available:\n self.status = GameStatusEnum.won\n self.end_time = datetime.utcnow()",
"def do_add_to_game(game):\n if not game:\n raise ValueError(\"Tried to do_add_to_game without game\")\n # looking to add themselves to this game\n # check whether this is allowed.\n c = common_db.Common_DB()\n this_session = c.common_Sessionmaker()\n \n\n action_result, message = game.add_players_to_game(game.state.this_player_id)\n if action_result:\n # added to the game. Check if the game is ready\n if game.ready_to_start:\n # do the deal\n game.deal()\n action_result, message = game.save(this_session)\n else:\n action_result, message = game.save(this_session)\n if action_result:\n message = \"Added you to the game. Now sit tight and wait for enough other players to join.\"\n if action_result:\n this_session.commit()\n else:\n this_session.rollback()\n this_session.close()\n\n return action_result, message",
"def enough_players():\n return True",
"def is_game_complete(game):\n game_round = min(len(game.creator_scores), len(game.invitee_scores))\n creator_score = sum(game.creator_scores[:game_round])\n invitee_score = sum(game.invitee_scores[:game_round])\n return creator_score >= GAME_SCORE_TO_WIN or invitee_score >= GAME_SCORE_TO_WIN",
"def is_game_win(self):\n return not self.deck and not self.hand",
"def joinGame(self, playerID, startFreshP):\n\n # Log the join attempt\n logStrF = \"joinGame called w/ playerID %d (fresh game requested?: %s)\"\n TournamentSystem._logger.debug(logStrF, playerID, str(startFreshP))\n\n # Add the player to a pending game if one exists\n for gameID, game in self.games.iteritems():\n if game.status == ChessMatch.STATUS_PENDING:\n color = game.join(playerID, p2ReqFreshStart=startFreshP)\n if color:\n logStrF = \"Added player %d to existing game %d (sfP=%s)\"\n TournamentSystem._logger.debug(logStrF,\n playerID,\n gameID,\n str(startFreshP))\n return (True, {\"gameID\": gameID,\n \"startFreshP\": startFreshP})\n\n # Add a player to a new game otherwise\n newMatch = ChessMatch(firstPlayerID=playerID,\n p1ReqFreshStart=startFreshP)\n newID = _getUniqueInt(self.games.keys())\n self.games[newID] = newMatch\n TournamentSystem._logger.debug(\"Added player %d to new game %d\",\n playerID, newID)\n return (True, {\"gameID\": newID})",
"def hasWin(self) :\n comparison = self.compareNumberUser()\n if (comparison == 'equal') :\n return True\n else :\n return False",
"async def signups_helper(self, ctx, game: str, minimum: int=2, maximum: int=50, rounds: int=1) -> bool:\n guild = ctx.guild.id #`guild` is actually the guild's id, but using guild to shorten the variable\n # Check if there is an existing game\n self._existing_game(ctx)\n\n # Creation of embed to start signups\n embed = discord.Embed(title=f\"Game of '{game.capitalize()}' by {ctx.author}\",\n description=f\"Sign up by reacting 🙋♂️ to this message!\\n{rounds} Rounds\\nMinimum Players: {minimum}\\nMaximum Players: {maximum}\",\n color=discord.Colour(random.randint(0, 16777215)))\n embed.add_field(name=\"Current Signups\", value='None', inline=True)\n embed.set_footer(text=f\"React ▶️ to close signups and start the game or react ⏹️ to cancel the game.\\nOnly the host or server moderators can start or cancel the game.\")\n self.games_info[guild][0] = await ctx.send(embed=embed)\n\n reactions = ('🙋♂️', '▶️', '⏹️')\n for emoji in reactions:\n await self.games_info[guild][0].add_reaction(emoji)\n self.games_info[guild][1] = True\n \n # Not sure if it is a bug, but somehow the bot when it reacts the stop button,\n # can stop the game. No idea how, but just to resolve it:\n await asyncio.sleep(1)\n\n # Wait for signal to start or cancel game\n def stop_signups_check(reaction, user:discord.Member):\n return (reaction.emoji in ['▶️', '⏹️']\n and reaction.message.id == self.games_info[guild][0].id\n and (user.id == ctx.author.id \n or ctx.channel.permissions_for(user).manage_guild))\n while True:\n signal, user = await self.bot.wait_for('reaction_add', check=stop_signups_check)\n if signal.emoji == '▶️':\n player_count = len(self.games_info[guild][2])\n # Check if number of players fits the requirement\n if player_count >= minimum and player_count <= maximum:\n self.games_info[guild][1] = False # Ensure that number of players don't change\n await ctx.send(f\"Request by {user}: Starting Game\")\n return True\n else:\n await ctx.send(f\"Recevied request to start game by {user}, but number of players does not meet requirement.\")\n elif signal.emoji == '⏹️':\n await ctx.send(f\"Game cancelled by {user}.\")\n self.games_info[guild] = gamesDict()\n return False\n else:\n raise Exception # Shouldn't happen by the nature of the above code",
"def game_f(msg: telebot.types.Message):\n user = utils.get_user_or_none(msg.from_user)\n if not user:\n bot.send_message(\n msg.from_user.id,\n 'Probably, you are not registered. Press /start.'\n )\n return\n\n if not utils.get_users_game(user):\n user.state = states.USER_IN_MENU\n utils.update_user(user)\n else:\n bot.send_message(\n msg.from_user.id,\n \"Hey, you are in active game.\"\n )\n return\n\n logger.info(f'New /game command from id: {msg.from_user.id}.')\n\n bot.send_message(\n msg.from_user.id,\n 'Wanna play?',\n reply_markup=buttons.get_play_markup()\n )",
"def game_on(self) -> None:\n if self.msg.sender != self.owner:\n revert(f'Only the game owner can turn it on.')\n if not self._game_on.get():\n self._game_on.set(True)\n self._day.set(self.now() // U_SECONDS_DAY)",
"def game_created(self, pname, game):\n logging.debug('Game Created:')\n logging.debug(game)\n g = self.games.get(game['matchid'], None)\n if g:\n g.roomid = game['roomid']\n g.tableindex = game['tableindex']\n self.comm.game_ready(g)",
"def add_game():\n # Check if user has admin permission to access this page\n is_admin = True if \"admin\" in session else False\n\n if request.method == \"POST\":\n # Check if game currently exists in DB\n existing_game = mongo.db.games.find_one(\n {\"game_name\": re.compile(\n \"^\" + request.form.get(\"game_name\") + \"$\", re.IGNORECASE)})\n\n if existing_game:\n flash(Markup(\"Game is currently supported. You can manage \"\n \"supported games <a href='games'>here</a>.\"),\n category=\"error\")\n # Credit for using Markup to display link in flash message:\n # https://pythonpedia.com/en/knowledge-base/21248718/how-to-flashing-a-message-with-link-using-flask-flash-\n return redirect(url_for(\"add_game\"))\n\n # Gather form data\n game_details = {\n \"game_name\": request.form.get(\"game_name\").upper()\n }\n\n # Submit data to DB\n mongo.db.games.insert_one(game_details)\n\n flash(\"Game successfully added\", category=\"success\")\n return redirect(url_for(\"get_games\"))\n\n if is_admin:\n return render_template(\"add_game.html\")\n else:\n flash(\"You do not have permission to access this page\",\n category=\"error\")\n return redirect(url_for(\"get_terms\"))",
"def player_accepts_invite(self, player_email=None, game_id=None, account_id=None):\n if not self.verify_player_pending(player_email):\n raise ValueError('PlayerNotPending')\n\n player_id = register_pending_player(game_id=game_id, account_id=account_id)\n\n session = get_new_db_session()\n\n self.remove_player_from_pending(player_email=player_email, session=session)\n self.add_accepted_player(player_id=player_id, player_email=player_email, session=session)",
"def run_game(self):\n game = Poker()\n AI_win = game.play_round(self.name)\n self.update_scores(AI_win)\n message = 'Would you like to play another round? Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()",
"def testing_create_game():\n black_user = request.form['black_email']\n white_user = request.form['white_email']\n stones = json.loads(request.form['stones'])\n create_game_internal(black_user, white_user, stones)\n return ''",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables): #\n return True, -1\n\n return False, -1",
"def new_game(cls, user, answer, attempts):\n if attempts < 1:\n raise ValueError('Number of attempts has be greater than 1!')\n game = Game(user=user,\n attempts_remaining=attempts,\n game_over=False)\n game.answer = list(answer)\n game.check_answer = ['']*len(game.answer)\n game.move_histories = []\n game.put()\n return game",
"def create_new_game(game_name, player_name, points_to_win=POINTS_TO_WIN,\n min_players=MIN_PLAYERS, max_players=MAX_PLAYERS):\n do_house_keeping()\n if not can_create_new_game():\n return {}\n game_name = game_name or generate_game_name()\n player_name = player_name or generate_player_name()\n points_to_win = points_to_win or POINTS_TO_WIN\n min_players = min_players or MIN_PLAYERS\n max_players = max_players or MAX_PLAYERS\n if min_players < 2:\n min_players = 2\n if max_players > 10:\n max_players = 10\n game_id = generate_id(GAME_ID_LENGTH)\n game_data = {\n 'id': game_id,\n 'name': game_name,\n 'deck': create_deck(),\n 'stack': [],\n 'created_at': serialize_datetime(datetime.utcnow()),\n 'started_at': None,\n 'ended_at': None,\n 'active': False,\n 'reverse': False,\n 'min_players': min_players,\n 'max_players': max_players,\n 'players': [],\n 'points_to_win': points_to_win\n }\n add_player_to_game(game_data, player_name, True)\n msg = make_info_message(\n 'Click \"Start\" after all player(s) have joined')\n flash_broadcast(game_data, msg)\n result = save_state(game_data)\n if result:\n return game_data\n return {}",
"def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame",
"def handle_application(sender, instance, **kwargs):\n if instance.accepted is not None:\n if instance.accepted:\n instance.user.userprofile.change_status_developer()\n else:\n instance.user.userprofile.change_status_player()",
"def election_winner():\n\t\tglobal leader_ip\n \t\tleader_ip = '10.1.0.{}'.format(request.forms.get('winning_id'))\n \t\tprint(\"new leader is {}\".format(leader_ip))\n \t\treturn False",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1",
"def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1",
"def test_game_has_ended_match(self):\n game = Game.objects.create(\n allowed_attempts=10,\n end_date=date.today() + timedelta(1),\n min_value=1,\n max_value=10\n )\n self.assertTrue(game.is_active)\n self.assertIsNone(game.compleation_date)\n\n game.attempt_set.create(\n our_number=10,\n user_number=10,\n ip_addr='127.0.0.1'\n )\n self.assertEqual(len(self.recieved), 1)\n self.assertListEqual(self.recieved[0], ['match'])\n self.assertFalse(game.is_active)\n self.assertIsNotNone(game.compleation_date)",
"def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n self.game_x_won()\n return\n\n gameOver = None\n if self.turn > 5:\n gameOver = self.check_o_won()\n if gameOver is True:\n self.game_o_won()\n return\n\n if self.turn >= 9:\n self.game_tie()\n return",
"def doCheck(self):\n self.protocol.sendPacket(networkpackets.PacketPokerCheck(**self._serial_and_game_id))",
"async def new(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n host = ctx.message.author\n if room not in tod_games:\n tod_games[room] = {'host': host.name, 'host_id': host.name, 'participants': {}, 'last': None}\n tod_games[room]['current'] = host.name\n tod_games[room]['last'] = host.name\n tod_games[room]['participants'][host.name.lower()] = {'spins': 0}\n await amor_manager.say(\"New Game of Truth Or Dare started in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Truth or Dare already in progress in {}. Game host: {}\".format(room, host))",
"def add(self):\n try:\n form_result = PlayerSchema.to_python(request.params)\n except formencode.Invalid, e:\n return {'success': False, 'data': str(e)}\n player = model.Player()\n for k, v in form_result.items():\n setattr(player, k, v)\n\n player.confirmed = False\n self._createConfirmCode(player)\n\n meta.Session.add(player)\n meta.Session.commit()\n self._sendConfirmMail(player)\n return {'success': True}",
"def finish_game(self) -> bool:\n if not self.started and self.finished:\n return\n\n self.started = False\n self.finished = True\n\n players_list = self.games_list[self.game_index][\"players\"]\n self.games_list[self.game_index][\"players\"] = \\\n list(dict.fromkeys(players_list))\n\n return",
"def accept_offer(self, pname, matchid):\n logging.debug('%s accepts offer %s' % (pname, matchid))\n o = self.offers.get(matchid, None)\n if o:\n o.acceptors.add(pname)\n if len(o.acceptors) == len(o.get_pnames()):\n logging.debug('All players accept offer %s' % matchid)\n self.games[matchid] = o\n self.offers.pop(matchid, None)\n self.comm.announce_game(o)",
"def isNew(self):\n bot = self.storage.find_one({\"user\": self.user_id})\n if not bot:\n return True\n return False",
"def test_valid_new_game(self):\n self._game.new_game()\n self.assertIsRUNNING(self._game)\n self.assertIsNotNone(self._game.table)\n self.assertEqual(self._game.table.player_turn, 1,\n \"Game.table unsuccessful init.\")",
"def newGame():\n result = cs411_game.newGame()\n return prepJSON(result)",
"def test_new(self):\n result = self.client.get('/new-game')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'New Game', result.data)",
"def create_game(self, request):\n player = Player.query(Player.name == request.player_name).get()\n if not player:\n raise endpoints.NotFoundException(\n 'A Player with that name does not exist!, '\n 'we need one player in order to create the game')\n try:\n game = Game.new_game(player)\n except ValueError:\n raise endpoints.BadRequestException('sarasa')\n\n # Use a task queue to update the average attempts remaining.\n # This operation is not needed to complete the creation of a new game\n # so it is performed out of sequence.\n\n return game.to_form('Game created!, we only need one player '\n 'to join in order to start the game', player.name)",
"def new_game():\n # Prints the welcome message to the terminal\n welcome_message()\n # Gets the players name\n player_name = name_input()\n # Creates the players game board\n player_board = GameBoard(player_name, 'player')\n # Creates the players guess board\n user_guess = GameBoard('GUESS', 'user guess')\n # Creates the computers board\n computer_board = GameBoard(\"COMPUTER's\", 'computer')\n # Creates the computers guess board\n computer_guess = GameBoard('COMPUTER GUESS', 'computer guess')\n # Randomly places the computers ships on their board\n computer_board.place_ships()\n # Prints the players board to the terminal for reference\n player_board.print_board()\n # Allows the player to place their ships\n player_board.place_ships()\n time.sleep(2)\n # Prints the players guess board to terminal for reference\n print(PHASE)\n print(' ')\n # Takes turns attacking until winner\n run_game(player_board, user_guess, computer_board, computer_guess)\n # Asks the player if they want to play again or quit\n play_again()",
"def start_game(self):\n while self.can_deal:\n self.take_turn()",
"def _transit_to_voting(self, **kwargs):\n logging.debug(\"in _transit_to_voting\")\n handler = kwargs['handler']\n\n game = models.Hangout.get_by_id(self.hangout_id).current_game.get()\n if not game:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': \"Game for hangout %s not found\" % (self.hangout_id,)})\n return False\n if game.state != self.state_name:\n logging.info(\"game state %s not valid\", game.state)\n return False\n game.state = 'voting'\n game.put()\n return True",
"def handle_new_user_event(name, sid, methods=['GET', \"POST\"]):\n game.add_player(name, sid)\n print(f\"there are {len(game.players)} players in the game\")\n for player in game.players:\n print(player.name + \" is in the game\")",
"def add_game(user, date_played, level, was_won, score, time_taken):\n\n game = Game.objects.get_or_create(user=user, date_played=date_played)[0]\n game.level = level\n game.was_won = was_won\n game.score = score\n game.time_taken = time_taken\n\n game.save()\n return game",
"def new_game(self, msg=None):\r\n if msg is None:\r\n msg = \"new game\"\r\n SlTrace.lg(msg)\r\n self.stop_game(msg)\r\n self.restart_game = True # Signal to restart\r",
"def playerCanPlay(game, situation, player):\r\n return True",
"async def finish_game(self, game_id, channel):\n game = await self.get_game(game_id)\n player1_score = await self.get_player_total(game[1])\n player2_score = await self.get_player_total(game[2])\n if player2_score < 12 and self.check_if_bot(game[2]):\n await self.add_card(game[2])\n else:\n winner = self.determine_winner(player1_score, player2_score)\n player1_current_bal = await ex.u_currency.get_balance(game[1])\n player2_current_bal = await ex.u_currency.get_balance(game[2])\n if winner == 'player1':\n await ex.u_currency.update_balance(game[1], player1_current_bal + int(game[4]))\n if not self.check_if_bot(game[2]):\n await ex.u_currency.update_balance(game[2], player2_current_bal - int(game[4]))\n await self.announce_winner(channel, game[1], game[2], player1_score, player2_score, game[4])\n elif winner == 'player2':\n if not self.check_if_bot(game[2]):\n await ex.u_currency.update_balance(game[2], player2_current_bal + int(game[3]))\n await ex.u_currency.update_balance(game[1], player1_current_bal - int(game[3]))\n await self.announce_winner(channel, game[2], game[1], player2_score, player1_score, game[3])\n elif winner == 'tie':\n await self.announce_tie(channel, game[1], game[2], player1_score)\n await self.delete_game(game_id)",
"def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False",
"def can_exist_outside_of_game(self):\n return True",
"def can_exist_outside_of_game(self):\n return True",
"def new_game(self, req):\n return models.BattleShip.create(req.left, req.right)",
"def setNextTurn(self):\n\t\tliveplayers = self.getLivePlayers()\n\t\tif len(liveplayers) == 1:\n\t\t\twinner = liveplayers.pop()\n\t\t\tfor i in range(len(winner.betAmount)):\n\t\t\t\tself.handOutMoney([winner], i)\n\t\t\tself.setUpNextGameRound()\n\t\telif len(self.getSuitablePlayers()) == 0:\n\t\t\tself.earlyEvaluation()\n\t\telse:\n\t\t\tplayerUnsuitable = True\n\t\t\twhile playerUnsuitable:\n\t\t\t\tif self.roundEndSeat == self.turn:\n\t\t\t\t\tself.setUpNextBetRound()\n\t\t\t\t\tplayerUnsuitable = False\n\t\t\t\telse:\n\t\t\t\t\t_, self.turn = self.findNthPlayerFromSeat(self.turn, 1)\n\t\t\t\t\tif self.playerList[self.turn] in self.playerRemoveList:\n\t\t\t\t\t\tself.playerList[self.turn].isHandLive = False\n\t\t\t\t\tif self.playerList[self.turn].money > 0 and self.playerList[self.turn].isHandLive == True:\n\t\t\t\t\t\tplayerUnsuitable = False",
"def start_game(self):\n self.code = code.get_random_num()\n self.Player1 = self.get_player(1)\n self.Player2 = self.get_player(2)\n attempt = self.Player1.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n check.check(num_guessed_list, right_answer_list)\n attempt = self.Player2.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n output = check.check(num_guessed_list, right_answer_list)\n play = end_game.end_game(output)\n if play == True:\n self.keep_playing()",
"def start_new_game(cls, name, max_players):\n new_game_id = str(uuid.uuid4())\n new_game = {\n \"game_id\": new_game_id,\n \"board\": [[cls.EMPTY for i in range(cls.BOARD_ROWS)]\n for j in range(cls.BOARD_COLS)],\n \"game_status\": cls.OPEN,\n \"players\": [name],\n \"turn\": name,\n \"max_players\": max_players,\n }\n db.save_game(new_game_id, new_game)\n return new_game_id",
"def test_game_has_ended_attempts(self):\n game = Game.objects.create(\n allowed_attempts=1,\n end_date=date.today() + timedelta(1),\n min_value=1,\n max_value=10\n )\n self.assertTrue(game.is_active)\n self.assertIsNone(game.compleation_date)\n\n game.attempt_set.create(\n our_number=1,\n user_number=10,\n ip_addr='127.0.0.1'\n )\n self.assertEqual(len(self.recieved), 1)\n self.assertListEqual(self.recieved[0], ['attempts'])\n self.assertFalse(game.is_active)\n self.assertIsNotNone(game.compleation_date)",
"def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, \n date=date.today(), \n won=won,\n attempts_remaining=self.attempts_remaining, \n answer=self.answer\n )\n score.put()",
"def play(self):\n hand = self.state.hand\n supply = self.state.supply\n money = count_money(hand) - self.state.used_money\n if supply['Province'] > 0 and money >= Province.Cost:\n self.game_client.buy('Province')\n elif supply['Duchy'] > 0 and money >= Duchy.Cost:\n self.game_client.buy('Duchy')\n elif supply['Estate'] > 0 and money >= Estate.Cost:\n self.game_client.buy('Estate')\n\n self.game_client.done()",
"async def signups(self, ctx: commands.Context):\n if ctx.invoked_subcommand is None:\n if ctx.subcommand_passed is None:\n # No subcommand passed at all\n return await ctx.send(f\"Use '{self.prefix(ctx)}help signups' for more information.\")\n else:\n # Invalid subcommand passed\n return await ctx.send(\"No such game exists.\")\n else:\n if ctx.channel.id != self.data[ctx.guild.id]['channel']:\n raise GamesError(\"Games can only be played in the designated channel.\")",
"def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False",
"def check_guess(user_id, current, game, guess):\n user = ViewModel.get_user_data(user_id)\n \n current_relations = get_current_relations(current)\n \n guess = guess.lower()\n if guess == current.name.lower():\n return False\n\n if guess in current_relations:\n\n new_strike = False\n\n connection = ViewModel.check_connection(guess, game)\n\n if connection: return False\n\n chain = ViewModel.get_chain(game)\n round_number = len(game) + 1\n\n parent = current\n child = ViewModel.get_choice_data(guess)\n\n ViewModel.add_round(user.id, round_number, parent.id, child.id) \n\n else:\n new_strike = True\n\n ViewModel.update_user(user.id, new_strike)\n db.session.commit()\n return True",
"def create_new_game(request):\n\n if request.method == 'POST':\n form = NewGameForm(request.POST, request.FILES)\n if form.is_valid():\n # Save known fields\n game = Game()\n game.name = request.POST['game_name']\n\n # Check if there exist another game wuth that name\n games = Game.objects.filter(name=game.name)\n if len(games) > 0:\n error_msg = 'There already exists a game with that name!'\n return render_to_response('gaming/new_game.html',\n {\n 'form': form,\n 'error_msg': error_msg,\n },\n context_instance=RequestContext(request))\n\n game.rules_file = request.FILES['game_rules']\n game.judge_source_file = request.FILES['game_judge']\n game.max_players = request.POST['max_players']\n game.judge_lang = request.POST['judge_language']\n game.save()\n game.moderators.add(request.user)\n game.compile_judge()\n \n return HttpResponseRedirect('/game_details/' + str(game.id) + '/')\n else:\n form = NewGameForm()\n return render_to_response('gaming/new_game.html',\n {\n 'form': form,\n },\n context_instance=RequestContext(request))",
"async def new_game(self, players): \r\n if len(players) != 10:\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"You cannot start a game with only {} players\".format(len(players)))\r\n self.teams = {\"A\": [], \"B\" : []}\r\n self.previous_captains = self.captains\r\n self.captains = {\"A\" : None, \"B\" : None}\r\n self.nick_to_player = {get_member_name(p) : p for p in players}\r\n self.previous_players = self.remaining\r\n self.remaining = players.copy()\r\n self.turn = 1\r\n self.order = []\r\n self.map_dict = {k : True for k in self.map_dict.keys()}\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"New game started\".format(len(players)))",
"def check_for_end_of_game(self):\n return self.player_1.score + self.player_2.score >= self.number_of_cells",
"def check_winner(self):\n pass",
"def check_end_game(self):\n return False if (any(self.p1_pits()) and any(self.p2_pits())) else True",
"def end_game(self, user):\r\n self.game_over = True\r\n self.put()\r\n # Add the game to the score 'board'\r\n score = Score(user=user, date=date.today(), won=True,\r\n guesses=self.turn)\r\n score.put()\r\n winner = user.get()\r\n print \"winner: \", winner.name\r\n winner.wins += 1\r\n winner.winloss_ratio = float(\r\n winner.wins / (winner.wins + winner.losses))\r\n winner.total_guesses += self.turn\r\n winner.put()\r\n if self.player1 == user:\r\n loser_key = self.player2\r\n else:\r\n loser_key = self.player1\r\n score = Score(user=loser_key, date=date.today(), won=False,\r\n guesses=self.turn)\r\n score.put()\r\n loser = loser_key.get()\r\n loser.losses += 1\r\n loser.winloss_ratio = float(loser.wins / (loser.wins + loser.losses))\r\n loser.total_guesses += self.turn\r\n loser.put()",
"def game_loop():\n play_game = True\n\n while play_game:\n user = input(\"Rock, paper or scissors?\").lower()\n computer = get_computers_choice().lower()\n\n compare(user, computer)\n\n play_again = input(\"Would you like to play again: Y/N?\")\n\n if play_again.lower() == 'n':\n play_game = False\n\n print(\"Thanks for playing!\")",
"async def check(self, ctx, game, user: discord.Member=None):\n\n game_list = get_library()\n\n # Check if a user has the game\n if user:\n if not check_key(user.id):\n await self.bot.say(\"{} does not have a game library yet. Use {}help game to start adding games!\".format(user.nick, ctx.prefix))\n return\n\n user_game_list = get_library(user.id)\n\n if game in user_game_list:\n await self.bot.say(\"Aye {}, you have {} in your library.\".format(user.mention, game))\n else:\n await self.bot.say(\"Nay {}, you do not have that game in your library.\".format(user.mention))\n return\n\n users_with_games = []\n\n # Check which users have the game\n for discord_id, user_details in game_list.items():\n if game in user_details[\"games\"]:\n user = ctx.message.server.get_member(discord_id)\n if user:\n users_with_games.append(user.nick or user.name)\n\n if not users_with_games:\n await self.bot.say(\"None of you have {}!\".format(game))\n else:\n await self.bot.say(\"The following of you have {}: {}\".format(game, box(\"\\n\".join(users_with_games))))",
"def lostStage():\n print(\"\\n***** You lost the game. *****\\n***** Bettur Luck Next Time\"\n \"*****\\n\")\n while (True):\n userInput = input(\n \"Do you want to restart the game?\\nPress 1 to restart the game\"\n \"\\nPress 0 to exit the game\\n\")\n if userInput == '1':\n return True\n elif userInput == '0':\n return False\n else:\n print(\"\\nPlease enter a valid input\\n\")",
"def create_game(cls, user, misses_allowed, secret_word, current_solution):\n game = cls(parent=user,\n user=user,\n misses_allowed=misses_allowed,\n misses_remaining=misses_allowed,\n secret_word=secret_word,\n current_solution=current_solution)\n game.put()\n return game",
"def end_of_game(self):\n end_game = pyip.inputYesNo(f'\\nDo you want to play again?: ')\n\n if end_game == 'no':\n print('\\n-- GAME OVER --')\n sys.exit()\n elif end_game == 'yes':\n self.game_counter += 1",
"def create_game(self, user, attempts_remaining, attempts_used,\n score, current_level):\n\n game = Game(user=user, attempts_remaining=attempts_remaining,\n score=score, attempts_used=attempts_used,\n current_level=current_level)\n game.put()\n return game",
"def handle_new_conn(self):\n self.latest_client_socket, self.latest_client_address = self.server_listen_socket.accept()\n print(\"Accepted new connection from {0}:{1}\".format(self.latest_client_address[0], self.latest_client_address[1]))\n user = self.receive_message(self.latest_client_socket)\n if user is False:\n return False\n\n self.sockets_list.append(self.latest_client_socket)\n self.clients[self.latest_client_socket] = user\n print(\"Accepted new user: {0}\".format(user['data'].decode('utf-8')))\n return True",
"def supports_game(cls, game: Game) -> bool:\n return game in cls.SUPPORTED_GAMES",
"def _game_turn(trick_id: int, landed: bool, user_name: str, game_id: int,\n client: FlaskClient, server_app_context: AppContext) -> None:\n user_att = models.Attempt(trick_id=trick_id,\n game_id=game_id,\n user=user_name,\n landed=landed,\n time_of_attempt=datetime.datetime.utcnow())\n models.db.session.add(user_att)\n models.db.session.commit()",
"def can_accept(self, user):\n if user.has_perm('funding.make_application_decisions'):\n # Funding manager can override / update decisions, if required\n # But we still need to have had a offer made\n if self.status in ['G', 'A', 'N']:\n return True\n # Applicants can only decide on granted applications\n if self.status == 'G':\n if self.applicant == user:\n return True\n return False",
"def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, date=date.today(), won=won,\n guesses=self.attempts_allowed - self.attempts_remaining)\n score.put()",
"def game(self):\n sender = self.sender()\n if(sender.text() == \" \"):\n sender.setText(\"x\" if self.firstPlayer else \"0\")\n self.firstPlayer = not(self.firstPlayer)\n res = self.checkForResult()\n if(res[0] == True):\n self.endGame(res[1])",
"def handleGuess(self, guess):\n res = self.checkAnswer(guess)\n if res is True:\n print (\"Correct!\")\n return True\n print (\"Bulls: %s, Cows: %s\" % res)\n return False",
"def determineEndGame(self):\n\n print(\"noWinners: \" + str(self.noWinners) + \", noTotKids: \" + str(self.noTotKids))\n\n # TODO scegliere come determinare la fine del gioco\n # if self.noWinners == self.noTotKids - 1: # end-game test\n if self.noWinners == self.noTotKids:\n print(\"ho determinato la fine del gioco\")\n return True\n else:\n print(\"colore toccato ma la partita non e' finita\")\n return False",
"def play(self):\n print(\"Game is starting!!\")\n self.generate_secret_number()\n while True:\n self.get_guess_from_user()\n self.ans = self.compare_results()\n if self.ans:\n print(f\"Right Guess!! , the number is {self.secret_number}\")\n break\n else:\n print(f\"Wrong Guess!! , Please try again.\")\n return self.ans",
"async def run_game(self):\n await self.run_betting()\n self.force_bet()\n await self.print_players_with_bet()\n time.sleep(self.MESSAGE_GAP)\n cards_msg = await self.send_message(self.channel, \"Retrieving a new deck, shuffling, and dealing cards! Please hold!\")\n self.deal_cards()\n time.sleep(self.MESSAGE_GAP)\n await self.edit_message(cards_msg, cards_msg.content + \"\\n\\n\" + self.str_players_with_hand())\n time.sleep(self.MESSAGE_GAP)\n while self.still_playing_game():\n await self.run_round()\n self.ready_new_round_players()\n await self.send_message(self.channel, \"There are no more players eligible to play, so the game is over!\"\n \" Here evaluation to see who won!\\n\" + self.evaluate_game())\n time.sleep(self.MESSAGE_GAP)\n await self.send_message(self.channel, \"Resetting players for next game...\")\n time.sleep(self.MESSAGE_GAP)\n self.reset_players()"
] | [
"0.6719708",
"0.6387805",
"0.6320438",
"0.627329",
"0.623334",
"0.6186488",
"0.6180574",
"0.6068623",
"0.6004475",
"0.59953415",
"0.5935983",
"0.59331787",
"0.5884073",
"0.5848409",
"0.5813085",
"0.5809577",
"0.5796485",
"0.5780554",
"0.5772659",
"0.57724905",
"0.5761467",
"0.5745624",
"0.57386196",
"0.5726988",
"0.571664",
"0.5686557",
"0.56635165",
"0.56614286",
"0.5624282",
"0.5596374",
"0.55902374",
"0.55854297",
"0.5581836",
"0.558136",
"0.5571463",
"0.55681497",
"0.5558295",
"0.5541743",
"0.5540533",
"0.55301356",
"0.5525931",
"0.55132145",
"0.55104053",
"0.5504389",
"0.5504389",
"0.5495572",
"0.5493696",
"0.54752934",
"0.5473856",
"0.5464916",
"0.5461397",
"0.54541963",
"0.5450127",
"0.5445787",
"0.54403865",
"0.5439945",
"0.54356325",
"0.54314417",
"0.54125494",
"0.5400013",
"0.53979206",
"0.5391894",
"0.5390876",
"0.5387418",
"0.5387066",
"0.5374887",
"0.5374266",
"0.5374266",
"0.53622216",
"0.5362194",
"0.5348615",
"0.53441584",
"0.5334147",
"0.5332764",
"0.53270006",
"0.5324786",
"0.53247625",
"0.53218746",
"0.53218704",
"0.53162193",
"0.5310588",
"0.53082144",
"0.5301895",
"0.5300474",
"0.52936417",
"0.5289395",
"0.5287648",
"0.5287374",
"0.52681047",
"0.52668554",
"0.52592146",
"0.52560735",
"0.5254907",
"0.5253128",
"0.52502733",
"0.5247097",
"0.52438617",
"0.5243402",
"0.52402085",
"0.52392584"
] | 0.61691105 | 7 |
Get a guess from the user and return it as a Pattern instance. | def enterGuess(self):
validPattern = False
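    # Keep prompting until the user enters a well-formed guess.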
while not validPattern:
print # intentional blank line
prompt = 'Enter a guess (colors are '
prompt += self._palette[:self._numColorsInUse] + '): '
patternString = raw_input(prompt)
validPattern = True
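        # First check: the guess needs exactly one color letter per peg.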
if len(patternString) != self._lengthOfPattern:
print 'The pattern must have', self._lengthOfPattern, 'pegs'
validPattern = False
else:
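            # Second check: every letter must name a color that is in play.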
for i in range(self._lengthOfPattern):
if patternString[i].upper() not in self._palette[:self._numColorsInUse]:
validPattern = False
if not validPattern:
print 'The color options are', self._palette[:self._numColorsInUse]
if validPattern:
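            # Build the Pattern, mapping each letter to its palette index.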
pattern = Pattern(self._lengthOfPattern)
for i in range(self._lengthOfPattern):
pattern.setPegColor(i, self._palette.index(patternString[i].upper()))
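    # Example (assuming a palette of 'RGBYOP' and a 4-peg pattern):
    # entering 'rgby' would return a Pattern with peg colors [0, 1, 2, 3].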
return pattern | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_guess(self):\n return self._guess",
"def get_guess(self):\n new_guess = \"\"\n try:\n new_guess = input(\"Enter a letter: \").lower()\n if len(new_guess) > 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too long. Make sure that it is only one character\")\n elif len(new_guess) < 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too short. Make sure that it is only one character\")\n elif ord(new_guess) < 97 or ord(new_guess) > 122:\n new_guess = \"INVALID\"\n raise ValueError(\"Your input was deemed invalid! Please make sure input is a character a-z\")\n elif new_guess in self.guesses:\n print(f\"You already guessed the letter {new_guess}, try again\")\n new_guess = \"INVALID\"\n except ValueError as err:\n print(err)\n return new_guess",
"def pattern_factory(self):\n\t\treturn self.args[1]",
"def guess(self):\n\t\t\n\t\tpeg_guess_color_list = []\n\t\tguess_input = self.view.input_guess()\n\n\t\t# Convert guess_input into a list- each color being a string\n\t\tguess_color_list = re.split(\",\", guess_input)\n\t\t\n\n\t\tfor each_color in guess_color_list:\n\n\t\t\t#associate each string with a peg object\n\t\t\tpeg_guess = ColorPeg(each_color)\n\t\t\t\n\t\t\t# Append the peg_guess color list to make a list of peg guess objects\n\t\t\tpeg_guess_color_list.append(peg_guess)\n\n\t\t\t# Plug our peg objects into our guess object\n\t\t\tuser_guess = Guess(peg_guess_color_list)\n\n\t\t\t# Store guess object in our MasterModel\n\t\t\tself.model.guesses[self.model.status] = user_guess\n\n\t\t\t# Make a variable that\n\n\n\t\t# ### TESTS ###\n\t\t# print (\"This is each color: \", each_color)\n\t\t# print (\"print guess input again: \", guess_input)\n\t\t# print(\"prints each peg color for guess: \", peg_guess)\n\t\t# print(\"Prints the list of color guesses: \", peg_guess_color_list)\n\t\t# for peg_guess in peg_guess_color_list:\n\t\t# \tprint(\"Prints the list of guess pegs: \", peg_guess.peg_color)\n\n\t\t# print(\"Prints out the first list of guesses. Key = Guess 1\", self.model.guesses[\"Guess 1\"])",
"def get_input(self, guess):\r\n print\r\n print \"The player guessed = \", guess\r\n result = self.process_player_input(guess)\r\n print result\r\n if ((self.remaining_guesses == 0) or ( result == self.correctguess_message)):\r\n # Start a new game, with same range\r\n self.init(self.num_range)\r\n return result",
"def get_guess_from_user(self):\n self.guess_number = input(f\"please guess a number between 1 to {self.difficulty}: \\n\")\n while True:\n if not self.guess_number.isnumeric() or \\\n not int(self.guess_number) <= self.difficulty or \\\n not int(self.guess_number) >= 0:\n self.guess_number = input(f\"you input is invalid!! please guess a number between 1 to {self.difficulty}: \\n\")\n else:\n self.guess_number = int(self.guess_number)\n break\n return self.guess_number",
"def get_pattern(self):\n if self.pattern is None:\n pattern_str = self.blueprint.pattern()\n pattern_file = self.remgr.lookup_pattern_file(self.blueprint, self.provider)\n self.pattern = pattern.Pattern(pattern_str, pattern_file)\n self.pattern.set_provider(self)\n return self.pattern",
"def get_choice(attempt):\n try:\n user_text=''\n\n if attempt ==1:\n user_text ='Guess a number between 0 and 99:'\n \n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice",
"def get_guess(self):\n guess = self.player.higher_or_lower",
"def get_guess():\n print('Choose a letter:')\n return input()",
"def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match",
"def get_pattern(guess, true_word):\n return sum(\n value * (3**i)\n for i, value in enumerate(pattern_trit_generator(guess, true_word))\n )",
"def get_atom_guess(self):\r\n return self._player.get_atom_guesses()",
"def getPattern(self):\n return self.pattern",
"def get_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words):\n\n\tprint \"\\n The word to guess is: \", mask\t\n\tprint \"\\n # of attempts: \", attempts\n\tprint \"\\n Insert a letter or a number \\n\"\n\tthe_guess = raw_input()\n\tthe_guess = the_guess.lower()\n\t# Check if the input is a valid character\n\tvalidity = check_validity(the_guess, valid_characters, user_guesses)\n\tif (validity is True):\n\t\t# CHeck if the user has guessed the letter\n\t\tif (check_if_guessed(the_guess, word_to_guess) >= 0):\n\t\t\tprint \"\\n Great! your choosed the correct letter!\"\n\t\t\tuser_guesses += the_guess\n\t\t\tmask = calculate_mask(user_guesses, word_to_guess)\n\t\t\tyou_won = check_if_won(user_guesses, word_to_guess, secret_words)\n\t\t\tif you_won is True:\n\t\t\t\t# If the user has won it stop the game\n\t\t\t\treturn\n\t\telse:\n\t\t\tattempts = attempts + 1\n\t\t\tprint \"\\n Sorry! the letter is not present in the word! you have now %d guess left\" % (6 - attempts)\n\t\t\tyou_lost = check_if_lost(attempts, secret_words)\n\t\t\tif you_lost is True:\n\t\t\t\t# If he user has lost it stop the game\n\t\t\t\treturn\n\telse:\n\t\tprint \"\\n The input is not valid! Insert a valid input\"\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn",
"def guess_word(self):\r\n guess = input(\"# Guess the Word :\")\r\n if not guess:\r\n print(\"Please enter a valid word.\")\r\n else:\r\n if game_instance.check_word(guess):\r\n print(\"Correct! You did it Champ!\")\r\n game_instance.calculate_score(self.frequency)\r\n self.instances.append(game_instance)\r\n obj.create_new_game()\r\n else:\r\n print(\"Wrong Guess. Try Again!\")",
"def user_guess():\n return list(input(\"What is your guess?\"))",
"def get_user_input(self):\r\n try:\r\n user_input = input('Guess a letter: ')\r\n print('\\n')\r\n if user_input.lower() in self.already_guessed:\r\n raise ValueError(YELLOW + 'You already guessed '\r\n f'{user_input.lower()}.\\n' + END)\r\n if len(user_input) == 0:\r\n raise ValueError(YELLOW + 'You didn\\'t enter a letter. '\r\n 'Please enter a letter between A-Z\\n' + END)\r\n if not user_input.isalpha():\r\n raise ValueError(YELLOW + 'You entered a number. '\r\n 'Please enter a letter between A-Z.\\n' + END)\r\n if len(user_input) > 1:\r\n raise ValueError(YELLOW + 'Please enter one letter.\\n' + END)\r\n except ValueError as error:\r\n print(error)\r\n self.get_user_input()\r\n else:\r\n if len(self.already_guessed) > 0: # prints previous guesses\r\n self.print_previous_guesses()\r\n if user_input.lower() in [letter.original.lower() for letter in\r\n self.active_phrase if letter != ' ']:\r\n for letter in self.active_phrase:\r\n if letter != ' ':\r\n letter.compare_guess(user_input) # checks guess\r\n self.active_phrase.print_phrase()\r\n else:\r\n self.lives -= 1\r\n print(f'You have {self.lives} out of 5 lives remaining!\\n')\r\n if user_input.lower() not in self.already_guessed:\r\n self.already_guessed.append(user_input.lower())\r\n self.active_phrase.print_phrase()",
"def pattern(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pattern\")",
"def pattern(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pattern\")",
"def get_pattern(self, name):\n return self._pattern_reg[name]",
"def get_guess(already_guessed):\n\n while True:\n print('Guess a letter.')\n guess = (input()).lower()\n if len(guess) != 1:\n print('Please enter a single letter.')\n elif guess == ' ':\n print('Space is not a valid entry. Please enter a single letter.')\n elif guess in already_guessed:\n print('\"Already guessed the letter. Choose again.')\n elif guess not in 'abcdefghijklmnopqrstuvwxyz':\n print('Please enter a LETTER.')\n else:\n return guess",
"def guess(self, message, db_session):\n user = self.ts.get_user(message)\n if db_session.query(db.MiscValue).filter(db.MiscValue.mv_key == 'guessing-enabled').one().mv_value == 'True':\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n if len(msg_list) > 1:\n guess = msg_list[1]\n if guess.isdigit() and int(guess) >= 0:\n self._set_current_guess(user, guess, db_session)\n self._add_to_whisper_queue(user, \"{} your guess has been recorded.\".format(user))\n else:\n self._add_to_whisper_queue(user, \"Sorry {}, that's not a non-negative integer.\".format(user))\n else:\n self._add_to_whisper_queue(user,\n \"Sorry {}, !guess must be followed by a non-negative integer.\".format(user))\n else:\n self._add_to_whisper_queue(user, \"Sorry {}, guessing is disabled.\".format(user))",
"def guess():\n word = request.args[\"word\"]\n board = session[\"board\"]\n\n # create response by the response of the function if word is valid\n response = boggle_game.check_valid_word(board, word)\n\n return jsonify({'result': response})",
"def eval_guess(self, Guess):\n\n\t\t# pulls comparison from win check and assigns peg responses \n\n\t\t# returns a list to be in hint_response\n\n\t\t# displays as part of big display in view.\n\n\t\t\"\"\"Borrow the logic from win_check to implement eval_guess. Use variables right and wrong to \n\t\tevaluate. Right = Black peg. Wrong = no peg. \n\n\t\tWhite will be generated from a third loop to compare the entire list\"\"\"\n\n\n\t\tpass",
"def pattern(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"pattern\")",
"def _parse_pattern(cls, pattern, default_pattern: str = \"*\") -> Pattern:\n pattern = pattern or default_pattern\n if pattern is None:\n return None\n\n return Pattern(pattern)",
"def test_guessing(self):\n self.classifier.guess(self.message)",
"def guess(cls, docstring):",
"async def _guess(self, ctx):\n reply = '\\n'\n for i, entry in enumerate(db.get_leaderboard(\n ctx.message.server.id,\n 'guess-leaderboard')):\n for key, value in entry.items():\n if key == \"discord_id\":\n name = self.get_name(ctx, value)\n elif key == 'date':\n date = value\n else:\n score = value\n reply += '{}. {} - {} ({})\\n'.format(\n i+1,\n score,\n name,\n datetime.datetime.fromtimestamp(\n int(date)).strftime('%d-%m-%Y')\n )\n await self.bot.say(reply)",
"def new_game(secret_words):\n\n\tattempts=0\n\tword_index = random.randint(0,5)\n\tword_to_guess = secret_words[word_index]\n\tglobal mask\n\tmask = \" _ \" * len(secret_words[word_index])\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn",
"def test_040_query_pattern(self):\n # Test query user\n\n testflow.step(\"Querying for users by pattern\")\n out_user = USER_CLI.run('show', TEST_USER1)[1]\n for k, v in {\n 'firstName': 'userX1',\n 'department': 'QA',\n 'description': 'our sysadmin',\n 'displayName': 'Uzivatel',\n 'email': 'userX1@internal',\n 'lastName': 'Blabla',\n 'title': 'user',\n }.iteritems():\n rc, out = self.query_cli.run(\n what='user',\n pattern='%s=%s' % (k, v)\n )\n assert rc, 'Unable to find user by its %s' % k\n assert out_user == out, \"Correct user wasn't found by %s\" % k\n\n # Test query group\n testflow.step(\"Querying for groups by pattern\")\n out_group = GROUP_CLI.run('show', TEST_GROUP1)[1]\n for k, v in {\n 'description': 'Admin Group',\n 'displayName': 'Group1',\n }.iteritems():\n rc, out = self.query_cli.run(\n what='group',\n pattern='%s=%s' % (k, v)\n )\n assert rc, 'Unable to find group by its %s' % k\n assert out_group == out, \"Correct group wasn't found by %s\" % k",
"def clues_generator(code, userGuess):\n if userGuess == code:\n return \"Code Cracked!\"\n\n clues = []\n\n # Compare guess to code\n for ind, num in enumerate(userGuess):\n if num == code[ind]:\n clues.append(\"Match\")\n elif num in code:\n clues.append(\"Close\")\n if clues == []:\n return [\"Nope\"]\n else:\n return clues",
"def _find_pattern(self, locator):\n assert locator is not None and len(locator) > 0\n locator = locator.strip().lower()\n (pattern, sensitivity) = self._parse_locator(locator)\n\n if (sensitivity != None):\n sensitivity = float(sensitivity)\n pattern = Pattern(pattern).similar(sensitivity)\n else:\n pattern = pattern\n return pattern",
"def guess_input(self):\n try:\n self.player_guess = input('Guess a letter: ').lower()\n Character(self.player_guess, self.selected_phrase)\n except ValueError:\n print(\"That was not a valid input. Please pick a number between 1 and 10\")\n if self.player_guess == \"\":\n print (\"Please enter a letter,try again.\")\n if not self.player_guess.isalpha():\n print (\"Please only enter a letter(a-z),try again.\")\n if len(self.player_guess) > 1:\n print(\"Please enter only one letter at a time.\")",
"def compile(format):\n try:\n return _cache[format]\n except KeyError:\n _cache[format] = retval = SF_Pattern.__new__(SF_Pattern, format)\n return retval",
"def new_game(cls, user, target, attempts):\n #if the user hasn't given us a word use a random word\n if not target:\n target = WORDS[random.randint(0,9)]\n\n game = Game(user = user,\n target = target,\n revealed_word = '*' * len(target),\n attempts_allowed = attempts,\n attempts_remaining = attempts,\n game_over = False,\n moves = [])\n game.put()\n return game",
"def generate_regex_from_string(self):\n tries = 0\n while tries < self.max_tries:\n try:\n tries += 1\n if tries % 100 == 0:\n print(f\"Tries: {tries}\", end=\"\\r\")\n patterns_to_try = self.generate_regex_pattern()\n for _, pattern in patterns_to_try:\n if re.fullmatch(pattern, self.string):\n self.found_patterns.add(pattern)\n else:\n print(f\"Doesn't Match! {pattern} -> {self.string}\")\n except Exception as e:\n pass\n if self.negative_string:\n self.found_patterns = self.best_pattern()",
"def play(self):\n print(\"Game is starting!!\")\n self.generate_secret_number()\n while True:\n self.get_guess_from_user()\n self.ans = self.compare_results()\n if self.ans:\n print(f\"Right Guess!! , the number is {self.secret_number}\")\n break\n else:\n print(f\"Wrong Guess!! , Please try again.\")\n return self.ans",
"def __init__(self, pattern):\r\n self.pattern = pattern",
"def pattern(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"pattern\")",
"async def get_user(event):\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n replied_user = await event.client(GetFullUserRequest(previous_message.from_id))\n else:\n user = event.pattern_match.group(1)\n if user.isnumeric():\n user = int(user)\n\n if not user:\n self_user = await event.client.get_me()\n user = self_user.id\n\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n replied_user = await event.client(GetFullUserRequest(user_id))\n return replied_user\n try:\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n\n except (TypeError, ValueError):\n await event.edit(\"`I don't slap aliens, they ugly AF !!`\")\n return None\n\n return replied_user",
"def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))",
"def pattern(self):\n return self.get_data(\"pattern\")",
"async def get_user(event):\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n replied_user = await event.client(\n GetFullUserRequest(previous_message.sender_id)\n )\n else:\n user = event.pattern_match.group(1)\n\n if user.isnumeric():\n user = int(user)\n\n if not user:\n self_user = await event.client.get_me()\n user = self_user.id\n\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n replied_user = await event.client(GetFullUserRequest(user_id))\n return replied_user\n try:\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n\n except (TypeError, ValueError):\n await event.edit(\"`I don't slap aliens, they ugly AF !!`\")\n return None\n\n return replied_user",
"def guess_number():\n guess = 0\n while guess < 1:\n guess = int(input(\"Your guess: \"))\n return guess",
"def process_player_input(self,guess):\r\n # Step 1 - Catch faulty input, this is not topic of week 2\r\n\r\n # Tell the player the secret number :-)\r\n if (guess == \"Cheat\"):\r\n return \"Secret number = %d\" % (self.secret_number)\r\n \r\n # Step 2 - Verify player's input.\r\n user_input = self.verify_input(guess, self.num_range)\r\n if (type(user_input) != type(0)):\r\n # Verify_input() detected faulty input\r\n # Let's leave here with the error message\r\n return user_input\r\n\r\n # Decrease the number of still available tries\r\n if (self.remaining_guesses>0):\r\n self.remaining_guesses -= 1\r\n print \"Remaining number of tries = \", self.remaining_guesses\r\n \r\n # Step 3 - Give the player a hint for next guess\r\n if ((user_input > self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Lower!\"\r\n elif ((user_input < self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Higher!\"\r\n elif (user_input == self.secret_number):\r\n result_message = self.correctguess_message\r\n else:\r\n # As the guess was wrong and there is no further try anymore,\r\n # tell the player that he/she lost\r\n result_message = \"You tried too often than necessary, You lost!\"\r\n return result_message",
"def get_pattern(self, name):\n return self.__patterns[name]",
"def to_pattern(obj):\n if isinstance(obj, Pattern):\n return obj\n return Glob(str(obj))",
"def pattern(self):\n return self[\"pattern\"]",
"def pattern(self):\n return self[\"pattern\"]",
"def __new__(cls, format):\n self = super(SF_Pattern, cls).__new__(cls)\n\n if isinstance(format, bytes):\n uni_str = format.decode('ISO-8859-1') # decode to unicode\n trans_str = translate(uni_str) # translate only works with unicode\n re_fmt = trans_str.encode('ISO-8859-1') # encode back to bytes\n self._spec = _gbspec\n else:\n re_fmt = translate(format)\n self._spec = _gspec\n\n self._format = format\n self._re = cre = re.compile(re_fmt)\n\n if cre.groupindex and len(cre.groupindex) != cre.groups:\n raise RuntimeError('cannot mix mapped and unmapped specifiers')\n elif not cre.groupindex:\n self._retfunc = self._return_tuple\n self._type = tuple\n else:\n self._retfunc = self._return_dict\n self._type = dict\n\n self._casts = self._get_types()\n\n return self",
"def Pattern(self):\r\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.statrequest.pattern.pattern import Pattern\r\n\t\treturn Pattern(self)",
"def main():\n answer = random_word().upper()\n dashed_word = ''\n for i in range(len(answer)):\n dashed_word += '-'\n guess_times = 0\n while True:\n if guess_times == N_TURNS:\n # This is the last chance to guess and user failed\n print('You are completely hung :\\'(')\n break\n print('The word looks like: ' + dashed_word + '\\nYou have ' + str(N_TURNS - guess_times) + ' guesses left.')\n guess = input('Your Guess: ')\n if len(guess) == 1 and guess.isalpha():\n # Legal format\n guess = guess.upper()\n if answer.find(guess) != -1:\n # The guess is correct and should uncover the dashed_word\n print('You are correct!')\n dashed_word = uncover_dash(guess, answer, dashed_word)\n if not dashed_word.find('-') > -1:\n # No dash left.\n print('You win!!')\n break\n else:\n # Wrong guess\n guess_times += 1\n print('There is no ' + guess + '\\'s in the word.')\n else:\n print('Illegal format')\n print('The word was: ' + answer)",
"def try_to_guess(word):\n\n # set number of tries based on word length\n if 4 < len(word) < 7:\n tries = 4\n elif 7 < len(word) < 12:\n tries = 8\n else:\n tries = 12\n \n # create placeholder word eg: ---\n placeholder = ['-' for _ in range(len(word))]\n \n # list to check if letter was already guessed\n guesses = []\n\n while tries > 0:\n print('\\n' + ''.join(placeholder))\n letter = str(input(f\"Input a letter: \"))\n\n # only one lower case alphanum character\n if len(letter) > 1:\n print(\"You should input a single letter\")\n elif not letter.isalnum() or not letter.islower():\n print(\"It is not an ASCII lowercase letter\")\n \n elif letter in guesses:\n print(\"You already typed this letter\") \n elif letter not in word:\n print(\"No such letter in the word\")\n tries -= 1\n \n # we have a good letter\n else:\n for i, v in enumerate(word):\n \n if v == letter:\n placeholder[i] = letter\n \n if ''.join(placeholder) == word:\n print()\n print(''.join(placeholder))\n print(\"You guessed the word!\\nYou survived!\")\n return\n \n guesses.append(letter)\n \n else:\n print(\"You lost!\")\n print(f\"The word was {word}\")",
"def guess(self, row, col) -> Tuple[int, Optional[ship.Ship]]:\n my_ship: ship.Ship = self._board_matrix[row][col]\n\n # if my_ship is None the guess is a miss, otherwise its a hit\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.guess, just copy the code over\n\n # --------- END YOUR CODE ----------",
"def parse(self, pattern):\n phrasal_pattern = self.convert_parse_tree_to_phrasal_pattern(\n self.parse_tree(pattern))\n return phrasal_pattern",
"def decode_guess(self, label, buf, pos):\n try:\n print(str(pos) + \" Guess1: trying len delim\")\n return self.decode_lendelim_message(label, buf, {}, pos), 'message'\n except Exception:\n print(str(pos) + \" Guess2: trying bytes\")\n return self.decode_bytes(buf, pos), 'bytes'",
"def guess(word, old_ans):\n life = N_TURNS\n while life > 0:\n guess_ch = input('Your guess: ')\n guess_ch = guess_ch.upper()\n if guess_ch.isalpha() != True or len(guess_ch) != 1:\n print('Illegal format.')\n else:\n ans = ''\n if word.find(guess_ch) == -1:\n # when user doesn't find the right character\n print('There is no ' + guess_ch + \"'s in the word.\")\n life -= 1\n life = life\n for ch in word:\n if ch == guess_ch:\n ans += ch\n else:\n ans += '-'\n else:\n # when user make a correct guess that find out the right character of the word\n print('You are correct!')\n for ch in word:\n if ch != guess_ch:\n ans += '-'\n else:\n ans += guess_ch\n new_ans = ''\n for i in range(len(old_ans)):\n # to keep the previous right guess' result\n ch = old_ans[i]\n if ch.isalpha():\n new_ans += ch\n elif ch != ans[i]:\n new_ans += guess_ch\n else:\n new_ans += ch\n old_ans = new_ans\n if old_ans.isalpha():\n # when the user find all characters of the random word ans still alive\n print('You win!!')\n print('The word was: '+word)\n break\n else:\n if life > 0:\n print('The word looks like '+old_ans)\n print('You have '+str(life)+' guesses left.')\n # when the user make wrong guesses and finish all his/her guess opportunities\n if life == 0:\n print('You are completely hung : (')\n print('The word was: '+word)",
"def guess_letter(self, request):\n return games_ctrl.guess_letter(request.urlsafe_game_key,\n request.letter_guess)",
"def factory(**pattern):\n\n class_name = pattern.get('class')\n del (pattern['class'])\n\n # pprint(inspect.stack()[1][0].f_globals)\n _cls = inspect.stack()[1][0].f_globals[class_name]\n\n # _cls = globals()[class_name]\n return _cls(**pattern)",
"def get_inputs(self):\n self.console.write(self.words.show_lines() + \"\\n\")\n self.console.write(self.jumper.jumper_output())\n guess = input(\"Guess a letter [a-z]: \").lower()\n self.good_guess = self.words.get_lines(guess)",
"def lookup_pattern(name):\n\treturn _registered_patterns[name]",
"def new_round(guesses, letters_guessed = letters_guessed):\n\n # print(get_guessed_word(secret_word, letters_guessed) )\n print(\"You have \" + str(guesses) + \" guesses left.\")\n print(\"Available letters: \" + get_available_letters(letters_guessed))\n ans = input(\"Please guess a letter: \")\n if ans.isalpha():\n return ans.lower()\n else:\n return None",
"def replace( scr ):\n d = ReplaceDialog(scr)\n value = d.main()\n if not \"pattern\" in value:\n return (None,None)\n else:\n return (value[\"pattern\"],value[\"replace\"])",
"def get_interactive_match(self, choices, query):\n if query in self.SKIP_KEYWORDS:\n return None\n results = process.extract(query, choices, limit=10) # fuzzy string matching\n best_match = results[0]\n second_best_match = results[1]\n if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score\n self.print(\"Couldn't find a conclusive match for '%s'. Best matches:\" % (query))\n i = 0\n for result in results:\n i += 1\n print(\" [%i] %s\" % (i, result[0]))\n answer = input(\"Choose one or specify a less ambiguous query: \")\n self.clear_lines(2 + len(results))\n if answer.isdigit() and int(answer) <= len(results):\n return results[int(answer) - 1][0]\n else:\n return self.get_interactive_match(choices, answer)\n else:\n return best_match[0]",
"async def find_match(\n input_str: str,\n tz: pytz.timezone = pytz.utc,\n finished_only: Optional[bool] = None\n) -> Match:\n args = shlex.split(input_str)\n if len(args) < 2:\n raise necrobot.exception.ParseException('Need at least two arguments to find a match.')\n\n racer_1 = await userlib.get_user(any_name=args[0])\n if racer_1 is None:\n raise necrobot.exception.NotFoundException(\"Can't find any racer by the name `{0}`.\".format(args[0]))\n args.pop(0)\n\n racer_2 = await userlib.get_user(any_name=args[0])\n if racer_2 is None:\n raise necrobot.exception.NotFoundException(\"Can't find any racer by the name `{0}`.\".format(args[0]))\n args.pop(0)\n\n match_date = None\n match_date_str = ''\n for arg in args:\n match_date_str += arg + ' '\n if match_date_str:\n match_date_str = match_date_str[:-1]\n match_date = dateparse.parse_datetime(match_date_str, tz)\n\n match_id = await matchdb.get_match_id(\n racer_1_id=racer_1.user_id,\n racer_2_id=racer_2.user_id,\n scheduled_time=match_date,\n finished_only=finished_only\n )\n if match_id is None:\n raise necrobot.exception.NotFoundException(\n \"Can't find any match between `{0}` and `{1}`.\".format(racer_1.display_name, racer_2.display_name)\n )\n\n return await matchutil.get_match_from_id(match_id)",
"def guess_word(self, request):\n return games_ctrl.guess_word(request.urlsafe_game_key,\n request.word_guess)",
"def get_new_password(self, user):\r\n print (_NEW_PASS_PROMPT)\r\n msg_pw = \"Enter a password for the user '{0}': \".format(user)\r\n msg_cf = \"Please confirm the password for the user '{0}': \".format(user)\r\n\r\n while True:\r\n passwd = raw_input(msg_pw).strip()\r\n if passwd == raw_input(msg_cf).strip():\r\n if ' ' not in passwd and self.pass_validator(passwd):\r\n return passwd\r\n else:\r\n print('Password does not contain appropriate characters.')\r\n else:\r\n print('Passwords do not match.')",
"def __init__(self, targetChar, guessChar, leftChar=None, rightChar=None):\r\n self.guessChar = guessChar\r\n self.result = self.validateCharacter(targetChar, guessChar, leftChar=leftChar, rightChar=rightChar)",
"def pattern_gen():\n pattern = \"\"\n\n return pattern",
"def get_solution(player_mode):\n from random import sample as randomizer\n\n # Red | Green | Blue | Yellow | Orange | Purple\n possible_inputs = \"R G B Y O P\".split()\n\n if player_mode == '2':\n solution = input(\"MasterMind, please enter a solution of 4 colors: \").split()\n # Check length\n if len(solution) != 4:\n print('Your input is invalid. Please enter a solution of 4 colors.\\n')\n return get_solution('2')\n\n # Length's good, check whether the input's valid\n else:\n for i in solution:\n if i not in possible_inputs:\n print('Your input is invalid. Please select from', possible_inputs)\n print()\n return get_solution('2')\n return solution\n\n else:\n return randomizer(possible_inputs, 4)",
"def make_pattern(self):\n probability = random.SystemRandom().random()\n if probability < 0.1:\n _pattern = [0 for x in range(32)]\n elif probability > 0.5:\n pattern_num = SECURE_RANDOM.choice(CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.80:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n elif _probability < 0.40:\n _offset = random.SystemRandom().randint(2, 16)\n _pattern = [1 if (x == _offset) or (x % pattern_num == _offset) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n pattern_num = SECURE_RANDOM.choice(INNER_CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.50:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n\n if not self.global_swing:\n _probability = random.SystemRandom().random()\n if _probability > 0.3:\n _pattern.extend([random.SystemRandom().uniform(0.01, 0.5), random.SystemRandom().randint(1, 14), 0])\n else:\n _pattern.extend([0,1,0])\n else: \n _pattern.extend([0,1,1]) \n\n return _pattern",
"def draw_you_guess_it():\n window = rg.TurtleWindow()\n\n tx = rg.SimpleTurtle('turtle')\n tx.pen = rg.Pen('blue', 20)\n tx.speed = 5 # Medium\n\n tx.left(60)\n tx.forward(200)\n\n tx.pen_up()\n tx.left(120)\n tx.forward(100)\n tx.left(120)\n\n tx.pen_down()\n tx.forward(200)\n\n window.close_on_mouse_click()",
"def guess():\n\tprint request.form\n\tprint session['random_num']\n\tsession['match'] = ''\n\tsession['times_played'] = request.form['times_played']\n\tsession['guess'] = request.form['guess']\n\tprint session['guess']\n\tif int(session['guess']) == session['random_num']:\n\t\tsession['match'] = True\n\telse:\n\t\tif int(session['guess']) > int(session['random_num']):\n\t\t\tsession['value'] = 'HIGH'\n\t\telse:\n\t\t\tsession['value'] = 'LOW'\n\tprint session['match']\n\t# session['counter'] = str(int(session['counter']) + 2)\n\treturn redirect('/')",
"def get_guess():\n letter = input(\"Please input a letter to check\").lower()\n if len(letter) != 1:\n print(\"Please input a single letter\")\n get_guess()\n elif letter not in \"abcdefghijklmnopqrstuvxyz\":\n print (\"Only input letters\")\n get_guess()\n else:\n return letter",
"def guessNumb():\n #Really need to define as global guess, and can't do it on the same line\n global guess\n guess = int(input(\"Guess a number\"))",
"def sanitize_guess(self, letter): # helper function to incorrect_guess()\n self.guess = letter.lower().strip()\n if not self.guess.isalpha():\n Donatello.turtle_text(\n \"No special characters or numbers\") # no numbers, special characters or multiple words allowed\n return False\n # TODO if type != str raise assert\n return self.guess",
"def comment_prompt(): \n\n print(\"Valid comments contain only the characters 'a-z' and ',',\" \n \"e.g. daddy,puff. This field may be left blank.\")\n \n while True: \n comment = str(input(\"Enter user comments, or press 'return' twice \" \\\n \"to leave blank: \"))\n confirm_comment = str(input(\"To confirm, re-enter comments: \"))\n \n if comment != confirm_comment or not re.match(\"^[a-z,]*$\", comment):\n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Comments match. Continuing...\") \n return comment",
"def attack_input(self):\n while True:\n if self.user == 'player':\n print(\"ITS YOUR TURN TO ATTACK!\\n\")\n try:\n column = input('ENTER DESIRED COLUMN (A-J): \\n').upper()\n if not re.match('^[A-J]*$', column):\n print('PLEASE ENTER A VALID LETTER BETWEEN A-J')\n else:\n column = self.letters_to_numbers[column]\n break\n except KeyError:\n print('PLEASE ENTER A LETTER')\n elif self.user == 'computer guess':\n column = self.comp_attack_column()\n if column == range(0, 10):\n break\n else:\n column = random.randint(0, 9)\n break\n while True:\n if self.user == 'player':\n try:\n row = input('ENTER DESIRED ROW (0-9): \\n')\n if row in self.row_input:\n row = int(row)\n break\n else:\n raise ValueError\n except ValueError:\n print('PLEASE ENTER A VALID NUMBER BETWEEN 0-9')\n elif self.user == 'computer guess':\n row = self.comp_attack_row()\n if row == range(0, 10):\n break\n else:\n row = random.randint(0, 9)\n break\n return column, row",
"def match(self, target, guess):\r\n return guess == target",
"async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))",
"def results_of_guess(self):\r\n print(self.best_guess)\r\n print(self.chosen_letter)\r\n \r\n #self.best_guess = input(\"Enter word with correct letters and stars \" + \"as blank spaces.\")\r\n wrong_words = set()\r\n if self.chosen_letter in self.best_guess: # in case of success\r\n print(\"hit\")\r\n list_of_indices = [i for i, value in enumerate(self.best_guess) \r\n if value == self.chosen_letter]\r\n for word in self.valid_words:\r\n for index in list_of_indices:\r\n if word[index] != self.chosen_letter:\r\n wrong_words.add(word)\r\n elif word.count(self.chosen_letter) > len(list_of_indices):\r\n wrong_words.add(word)\r\n \r\n else: # in case of failure\r\n print(\"miss\")\r\n for word in self.valid_words:\r\n if self.chosen_letter in word:\r\n wrong_words.add(word)\r\n self.valid_words = self.valid_words.difference(wrong_words)",
"def check_guess(guess):\n while True:\n print(\" Was \" + str(guess) + \" too high, too low, or correct?\")\n answer = input()\n answer= answer.lower()\n \n if answer == 'too low' or answer == 'to low':\n return -1\n elif answer == 'too high' or answer == 'to high':\n return 1\n elif answer == 'correct':\n return 0\n else:\n print(\"I don't understand. Please enter 'too low', too high', or 'correct'.\")",
"def find_by_pattern(self):\n while True: \n word = input(\"Enter a regular expression ex: \\d\\d\\w+. Press Q to \"\n \"quit to the main screen: \")\n if word.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n return self.dict_list\n self.find_by_pattern_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(word, value):\n self.find_by_pattern_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_pattern_list)\n break\n self.del_or_edit()",
"def match(self, text):\r\n match = re.match(self.regex, text)\r\n if match is None:\r\n return None\r\n else:\r\n return Bricks.flatten(self._matchPart(match[0]))",
"def guess_breed(dbo, s):\n s = str(s).lower()\n guess = db.query_int(dbo, \"SELECT ID FROM breed WHERE LOWER(BreedName) LIKE '%\" + db.escape(s) + \"%'\")\n if guess != 0: return guess\n return configuration.default_breed(dbo)",
"def game_code(user_input, secret_word, my_letters, guess_count):\n#if str.isalpha(myinput1) == True and myinput1 not in my_letters and guess_count > 0:\n if user_input in secret_word and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Good guess: ' + mytempstr1)\n return 0\n elif user_input in ['a','e','i','o','u'] and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! That letter is not in my word: ' + mytempstr1)\n return 1\n elif len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! That letter is not in my word: ' + mytempstr1)\n return 2",
"def create_new_guess():\n next_choice = next(permutation_iterator) \n while inconsistent(next_choice, guesses):\n try:\n next_choice = next(permutation_iterator)\n except StopIteration:\n print(\"Error: Your answers were inconsistent!\")\n return ()\n return next_choice",
"def favorite_character(email):\n if email == '[email protected]':\n return SwCharacterFactory.human('1001')\n else:\n return SwCharacterFactory.droid('2001')",
"def from_regex(pattern:str) -> str:\n raise NotImplementedError()",
"def make_game(self, input, start, end, elements):\n e = elements\n\n if re.match(r\"(1-0|0-1|1/2-1/2|\\*)\", e[4].text):\n s = Score(e[4].text)\n else:\n s = Score('*')\n g = Game(e[0], e[2], e[3], s)\n return g",
"def correct_guess(self, guess):\n \n if self.code == guess:\n return True\n return False",
"def evaluate_my_number(guess, random_number):",
"def AIguessing(lijst):\n\n global Code\n global allcombos\n\n\n AIguess = choice(lijst)\n\n print(f\"The original code was {Code}\")\n print(f\"my guess this time is {AIguess}, how did I do?\")\n while not feedbackgiven:\n correct = int(input(\"Write down how many colors are in the right spot: \"))\n semicorrect = int(input(\"Write down how many colors are correct but not in the right spot: \"))\n\n feedback = correct + semicorrect\n if feedback <= 4:\n return NewFeedbackSystem(AIguess, correct, semicorrect, lijst)\n else:\n print(\"please use numbers 1-4 where the total <= 4\")\n continue",
"def advancedGuessingGame():\n\n print(\"\\nWelcome to the guessing game!\")\n print(\"A number between _ and _ ?\")\n\n lowerBound = not_number_rejector(\"Enter Lower Bound: \")\n\n higher_number = False # we need to set an upper and lowerbound for game\n\n while not higher_number:\n upperBound = not_number_rejector(\"Enter Upper Bound: \")\n if upperBound > lowerBound:\n higher_number = True\n else:\n print(\"The upperbound is lower than you lowerbound: TRY AGAIN\")\n\n # above code ensures upper > lower, see stubbon_asker in EX1\n\n print(\"OK then, guess a number between {} and {} ?\".format(lowerBound, upperBound))\n lowerBound = int(lowerBound) # ensures integer is give (Not a letter)\n upperBound = int(lowerBound)\n\n actualNumber = random.randint(lowerBound, upperBound)\n\n guessed = False\n\n while not guessed:\n guessedNumber = not_number_rejector(\"Make a guess: \")\n print(\"You guessed {},\".format(guessedNumber),)\n if guessedNumber == actualNumber:\n print(\"HOW DID YOU GET THAT! It was {}\".format(actualNumber))\n guessed = True\n elif guessedNumber > upperBound:\n print(\"This is higher than the upperbound! Try again!\")\n elif guessedNumber < lowerBound:\n print(\"This is lower than the lowerbound! Try again!\")\n elif guessedNumber < actualNumber:\n print(\"{} is too small, try again\".format(actualNumber))\n else:\n print(\"{} is too big, try again \".format(actualNumber))\n return \"You got it!\"\n # the tests are looking for the exact string \"You got it!\". Don't modify that!",
"def scanf(self, string):\n match = self._re.match(string)\n return self._retfunc(match) if match is not None else None",
"def prompt_guess(self, success_code):\n user = input()\n if user == str(success_code):\n return True\n return False",
"def compile(self, name, pattern):\n try:\n return self.get_pattern(name)\n except KeyError:\n return self.store_pattern(name, re.compile(pattern))",
"def not_number_rejector(message):\n actual_number = False\n\n while not actual_number:\n guess = str(input(message))\n if guess.isdigit():\n actual_number = True\n return int(guess)\n else:\n print(\"Not a number\")"
] | [
"0.55911654",
"0.55187875",
"0.5507677",
"0.5500603",
"0.5459489",
"0.54325235",
"0.53727347",
"0.52810353",
"0.52759",
"0.51961994",
"0.5180741",
"0.5148151",
"0.5114034",
"0.50629675",
"0.5012794",
"0.5007291",
"0.49607527",
"0.4953114",
"0.4934326",
"0.4934326",
"0.4837743",
"0.4833182",
"0.4827561",
"0.47882834",
"0.47829786",
"0.4779183",
"0.47734258",
"0.4764575",
"0.4763804",
"0.4760488",
"0.47331026",
"0.47136623",
"0.4681375",
"0.4674718",
"0.46609163",
"0.46120435",
"0.45852268",
"0.45747837",
"0.453619",
"0.45317596",
"0.45296925",
"0.45212486",
"0.45205176",
"0.451894",
"0.4496225",
"0.44955087",
"0.44948116",
"0.44869316",
"0.4457853",
"0.44563523",
"0.44563523",
"0.44486102",
"0.44319737",
"0.44274196",
"0.44254872",
"0.44248796",
"0.44090447",
"0.44073883",
"0.4404218",
"0.43989566",
"0.43897077",
"0.43730298",
"0.43581787",
"0.43509242",
"0.43462637",
"0.4339922",
"0.43356907",
"0.4328579",
"0.43211654",
"0.431989",
"0.43147883",
"0.4311795",
"0.42959565",
"0.42923146",
"0.42791978",
"0.42704505",
"0.42682442",
"0.42504594",
"0.42444724",
"0.4230421",
"0.42274556",
"0.42256206",
"0.42184553",
"0.42166013",
"0.4215088",
"0.42125005",
"0.42050067",
"0.42045406",
"0.41985396",
"0.41977948",
"0.4194268",
"0.41904998",
"0.41882426",
"0.41859",
"0.4179476",
"0.41699705",
"0.4166534",
"0.4161303",
"0.4160255",
"0.41583294"
] | 0.7146551 | 0 |
Return welcome page for a non-logged-in user | def login():
user_type = get_admin_type_in_session()
login_form = LoginForm(request.form)
current_app.logger.info(f'user_type: {user_type}')
if user_type != UserTypeEnum.NOT_LOGIN:
return redirect(url_for('admin.admin'))
if 'login' in request.form:
# read form data
username = request.form.get('username')
password = request.form.get('password')
remember = True if request.form.get('remember') else False
staff_checked = db.session.query(Staff).filter(Staff.username == username).first()
if not staff_checked or not check_password_hash(staff_checked.password, password):
return render_template('accounts/login.html', msg='Account does not exist or password is incorrect, please check',
form=login_form)
else:
session['admin'] = username
session['admin_type'] = UserTypeEnum.ADMIN_LOGIN
session['admin_id'] = staff_checked.id
session['remember'] = remember
session['admin_avatar'] = staff_checked.avatar_url if staff_checked.avatar_url else ''
return redirect(url_for('admin.admin'))
return render_template('accounts/login.html',
form=login_form) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index():\n is_admin = dbhandler.is_admin(current_user())\n return render_template('./welcome.html', username=current_user(), is_admin=is_admin)",
"def welcome(self):\n if self.user:\n return self.render('welcome.html')\n self.redirect('/register')",
"def get(self):\n if self.user:\n self.render('welcome.html', username = self.user.name)\n else:\n self.redirect('/signup')",
"def home_page():\n if not g.user:\n flash(\"Please login to view.\", \"warning\")\n return redirect('/login')\n return render_template('index.html')",
"def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")",
"def welcome(request):\n return dict(\n user=request.user\n )",
"def welcome_page():\n return redirect(\"/static/welcome.html\")",
"def home():\n if not session.get('logged_in'):\n return redirect(url_for('welcome'))\n return render_template('home.html', filename=\"yarg.jpg\")",
"def welcome():\n return render_template(\"NFL.html\")",
"def welcome_page():\n \n username = session.get('username')\n reset()\n if request.method == 'POST':\n if not username:\n session['username'] = request.form['username']\n if username:\n return redirect(url_for('question_page'))\n return render_template('welcome.html', username=username)",
"def welcome(request):\n return HttpResponse('Welcome from Site')",
"def show_index():\r\n if 'username' in flask.session:\r\n return flask.redirect(flask.url_for('home')) # Need to fix redirect\r\n\r\n return flask.render_template(\"index.html\")",
"def ShowLogin():\n current_user = helpers.get_current_user()\n if current_user is None:\n return render_template('login.html')\n else:\n return redirect('/')",
"def index(self):\n\n # try and pull the user's data\n user = get_active_user_data()\n\n if not user:\n # they are not logged in give them the login form\n return render('/login_form.html')\n\n # they are logged in, pass them to the home page\n redirect('/')",
"def index():\n if auth.user:\n message=\"Welcome: \"\n user=auth.user\n else:\n message=\"Please use login for testing...\"\n user=None\n return dict(message=message, user=user)",
"def home():\n return render_template('login.html')",
"def homepage( request ):\n if \"email\" in request.session:\n return redirect( '/home' )\n return render_to_response( 'index.html' )",
"def home(request):\n if request.user.is_authenticated():\n return HttpResponse(\"{0} <a href='/accounts/logout'>exit</a>\".format(request.user))\n else:\n return HttpResponse(\"<a href='/login/vk-oauth2/'>login with VK</a>\")",
"def default():\n\treturn render_template(\"login.html\")",
"def index():\n aaa.require(fail_redirect='/login')\n return 'Welcome! <a href=\"/admin\">Admin page</a> <a href=\"/logout\">Logout</a>'",
"def index():\n if 'name' in session:\n return render_template('home.html')\n return redirect(url_for('log_in'))",
"def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))",
"def home(request):\n if 'member_id' not in request.session:\n return redirect(\"/login/\")\n return render(request, 'esihapp/index1.html')",
"def hello_page(request):\n text = \"Welcome to test_project\"\n if not request.user.is_anonymous:\n text = \"Welcome '%s' to test_project\" % request.user.username\n return HttpResponse(text, content_type='text/plain')",
"def home(request):\n if request.user.is_authenticated:\n return redirect('/start')\n return render(request, 'home/home.html')",
"def home(result=None):\n print(inspect.stack()[1][3])\n\n if not session.get('logged_in') and not result:\n return render_template('login.html')\n else:\n # Based on the user_id passed, print Details, URLS and all.\n # return render_template('dashboard.html', username=result.name, user_id=result.user_type)\n return render_template('webpage/index1.html', username=result.name, user_id=result.user_type)",
"def index():\n if (session_get_int(\"user_id\") is not None):\n return render_template(\"dashboard.html\")\n else:\n return render_template(\"index.html\")",
"def welcome(request):\n return render(request, 'code_challenge/welcome.html', {})",
"def index():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n return render_template('index.html')",
"def home(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('done')\n else:\n return render_to_response('home.html', RequestContext(request))",
"def index(request):\n\n\tif request.user.is_authenticated:\n\t\treturn HttpResponseRedirect('home')\n\treturn HttpResponseRedirect('login')",
"def home(request):\n if request.user.is_authenticated:\n return render(request, 'wantedly_app/home.html')\n\n # Execute the below if the user is not authenticated.\n if request.method == 'POST':\n user = authenticate(username=request.POST['username'], password=request.POST['password'])\n\n # If the user exists in the DB,\n if user is not None:\n\n # If the user is active,\n if user.is_active:\n auth_login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n messages.add_message(request, messages.SUCCESS, 'ログインしました!')\n return redirect('home')\n\n # If the user is not active,\n else:\n messages.add_message(request, messages.ERROR, 'ユーザーのアクティベーションがまだ完了していません。')\n\n # If the user does not exists in the DB,\n else:\n messages.add_message(request, messages.ERROR, 'ログインに失敗しました。ユーザーが存在しないかパスワードが間違っています。')\n\n context = {'login_form': LoginForm()}\n return render(request, 'wantedly_app/top.html', context)",
"def home():\n\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n else:\n return redirect(url_for('show_registrations'))",
"def user():\n if \"username\" in session:\n username = session[\"username\"]\n return f\"<h1>{username}</h1>\"\n return redirect(url_for(\"login\"))",
"def login():\r\n return render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )",
"def index(request):\n try:\n if request.user.is_authenticated:\n return render(request, \"pages/index.html\")\n else:\n return redirect('login')\n\n except:\n return redirect('login')",
"def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))",
"def homepage():\n form = LoginForm()\n return render_template(\"admin/index.html\", title=\"Admin\", form=form)",
"def index(self):\n raise cherrypy.HTTPRedirect('/user')",
"def home():\n\n # sets the page to load depending on the type of user\n # if none specified the login screen will be displayed\n pageName = ''\n userType = session.get('UserType', None)\n if userType == None:\n pageName = 'anonHome.jade'\n elif userType == 'Seeker':\n pageName = 'indexJob.jade'\n elif userType == 'Manager':\n pageName = 'indexManager.jade'\n\n frogHop = url_for('static', filename='loop frog.gif')\n uName = session.get('UserName', 'Unknown') # load a default value if retrieval fails\n return render_template(\n pageName,\n title='Home',\n name=uName,\n getFrog=frogHop,\n year=datetime.now().year,\n )",
"def game():\n\tif \"username\" in session:\n\t\treturn render_template(\"index.html\")\n\telse:\n\t\treturn redirect(url_for(\"default\"))",
"def home(request):\n assert isinstance(request, HttpRequest)\n iscapable =False\n if request.user.username in get_librarians():\n iscapable=True;\n\n return render(\n request,\n 'app/index.html',\n {\n 'title':'Home Page',\n 'iscapable':iscapable,\n 'year':datetime.now().year,\n }\n )",
"def home(request):\n # if request.user.is_authenticated():\n # return redirect('/fastapp')\n return context()",
"def home():\n # if session.get('username'):\n # return redirect(url_for('categories'))\n # else:\n return render_template('home.html')",
"def home(request):\n default_url = reverse(root_nodes)\n\n if request.user.moderator_profile != None:\n home_url = request.user.moderator_profile.get_home_url()\n if home_url != None:\n return HttpResponseRedirect(home_url)\n\n return HttpResponseRedirect(default_url)",
"def home():\n settings = PageSetting.find_settings()\n if not (settings.enabled) and not (authenticated(session)):\n return render_template(\"errors/maintenance.html\")\n else:\n return render_template(\"layout/index.html\", settings=settings)",
"def index():\n return render_template('index.html', username=session['username'])",
"def get(self):\n user = self.get_active_user()\n if user:\n self.render_newpage(user=user)\n else:\n self.redirect('/login')",
"def main():\n if 'username' in session:\n flash(f'Logged in as {session[\"username\"]}')\n else:\n flash('You are not logged in.')\n return render_template(\"main.html\", title=\"Main\")",
"def home_view(request):\n if request.authenticated_userid:\n return HTTPFound(location=request.route_url('app_view')) # pragma no cover\n return {} # pragma no cover",
"def root(request):\n\n return render(request, 'users/index.html')",
"def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))",
"def home(request):\n #print (\"home\")\n if request.user.is_authenticated():\n return redirect('done')\n return context()",
"def index(request):\n if request.method == 'GET':\n return render(request, 'welcome/index.html', {})\n elif request.method == 'POST':\n email = request.POST.get('email', None)\n password = request.POST.get('password', None)\n\n login_successful, message, email, full_name = login(email, password)\n\n if login_successful:\n request.session['email'] = email\n request.session['full_name'] = full_name\n messages.add_message(request, messages.INFO, message)\n return redirect('teamsapp:teams') \n else:\n messages.add_message(request, messages.INFO, message)\n return redirect('welcome:index') \n else:\n return Http404('Not allowed')",
"def home_page():\n return redirect('/users')",
"def special(request):\n return HttpResponse(\"You are logged in !\")",
"def welcome():\n return 'Welcome to Flask!'",
"def kualalumpur():\n if \"username\" in session:\n return render_template(\"kualalumpur.html\")\n return abort(401)",
"def landing_page():\n\n print session\n\n if 'acct' in session:\n acct = get_current_account(session['acct'])\n search = False\n return render_template(\"index.html\", acct=acct, search=search)\n\n else:\n return redirect(\"/signup\")",
"def greet_user():\n username = load_user_data()\n if username != None:\n print(\"Welcome back, \" + username)\n else:\n register_user()",
"def home():\n return render_template('home.html',\n face=session.get(app.config['SESSION_KEY'], None))",
"def homepage():\n return redirect('index.html')",
"def login():\n\n return render_template('login.html')",
"def index(request):\n if request.user.is_authenticated:\n return redirect('/dashboard')\n else:\n context = {'client_id': settings.OPENHUMANS_CLIENT_ID,\n 'oh_proj_page': settings.OH_ACTIVITY_PAGE}\n\n return render(request, 'main/index.html', context=context)",
"def singapore():\n if \"username\" in session:\n return render_template(\"singapore.html\")\n return abort(401)",
"def index(request):\n if request.user.is_authenticated():\n return redirect('/matrix/')\n else:\n form = AuthenticationForm(request)\n return render(request, 'registration/login.html', {'form': form})",
"def show_home_page():\n\n login_form = LoginForm()\n # create register form instance to go in modal\n register_form = UserAddForm()\n\n # handle login form validation\n if login_form.validate_on_submit():\n email = login_form.email.data\n password = login_form.password.data\n\n user = User.authenticate(email, password)\n\n # handle use case for a user being returned with valid password entered\n if user and user != 'invalid password':\n do_login(user)\n flash(f'Hello, {user.username}!', 'secondary')\n return render_template('home.html', user=user)\n # handle invalid password entry\n elif user == 'invalid password':\n login_form.password.errors = [\"Incorrect Password.\"]\n return render_template('home_anon.html', login_form=login_form, register_form=register_form)\n # handle user being not found\n else:\n login_form.email.errors = [\n 'Invalid Credentials. Please check email/password and try again']\n return render_template('home_anon.html', login_form=login_form, register_form=register_form)\n if CURRENT_USER_KEY in session:\n user = User.query.get(session[CURRENT_USER_KEY])\n if user:\n return render_template('home.html', user=user, home_active='active')\n\n # redirect to sign in page if no user is logged in\n\n return render_template('home_anon.html', login_form=login_form, register_form=register_form, img_cls='hidden')",
"def homepage():\n \n return render_template(\"coursePage.html\",courseName = \"Welcome\", Courses = COURSES, blank = 1)",
"def get(self):\n user = self.get_active_user()\n if not user:\n self.render(\"login_signupbase.html\",\n login=self.LOGIN_FORM,\n main_heading=self.MAIN_HEADING)\n else:\n self.render(\"redirect_in_8.html\",\n message=\"\"\"You are already signed in! <a href='/logout'>\n Log out</a> before signing in with a new\n account or return to the\n <a href='/'>front page</a>.\"\"\")",
"def homepage():\n return render_template(\"home/index.html\", title=\"Welcome\")",
"def nlogin(request):\n assert isinstance(request, HttpRequest)\n context = { 'title':'Not logged in', \n 'message':'Radboud University CESAR utility.',\n 'year':datetime.now().year,}\n return render(request,'nlogin.html', context)",
"def homepage():\n return render_template('home/index.html', title=\"Welcome\")",
"def homepage():\n return render_template('home/index.html', title=\"Welcome\")",
"def me():\n if g.USER:\n return redirect(url_for(\"profile\", username=g.USER.username))\n return redirect(url_for(\"home\"))",
"def homepage():\n\treturn render_template(\"home/a_homepage.html\", title=\"Welcome\")",
"def welcome_page(self):\n return self.properties.get(\"WelcomePage\", None)",
"def home():\n return render_template(\n 'index.html',\n title='Automation Center',\n year=datetime.now().year,\n message='Welcome to the Automation Center'\n )",
"def index():\n print(\"Inside index()\")\n if \"display_name\" not in session:\n return render_template(\"create_account.html\")\n\n return f\"Hello, {session['display_name']}\"",
"def deny_access():\n flash('You must login first.')\n return redirect(url_for('home'))",
"def index():\n return render_template(\"loginTest.html\")",
"def home(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('logged-in')\n else:\n home_view = 1\n return render_to_response('content/home.html', {'version': version, \"home_view\":home_view},\n RequestContext(request))",
"def welcome_callback():\n return render_template('welcome.html', title='TheCrew Casting Agency')",
"def user_home(request, user_name):\n query = User.objects.filter(username=user_name)\n if query.count() == 0:\n raise Http404(\"Can't find a user named: %s\" % user_name)\n else:\n user = query[0]\n if UserProfile.objects.filter(user=user):\n user_profile = UserProfile.objects.filter(user=user)[0]\n groups = get_user_groups(user)\n return render_to_response('user_home.html', locals())",
"def get(self):\n if self.logged_in:\n self.render('home.html', {\n 'name': self.current_user.name,\n 'server': self.current_user.server,\n 'faction': factions.get(self.current_user.faction),\n 'home': True,\n 'page_id': 'home'\n })\n else:\n self.render('home.html', {\n 'servers': servers,\n 'factions': factions,\n 'destination_url': '/settings',\n 'home': True,\n 'page_id': 'home'\n })",
"def home(request):\n return render(request, 'users/dashboard.html')",
"def home():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n if check_authentication(session_id, user_id):\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), user=user_id,\n session_id=session_id, authjs=True, preview_length=get_cars_preview().__len__())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=True,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)",
"def view_home(self):\n with self.client.get(\"/home\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.failure(\"Not logged on: Got redirect to /login\")",
"async def root(request: Request):\n return templates.TemplateResponse(\"welcome.html\", {\"request\": request})",
"def unauthorized_handler(self):\n return flask.redirect(\"/login\")",
"def employee_login():\n return Response(render_template('admin/login.html'))",
"def home(request):\n if request.user.is_authenticated():\n domain = request.get_host()\n profile_picture = request.user.default_profile_picture\n full_name = request.user.full_name\n phone_number = request.user.phone_number\n context = {\n 'domain': domain,\n 'profile_picture': profile_picture,\n 'full_name': full_name,\n 'phone_number': phone_number,\n }\n return render(request, 'home.html', context)\n return render(request, 'home.html', {})",
"def get(self):\n self.render(\"login.html\")",
"def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login',\n params=dict(came_from=came_from, __logins=login_counter))\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n\n # Do not use tg.redirect with tg.url as it will add the mountpoint\n # of the application twice.\n return HTTPFound(location=came_from)",
"def login_get():\n next_url = url_for('index.index')\n if g.session:\n flash(gettext('You are already logged in'), 'success')\n return redirect(next_url)\n\n return render_template('sites/auth/login.html', title=gettext('Login'))",
"def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False",
"def about():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n if check_authentication(session_id, user_id):\n return render_template('about.html', user=user_id, session_id=session_id)\n else:\n return render_template('about.html')",
"def login():\n return render_template('auth/login.html')",
"def login():\n return render_template('auth/login.html')",
"def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))",
"def admin_login():\n return Response(render_template('admin/login.html'))",
"def login_view():\n return url(r'^login/$', login, {'template_name': 'miniuser/login.html'}, name='login')"
] | [
"0.77734786",
"0.76969486",
"0.74549574",
"0.73873097",
"0.7311019",
"0.72966236",
"0.728712",
"0.72041076",
"0.7171604",
"0.71527934",
"0.7149935",
"0.71144354",
"0.71122956",
"0.7080843",
"0.705263",
"0.6983225",
"0.69771266",
"0.6955535",
"0.6934617",
"0.69343626",
"0.69167787",
"0.68704706",
"0.685504",
"0.6841191",
"0.6830997",
"0.6813968",
"0.681332",
"0.68024755",
"0.6775515",
"0.6775393",
"0.67563665",
"0.672242",
"0.66937757",
"0.66707593",
"0.66613656",
"0.66611737",
"0.6645133",
"0.66252744",
"0.66216105",
"0.66166663",
"0.6588773",
"0.6584355",
"0.65681463",
"0.6562946",
"0.65382135",
"0.6528447",
"0.6525014",
"0.65206254",
"0.6512105",
"0.6510724",
"0.6505432",
"0.64671385",
"0.64664865",
"0.6418828",
"0.64163005",
"0.63501513",
"0.6341278",
"0.63324547",
"0.6331914",
"0.63156426",
"0.6312349",
"0.6303927",
"0.6300267",
"0.62991464",
"0.6294597",
"0.62930286",
"0.62870955",
"0.6267879",
"0.6267256",
"0.62654316",
"0.625112",
"0.6246512",
"0.6246512",
"0.6241207",
"0.6230777",
"0.6229147",
"0.62270427",
"0.6225699",
"0.62179416",
"0.6208784",
"0.62082535",
"0.6201549",
"0.61875707",
"0.6179261",
"0.61768794",
"0.61752266",
"0.61717105",
"0.6158393",
"0.6155724",
"0.6145412",
"0.6143269",
"0.612577",
"0.6125739",
"0.61221117",
"0.6105363",
"0.61013895",
"0.6092942",
"0.6092942",
"0.6086101",
"0.60859466",
"0.6083049"
] | 0.0 | -1 |
Creates a progress bar for lengthy processes, depending on the time spent iterating over the generator. | def optional_progressbar(iter: Generator[T, None, None],
title: Optional[str] = None,
n: Optional[int] = None,
progress: Optional[bool] = None,
time_threshold: float = 5.0) -> Generator[T, None, None]:
# tqdm is unavailable, use original generator
if tqdm is None:
yield from iter
return
# Config override
if progress is None and not config.Config.get_bool('progress'):
yield from iter
return
# If length was not given, try to determine from generator (if, e.g., list)
if n is None:
try:
n = len(iter)
except (TypeError, AttributeError):
n = None
# Collect starting data
if progress is True:
pbar = tqdm(total=n, desc=title)
else:
pbar = None
start = time.time()
for counter, elem in enumerate(iter):
if pbar is None and (time.time() - start) > time_threshold:
pbar = tqdm(total=n, desc=title, initial=counter)
yield elem
if pbar is not None:
pbar.update(1)
if pbar is not None:
pbar.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def progressbar(iterator, verbosity, length=None):\n\n if verbosity == logging.INFO:\n if not length:\n length = len(iterator)\n\n with click.progressbar(iterator, length=length) as _iterator:\n yield _iterator\n else:\n yield iterator",
"def make_progress_bar():\n\n if simple_tregex_mode:\n total_files = len(list(to_iterate_over.keys()))\n else:\n total_files = sum(len(x) for x in list(to_iterate_over.values()))\n\n par_args = {'printstatus': kwargs.get('printstatus', True),\n 'root': root, \n 'note': note,\n 'length': total_files,\n 'startnum': kwargs.get('startnum'),\n 'denom': kwargs.get('denominator', 1)}\n\n term = None\n if kwargs.get('paralleling', None) is not None:\n from blessings import Terminal\n term = Terminal()\n par_args['terminal'] = term\n par_args['linenum'] = kwargs.get('paralleling')\n\n if in_notebook:\n par_args['welcome_message'] = welcome_message\n\n outn = kwargs.get('outname', '')\n if outn:\n outn = outn + ': '\n\n tstr = '%s%d/%d' % (outn, current_iter, total_files)\n p = animator(None, None, init=True, tot_string=tstr, **par_args)\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n return p, outn, total_files, par_args",
"def print_progress_bar(self, iter_num, start_time):\n iteration = iter_num + 1\n prefix = \"Progress: \"\n length = 50\n fill = '█'\n percent = (\"{0:.\" + str(1) + \"f}\").format(100 *\n (iteration / float(self.num_games)))\n exact_progress = \"{}/{}\".format(iteration, self.num_games)\n filled_length = int(length * iteration // self.num_games)\n total_time = int(time()-start_time)\n time_remaining = (time() - start_time)/(float(iter_num)+0.1)\n time_remaining = str(int(time_remaining*(self.num_games-iter_num)))\n bars = fill * filled_length + '-' * (length - filled_length)\n\n print('\\r%s |%s| (%s) %s%% | ETA: %ss (%ss)\\t' %\n (prefix, bars, exact_progress,\n percent, time_remaining,\n total_time), end='\\r')\n\n # Print New Line on Complete\n if iteration >= self.num_games:\n print(\"\\r\\n\\r\\n\")",
"def progress_bar(iterable, prefix='', suffix='', decimals=1, length=50, fill='█', print_end=\"\\r\"):\n total = len(iterable)\n start_time = time()\n\n # Progress Bar Printing Function\n def printProgressBar(iteration):\n delta_time = (time() - start_time) / (iteration + 1)\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filled_length = int(length * iteration // total)\n bar = fill * filled_length + '-' * (length - filled_length)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}, {delta_time:.2f} it/s, {iteration = }', end=print_end)\n\n # Initial Call\n printProgressBar(0)\n\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()",
"def make_show_progress():\n \n start_time = time.time()\n lines_read = 0\n\n def show_progress(chunk_length):\n \"\"\"Displays a progress line. Created by make_show_progress.\"\"\"\n \n nonlocal lines_read\n\n lines_read += chunk_length\n elapsed_time = int(time.time() - start_time)\n print('{:,} lines read | time {:,}s'.format(lines_read, elapsed_time))\n\n return show_progress",
"def progressBar(iterable, prefix = 'Progress:', suffix = 'Complete', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n total = len(iterable)\n\n #-- Progress Bar Printing Function\n def printProgressBar (iteration):\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {iteration:,} of {total:,} ({100 * (iteration / float(total)):.1f}%) {suffix}', end = printEnd)\n\n #-- Initial Call\n printProgressBar(0)\n\n #-- Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n\n #--- Print New Line on Complete\n print()",
"def printWaitBar(i, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n import sys\n\n # total can never be zero because we divide by total\n if total == 0:\n total = 0.0001\n\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (i / float(total)))\n filled = int(length * i // total)\n bar = fill * filled + '-' * (length - filled)\n\n sys.stdout.write('\\r%s |%s| %s%% %s' %(prefix, bar, percent, suffix))\n sys.stdout.flush()\n\n if i == total:\n print()",
"def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()",
"def printProgressBar(iteration, total, prefix='Progress: ', suffix='Complete',\n decimals=1, length=50, fill='█'):\n global start_time\n if iteration == 0:\n start_time = time.time()\n value = 100 * (iteration / float(total))\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(value)\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n elapsed_time = int(time.time() - start_time)\n m = str(elapsed_time // 60).zfill(2)\n s = str(elapsed_time % 60).zfill(2)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def progress_bar(progress):\n bar_length = 50\n block = int(round(bar_length * progress))\n text = 'Progress: [{0}] {1}'.format('#' * block + '-' * (bar_length - block),\n progress * 100)\n # Print progress after removing the previous progress\n sys.stdout.write('\\r' + text)\n sys.stdout.flush()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def updateProgress (self, iteration, total, prefix='Progress', suffix='complete', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n if iteration == 0:\n self.start_time = timer()\n ETC = '' #Estimated Time to Completion\n if (iteration/total)*100 >= self.updates[self.update_counter]:\n elapsed = timer() - self.start_time\n if iteration != 0:\n minutes = int((elapsed * total/iteration - elapsed)//60)\n seconds = int((elapsed * total/iteration - elapsed)%60)\n ETC = \"(~{:d} mins {:d}s left)\".format(minutes, seconds)\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n # Unfortunately \\r doesn't work in the pycharm console, so we have to reprint the whole bar everytime,\n # clogging the console.\n #print(f'\\r{prefix} |{bar}| {percent}% {suffix} {ETC}', end = printEnd)\n print(f'{prefix} |{bar}| {percent}% {suffix} {ETC}')\n # Print New Line on Complete\n if iteration == total:\n print()\n self.update_counter += 1",
"def progressBar(iterable, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n total = len(iterable)\n # Progress Bar Printing Function\n def printProgressBar (iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()",
"def progressBar(iterable, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n total = len(iterable)\n # Progress Bar Printing Function\n def printProgressBar (iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n if total == 0:\n \treturn\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s\\r' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def progressBar(iterable, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n total = len(iterable)\n # Progress Bar Printing Function\n\n def printProgressBar(iteration):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 *\n (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)\n # Initial Call\n printProgressBar(0)\n # Update Progress Bar\n for i, item in enumerate(iterable):\n yield item\n printProgressBar(i + 1)\n # Print New Line on Complete\n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def print_progressbar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(prefix, bar, percent, suffix)\n print('{} |{}| {} {}'.format(prefix, bar, percent, suffix), end=printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '.' * (length - filledLength)\r\n print('\\r %s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\r\n # Print New Line on Complete\r\n if iteration == total:\r\n print()\r\n print()",
"def print_progress_bar(iteration: int, total: int, prefix: str = '', suffix: str = '', decimals: int = 1,\n length: int = 100, fill: str = '█', print_end: str = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=print_end)\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar (iteration, total, prefix = '\\tProgress', suffix = 'Complete', decimals = 2, length = 30, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar (self,iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()",
"def _printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '$'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n sys.stdout.write('\\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix))\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filled_length = int(length * iteration // total)\n bar = fill * filled_length + '-' * (length - filled_length)\n return '%s %s %s%% %s' % (prefix, bar, percent, suffix)",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '*'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=50, fill='█'):\n if int(iteration % (total / 100)) == 0 or iteration == total or prefix is not '' or suffix is not '':\n # calculated percentage of completeness\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n # modifies the bar\n bar = fill * filledLength + '-' * (length - filledLength)\n # Creates the bar\n print('\\r\\t\\t{} |{}| {}% {}'.format(prefix, bar, percent, suffix), end='\\r')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\r\n # Print New Line on Complete\r\n if iteration == total: \r\n print()",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100, fill = '█'):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(barLength * iteration // total)\n bar = fill * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r{0} |{1}| {2}% {3}'.format(prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 30, fill = '█'):\n total = float(total)\n\n percent = (\"0.0\") if total == 0 else (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / total))\n filledLength = 0 if total == 0 else int(length * iteration // total)\n\n bar = fill * filledLength + '-' * (length - filledLength)\n\n output = '\\r %s |%s| %s%% %s' % (prefix, bar, percent, suffix)\n\n console_columns = 80\n padding = ''\n\n if len(output) < console_columns:\n padding = ' ' * (console_columns - len(output))\n \n suffix = suffix + padding\n print('\\r %s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd, flush=True)\n # Print New Line on Complete\n if iteration == total: \n print()",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()",
"def start_progress(title):\n global progress_x\n sys.stdout.write(title + \": [\" + \"-\" * 40 + \"]\" + chr(8) * 41)\n sys.stdout.flush()\n progress_x = 0\n return 0",
"def printProgressBar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '|'):\n\tpercent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n\tfilledLength = int(length * iteration // total)\n\tbar = fill * filledLength + '-' * (length - filledLength)\n\tprint('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n\t# Print New Line on Complete\n\tif iteration == total:\n\t\tprint()",
"def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))",
"def printProgressBar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()",
"def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return",
"def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '#'):\r\n\tpercent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n\tfilledLength = int(length * iteration // total)\r\n\tbar = fill * filledLength + '.' * (length - filledLength)\r\n\tprint('\\r%s [%s] %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\r\n\t# Print New Line on Complete\r\n\tif iteration == total: \r\n\t\tprint()",
"def printProgressBar(iteration, total, pbar=False, prefix = '', suffix = '', decimals = 1, length = 50, fill = 'X', verbose=False):\n\n from .module_exists import module_exists\n from .in_ipynb import in_ipynb\n\n if module_exists('tqdm'):\n if type(pbar) == bool:\n if in_ipynb():\n if verbose: print('- NOTEBOOK MODE -')\n from tqdm import tqdm_notebook as tqdm\n else:\n if verbose: print('- PYTHON/BASH MODE -')\n from tqdm import tqdm\n pbar = tqdm(total=total)\n pbar.update(iteration)\n else:\n pbar.update(iteration-pbar.last_print_n)\n if iteration == total: pbar.close()\n return pbar\n\n else:\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n #print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n print('\\r{0} |{1}| %{2} %{3}'.format(prefix, bar, percent, suffix))\n # Print New Line on Complete\n if iteration == total:\n print()",
"def bar(self, progress):\n if not hasattr(self, \"_limit\") or not self._limit:\n self._limit = self.terminal_size()\n graph_progress = int(progress * self._limit)\n self.stdout.write(\"\\r\", ending=\"\")\n progress_format = \"[%-{}s] %d%%\".format(self._limit)\n self.stdout.write(\n self.style.SUCCESS(\n progress_format\n % (self.progress_symbol * graph_progress, int(progress * 100))\n ),\n ending=\"\",\n )\n self.stdout.flush()",
"def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 *\n (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()",
"def batch_progress_bar(batch_num, report_interval, last_loss):\n progress = (((batch_num - 1.0) % report_interval) + 1.0) / report_interval\n fill = int(progress * 40)\n print \"\\r\\tBATCH [{}{}]: {} (ELBO: {:.4f})\".format(\n \"=\" * fill,\n \" \" * (40 - fill),\n batch_num,\n last_loss)",
"def __printProgressBar (self,iteration, total,size,speedd='n', prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n\t\tpercent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n\t\tfilledLength = int(length * iteration // total)\n\t\tsize='%.1f'%size\n\t\tbar = fill * filledLength + '-' * (length - filledLength)\n\t\tprint('\\r(%sMG) |%s| %s%% [%s]kbs size=[%s]MB %s' % (prefix, bar, percent,speedd,size, suffix), end = '\\r')",
"def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def print_progress_bar(iteration, total, suffix=\"\", decimals=1, length=100, fill=\"█\", print_end=\"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filled_length = int(length * iteration // total)\n bar = fill * filled_length + \"-\" * (length - filled_length)\n\n prefix = \" {}/{}\".format(iteration, total)\n\n print(f\"\\r{prefix} |{bar}| {percent}% {suffix}\", end=print_end)\n # print new line on complete\n if iteration >= total:\n print()",
"def log_progress(sequence, every=None, size=None, name='Items'):\n from ipywidgets import IntProgress, HTML, VBox\n from IPython.display import display\n\n is_iterator = False\n if size is None:\n try:\n size = len(sequence)\n except TypeError:\n is_iterator = True\n if size is not None:\n if every is None:\n if size <= 200:\n every = 1\n else:\n every = int(size / 200) # every 0.5%\n else:\n assert every is not None, 'sequence is iterator, set every'\n\n if is_iterator:\n progress = IntProgress(min=0, max=1, value=1)\n progress.bar_style = 'info'\n else:\n progress = IntProgress(min=0, max=size, value=0)\n label = HTML()\n box = VBox(children=[label, progress])\n display(box)\n\n index = 0\n try:\n for index, record in enumerate(sequence, 1):\n if index == 1 or index % every == 0:\n if is_iterator:\n label.value = '{name}: {index} / ?'.format(\n name=name,\n index=index\n )\n else:\n progress.value = index\n label.value = '{name}: {index} / {size}'.format(\n name=name,\n index=index,\n size=size\n )\n yield record\n except:\n progress.bar_style = 'danger'\n raise\n else:\n progress.bar_style = 'success'\n progress.value = index\n label.value = \"{name}: {index}\".format(\n name=name,\n index=str(index or '?')\n )",
"def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=30, fill='='):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n if filledLength == 0:\n bar = '.' * length\n elif filledLength == length:\n bar = fill * filledLength\n else:\n bar = fill * (filledLength - 1) + '>' + '.' * (length - filledLength)\n # allow for some extra padding at the end for variable line length\n print(f'\\r{prefix} [{bar}] {percent}% {suffix}', end='')\n # Print New Line on Complete\n if iteration == total:\n print()",
"def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd, flush=True)\n # Print New Line on Complete\n if iteration == total:\n print(flush=True)",
"def bar(length, progress):\n dots = floor(progress * length)\n spaces = length - dots\n\n percent = int(progress * 100)\n\n return f'{percent}% |' + dots * '*' + spaces * ' ' + '|'",
"def tqdm(iterable, desc='', total=None, leave=True, file=sys.stderr,\n mininterval=0.05, miniters=1, extra=\"\"):\n if total is None:\n try:\n total = len(iterable)\n except TypeError:\n total = None\n \n prefix = desc+': ' if desc else ''\n\n do_rgb = not os.getenv(\"STY\")\n do_ascii = not not os.getenv(\"STY\")\n \n sp = StatusPrinter(file)\n sp.print_status(prefix + format_meter(0, total, 0, do_rgb, do_ascii, extra=extra))\n \n start_t = last_print_t = time.time()\n last_print_n = 0\n n = 0\n for obj in iterable:\n yield obj\n # Now the object was created and processed, so we can print the meter.\n n += 1\n if n - last_print_n >= miniters:\n # We check the counter first, to reduce the overhead of time.time()\n cur_t = time.time()\n if cur_t - last_print_t >= mininterval:\n sp.print_status(prefix + format_meter(n, total, cur_t-start_t, do_rgb, do_ascii, extra=extra))\n last_print_n = n\n last_print_t = cur_t\n \n if not leave:\n sp.print_status('')\n sys.stdout.write('\\r')\n else:\n if last_print_n < n:\n cur_t = time.time()\n sp.print_status(prefix + format_meter(n, total, cur_t-start_t, do_rgb, do_ascii, extra=extra))\n file.write('\\n')",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '█' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n Sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n Sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")",
"def _dl_progress_bar(self):\n if not self.show_progress:\n return\n\n if self.file_size:\n ratio = float(self.bytes_read) / self.file_size\n else:\n ratio = 1\n percent = int(ratio * 100)\n\n bar_len = 60\n done = int(bar_len * ratio)\n bar = ('=' * done) + (' ' * (bar_len - done))\n\n progress = '{percent: >3}%: [{bar}]'.format(percent=percent, bar=bar)\n backspace = '\\b' * len(progress)\n print(backspace + '\\r', end='')\n print(progress, end='')",
"def dl_progress(count, block_size, total_size):\n length = 50\n current_size = count * block_size\n done = current_size * length // total_size\n togo = length - done\n prog = \"[\" + done * \"=\" + togo * \"-\" + \"]\"\n sys.stdout.write(prog)\n if(current_size < total_size):\n sys.stdout.write(\"\\r\")\n else:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n filledLength = int(round(barLength * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), decimals)\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('%s [%s] %s%s %s\\r' % (prefix, bar, percents, '%', suffix)),\n sys.stdout.flush()\n if iteration == total:\n print(\"\\n\")",
"def update_progress(job_title, progress):\n \n length = 20 # modify this to change the length\n block = int(round(length*progress))\n msg = \"\\r{0}: [{1}] {2}%\".format(job_title, \"#\"*block + \"-\"*(length-block), round(progress*100, 2))\n if progress >= 1: msg += \" DONE\\r\\n\"\n sys.stdout.write(msg)\n sys.stdout.flush()",
"def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=40):\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def progress_bar(iteration: int, total: int, prefix: str = '',\n suffix: str = '') -> None:\n columns, _ = shutil.get_terminal_size(fallback=(80, 24))\n bar_len = columns - (len(suffix) + 2) - (len(prefix) + 2) - 7\n percent = iteration / total\n fill_len = int(bar_len * percent) + 1\n bar = '=' * fill_len + '-' * (bar_len - fill_len)\n sys.stdout.write(\n '{prefix} [{bar}] {percent}% {suffix}\\r'.format(\n prefix=prefix,\n bar=bar,\n percent=round(percent * 100, 1),\n suffix=suffix\n )\n )\n sys.stdout.flush()",
"def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n import sys\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s ' %\n (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def progress_bar(iteration, total, prefix='', suffix=''):\n terminal_width = shutil.get_terminal_size()[0]\n bar_length = terminal_width - len(prefix) - len(suffix) - 13\n\n if total > 0:\n filled_length = int(round(bar_length * iteration / float(total)))\n percents = round(100.00 * (iteration / float(total)), 2)\n else:\n filled_length = bar_length\n percents = 100\n\n print('{} [{:{fill}<{width}}] {:6.2f}% {}'.format(\n prefix, '#' * filled_length, percents, suffix, width=bar_length, fill='-'),\n end='\\r', flush=True)\n if iteration == total:\n print()",
"def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):\r\n str_format = \"{0:.\" + str(decimals) + \"f}\"\r\n percents = str_format.format(100 * (iteration / float(total)))\r\n filled_length = int(round(bar_length * iteration / float(total)))\r\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\r\n\r\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\r\n\r\n if iteration == total:\r\n sys.stdout.write('\\n')\r\n sys.stdout.flush()",
"def timer(name):\n t0 = time.time()\n print('[%s] in progress' % name)\n yield\n print('[%s] done in %.0f s' %(name, time.time() - t0))",
"def print_progress_bar(iteration,\n total,\n prefix=\"\",\n suffix=\"\",\n decimals=1,\n length=100,\n fill=u\"\\u25A0\"):\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filled_length = int(length * iteration // total)\n bar = fill * filled_length + '-' * (length - filled_length)\n print(\"\\r%s |%s| %s%% %s\" % (prefix, bar, percent, suffix), end=\"\\r\")\n if iteration == total:\n print()",
"def printProgressBar (prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n def progress_hook(count, blockSize, totalSize):\n progress = count * blockSize / totalSize\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(progress * 100)\n filledLength = int(length * progress)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)\n\n return progress_hook",
"def printProgress(iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percent = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '#'* filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '*' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def print_progress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '*' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def progress(count, total, suffix=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', suffix))\n sys.stdout.flush()",
"def progress(iteritem, update=1, stderr=False, start_newline=True):\n if stderr:\n stream = sys.stderr\n else:\n stream = sys.stdout\n start_time = time.time()\n curr_iter = 0\n if start_newline:\n stream.write('\\n')\n\n max_iter = len(iteritem)\n dlen = len(str(max_iter))\n memory = 0\n for idx, item in enumerate(iteritem):\n\n elapsed = int(time.time() - start_time)\n\n curr_iter += 1\n not_update = elapsed % update\n\n if not not_update and elapsed != memory:\n memory = elapsed\n remain = (max_iter - curr_iter) * (curr_iter / elapsed)\n out = '\\r%*d/%*d | Elapsed: %d sec | Remaining: %d sec '\\\n % (dlen, curr_iter, dlen, max_iter, elapsed, remain)\n stream.write(out)\n stream.flush()\n\n yield item\n\n out = '\\r%*d/%*d | Elapsed: %d sec | Remaining: 0 sec '\\\n % (dlen, curr_iter, dlen, max_iter, elapsed)\n stream.write(out)\n stream.flush()",
"def printProgress(iteration, total, prefix = '', suffix = '', decimals = 2, barLength = 100):\n\tfilledLength\t= int(round(barLength * iteration / float(total)))\n\tpercents\t\t= round(100.00 * (iteration / float(total)), decimals)\n\tbar\t\t\t = '#' * filledLength + '-' * (barLength - filledLength)\n\tsys.stdout.write('%s [%s] %s%s %s (%s/%s total)\\r' % (prefix, bar, percents, '%', suffix, iteration, total))\n\tsys.stdout.flush()\n\tif iteration == total:\n\t\tprint(\"\\n\")",
"def next(self):\n if self.skip:\n return\n\n self.counter += 1\n if self.pbar is None and (time.time() - self.start_time) > self.threshold:\n self.pbar = tqdm(total=self.n, desc=self.title, initial=self.counter)\n elif self.pbar is not None:\n self.pbar.update(1)",
"def startprogress(title):\n global progress_x, title_global\n title_global = title\n sys.stdout.write(title + \": [\" + \"-\" * 40 + \"] 00% \")\n sys.stdout.flush()\n progress_x = 0",
"def print_progress(iteration, total, prefix='', suffix='', decimals=1,\n bar_length=100):\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%',\n suffix)),\n\n if iteration == total:\n sys.stdout.write('\\n')\n sys.stdout.flush()",
"def progress(count, total, status=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n \n if count >= total: \n sys.stdout.write('[%s] %s%s ...%s%s\\r' % (bar, percents, '%', status, '\\n'))\n sys.stdout.flush()\n else:\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', status))\n sys.stdout.flush()",
"def progress(count):\n items = range(count)\n\n def process_slowly(item):\n time.sleep(0.002 * random.random())\n\n def filter(items):\n for item in items:\n if random.random() > 0.3:\n yield item\n\n with click.progressbar(items, label='Processing accounts',\n fill_char=click.style('#', fg='green')) as bar:\n for item in bar:\n process_slowly(item)\n\n def show_item(item):\n if item is not None:\n return 'Item #%d' % item\n\n with click.progressbar(filter(items), label='Committing transaction',\n fill_char=click.style('#', fg='yellow'),\n item_show_func=show_item) as bar:\n for item in bar:\n process_slowly(item)\n\n with click.progressbar(length=count, label='Counting',\n bar_template='%(label)s %(bar)s | %(info)s',\n fill_char=click.style(u'█', fg='cyan'),\n empty_char=' ') as bar:\n for item in bar:\n process_slowly(item)\n\n with click.progressbar(length=count, width=0, show_percent=False,\n show_eta=False,\n fill_char=click.style('#', fg='magenta')) as bar:\n for item in bar:\n process_slowly(item)\n\n # 'Non-linear progress bar'\n steps = [math.exp(x * 1. / 20) - 1 for x in range(20)]\n count = int(sum(steps))\n with click.progressbar(length=count, show_percent=False,\n label='Slowing progress bar',\n fill_char=click.style(u'█', fg='green')) as bar:\n for item in steps:\n time.sleep(item)\n bar.update(item)",
"def progress_bar(iteration, total, prefix='', suffix='', decimals=1,\n length=40, fill='#', miss=\".\", end=\"\\r\", stay=True,\n fixed_len=False):\n if fixed_len:\n bar_len = length - len(prefix) - len(suffix)\n else:\n bar_len = length\n\n percent = f\"{100*(iteration/float(total)):.{decimals}f}\"\n filled_length = int(bar_len * iteration // total)\n bar = f\"{fill * filled_length}{miss * (bar_len - filled_length)}\"\n to_print = f\"\\r{prefix} [{bar}] {percent}% {suffix}\"\n print(to_print, end=end)\n\n # Print New Line on Complete\n if iteration >= total:\n if stay:\n print()\n else:\n # clean line given lenght of lase print\n print(\" \"*len(to_print), end=end)",
"def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)",
"def progress_bar(self, count, total, status):\n\n bar_len = 50\n filled_len = int(round(bar_len * count / float(total)))\n\n file_size_bytes = f\"{count:,}/{total:,} Bytes\"\n transfer_percent = round(100.0 * count / float(total), 2)\n file_bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n prefix = f\"[{self.LOGGER.host}:{self.LOGGER.port}]\"\n sys.stdout.write(f\"{prefix} -> |{file_bar}| {file_size_bytes} | {transfer_percent}% | {status}...\\r\")\n sys.stdout.flush()\n\n if count >= total: print()",
"def progressbar(total, alive, desc=None):\n with tqdm(total=total, unit=\"B\", unit_scale=True, mininterval=BAR_MININTERVAL, miniters=BAR_MINITERS, desc=desc,\n unit_divisor=1024, ncols=BAR_NCOLS, postfix={'td': 0, 'tps': 0}) as pbar:\n size = 0\n tmp_latency = '0ms'\n tmp_tps = '0'\n while size < total:\n size = pbar_get_size()\n size = size if size < total else total\n updata = size - pbar.n\n updata = check_value_threshold(updata, 0, total)\n\n pbar.set_description(desc)\n pbar.update(updata)\n td, tps = get_latency_tps()\n if td != tmp_latency or tps != tmp_tps:\n pbar.set_postfix(td=td, tps=tps)\n tmp_latency, tmp_tps = td, tps\n\n if alive.value > 0:\n if alive.value == 1 and not globl.get_value('force_exit').value:\n pbar.update(total - pbar.n)\n break\n time.sleep(BAR_SLEEP_FOR_UPDATE)",
"def call_progress_bar(result_parts, line_no):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n try:\n from tqdm.autonotebook import tqdm as tqdm_notebook\n except ImportError:\n raise ImportError(\"Please pip install tqdm to use the progress bar\")\n from IPython import get_ipython\n\n try:\n cell_no = get_ipython().execution_count\n # This happens if we are not in ipython or jupyter.\n # No progress bar is supported in that case.\n except AttributeError:\n return\n pbar_id = f\"{cell_no}-{line_no}\"\n futures = [\n block\n for row in result_parts\n for partition in row\n for block in partition.list_of_blocks\n ]\n bar_format = (\n \"{l_bar}{bar}{r_bar}\"\n if \"DEBUG_PROGRESS_BAR\" in os.environ\n and os.environ[\"DEBUG_PROGRESS_BAR\"] == \"True\"\n else \"{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}\"\n )\n bar_lock.acquire()\n if pbar_id in progress_bars:\n if hasattr(progress_bars[pbar_id], \"container\"):\n if hasattr(progress_bars[pbar_id].container.children[0], \"max\"):\n index = 0\n else:\n index = 1\n progress_bars[pbar_id].container.children[index].max = progress_bars[\n pbar_id\n ].container.children[index].max + len(futures)\n progress_bars[pbar_id].total = progress_bars[pbar_id].total + len(futures)\n progress_bars[pbar_id].refresh()\n else:\n progress_bars[pbar_id] = tqdm_notebook(\n total=len(futures),\n desc=\"Estimated completion of line \" + str(line_no),\n bar_format=bar_format,\n )\n bar_lock.release()\n\n threading.Thread(target=_show_time_updates, args=(progress_bars[pbar_id],)).start()\n\n modin_engine = Engine.get()\n engine_wrapper = None\n if modin_engine == \"Ray\":\n from modin.core.execution.ray.common.engine_wrapper import RayWrapper\n\n engine_wrapper = RayWrapper\n elif modin_engine == \"Unidist\":\n from modin.core.execution.unidist.common.engine_wrapper import UnidistWrapper\n\n engine_wrapper = UnidistWrapper\n else:\n raise NotImplementedError(\n f\"ProgressBar feature is not supported for {modin_engine} engine.\"\n )\n\n for i in range(1, len(futures) + 1):\n engine_wrapper.wait(futures, num_returns=i)\n progress_bars[pbar_id].update(1)\n progress_bars[pbar_id].refresh()\n if progress_bars[pbar_id].n == progress_bars[pbar_id].total:\n progress_bars[pbar_id].close()"
] | [
"0.711327",
"0.6973596",
"0.68691814",
"0.67567205",
"0.6627047",
"0.658841",
"0.65598375",
"0.6529973",
"0.6528152",
"0.65117425",
"0.65101254",
"0.6501197",
"0.64991385",
"0.64991385",
"0.64561707",
"0.64438474",
"0.6441481",
"0.6441481",
"0.6441481",
"0.6438204",
"0.6436589",
"0.6436589",
"0.6428914",
"0.6416337",
"0.640998",
"0.6408168",
"0.6405037",
"0.6396997",
"0.6376493",
"0.63761175",
"0.6371199",
"0.6367639",
"0.6367639",
"0.6367639",
"0.6367639",
"0.6367639",
"0.6366703",
"0.63512886",
"0.63414484",
"0.6336357",
"0.6332669",
"0.6314102",
"0.6314102",
"0.6314102",
"0.6314102",
"0.63112074",
"0.630782",
"0.6299513",
"0.62994206",
"0.6294411",
"0.6281595",
"0.62748355",
"0.6269343",
"0.6266068",
"0.62652797",
"0.6264236",
"0.62500215",
"0.622003",
"0.62199205",
"0.6216073",
"0.6207713",
"0.62025106",
"0.62020534",
"0.619718",
"0.6196763",
"0.6190527",
"0.61852175",
"0.6169081",
"0.6169081",
"0.6168098",
"0.61665577",
"0.6151653",
"0.61441445",
"0.6142088",
"0.61374956",
"0.61352515",
"0.61220396",
"0.61220396",
"0.6121343",
"0.61173046",
"0.6113985",
"0.6107224",
"0.6104012",
"0.6101822",
"0.6098389",
"0.60869825",
"0.60867965",
"0.6079926",
"0.6071261",
"0.60682535",
"0.6065698",
"0.60625094",
"0.60601944",
"0.6041935",
"0.6004954",
"0.5983523",
"0.5980943",
"0.5980223",
"0.59779435",
"0.5977113"
] | 0.6471772 | 14 |
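Nearly every negative in the record above is a variation of the same console progress bar: compute the completed fraction, build a string of fill characters padded with placeholders, and redraw the line in place with a carriage return. A minimal sketch of that shared pattern, for reference only — the function name, defaults, and fill characters are illustrative and not taken from any single entry:

```python
import sys

def print_progress_bar(iteration, total, prefix="", suffix="", length=40, fill="#"):
    # Completed fraction, guarded so a zero total cannot divide by zero.
    fraction = iteration / total if total else 1.0
    filled = int(length * fraction)
    bar = fill * filled + "-" * (length - filled)
    # "\r" rewinds to the start of the line so the bar redraws in place.
    sys.stdout.write(f"\r{prefix} |{bar}| {100 * fraction:.1f}% {suffix}")
    sys.stdout.flush()
    if iteration >= total:
        sys.stdout.write("\n")  # end with a newline so later output starts clean
```

The entries differ mainly in cosmetics (fill character, bar length, percent formatting) and in edge-case handling such as the zero-total guard above; the redraw-in-place core is the same across all of them.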
Closes any existing progress bar and restarts the timer. | def restart(self):
self.done()
self.counter = 0
self.start_time = time.time() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restart_timer(self):\n self.log.info(\"{} timer restarted ({} seconds)\".format(self.name, self.interval))\n self.count = self.interval / self.sleep_chunk\n if not self.defer and self.interval > 0:\n self._callback()\n if self.start_event.is_set():\n self.reset_event.set()\n else:\n self.start_event.set()",
"def restart(self):\n self._start_time = None\n self.start()",
"def close_progress(self):\r\n\r\n pass",
"def restart(self):\n self.stop()\n self.start()",
"def restart(self):\n self.stop()\n self.start()",
"def stop_timer(self):\r\n self.countdownTimer.stop()",
"def restart(self):\n self.stop()\n self.start(init=False)",
"def restart(self):\n\n self.stop()\n self.start()",
"def close(self):\n if self._timer is not None:\n self._timer.cancel()\n self._timer = None",
"def restart():\n stop()\n start()",
"def timer(self):\n self.time_remaining -= 1\n if self.time_remaining > 0:\n Timer(1, self.timer).start()",
"def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()",
"def restart_motion_timer(self) -> None:\n if \"motion_timer\" in self.handles:\n self.adbase.cancel_timer(self.handles[\"motion_timer\"])\n self.handles.pop(\"motion_timer\")\n self.handles[\"motion_timer\"] = self.adbase.run_in(\n self.disable_area_motion, self.delay_off\n )",
"def restart(self):\n\t\treturn self.reset().start()",
"def reset_timer(self):\r\n self.time_minutes = 0\r\n self.time_seconds = 0",
"def timer_canceled(self, timer):\n try:\n try:\n timer.impltimer.stop()\n del timer.impltimer\n except (AttributeError, TypeError):\n pass\n finally:\n super(Hub, self).timer_canceled(timer)",
"def restart(self):\r\n pass",
"def restart(self):",
"def close(self) -> None:\n\n if not self.simple_tui:\n self.rich_progress_bar.stop()\n\n logging.shutdown()",
"def stop(self):\n self.setWindowTitle(self.name + ': stopped')\n self._timer.stop()",
"def reset_timer():\r\n window.after_cancel(timer)\r\n canvas.itemconfig(timer_text, text=f\"00:00\")\r\n pomodoro_title.config(text=\"Timer\", fg=GREEN)\r\n check_marks.config(text=\"\")",
"def reset_stop_timer(self) -> None: \r\n self.stop_timer = 0",
"def untie(self):\n self.timer_label = None",
"def Reset():\n #if timer.is_running():\n timer.stop()\n global n,message\n global total_stop\n global success_stop\n n = 0\n message = \"0:00.0\"\n total_stop=0\n success_stop=0",
"def restart(self):\n pass",
"def restart(self) -> None:",
"def reset():\n global counter, total_attempts, successful_stops\n timer.stop()\n counter = 0\n total_attempts = 0\n successful_stops = 0",
"def restart(self, delay=None):\n if self._timer:\n self._timer.cancel()\n if not delay:\n delay = self.delay\n self._timer = Timer(delay, self.callback)\n self._timer.daemon = True\n self._timer.start()",
"def stop_timer(self):\n self.end_time = datetime.now()",
"def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()",
"def stop_timer(self):\n self.log.info(\"{} timer stopped ({} seconds)\".format(self.name, self.interval))\n self.start_event.clear()\n # self.count = self.interval / self.sleep_chunk",
"async def kill(self, restart: bool = False) -> None:\n pass",
"def reset():\n global timer, counter, message, attempts, hits, is_running, sucs_message\n timer.stop()\n is_running = False\n timer = simplegui.create_timer(100, tick)\n counter = 0\n attempts = 0\n hits = 0\n sucs_message = \"\"",
"def timer_change(self):\n if self.time < 999:\n self.time += 1\n self.time_lcd.display(self.time)\n else:\n self.timer.stop()",
"def restartComponent(self, opts):\n self.stopComponent(opts)\n return self.startComponent(opts)",
"def timerAction():\n timer = threading.Timer(30.0, timerAction)\n timer.daemon = True\n timer.start()\n save()",
"def restart(self):\n self.gui_block.set()",
"def kill(self):\r\n plt.close(self.fig)",
"def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1",
"def loop_stop(self):\n super(TimerLoop, self).loop_stop()\n self.timer.cancel()\n self.loop_confirm_stopped()",
"def closed(self, info, is_ok):\n\n info.object.timer.Stop()\n return",
"def closed(self, info, is_ok):\n\n info.object.timer.Stop()\n return",
"def done(self):\n if self.pbar is not None:\n self.pbar.close()\n self.pbar = None\n self.counter = 0",
"def cancel(self):\n if self._timer:\n self._timer.cancel()\n self._timer = None\n else:\n raise Exception('Cannot cancel timer. No timer started.')",
"def delay_close(self):\n log.debug(\"Trigger delay close\")\n self.close_timer.start(9000)",
"def update_timer(self):\r\n frmt_time = \"%d:%02d\" % (self.time_minutes, self.time_seconds)\r\n self.time_seconds += 1\r\n if self.time_seconds == 60:\r\n self.time_seconds = 0\r\n self.time_minutes += 1\r\n\r\n self.mainWidget.statusLabel.setText(\"{} {} --- {} {} --- {}\".format(self.elapsedTimeString,\r\n frmt_time,\r\n self.freeSpaceString,\r\n get_free_space(self.config.videodir),\r\n self.recordingString))",
"def update_timer(self):\n # Keep working\n if self.current_status == 0 and self.timer.time_left:\n time_left = seconds_to_minutes(self.timer.time_left)\n time_str = 'Pomodoro4linux - %02d:%02d' % (time_left)\n\n self.status_icon.set_tooltip(time_str)\n\n # Go get some coffee\n elif self.current_status == 0 and not self.timer.time_left:\n if self.break_count < self.timer.max_break_count:\n self.image.set_from_file(REST_ICON)\n self.break_count += 1\n self.warn_coffee_break()\n else:\n self.image.set_from_file(LONG_REST_ICON)\n self.break_count = 0\n self.warn_long_break()\n\n # Keep breaking\n elif self.current_status == 1 and self.timer.time_left:\n self._set_icon()\n time_left = seconds_to_minutes(self.timer.time_left)\n if self.break_count == 0:\n label_str = 'Long Break\\nRest for %02d:%02d minutes.' % \\\n (time_left)\n else:\n label_str = 'Coffee Break\\nRest for %02d:%02d minutes. (%d/%d)' % \\\n (time_left[0],time_left[1],self.break_count,self.timer.max_break_count)\n self.dialog.set_markup(label_str)\n\n # Come back to work, lazy boy\n elif self.current_status == 1 and not self.timer.time_left:\n label_str = 'You should be working now!'\n self.image.set_from_file(WORK_ICON)\n self.dialog.set_markup(label_str)\n self.pause_timer()\n self.current_status = 0\n self.timer.time_left = self.timer.work_time\n\n return True",
"def cancel_multi_kill_timer(self) -> None:\n self._multi_kill_timer = None",
"def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()",
"def timer_update(self):\n self.state_time_remain = max(0, self.state_time_remain - self.timer_interval / 1000)\n\n QMetaObject.invokeMethod(self.progress_label, \"setText\", Qt.QueuedConnection,\n Q_ARG(str, \"{}s ({} / {})\".format(\"{0:.1f}\".format(self.state_time_remain),\n self.current_rep, self.num_reps)))\n\n if self.state_time_remain == 0:\n self.timer.stop()\n self.state_end_event.stateEnded.emit()",
"def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')",
"def reset_progress(self):\n self.state = \"\"",
"def restart(self):\n self.idx = 0",
"def reset(self):\n self.stop()\n self.start()",
"def restart(self):\n self.stop()\n self.IA.restart()\n self._desintegrator = Desintegrator(background=self.IA.BG_MAP)\n self._desintegrator.increment_maximum_x_in(30)\n self.speed = 3.0\n self._run = True",
"def resume(self) -> None:\n if not self.started:\n TimerError(\"A timer should be started before to be resumed\")\n super().start()",
"def Restart(self, length=None, rate=None, window=None):\n self.Stop()\n self.Start(length, rate, window)",
"def restart(self):\n\n #Kill processes\n print('Restarting scan...... \\n')\n self.kill()\n\n #Delete crawler\n del self.crawler\n self.crawler = self.generate_crawler()\n\n #Give ourselves a second\n time.sleep(2)",
"def restart(self):\n\t\tself.destroy()\n\t\t\n\t\tself.resetBoard()\n\t\tself.i_play_again = 0\n\t\tself.adversary_play_again = 0\n\t\tself.isMyTurn = False\n\n\t\tself.initGame(self.parent)\n\n\t\t# Update the screen\n\t\tself.pack()",
"def reset(self):\n self.restart()\n self.cycles = 0",
"def restart():\n msg = messagebox.showinfo('YES!', \"You're Right\")\n window.destroy()\n game()",
"def restart_from_helper ( self, ):\r\n self.no_helper_restarts += 1\r\n self.logger.info( \"restart_from_helper\" )\r\n\r\n self.restart()",
"def timer(self):\n if self.board.reset_timer:\n self.time_count.set(\"000\")\n self.time_widget.config(text=self.time_count.get())\n return 0\n elif self.board.stop_timer:\n return 0\n now_time = time.time()\n self.board.time = now_time - self.board.last_frame_time\n self.time_count.set(self.numToStrLabel(int(self.board.time)))\n self.time_widget.config(text=self.time_count.get())\n self.board.time = self.board.last_frame_time\n self.after(50, self.timer)",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def reset_counter(self) -> None:",
"def stop(self):\n self._refresh_job.cancel()\n super().stop()",
"def close(self):\n\n self.en_time = time.strftime('%H:%M %A %d %B')\n self.is_active = False",
"def cancel(self):\n if self.__watcher is not None:\n self.__cancel_task()\n self.__clear_running_state()\n self.setStatusMessage(\"Cancelled\")\n self.summary_text.setText(\n \"<div>Cancelled<br/><small>Press 'Reload' to try again</small></div>\"\n )",
"def refresh(self) :\n if not self.running:\n self.running = True\n self.strip.show()\n self.running = False\n self.refreshTimer.expired = True\n self.refreshTimer.isrunning = False",
"def _close_if_complete(self):\n if self.progress_var.get()>=100:\n # delete the variable trace (necessary?)\n #self.progress_var.trace_vdelete('w',self.progress_trace_name)\n\n self._close(final_message=\"Time %s: Finished %s\"%(self.sim.timestr(),\n self.timer.func.__name__))",
"def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')",
"def reset_handler():\r\n \r\n global elapsed_time\r\n global rounds\r\n global score\r\n \r\n # In the Video Lecture the Reset Button stops the time\r\n # https://class.coursera.org/interactivepython-005/lecture/29\r\n timer.stop()\r\n \r\n elapsed_time = 0\r\n rounds = 0\r\n score = 0",
"def terminate(self):\n self.stop_timer()\n self.terminate_event.set()\n self.log.info(self.name + \" timer terminated\")",
"def set_stop(self):\n self.timer.stop=True\n final_message = \"Time %s: Interrupted %s\"%(self.sim.timestr(),\n self.timer.func.__name__)\n self._close(final_message)",
"def timer(self, seconds=80):\n try:\n if self.to_stop or seconds <= 0: # if the time is up or everyone already guessed.\n if not self.to_stop:\n self.server_socket.send('end;'.encode())\n self.to_stop = True\n if self.game_number == 3:\n ending_label = Label(self.root2,\n text=\"thank you for playing!\\nplease register again to\\nplay another game\",\n font=('bubble', 15), bg='white')\n ending_label.place(x=200, y=250)\n ending_label.after(5000, self.root2.destroy)\n else:\n next_round_label = Label(self.root2, text=\"next round starts in a bit\", font=('bubble', 15))\n next_round_label.pack(padx=50, pady=20, side=TOP)\n self.root2.destroy()\n else:\n timer_label = Label(self.root2, text=str(seconds), font=('bubble', 15), bg='white', width=5)\n timer_label.place(x=235, y=40)\n self.root2.after(1000, lambda: self.timer(seconds - 1))\n\n except:\n self.timer(0)\n # self.clear_screen()\n # next_round_label = Label(self.root2, text=\"next round starts in a bit\", font=('bubble', 15))\n # next_round_label.pack(padx=50, pady=20, side=TOP)\n # self.to_stop = True\n # self.root2.after(5000, self.restart())",
"def reset_timer():\n resetTimer = time.time()\n target_time.clear()\n target_time.append(resetTimer)",
"def close(self):\n #title()\n self.experiment.pause = True\n if self.running:\n self.running = False\n\n self._unregisterCallbacks()",
"def stop( self ):\n self.data_source.si.daqStop()\n self.timer.stop()\n \n #re-enable the play button\n self.play_button.setEnabled(True)\n self.stop_button.setEnabled(False)\n self.spinbox_timestep.setEnabled(True)",
"def stop():\n global total_attempts, successful_stops, running\n timer.stop()\n running = False\n if running == False:\n if counter % 10 == 0 and counter != 0:\n successful_stops += 1\n total_attempts += 1\n elif counter != 0:\n total_attempts += 1",
"def reset_and_stop(self):\n self.enabled = False\n self.start_time = None",
"def restart_serial(self):\n self.ser.close()\n self.ser.open()\n #assert self.test_cmd(False)",
"def on_Start_CWA_button_clicked(self):\n self.timer2.start(500)",
"def start_countdown(self):\n top2=tk.Toplevel(self.root, bg=\"lightyellow\")\n top2.geometry(\"485x753+900+300\")\n ''' BOOSTERS '''\n # Wealth Clock\n self.label_wealth = tk.IntVar()\n self.label_wealth.set(self.ctr)\n tk.Label(top2, text=\"Wealth Clock\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=0)\n tk.Label(top2, textvariable=self.label_wealth, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=0)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetWealth).grid(column=2, row=0)\n # Field Boost\n self.label_field = tk.IntVar()\n self.label_field.set(self.ctr)\n tk.Label(top2, text=\"Field Boost\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=1)\n tk.Label(top2, textvariable=self.label_field, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=1)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetField).grid(column=2, row=1)\n # Blue Field Boost\n self.label_bluefield = tk.IntVar()\n self.label_bluefield.set(self.ctr)\n tk.Label(top2, text=\"Blue Field Boost\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=2)\n tk.Label(top2, textvariable=self.label_bluefield, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=2)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetBlueField).grid(column=2, row=2)\n # Red Field Boost\n self.label_redfield = tk.IntVar()\n self.label_redfield.set(self.ctr)\n tk.Label(top2, text=\"Red Field Boost\", bg=\"lightblue\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=3)\n tk.Label(top2, textvariable=self.label_redfield, bg=\"lightblue\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=3)\n tk.Button(top2, text=\"Restart\", bg=\"lightblue\", width=10, font=(\"Verdans\", 10), command=self.resetRedField).grid(column=2, row=3)\n ''' DISPENSERS '''\n # Star Area Jelly Dispenser\n self.label_jelly = tk.IntVar()\n self.label_jelly.set(self.ctr)\n tk.Label(top2, text=\"Royal Jelly\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=10)\n tk.Label(top2, textvariable=self.label_jelly, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=10)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetJelly).grid(column=2, row=10)\n # Ant Pass\n self.label_ant = tk.IntVar()\n self.label_ant.set(self.ctr)\n tk.Label(top2, text=\"Ant Pass\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=11)\n tk.Label(top2, textvariable=self.label_ant, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=11)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetAnt).grid(column=2, row=11)\n # Blueberry Dispenser\n self.label_blueberry = tk.IntVar()\n self.label_blueberry.set(self.ctr)\n tk.Label(top2, text=\"Blueberry Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=12)\n tk.Label(top2, textvariable=self.label_blueberry, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=12)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetBlueberry).grid(column=2, row=12)\n # Strawberry Dispenser\n self.label_strawberry = tk.IntVar()\n self.label_strawberry.set(self.ctr)\n tk.Label(top2, 
text=\"Strawberry Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=13)\n tk.Label(top2, textvariable=self.label_strawberry, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=13)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetStrawberry).grid(column=2, row=13)\n # Honey Dispenser\n self.label_honey = tk.IntVar()\n self.label_honey.set(self.ctr)\n tk.Label(top2, text=\"Honey Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=14)\n tk.Label(top2, textvariable=self.label_honey, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=14)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetHoney).grid(column=2, row=14)\n # Treat Dispenser\n self.label_treat = tk.IntVar()\n self.label_treat.set(self.ctr)\n tk.Label(top2, text=\"Treat Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=15)\n tk.Label(top2, textvariable=self.label_treat, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=15)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetTreat).grid(column=2, row=15)\n # Glue Dispenser\n self.label_glue = tk.IntVar()\n self.label_glue.set(self.ctr)\n tk.Label(top2, text=\"Glue Dispenser\", bg=\"lightgreen\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=16)\n tk.Label(top2, textvariable=self.label_glue, bg=\"lightgreen\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=16)\n tk.Button(top2, text=\"Restart\", bg=\"lightgreen\", width=10, font=(\"Verdans\", 10), command=self.resetGlue).grid(column=2, row=16)\n ''' MOBS '''\n self.label_ladybug= tk.IntVar()\n self.label_ladybug.set(self.ctr)\n tk.Label(top2, text=\"Lady Bug\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=20)\n tk.Label(top2, textvariable=self.label_ladybug, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=20)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetLadybug).grid(column=2, row=20)\n self.label_rhino= tk.IntVar()\n self.label_rhino.set(self.ctr)\n tk.Label(top2, text=\"Rhino Beetle\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=21)\n tk.Label(top2, textvariable=self.label_rhino, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=21)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetRhino).grid(column=2, row=21)\n self.label_spider= tk.IntVar()\n self.label_spider.set(self.ctr)\n tk.Label(top2, text=\"Spider\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=22)\n tk.Label(top2, textvariable=self.label_spider, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=22)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetSpider).grid(column=2, row=22)\n self.label_mantis= tk.IntVar()\n self.label_mantis.set(self.ctr)\n tk.Label(top2, text=\"Mantis\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=23)\n tk.Label(top2, textvariable=self.label_mantis, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=23)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetMantis).grid(column=2, row=23)\n self.label_scorpion= tk.IntVar()\n self.label_scorpion.set(self.ctr)\n 
tk.Label(top2, text=\"Scorpion\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=24)\n tk.Label(top2, textvariable=self.label_scorpion, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=24)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetScorpion).grid(column=2, row=24)\n self.label_werewolf= tk.IntVar()\n self.label_werewolf.set(self.ctr)\n tk.Label(top2, text=\"Werewolf\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=25)\n tk.Label(top2, textvariable=self.label_werewolf, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=25)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetWerewolf).grid(column=2, row=25)\n self.label_snail= tk.IntVar()\n self.label_snail.set(self.ctr)\n tk.Label(top2, text=\"Stump Snail\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=26)\n tk.Label(top2, textvariable=self.label_snail, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=26)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetSnail).grid(column=2, row=26)\n self.label_cavemonster= tk.IntVar()\n self.label_cavemonster.set(self.ctr)\n tk.Label(top2, text=\"Cave Monster\", bg=\"red\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=27)\n tk.Label(top2, textvariable=self.label_cavemonster, bg=\"red\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=27)\n tk.Button(top2, text=\"Restart\", bg=\"red\", width=10, font=(\"Verdans\", 10), command=self.resetCavemonster).grid(column=2, row=27)\n ''' BOSSES '''\n self.label_king = tk.IntVar()\n self.label_king.set(self.ctr)\n tk.Label(top2, text=\"King Beetle\", bg=\"maroon\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=30)\n tk.Label(top2, textvariable=self.label_king, bg=\"maroon\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=30)\n tk.Button(top2, text=\"Restart\", bg=\"maroon\", width=10, font=(\"Verdans\", 10), command=self.resetKing).grid(column=2, row=30)\n self.label_tunnel = tk.IntVar()\n self.label_tunnel.set(self.ctr)\n tk.Label(top2, text=\"Tunnel Bear\", bg=\"maroon\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=31)\n tk.Label(top2, textvariable=self.label_tunnel, bg=\"maroon\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=31)\n tk.Button(top2, text=\"Restart\", bg=\"maroon\", width=10, font=(\"Verdans\", 10), command=self.resetTunnel).grid(column=2, row=31)\n self.label_stick = tk.IntVar()\n self.label_stick.set(self.ctr)\n tk.Label(top2, text=\"Stick Bug\", bg=\"maroon\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=32)\n tk.Label(top2, textvariable=self.label_stick, bg=\"maroon\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=32)\n tk.Button(top2, text=\"Restart\", bg=\"maroon\", width=10, font=(\"Verdans\", 10), command=self.resetStick).grid(column=2, row=32)\n ''' QUESTS '''\n self.label_brownbear = tk.IntVar()\n self.label_brownbear.set(self.ctr)\n tk.Label(top2, text=\"Brown Bear Quest\", bg=\"teal\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=40)\n tk.Label(top2, textvariable=self.label_brownbear, bg=\"teal\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=40)\n tk.Button(top2, text=\"Restart\", bg=\"teal\", width=10, font=(\"Verdans\", 10), command=self.resetBrownbear).grid(column=2, row=40)\n self.label_blackbear = tk.IntVar()\n self.label_blackbear.set(self.ctr)\n tk.Label(top2, text=\"Black Bear Quest\", bg=\"teal\", 
width=20, font=(\"Verdans\", 15)).grid(column=0, row=41)\n tk.Label(top2, textvariable=self.label_blackbear, bg=\"teal\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=41)\n tk.Button(top2, text=\"Restart\", bg=\"teal\", width=10, font=(\"Verdans\", 10), command=self.resetBlackbear).grid(column=2, row=41)\n ''' QUESTS '''\n self.label_honeystorm = tk.IntVar()\n self.label_honeystorm.set(self.ctr)\n tk.Label(top2, text=\"Honey Storm\", bg=\"yellow\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=50)\n tk.Label(top2, textvariable=self.label_honeystorm, bg=\"yellow\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=50)\n tk.Button(top2, text=\"Restart\", bg=\"yellow\", width=10, font=(\"Verdans\", 10), command=self.resetHoneystorm).grid(column=2, row=50)\n self.label_sproutsummoner = tk.IntVar()\n self.label_sproutsummoner.set(self.ctr)\n tk.Label(top2, text=\"Sprout Summoner\", bg=\"yellow\", width=20, font=(\"Verdans\", 15)).grid(column=0, row=51)\n tk.Label(top2, textvariable=self.label_sproutsummoner, bg=\"yellow\", width=15, font=(\"Verdans\", 15)).grid(column=1, row=51)\n tk.Button(top2, text=\"Restart\", bg=\"yellow\", width=10, font=(\"Verdans\", 10), command=self.resetSproutsummoner).grid(column=2, row=51)\n\n if self.ctr > 0:\n self.update()\n else:\n self.top2.destroy()",
"def reset(self):\n self.cumtime = 0\n self.start_time = self.time()",
"def restart(self):\n self.points_arr.append(self.click_count)\n self.grid.destroy()\n self.click_count = 0\n self.ranking_label.destroy()\n self.ranking_box.destroy()\n self.ranking_box = Gtk.Grid()\n self.vbox.add(self.ranking_box)\n self.ranking_panel()\n self.point_label_score.set_markup(\"<b>0</b>\")\n self.point_label_score.show_all()\n self.grid = BallsGrid(self.rows, self.cols)\n self.create_grid()\n self.grid.show()",
"def stop():\n if timer.is_running():\n timer.stop()\n global tries, wins, winstreak, losestreak, mood, scorepos\n tries += 1\n if current % 10 == 0:\n wins += 1\n winstreak += 1\n losestreak = 0\n mood = goodmood(winstreak)\n else:\n winstreak = 0\n losestreak += 1\n mood = badmood(losestreak)\n if tries > 9:\n scorepos = (241, 140)\n if wins > 9:\n scorepos = (228, 140)",
"def stop(self):\n self.close.set()",
"def endprogress():\n global title_global\n sys.stdout.write(\"\\r\" + title_global + \": [\" +\"#\" * 40 + \"]100% -- Done! \\n\")\n sys.stdout.flush()",
"def timer_ffmpeg_process_timeout():\n try:\n if not self.ffmpeg_process_ps.is_alive():\n timer_ffmpeg_process.stop()\n self.w.hide()\n del (self.w)\n self.ffmpeg_process_ps = None\n except:\n pass",
"def restart(self):\n self.main_grid_values = [\n [0] * self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)\n ]\n\n self.score_value.set('0')\n self.add_two()\n self.add_two()\n self.update_grid()\n\n self.bind('<{}>'.format(self.controller.slide_left_control), self.push_left)\n self.bind('<{}>'.format(self.controller.slide_right_control), self.push_right)\n self.bind('<{}>'.format(self.controller.slide_up_control), self.push_up)\n self.bind('<{}>'.format(self.controller.slide_down_control), self.push_down)\n\n self.game_over_button.destroy()",
"def reset(self):\r\n self.progress = self._get_progress(self.start)\r\n return self",
"def restart_game(self):\n self.play()",
"def close(self):\n\t\tttotal = time.time() - self.stats['startloop'] - self.stats['waittime']\n\t\tapDisplay.printColor(\"COMPLETE LOOP:\\t\"+apDisplay.timeString(ttotal)+\n\t\t\t\" for \"+str(self.stats[\"count\"]-1)+\" series\",\"green\")\n\t\tappionScript.AppionScript.close(self)",
"def start_timer():\r\n global reps\r\n reps += 1\r\n \r\n # convert minutes to seconds\r\n work_sec = WORK_MIN * 60\r\n short_break_sec = SHORT_BREAK_MIN * 60\r\n long_break_sec = LONG_BREAK_MIN * 60\r\n \r\n # countdown session types\r\n if reps % 2 == 0:\r\n pomodoro_title.config(text=\"Break\", fg=PINK)\r\n count_down(short_break_sec)\r\n elif reps % 8 == 0:\r\n pomodoro_title.config(text=\"Break\", fg=RED)\r\n count_down(long_break_sec)\r\n else:\r\n pomodoro_title.config(text=\"Work\", fg=GREEN)\r\n count_down(work_sec)",
"def _async_cancel_timer(self) -> None:\n if self._timer:\n self._timer.cancel()\n self._timer = None",
"def stop():\n if(timer.is_running() == True):\n timer.stop()\n global tries\n tries += 1\n if (current % 10 == 0):\n global wins\n wins += 1",
"def stop(self):\n\t\tif self.__start_time is None:\n\t\t\traise TimerError(f\"Timer is not running, so it can't be stopped. Use .start to start the timer.\")\n\n\t\telapsed_time = time.perf_counter() - self.__start_time\n\n\t\tself.__start_time = None",
"def close(self):\n self._progress.close()\n self._fd.close()",
"def next_timer(bot, chat_id, message_id):\n\n bot_collection[chat_id].timers.current_timer.cancel()\n bot_settings_set_nul(chat_id)\n\n start_timer(bot, chat_id, message_id)",
"def close(self):\n self._normal_close = True\n\n self.cancel()"
] | [
"0.66411126",
"0.6284358",
"0.62715745",
"0.62477195",
"0.62477195",
"0.6240947",
"0.62338436",
"0.62116516",
"0.6179327",
"0.6083561",
"0.6083475",
"0.60694265",
"0.59795976",
"0.5975325",
"0.59243655",
"0.58290434",
"0.5823037",
"0.57699066",
"0.5765696",
"0.5754959",
"0.5740772",
"0.57318693",
"0.5721797",
"0.5711136",
"0.571063",
"0.5706461",
"0.56756395",
"0.56723636",
"0.5670013",
"0.5654577",
"0.56222117",
"0.5594648",
"0.55894506",
"0.5570533",
"0.55668515",
"0.555688",
"0.55152255",
"0.55151755",
"0.5509993",
"0.5503385",
"0.5499825",
"0.5499825",
"0.5498909",
"0.54896003",
"0.5469842",
"0.5464915",
"0.54463506",
"0.5435738",
"0.5432102",
"0.541949",
"0.5418106",
"0.53965104",
"0.53921276",
"0.5367893",
"0.5365183",
"0.5355815",
"0.53542566",
"0.5352399",
"0.5351977",
"0.53467",
"0.5343849",
"0.53362006",
"0.53211004",
"0.53159803",
"0.53049856",
"0.5303275",
"0.5296731",
"0.52952665",
"0.52894086",
"0.52837235",
"0.52792275",
"0.5275807",
"0.5275384",
"0.5274414",
"0.5273862",
"0.527155",
"0.5264417",
"0.5251891",
"0.52415746",
"0.5240847",
"0.5239843",
"0.52357453",
"0.5206946",
"0.52044463",
"0.5193931",
"0.5187112",
"0.51810294",
"0.5176129",
"0.51682436",
"0.5164255",
"0.51624465",
"0.51585585",
"0.51469624",
"0.5146579",
"0.51411736",
"0.5140453",
"0.51365423",
"0.5136018",
"0.51106584",
"0.51059586"
] | 0.6867762 | 0 |
Advances the progress bar by one step. If the bar is visible, the new progress is shown; otherwise the count is tracked in the background. If the time threshold has passed and the bar should appear, this method creates it. | def next(self):
if self.skip:
return
self.counter += 1
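        # Create the bar lazily: it appears only after the time threshold has elapsed, so short-lived loops never flash a bar.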
if self.pbar is None and (time.time() - self.start_time) > self.threshold:
self.pbar = tqdm(total=self.n, desc=self.title, initial=self.counter)
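        # Bar already visible: advance it by one step.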
elif self.pbar is not None:
self.pbar.update(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step",
"def notify_progress(self, ratio):\n self._progress_bar += ratio\n while self._progress_bar > self._offset_bar:\n self._offset_bar += 0.01\n self._progress_window.progress(100 * self._progress_bar)\n # print(100 * self._progress_bar)",
"def _show_time_updates(p_bar):\n while p_bar.total > p_bar.n:\n time.sleep(1)\n if p_bar.total > p_bar.n:\n p_bar.refresh()",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def incProgress(self, val):\n\n if val is not None:\n self._progressBar.show()\n self._progressBar.setTextVisible(True)\n self.progress = self.progress + val\n try:\n self._progressBar.setValue(self.progress)\n qApp.processEvents()\n except:\n pass\n else:\n self._progressBar.setTextVisible(False)\n self._progressBar.hide()\n self._progressBar.reset()\n\n if self.isHidden is True:\n self.isHidden = False\n self.show_()",
"def increment(self, length):\r\n self.progress_bar.update(length)",
"def ad_step_to_progress_bar(self, n):\r\n self.progress_step += n\r\n self.progress[\"value\"] = self.progress_step\r\n self.progress.update_idletasks()",
"def increase_progress(self, value):\r\n\r\n pass",
"def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()",
"def update_progressbar(self, count, value):\n self.status(\"Progress %s/%s\" % (value, count))",
"def set_progress(self, progress: float):",
"def updateAmount(self, newAmount = 0):\n if newAmount and self.starting_amount is None:\n self.starting_amount = newAmount\n self.starting_time = time.time()\n if newAmount < self.min: newAmount = self.min\n if newAmount > self.max: newAmount = self.max\n self.prev_amount = self.amount\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = float(self.amount - self.min)\n percentDone = (diffFromMin / float(self.span)) * 100.0\n percentDone = int(round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2\n numHashes = (percentDone / 100.0) * allFull\n numHashes = int(round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special cases for\n # empty and full\n\n if numHashes == 0:\n self.progBar = \"[>%s]\" % (' '*(allFull-1))\n elif numHashes == allFull:\n self.progBar = \"[%s]\" % ('='*allFull)\n else:\n self.progBar = \"[%s>%s]\" % ('='*(numHashes-1),\n ' '*(allFull-numHashes))\n \n if self.show_percentage:\n # figure out where to put the percentage, roughly centered\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = str(percentDone) + \"%\"\n else:\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = '%s/%s' % (self.amount, self.span)\n # slice the percentage into the bar\n self.progBar = ''.join([self.progBar[0:percentPlace], percentString,\n self.progBar[percentPlace+len(percentString):]\n ])\n if self.starting_amount is not None:\n amount_diff = self.amount - self.starting_amount\n if amount_diff:\n self.prev_time = self.current_time\n self.current_time = time.time()\n elapsed = self.current_time - self.starting_time\n eta = elapsed * (self.max - self.amount)/float(amount_diff)\n self.progBar += ' ETA:'+time_to_str(eta)",
"def progress_update(self):\n self._window.scan_progress.setValue(self.scan_progress)",
"def show_progressbar(self):\n\n self.progressframe = tk.Toplevel(self, background='white')\n self.progressframe.lift()\n self.progressframe.focus_force()\n self.progressframe.grab_set()\n self.progressframe.resizable(False, False)\n self.progressframe.minsize(width=200, height=50)\n progressbar = ttk.Progressbar(self.progressframe, mode='indeterminate', length=200)\n progressbar.pack(pady=(10, 0), padx=5)\n progressbar.start(10)\n progresslabel = tk.Label(self.progressframe, text='Generating BOM Comparison', background='white')\n progresslabel.pack(pady=(0, 10))",
"def advance():\n pg = ppv.progress + random.random() / 20\n if pg < 1:\n ppv.progress = pg\n ui.delay(advance, random.random() / 2)\n else:\n ppv.progress = 1",
"def tick(self):\n self.current_count += 1\n self.progress(self.current_count)",
"def on_timeout(self, data):\n new_value = self.progressbar.get_fraction() + 0.01\n\n if new_value > 1:\n return False\n\n self.progressbar.set_fraction(new_value)\n return True",
"def progress(self):\n if self.running:\n pass\n else:\n self._engine.progress()",
"def progress(self, value):\n self.step = float(value)\n self._draw()",
"def update_progress(progress, time):\n barLength = 30 # Modify this to change the length of the progress bar\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n block = int(round(barLength * progress))\n text = \"\\rPercent: [{0}] {1:.2f}% --- {3:.2f} s. remain. {2}\".format(\n \"=\" * (block - 1) + \">\" + \" \" * (barLength - (block - 1) - 1), progress * 100, status, time)\n sys.stdout.write(text)\n sys.stdout.flush()",
"def time_remaining(self):\n elapsed_time = time.time() - self.start_time\n self.progressbar['value'] = progressbar.current\n time_remaining = round((1 - progressbar.current) * elapsed_time)\n # Disabled for Demo due to confusion\n # if time_remaining < 60:\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining} seconds')\n # elif 3600 > time_remaining > 60:\n # time_remaining = round(time_remaining / 60)\n # self.progress_label.config(text=f'Estimated TIme Remaining: {time_remaining} minutes')\n # elif time_remaining > 3600:\n # time_remaining = dt.timedelta(seconds=time_remaining)\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining}')",
"async def display_progress_bar(self, is_displayed):\n\t\tprint(\"DISPLAY PROGRESS BAR: \" + str(is_displayed))\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"display_progress_bar\": is_displayed\n\t\t\t}\n\t\t)",
"def print_progress_bar(self, iter_num, start_time):\n iteration = iter_num + 1\n prefix = \"Progress: \"\n length = 50\n fill = '█'\n percent = (\"{0:.\" + str(1) + \"f}\").format(100 *\n (iteration / float(self.num_games)))\n exact_progress = \"{}/{}\".format(iteration, self.num_games)\n filled_length = int(length * iteration // self.num_games)\n total_time = int(time()-start_time)\n time_remaining = (time() - start_time)/(float(iter_num)+0.1)\n time_remaining = str(int(time_remaining*(self.num_games-iter_num)))\n bars = fill * filled_length + '-' * (length - filled_length)\n\n print('\\r%s |%s| (%s) %s%% | ETA: %ss (%ss)\\t' %\n (prefix, bars, exact_progress,\n percent, time_remaining,\n total_time), end='\\r')\n\n # Print New Line on Complete\n if iteration >= self.num_games:\n print(\"\\r\\n\\r\\n\")",
"def updateProgress(self, msg):\n self.count += 1\n \n if self.count >= 20:\n self.Destroy()\n \n self.progress.SetValue(self.count)",
"def status_notify(self, fraction, progress_text, status_text=''):\n gtk.gdk.threads_enter()\n try: # coupling...\n if self._pdialog.top_widget:\n self._pdialog.progressbar.set_fraction(fraction)\n self._pdialog.progressbar.set_text(progress_text)\n self._pdialog.statustext.set_markup('<i>%s</i>' % status_text)\n finally:\n gtk.gdk.threads_leave()",
"def add_progressbar(self):\n\n self._progressBar = QProgressBar(self._splash)\n self._progressBar.setGeometry(self._splash.width() / 10, 8 * self._splash.height() / 10,\n 8 * self._splash.width() / 10, self._splash.height() / 10)\n self._progressBar.hide()",
"def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)",
"def trackProgress(target, message=\"\", updateRate=1):\n\n global _activeBar\n if _activeBar is not None:\n logging.getLogger(__name__).error(\"Cannot construct a new progress bar, \"\n \"another one is already active.\")\n raise RuntimeError(\"A progress bar is already active.\")\n\n try:\n _activeBar = makeProgressbar(target, message, updateRate)\n yield _activeBar\n _activeBar.finalize() # success => clean up\n\n except:\n # failure => leave bar visible and advance a line\n sys.stderr.write(\"\\n\")\n raise\n\n finally:\n # in any case the bar is now done\n _activeBar = None",
"def draw_progress_bar(self, col, row, size, ratio, color=GREEN):\n npixels = size * ratio\n for n in range(int(npixels)):\n self.putpixel(col + n, row, color)\n # flash an addiotional pixel as fraction indicator\n if npixels - int(npixels) > .5 and self.nframes & 1 == 0:\n self.putpixel(col + int(npixels), row, color)",
"def _set_progress(self, value: float) -> None:\n\n self._progress = round(value, 4)\n self._render(self._old_value, self._value, value)",
"def _progress(self, walker):\n\n raise NotImplementedError",
"def increment(self):\n self.pos += 1\n if self.pos == len(self.progress) - 1:\n self.pos = 0",
"def ffmpeg_progress_hook(self, progress: int) -> None:\n\n if self.parent.simple_tui and not self.parent.web_ui:\n self.progress = 50\n else:\n self.progress = 50 + int(progress * 0.45)\n\n self.update(\"Converting\")",
"def _progress_bar(free_key, capacity_key, result_key, unit):\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'.format(\n capacity - free, free, capacity, unit=unit))\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'.format(\n free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert 0 <= free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'.format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )",
"def start(self):\n super(ProgressBar, self).start()\n self.display()",
"def wait_progress(self):\n pass",
"def wait_progress(self):\n pass",
"def update_stats(self, step):\n self.dynamic.progressBar.setValue(\n float(step) / float(self.meas_max_volt / self.steps) * 100\n )",
"def progress(self, progress):\n\n self._progress = progress",
"def progress(self, progress):\n\n self._progress = progress",
"def update_amount(self, newAmount=0, suffix=''):\n if newAmount < self.min:\n newAmount = self.min\n if newAmount > self.max:\n newAmount = self.max\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = np.float(self.amount - self.min)\n percentDone = (diffFromMin / np.float(self.span)) * 100.0\n percentDone = np.int(np.round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2 - 18\n numHashes = (percentDone / 100.0) * allFull\n numHashes = np.int(np.round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special cases for\n # empty and full\n if numHashes == 0:\n self.prog_bar = '%s[>%s]' % (self.prefix, ' '*(allFull-1))\n elif numHashes == allFull:\n self.prog_bar = '%s[%s]' % (self.prefix, '='*allFull)\n if suffix:\n self.prog_bar += ' %s' % (suffix)\n else:\n self.prog_bar = '[%s>%s]' % ('='*(numHashes-1), ' '*(allFull-numHashes))\n # figure out where to put the percentage, roughly centered\n percentPlace = int(len(self.prog_bar)/2 - len(str(percentDone)))\n percentString = ' ' + str(percentDone) + '% '\n # slice the percentage into the bar\n self.prog_bar = ''.join([self.prog_bar[0:percentPlace],\n percentString,\n self.prog_bar[percentPlace+len(percentString):]])\n # prefix and suffix\n self.prog_bar = self.prefix + self.prog_bar\n if suffix:\n self.prog_bar += ' %s' % (suffix)\n # time info - elapsed time and estimated remaining time\n if percentDone > 0:\n elapsed_time = time.time() - self.start_time\n self.prog_bar += '%5ds / %5ds' % (int(elapsed_time),\n int(elapsed_time * (100./percentDone-1)))",
"def status(self) -> NoReturn:\n\n curr_status= self.percent_done()\n while(curr_status < 100):\n\n update_status(name=self.name, status=curr_status)\n time.sleep(0.5)\n\n curr_status = self.percent_done()\n\n update_status(name=self.name, status=curr_status)",
"def _dl_progress_bar(self):\n if not self.show_progress:\n return\n\n if self.file_size:\n ratio = float(self.bytes_read) / self.file_size\n else:\n ratio = 1\n percent = int(ratio * 100)\n\n bar_len = 60\n done = int(bar_len * ratio)\n bar = ('=' * done) + (' ' * (bar_len - done))\n\n progress = '{percent: >3}%: [{bar}]'.format(percent=percent, bar=bar)\n backspace = '\\b' * len(progress)\n print(backspace + '\\r', end='')\n print(progress, end='')",
"def reportProgress(self):\n \n pass",
"def _update_progress(\n start_time,\n current_time,\n progress_increment,\n current_progress,\n total,\n unit,\n callback_function,\n):\n if callback_function is not None:\n new_progress = current_progress + progress_increment\n elapsed_time = current_time - start_time\n callback_function(progress_increment, new_progress, total, unit, elapsed_time)\n\n return new_progress",
"def showProgressBar(self):\n logging.info(\"Show progress bar\")\n self.progressLabel = QLabel(self)\n font = QFont()\n font.setFamily(\"GE Inspira\")\n font.setPointSize(20)\n self.progressLabel.setFont(font)\n self.progressLabel.setAlignment(Qt.AlignCenter)\n self.progressLabel.setText(\"Hello\")\n self.ui.layout_v.addWidget(self.progressLabel)\n\n self.progressBar = QProgressBar(self) # Progress bar created\n self.progressBar.setRange(0, 0)\n self.ui.layout_v.addWidget(self.progressBar)",
"def update_progress(self, done):\r\n if done % 100 == 0:\r\n print >>sys.stderr, \" %d processed, run time %d secs\" % (done, (datetime.now() - self.started_at).seconds)",
"def msg_progress(self, message, progbar=None):\n # update progress bar level\n if progbar is None:\n if self.progress_bar > 0.89:\n # reset progress bar\n self.progress_bar = 0\n #Blender.Window.DrawProgressBar(0, message)\n print \"NifScripts: \" + message\n self.progress_bar += 0.1\n else:\n self.progress_bar = progbar\n # draw the progress bar\n #Blender.Window.DrawProgressBar(self.progress_bar, message)\n print \"NifScripts: \" + message",
"def enable_progress_bars() -> None:\n if HF_HUB_DISABLE_PROGRESS_BARS is True:\n warnings.warn(\n \"Cannot enable progress bars: environment variable `HF_HUB_DISABLE_PROGRESS_BARS=1` is set and has\"\n \" priority.\"\n )\n return\n global _hf_hub_progress_bars_disabled\n _hf_hub_progress_bars_disabled = False",
"def _bar_progress(self, count, done=False):\n if self.blank:\n return\n self.current_count = count\n count = min(count, self.total)\n if self.total == count or not self.total:\n complete = 100\n else:\n complete = int(floor(100.0*count/self.total))\n if complete <= self.last_percent:\n return\n self.last_percent = complete\n if self.view_type is self.PERCENT:\n self.f.write('\b\b\b\b%3d%%' % complete)\n elif self.view_type is self.BAR:\n blockcount = int(complete//2)\n if blockcount <= self.blockcount:\n return\n for i in range(self.blockcount, blockcount):\n self.f.write(self.bar_char)\n self.blockcount = blockcount\n else:\n raise Exception('unknown value for view_type: %r' % self.view_type)\n if complete == 100:\n self.f.write('\\n')\n self.f.flush()",
"def set_progress(self, value):\n self.gauge.SetValue(value)",
"def next(self):\n print(f\" {colored('[', 'yellow')}{bold(self.progress[self.pos])}{colored(']', 'yellow')} \"\n f\"{bold('Processing, please wait...')}\",\n end=\"\\r\",\n flush=True\n )\n self.increment()",
"def set_progress(self, progress: int) -> None:\n self.update(progress % len(self.frames)) # prevent IndexError if progress >= len(frames)",
"def update_progress(progress, max_time, starting_time=start_time):\n \n percent = float(progress)/float(max_time)\n int_percent = int(percent*100)\n elapsed_min = (time.mktime(time.gmtime())-starting_time)/60.0\n if percent > 0:\n eta_min = int(round(elapsed_min/percent))\n else:\n eta_min = '?'\n sys.stdout.write( '\\r[{0}{2}] {1}% ({3}) Elapsed:{4}min ETA:{5}min'.format('#'*(int_percent), int_percent,' '*(100-(int_percent)), progress, int(elapsed_min), eta_min))\n sys.stdout.flush()",
"def bar(self, progress):\n if not hasattr(self, \"_limit\") or not self._limit:\n self._limit = self.terminal_size()\n graph_progress = int(progress * self._limit)\n self.stdout.write(\"\\r\", ending=\"\")\n progress_format = \"[%-{}s] %d%%\".format(self._limit)\n self.stdout.write(\n self.style.SUCCESS(\n progress_format\n % (self.progress_symbol * graph_progress, int(progress * 100))\n ),\n ending=\"\",\n )\n self.stdout.flush()",
"def animator(progbar, count, tot_string = False, linenum = False, terminal = False, \n init = False, length = False):\n if init:\n from textprogressbar import TextProgressBar\n return TextProgressBar(length, dirname = tot_string)\n if type(linenum) == int:\n with terminal.location(0, terminal.height - (linenum + 1)):\n if tot_string:\n progbar.animate(count, tot_string)\n else:\n progbar.animate(count)\n else:\n if tot_string:\n progbar.animate(count, tot_string)\n else:\n progbar.animate(count)",
"def setProgress(self, val):\n if val is not None:\n self._progressBar.show()\n self._progressBar.setTextVisible(True)\n self.progress = val\n try:\n self._progressBar.setValue(self.progress)\n except:\n pass\n else:\n self._progressBar.setTextVisible(False)\n self._progressBar.hide()\n self._progressBar.reset()\n\n if self.isHidden is True:\n self.isHidden = False\n self.show_()",
"def make_progress_bar():\n\n if simple_tregex_mode:\n total_files = len(list(to_iterate_over.keys()))\n else:\n total_files = sum(len(x) for x in list(to_iterate_over.values()))\n\n par_args = {'printstatus': kwargs.get('printstatus', True),\n 'root': root, \n 'note': note,\n 'length': total_files,\n 'startnum': kwargs.get('startnum'),\n 'denom': kwargs.get('denominator', 1)}\n\n term = None\n if kwargs.get('paralleling', None) is not None:\n from blessings import Terminal\n term = Terminal()\n par_args['terminal'] = term\n par_args['linenum'] = kwargs.get('paralleling')\n\n if in_notebook:\n par_args['welcome_message'] = welcome_message\n\n outn = kwargs.get('outname', '')\n if outn:\n outn = outn + ': '\n\n tstr = '%s%d/%d' % (outn, current_iter, total_files)\n p = animator(None, None, init=True, tot_string=tstr, **par_args)\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n return p, outn, total_files, par_args",
"def updateProgress (self, iteration, total, prefix='Progress', suffix='complete', decimals=1, length=100, fill='█', printEnd=\"\\r\"):\n if iteration == 0:\n self.start_time = timer()\n ETC = '' #Estimated Time to Completion\n if (iteration/total)*100 >= self.updates[self.update_counter]:\n elapsed = timer() - self.start_time\n if iteration != 0:\n minutes = int((elapsed * total/iteration - elapsed)//60)\n seconds = int((elapsed * total/iteration - elapsed)%60)\n ETC = \"(~{:d} mins {:d}s left)\".format(minutes, seconds)\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n # Unfortunately \\r doesn't work in the pycharm console, so we have to reprint the whole bar everytime,\n # clogging the console.\n #print(f'\\r{prefix} |{bar}| {percent}% {suffix} {ETC}', end = printEnd)\n print(f'{prefix} |{bar}| {percent}% {suffix} {ETC}')\n # Print New Line on Complete\n if iteration == total:\n print()\n self.update_counter += 1",
"def update_progress(self, progress, message):\n assert 0 <= progress < 100\n self._progress = int(progress)\n self.logger.info(\n \"status: STARTED %d%% %s\", self._progress, message or \"\"\n )\n self._callback('on_progress_update', self._progress, message)\n return self.update_response(\n self.encoder.encode_started(self._progress, message)\n )",
"def getProgress(self):",
"def setProgress(self, prog):\n\t\tself.progress = prog",
"def _setProgress(self, progress):\n # print \"Progress set %.2f --------------------------------\" % progress\n\n if progress > 100.0:\n progress = 100.0\n\n self._progress = progress\n self._progressChangedNotifier.notify(self)",
"def progress_bar(progress):\n bar_length = 50\n block = int(round(bar_length * progress))\n text = 'Progress: [{0}] {1}'.format('#' * block + '-' * (bar_length - block),\n progress * 100)\n # Print progress after removing the previous progress\n sys.stdout.write('\\r' + text)\n sys.stdout.flush()",
"def progressbar(total, alive, desc=None):\n with tqdm(total=total, unit=\"B\", unit_scale=True, mininterval=BAR_MININTERVAL, miniters=BAR_MINITERS, desc=desc,\n unit_divisor=1024, ncols=BAR_NCOLS, postfix={'td': 0, 'tps': 0}) as pbar:\n size = 0\n tmp_latency = '0ms'\n tmp_tps = '0'\n while size < total:\n size = pbar_get_size()\n size = size if size < total else total\n updata = size - pbar.n\n updata = check_value_threshold(updata, 0, total)\n\n pbar.set_description(desc)\n pbar.update(updata)\n td, tps = get_latency_tps()\n if td != tmp_latency or tps != tmp_tps:\n pbar.set_postfix(td=td, tps=tps)\n tmp_latency, tmp_tps = td, tps\n\n if alive.value > 0:\n if alive.value == 1 and not globl.get_value('force_exit').value:\n pbar.update(total - pbar.n)\n break\n time.sleep(BAR_SLEEP_FOR_UPDATE)",
"def add_progress_bar(self, name, value=None, label=None):\n widget=QtWidgets.QProgressBar(self)\n widget.setObjectName(_fromUtf8(self.name+\"_\"+name))\n if value is not None:\n widget.setValue(value)\n return self.add_simple_widget(name,widget,label=label)",
"def ShowProgress(self, frac, dur):\n self.script.append(\"show_progress(%f, %d);\" % (frac, int(dur)))",
"def set_progress(self, step):\n if self._max and step > self._max:\n self._max = step\n elif step < 0:\n step = 0\n\n prev_period = int(self._step / self.redraw_freq)\n curr_period = int(step / self.redraw_freq)\n\n self._step = step\n\n if self._max:\n self._percent = self._step / self._max\n else:\n self._percent = 0.0\n\n if prev_period != curr_period or self._max == step:\n self.display()",
"def load_progress(self, amount):\n #self.rect.x = int(board.DISPLAY.width * (amount - 1.0))\n num_on = int(amount * self.bmp2led.num_pixels + 0.5)\n num_off = self.bmp2led.num_pixels - num_on\n on_pixel = [255, 0, 0, 0]\n on_pixel[1 + self.bmp2led.green_index] = 10\n self.spi.write(bytearray([0] * 4 + on_pixel * num_on +\n [255, 0, 0, 0] * num_off + [255] *\n ((self.bmp2led.num_pixels + 15) // 16)))",
"def _render(\n self,\n _old_value: Union[int, float],\n _new_value: Union[int, float],\n _progress_value: float,\n ) -> None:\n\n _prev_ratio, _new_ratio = self._get_ratios(_old_value, _new_value)\n _old_value_size, _new_value_size = self._get_value_sizes(\n _prev_ratio, _new_ratio\n )\n\n # Adjusts for edge cases, such as 0-width non-zero value, or 100% width\n # non-maximum values\n _new_value_size = self._adjust_size_for_range_limits(\n _new_value_size, _new_value\n )\n\n # Default values for increasing value\n _color = 2\n _incr = 1\n _start = max(_old_value_size, 0)\n _end = max(_new_value_size, 0)\n\n if _old_value_size >= _new_value_size:\n # Override defaults to be decreasing\n _color = 0 # Clear\n _incr = -1 # Iterate range downward\n _start = max(_old_value_size, 0) - 1\n _end = max(_new_value_size, 0) - 1\n # If we're setting to minimum, make sure we're clearing by\n # starting one \"bar\" further\n if _new_value == self.minimum:\n _start += 1\n\n _render_offset = self.margin_size + self.border_thickness\n\n vert_start, vert_end, vert_incr = self._get_vertical_fill(_start, _end, _incr)\n horiz_start, horiz_end, horiz_incr = self._get_horizontal_fill(\n _start, _end, _incr\n )\n\n vert_start += _render_offset\n vert_end += _render_offset\n horiz_start += _render_offset\n horiz_end += _render_offset\n\n for vertical_position in range(vert_start, vert_end, vert_incr):\n for horizontal_position in range(horiz_start, horiz_end, horiz_incr):\n self._bitmap[horizontal_position, vertical_position] = _color",
"def updateAmount(self, newAmount=0):\n\n if newAmount <= self.min:\n newAmount = self.min\n if newAmount >= self.max:\n newAmount = self.max\n\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = float(self.amount - self.min)\n percentDone = (diffFromMin / float(self.span)) * 100.0\n percentDone = int(round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2\n numHashes = (percentDone / 100.0) * allFull\n numHashes = int(round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special\n # cases for empty and full\n if numHashes == 0:\n self.progBar = \"[%s%s]\" % (self.edge, ' ' * (allFull - 1))\n elif numHashes == allFull:\n self.progBar = \"[%s]\" % (self.marker * allFull)\n else:\n self.progBar = \"[%s%s%s]\" % (self.marker * (numHashes - 1),\n self.edge,\n ' ' * (allFull - numHashes))\n\n # figure out where to put the percentage, roughly centered\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = str(percentDone) + \"%\"\n\n # slice the percentage into the bar\n self.progBar = ''.join([self.progBar[0:percentPlace], percentString,\n self.progBar[percentPlace+len(percentString):]\n ])",
"def transfer_progress(self, stats):",
"def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return",
"def push_progress(self, status, object_id, progress):\n pass",
"def set_progress_value(self, value):\r\n\r\n pass",
"def on_progress_slider_value_changed(self):\n if self.progress_slider_pressed is True and self.playlist.current is not None:\n self.playlist.current.move2position_bytes(self.view.progress_bar.value())",
"def update_progress(self, value=None):\n if self.main_app is not None:\n if value is not None:\n self.main_app.update_progress(value)\n else:\n if self.total_files != 0:\n self.main_app.update_progress((self.current_file / self.total_files) * 100)",
"def make_show_progress():\n \n start_time = time.time()\n lines_read = 0\n\n def show_progress(chunk_length):\n \"\"\"Displays a progress line. Created by make_show_progress.\"\"\"\n \n nonlocal lines_read\n\n lines_read += chunk_length\n elapsed_time = int(time.time() - start_time)\n print('{:,} lines read | time {:,}s'.format(lines_read, elapsed_time))\n\n return show_progress",
"def increment(self, val=1):\n self.set_progress(self._current + val)",
"def updateBar(self):\n pass",
"def take_snapshot(self, progress: Union[int, float],\n now: Optional[Union[int, float]] = None):\n if not self._progresses or progress - self._progresses[-1] > .001:\n # we only record the time and corresponding progress if the\n # progress has been advanced by 0.1%\n if now is None:\n now = time.time()\n self._progresses.append(progress)\n self._times.append(now)",
"def progress_bar(self):\n # Cache subscriber_count\n count_contact = Contact.objects.filter(phonebook__campaign=self.id).count()\n\n # Cache need to be set per campaign\n # subscriber_count_key_campaign_id_1\n subscriber_count = cache.get(\n 'subscriber_count_key_campaign_id_' + str(self.id))\n\n if subscriber_count is None:\n list_contact = Contact.objects.values_list('id', flat=True)\\\n .filter(phonebook__campaign=self.id)\n\n subscriber_count = 0\n try:\n subscriber_count += Subscriber.objects\\\n .filter(contact__in=list_contact,\n campaign=self.id,\n status=SUBSCRIBER_STATUS.SENT)\\\n .count()\n except:\n pass\n\n cache.set(\"subscriber_count_key_campaign_id_%s\" % str(self.id), subscriber_count, 5)\n\n subscriber_count = int(subscriber_count)\n count_contact = int(count_contact)\n\n if count_contact > 0:\n percentage_pixel = (float(subscriber_count) / count_contact) * 100\n percentage_pixel = int(percentage_pixel)\n else:\n percentage_pixel = 0\n subscriber_count_string = \"subscribers (\" + str(subscriber_count) + \")\"\n return \"<div title='%s' style='width: 100px; border: 1px solid #ccc;'><div style='height: 4px; width: %dpx; background: #555; '></div></div>\" % \\\n (subscriber_count_string, percentage_pixel)",
"def gui_process(self):\n ti = self.scan_queue.qsize()\n t = TqdmUpTo(total=self.scan_queue.qsize(), unit='Files')\n\n while True:\n try:\n t.update(ti - self.scan_queue.qsize())\n ti = self.scan_queue.qsize()\n if self.message_queue.__len__() > 0:\n for m in self.message_queue:\n TqdmUpTo.write(m)\n self.message_queue.remove(m)\n # We dont need more then 60fps in the terminal :P\n except BrokenPipeError:\n continue",
"def labelUpdate(self, run_dict):\n self.progressBar.reset()\n self.progressBar.setMinimum(1)\n self.progressBar.setMaximum(run_dict[\"Progress\"])\n self.progressLabel.setText(run_dict[\"Text\"])",
"def updateBar(self):\n self.mixer = alsaaudio.Mixer()\n volumes = self.mixer.getvolume()\n mutes = self.mixer.getmute()\n\n # update on changes and prolong living time of self.\n if self.masterVol != volumes[0]:\n self.masterVol = volumes[0]\n self.prolongLiving()\n\n if self.masterMute != mutes[0]:\n self.masterMute = mutes[0]\n self.prolongLiving()\n\n if(self.masterMute == 1):\n self.volumeBar.set_fraction(0)\n self.label.set_markup(\n \"<span foreground='white' size='small'>0</span>\")\n else:\n self.volumeBar.set_fraction(self.masterVol / 100)\n if(self.masterVol == 100):\n self.label.set_markup(\n \"<span foreground='white' size='xx-small'>\" + str(self.masterVol) + \"</span>\")\n else:\n self.label.set_markup(\n \"<span foreground='white' size='small'>\" + str(self.masterVol) + \"</span>\")\n\n return True",
"def set_Progress(self,func):\n self.__obj.set_Progress(func)",
"def updateProgress(self, percentage):\n self.chunk_percentage[self.current_step - 1] = percentage * self.current_chunk_size\n self.progress_updated.emit(self.percentage)",
"def SetProgress(self, frac):\n self.script.append(\"set_progress(%f);\" % (frac,))",
"def progress(self, i, num_episodes, last_avg, start, end):\n percentage = (i / num_episodes) * 100.0\n self.log(\n \"Epoch {:>5}/{:<5} [{:>5.1f}%] Avg reward: {:+.3f}\".format(\n i,\n num_episodes,\n percentage,\n last_avg), end='')\n if percentage == 0.0:\n self.log(' Est.time: UNKNOWN')\n return\n elapsed = end - start\n remaining = ((100. - percentage) * elapsed) / percentage\n self.log(' Est.time: {}'.format(self.timer(remaining)))",
"def progressWindow(*args, endProgress: bool=True, isCancelled: bool=True, isInterruptable:\n bool=True, maxValue: Union[int, bool]=0, minValue: Union[int, bool]=0,\n progress: Union[int, bool]=0, status: Union[AnyStr, bool]=\"\", step: int=0,\n title: Union[AnyStr, bool]=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[bool, Any]:\n pass",
"def _log_progress(self):\n self.num_of_requests_in_pipeline += 1\n if self.num_of_requests_in_pipeline % 20 == 0:\n print('-' * 200)\n print(f'DB PIPELINE: {self.num_of_requests_in_pipeline} items wenth though pipeline.')\n print('-' * 200)",
"def _on_progress(self, num):\n self._num_progresses += num\n self._log.info(\"Progress incrementing by {}\".format(num))\n self._host_comms.send_msg(\"progress\", num)",
"def _log_progress(self, t):\n\n # Run the update only 2 step before the actual logging happens in order to\n # make sure that the most recent possible values will be stored in\n # self.summary. This is a hacky workaround in order to support OffPolicyAgent\n # which runs 2 threads without coordination\n if (t+2) % self.log_freq == 0 and self.learn_started:\n episode_rewards = self.env_monitor.get_episode_rewards()\n self.episode_rewards = np.asarray(episode_rewards)\n if self.episode_rewards.size > 0:\n self.mean_ep_rew = np.mean(episode_rewards[-self.stats_n:])\n self.best_mean_ep_rew = max(self.best_mean_ep_rew, self.mean_ep_rew)\n\n if t % self.log_freq == 0 and self.learn_started:\n stats_logger.info(\"\")\n for s, lambda_v in self.log_info:\n stats_logger.info(s.format(lambda_v(t)))\n stats_logger.info(\"\")\n\n if self.summary:\n # Log with TensorBoard\n self.tb_writer.add_summary(self.summary, global_step=t)",
"def progress_func(completed, total):\n if not self.log:\n return\n dots = (completed * dot_count) / total\n if dots > dot_count:\n dots = dot_count\n self.progress_lock.acquire()\n if self.dots_written < dot_count:\n dots_to_write = dots - self.dots_written\n self.dots_written = dots\n os.write(old_stdout, '.' * dots_to_write)\n self.progress_lock.release()",
"def advance(self, step=1):\n self.set_progress(self._step + step)",
"def _update(self):\n\n # Read the power supply status\n # TODO: Display power icon while charging\n plugged_in = open('/sys/class/power_supply/AC0/online').readline().strip() # pylint: disable=unused-variable\n power_percent = atoi(open('/sys/class/power_supply/BAT0/capacity').readline().strip())\n\n self.window.pcBatteryDisplay.setValue(power_percent)\n\n # Set color based on power_level\n if power_percent <= 25:\n self.window.pcBatteryDisplay.setStyleSheet('QProgressBar::chunk {{background-color: #{:06x}}}'.format(\n gui_utils.Color.RED))\n elif power_percent <= 60:\n self.window.pcBatteryDisplay.setStyleSheet('QProgressBar::chunk {{background-color: #{:06x}}}'.format(\n gui_utils.Color.ORANGE))\n else:\n self.window.pcBatteryDisplay.setStyleSheet('QProgressBar::chunk {{background-color: #{:06x}}}'.format(\n gui_utils.Color.BAR_GREEN))\n\n # Compute the CPU usage\n with open('/proc/stat') as f:\n\n # Parse the data from the file\n fields = [float(column) for column in f.readline().strip().split()[1:]]\n idle, total = fields[3], sum(fields)\n idle_delta = idle - self.cpu_last_idle\n total_delta = total - self.cpu_last_total\n self.cpu_last_idle = idle\n self.cpu_last_total = total\n\n # Calulate the utilisation\n utilisation = 100.0 * (1.0 - idle_delta / total_delta)\n self.cpu_buffer.append(utilisation)\n\n self.window.pcCpuDisplay.setValue(sum(self.cpu_buffer) / len(self.cpu_buffer))",
"def runLongTask(self):\n for i in range(5):\n sleep(1)\n self.reportProgress(i + 1)",
"def make_progress_bar(self):\n progress_label = Label(self.master, text=\"Progress:\")\n progress_label.grid(row=7, column=0)\n\n progress_bar = Progressbar(length=200)\n progress_bar.grid(row=7, column=1)\n\n return progress_label, progress_bar",
"def __show_progress(self, _cur_file_idx, _file_count):\n if (self.__is_show_proegress == False):\n return\n\n if(_file_count == 0):\n raise StandardError('no file found.')\n\n # show progress for each 5% (20 steps)\n digit = math.modf(math.log10(_file_count))[1]\n if(digit < 3):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)\n else:\n digit = digit - 2\n skipstep10 = math.pow(10, digit)\n if ((_cur_file_idx % skipstep10) == 0):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)"
] | [
"0.6722962",
"0.6673535",
"0.6486935",
"0.6408159",
"0.6408159",
"0.6382689",
"0.63463795",
"0.6336971",
"0.6323932",
"0.63100183",
"0.6251043",
"0.6093328",
"0.60822016",
"0.6081106",
"0.6039662",
"0.59908193",
"0.5933977",
"0.59072167",
"0.5903473",
"0.5895819",
"0.5878664",
"0.58754796",
"0.5856501",
"0.58544827",
"0.58541465",
"0.5849341",
"0.57626057",
"0.57622206",
"0.574999",
"0.57054585",
"0.5702607",
"0.5696231",
"0.5695583",
"0.56735617",
"0.5664897",
"0.5658084",
"0.5654643",
"0.5654643",
"0.56533325",
"0.5646776",
"0.5646776",
"0.564015",
"0.5639459",
"0.56363565",
"0.56239647",
"0.5597211",
"0.5589159",
"0.5562056",
"0.55585676",
"0.55583096",
"0.5556898",
"0.55558634",
"0.55436206",
"0.5536996",
"0.55327535",
"0.5529558",
"0.5525684",
"0.5522388",
"0.55081475",
"0.5505035",
"0.5490031",
"0.548777",
"0.5471565",
"0.5459553",
"0.54575354",
"0.5456816",
"0.54547393",
"0.5453503",
"0.54492724",
"0.5441239",
"0.5416704",
"0.54004407",
"0.53808135",
"0.537372",
"0.5359488",
"0.5358527",
"0.53430915",
"0.53297675",
"0.53267556",
"0.53257257",
"0.5315318",
"0.5311388",
"0.5309726",
"0.53087056",
"0.5282646",
"0.5272702",
"0.527086",
"0.5268819",
"0.52587825",
"0.5256968",
"0.525567",
"0.52435434",
"0.52389604",
"0.52372104",
"0.5223968",
"0.52236706",
"0.5223207",
"0.5221861",
"0.52205503",
"0.5217745"
] | 0.6813362 | 0 |
Closes the progress bar if it was opened, and resets the step counter. | def done(self):
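        # Tear down the bar if it was ever created, and reset the counter so the tracker can be reused.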
if self.pbar is not None:
self.pbar.close()
self.pbar = None
self.counter = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close_progress(self):\r\n\r\n pass",
"def _close_if_complete(self):\n if self.progress_var.get()>=100:\n # delete the variable trace (necessary?)\n #self.progress_var.trace_vdelete('w',self.progress_trace_name)\n\n self._close(final_message=\"Time %s: Finished %s\"%(self.sim.timestr(),\n self.timer.func.__name__))",
"def close(self):\n self._progress.close()\n self._fd.close()",
"def close(self):\n if self.disable:\n return\n if self._pbar:\n self.close_pbar()\n super().close()",
"def close(self) -> None:\n\n if not self.simple_tui:\n self.rich_progress_bar.stop()\n\n logging.shutdown()",
"def close(self):\n if self._open:\n self._open = False",
"def close(self):\n self._isOpen = False",
"def close(self):\n\n if self._state == states['open']:\n self._do_close()",
"def close(self):\n self.is_open = False",
"def close(self):\n self._normal_close = True\n\n self.cancel()",
"def close(self):\n self.__CheckOpen('close')\n self.__closed = True",
"def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None",
"def close(self):\n self.closing = True",
"def _close(self):\n log.Debug('dpbx.close():')",
"def close(self):\n self.closed = True",
"def handle_close(self):\n self.active = False\n self.close()",
"def close(self):\n self._close = True",
"def close(self) -> None:\n self.__open = False\n total: float = self.get_total()\n if not self.__updated_total == round(total * 100.0):\n self.__open = True\n raise RuntimeError(\"Cash drawer contents incorrect.\")\n self.__updated_total = 0",
"def close(self):\n if not self._close_state.is_set():\n self._close_state.set()",
"def close(self):\n self.closed = True",
"def close(self):\n if not self.file.closed:\n self.file.close()",
"def close(self):\n self.__closed = True",
"def close(self):\n self.__closed = True",
"def close(self, pbar=None):\n\n if not self.enabled:\n return\n \n # home the axis\n self.home(pbar=pbar)\n \n # close the communication and free the port\n self.ser.close()",
"def close(self):\n self.f.close()",
"def close(self):\n self.f.close()",
"def BPfin(self):\n _log.Linfo(\"Début ## BarreProgression.BPfin\")\n\n self.BP_barre_progression.setValue(100)\n self.BP_barre_progression.close()",
"def _close(self):\n # TODO\n self.holding = False",
"def close(self):\n # By default, this is a NOOP",
"def close(self, *obj):\n self._save_size()\n self.clean_up()\n self.uistate.gwm.close_track(self.track)\n self.opened = False\n self.parent_window.present()",
"def stop(self):\n self.close.set()",
"def close(self):\n self.f.close()\n if self.f_script is not None:\n self.f_script.close()",
"def close(self):\n # This is a NOOP by default",
"def close(self):\n # This is a NOOP by default",
"def _close(self):\n self.fh.close()",
"def close(self):\n self.hdfile.close()",
"def close(self):\n self.hdfile.close()",
"def close(self) -> None:\n self.done = True\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None",
"def close(self):\n if not self._f:\n return\n\n logger.info(\"Closed {} ({})\".format(self.name, self.num))\n\n self._f.close()\n self._f = None",
"def close(self):\n if self._closefunc:\n self._closefunc()\n self._closefunc = None",
"def close(self):\n self.done = True\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None",
"def close(self):\n\t\tself.filep.close()",
"def close(self):\n print 'close'",
"def close(self) -> None:\r\n pass",
"def close(self) -> None:\n with self._cv:\n self._transition(Status.CLOSED)",
"def close(self) -> None:\n self.f.close()",
"def close(self):\n \n return self.set_level('down')",
"def close(self) -> None:\n ...",
"def close(self) -> None:\n ...",
"def close(self) -> None:\n self._close_impl(invalidate=False)",
"def close(self) -> None:\n pass",
"def close(self) -> None:\n pass",
"def close(self) -> None:\n pass",
"def close(self):\n self._lib.NRFJPROG_close_dll()\n if self._log_file is not None and self._log_file is not sys.stderr and self._log_file is not sys.stdout:\n self._log_file.close()",
"def close(self):\n if self.file is not None:\n self.file.close()\n self.file = None",
"def close(self):\n self.Close()",
"def close(self):\n self.file.close()",
"def close(self):\n self.file.close()",
"def close(self):\n self.exit()",
"def closing(self, cancelable=False):\r\n return True",
"def close(self):\n self.call('close')",
"def close(self):\n self.fileobj.close()",
"def close(self):\n self.fileobj.close()",
"def close(self):\n if self._timer is not None:\n self._timer.cancel()\n self._timer = None",
"def close(self):\n self._close()",
"def close(self):\n self._close()",
"def close(self):\n self._context.state = CLOSED",
"def close(self):\n \n self.__exit__(None, None, None)\n return",
"def close(self):\n self._command = \"close\"",
"def close(self):\n self.__exit__(None, None, None)",
"def close(self):\n self.__exit__(None, None, None)",
"def close(self):\n with self._not_full:\n with self._not_empty:\n self._closed = True\n self._not_full.notify_all()\n self._not_empty.notify_all()",
"def close(cls):\n pass",
"def closing_plugin(self, cancelable=False):\n return True",
"def closing_plugin(self, cancelable=False):\n return True",
"def close(self):\r\n pass",
"def close(self):\r\n pass",
"def close(self):\r\n pass",
"def close_file(self):\r\n self.file.close()",
"def close(self):\n\n\t\tself._window.close()",
"def close(self):\n self.handle.close()",
"def close(self):\n\t\tttotal = time.time() - self.stats['startloop'] - self.stats['waittime']\n\t\tapDisplay.printColor(\"COMPLETE LOOP:\\t\"+apDisplay.timeString(ttotal)+\n\t\t\t\" for \"+str(self.stats[\"count\"]-1)+\" series\",\"green\")\n\t\tappionScript.AppionScript.close(self)",
"def close(self) -> float:\n return self._close",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass",
"def close(self):\n pass"
] | [
"0.81975293",
"0.75678986",
"0.74818844",
"0.729322",
"0.7214553",
"0.6678668",
"0.66652167",
"0.6659462",
"0.66420907",
"0.66245186",
"0.6562891",
"0.6544766",
"0.65217733",
"0.65041906",
"0.6473157",
"0.6471343",
"0.6361349",
"0.6353468",
"0.6337863",
"0.6337843",
"0.63348854",
"0.63185847",
"0.63185847",
"0.63094425",
"0.6269641",
"0.6269641",
"0.62639105",
"0.6217592",
"0.62116843",
"0.6195269",
"0.6192191",
"0.6168882",
"0.6164717",
"0.6164717",
"0.6162139",
"0.6161591",
"0.6161591",
"0.6154566",
"0.61450726",
"0.61220855",
"0.6117335",
"0.6106701",
"0.60904825",
"0.60819817",
"0.6078059",
"0.60692775",
"0.60648304",
"0.6054087",
"0.6054087",
"0.60497797",
"0.6037305",
"0.6037305",
"0.6037305",
"0.6036732",
"0.60267687",
"0.6024484",
"0.6014408",
"0.6014408",
"0.60120803",
"0.5990289",
"0.59882754",
"0.59881765",
"0.59881765",
"0.59818757",
"0.5975542",
"0.5975542",
"0.59749424",
"0.5973847",
"0.59682274",
"0.5967688",
"0.5967688",
"0.5967324",
"0.5939339",
"0.59375477",
"0.59375477",
"0.593597",
"0.593597",
"0.593597",
"0.5935426",
"0.59320605",
"0.5929177",
"0.59258944",
"0.5918877",
"0.5911348",
"0.5911348",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997",
"0.59095997"
] | 0.7161358 | 5 |
Returns the state of the engine. | def state(self):
return self._state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_state(self):\n return self._env.get_state()",
"def get_state(self):\n return self.env.sim.get_state()",
"def get_state(self):\n pass",
"def state(self) :\n\t\ttry :\n\t\t\treturn self._state\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_state(self) -> Any:\n raise NotImplementedError(\n 'This environment has not implemented `get_state()`.'\n )",
"def get_state(self):\n raise NotImplementedError",
"def get_state(self):\n raise NotImplementedError",
"def GetState(self):\r\n \r\n return self.state",
"def getState():\n # TODO: this isn't nearly as meaningful as it used to be",
"def state(self):\n result = self.getResult()\n return result.state",
"def get_state(self):\n return self.controller.get_state()",
"def return_state(self):\n\t\treturn self.state",
"def state(self):\n return self.get_state()",
"def get_state(self):\n return self.state",
"def get_state(self):\n return self.state",
"def getState(self) :\n return self.state",
"def get_current_state(self):\n return self.world.get_state()",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def get_current_state(self):\n return self.game.get_current_state()",
"def state(self):\n print(\"getter of variable state called\")\n return self._state",
"def _get_state(self):",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def getState(self):\r\n return self._get_SS_State()#self.currentState\r",
"def getState(self):\n return self.state",
"def getState(self):\n return self.state",
"def getState(self):\n return self.state"
] | [
"0.7835089",
"0.7828732",
"0.7622395",
"0.75098014",
"0.7504023",
"0.74837524",
"0.74837524",
"0.747881",
"0.74513674",
"0.7436323",
"0.74217993",
"0.7401581",
"0.7358345",
"0.73560464",
"0.73560464",
"0.73396134",
"0.733",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7290264",
"0.7274785",
"0.72727734",
"0.7256091",
"0.7252784",
"0.7252784",
"0.7252784",
"0.7252784",
"0.7252784",
"0.72156876",
"0.7201198",
"0.7201198",
"0.7201198"
] | 0.0 | -1 |
Starts the engine and prepares any special functions required. This is the only method that Peregrin will need for it to work with this object. | def start(self):
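        # Transition to the started state; Peregrin drives the engine through this single entry point.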
self._state = 'Started' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Start(self) :\n\t\t...",
"def _initialise_run(self) -> None:",
"def _start(self):",
"def startup_run(self):\n raise NotImplementedError # implement in subclass",
"def start(self):\n ...",
"def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()",
"def start_engine(self) -> None:\n cmd = f\"postgres+psycopg2://{self.server.user}:{self.server.passwd}@{self.server.ip}:5432/{self.server.database}\"\n self.engine = create_engine(cmd)\n self.engine_state = \"started\"",
"def start():",
"def start():",
"def start():",
"def start():",
"def startup(self) -> None:",
"def start (self):\n pass",
"def start (self):\n pass",
"def on_pre_enter(self):\n self.setup()\n self.start()",
"def start(self):\r\n pass",
"def _start(self):\n pass",
"def startup(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()",
"def start(self) -> None:\n ...",
"def start(self) -> None:\n ...",
"def start( *args, **kwargs ):",
"def start(self) -> None:",
"def start(self) -> None:",
"def launch(self):",
"def init_run(self):\n raise NotImplementedError",
"def main():\n get_engine(onnx_file_path, engine_file_path)",
"def start() -> None:\n # Authenticate\n ee.Authenticate()\n\n # Initialize the library\n ee.Initialize()",
"def start_procedure(self):\n pass",
"def start(self, **kwargs):\n pass",
"def start(self, **kwargs):\n pass",
"def __init__(self, **keywords):\n\t\tfrom pymodule import ProcessOptions\n\t\tProcessOptions.process_function_arguments(keywords, self.option_default_dict, error_doc=self.__doc__, class_to_have_attr=self)\n\t\tself.setup_engine(metadata=__metadata__, session=__session__, entities=entities)",
"def start(self):\n raise NotImplementedError",
"def start(self):\n raise NotImplementedError",
"def start(self):\n raise NotImplementedError",
"def start_processing(self):",
"def start_engine():\r\n traffic = TrafficCollector()\r\n weather = WeatherController()\r\n client = MongoClient()\r\n db = client.jam_forecaster\r\n\r\n scheduler = BlockingScheduler()\r\n scheduler.add_job(get_data, trigger='cron', hour='6-22', minute='*/5', second='0', max_instances=10, args=[traffic, weather, db])\r\n scheduler.start()",
"def start(self):\n raise NotImplementedError()",
"def start(self):\n raise NotImplementedError()",
"def start(self):\n raise NotImplementedError()",
"def start(self):\n raise NotImplementedError()",
"def __enter__(self):\r\n if not self._engine:\r\n self.restart()\r\n return self",
"def on_start(self):\n self.init()",
"def preloop(self):\n\n self.manager = FacilityManager()",
"def start_session(self):\r\n ee.Initialize()",
"def run(self,*args,**kwargs):\n print(\"[TEMPLATE ENGINE] 'run' function not implemented\")\n pass",
"def setUp(self) -> None:\n self.engine = EvalHPOA()",
"async def _start(self,\n client: TypingClient,\n engine: 'Engine',\n queue: Queue,\n log: logging,\n init: Dict[str, str],\n kwargs: Dict[str, Any]) -> 'WinterSummerBot':\n self._generator = self.generator(client,\n engine,\n queue,\n log,\n init,\n **kwargs)\n await anext(self)\n return self",
"def startup(self, event):\n # The engine is starting up. The main task is to do a catch up on any\n # data still on the station, but not yet put in the database. Not\n # all consoles can do this, so be prepared to catch the exception:\n try:\n self._catchup(self.engine.console.genStartupRecords)\n except NotImplementedError:\n pass",
"def initengine(self):\n\n\t\tif glutInit(sys.argv):\n\t\t\tglutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\n\n\t\t\t# get a 640 x 480 window\n\t\t\tglutInitWindowSize(640, 480)\n\n\t\t\t# the window starts at the upper left corner of the screen\n\t\t\tglutInitWindowPosition(0, 0)\n\t\t\tself.window = glutCreateWindow(\"test\")\n\t\t\tglutDisplayFunc(render)\n\t\t\tglutIdleFunc(doAnimationStep)\n\t\t\tglutKeyboardFunc(keyPressed)\n\t\t\tinitgl(640, 480)\n\t\t\tglutMainLoop()",
"def start_game(self):\n\n\t\tpass",
"def startup(self):\n if self.initialize_mp:\n self.initialize_multiprocessing()\n self.startup_run()\n self.startup_finish()",
"def prepare(self):",
"def post_start(self):",
"def _prepare(self):\n try:\n self._eng.endLoop()\n except:\n pass",
"def started(self):",
"def _initialize_engine(self, engine: ExecutionEngine) -> None:\n _logger.debug('Middleware %r initialized with engine: %r', self, engine)",
"def RUN(self):",
"def prepare(self, db):\n raise NotImplementedError('GenericEngine.prepare is an abstract method.')",
"def initialise(self):",
"def create_engine(self, input):\n return",
"def _do_begin(self):\n self.backend.begin()",
"def _startup(self):\n self._logger.debug(\"About to start up plugin %s\", self.unique_name)\n\n if not self._ez_client.can_connect():\n raise RestConnectionError(\"Cannot connect to the Beer-garden server\")\n\n # If namespace couldn't be determined at init try one more time\n if not self._legacy and not self._config.namespace:\n self._setup_namespace()\n\n self._system = self._initialize_system()\n self._instance = self._initialize_instance()\n\n if self._config.working_directory is None:\n app_parts = [self._system.name, self._instance.name]\n if self._system.namespace:\n app_parts.insert(0, self._system.namespace)\n\n self._config.working_directory = appdirs.user_data_dir(\n appname=os.path.join(*app_parts), version=self._system.version\n )\n\n workdir = Path(self._config.working_directory)\n if not workdir.exists():\n workdir.mkdir(parents=True)\n\n self._logger.debug(\"Initializing and starting processors\")\n self._admin_processor, self._request_processor = self._initialize_processors()\n self._admin_processor.startup()\n self._request_processor.startup()\n\n self._logger.debug(\"Setting signal handlers\")\n self._set_signal_handlers()",
"def activate(self):\n self.start()",
"def initialise(self, args, environ):",
"def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()",
"def __init__(self):\n\t\tappionScript.AppionScript.__init__(self)\n\t\tself.rundata = {}\n\t\t### extra appionLoop functions:\n\t\tself._addDefaultParams()\n\t\tself.setFunctionResultKeys()\n\t\tself._setRunAndParameters()\n\t\t#self.specialCreateOutputDirs()\n\t\tself._initializeDoneDict()\n\t\tself.result_dirs={}",
"def setup( self ):",
"def pre_execute(self):",
"def start(self):\n try:\n pass\n except:\n pass",
"def init():",
"def initialise(self):\n self.sc.init.exec_action(self.variables)",
"def testInit(self):\n self.globalInit()\n self.test.start()",
"def start_algorithm(self):\r\n pass",
"def startUp(self):\n pass",
"def start(self):\n self.running = True\n while self.running:\n self.update_prompt()\n with exception_handler(self.engine):\n self.cmdloop()\n self.engine.reset()",
"def setup(self):\n self.kernel = RunningKernel()\n self.setup_sanitize_files()",
"def pre_start(self) -> None:\n pass",
"def _setup_engine(class_definition, params):\n\n cls = load_from_module(class_definition)\n return cls(params)",
"def teleopInit(self):\n self.globalInit()\n self.teleop.start()",
"def main(self):\r\n pass",
"def run_interactive():\n from cherrypy import engine\n \n # This is what quickstart does but we don't block\n engine.signals.subscribe()\n engine.start()\n #engine.block()",
"def _run_env(self):\n raise NotImplementedError()",
"def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)",
"def start(self):\n return self.setup.start",
"def init(): \n\tset_verbosity()\n\t_set_threads()\n\t_set_heartbeat()\n\t#_set_storage()\n\t\n\tinit_targets()\n\t\n\tsend_heartbeat(start=True)\n\t\n\tinfo_msg = \"init plugin script\"\n\tlogger.info(info_msg)\n\n\tinit_plugin()\n\n\tinfo_msg = \"loaded %s plugin(s)\" %(len(kb.plugins.handle))\n\tlogger.info(info_msg)",
"def __init__(self):\n self._workload = None\n self._engine = Engine()",
"def start_kernel(self, **kw):",
"def startBackend():\n global started\n if started:\n return\n started = True\n print(\"Backend started\")",
"def start(self):\n self.start_time = dt.datetime.now()\n self.call = ' '.join(sys.argv)\n self.commands = []",
"def quickstart(root=None, script_name=\"\", config=None):\n if config:\n _global_conf_alias.update(config)\n\n tree.mount(root, script_name, config)\n\n engine.signals.subscribe()\n engine.start()\n engine.block()",
"def start():\n import OnlineEnv as Online\n Online.end_config(False)\n #Online.end_config(True)",
"def on_start(self):\n self.write_log(\"策略启动\")\n self.pos_calculator = NeutralGridPositionCalculator()\n self.avg_price = self.pos_calculator.avg_price\n self.current_pos = self.pos_calculator.pos"
] | [
"0.6717045",
"0.66311574",
"0.65825856",
"0.644477",
"0.6419866",
"0.6397608",
"0.6393187",
"0.63627625",
"0.63627625",
"0.63627625",
"0.63627625",
"0.636224",
"0.632943",
"0.632943",
"0.6321839",
"0.6320396",
"0.6315927",
"0.6269407",
"0.6243949",
"0.6243949",
"0.6243949",
"0.6243949",
"0.6243949",
"0.6243949",
"0.6243949",
"0.6243949",
"0.62352747",
"0.62278056",
"0.62278056",
"0.62235415",
"0.62180877",
"0.62180877",
"0.6202165",
"0.6197114",
"0.61507237",
"0.6140115",
"0.6117863",
"0.6093835",
"0.6093835",
"0.60936385",
"0.60934937",
"0.60934937",
"0.60934937",
"0.60621846",
"0.60492116",
"0.60112095",
"0.60112095",
"0.60112095",
"0.60112095",
"0.5999496",
"0.5977063",
"0.5976955",
"0.5974549",
"0.59720683",
"0.5966457",
"0.5963908",
"0.5961039",
"0.59583044",
"0.59568775",
"0.59361064",
"0.5920395",
"0.59047127",
"0.5900677",
"0.5893226",
"0.58741564",
"0.58639216",
"0.58488876",
"0.5846545",
"0.58391005",
"0.583022",
"0.57989615",
"0.57934874",
"0.57880294",
"0.5781085",
"0.5771758",
"0.577174",
"0.5758969",
"0.57506275",
"0.57470447",
"0.57337844",
"0.57248634",
"0.57219017",
"0.5721458",
"0.5720011",
"0.57190925",
"0.5718762",
"0.57078016",
"0.570461",
"0.5704482",
"0.570393",
"0.57031846",
"0.57017815",
"0.5701335",
"0.5700473",
"0.568422",
"0.56814456",
"0.5677956",
"0.5676188",
"0.56682384",
"0.5664275",
"0.5660746"
] | 0.0 | -1 |
will process the class and auto run the relevant actions | def run(self, *args, **kwargs):
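    # gather the registered actions (assumed to be populated into self._actions by actions())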
    self.actions()
    for funcName, action in self._actions.items():
        actionName, actionParams = action
        if actionParams is None:
            func = getattr(self, funcName)
            print('Running %s.%s' % (self._title, funcName))
            func()
        else:
            self.runAction(actionName, funcName)
    self._db.commit_db() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n self.class_inst_obj.processor(self.msg)",
"def process(self):\n pass",
"def process_class_list(self, module, classes):",
"def process(self):",
"def process(self):",
"def process(self):",
"def processing(self):\n pass",
"def process(self):\n raise NotImplementedError",
"def start_processing(self):",
"def run(self):\n\t\t\n\t\tpass",
"def run(self):\r\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def _run(self):\n raise NotImplementedError",
"def _run(self):\n raise NotImplementedError",
"def run(self):\n self.run()",
"def run(self): \r\n return",
"def run(self):\n while self.container.process(): pass",
"def process():",
"def process(self):\n raise NotImplementedError('Method must be implemented by subclass.')",
"def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)",
"def RUN(self):",
"def setup_class(klass):",
"def setup_class(klass):",
"def __run_class_setup_fixtures(self):\n self.__run_class_fixtures(\n self.STAGE_CLASS_SETUP,\n self.class_setup_fixtures + [ self.classSetUp ],\n self.EVENT_ON_RUN_CLASS_SETUP_METHOD,\n self.EVENT_ON_COMPLETE_CLASS_SETUP_METHOD,\n )",
"def _postprocess(self):",
"def _run ( self ) :\n raise NotImplementedError ( \"AppBase: method _run() should be implemented in a subclass\" )",
"def run(self):\n \n pass",
"def main(self):\n PWBS_EM = PWBSEventManager.getInstance()\n try:\n PWBS_EM.startEvent(\n \"pwbs-event--pwbs_class-main-before-parseargs\",\n this=self\n )\n self.args = self.argparser.parse_args()\n PWBS_EM.startEvent(\n \"pwbs-event--pwbs_class-main-after-parseargs\",\n this=self,\n args=self.args\n )\n self.pwbscm.log.log_debug(\"Argument Parser: {0}\".format(repr(self.args)))\n PWBS_EM.startEvent(\n \"pwbs-event--pwbs_class-main-before-specialtaskinterpreter\",\n this=self\n )\n special_task_executed = self.special_tasks_interpreter()\n PWBS_EM.startEvent(\n \"pwbs-event--pwbs_class-main-after-specialtaskinterpreter\",\n this=self\n )\n PWBS_EM.startEvent(\n \"pwbs-event--pwbs_class-main-before-taskinterpreter\",\n this=self\n )\n self.task_runner()\n PWBS_EM.startEvent(\n \"pwbs-event--pwbs_class-main-after-taskinterpreter\",\n this=self\n )\n if (len(self.args.Task) == 0) and (not special_task_executed):\n self.argparser.print_help()\n\n except NotImplementedFeatureError as e:\n print(\"Not Implemented Feature Called!\")\n PWBS_EM.startEvent(\n \"pwbs-event--pwbs_class-main-notimplementedfeatureerror\",\n this=self,\n error=e\n )",
"def run(self):\n raise NotImplementedError # implement in subclass",
"def _run(self):\n # We usually override this in __init__\n # pylint: disable=method-hidden\n return",
"def _process(self, activity):",
"def process(self):\n self.extract()\n self.transform()\n self.load()",
"def run(self):\n self._params = self.parsingcommands()\n self.start()",
"def run(self):",
"def run(self):",
"def run(self):",
"def run(self):",
"def run(self):",
"def run(self):",
"def run(self):",
"def run(self):",
"def run(self):",
"def run(self):",
"def _run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError",
"def process(self, container):\n pass;",
"def perform(self):\n pass",
"def main():\n\t\t# KISS mockup of class operation\n\t\tself.log_in()\n\t\tself.get_token()\t\t# Access token for Facebook API\n\t\tself.get_post()\n\t\tself.get_comments()\n\t\tself.get_hashtags()\t\t# Filter comments that contain Hashtag\n\t\tself.comment_in_json()\t# JSON file where the comments we have replied to are stored\n\t\tself.author_following()\n\t\tself.post_reply()\t\t\n\t\tself.post_video()",
"def setup_class(self):\n pass",
"def process_thread(self):",
"def on_class_parse(self, ctx):\n return None",
"def run_command(self, command_class):\n command_class(*self.__args, **self.__kwargs).run()",
"def post_process(cls, *args, **kwargs):\n pass",
"def post_process(cls, *args, **kwargs):\n pass",
"def post_process(cls, *args, **kwargs):\n pass",
"def post_process(cls, *args, **kwargs):\n pass",
"def action_run(self):\n pass",
"def run(self):\n self.load_template()\n self.load_data()\n self.load_files()\n self.render_content()\n self.process()\n # pprint(self.data)",
"def Run(self):\n pass",
"def test_class_started(self, cls):",
"def execute(cls):\n pass",
"def post_processor(self):",
"def run(self):\n try:\n self._run_internal()\n finally:\n self._cleanup()",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def post_process(self):\n pass",
"def run_test_class(self, ClassName):\n tests = ClassName(\n self.model,\n self.parameter_values,\n self.disc,\n self.solution,\n self.operating_condition,\n )\n tests.test_all()",
"def run(self):\n\n self.__run_class_setup_fixtures()\n self.__enter_context_managers(self.class_setup_teardown_fixtures, self.__run_test_methods)\n self.__run_class_teardown_fixtures()",
"def execute(cls, **inputs):\n instance = cls(**inputs)\n if hasattr(instance, \"process\"):\n time_start = time.time()\n result = instance.process()\n instance.runtime = time.time() - time_start\n return result\n if hasattr(instance, \"post_process\"):\n instance.post_process()",
"def run(self):\n raise NotImplementedError()",
"def run(self):\n raise NotImplementedError()",
"def _run(self, *args, **kwargs):\n raise NotImplementedError",
"def run(self):\n raise NotImplementedError(\"Subclass must implement abstract method\")",
"def Run():\r\n pass",
"def run(self):\r\n self.log(texto=f\"Executando {self._name}\")",
"def run(self):\n raise Exception('derived class should redefine this function')",
"def run(self):\n print('A simple bot started the process.')\n try:\n self.calculate_before_process()\n\n if self.process == \"Like\":\n self.process_like()\n elif self.process == \"Like-and-follow\":\n self.process_like_and_follow()\n except Exception as e:\n print(e)\n finally:\n self.dump_all()\n print('A simple bot finished the process.')"
] | [
"0.733801",
"0.6919119",
"0.68216944",
"0.6646222",
"0.6646222",
"0.6646222",
"0.6335444",
"0.6312122",
"0.6258709",
"0.61450005",
"0.6139092",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.6109335",
"0.60882485",
"0.60882485",
"0.60718805",
"0.6035683",
"0.60292065",
"0.6026033",
"0.5953021",
"0.5940548",
"0.59134287",
"0.58977747",
"0.58977747",
"0.58867466",
"0.5883492",
"0.5877083",
"0.5863667",
"0.5843353",
"0.58187306",
"0.5807344",
"0.5791827",
"0.5791293",
"0.57864743",
"0.57856154",
"0.57856154",
"0.57856154",
"0.57856154",
"0.57856154",
"0.57856154",
"0.57856154",
"0.57856154",
"0.57856154",
"0.57856154",
"0.5781348",
"0.57704264",
"0.57704264",
"0.57704264",
"0.57704264",
"0.57704264",
"0.57704264",
"0.57704264",
"0.57704264",
"0.57704264",
"0.57648635",
"0.57606333",
"0.57477665",
"0.57372856",
"0.5731235",
"0.57311887",
"0.5729862",
"0.572832",
"0.572832",
"0.572832",
"0.572832",
"0.57236505",
"0.57119435",
"0.57064235",
"0.5706298",
"0.56918377",
"0.56915545",
"0.56896055",
"0.56628644",
"0.56628644",
"0.56628644",
"0.56628644",
"0.56628644",
"0.566175",
"0.5653357",
"0.5645344",
"0.56452024",
"0.56452024",
"0.5643409",
"0.56423265",
"0.5618171",
"0.5609953",
"0.5609244",
"0.56031287"
] | 0.0 | -1 |
will run the action specified in the action name | def runAction(self, actionName, funcName):
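    # fetch the items still pending this action and record a new action entry in the DB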
    itemDataList = self._db.getItemDataList(self._engine_id, actionName)
    actionId = self._db.addAction(actionName)
    func = getattr(self, funcName)
    i = 0
    total = len(itemDataList)
    startTime = timeit.default_timer()
    print('%s.%s => %s' % (self._title, funcName, total))
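    # process each item and stamp its completion time in the DB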
    for itemId, itemURI in itemDataList:
        i += 1
        func(itemURI)
        self._db.updateItem(self._engine_id, itemId, actionId, datetime.datetime.now())
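        # every 1000 items: report progress with an ETA estimate and flush pending writes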
        if i % 1000 == 0:
            interTime = timeit.default_timer()
            step = (interTime - startTime) / i
            eta = step * (total - i)
            print('Processing: %s / %s ETA: %ss at %s' % (i, total, eta, step))
            if self._db is not None:
                self._db.commit_db()
    self._db.commit_db() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_action(self, action):\n method_name = action.text().lower()\n method_name = method_name + \"_action\"\n action_method = getattr(self, method_name)\n action_method()",
"def call_action(self, action):\n pass",
"def action_run(self):\n pass",
"def perform_action(self, action_id: int) -> None:\r\n ...",
"def perform ( self, action, action_event = None ):\r\n getattr( self.editor, action.action )()",
"def executeAction(self,**kwargs):\n try:\n action = kwargs[\"fname\"]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n entries = {}\n pose_offset = 'empty'\n if action in self.bl.getAllSavedActions():\n pose_offset = self.bl.baxter_actions[str(action)]['joint_position']\n entries['Show action only'] = [self.moveBy, pose_offset]\n entries['Show pick up action'] = [self.pickUpActionColour, pose_offset]\n# entries['Add condition'] = self.addEmptyCondition\n# entries['Rename '+str(action)] = [self.renameAction, action]\n entries['Learn '+str(action)] = getattr(self.bl, 'demoAction')\n\n self.mm.addGenericMenu(\"learnMenu\", self.mm.cur_page,\"Action saved as: %s\" % (str(pose_offset)),entries)\n self.mm.loadMenu(\"learnMenu\")",
"def perform_action(self, action_data):\n pass",
"def take_action(self, action):\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )",
"def run(self):\n\n self._action.execute()",
"def act(self, action):\n action_name = action.op\n args = action.args\n list_action = first(a for a in self.actions if a.name == action_name)\n if list_action is None:\n raise Exception(\"Action '{}' not found\".format(action_name))\n if not list_action.check_precond(self.kb, args):\n raise Exception(\"Action '{}' pre-conditions not satisfied\".format(action))\n list_action(self.kb, args)",
"def __call__(self):\n action = self.args.get('action', None)\n if not hasattr(self, 'action_%s' % (action,)):\n action = 'plugin_root'\n\n action_method = getattr(self, 'action_%s' % (action, ))\n return action_method()",
"def perform_actual_action(self, action):\n self.game.perform_action(action)",
"def actions():\n pass",
"def execute_action(self, agent, action):\n abstract",
"def _run_actions(self):\n\n if \"install-bento\" in self.actions:\n self._do_action_bento_setup()\n\n if \"create-tables\" in self.actions:\n self._do_action_tables_create()\n\n if \"import-ratings\" in self.actions:\n self._do_action_import_ratings()\n\n if \"import-user-info\" in self.actions:\n self._do_action_import_user_info()\n\n if \"import-movie-info\" in self.actions:\n self._do_action_import_movie_info()\n\n if \"train-item-item-cf\" in self.actions:\n self._do_action_train()\n\n if \"register-freshener\" in self.actions:\n self._do_action_register_freshener()",
"def _execute_action(self, action):\n if action['type'] == 'http':\n self._execute_action_http(action)\n elif action['type'] == 'mail':\n self._execute_action_mail(action)\n elif action['type'] == 'chat':\n pass\n elif action['type'] == 'printer':\n self._execute_action_printer(action)\n elif action['type'] == 'smb':\n self._execute_action_smb(action)\n\n # Wait for a randomized interval.\n time.sleep(random.randint(1, 5))",
"def execute_action(self, action_name, *args, **kw):\n action_dict = {}\n for robot_id in self._robot_group._robots_id_in_group_list:\n robot_module = self._robot_group.all_robots_dict[robot_id].get_module(self._module_name)\n action_dict[robot_id] = getattr(robot_module, action_name)(*args, **kw)\n logger.info(\"Multi Module robot id {0}: begin to execute the action\".format(robot_id))\n multi_action = MultiAction(action_dict)\n return multi_action",
"def do_action(self, action, **kwargs):\r\n print(action)\r\n action_method = getattr(self, action._method.__name__)\r\n if action_method:\r\n action_method(**kwargs)",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def execute_action(self, agent, action):\n raise NotImplementedError",
"def execute_action(self, agent, action):\n raise NotImplementedError",
"def do_action(self, action, a=None, b=None):\n pass",
"def perform_action(self, action):\n if action[0] == 10: # Query\n return self.process_query(action)\n elif action[0] == 20: # Look at a document\n return self.examine_document(action)",
"def act(self, x):\n return self.action",
"def call_method(self, action):\n\n\t\tif action[0] in self.methods:\n\t\t\tself.methods[action[0]](action[0:])\n\t\telse:\n\t\t\tself.no_such_method()",
"def _action(self):\n pass",
"def _do_action(self):\n pass",
"def _do_action(self):\n pass",
"def choose_action(self):\r\n pass",
"def run(self, *args, **kwargs):\n self.actions()\n\n for funcName, action in self._actions.items():\n actionName, actionParams = action\n if actionParams == None:\n func = getattr(self, funcName)\n print('Running %s.%s' % (self._title, funcName))\n func()\n else:\n self.runAction(actionName, funcName)\n self._db.commit_db()",
"def take_action(self, action):\n\t\traise NotImplementedError",
"def get_action(self, context):\n pass",
"def _run_action_external(self):\n action = self.act_kwargs['action_external']\n logger.debug('running external action %s on file %s' % (action, self.file_name))",
"def actions() -> None:\n pass",
"def perform_step(self, action):\n pass",
"def action(self):\n pass",
"def action(self):\n pass",
"def _act(self, action):\n self._set_action(action)",
"def perform_user_action(action_index):\r\n if action_index is not None:\r\n print()\r\n action = available_actions[action_index]\r\n if current_state in action[\"valid_states\"]:\r\n function_to_run = available_actions[action_index][\"function\"]\r\n if function_to_run is not None:\r\n function_to_run()\r\n else:\r\n print(\"Internal error: NOT IMPLEMENTED (no function assigned for the action)!\")\r\n else:\r\n print(\"This function is not allowed in the current system state (%s)\" % current_state)\r\n else:\r\n print(\"Invalid input, please choose a valid action\")\r\n print()\r\n return None",
"def run_code(self, test):\n for action in test:\n self.assertEquals(1, len(action))\n action_type, action = list(action.items())[0]\n\n if hasattr(self, \"run_\" + action_type):\n getattr(self, \"run_\" + action_type)(action)\n else:\n raise InvalidActionType(action_type)",
"def getActionByName(name):\n for action in getActions():\n if action.name == name:\n return action\n\n raise errors.NoSuchAction(name)",
"def test_actions(self, actions):\n try:\n for action in actions:\n self.get_action(action['type'])(**action)\n except Exception as e:\n print('Exception: {}'.format(str(e)))",
"def perform_action(self, action_name, *action_parameters_):\n result = Being.perform_action(self, action_name, *action_parameters_)\n # return original result\n return result",
"def do_action(self):\n func = self._get_action_func()\n func(self)",
"def get_action_by_name(self, name):\n for action in self.all_actions:\n if action.name == name:\n return action\n return None",
"def obtain_action(self):\r\n\t\treturn",
"def dispatch(self, *args, **kwargs):\r\n action = kwargs.pop('action', 'default')\r\n action_method = getattr(self, str(action), self.default)\r\n return action_method(*args, **kwargs)",
"def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action",
"def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action",
"def execute_script(self, action, *args):\n self.host.cmd(('./%s' + len(args) * ' %s') % (action, *args))",
"def dispatch(self, *args, **kwargs):\n action = kwargs.pop('action', 'default')\n action_method = getattr(self, str(action), self.default)\n return action_method(*args, **kwargs)",
"def take_action(self, *args, **kwargs):\r\n pass",
"def apply_action(self, action):\n return self.__environment.step(action)",
"def action(self) -> str:\n return pulumi.get(self, \"action\")",
"def exec_actions(actions, watch_path, var_name_ext):\n var_name = var_name_ext.rsplit('.', 1)[0]\n current_dir = os.getcwd()\n os.chdir(watch_path)\n watch_path_absolute = os.getcwd()\n for action_type, action in actions:\n action_name = action.replace('$NAME_EXT', var_name_ext)\\\n .replace('$NAME', var_name)\\\n .replace('$CURRENT_DIR', current_dir)\\\n .replace('$WATCH_DIR_LAST', watch_path_absolute.split('/')[-1])\\\n .replace('$WATCH_DIR', watch_path_absolute)\n if action_type == 'SHELL_COMMAND':\n os.system(action_name)\n os.chdir(current_dir)",
"def _get_action_from_name(self, name):\n\n container = self._action\n if name is None:\n return None\n\n for action in container:\n if \"/\".join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action",
"def chooseAction(self):\n print \"nothing\"\n pass",
"def action(self):\n current_action = self.get_script_entry()\n if current_action[\"type\"] == \"request\":\n self._handle_request(current_action)\n elif current_action[\"type\"] == \"event\":\n self._handle_event(current_action)\n elif current_action[\"type\"] == \"response\":\n self._handle_response(current_action)\n else:\n raise AttributeError(\"Wrong action type!\" +\n \" Scenario: \" + str(self._loaded_sc[\"name\"]) +\n \" Action: \" + str(self._scenario_script_cur))",
"def getAction(self, nameOrAction):\n\t\tif isinstance(nameOrAction, Action):\n\t\t\treturn nameOrAction\n\t\telse:\n\t\t\treturn self.actions[nameOrAction]",
"def execute_action(self, action, lib):\n if not self.good:\n return self\n #print \"_Action: \"+self.name+\"{\"+str(action)+\"}\\n{\", self._elements_to_arg_list(), \"}\"\n if action == None:\n if OPTIONS['allow-no-actions']:\n action = \"self.name + '(' + str(arg) + ')'\"\n else:\n print \"Error: No Action for '\"+self.name+\"'\"\n print \"For: {\"+str(self._elements_to_arg_list())+\"}\"\n exit(1)\n\n ## setup the helpers, and goodies\n arg = self._elements_to_arg_list()\n global DATA; DATA = arg\n global SZ; SZ = len(DATA)\n action = _action_transform(action);\n\n if OPTIONS['action-debug']:\n print \"Action: \"+self.name+\"{\"+str(action)+\"}\\n{\", self._elements_to_arg_list(), \"}\"\n val = eval(action if action != None else \"''\")\n if OPTIONS['action-debug']:\n print \"Value: '\"+str(val)+\"'\\n\"\n new_result = Result.success([val])\n new_result.set_name(self.name)\n if action == None and not (OPTIONS['action-debug'] and OPTIONS['action-debug-all']):\n return self\n else:\n return new_result",
"def select_action(self):\n pass",
"def apply_action(self, cmd_name, *args):\n\n action = Action(self.tahoma_device.url)\n action.add_command(cmd_name, *args)\n self.controller.apply_actions(\"HomeAssistant\", [action])",
"def action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action\")",
"def run_hook(vmname, action):\n vm = None\n try:\n vm = globals()[vmname]\n except KeyError:\n print(\"No such VM\")\n \n if vm:\n try:\n hook = getattr(vm, action)\n hook()\n except AttributeError:\n print(\"Action not supported\")",
"def action(func):\n ACTIONS[func.__name__.rstrip('_')] = func\n return func",
"def takeAction(self, action):\n return self.env.step(action)",
"def renameAction(self,**kwargs):\n try:\n old_action = kwargs[\"fname\"].split(' ')[1]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n rospy.loginfo(\"Enter the new name of the action:\")\n action = sys.stdin.readline().strip()\n\n self.bl.baxter_actions[str(action)] = self.bl.baxter_actions[str(old_action)]\n del self.bl.baxter_actions[str(old_action)]\n\n self.baxter.mm.changeMenuTitle(\"Action %s renamed to: %s\" % (old_action, str(action)))\n self.baxter.yes() \n self.mm.loadMenu(\"teachMenu\")",
"def execute_actions(self, actions):\n execute_actions(self.board, self.agent_locs, actions)",
"def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]",
"def step(self, action):",
"def set_action(self,action):\n self.__action = action",
"def execute_action(self, a):\n return self.emulator.next(a)",
"def action(self, action):\n self._action = action",
"def _take_action(self, action_idx: ActionType) -> None:\n raise NotImplementedError(\"\")",
"def apache(action):\n click.echo(action)\n return",
"def _get_action(self):\n return self.__action",
"def post(self, request, *args, **kwargs):\n getattr(self, kwargs['action'])()\n return HttpResponse()",
"def set_action(self, action):\n self.action = action",
"def set_action(self, action):\n self.action = action",
"def fileAction(action=\"\", *args):\n\tif action == \"fbxexp\":\n\t\tpathFld = \"pathTFBG\"\n\t\tnameFld = \"nameTFG\"\n\t\tsuffix = \"fbx\"\n\telif action == \"mayaexp\" or \"openMaya\":\n\t\tpathFld = \"mayaTFBG\"\n\t\tnameFld = \"animNameTFG\"\n\t\tsuffix = \"ma\"\n\telif action == \"animFbx\":\n\t\tpathFld = \"mayaTFBG\"\n\t\tnameFld = \"animNameTFG\"\n\t\tsuffix = \"fbx\"\n\t\t\t\t\n\tfolder = cmds.textFieldButtonGrp(widgets[pathFld], q=True, tx=True)\n\texpName = cmds.textFieldGrp(widgets[nameFld], q=True, tx=True)\n\tfileName = \"{0}.{1}\".format(expName, suffix)\n\t\n\tif folder and expName:\n\t\tpath = cFuncs.fixPath(os.path.join(folder, fileName))\n\t\tif action == \"fbxexp\":\n\t\t\texpFbx(path)\n\t\telif action == \"mayaexp\":\n\t\t\tcmds.file(path, force=True, type=\"mayaAscii\", exportSelected= True)\n\t\telif action == \"animFbx\":\n\t\t\texpFbx(path)\n\t\telif action == \"openMaya\":\n\t\t\tcmds.file(path, open=True, force=True)\n\telse:\n\t\tcmds.warning(\"You need to have a location AND a file name\")",
"def find_action_by_name(self, name): # because I dont want possible actions to be a dictionary- it fucks stuff up\n return next(x for x in self.possible_actions if name.lower() == x.name.lower())",
"def get_action(action_name):\n action = justrok.Globals.action_collection.action(action_name)\n if action is None:\n justrok.logger.error('action %r not found', action_name)\n return lambda: None\n else:\n return action.trigger",
"def setAction(self, func):\n\t\tself.action = func",
"def findAction(self, actionId): #$NON-NLS-1$\r",
"def act(self, infoset):\n assert self.action in infoset.legal_actions\n return self.action",
"def mappedQuicklookAction(self, method_name):\n # find currently active quicklook instance\n active_win = util.get_active_MDI_win()\n # call the function of the action on the instance\n getattr(active_win, str(method_name))()",
"def run_action(client: Client, args: Namespace):\n\n result = None\n\n if args.action == 'exec':\n result = client.run(args.command, *args.argument)\n elif args.action == 'say':\n result = client.say(args.message)\n elif args.action == 'fortune':\n result = client.fortune(\n short=not args.long, offensive=args.offensive)\n elif args.action == 'datetime':\n result = client.datetime(frmt=args.format)\n elif args.action == 'in-use':\n players = client.players\n\n if players.online:\n LOGGER.info('There are %i players online:', players.online)\n LOGGER.info(', '.join(players.names))\n else:\n LOGGER.warning('There are no players online.')\n exit(1)\n\n if result:\n LOGGER.info(result)",
"def execute_action(self, action, values=None):\r\n raise NotImplementedError('Subclass must override execute_action method')",
"def choose_action(self, board, possible_actions):\r\n pass",
"def execute(self):\n for action in self.actions:\n self._logger.info('[~] Executing %s.', action)\n self._execute_action(action)",
"def test_unknown_action(self):\n exit_string = actions.main([\"foo\"])\n self.assertEqual(\"Action foo undefined\", exit_string)",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()"
] | [
"0.7718024",
"0.74983245",
"0.72574073",
"0.7193699",
"0.71931565",
"0.6973566",
"0.6973558",
"0.69627744",
"0.6923847",
"0.6882539",
"0.68605995",
"0.6821797",
"0.6815239",
"0.6786962",
"0.67747885",
"0.67588645",
"0.6750203",
"0.67334545",
"0.67019147",
"0.67019147",
"0.67019147",
"0.67019147",
"0.67002136",
"0.67002136",
"0.6689654",
"0.6685591",
"0.666418",
"0.6644248",
"0.66410327",
"0.6633806",
"0.6633806",
"0.6626671",
"0.66010237",
"0.6598684",
"0.6564809",
"0.6543023",
"0.6537864",
"0.6529858",
"0.6527344",
"0.6527344",
"0.65259707",
"0.6518796",
"0.65019053",
"0.6491263",
"0.6455705",
"0.64494324",
"0.64281833",
"0.6414617",
"0.6412529",
"0.63908255",
"0.6358753",
"0.6358753",
"0.63498443",
"0.63441676",
"0.6326642",
"0.6322306",
"0.6308356",
"0.6305491",
"0.62949246",
"0.62942463",
"0.62896883",
"0.6287242",
"0.6283981",
"0.6276782",
"0.62511486",
"0.62329525",
"0.6218858",
"0.6206697",
"0.61997896",
"0.6170411",
"0.6167314",
"0.6155977",
"0.6154366",
"0.6139587",
"0.61374635",
"0.6134258",
"0.613074",
"0.6123912",
"0.6122215",
"0.61210823",
"0.61164856",
"0.61164856",
"0.60877734",
"0.6082964",
"0.6068304",
"0.60622275",
"0.60569453",
"0.6044832",
"0.6041216",
"0.60377675",
"0.6028755",
"0.6014651",
"0.60042894",
"0.6001775",
"0.6000411",
"0.6000411",
"0.6000411",
"0.6000411",
"0.6000411",
"0.6000411"
] | 0.60786176 | 84 |
returns the object's information | def info(self):
    return (self._title, self._version, self._descr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_objects_data(self):\n pass",
"def objects(self):",
"def getInfo():",
"def objects(self):\n\t\treturn self._objects",
"def get_objects_data(self):\n return dict(result=self.objects)",
"def GetObjects(self): \r\n return self.model.GetObjects()",
"def get_objects_data(self):\n return dict(objects=self.objects)",
"def _get_output_objects_info(self):\n if len(self.output_objects) == 0:\n return []\n\n return self.output_objects[0].keys()",
"def get_info(self):\n pass",
"def get_info(self):\n pass",
"def dump_objects(self):\n #print 'Object Count: ', self.object_store.len()\n \n for item in self.object_store:\n print 'Object Name: ', item.__dict__['Name'], ' LocalID: ', item.__dict__['LocalID']",
"def objects(self):\r\n return self._objects",
"def dataObjects(self):\n\t\treturn self._objects",
"def get_objects_data(self):\n return dict(items=self.objects)",
"def info(self) -> dict:",
"def getObject(self):\n return self.base.get(\"object\", [])",
"def get_info(self):\n return {}",
"def info(self):",
"def info(self):",
"def get_objects(self):\n return self._objects",
"def _get_information(self):\n pass",
"def info(self):\n return {}",
"def all_objects():\n objs = {}\n objs['Section'] = list(h.all_sec())\n objs['Segment'] = []\n for sec in objs['Section']:\n objs['Segment'].extend(list(sec.allseg()))\n objs['PointProcess'] = []\n for seg in objs['Segment']:\n objs['PointProcess'].extend(list(seg.point_processes()))\n \n return objs",
"def object_lists(self) -> Dict[str, List[Any]]:\n return {name: self.hyperparams[name][2] for name in self.names()\n if self.hyperparams[name][0] == 'object'}",
"def receivedObjectInfo(outputPath):\n # Open the root file and print the object information\n success = False\n fOut = ROOT.TFile.Open(outputPath, \"READ\")\n keys = fOut.GetListOfKeys()\n\n receivedObjects = dict()\n for key in keys:\n obj = key.ReadObj()\n receivedObjects[key.GetName()] = \"Obj name: {0}, Obj IsA() Name: {1}\".format(obj.GetName(), obj.IsA().GetName())\n success = True\n\n # Print to log for convenience\n logger.info(receivedObjects)\n\n return (success, receivedObjects)",
"def objects(self):\n return self.obj_to_id.keys()",
"def get_soma_objects(self):\n\n msg_store = MessageStoreProxy(database=\"soma2data\", collection=\"soma2\")\n objs = msg_store.query(SOMA2Object._type, message_query={\"map_name\":self.soma_map,\"config\":self.soma_conf})\n print \"queried soma2 objects >> \", objs\n self.soma_objects = ce.get_soma_objects()\n print \"hard coded objects >> \", [self.soma_objects[r].keys() for r in self.soma_objects.keys()]",
"def return_info(self):\n\t\treturn self.info",
"def infos(self):\n return self._infos",
"def getInfo(self):\n return self.info",
"def info(self):\n return self._info",
"def show_all_information(self):\n return self.__dict__\n # print(self.first_name)\n # print(self.last_name)\n # print(self.age)\n # print(self.name)\n # print(self.gender)\n # print(self.number_of_children)",
"def obj_list(self):\n return self._obj_list",
"def getObjectInfo(fluiddb, objectId):\n return fluiddb.objects[objectId].get(showAbout=True)",
"def info(self):\n print self.id, self.type, self.xyz.get_xyz",
"def hbObjects(self):\r\n return self.__hbObjs",
"def get_object_info(self):\n vehicle_object = Object(header=self.get_msg_header(\"map\"))\n # ID\n vehicle_object.id = self.get_id()\n # Pose\n vehicle_object.pose = self.get_current_ros_pose()\n # Twist\n vehicle_object.twist = self.get_current_ros_twist()\n # Acceleration\n vehicle_object.accel = self.get_current_ros_accel()\n # Shape\n vehicle_object.shape.type = SolidPrimitive.BOX\n vehicle_object.shape.dimensions.extend([\n self.carla_actor.bounding_box.extent.x * 2.0,\n self.carla_actor.bounding_box.extent.y * 2.0,\n self.carla_actor.bounding_box.extent.z * 2.0])\n\n # Classification if available in attributes\n if self.classification != Object.CLASSIFICATION_UNKNOWN:\n vehicle_object.object_classified = True\n vehicle_object.classification = self.classification\n vehicle_object.classification_certainty = 1.0\n self.classification_age += 1\n vehicle_object.classification_age = self.classification_age\n\n return vehicle_object",
"def info(self):\r\n return self._get('info', {})",
"def print_objects(self):\n print(\"Spaces: {}\".format([s.name for s in self.spaces]))\n print(\"Characters: {}\".format([c.name for c in self.characters]))\n print(\"Items: {}\".format([i.name for i in self.items]))",
"def get_info(self) -> List[Dict]:\n return [{} for _ in range(self.num)]",
"def getObjectDetails(self, extension, attribute, names, attributes):\n\n # Extract the the required information about the object\n # relation out of the BackendParameters for the given extension.\n of = ObjectFactory.getInstance()\n be_data = of.getObjectBackendParameters(extension, attribute)\n\n if not be_data:\n raise GOsaException(C.make_error(\"BACKEND_PARAMETER_MISSING\", extension=extension, attribute=attribute))\n\n # Collection basic information\n otype, oattr, foreignMatchAttr, matchAttr = be_data[attribute] #@UnusedVariable\n\n # Create a list of attributes that will be requested\n if oattr not in attributes:\n attributes.append(oattr)\n attrs = dict([(x, 1) for x in attributes])\n\n # Start the query and bring the result in a usable form\n index = PluginRegistry.getInstance(\"ObjectIndex\")\n\n res = index.search({\n 'or_': {'_type': otype, '_extensions': otype, oattr: names}\n }, attrs)\n\n result = {}\n mapping = {}\n\n for entry in names:\n _id = len(result)\n mapping[entry] = _id\n result[_id] = None\n\n for entry in res:\n item = {}\n for attr in attributes:\n if attr in entry and len(entry[attr]):\n item[attr] = entry[attr] if attr == 'dn' else entry[attr][0]\n else:\n item[attr] = \"\"\n\n if item[oattr] in mapping:\n _id = mapping[item[oattr]]\n result[_id] = item\n\n return {\"result\": result, \"map\": mapping}",
"def get_details(self):",
"def get_obj_info(ns, obj, human_friendly):\n if lmi_isinstance(obj, ns.CIM_StorageExtent):\n return get_device_info(ns, obj, human_friendly)\n else:\n return get_pool_info(ns, obj, human_friendly)",
"def get_info ( self ):\n proxy = self._cur_control.proxy\n return ( proxy.list(), proxy.index )",
"def infolist(self):\r\n return list(self.infoiter())",
"def getInfo(self):\n return self._info",
"def got_info(self, cloud_obj):",
"def getItems(self):\n for object in self.database:\n print(object)",
"def get_objects(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n return objs, objs_attached",
"def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))",
"def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))",
"def get_info(self):\n return None",
"def full_info(self, object, name, value):\n return self.info()",
"def detail(self):\n info = self.info()\n return info",
"def info(self):\n return self._info",
"def info(self):\n return self.client.call('GET', self.name + 'info')",
"def attributes(self):",
"def get_objects(si, args):\n # Get datacenter object.\n datacenter_list = si.content.rootFolder.childEntity\n \"\"\"\n if args.datacenter_name:\n datacenter_obj = get_obj_in_list(args.datacenter_name, datacenter_list)\n else:\n \"\"\"\n datacenter_obj = datacenter_list[0]\n\n # Get datastore object.\n datastore_list = datacenter_obj.datastoreFolder.childEntity\n \"\"\"if args.datastore_name:\n datastore_obj = get_obj_in_list(args.datastore_name, datastore_list)\n elif len(datastore_list) > 0:\"\"\"\n datastore_obj = datastore_list[0]\n #else:\n # print \"No datastores found in DC (%s).\" % datacenter_obj.name\n\n # Get cluster object.\n cluster_list = datacenter_obj.hostFolder.childEntity\n \"\"\"if args.cluster_name:\n cluster_obj = get_obj_in_list(args.cluster_name, cluster_list)\n elif len(cluster_list) > 0:\"\"\"\n cluster_obj = cluster_list[0]\n #else:\n # print \"No clusters found in DC (%s).\" % datacenter_obj.name\n\n # Generate resource pool.\n resource_pool_obj = cluster_obj.resourcePool\n\n return {\"datacenter\": datacenter_obj,\n \"datastore\": datastore_obj\n ,\"resource pool\": resource_pool_obj}",
"def get_info(self):\n return \"TODO !\"",
"def get_info(obj):\n res = {}\n res['vserver_group_id'] = obj.vserver_group_id\n if hasattr(obj, 'backend_servers'):\n res['backend_servers'] = obj.backend_servers\n if hasattr(obj, 'vserver_group_name'):\n res['vserver_group_name'] = obj.vserver_group_name\n return res",
"def get_object_information(self, obj_ref: str, **kwargs) -> Dict[str, Any]:\n return cd_client.get_object_information(\n DirectoryArn=self._dir_arn,\n ObjectReference={\n 'Selector': obj_ref\n },\n **kwargs\n )",
"def details(self):\n pass",
"def get_properties():",
"def object(self):",
"def get_object (self) :\n\n # object is a weak_ref, and may have been garbage collected - we simply\n # return 'None' then\n return self._object ()",
"def getProperties():",
"def objectFields(self):\n raise NotImplementedError",
"def get_main_information(self) -> Dict:\n if self.information is None:\n self.information = self.orthanc.get_instance_information(\n self.identifier\n )\n\n return self.information",
"def get_common_food(cls):\n objs = cls.objects\n return objs",
"def get_info(self):\n return {'q_ref': self.q_ref, 'v_ref': self.v_ref, 'U': self.U, 'type': 'POD'}",
"def list(self):\n return self.cell.objects+self.cell.tempObjects",
"def dump_objects():\n pass",
"def info() -> Dict[str, Any]:",
"def metadata(self): # -> list[Unknown]:\n ...",
"def metadata(self): # -> list[Unknown]:\n ...",
"def get_info(self):\n hits, misses, cacheSizeBytes, cacheSize = (\n self.hits,\n self.misses,\n self.__get_cache_size(),\n len(self.__recentAccessed),\n )\n filled = cacheSizeBytes / self.__maxSize\n\n return {\n \"hits\": hits,\n \"misses\": misses,\n \"cacheSize\": {\"bytes\": cacheSizeBytes, \"items\": cacheSize},\n \"filled\": filled,\n }",
"def all(self):\n return (self.__objects)",
"def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates",
"def get_all_object_names(self):\n o_objects = []\n for s in [\"Non Model\", \"Solids\", \"Unclassified\", \"Sheets\", \"Lines\"]:\n o_objects += self.design.modeler.get_objects_in_group(s)\n return o_objects",
"def info(self):\n resp = requests.get(\"%s/api/info\"%self.urlbase, verify=False)\n return resp.json",
"def clarify_objects(self): \n dict_cxt = dict(list(zip(list(map(tuple, self)), self.objects)))\n table = list(map(list, list(dict_cxt.keys())))\n objects = list(dict_cxt.values())\n return Context(table, objects, self.attributes)",
"def get_info(self) -> Optional[Dict[str, Any]]:",
"def objects (self):\n return InternalObjectList (self)",
"def lookup(obj):\n return list(dir(obj))",
"def lookup(obj):\n return list(dir(obj))",
"def info(objects):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not objects:\n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n info = mph.get_dynamic_global_properties()\n morph_per_mvest = mph.get_morph_per_mvest()\n chain_props = mph.get_chain_properties()\n for key in info:\n t.add_row([key, info[key]])\n t.add_row([\"morph per mvest\", morph_per_mvest])\n t.add_row([\"account_creation_fee\", chain_props[\"account_creation_fee\"]])\n print(t.get_string(sortby=\"Key\"))\n # Block\n for obj in objects:\n if re.match(\"^[0-9-]*$\", obj) or re.match(\"^-[0-9]*$\", obj) or re.match(\"^[0-9-]*:[0-9]\", obj) or re.match(\"^[0-9-]*:-[0-9]\", obj):\n tran_nr = ''\n if re.match(\"^[0-9-]*:[0-9-]\", obj):\n obj, tran_nr = obj.split(\":\")\n if int(obj) < 1:\n b = Blockchain(morphene_instance=stm)\n block_number = b.get_current_block_num() + int(obj) - 1\n else:\n block_number = obj\n block = Block(block_number, morphene_instance=stm)\n if block:\n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n block_json = block.json()\n for key in sorted(block_json):\n value = block_json[key]\n if key == \"transactions\" and not bool(tran_nr):\n t.add_row([\"Nr. of transactions\", len(value)])\n elif key == \"transactions\" and bool(tran_nr):\n if int(tran_nr) < 0:\n tran_nr = len(value) + int(tran_nr)\n else:\n tran_nr = int(tran_nr)\n if len(value) > tran_nr - 1 and tran_nr > -1:\n t_value = json.dumps(value[tran_nr], indent=4)\n t.add_row([\"transaction %d/%d\" % (tran_nr, len(value)), t_value])\n elif key == \"transaction_ids\" and not bool(tran_nr):\n t.add_row([\"Nr. of transaction_ids\", len(value)])\n elif key == \"transaction_ids\" and bool(tran_nr):\n if int(tran_nr) < 0:\n tran_nr = len(value) + int(tran_nr)\n else:\n tran_nr = int(tran_nr)\n if len(value) > tran_nr - 1 and tran_nr > -1:\n t.add_row([\"transaction_id %d/%d\" % (int(tran_nr), len(value)), value[tran_nr]])\n else:\n t.add_row([key, value])\n print(t)\n else:\n print(\"Block number %s unknown\" % obj)\n elif re.match(\"^[a-zA-Z0-9\\-\\._]{2,16}$\", obj):\n account = Account(obj, morphene_instance=stm)\n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n account_json = account.json()\n for key in sorted(account_json):\n value = account_json[key]\n if key == \"json_metadata\":\n value = json.dumps(json.loads(value or \"{}\"), indent=4)\n elif key in [\"posting\", \"witness_votes\", \"active\", \"owner\"]:\n value = json.dumps(value, indent=4)\n elif isinstance(value, dict) and \"asset\" in value:\n value = str(account[key])\n t.add_row([key, value])\n print(t)\n\n # witness available?\n try:\n witness = Witness(obj, morphene_instance=stm)\n witness_json = witness.json()\n t = PrettyTable([\"Key\", \"Value\"])\n t.align = \"l\"\n for key in sorted(witness_json):\n value = witness_json[key]\n if key in [\"props\"]:\n value = json.dumps(value, indent=4)\n t.add_row([key, value])\n print(t)\n except exceptions.WitnessDoesNotExistsException as e:\n print(str(e))\n # Public Key\n elif re.match(\"^\" + mph.prefix + \".{48,55}$\", obj):\n account = mph.wallet.getAccountFromPublicKey(obj)\n if account:\n account = Account(account, morphene_instance=stm)\n key_type = mph.wallet.getKeyType(account, obj)\n t = PrettyTable([\"Account\", \"Key_type\"])\n t.align = \"l\"\n t.add_row([account[\"name\"], key_type])\n print(t)\n else:\n print(\"Public Key %s not known\" % obj)\n else:\n print(\"Couldn't identify object to read\")",
"def info(self):\n return {\n \"dimension_x\": self.dimension_x,\n \"dimension_y\": self.dimension_y,\n \"api_level\": self.api_level,\n \"device_model\": self.model,\n }",
"def make_objects(self):\n pass",
"def _get_objects(self, object_type, **kwargs):\r\n params = dict()\r\n if kwargs:\r\n for key, val in kwargs.items():\r\n if '_' in key:\r\n new_key = key.replace(\"_\",\"-\") \r\n params[new_key] = val\r\n else:\r\n params[key] = val\r\n try: \r\n response = requests.get(self.api_endpoint + object_type, \r\n auth=(self.user,self.pwd), \r\n params=params, verify=False)\r\n\r\n devices = json.loads(response.text)\r\n\r\n except requests.exceptions.RequestException as e:\r\n print \"Error:\",e\r\n return 1\r\n\r\n return_objects = []\r\n for i in devices.keys():\r\n if i == u\"links\":\r\n continue \r\n for j in devices[i]:\r\n return_objects.append(XtremObjFactory(object_type,j,self))\r\n\r\n return return_objects",
"def objects(self):\n _, c = self.get_column(0)\n size = len(c)\n headers = self.headers()\n for i in range(size):\n obj = {}\n for h in headers:\n _, col = self.get_column(h)\n val = col[i]\n obj[h] = val\n yield obj",
"def lookup(obj):\n return(dir(obj))",
"def get_objects(self, image_np: np.array,\n image: Image) -> Tuple[Dict, object]:\n pass",
"def info_cache(self):\n self.info.info()\n self.dataset.info()\n self.category.info()",
"def info(obj=None):\n if obj is None:\n print (\"Python keywords:\")\n import keyword\n for kwname in keyword.kwlist:\n print (\" \", kwname)\n print(\"Built in objects:\")\n for bi_object_name in sorted(__builtins__.keys()):\n bi_object = __builtins__[bi_object_name]\n if callable(bi_object):\n if type(bi_object) is types.ClassType:\n print(\" {} (class)\".format(bi_object.__name__))\n elif type(bi_object) is types.FunctionType:\n print(\" {} (function)\".format(bi_object.__name__))\n elif hasattr(obj, \"__doc__\") and obj.__doc__ is not None:\n print (\"Documentation for %s :\\n\" % (obj.__name__))\n print (obj.__doc__)\n elif type(obj) is types.ModuleType:\n pprint(dir(obj))\n elif type(obj) is types.ClassType:\n pprint(dir(obj))\n elif type(obj) is types.InstanceType:\n pprint(dir(obj))\n pprint(dir(obj.__class__))\n return \"\"",
"def objects(self):\n if not self._objects:\n id_set = {}\n for x in self.addition_events():\n if 'id' in x: id_set[UUID(x['id'])] = 1\n self._objects = id_set.keys()\n\n return self._objects",
"def rtsobjects():\n pass",
"def lookup(obj):\n\n return (dir(obj))",
"def get_info(self):\n raise NotImplementedError(\"Robot.get_info\")",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects"
] | [
"0.77862304",
"0.7456217",
"0.7124919",
"0.7075904",
"0.7070359",
"0.68811506",
"0.68797517",
"0.68526816",
"0.6834337",
"0.6834337",
"0.68310577",
"0.6820976",
"0.68096256",
"0.6797335",
"0.6773765",
"0.67031676",
"0.6696353",
"0.6682475",
"0.6682475",
"0.66812974",
"0.66787213",
"0.64964765",
"0.64892673",
"0.64727116",
"0.6451782",
"0.6448562",
"0.6407986",
"0.6402261",
"0.63950026",
"0.63933426",
"0.63905144",
"0.63403785",
"0.63327193",
"0.6311523",
"0.6306475",
"0.6272466",
"0.62665457",
"0.6263752",
"0.62551",
"0.62544626",
"0.624999",
"0.6247726",
"0.6243111",
"0.6239722",
"0.6232841",
"0.62110955",
"0.6199319",
"0.6198266",
"0.6182518",
"0.61455876",
"0.61455876",
"0.61300427",
"0.6122221",
"0.60945547",
"0.609019",
"0.60791165",
"0.60772395",
"0.60685074",
"0.6062123",
"0.6059674",
"0.60587883",
"0.60530585",
"0.6046103",
"0.6037683",
"0.6024433",
"0.6018208",
"0.6011933",
"0.6001749",
"0.59850466",
"0.5971934",
"0.5942246",
"0.5935889",
"0.5929287",
"0.59205437",
"0.59205437",
"0.5918995",
"0.5916542",
"0.5911768",
"0.59056586",
"0.589142",
"0.5890938",
"0.588238",
"0.58811486",
"0.5870519",
"0.5870519",
"0.58697915",
"0.58681244",
"0.58652323",
"0.5860109",
"0.58528453",
"0.5847255",
"0.5846122",
"0.5830782",
"0.5829703",
"0.5823594",
"0.5814361",
"0.5808683",
"0.58069175",
"0.58063024",
"0.58063024"
] | 0.59778154 | 69 |
pass in the configuration file | def config(self, config):
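    # keep the parsed config and cache the haystack path from its Paths section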
    self._config = config
    self._haystackPath = self._config.get('Paths', 'Haystack') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configuration():",
"def config():",
"def config():",
"def __init__(self, path_to_config_file):\n self.file_path = path_to_config_file",
"def read_config(self, config_filename):",
"def use_config_file(self):\n self.config_file = self.find_config_file()\n if self.config_file:\n self.apply_config_file(self.config_file)",
"def config( **kwargs ):",
"def __init__(self, config_file_name=\"config.json\"):\n self.config_file_name = config_file_name\n self._config = self._open_config_file()",
"def __init__(self, config_file='/etc/sfa/ldap_config.py'):\n\n try:\n execfile(config_file, self.__dict__)\n\n self.config_file = config_file\n # path to configuration data\n self.config_path = os.path.dirname(config_file)\n except IOError:\n raise IOError, \"Could not find or load the configuration file: %s\" \\\n % config_file",
"def config(self):\n pass",
"def config(self):\n pass",
"def get_config_file(self):\n\n conf_file = self.args.file\n if conf_file is not None:\n if os.path.isfile(conf_file):\n config_file = open(conf_file, \"r\")\n self.main_file = yaml.load(config_file, Loader=yaml.FullLoader)\n elif os.path.isfile(\n os.path.join(get_path(\"DEFAULT\", \"config_file_path\"), conf_file)\n ):\n fpath = get_path(\"DEFAULT\", \"config_file_path\")\n config_file = open(os.path.join(fpath, conf_file), \"r\")\n self.main_file = yaml.load(config_file, Loader=yaml.FullLoader)\n else:\n self.logger.error(\n colorama.Fore.RED\n + \"ERROR!! Config file '%s' is not present \" % conf_file,\n extra=self.log_detail,\n )\n sys.exit(1)\n else:\n if self.args.hostname and self.args.testfiles:\n temp_dict = {\n \"hosts\": [{\"device\": \"\", \"username\": \"\", \"passwd\": \"\"}],\n \"tests\": [],\n }\n temp_dict[\"hosts\"][0][\"device\"] = self.args.hostname\n temp_dict[\"hosts\"][0][\"username\"] = self.args.login\n temp_dict[\"hosts\"][0][\"passwd\"] = self.args.passwd\n for tfile in self.args.testfiles:\n temp_dict[\"tests\"].append(tfile)\n self.main_file = temp_dict\n\n if (\n self.main_file.__contains__(\"sqlite\")\n and self.main_file[\"sqlite\"]\n and self.main_file[\"sqlite\"][0]\n ):\n self.chk_database(\n self.main_file,\n self.args.pre_snapfile,\n self.args.post_snapfile,\n self.args.check,\n self.args.snap,\n )\n else:\n # if --check option is given for sqlite, then snap file name is not compulsory\n # else exit the function saying arguments not correct\n if self.args.check is True and (\n self.args.pre_snapfile is None or self.args.post_snapfile is None\n ):\n self.logger.error(\n colorama.Fore.RED\n + \"Arguments not given correctly, Please refer help message\",\n extra=self.log_detail,\n )\n self.parser.print_help()\n sys.exit(1)",
"def config(ctx):\n return",
"def get_conf(self, file_name):\n try:\n self.conf = Config(file_name)\n except ImportError:\n s = \"No such file in simulation directory: \" + file_name\n self.abort(ImportError, s)",
"def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)",
"def load_config(self):\n pass",
"def initialize_from_config(self):",
"def configure(self, options, conf):",
"def readConfigFile(self):\n self.config_obj = ConfigParser.ConfigParser()\n self.config_obj.readfp(open(self.configfile))\n\n # Set the log file\n if (not self.args_obj.log_file and self.config_obj.has_option('DEFAULT','logfile')):\n self.logfile = self.config_obj.get('DEFAULT', 'logfile')\n\n # Set the baud rate\n if (not self.args_obj.baud_rate and self.config_obj.has_option('DEFAULT','baud')):\n self.baudrate = self.config_obj.get('DEFAULT', 'baud')\n\n # Set the device port \n if (not self.args_obj.device and self.config_obj.has_option('DEFAULT','device')):\n self.device = self.config_obj.get('DEFAULT', 'device')\n\n # Set the connection timeout\n if (not self.args_obj.timeout and self.config_obj.has_option('DEFAULT','timeout')):\n self.timeout = self.config_obj.get('DEFAULT','timeout')\n\n if DEBUG:\n print('(DEBUG) Config Options:')\n self.pp.pprint(self.config_obj.sections())",
"def configure(self, config_file):\n self.config = ConfigParser()\n self.config.read(config_file)\n conf = self.config\n self.sqs_queue = conf[\"sqs\"][\"queue_url\"]\n self.s3_bucket = conf[\"s3\"][\"bucket\"]\n self.harvest_key_prefix = conf[\"harvest\"][\"key_prefix\"]\n self.extract_key_prefix = conf[\"extract\"][\"key_prefix\"]\n self.geoloc_key_prefix = conf[\"geolocation\"][\"key_prefix\"]\n self.extract_csv_header = conf[\"extract\"][\"csv_header\"].split(\",\")\n self.geoloc_csv_header = conf[\"geolocation\"][\"csv_header\"].split(\",\")\n self.get_headers(os.path.join(CONFIG_DIR, \"headers\"))\n self.get_user_agents(os.path.join(CONFIG_DIR, \"user_agents\"))\n\n logging.basicConfig(\n format=\"%(asctime)s %(levelname)s %(message)s\",\n level=logging.INFO,\n filename=os.path.join(CONFIG_DIR, \"browser.log\"),\n filemode=\"a\")",
"def __init__(self, file_handle):\n config = ConfigParser.ConfigParser()\n config.readfp(file_handle)\n self.database_address_ = config.get('General', 'database_address')\n self.google_developer_key_ = config.get('Google', 'developer_key')\n self.google_cref_ = config.get('Google', 'cref')",
"def __init__(self):\n self.storefn = Config.getConfigFnPath()\n\n # Load the configuration file file\n self.load()",
"def load_from_conf(self):\r\n raise NotImplementedError",
"def __init__(self, config_path: str = \"config.json\"):\n # Change here if you want to relocate you config file\n self.config = {}\n self.load_configuration(config_path)\n self.app_name = self.config.get('app_name', self.APP_NAME)",
"def includeme(config):",
"def __init__(self, config_file: str = \"config.json\"):\n path_to_config = (Path(sys.modules[self.__module__].__file__).parent\n / config_file)\n with open(path_to_config, \"r\") as f:\n self.options = json.load(f)",
"def __init__(self, file):\n self.file = file\n self.config = self.__analyze_config()",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)",
"def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())",
"def configure(self, args):\n pass",
"def read_configuration (self):\n\t\tself.config.read(self._configfile)",
"def load_settings_from_file(self, cfg_file):\n \n #\n #\n # TODO\n # Missing settings should not cause exceptions\n #\n #\n #\n\n if not os.path.exists(cfg_file): \n raise Exception('Provided config file [%s] does not exist or cannot be read.' % cfg_file)\n\n import ConfigParser\n config = ConfigParser.ConfigParser()\n config.read(cfg_file)\n \n \n self.reference_root = config.get('Paths','reference-root')\n \n self.scratch_root = os.getcwd()\n try:\n self.scratch_root = config.get('Paths','scratch-root')\n except ConfigParser.NoOptionError:\n self.logger.info('Scratch-root setting is missing. Using current directory: %s' % self.scratch_root)\n\n\n if (self.run_folder != None):\n self.run_id = os.path.basename(self.run_folder)\n else:\n raise Exception('Set runfolder with PipelineConfig.set_runfolder() before loading settings')\n \n \n #\n # TODO\n # needs to be updated on update of settings\n #\n self.runs_scratch_dir = os.path.join(self.scratch_root, self.run_id) if self.run_folder != None else self.scratch_root\n self.logger.info('Run\\'s scratch directory: %s' % self.runs_scratch_dir)\n \n # optional results and fastq archive dirs \n self.results_archive = None\n try:\n self.results_archive = config.get('Paths','results-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No results-archive provided. Results will not be archived outside of the run\\'s scratch directory.')\n \n self.fastq_archive = None\n try:\n self.fastq_archive = config.get('Paths','fastq-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No fastq-archive provided. Fastq files will not be archived outside of the run\\'s scratch directory.')\n \n \n # optional /tmp dir\n self.tmp_dir = '/tmp'\n try:\n self.tmp_dir = config.get('Paths','tmp-dir')\n except ConfigParser.NoOptionError:\n self.logger.info('No tmp-dir provided. 
/tmp will be used.')\n \n \n \n \n # reference files\n self.reference = os.path.join(self.reference_root, config.get('Resources','reference-genome'))\n self.capture = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed'))\n self.capture_qualimap = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed-for-qualimap'))\n self.capture_plus = os.path.join(self.reference_root, config.get('Resources', 'capture-plus-regions-bed'))\n self.gene_coordinates = os.path.join(self.reference_root, config.get('Resources', 'gene-coordinates'))\n \n self.adapters = os.path.join(self.reference_root, config.get('Resources', 'adapters-fasta'))\n \n # tools\n self.bcl2fastq = config.get('Tools','bcl2fastq')\n self.trimmomatic = config.get('Tools','trimmomatic') \n self.bwa = config.get('Tools','bwa')\n self.samtools = config.get('Tools','samtools')\n self.picard = config.get('Tools','picard')\n self.gatk = config.get('Tools','gatk')\n self.freebayes = config.get('Tools','freebayes')\n self.bcftools = config.get('Tools','bcftools')\n self.qualimap = config.get('Tools','qualimap')\n \tself.fastqc\t = config.get('Tools','fastqc')\n\n\n # annovar settings\n self.convert_to_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','convert_to_annovar'))\n self.annovar_annotate = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_annotate'))\n self.table_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','table_annovar'))\n self.annovar_human_db = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_human_db'))\n self.annovar_1000genomes_eur = config.get('Annovar','annovar_1000genomes_eur')\n self.annovar_1000genomes_eur_maf_cutoff = config.get('Annovar','annovar_1000genomes_eur_maf_cutoff')\n self.annovar_inhouse_dbs = config.get('Annovar','annovar_inhouse_dbs')\n self.omim_gene_phenotype_map_file = config.get('Annovar','omim_gene_phenotype_map_file')",
"def __init__(self, filename=None):\n if filename:\n if not os.path.exists(filename):\n raise Exception(\"No configuration found at %s\" % filename)\n super(Configuration, self).__init__(filename)",
"def __init__(self, config_file):\n \n self.log = logging.getLogger(__name__)\n\n self.parser = ConfigParser.ConfigParser()\n if os.path.exists(config_file) and os.path.isfile(config_file):\n self.parser.read(config_file)\n self.log.debug(\"opened configuration '%s'\" % config_file)\n else:\n raise ConfigError(\"Config file missing\", \"File '%s' doesn't exist.\" % (config_file))\n\n self.config_file = config_file\n self.check_config()",
"def configure(self, conf):\n return",
"def parse_config(self):\n # TODO: parse config file\n pass",
"def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)\n #print(self.config)",
"def __init__(self, settings):\n self._read_config(settings)",
"def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)",
"def __init__(self, config_file=None):\n\t\tself.options = {}\n\n\t\tif config_file:\n\t\t\tself.set_file(config_file)",
"def __init__(self, config_file, verbose):\r\n self.loadConfig(config_file)\r\n self.verbose = verbose",
"def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)",
"def process_config(self, filename):\n \n self.log_message(\"processing config file: \"+filename)\n parser = SafeConfigParser()\n parser.optionxform = str\n parser.read(filename)\n self.source_files[filename] = parser\n \n sections = parser.sections()\n for section in sections:\n \n options = parser.options(section)\n params = {}\n non_std = {}\n for option in options:\n ## any option that ends with the word \"password\" will be encrypted and will automatically be decrypted upon\n ## processing \n if option in self.standard_options:\n params[option] = self.get_value(option, parser.get(section, option))\n else:\n non_std[option] = self.get_value(option, parser.get(section, option))\n\n params['non_std'] = non_std\n params['source_file'] = filename\n params['name']=section\n params['run_date']=self.run_date\n c_entry = ConfigEntry(params)\n if c_entry.ready: \n entry_num = c_entry.get_entry_type()\n self.entries[self.entry_types[entry_num]].append(c_entry)\n self.entry_dict[section] = {'source':filename,'entry':c_entry}\n self.log_message(\"Loaded Config Entry: \"+section)\n else:\n self.log_message(\"Failed to load config entry: \"+section)\n\n return self.entries",
"def config():\n file_path = None # path to the input file\n db_path = None # path to the output db\n atomic_properties = (\n \"Properties=species:S:1:pos:R:3\"\n ) # atomic properties of the input file\n molecular_properties = [\"energy\"] # molecular properties of the input file\n overwrite = False",
"def config(self):\n\t\tsys.stderr.write(\"Base class method called: config() This souldn't happen.\\n\")",
"def load_from_conf(self):\n raise NotImplementedError",
"def read_configuration_file(self,filename=None):\n\n\n # In the previous iteration, this set a bunch of public attributes. I\n # have reimplemented them as properties because it is much easier for an\n # external developer to understand property implemntation rather than search\n # for a property which maybe mutated.\n # -- EJR, 2/17/2019\n\n assert type(filename) in [type(None),str]\n\n if type(filename) is type(None):\n _filename = self.config_fn\n elif type(filename) is str:\n _filename = filename\n else:\n m = \"filename must either be a str or NoneType\"\n raise(TypeError(m))\n\n super().read_configuration_file(filename=_filename)",
"def __read_config(self):\n with open(self.config_file, 'r') as data_file:\n dict = json.load(data_file)\n self.ibooks_doc_root = dict[\"ibooks_doc_root\"]\n self.library_folder = dict[\"library_folder\"]\n self.annotation_folder = dict[\"annotation_folder\"]\n self.tmp_dir = dict[\"tmp_dir\"]",
"def __init__(self, filename, dirname='~'):\n self.config = configparser.ConfigParser()\n\n expanded_dirname = os.path.expanduser(dirname)\n self.configuration_filename = os.path.join(expanded_dirname, filename)\n if os.path.isfile(self.configuration_filename):\n self.config.read(self.configuration_filename)",
"def __init__(self, __file):\n\n\t\tself.fileName = __file\n\t\tif (os.path.isfile(self.fileName)):\n\t\t\t# config.ini found, load it\n\t\t\tself.config.read(self.fileName)\n\t\t\tself.default = False\n\t\telse:\n\t\t\t# config.ini not found, generate a default one\n\t\t\tself.generateDefaultConfig()\n\t\t\tself.default = True",
"def config(self) -> Dict[str, Any]:",
"def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]",
"def __init__(self, conf_file_location: str, template_dir: str, target_dir: str, hard_reset: bool):\n self.config: Config = yaml_loader.load(conf_file_location, Config)\n self.massage_config_file()\n self.config_dict: Dict = as_dict(self.config)\n self.template_dir = template_dir\n self.target_dir = target_dir\n self.hard_reset = hard_reset",
"def configure(self) -> None:",
"def config(self):\n raise NotImplementedError",
"def __init__(self, config: str) -> None:\n self.configuration = config",
"def __init__(self, config: str) -> None:\n self.configuration = config",
"def __init__(self, cooper_config_file):\n\t\t# Determine if the default config file will be used or a user-defined file\n\t\ttry:\n\t\t\tif cooper_config_file is None:\n\t\t\t\tprint(\"[+] Using the default config file: {}\".format(self.cooper_config_file))\n\t\t\telse:\n\t\t\t\tself.cooper_config_file = cooper_config_file\n\t\t\t\tprint(\"[+] Alternate config file identified: {}\".format(self.cooper_config_file))\n\t\texcept Exception as err:\n\t\t\tprint(\"[!] \")\n\t\t\tprint(\"L.. Details: {}\".format())\n\t\t# Open the config file for parsing\n\t\ttry:\n\t\t\tself.config_parser = configparser.ConfigParser()\n\t\t\tself.config_parser.read(self.cooper_config_file)\n\t\texcept Exception as err:\n\t\t\tprint(\"[!] Could not open the config file -- make sure it exists and is readable.\")\n\t\t\tprint(\"L.. Details: {}\".format(err))\n\t\t# Parse the config file's values\n\t\ttry:\n\t\t\tself.landing_page_url_replacement = self.config_section_map(\"Replacement URLs\")[\"landing_page_url_replacement\"]\n\t\t\tself.landing_page_form_action = self.config_section_map(\"Replacement URLs\")[\"landing_page_form_action\"]\n\t\t\tself.email_replacement_url = self.config_section_map(\"Replacement URLs\")[\"email_replacement_url\"]\n\t\t\tself.email_tracker_url = self.config_section_map(\"Replacement URLs\")[\"email_tracker_url\"]\n\t\t\tself.path_to_chromedriver = self.config_section_map(\"Browser\")[\"driver_path\"]\n\t\t\tif self.config_section_map(\"Browser\")[\"user_agent\"] == \"\":\n\t\t\t\tself.user_agent = \"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6\"\n\t\t\telse:\n\t\t\t\tself.user_agent = self.config_section_map(\"Browser\")[\"user_agent\"]\n\t\texcept Exception as err:\n\t\t\tprint(\"[!] Failed to read all values from the config file! Exiting...\")\n\t\t\tprint(\"L.. Details: {}\".format(err))\n\t\t\tsys.exit()",
"def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))",
"def __init__(self, filepath):\n try:\n config_file_r = open(filepath)\n self.sim_parametres = yaml.load(config_file_r, Loader=yaml.FullLoader)\n except:\n raise Exception(\"Le fichier de configuration n'a pas été atteint ou n'a pas pu être lu. Veuillez vérifier \"\n \"qu'il n'y ait aucune erreur de syntaxe.\")",
"def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)",
"def __init__(self):\n self.filename = pathlib.Path(__file__).parent.absolute().__str__() + '/../../data/config.ini'\n self.data = ConfigParser()\n self.data.read(self.filename)",
"def get_config(self):\n if self.config is None:\n self.config = Configuration()\n\n #Hard coded the file for now, will change with Django interface\n self.config.parse_file('config')",
"def config(self, **kw):\n self.cfg_fixture.config(**kw)",
"def configure(self):\r\n pass",
"def __init__(self, config_path=None):\n config_path = config_path or CONF.api_paste_config\n if os.path.exists(config_path):\n self.config_path = config_path\n else:\n self.config_path = CONF.find_file(config_path)",
"def __init__(self, config_file = 'config.yaml'):\n\n self.name = ''\n self.img_dir = ''\n self.out_dir = ''\n self.cam_file = ''\n self.options_file = ''\n self.output_xml_file = ''\n\n # If there is an options file, it will overwrite the defaults \n if config_file is not None:\n self.load(config_file)",
"def configure(self):\n pass",
"def configure(self):\n pass",
"def __init__(self, configfile='settings.cfg'):\n \n self.configfile = configfile\n \n # Load parameters from config file\n config = ConfigParser.RawConfigParser()\n config.read(self.configfile)\n \n # Set parameters to default if not in config file \n self.title=config.get('Settings','title') if config.has_option(\n 'Settings','title') else 'REDPy Catalog'\n self.filename=config.get('Settings','filename') if config.has_option(\n 'Settings','filename') else 'redpytable.h5'\n self.groupName=config.get('Settings','groupName') if config.has_option(\n 'Settings','groupName') else 'default'\n self.groupDesc=config.get('Settings','groupDesc') if config.has_option(\n 'Settings','groupDesc') else 'Default Test Run'\n self.nsta=config.getint('Settings','nsta') if config.has_option(\n 'Settings','nsta') else 8 \n self.station=config.get('Settings','station') if config.has_option(\n 'Settings','station') else 'SEP,YEL,HSR,SHW,EDM,STD,JUN,SOS'\n self.channel=config.get('Settings','channel') if config.has_option(\n 'Settings','channel') else 'EHZ,EHZ,EHZ,EHZ,EHZ,EHZ,EHZ,EHZ'\n self.network=config.get('Settings','network') if config.has_option(\n 'Settings','network') else 'UW,UW,UW,UW,UW,UW,UW,UW'\n self.location=config.get('Settings','location') if config.has_option(\n 'Settings','location') else '--,--,--,--,--,--,--,--'\n self.samprate=config.getfloat('Settings','samprate') if config.has_option(\n 'Settings','samprate') else 100.\n self.nstaC=config.getint('Settings','nstaC') if config.has_option(\n 'Settings','nstaC') else 5\n self.printsta=config.getint('Settings','printsta') if config.has_option(\n 'Settings','printsta') else 2\n self.server=config.get('Settings','server') if config.has_option(\n 'Settings','server') else 'IRIS'\n self.port=config.getint('Settings','port') if config.has_option(\n 'Settings','port') else 16017\n self.nsec=config.getint('Settings','nsec') if config.has_option(\n 'Settings','nsec') else 3600\n self.lwin=config.getfloat('Settings','lwin') if config.has_option(\n 'Settings','lwin') else 7.\n self.swin=config.getfloat('Settings','swin') if config.has_option(\n 'Settings','swin') else 0.8\n self.trigon=config.getfloat('Settings','trigon') if config.has_option(\n 'Settings','trigon') else 3.\n self.trigoff=config.getfloat('Settings','trigoff') if config.has_option(\n 'Settings','trigoff') else 2.\n self.kurtmax=config.getfloat('Settings','kurtmax') if config.has_option(\n 'Settings','kurtmax') else 80.\n self.kurtfmax=config.getfloat('Settings','kurtfmax') if config.has_option(\n 'Settings','kurtfmax') else 150.\n self.oratiomax=config.getfloat('Settings','oratiomax') if config.has_option(\n 'Settings','oratiomax') else 0.06\n self.kurtwin=config.getfloat('Settings','kurtwin') if config.has_option(\n 'Settings','kurtwin') else 5.\n self.winlen=config.getint('Settings','winlen') if config.has_option(\n 'Settings','winlen') else 1024\n self.fmin=config.getfloat('Settings','fmin') if config.has_option(\n 'Settings','fmin') else 1.\n self.fmax=config.getfloat('Settings','fmax') if config.has_option(\n 'Settings','fmax') else 10.\n self.filomin=config.getfloat('Settings','filomin') if config.has_option(\n 'Settings','filomin') else 1.\n self.filomax=config.getfloat('Settings','filomax') if config.has_option(\n 'Settings','filomax') else 2.5\n self.fiupmin=config.getfloat('Settings','fiupmin') if config.has_option(\n 'Settings','fiupmin') else 5.\n self.fiupmax=config.getfloat('Settings','fiupmax') if config.has_option(\n 'Settings','fiupmax') else 10.\n 
self.telefi=config.getfloat('Settings','telefi') if config.has_option(\n 'Settings','telefi') else -1.\n self.teleok=config.getint('Settings','teleok') if config.has_option(\n 'Settings','teleok') else 1 \n self.cmin=config.getfloat('Settings','cmin') if config.has_option(\n 'Settings','cmin') else 0.7\n self.ncor=config.getint('Settings','ncor') if config.has_option(\n 'Settings','ncor') else 4\n self.minorph=config.getfloat('Settings','minorph') if config.has_option(\n 'Settings','minorph') else 0.05\n self.maxorph=config.getfloat('Settings','maxorph') if config.has_option(\n 'Settings','maxorph') else 7.\n self.minplot=config.getint('Settings','minplot') if config.has_option(\n 'Settings','minplot') else 3\n self.dybin=config.getfloat('Settings','dybin') if config.has_option(\n 'Settings','dybin') else 1.\n self.hrbin=config.getfloat('Settings','hrbin') if config.has_option(\n 'Settings','hrbin') else 1.\n self.recplot=config.getfloat('Settings','recplot') if config.has_option(\n 'Settings','recplot') else 14.\n \n # Derived Settings\n self.ptrig=1.5*self.winlen/self.samprate\n self.atrig=3*self.winlen/self.samprate\n self.mintrig=self.winlen/self.samprate\n self.wshape = int((self.ptrig + self.atrig)*self.samprate) + 1",
"def load_config(self, filename, fileconfout=None):\n self._filename = filename\n self._init_config = open(filename).read().splitlines()\n metaconfig = [l for l in self._init_config\n if not (l.startswith(\"#\") or l.startswith(\"\\t\") or l.startswith(\" \")) and len(l)>0]\n\n for k in metaconfig:\n key, *value = k.split()\n if len(value)==1:\n self.set_value(key, value[0], None)\n \n elif len(value)>1:\n if value[1]==\"#\":\n self.set_value(key, value[0], \" \".join(value[2:]))\n else:\n raise IOError(\"Cannot parse the line %s\"%k)\n else:\n raise IOError(\"cannot parse the line %s\"%k)\n if fileconfout is not None:\n self.set_value(\"PARA_OUT\", fileconfout)",
"def _get_config(self, unit, filename):\n file_contents = unit.file_contents(filename)\n config = ConfigParser.ConfigParser()\n config.readfp(io.StringIO(file_contents))\n return config",
"def __create_config_file__(fileparser):\n fileparser['server'] = {\n 'server': Configuration.server + \" # Server IP\",\n 'port': str(Configuration.port) +\n \" # Values allowed: \" + str(Configuration.port_min) +\n \"..\" + str(Configuration.port_max),\n 'certfile': Configuration.certfile +\n \" # Use an absolute path\",\n 'timeout': str(Configuration.timeout) +\n \" # Timeout of the connection request\"\n }\n fileparser['client'] = {\n 'curve1': Configuration.curve1 +\n \" # Values allowed: secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher1': Configuration.cipher1 +\n \" # Values allowed: aes-128-cbc, aes-256-cbc, etc.\",\n 'curve2': Configuration.curve2 +\n \" # Values allowed: None, secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher2': Configuration.cipher2 +\n \" # Values allowed: None, aes-128-cbc, aes-256-cbc, etc.\",\n 'curve3': Configuration.curve3 +\n \" # Values allowed: None, secp521r1, sect571r1, secp384r1, etc.\",\n 'cipher3': Configuration.cipher3 +\n \" # Values allowed: None, aes-128-cbc, aes-256-cbc, etc.\"\n }\n fileparser['ui'] = {\n 'lock': str(Configuration.lock) +\n \" # Lock screen - Values allowed: 0 or a positive integer\",\n 'colour': str(Configuration.colour) +\n \" # If available use colours (1) or not (0)\",\n 'colourB': Configuration.colourB +\n \" # Colour for editable widgets (button, input box...)\",\n 'colourD': Configuration.colourD +\n \" # Colour for decoration (label, frame...)\",\n 'colourT': Configuration.colourT +\n \" # Colour for titles\",\n 'colourM': Configuration.colourM +\n \" # Colour for messages\"\n }\n with open(Configuration.configfile, 'w') as configfile:\n fileparser.write(configfile)\n os.chmod(Configuration.configfile,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IREAD | stat.S_IWRITE)",
"def set_file(self, config_file):\n\t\tif not os.path.exists(config_file):\n\t\t\traise ConfigFileNotFound(config_file)\n\t\tself.parse(config_file)",
"def configure(self, options, conf):\n pass",
"def _config_file_callback(ctx, param, value):\n ctx.default_map = ctx.default_map or {}\n section = ctx.info_name\n\n if value:\n config = anyconfig.load(value)[section]\n ctx.default_map.update(config)\n\n return value",
"def _setConfig(self,config):\n if config:\n self.config = config\n else:\n from layman import config\n self.config = config",
"def conf():\n global config\n return config",
"def load_configuration(self) -> None:\n config_file = self.default_config_file\n if self.config_file:\n config_file = self.config_file\n self.config = configparser.ConfigParser(delimiters=\"=\")\n # mypy is unhappy with us assigning to a method - (monkeypatching?)\n self.config.optionxform = lambda option: option # type: ignore\n self.config.read(config_file)",
"def _configure(self):\n pass",
"def loadConfig():\n lines = []\n config = {}\n here = path.dirname(__file__)\n fn = path.join(here,'manatee.conf')\n try:\n with codecs.open(fn,'rU','utf-8') as conf:\n lines = conf.readlines()\n conf.close()\n except IOError as e:\n print \" Could not open configuration file: %s\" % e\n\n for line in lines:\n try:\n line = line.strip()\n if line:\n values = [x.strip() for x in line.split('=')]\n config[values[0]] = values[1]\n except Exception as e:\n print \"There was an error in the configuration file: %s\" % e\n # TODO: Any strings from the config file that might be displayed or passed into the SQL server need to be validated here.\n# config = validateConfig(config)\n return config",
"def configure(self):\n\n pass",
"def __init__(self, config):\n self.config = config",
"def __init__(self, config):\n self.config = config",
"def __init__(self, config):\n self.config = config",
"def read_file(self, filename):\n # The config file is Python code -- makes life easy.\n config_vars = {}\n try:\n execfile(filename, config_vars)\n except IOError, exc:\n if exc.filename is None: # arg! execfile() loses filename\n exc.filename = filename\n raise exc\n self.set_from_dict(config_vars)",
"def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)",
"def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]",
"def __init__(self):\n\n self.config = load_config()\n self.set_env_var()",
"def load_config(self, config_file):\n self.config = ConfigParser.ConfigParser()\n self.config.read(config_file)",
"def __init__(self, filename=\"config.ini\"):\n if not os.path.isfile(filename):\n self.set_default_config(filename)\n\n self.config = configparser.ConfigParser()\n self.config.read(filename)\n\n self.filename = filename\n self.database_name = self.config.get('config',\n 'database_name',\n fallback='manga.db')\n self.volume_limit = self.config.getint('config',\n 'volume_limit',\n fallback=128)\n self.series_per_page = self.config.getint('config',\n 'series_per_page',\n fallback=0)\n self.compact_list = self.config.getboolean('config',\n 'compact_list',\n fallback=False)\n self.show_empty_series = self.config.getboolean('config',\n 'show_empty_series',\n fallback=False)\n self.default_to_gui = self.config.getboolean('config',\n 'default_to_gui',\n fallback=True)",
"def _load_config(self, args: argparse.Namespace):\n #\n # Load a config, filename may or may-not be provided...\n #\n try:\n self._config = TortugaScriptConfig.load(args.config)\n\n except ConfigException as ex:\n print(str(ex))\n sys.exit(0)\n\n #\n # Override the config with any provided argument values\n #\n if args.url:\n self._config.url = args.url\n if args.username:\n self._config.username = args.username\n if args.password:\n self._config.password = args.password\n if args.token:\n self._config.token = args.token\n self._config.verify = args.verify",
"def __init__(self, file):\n self.__config = file\n with open(self.__config) as json_file:\n data = json.load(json_file)\n self.__data = data",
"def __init__(self, filename):\n if filename is None:\n self.config = toml.load('configs/default.conf')\n return\n self.config = toml.load(filename)\n self.config['directory'] = {}\n self.config['directory']['root'] = os.path.dirname(\n os.path.realpath(__file__))\n self.config['directory']['datasets'] = os.path.join(\n self.config['directory']['root'], 'datasets')",
"def __init__(self, filename):\n self.cfg_spec = ConfigObj(config_spec_text.splitlines(), list_values=False)\n self.cfg_filename = filename\n valid = Validator()\n if not os.path.exists(self.cfg_filename):\n #no configuration file found\n logger.info(\"File %s not found, so creating one from you from defaults\" % self.cfg_filename)\n cfg = ConfigObj(configspec=self.cfg_spec, stringify=True, list_values=True)\n cfg.filename = self.cfg_filename\n test = cfg.validate(valid, copy=True)\n cfg.write()\n self.cfg = ConfigObj(self.cfg_filename, configspec=self.cfg_spec)\n rtn = self.cfg.validate(valid, preserve_errors=True)\n if type(rtn) == types.BooleanType and rtn:\n logger.info(\"Config file validated\")\n self.tested = True\n else:\n self.tested = False\n res = flatten_errors(self.cfg, rtn)\n self.errortxt = ''\n for row in res:\n self.errortxt += 'In Section %s, key %s has error: %s' % (row[0], row[1], row[2])\n logger.error(self.errortxt)",
"def __init__(self, custom_file=None):\n self.home = os.path.abspath(os.path.expanduser('~'))\n # Static Defaults\n defaults = \\\n {\n 'cfg_sn_username' : '',\n 'cfg_sn_password' : '',\n 'cfg_nt_ext' : 'txt',\n 'cfg_nt_path' : os.path.join(self.home, 'Simplenote'),\n 'cfg_nt_trashpath' : '.trash',\n 'cfg_nt_filenamelen' : '60',\n 'cfg_log_level' : 'info'\n }\n\n cp = configparser.SafeConfigParser(defaults)\n if custom_file is not None:\n self.configs_read = cp.read([custom_file])\n else:\n self.configs_read = cp.read([os.path.join(self.home, '.snsync')])\n\n cfg_sec = 'snsync'\n\n if not cp.has_section(cfg_sec):\n cp.add_section(cfg_sec)\n\n self.configs = collections.OrderedDict()\n\n #\n # Environment Varialbles over-ride config file settings.\n # Config files are cfg_abc\n # Envs are sn_abc\n #\n\n if os.environ.get('sn_username') is None:\n val_sn_username = cp.get(cfg_sec, 'cfg_sn_username', raw=True)\n else:\n val_sn_username = os.environ.get('sn_username')\n self.configs['sn_username'] = [val_sn_username, 'Simplenote Username']\n\n if os.environ.get('sn_password') is None:\n val_sn_passowrd = cp.get(cfg_sec, 'cfg_sn_password', raw=True)\n else:\n val_sn_passowrd = os.environ.get('sn_password')\n self.configs['sn_password'] = [val_sn_passowrd, 'Simplenote Password']\n\n if os.environ.get('sn_nt_ext') is None:\n val_sn_nt_ext = cp.get(cfg_sec, 'cfg_nt_ext')\n else:\n val_sn_nt_ext = os.environ.get('sn_nt_ext')\n self.configs['cfg_nt_ext'] = [val_sn_nt_ext, 'Note file extension']\n\n if os.environ.get('sn_nt_path') is None:\n val_sn_nt_path = cp.get(cfg_sec, 'cfg_nt_path')\n else:\n val_sn_nt_path = os.environ.get('sn_nt_path')\n self.configs['cfg_nt_path'] = [val_sn_nt_path, 'Note storage path']\n\n if os.environ.get('sn_nt_trashpath') is None:\n val_sn_nt_trashpath = cp.get(cfg_sec, 'cfg_nt_trashpath')\n else:\n val_sn_nt_trashpath = os.environ.get('sn_nt_trashpath')\n self.configs['cfg_nt_trashpath'] = [val_sn_nt_trashpath, 'Note Trash Bin Folder for deleted notes']\n\n if os.environ.get('sn_nt_filenamelen') is None:\n val_sn_nt_filenamelen = cp.get(cfg_sec, 'cfg_nt_filenamelen')\n else:\n val_sn_nt_filenamelen = os.environ.get('sn_nt_filenamelen')\n self.configs['cfg_nt_filenamelen'] = [val_sn_nt_filenamelen, 'Length of Filename']\n\n if os.environ.get('sn_log_level') is None:\n val_sn_log_level = cp.get(cfg_sec, 'cfg_log_level')\n else:\n val_sn_log_level = os.environ.get('sn_log_level')\n self.configs['cfg_log_level'] = [val_sn_log_level, 'snsync log level']\n\n # Dynamic Defaults\n if os.environ.get('sn_db_path') is None:\n if cp.has_option(cfg_sec, 'cfg_db_path'):\n val_sn_db_path = cp.get(cfg_sec, 'cfg_db_path')\n else:\n val_sn_db_path = os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.sqlite')\n else:\n val_sn_db_path = os.environ.get('sn_db_path')\n self.configs['cfg_db_path'] = [val_sn_db_path, 'snsync database location']\n\n if os.environ.get('sn_log_path') is None:\n if cp.has_option(cfg_sec, 'cfg_log_path'):\n val_sn_log_path = cp.get(cfg_sec, 'cfg_log_path')\n else:\n val_sn_log_path = os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.log')\n else:\n val_sn_log_path = os.environ.get('sn_log_path')\n self.configs['cfg_log_path'] = [val_sn_log_path, 'snsync log location']",
"def __init__(self):\n self.config = {}"
] | [
"0.76724064",
"0.7665119",
"0.7665119",
"0.7316571",
"0.72797155",
"0.7215086",
"0.71913195",
"0.71776897",
"0.7046851",
"0.69839555",
"0.69839555",
"0.69595927",
"0.69369966",
"0.69369495",
"0.6893396",
"0.6797057",
"0.67788506",
"0.6771583",
"0.6738065",
"0.673592",
"0.6733803",
"0.6723598",
"0.66991764",
"0.66793644",
"0.66642475",
"0.6659781",
"0.6659633",
"0.6658472",
"0.6658472",
"0.6658472",
"0.6658472",
"0.6656802",
"0.6655715",
"0.6644077",
"0.663228",
"0.66291386",
"0.66261154",
"0.66248244",
"0.6622555",
"0.6618767",
"0.65990514",
"0.6594339",
"0.6584121",
"0.6573334",
"0.65700674",
"0.65563524",
"0.65449274",
"0.6541434",
"0.6513905",
"0.6513322",
"0.65106636",
"0.65104455",
"0.6509366",
"0.6508441",
"0.6493135",
"0.64587104",
"0.64537275",
"0.6447534",
"0.6446663",
"0.6442702",
"0.6442702",
"0.64399827",
"0.64305276",
"0.64303976",
"0.6426348",
"0.6422151",
"0.6421725",
"0.64195937",
"0.641417",
"0.64069456",
"0.6406632",
"0.63960403",
"0.63960403",
"0.6388453",
"0.63859606",
"0.6372769",
"0.63681054",
"0.6358472",
"0.6357257",
"0.6356187",
"0.6347912",
"0.63467085",
"0.634342",
"0.63365865",
"0.6332214",
"0.6329594",
"0.6317343",
"0.6317343",
"0.6317343",
"0.63003874",
"0.6287304",
"0.6286728",
"0.6285641",
"0.6284403",
"0.62800205",
"0.62768906",
"0.62648404",
"0.6259277",
"0.6253593",
"0.62513995",
"0.62389123"
] | 0.0 | -1 |
if this is declared, the calling program will pass in the cursor from the existing db | def acceptDB(self, db):
self._db = db | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cursor():\n dbh = handle()\n return dbh.cursor()",
"def get_cursor(self, *args, **kwargs):",
"def __cursor(cls):\n print('|-- Richiesta cursore da:'+str(cls.__dbCon))\n return cls.__dbCon.cursor( cursor_factory = psycopg2.extras.DictCursor )",
"def __enter__(self) -> 'cursor':\n self.conn = cx_Oracle.connect(self.configuration)\n self.cursor = self.conn.cursor()\n return self.cursor",
"def get_cursor(self):\n self.cur = self.dbcon.cursor()\n return self.cur",
"def cursor(self):\n return self._conn.cursor()",
"def __enter__(self) -> 'DBcursor':\n self.conn = connector.connect(**self.dbconfig)\n self.cursor = self.conn.cursor()\n return self.cursor",
"def cursor():\n return get_dbconn(\"nldn\").cursor()",
"def __init__(self, cursor):\n self.__cursor = cursor",
"def getCursor(self) -> sqlite3:\n return self.cursor",
"def _execute(self, *args):\n cursor = self.db.cursor()\n cursor.execute(*args)\n return cursor",
"def _get_cursor(self):\n conn = self._connect()\n conn.autocommit = True\n cursor = conn.cursor()\n return cursor",
"def dbcursor(self):\n return self.__dbcursor",
"def _cursor(self):\n cursor = self.conn.cursor()\n\n return cursor",
"def create_cursor(self):\r\n cursor = self.connection.cursor()\r\n return cursor",
"def _cursor_collection(self, cursor):\n ...",
"def _cursor_collection(self, cursor):\n ...",
"def __enter__(self) -> 'cursor':\n self.conn = pymysql.connect(self.configuration)\n #self.conn = pyodbc.connect(self.configuration)\n self.cursor = self.conn.cursor()\n return self.cursor",
"def __enter__(self):\n return self.cursor",
"def get_cursor():\n cur = conn.cursor(cursor_factory=DictCursor)\n return cur",
"def get_cursor(file_name):\n con = lite.connect(file_name)\n con.row_factory = lite.Row\n return con.cursor()",
"def __init__(self):\n self.con_dev = my_connection.dev_connection(database='nhat', user='dbw') \n self.cursor = self.con_dev.cursor()",
"def cursor(file_name):\n con = sql.connect(file_name)\n con.row_factory = sql.Row\n return con.cursor()",
"def get_cursor():\n return _thread_local.connection.cursor()",
"def db_create_cursor(self, database_name):\n\n cursor = self.connections[database_name].cursor()\n return cursor",
"def cursor(self):\n\t\treturn self._cursor",
"def get_cursor():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect_db()\n return db.cursor()",
"def connectDB(self): \n #connect to the database\n try:\n print(self.pg_dbname)\n self.conn = psycopg2.connect(\"dbname=%s user=%s password=%s host=%s port=%s\" % (self.pg_dbname, self.pg_username, self.pg_password, self.pg_host, self.pg_port))\n print(\"connected!\")\n except psycopg2.Error as e:\n print(\"I am unable to connect to the database\")\n print(e)\n\n #define cursor\n self.cur = self.conn.cursor()",
"def cursor(self):\n return self.conn.cursor()",
"def get_cursor(self):\n try:\n self.cursor = self.connection.cursor()\n logging.getLogger(__name__).info(\"Cursor was created.\")\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with cursor creating. %s\" %er)\n finally:\n return self.cursor",
"def __init__(self, cursor):\n\n self.cursor = cursor\n self.number = None",
"def managed_cursor(self, cursor_factory=None):\n\n self.conn_url = (f'postgresql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}')\n self.conn = psycopg2.connect(self.conn_url)\n self.conn.autocommit = True\n self.curr = self.conn.cursor(cursor_factory=cursor_factory)\n try:\n yield self.curr\n finally:\n self.curr.close()\n self.conn.close()",
"def get_cursor(self):\n return self.connection.cursor()",
"def _cursor_collection(self, cursor_doc):\n ...",
"def get_cursor(self):\n return self.cursor",
"def _execute(self, db):\n raise NotImplementedError",
"def cursor(self):\r\n if self._closed:\r\n raise Error('The connection to the database has been closed.')\r\n return Cursor(self)",
"def __enter__(self):\n try:\n self.conn = sqlite3.connect(self.database_configuration)\n self.cursor = self.conn.cursor()\n\n return self.cursor\n except Exception as error:\n print(f\"DBCM::__enter__::{error}\")",
"def cursor(self):\n if self.__connection is None:\n self.connect()\n return self.__connection.cursor()",
"def __init__(self, database_name):\n try:\n self.db = psycopg2.connect(\"dbname={}\".format(database_name))\n self.cursor = self.db.cursor()\n except Exception as ex:\n print(ex)",
"def cursor(self) -> NamedTupleCursor:\n return self.connection.cursor",
"def cur(self):\n return self.conn.cursor()",
"def manual_enter(self):\n self._dbconnect = sqlite3.connect(self._db_file)\n\n # Set row_factory to access columns by name\n self._dbconnect.row_factory = sqlite3.Row\n\n # Create a cursor to work with the db\n self._cursor = self._dbconnect.cursor()",
"def db_execute_query(db_connection, query, query_args):\n cursor = db_connection.cursor()\n #datalab_logger_connections.info(\"reading database[Query. May Take Time]...\")\n cursor.execute(query, query_args)\n #datalab_logger_connections.info(\"finish to query database\")\n return cursor",
"def oracle_cursor(query):\n conf_file_dir = os.path.dirname(os.path.realpath(__file__))\n conf_file = conf_file_dir + os.sep + '.setup.conf'\n (username, password, host, port, sid) = credential_setup(conf_file)\n dsn_tns = cx_Oracle.makedsn(host, port, sid)\n connection = cx_Oracle.connect(username, password, dsn_tns)\n cursor = connection.cursor()\n cursor.execute(query)\n\n return cursor",
"def cursor(self):\n with self.conn as c:\n yield c.cursor()",
"def cursor(self):\n with self.conn as c:\n yield c.cursor()",
"def __execute__(self, sql, commit=True):\n cursor = None\n PDEBUG('Executing SQL: %s'%sql)\n try:\n cursor = self.conn.execute(sql)\n except sqlite3.IntegrityError as e:\n print('DUP: %s' % e)\n except Exception as e:\n print('FATAL: %s -- %s' % (e, sql))\n else:\n if commit:\n self.conn.commit()\n\n return cursor",
"def _during_execute(self, db):\n pass",
"def cursor(self):\n with self.connection() as conn:\n cursor = conn.cursor(prepared=True)\n try:\n yield cursor\n finally:\n cursor.close()",
"def cdr_cursor(self):\n\n if not hasattr(self, \"_cdr_cursor\"):\n opts = dict(tier=self.tier, user=\"CdrGuest\")\n self._cdr_cursor = db.connect(**opts).cursor()\n return self._cdr_cursor",
"def execute(query):\n print query\n cursor.execute(query)",
"def exec_get_cur(self, request, params={}, extra_params = None):\n con = self.create_connection()\n cur = con.cursor()\n cur_exec(cur, request, params)\n return cur",
"def execute(cursor, query):\n while True:\n try:\n cursor.execute(query)\n break\n except Exception as e:\n print(\"Database query: {} {}\".format(cursor, query))\n print(\"Database retry reason: {}\".format(e))\n return cursor",
"def _exec (self, sql, **kwargs):\n\n kwargs['id'] = self.id\n cursor = self.connection.cursor ()\n cursor.execute (sql.replace ('table_name', self.table_name), kwargs)\n return cursor",
"def new_cursor(self, db, collection):\n if self._mongo_client == None:\n raise Exception(\"mongo_client is None\")\n cursor = self._mongo_client[db][collection]\n return cursor",
"def cursor(cls) -> Iterator[sqlite3.Cursor]:\n with closing(cls.db.cursor()) as cur:\n yield cur",
"def cursor(self):\n return self._adapter.cursor()",
"def get_cursor(self):\n return self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)",
"def cursor(self):\n return self._cursor",
"def cursor(self):\n return self._cursor",
"def cursor(self):\n return self._cursor",
"def cursorExecute(self, query):\n return self.cursor.execute(query)",
"def query_to_cur(dbh, query, verbose=verbose):\n if verbose : \n print query\n cur = dbh.cursor()\n cur.execute(query)\n\n return cur",
"def execute(self, qry):\n def internal():\n print 'qry = ', qry\n self._cur = self.get_cursor()\n print 'self._cur = ', self._cur\n self._cur.execute(qry)\n # self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor).execute(qry)\n rows = self._cur.fetchall()\n return rows\n\n return self._retry(internal)",
"def get_database_cursor(conn=None):\n\n if not conn:\n conn = get_database_connection()\n\n return conn.cursor()",
"def _run_query(self):",
"def __init__(self, cursor, mapper):\n super().__init__(cursor)\n self.mapper = mapper",
"def query_to_cur(dbh, qry, args):\n if args.debug:\n print(datetime.datetime.strftime(datetime.datetime.now(), \"%D %H:%m:%S\"), qry, file=sys.stderr)\n t0 = time.time()\n cur = dbh.cursor()\n cur.execute(qry)\n print(\"query took\", time.time() - t0, \"seconds\")\n return cur",
"def _isolate_db_query(self, query_fn, ret_lst, args, kwargs):\n conn = sqlite3.connect(\n self.db_filename,\n isolation_level=None,\n timeout=self.transaction_timeout)\n try:\n conn.execute(\"PRAGMA locking_mode=EXCLUSIVE;\").close()\n conn.execute(\"BEGIN EXCLUSIVE;\").close()\n\n # This fn can call things like: ret = conn.execute(qs).fetchall()\n # Remember that all cursors need to be closed!\n query_fn(conn, ret_lst, *args, **kwargs)\n\n conn.commit()\n except Exception:\n try:\n conn.rollback()\n except Exception:\n pass\n raise\n finally:\n try:\n conn.close()\n except Exception:\n pass",
"def os_start_db( self, ):\r\n pass",
"def yield_db_cursor(connect_params=DB_PARAMS, cursor_type=DictCursor):\n\n with psycopg2.connect(**connect_params) as con:\n with con.cursor(cursor_factory=cursor_type) as cur:\n yield cur",
"def __call__(self, dbio, *args, **kwargs):\n sql = self.decorated(dbio, *args, **kwargs)\n if not dbio.testing:\n logger.debug(f\"running select:{sql}\")\n cur = dbio.conn.cursor()\n cur.execute(sql)\n results = cur.fetchall()\n columns = [desc[0] for desc in cur.description]\n cur.close()\n dbio.conn.commit()\n return results, columns\n else:\n logger.debug(\"will run:{sql}\")\n return None, None",
"def with_cursor(fn):\n @functools.wraps(fn)\n def wrapped_fn(self, *args, **kwargs):\n with contextlib.closing(self.connect()) as connection:\n with contextlib.closing(connection.cursor()) as cursor:\n fn(self, cursor, *args, **kwargs)\n return wrapped_fn",
"def conn_curs():\n dbname = \"abcdefgh\"\n password = \"Acka-1jfue4-snYmkall\"\n host = \"db.elephantsql.com\"\n\n connection = psycopg2.connect(dbname=dbname, user=dbname,\n password=password, host=host)\n cursor = connection.cursor()\n return connection, cursor",
"def get_cursor(self, commit=True):\n\t\tconnection = pg8000.connect(**self._dsn)\n\t\tcursor = RigorCursor(connection.cursor())\n\t\ttry:\n\t\t\tyield cursor\n\t\texcept pg8000.IntegrityError as error:\n\t\t\texc_info = sys.exc_info()\n\t\t\tself.rollback(cursor)\n\t\t\traise rigor.database.IntegrityError, exc_info[1], exc_info[2]\n\t\texcept pg8000.DatabaseError as error:\n\t\t\texc_info = sys.exc_info()\n\t\t\tself.rollback(cursor)\n\t\t\traise rigor.database.DatabaseError, exc_info[1], exc_info[2]\n\t\texcept:\n\t\t\texc_info = sys.exc_info()\n\t\t\tself.rollback(cursor)\n\t\t\traise exc_info[0], exc_info[1], exc_info[2]\n\t\telse:\n\t\t\tif commit:\n\t\t\t\tself.commit(cursor)\n\t\t\telse:\n\t\t\t\tself.rollback(cursor)",
"def _set_cursor(self, cursor):\n self._cursor = cursor",
"def hook_db(self):\n # get dates as strings from the database so that we can serialize to\n # json this is not working for some reason. another solution has been\n # reached, but this would be a better one, so I'm leaving it here.\n #conv = MySQLdb.converters.conversions.copy()\n #conv[10] = str\n self.database = MySQLdb.connect(host=self.host,\n port=self.port,\n user=self.user,\n passwd=self.passwd,\n db=self.db)\n #conv=conv)\n self.cursor = self.database.cursor()",
"def cursor(self, cursor):\n\n self._cursor = cursor",
"def __init__(self, db, event_handler):\n self.db = db\n self.cur = db.get_cursor()\n self.event_handler = event_handler\n self.stored_answer = True",
"def execute(self, exetuple):\n\t\tcur = self.connect.cursor(MySQLdb.cursors.DictCursor)\n\t\ttry:\n\t\t\tcur.execute(*exetuple)\n\t\texcept Exception, e:\n\t\t\traise e\n\t\treturn cur",
"def __call__(self, dbio, *args, **kwargs):\n sql, f = self.decorated(dbio, *args, **kwargs)\n if not dbio.testing:\n logger.debug(\"'copy_expert' will run\\n{}\".format(sql))\n cur = dbio.conn.cursor()\n cur.copy_expert(sql, f)\n cur.close()\n dbio.conn.commit()\n f.close()\n else:\n logger.info(\"'copy_expert' will run\\n{}\".format(sql))\n f.close()",
"def q(self, sql, return_curs=False):\n curs = self._db.cursor()\n curs.execute(sql)\n\n if return_curs:\n return curs\n else:\n curs.close()",
"def run_query(query):\n conn = connection.get_db_connection()\n cursor = conn.cursor()\n cursor.execute(query)\n return cursor",
"def execute_param(cursor, query, param):\n while True:\n try:\n cursor.execute(query, param)\n break\n except Exception as e:\n print(\"Database query: {} {} {}\".format(cursor, query, param))\n print(\"Database retry reason: {}\".format(e))\n time.sleep(random.random())\n return cursor",
"def __init__(self, dbfile):\n self.dbfile = dbfile\n self.cxn = sqlite3.connect(dbfile)\n self.cur = self.cxn.cursor()",
"def __call__(self, dbio, *args, **kwargs):\n sql = self.decorated(dbio, *args, **kwargs)\n if not dbio.testing:\n logger.debug(\"'execute' will run\\n{}\".format(sql))\n cur = dbio.conn.cursor()\n cur.execute(sql)\n cur.close()\n dbio.conn.commit()\n else:\n logger.info(\"'execute' will run\\n{}\".format(sql))",
"def _after_execute(self, db):\n pass",
"def _before_execute(self, db):\n pass",
"def _cursor_namespace(self):\n ...",
"def _query_mysql(self):\n mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)\n conn = mysql.get_conn()\n cursor = conn.cursor()\n cursor.execute(self.sql)\n return cursor",
"def execute(cls, sql):\n cursor = cls.get_conn().cursor()\n cursor.execute(sql)\n return cursor",
"def cursor(self):\n cursor = Cursor(self, self.__aceQLHttpApi)\n return cursor",
"def __init__(self):\n self.dbcon = DbConnection.get_con()",
"def __init__(self):\n self.conn = sqlite3.connect(\"database/users.db\", check_same_thread=False)\n self.cursor = self.conn.cursor()",
"def connection(self, collectionname, dbname=None):\n if not collectionname or \"..\" in collectionname:\n raise DataError(\"collection names cannot be empty\")\n if \"$\" in collectionname and not (collectionname.startswith(\"oplog.$main\") or\n collectionname.startswith(\"$cmd\")):\n raise DataError(\"collection names must not \"\n \"contain '$': %r\" % collectionname)\n if collectionname.startswith(\".\") or collectionname.endswith(\".\"):\n raise DataError(\"collecion names must not start \"\n \"or end with '.': %r\" % collectionname)\n if \"\\x00\" in collectionname:\n raise DataError(\"collection names must not contain the \"\n \"null character\")\n return Cursor(dbname or self._pool._dbname, collectionname, self._pool)",
"def __iter__(self):\n return self._cursor",
"def __init__(self, cursor, collection):\n # 'cursor' is a PyMongo Cursor, CommandCursor, or a _LatentCursor.\n super(AgnosticBaseCursor, self).__init__(delegate=cursor)\n self.collection = collection\n self.started = False\n self.closed = False",
"def execute_query(self, *args, **kwargs):",
"def conn(self):\n self.cnx = psycopg2.connect(**self.dbConfig) \n self.cur = self.cnx.cursor()",
"def dictionary_cursor(self):\n\n if not hasattr(self, \"_dictionary_cursor\"):\n self._dictionary_cursor = self.dictionary_conn.cursor()\n return self._dictionary_cursor"
] | [
"0.75196695",
"0.7447285",
"0.6916227",
"0.68934",
"0.6793164",
"0.6761797",
"0.67598337",
"0.6744899",
"0.67059517",
"0.66798764",
"0.66659814",
"0.66617465",
"0.6598716",
"0.6572655",
"0.656831",
"0.6523354",
"0.6523354",
"0.64487654",
"0.64415014",
"0.64395714",
"0.64310354",
"0.64194196",
"0.64028245",
"0.63968515",
"0.63831365",
"0.6374102",
"0.6356072",
"0.6292042",
"0.62858355",
"0.6272239",
"0.6238739",
"0.6205711",
"0.6193449",
"0.618993",
"0.61796963",
"0.6177927",
"0.6174506",
"0.6162678",
"0.6151297",
"0.6117189",
"0.61015403",
"0.60855085",
"0.6083142",
"0.60756785",
"0.6067609",
"0.6060685",
"0.6060685",
"0.6058073",
"0.60528165",
"0.6044993",
"0.6020803",
"0.6004489",
"0.598581",
"0.59504193",
"0.5918265",
"0.5908433",
"0.59079605",
"0.5902149",
"0.58974344",
"0.5892647",
"0.5892647",
"0.5892647",
"0.5886165",
"0.58759075",
"0.58628833",
"0.58596116",
"0.58580345",
"0.5846357",
"0.5844763",
"0.5836594",
"0.58214676",
"0.5817493",
"0.5811148",
"0.58109045",
"0.5791789",
"0.5784368",
"0.5781974",
"0.57676494",
"0.57606435",
"0.5756457",
"0.574764",
"0.5743206",
"0.57308435",
"0.57273674",
"0.5714569",
"0.5693502",
"0.56711197",
"0.5666205",
"0.56643957",
"0.56618595",
"0.5654923",
"0.5653733",
"0.5652628",
"0.56481236",
"0.56466",
"0.5642268",
"0.56400853",
"0.56370646",
"0.562628",
"0.5614504",
"0.56095535"
] | 0.0 | -1 |
Returns a list of actions and states this object can perform... These are in a form that Peregrin can handle, and are used by the class to limit what it allows Peregrin to call. | def actions(self):
self._actions = {}
self._actions['getItems'] = ('FileCrawler', None)
#self._actions['getContents'] = ('ParseContents', ('path'))
return self._actions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getActions(self, state): \n util.raiseNotDefined()",
"def get_available_actions(self, state):\n pass",
"def actions(self, state):\n myActionList= (1,2);\n return myActionList",
"def actions(self, state):\n\t\traise NotImplementedError",
"def actions(self) -> list:\n if self.debug: print(f\"AState.actions()\")\n if not self._examined:\n if self.debug: print(f\"\\tExamining...\")\n self._actions = self._generate_actions()\n self._examined = True\n return self._actions",
"def get_actions(self, state: TState = None) -> Sequence[TAction]:\n pass",
"def getStateActionFeatures(self,state,action):\n return [state, self.actions[action]]",
"def getLegalActions(self,state):\n return self.actionFn(state)",
"def actions(self, state):\n raise NotImplementedError # Override this!",
"def _state_actions(self) -> dict:\n return {}",
"def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")",
"def _get_actions(self):\n return self.__actions",
"def _get_actions(self):\n return self.__actions",
"def _get_actions(self):\n return self.__actions",
"def get_possible_actions(self, state):\n return [LEFT, DOWN, RIGHT, UP]",
"def getLegalActions(self, state):\n return self.actionFn(state)",
"def get_actions(self):\n return []",
"def get_list_of_actions(self):\n return self.actions",
"def actions(self):\n return self._action_list",
"def actions(self):\n return self._actions",
"def get_action_state(self):\n pass",
"def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())",
"def getPossibleActions(self, state):\n if self.weHaveBall(state):\n return [('hold',)] + [('pass', i) for i in range(1, self.keeperNum)]\n else:\n return [None]",
"def get_available_actions(self):\n return self.actions",
"def operation_list(self):\n return [\n STATE_IDLE,\n STATE_HEAT,\n STATE_COOL,\n STATE_AUTO,\n STATE_FAN_ONLY,\n ]",
"def actions(self):\r\n return self.puzzle.actions",
"def actions(self, state):\n\n\t\t# Returns the possible numbers we can use.\n\t\treturn [(str)(x + 1) for x in range (0,self.size)]",
"def actions(self):\n raise NotImplementedError",
"def get_actions(self):\n return self.agent.get_actions()",
"def get_actions(self):\r\n return -4,4",
"def States(self) -> List[Callable]:\r\n\t\treturn self.__STATES__",
"def actions(self, state):\n actions = []\n raise_forces = [(\"RAISE_FORCE\", state.force*2.0)]\n reduce_forces = [] #[(\"REDUCE_FORCE\", state.force/2.0)]\n controlled = [t for t in self.territories if getattr(state, t) == \"CONTROLLED\"]\n owned = set([t for t in self.territories if getattr(state, t) in [\"CONTROLLED\", \"RE-ENFORCED\"]])\n border_territories = set()\n for t in owned:\n border_territories |= self.territories[t].get_borders() - owned\n reenforce_territories = [(\"RE-ENFORCE\", t) for t in controlled]\n take_territories = [(\"TAKE\", t) for t in border_territories if state.force >= self.territories[t].force]\n return raise_forces + reduce_forces + reenforce_territories + take_territories",
"def actions(self) -> Sequence[_A_out]:\n return self._actions",
"def actions(self):\n\n return self._actions.getSlice(0)",
"def actions(self):\n\n return self._actions.getSlice(0)",
"def getActions(self):\n actions = self.actions[:]\n return actions",
"def get_state_actions_mapping(self):\n return None",
"def legal_actions(self):\n raise NotImplementedError",
"def get_possible_actions(self, state):\n return tuple(self._transition_probs.get(state, {}).keys())",
"def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)",
"def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)",
"def actions(self) -> List[str]:\n return list(self.__endpoints.keys())",
"def get_action_meanings(self) -> list[str]:\n keys = ale_py.Action.__members__.values()\n values = ale_py.Action.__members__.keys()\n mapping = dict(zip(keys, values))\n return [mapping[action] for action in self._action_set]",
"def actions(self):\n return {0, 1, 2, 3, 4, 5, 11, 12}",
"def actions(self) -> list:\n if self.debug: print(f\"StateNode.actions()\")\n if not self._examined:\n if self.debug: print(f\"\\tExamining...\")\n self._edges = self.state.actions()\n for e in self._edges:\n e: Action\n e.source = self\n e.cost = self.get_cost(e)\n self._examined = True\n return self._edges",
"def actions(self, state):\n\n possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT']\n index_blank_square = self.find_blank_square(state)\n\n # implement actions here \n\n return possible_actions",
"def actions(self, state, player):\r\n raise NotImplementedError",
"def actions(self):\r\n return actions.Actions(self)",
"def actions(cls):\n return [m for m in cls.__dict__ if not \"__\" in m]",
"def actions(state):\n action_list = []\n\n if state.active_color == cc.WHITE_ACTIVE:\n active_pieces = cc.WHITE_PIECES\n elif state.active_color == cc.BLACK_ACTIVE:\n active_pieces = cc.BLACK_PIECES\n else:\n raise Exception(\"Actions: Invalid Active Color\")\n # Check for states where castling can occur\n castles = gm.get_castle(state)\n if castles[0]: # Kingside Castle\n action_list.append(cc.Action(piece=cc.W_KING, castle=cc.CASTLE_KINGSIDE))\n if castles[1]: # Queenside Castle\n action_list.append(cc.Action(piece=cc.W_KING, castle=cc.CASTLE_QUEENSIDE))\n\n # Loop over the board, finding the moves for each piece\n for rank in range(8):\n for column in range(8):\n if state.board[rank, column] in active_pieces:\n p = gm.Piece(state.board[rank, column], (rank, column))\n action_list.extend(p.get_moves(state))\n\n # Handle En passant attacks\n for action in action_list:\n if action.end == state.en_passant:\n action.capture = True\n\n return action_list",
"def get_step_actions(self):\n return self.actor(tf.numpy_function(self.get_states, [], self.states[0].dtype))",
"def getLegalActions(self):\n return ['forward', 'left', 'right', None]",
"def Qs(self, state, actions):\n return {a: self[state, a] for a in actions}",
"def action_space(self, state) -> set:\n return {0, 1} # Actions independent of state",
"def get_actions(self):\n\n if self.description == exceptions.NotAvailableError:\n raise exceptions.NotAvailableError('Can\\'t get actions because a description for this service is'\n ' not available.')\n return list(self.actions.values())",
"def action(self, gstate, actions):\n self.log.debug(\"Picking among actions %s\" % actions)\n return actions[0]",
"def get_legal_actions(self):\n pass",
"def act(self):\n\n\t\t# Figure out the action selected by each head\n\t\tQs = self.dqn.get_Qs(self.state_history)\n\t\tactions = np.argmax(Qs, axis=1)\n\n\t\t# Select the action of the control head\n\t\taction = actions[self.head_number]\n\t\tQ = Qs[self.head_number]\n\n\t\treturn action, Q",
"def available_action(self):\n return range(self.actions)",
"def _get_legal_actions(self):\n raise NotImplementedError",
"def getLegalActions(self):\n return ['BOT', 'SLD']",
"def getAllActions(self):\n decision_rules = self.getAllDecisionRules()\n return list(itertools.product(decision_rules, self.getAllRobotActions()))",
"def action_space(self, curr_state):\n # Action space - allowed (position, value) combinations for the agent and environment given the current state\n\n agent_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0]))\n env_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1]))\n return (agent_actions, env_actions)",
"def legal_actions(self):\n return self.env.legal_actions()",
"def get_all_action_functions_map():\n actions = {\n 'interact': measure_interaction_effects,\n 'countdata': count_data_points,\n 'filter': filter_operation,\n 'explain': explain_operation,\n 'predict': predict_operation,\n 'self': self_operation,\n 'previousfilter': last_turn_filter,\n 'previousoperation': last_turn_operation,\n 'data': data_operation,\n 'followup': followup_operation,\n 'important': important_operation,\n 'show': show_operation,\n 'change': what_if_operation,\n 'likelihood': predict_likelihood,\n 'model': model_operation,\n 'function': function_operation,\n 'score': score_operation,\n 'label': show_labels_operation,\n 'mistake': show_mistakes_operation,\n 'statistic': feature_stats,\n 'define': define_operation,\n 'predictionfilter': filter_operation,\n 'labelfilter': filter_operation\n }\n return actions",
"def legal_actions(self):\n\n if self.player == TERMINAL: # is terminal\n return []\n elif self.player == CHANCE: # is chance\n if self.phase == PREFLOP: # preflop\n return [Action(deal=[i, j, k, l])\n for i in range(52) for j in range(52)\n for k in range(52) for l in range(52)\n if len(set([i, j, k, l])) == 4]\n elif self.phase == FLOP: # flop\n return [Action(deal=[i, j, k])\n for i in range(52) for j in range(52)\n for k in range(52) if len(set([i, j, k])) == 3]\n elif self.phase == TURN: # turn\n return [Action(deal=[i]) for i in set(range(52)) - set(self.pub)]\n else: # river\n return [Action(deal=[i]) for i in set(range(52)) - set(self.pub)]\n else:\n if self.status[self.player] == RAISED2TIMES: # can't raise\n return [Action(action=FOLD, player=self.player),\n Action(action=CALL, player=self.player)]\n else: # can raise\n if self.phase == PREFLOP or self.phase == FLOP: # preflop or flop\n return [Action(action=FOLD, player=self.player),\n Action(action=CALL, player=self.player),\n Action(action=RAISE, bet=1, player=self.player)]\n else: # turn or river\n return [Action(action=FOLD, player=self.player),\n Action(action=CALL, player=self.player),\n Action(action=RAISE, bet=2, player=self.player)]",
"def actions(self):\n return self._separated_constructs(RuleAction)",
"def get_possible_actions(self) -> [Action]:\r\n if self.fields[self.agent_x][self.agent_y] == Field.EMPTY or self.terminated:\r\n return [Action.NORTH, Action.EAST, Action.SOUTH, Action.WEST]\r\n else: # must be terminal\r\n return [Action.TERMINAL]",
"def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")",
"def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")",
"def map_state_to_actions(self, state: types.StateSpace, **override_args: Any) \\\n -> types.ActionProcess:\n z_star = self.max_weight_policy(state)\n actions = self.mpc_policy.obtain_actions(z_star=z_star, num_mpc_steps=1)\n return actions",
"def actions(self, state):\r\n\r\n valid_actions = []\r\n # What kind of an action it will be\r\n # 1. Add a new piece to the game.\r\n # 2. Move and existing piece.\r\n new_piece, player = self.new_or_old_piece(state)\r\n\r\n # If we want to place a new piece in the game\r\n if new_piece:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == '-':\r\n # (player, to, from)\r\n # Since we are introducing a new piece it's coming from\r\n # an imaginary position i.e. (9, 9)\r\n valid_actions.append((player, (i, j), (9, 9)))\r\n\r\n # when we moving an existing piece in the game\r\n else:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] != '-':\r\n # Now check for places this player can move from this position\r\n for ii, jj in self.valid_adjacent_positions[(i, j)]:\r\n if state[ii][jj] == '-':\r\n # (player, to, from)\r\n valid_actions.append((state[i][j], (ii, jj), (i, j)))\r\n\r\n return copy.deepcopy(valid_actions)",
"def getLegalActions( state ): ## This is being called by the GameState.getLegalActions function and uses self as the state argument.\n return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )## REF-211 calls the getPossibleActions method in the Actions class.",
"def actions(self, state, enemy=False):\n vaccinate_actions = []\n quarantine_actions = []\n medics = 1\n police = 2\n if not enemy:\n for (i, j) in self.zoc:\n if state[(i, j)] == 'H':\n vaccinate_actions.append(('vaccinate', (i, j)))\n if (state[(i, j)] == 'S1' or state[(i, j)] == 'S2') and isDanger(state, i, j, self.zoc):\n quarantine_actions.append(('quarantine', (i, j)))\n else:\n for (i, j) in self.zoc_enemy:\n if state[(i, j)] == 'H':\n vaccinate_actions.append(('vaccinate', (i, j)))\n if (state[(i, j)] == 'S1' or state[(i, j)] == 'S2') and isDanger(state, i, j, self.zoc_enemy):\n quarantine_actions.append(('quarantine', (i, j)))\n\n vaccinate_actions_pre = powerset1(vaccinate_actions, medics)\n quarantine_actions_pre = powerset1(quarantine_actions, police)\n vaccinate_actions_tup = tuple(vaccinate_actions_pre)\n quarantine_actions_tup = tuple(quarantine_actions_pre)\n\n if ((len(vaccinate_actions_tup) == 0) and (len(quarantine_actions_tup) != 0)):\n possible_actions = quarantine_actions_tup\n elif ((len(quarantine_actions_tup) == 0) and (len(vaccinate_actions_tup) != 0)):\n possible_actions = vaccinate_actions_tup\n elif ((len(quarantine_actions_tup) == 0) and (len(vaccinate_actions_tup) == 0)):\n possible_actions = [()]\n else:\n possible_actions = tuple()\n for action_p in quarantine_actions_tup:\n for action_m in vaccinate_actions_tup:\n action_m += action_p\n possible_actions += (action_m, action_p)\n possible_actions += vaccinate_actions_tup + quarantine_actions_tup\n return tuple(possible_actions)",
"def actions(self) -> List['outputs.PreventionJobTriggerInspectJobAction']:\n return pulumi.get(self, \"actions\")",
"def _generate_actions(self) -> list:\n pass",
"def actions(self, state):\n \n #les actions sont définies comme étant les nombres possibles dans \n #la case i,j\n theActions = []\n for i in range(size):\n for j in range(size):\n line = i\n col = j\n if(state[i][j] == 0):\n possibleNumbers = [1,2,3,4,5,6,7,8,9]\n config = state\n for a in range(size):\n x = config[line][a]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n for b in range(size):\n x = config[b][col]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n #identifie quelle boite on veut vérifier\n hBox = col - col % 3\n vBox = line - line % 3\n \n for c in range(3):\n for d in range(3):\n x = config[c+vBox][d+hBox]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n for k in possibleNumbers:\n theActions.append((i,j,k))\n return theActions",
"def get_available_actions(self):\n actions = [self.ACTIONS_INDEXES['IDLE']]\n\n # Shall we also restrict LEFT & RIGHT actions ?\n\n if self.spacecraft.velocity_index < self.spacecraft.SPEED_COUNT - 1:\n actions.append(self.ACTIONS_INDEXES['FASTER'])\n if self.spacecraft.velocity_index > 0:\n actions.append(self.ACTIONS_INDEXES['SLOWER'])\n return actions",
"def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices",
"def _getAvailableActions(self, board):\r\n myPits = board.mySide(self.id)\r\n return [i for i in myPits if i > 0]",
"def _getAvailableActions(self, board):\r\n myPits = board.mySide(self.id)\r\n return [i for i in myPits if i > 0]",
"def get_action(self, state):\r\n\r\n action = self.minimax(state)\r\n print(self.exp, self.pru, self.recCall)\r\n\r\n return action",
"def result(self, state, action):\n return self.actions(state)\n\n raise NotImplementedError",
"def _get_legal_actions(self):\n return self.game.get_legal_actions()",
"def options(self):\n if self._state == GameState.PLAY_OR_DRAW:\n return [NopAction(), DrawAction()] + self._play_options()\n elif self._state == GameState.PLAY:\n return [NopAction()] + self._play_options()\n elif self._state == GameState.PLAY_DRAWN:\n res = [NopAction()]\n if self._can_play(self._current_hand()[-1]):\n res += [PlayCardAction(len(self._current_hand()) - 1)]\n return res\n elif self._state == GameState.PICK_COLOR or self._state == GameState.PICK_COLOR_INIT:\n return [PickColorAction(c) for c in [Color.RED, Color.ORANGE, Color.GREEN, Color.BLUE]]\n elif self._state == GameState.CHALLENGE_VALID or self._state == GameState.CHALLENGE_INVALID:\n return [NopAction(), ChallengeAction()]\n raise RuntimeError('invalid state')",
"def _get_action(self, state, episode):\n if (episode < self.num_explore_episodes):\n action = [random.choice(list(range(self.num_actions)))\n for _ in range(len(state))]\n return action\n\n action = []\n self.model.q_1.eval()\n with torch.no_grad():\n state = torch.tensor(state, device=self.device).float()\n action = torch.argmin(self.model.q_1(state), dim=-1).tolist()\n self.model.q_1.train()\n return action",
"def get_interactions(self):\n return self._interactions",
"def getAllHumanActions(self):\n return self.human_policy.actions",
"def action_map(self):\n return self._action_map",
"def actions(self, request, action_list, group):\n return action_list",
"def actions(self):\r\n return Actions(self)",
"def fan_list(self):\n return [\n STATE_AUTO,\n STATE_ON,\n ]",
"def getLegalActions(self, state):\n actions = [i for i in range(-5, 6)]\n for action in actions:\n if action > state[0] or action < -state[1]:\n actions.remove(action)\n return actions",
"def actions(self, state):\n l = len(state)\n if (l == 0): return [self.query[0]]\n ret = []\n blank = state.count(' ')\n if (l - blank < len(self.query)):\n ret += [self.query[l - blank]]\n if (state[-1] != ' '): ret += ' '\n return ret",
"def final_actions(self):\n return set(filter(lambda x: not isinstance(x, Operator), self.behaviour()))",
"def get_action_history(self):\n\t\treturn self._action_history",
"def actions(self):\n from moztrap.view.lists.actions import actions\n return actions",
"def findActions(problem, state):\r\n size = len(problem) - 1\r\n legalActions = []\r\n if state[0] > 0 and problem[state[0] - 1][state[1]] != 'w':\r\n legalActions.append('N')\r\n if state[0] < size and problem[state[0] + 1][state[1]] != 'w':\r\n legalActions.append('S')\r\n if state[1] > 0 and problem[state[0]][state[1] - 1] != 'w':\r\n legalActions.append('W')\r\n if state[1] < size and problem[state[0]][state[1] + 1] != 'w':\r\n legalActions.append('E')\r\n return legalActions",
"def get_open_chipsactions(self):\n return self.__open_chipsactions[:]",
"def actions():\n pass"
] | [
"0.79093474",
"0.7743691",
"0.7566298",
"0.7564206",
"0.75434977",
"0.73026574",
"0.72860223",
"0.72488976",
"0.7185109",
"0.71836317",
"0.7171607",
"0.71485937",
"0.71485937",
"0.71485937",
"0.7132329",
"0.71137536",
"0.70944935",
"0.7048736",
"0.7040828",
"0.69925964",
"0.6983162",
"0.6863641",
"0.68622416",
"0.6829995",
"0.6813787",
"0.6811585",
"0.68104714",
"0.6777085",
"0.67127275",
"0.6702503",
"0.66914713",
"0.66807705",
"0.66567963",
"0.6624489",
"0.6624489",
"0.65991837",
"0.6593508",
"0.6592251",
"0.65696293",
"0.6544457",
"0.6544457",
"0.6544328",
"0.64975965",
"0.64868474",
"0.6448376",
"0.644452",
"0.64375985",
"0.6431659",
"0.6420013",
"0.6401825",
"0.6394563",
"0.6392754",
"0.63841957",
"0.6358026",
"0.6318219",
"0.6314325",
"0.6301156",
"0.629871",
"0.6264402",
"0.6255496",
"0.6238983",
"0.6237521",
"0.6236708",
"0.6226092",
"0.62178236",
"0.6212263",
"0.62106425",
"0.62070584",
"0.6204154",
"0.6204154",
"0.6194417",
"0.61894476",
"0.6188888",
"0.61831063",
"0.6180733",
"0.61737823",
"0.6169721",
"0.6169653",
"0.61665684",
"0.61562157",
"0.61562157",
"0.615021",
"0.6147069",
"0.61353797",
"0.61236423",
"0.6109903",
"0.610786",
"0.6106419",
"0.6103393",
"0.6084888",
"0.60818523",
"0.60737824",
"0.60715735",
"0.6055824",
"0.60476625",
"0.60349876",
"0.60224754",
"0.6009786",
"0.6008124",
"0.59991235"
] | 0.6020623 | 97 |
Will search the path provided and apply the tags given | def getItems(self):
fname = 'getItems'
actionId = self._db.addAction('WebCrawler')
actionId_ex = self._db.addAction('extractor')
if not os.path.exists(self._haystackPath):
self._haystackPath = os.path.expanduser(self._haystackPath)
if not os.path.exists(self._haystackPath):
self._haystackPath = os.path.abspath(self._haystackPath)
print('\t{0} [{1}]'.format(fname, self._haystackPath))
for (pathStr, dirs, files) in os.walk(self._haystackPath):
head, tail = os.path.split(pathStr)
for fileStr in files:
fileDTCheck = ''
filePath = os.path.join(pathStr,fileStr)
# get the file date...
fileDT = datetime.datetime.fromtimestamp(os.path.getmtime(filePath)).replace(microsecond=0)
fileSize = os.path.getsize(filePath)
fileName, fileExt = os.path.splitext(filePath)
# save the item to the database
itemId = self._db.addItem(self._engine_id, "file://%s" % filePath, fileDT)
# now check the data for this item...
itemList = self._db.getItemDataAll(itemId)
isMatch = False
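# compare the stored FileDate with the file's current mtime; a match means the file is unchanged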
for item in itemList:
if item[0] == 'FileDate':
# we have a date string...
fileDTCheck = datetime.datetime.strptime(item[1], "%Y-%m-%d %H:%M:%S")
if fileDTCheck == fileDT:
# the same time, no changes needed
isMatch = True
if isMatch:
# skip to the next item as this one already exists
continue
# print the details
print(fileDTCheck, fileDT)
print('>>\t%s\t%s\t%s' % (fname, head, tail))
# set the datetime and other details
self._db.addItemData(itemId, 'Haystack', tail, 0)
self._db.addItemData(itemId, 'FileName', fileName, 0)
self._db.addItemData(itemId, 'FileExt', fileExt, 0)
self._db.addItemData(itemId, 'FileDate', fileDT, 0)
self._db.addItemData(itemId, 'FileSize', fileSize, 0)
# now process the file...
# this will extract metadata and add the value pairs to the itemData table.
pattern = re.compile(r'^.*[.](?P<ext>htm|html)$')
pattPNG = re.compile(r'^.*[.](?P<ext>mp.|mpeg|avi|swf|jpg|jpeg|png)$')
# note: the leading [.] already consumes the separator, so the archive
# alternatives must not repeat the dot (e.g. 'zip', not '\.zip')
pattTAR = re.compile(r'^.*[.](?P<ext>tar\.gz|tar\.bz2|zip|tar|7z)$')
m = pattern.match(filePath)
if not m:
m = pattPNG.match(filePath)
if not m:
m = pattTAR.match(filePath)
if not m:
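# nothing matched, so treat the file as a plain document and extract its contents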
self.getContents(itemId, filePath, tail)
self._db.updateItem(self._engine_id, itemId, actionId_ex, datetime.datetime.now())
else:
# we have a file extension...
# the named group captures the extension without its leading dot
if m.group('ext').startswith('htm'):
# add this as an event to be processed by the html link reader...
self._db.addItemEvent(self._engine_id, actionId, itemId)
if self._db:
self._db.commit_db() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tag_dir(self, path, accepted_exts):\n for root, dirs, files in os.walk(path):\n for f in files:\n name, ext = splitext(f)\n if ext.lower() in accepted_exts:\n try:\n self.tag(join(root, f))\n except Exception, e:\n logging.warning(\"Failed to tag {}\".format(abspath(join(root, f))), exc_info=True)",
"def readDirectory():\n tagdir = \"tagreplacements\"\n data = os.listdir(tagdir)\n for d in data:\n processFile(os.path.join(tagdir,d))\n \n #print(repd)",
"def tag_file_process(self, multiple_files):\n # the path is now becoming a string since it goes through the UI\n # text entry box, not a list or tuple any more, so we turn it to a\n # list of paths\n file_list = multiple_files.split(' ')\n # the main dictionary to store all tags\n tag_dict = dict()\n rows = []\n # now for all the tag file under the folder(root directory), we load\n # the data into the dictionary\n if len(file_list) == 0:\n tk.messagebox.showwarning('warning', 'no files chosen')\n else:\n for file_path in file_list:\n if os.path.isfile(file_path):\n with open(file_path, 'r', encoding='utf-8') as \\\n current_tag_file:\n # initialize the dictionary and the inner dictionary\n reader = csv.reader(current_tag_file)\n for row in reader:\n # the encode, decode is use to resolve the \"\\ueffa\"\n # BOM-utf8 problem\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n tag_dict[row[0]] = dict()\n rows.append(row)\n # store the tag into the dictionary\n for row in rows:\n # the 1st column is the main key(mob fact col name)\n # the 2nd column is the tag id\n # the 3rd column is the tag with real meaning\n tag_dict[row[0]][row[1]] = row[2]\n\n else:\n tk.messagebox.showinfo('warning', 'can not obtain: ' +\n file_path)\n return tag_dict",
"def tags():",
"def extract_and_tag_test():\n test_untagged_path = os.getcwd() + \"/data/test/test_untagged/\"\n test_untagged_directory = os.fsencode(test_untagged_path)\n\n print(\"Tagging text. Please wait...\")\n for file in os.listdir(test_untagged_directory):\n filename = os.fsdecode(file)\n try:\n if filename.endswith(\".txt\"):\n text = entity_process.read_data(test_untagged_path, file)\n text = text.lower()\n header,body = entity_process.split_text(text)\n header_array = header.splitlines()\n\n\n start_time, end_time = entity_process.extract_time(header)\n location = entity_process.extract_location(header_array, body)\n speaker = entity_process.extract_speaker(header_array, body)\n\n entity_tagger.tag_all(filename, text, start_time, end_time, location, speaker)\n except Exception as e:\n raise e\n return \"No files found here!\"\n print(\"Tagging complete! Text saved to\" + os.getcwd() + \"/out\")",
"def process_ansible_yml_path(yml_path, ctx):\n for filepath in os_walk(yml_path):\n process_ansible_file(filepath, ctx)",
"def apply_tags(self, tags):\n for tag_name in tags:\n tag = tag_name.strip().lower()\n self.tags.append(DBSession.merge(Tag(tag)))",
"def tag(referencefile):\n dirpath = path.abspath(referencefile)\n\n if path.isdir(dirpath):\n dircontents = listdir(dirpath)\n else:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n\n while not 'tag' in dircontents:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n if len(dircontents) == 0 or path.split(dirpath)[1] == 'chemistry':\n print(\"tag file not found\")\n return None\n\n return path.join(dirpath, 'tag')",
"def make_tags(self):\n\n if self.debug:\n print 'Running make_tags()'\n\n for root, dirs, files in os.walk(self.content_folder):\n basefolder = os.path.basename(root)\n\n # If the option self.strip_dot_files is defined,\n # the dotfiles/dotfolders will be excluded.\n if self.strip_dot_files:\n dirs[:] = [d for d in dirs if not d.startswith('.')]\n files[:] = [f for f in files if not f.startswith('.')]\n\n if self.tag_indicator in basefolder:\n self._make_symlink(root)\n\n for file in files:\n # This line checks for the tag indicator and that the file\n # is not being included from the folder that contains the\n # symlinks that will be created with this loop.\n if self.tag_indicator in file and not root.startswith(\n self.tags_folder):\n\n if self.debug:\n print 'tag: %s (from %s)' % (file, root)\n full_path = '%s/%s' % (root, file)\n self._make_symlink(full_path)",
"def tagger():",
"def handle_tag_search(self, tag_text):\n log.debug(\"Handling tag search: %s\", tag_text)\n tags = tag_text.split()\n self.filter_tags = tags\n self.current_selected = 0\n self._refresh()",
"def give_item_by_tag(tags):\n with shelve.open(RECIPE_PATH) as base:\n base_list = [recipe.lower() for recipe in base]\n recipes_by_tag = [rec for rec in base_list for tag in tags if tag in rec]\n return \"; \".join(recipes_by_tag)",
"def set_tags(self, tags, filename):\n return self.set_tags_batch(tags, [filename])",
"def _ProcessTagsForFileUse(self) -> List[str]:\n return list(self.tags)",
"def insert_text_in_file(file_path: pathlib.Path, tag: str, text: str) -> bool:\n lines: List[str] = []\n with file_path.open('r') as f:\n lines = f.readlines()\n for ii, line in enumerate(lines):\n if line.find(tag) >= 0:\n lines.insert(ii + 1, text)\n with file_path.open('w') as f:\n f.writelines(lines)\n return True\n return False",
"def process(self, matches, tag):\n if isinstance(matches, str):\n matches = [matches]\n\n done = set()\n for match in matches:\n processed = False\n for path in self.dirs:\n for file in sorted(path.glob(match)):\n if file.name in done:\n continue\n self.__log.info('file %r matches %r' % (str(file), match))\n processed = True\n done.add(file.name)\n yield from str(file) >> tag >> self.out\n if not processed:\n raise ValueError('no matching files found for %r' % match)",
"def tag_file_chooser(self):\n filename_list = tk.filedialog.askopenfilenames()\n self._tag_path_var.set(filename_list)",
"def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()",
"def replace_template_tags(lines, file_provider):\n ret = []\n for line in lines:\n m = re.search(r'{/' + path_re + '}', line)\n if m:\n span = m.span()\n template_path = m.group('path').lstrip('/')\n template_lines = file_provider(template_path)\n template_lines = replace_template_tags(template_lines, file_provider)\n ret += process_replacement_lines(line[:span[0]], line[span[1]:], template_lines)\n else:\n ret.append(line)\n return ret",
"def search(self,path,key_words):\t#key_words must be tuple\n\t\ttry:\n\t\t\tall=os.walk(path,False)\t#os.walk() is a generator , the return is a tuple which is (dirpath,dirnames,filenames)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tfor item in all:\n\t\t\t\tfilepath=item[0]\n\t\t\t\tfor filename in item[2]:\n\t\t\t\t\tfor key_word in key_words:\t#find all key_word\n\t\t\t\t\t\tif key_word in filename.lower():\t#ignore case of word , and only search filename\n\t\t\t\t\t\t\tself.result.append(os.path.join(filepath,filename))",
"def tags_load(folder):\n tags=[]\n fname=os.path.join(folder,\"experiment.txt\")\n if not os.path.exists(fname):\n print(\"creating empty\",os.path.basename(fname))\n with open(fname,'w') as f:\n f.write('')\n with open(fname) as f:\n raw=f.read().split(\"\\n\")\n for line in raw:\n line=line.strip().split(\"#\")[0]\n if not \"=\" in line:\n continue\n tag,vals=[x.strip() for x in line.split('=')]\n vals=[clock_to_float(x) for x in vals.split('-')]\n tags.append([tag]+vals)\n tags.sort(key=lambda x: x[1])\n if len(tags) and not 'baseline' in [x[0] for x in tags]:\n print(\" Tags found but no baseline found! Inventing one.\")\n BL1=1\n BL2=tags[0][1]-1\n if BL2<BL1:\n BL2=BL1+1\n tags.insert(0,['baseline',BL1,BL2])\n if len(tags):\n print(\"Tags found in %s:\"%os.path.basename(fname))\n for tag in tags:\n print(\" %s = %s\"%(tag[0],str(tag[1:])))\n return tags",
"def add_tags(event):\n\n add_tags_from_presets()",
"def tag(self, pathname):\n info = SongInfo(pathname)\n self.populate(info)\n logging.info(\"Tagging {} (artist: {}, title: {}, album: {}, genre: {})\".format(abspath(pathname), info.artist, info.title, info.album, info.genre))\n if not self.dry_run:\n for w in self.writers:\n w.write(pathname, info)",
"def scan_path(path,ext_lst=['md','markdown']):\n if not os.path.exists(path):\n logger.debug('scan_path: invalid path : %s' %(path,files))\n return -1\n pages={}\n files=[]\n for item in glob.glob(os.path.join(path,'*.*')):\n if string.split(os.path.basename(item),'.')[-1] in ext_lst:\n files.append(item)\n logger.debug('scan_path: %s : %s' %(path,files))\n res = None\n pagelist=[]\n articledict={}\n for item in files:\n res=os.path.basename(item).split()\n res=string.split(os.path.basename(item),'.')\n #print 'scan_path: res : %s' %res \n if res[0] not in pagelist:\n logger.debug('scan_path: add page : %s' %res[0]) \n pagelist.append(res[0])\n articledict[res[0]]=[] #or add itself?\n articledict[res[0]].append(item) \n pages['pagelist'] = pagelist\n if len(pagelist)>0:\n for page in pagelist: \n logger.debug('scan_path: pages= %s' %page)\n logger.debug('scan_path: articles= %s' %articledict[page]) \n pages[page]=articledict[page] \n #look for templates/layout... To ENHANCE...\n pages['template']=glob.glob(os.path.join(path,'*.tpl')) \n if os.path.isfile(os.path.join(path,'layout.tpl')):\n logger.info('scan_path: found general layout file : %s' %os.path.join(path,'layout.tpl'))\n pages['layout']=os.path.join(path,'layout.tpl')\n return pages",
"def handle_starttag(self, tag, attrs):\n if tag == \"a\":\n curr_tag = Tag(tag)\n for attribute in attrs:\n curr_tag.add_attribute(attribute[0], attribute[1])\n self.current_tags.append(curr_tag)\n elif tag == \"img\":\n curr_tag = Tag(tag)\n for attribute in attrs:\n curr_tag.add_attribute(attribute[0], attribute[1])\n self.images.append(curr_tag)",
"def modify_tags(self, note_id, tags):\n try:\n self._find_note(note_id).tags = tags\n except AttributeError:\n print(f\"Note with id {note_id} not found\")",
"def ctags():\n proj_dir = os.environ['TM_PROJECT_DIRECTORY']\n tag_file = os.path.normpath(os.path.join(proj_dir, '../../tags'))\n\n if not os.path.isfile(tag_file):\n print \"Tag File not found (%s).\" % tag_file\n print html_footer()\n sys.exit()\n\n return CTags(tag_file)",
"def add_subtags(path, pxml_subtags):\n # print(\"CALLING... add_subtag\")\n root, tree = gen_tree(path)\n nmsps = root.nsmap['r']\n for k,v in pxml_subtags.items():\n subtag1 = tree.find(k)\n for i in v:\n if 'rId' not in i[1]:\n rId = f\"{{{nmsps}}}id\"\n subtext = etree.SubElement(subtag1, i[0])\n subtext.attrib['id'] = i[1]\n subtext.attrib[rId] = i[2]\n else:\n subtext = etree.SubElement(subtag1, i[0])\n subtext.attrib[rId] = i[1]\n tree.write(f'{output_path}/ppt/presentation.xml', pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n return",
"def __load_tags(self) -> None:\n self.tags = TagHelper.TagHelper.generate_tag_object(self)\n self.tag_helper = TagHelper.TagHelper(self)\n self.tag_helper.fetch()",
"def execute(ctags_exe, tag_file, directory, append=False):\n # Ensure the directory exists\n tag_file_dir = os.path.dirname(tag_file)\n if not os.path.exists(tag_file_dir):\n os.makedirs(tag_file_dir)\n\n cmd = [ctags_exe, '-f', uris.to_fs_path(uris.from_fs_path(tag_file)), '--languages=Python', '-R'] + CTAG_OPTIONS\n if append:\n cmd.append('--append')\n cmd.append(uris.to_fs_path(uris.from_fs_path(directory)))\n\n log.info(\"Executing exuberant ctags: %s\", cmd)\n log.info(\"ctags: %s\", subprocess.check_output(cmd))",
"def collect_local(self, path, req_tag=True):\n for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:\n if not os.path.isfile(f):\n continue\n self.collect_single(f, req_tag)",
"def scanTags(self, sources=None):\n sources = sources or self.sources\n tagnames = set()\n for src in sources:\n tagnames.update(src.getTagNames())\n self.tagnames = list(tagnames)\n self.tagnames.sort()",
"def add_tags_recursive(self, tags2add: List[str]) -> None:\n self.tags += tags2add\n for data in self._child_data.values():\n data.add_tags_recursive(tags2add)",
"def create_tags():\n\n INPUT = \"\"\"\n \"Python general\",Python\n R,\"Other Programming Languages\"\n Java,\"Other Programming Languages\"\n C-Languages,\"Other Programming Languages\"\n Analytics,\"Data Science\"\n Visualization,\"Data Science\"\n \"Big Data\",\"Data Science\"\n Predictions,\"Data Science\"\n MongoDB,Databases\n \"Web Servers and MicroFWs (Flask/Tornado/Nginx/...)\",Web\n Ipython,Python\n \"Web General\",Web\n Socket,DevOps\n Django,\"Application Frameworks\"\n Docker,DevOps\n Security,Security\n Privacy,Security\n Odoo,\"Application Frameworks\"\n \"Scientific Libraries (Numpy/Pandas/SciKit/...)\",\"Data Science\"\n Pyramid,\"Application Frameworks\"\n Plone,\"Application Frameworks\"\n \"Data Science\",\"Data Science\"\n Machine-Learning,\"Data Science\"\n PostgreSQL,Databases\n Django-Girls,Community\n Agile,\"Development Methods\"\n Documentation,Programming\n \"DevOps general\",DevOps\n Community,Community\n \"Natural Language Processing\",\"Data Science\"\n PyPy,Python\n Open-Source,\"Open Source\"\n Linux,\"Operating Systems\"\n \"SQL Alchemy\",Databases\n Communication,Community\n Tooling,Programming\n \"Test Libraries (pyTest/node/...)\",Testing\n MySQL,Databases\n Packaging,Python\n \"JavaScript Web Frameworks (AngularJS/ReactJS/...)\",Web\n \"Internet of Things (IoT)\",Hardware\n Performance,Programming\n Saltstack,DevOps\n Management,\"Development Methods\"\n Scrum,\"Development Methods\"\n Kanban,\"Development Methods\"\n Internationalization,Programming\n \"Behavior Driven Development (BDD)\",\"Development Methods\"\n HTML5,Web\n NoSQL,Databases\n OpenGL,Web\n \"Test Driven Development (TDD)\",Testing\n Education,Educational\n CPython,Python\n APIs,Web\n \"Python 3\",Python\n \"Best Practice\",\"Best Practice and Use Cases\"\n Development,Programming\n Testing,Testing\n Beginners,Educational\n Programming,Programming\n Cython,Python\n \"Deep Learning\",\"Data Science\"\n Unix,\"Operating Systems\"\n \"Case Study\",\"Case Study\"\n E-Commerce,Web\n \"Distributed Systems\",DevOps\n \"Functional Programming\",Programming\n Architecture,Programming\n OpenStack,DevOps\n \"Raspberry PI\",Hardware\n Teaching,\"Everything Else\"\n \"Meta Classes\",Programming\n \"Public Cloud (AWS/Google/...)\",DevOps\n \"Augmented Reality\",\"Everything Else\"\n Engineering,\"Everything Else\"\n Physics,Sciences\n \"Clean Code\",Educational\n \"System Administration\",DevOps\n Mix-Ins,Programming\n \"Static Analysis\",\"Everything Else\"\n \"Compiler and Interpreters\",Python\n Type-Hinting,Programming\n \"Web Crawling\",Web\n JavaScript,\"Other Programming Languages\"\n NodeJS,Web\n \"Conferences and Meet-Ups\",Community\n Databases,Databases\n Infrastructure,DevOps\n \"Elastic Search\",Databases\n Go-Lang,\"Other Programming Languages\"\n HTTP,Web\n Operations,DevOps\n \"Configuration Management (Ansible/Fabric/Chef/...)\",DevOps\n \"Deployment/Continuous Integration and Delivery\",DevOps\n Jenkins,Testing\n Science,Sciences\n Authentication,Security\n 3D,\"Everything Else\"\n Blender,\"Everything Else\"\n Diversity,Community\n Robotics,Hardware\n Human-Machine-Interaction,Hardware\n Debugging,Testing\n \"Euro Python and EPS\",Community\n LaTeX,\"Other Programming Languages\"\n Game-Development,\"Everything Else\"\n Kivy,Python\n Cross-Platform-Development,Python\n Git,DevOps\n PyQt,Programming\n Virtualization,DevOps\n \"Software Design\",Programming\n Multi-Processing,Programming\n Multi-Threading,Programming\n Windows,\"Operating Systems\"\n \"Messaging and Job Queues 
(RabbitMQ/Redis/...)\",DevOps\n \"Fun and Humor\",\"Everything Else\"\n Command-Line,Programming\n CMS,Web\n \"GEO and GIS\",\"Everything Else\"\n \"Graph Databases\",Databases\n Abstractions,\"Everything Else\"\n \"Code Analysis\",Programming\n Wearables,Hardware\n Mobile,Web\n \"Jupyter/iPython Notebook\",Python\n RESTful,Web\n Cryptography,Security\n OpenCV,Hardware\n \"ASYNC / Concurreny\",Programming\n \"Virtual Env\",Programming\n PyPi,Python\n Micro-Computers,Hardware\n Microservices,Programming\n Scaling,DevOps\n \"Python Software Foundation (PSF)\",Community\n workforce,Business\n DIY,\"Everything Else\"\n \"Image Processing\",\"Everything Else\"\n \"Mac OS X\",\"Operating Systems\"\n \"Data Structures\",Programming\n \"System Architecture\",DevOps\n Algorithms,\"Data Science\"\n PyLadies,Community\n \"The Answer to Life the Universe and Everything Else\",\"Everything Else\"\n Gadgets,Hardware\n \"All Other Programming Languages\",\"Other Programming Languages\"\n \"Use Case\",\"Best Practice and Use Cases\"\n Sensors,Hardware\n \"Other Hardware\",Hardware\n failures/mistakes,\"Best Practice and Use Cases\"\n clients,Business\n freelancing,Business\n \"Mind Bending\",\"Everything Else\"\n Templating,Web\n legacy-code,Programming\n MicroPython,Python\n \"Python 2\",Python\n python,Python\n Data,\"Data Science\"\n Structures,\"Data Science\"\n Web,Web\n Business,Business\n Notebook,\"Data Science\"\n Jupyter/iPython,\"Data Science\"\n Life,Community\n Universe,Sciences\n Deep,\"Data Science\"\n Learning,\"Data Science\"\n Internet,Web\n \"Internet of Things\",DevOps\n EPS,Community\n EuroPython,Community\n \"Open Stack\",DevOps\n finance,\"\"\n Trading,\"\"\n \"\"\".strip()\n\n buffer = StringIO(INPUT)\n\n reader = csv.reader(buffer)\n for line in reader:\n ConferenceTag.objects.create(\n name=line[0].strip(), category=line[1].strip()\n )\n print(\"Created tag\", line[0].strip())",
"def process_path(self, path):\n if not os.access(path, os.R_OK):\n raise Exception\n\n for element in os.listdir(path):\n if not os.path.isdir(os.path.join(path, element)):\n continue\n if self.loaded_elements.has_key(element):\n raise Exception\n self.loaded_elements[element] = Element(os.path.join(path, element))",
"def apply(self, path: ImportPath) -> None:\n pass",
"def test_add_path(self):\n path = 'C:\\\\test\\\\'\n info = self.api.add_path(path, tags=['asd'])\n self.assertEqual(info['value'], path)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def findTags(user_input, tagged_text):\n result = []\n for item in tagged_text:\n for w in user_input:\n if w[WORD] == item[WORD]:\n tup = (w[WORD], item[TAG])\n result.append(tup)\n continue\n\n return result",
"def tag_mapping(data_path, data_type):\n with open(data_path+data_type+\"_labels.txt\", \"r\") as file1:\n tags = [line.split(\" \")[:-1] for line in file1.readlines()]\n dico = create_dico(tags)\n dico[model.START_TAG] = -1\n dico[model.STOP_TAG] = -2\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag",
"def add_tagging(self, task_instance):",
"def _gather_path(self, comp, path, function_map):\n print(f'\"Analyzing {comp} at {path}')\n if not os.path.exists(path):\n print('No files in {path}')\n return\n\n for root, _dirs, files in os.walk(path):\n if self.excluded(root):\n continue\n if not self.included(root, self.dir_inclusions):\n continue\n for fname in files:\n if not self.included(fname, self.file_inclusions):\n continue\n if fname.endswith(\".su\"):\n with open(os.path.join(root, fname), \"r\") as frame:\n for line in frame.readlines():\n split = line.split()\n if len(split) < 3:\n continue\n func = f\"{comp}:{split[0]}\"\n usage = int(split[-2])\n if usage < self.cutoff:\n continue\n if func not in function_map:\n function_map[func] = usage\n elif usage > function_map[func]:\n function_map[func] = usage",
"def index_tags():\n # Get the script directory\n SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n # Let's make our index a defaultdict with autovivification\n index = tree()\n\n # Iterate through all files in the data directory\n for file in os.listdir(os.path.join(SCRIPT_DIR, '../data')):\n\n # Load the JSON file containing the tags for a site\n with codecs.open(\n os.path.join(SCRIPT_DIR, '../data/' + file),\n 'r',\n encoding='utf-8'\n ) as input_file:\n tags = json.load(input_file)\n\n # The site ID is the filename minus the (.json) at the end\n site_id = file[:-5]\n\n # Iterate through all tags and add them to the index\n for tag in tags['items']:\n index[tag['name']][site_id] = tag['count'] # Autovivification ftw!\n\n # Create the index directory\n try:\n os.makedirs(os.path.join(SCRIPT_DIR, '../index'))\n # If the directory already exists, ignore the error, otherwise report it\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n # Save the tag index to a local JSON file\n with codecs.open(\n os.path.join(SCRIPT_DIR, '../index/index_tags.json'),\n 'w',\n encoding='utf-8'\n ) as output_file:\n json.dump(index, output_file, ensure_ascii=False)\n\n # Some status information for the console\n print('Successfully created the tag index.')",
"def process_tag_list(self, taglist):\r\n self.do_before()\r\n for tag in taglist:\r\n self.feed(tag)\r\n self.do_after()",
"def scan(self,path):\n if os.path.exists(path):\n self.path=path\n logger.debug('builder.scan: %s' %self.path)\n self.pages=scan_path(path,self.ext_lst)\n else:\n logger.error('builder.scan: path does not exist : %s' %path)\n return self.pages",
"def tag():\n conn = create_conn()\n c = conn.cursor()\n tags = load_emails()\n c.executemany('INSERT INTO tags (filepath, tag) VALUES (?,?)', tags)\n conn.commit()\n conn.close()",
"def tag_cmd(context, json, name):\n store: Store = context.obj[\"store\"]\n LOG.info(\"Fetch tags\")\n tag_objs = store.get_tags()\n template = schema.TagSchema()\n result = []\n for tag_obj in tag_objs:\n if name and (tag_obj.name not in name):\n continue\n LOG.debug(\"Use tag %s\", tag_obj.name)\n result.append(template.dump(tag_obj))\n if not result:\n LOG.info(\"Could not find any of the specified tags [%s]\", \", \".join(name))\n return\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_tags_table(result))",
"def set_tags_batch(self, tags, filenames):\n # Explicitly ruling out strings here because passing in a\n # string would lead to strange and hard-to-find errors\n if isinstance(tags, basestring):\n raise TypeError(\"The argument 'tags' must be dictionary \"\n \"of strings\")\n if isinstance(filenames, basestring):\n raise TypeError(\"The argument 'filenames' must be \"\n \"an iterable of strings\")\n \n params = []\n params_utf8 = []\n for tag, value in tags.items():\n params.append(u'-%s=%s' % (tag, value))\n \n params.extend(filenames)\n params_utf8 = [x.encode('utf-8') for x in params]\n return self.execute(*params_utf8)",
"def register_keywords():\n resources = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../resources\"))\n logger.info(\"resources=%s\" % resources)\n if not os.path.isdir(resources):\n raise AssertionError(\"Unable to find resources directory! resources=%s\" % resources)\n for filename in glob.glob(os.path.join(resources, \"*.robot\")):\n logger.info(\"looking up keywords in file %s\" % filename)\n try:\n BuiltIn().import_resource(filename)\n keywords = lookup_keywords(filename)\n for keyword in keywords:\n register_run_keyword(filename, keyword, 0)\n except:\n pass",
"def parse_paths(self):\n self.soup = BeautifulSoup(open(self.get_path('install')))\n for spec in list(self.specs.keys()):\n spec_file = self.find_specs_path(spec)\n if spec_file:\n # If spec file exists\n self.specs[spec] = path_format(spec_file)\n else:\n # If specs are held inside install.xml\n self.specs[spec] = self.install",
"def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)",
"def _getTagIDs(self):\n paths = self._criteria.get('paths')\n if paths:\n store = getMainStore()\n return list(store.find(Tag.id, Tag.path.is_in(paths)))",
"def edit_tags(self):\n os.system(\"clear\")\n while True:\n tag_categories = [\"meal\", \"genre\", \"complexity\", \"course\", \"no change\"]\n _, key = _num_select(\"Which tag would you like to edit\", tag_categories)\n if key == \"meal\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"breakfast\", \"lunch\", \"dinner\"])\n self.tags[key]=value\n elif key == \"genre\":\n genres = [\"american\", \"italian\", \"mexican\", \"asian\", \"indian\", \"misc\"]\n _, value = _num_select(\"Which tag would you like to apply\",\n genres)\n elif key == \"complexity\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"simple\", \"intermediate\", \"complicated\"])\n elif key == \"course\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"appetizer\", \"salad\", \"side\", \"main\", \"dessert\"])\n else:\n return",
"def tag_search(category):\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM categories\n INNER JOIN entries ON\n entries.slug = categories.slug AND\n entries.published = categories.published\n WHERE categories.category='{category}'\n ORDER BY entries.published DESC\n \"\"\".format(category=category))\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)",
"def tags(root='.'):\n tags_dict = dict()\n for dirpath, filename, file, date in traverse_posts(root):\n # extract tags from frontmatter\n with open('{}/{}'.format(dirpath, filename), 'r') as f:\n fm = frontmatter.loads(f.read())\n for tag in fm['tags']:\n if tag in tags_dict:\n tags_dict[tag].append(file)\n else:\n tags_dict[tag] = [file]\n\n return OrderedDict(reversed(sorted(tags_dict.items(), key=lambda x: len(x[1]))))",
"def get_tag(file, tag):\r\n import re\r\n\r\n # make sure the necessary globals are initialised\r\n global filenames # set of processed files\r\n if 'filenames' not in globals():\r\n filenames = set()\r\n global tags # dictionary of cached tag values\r\n if 'tags' not in globals() : # the collection has not yet been initialized\r\n tags = {}\r\n\r\n\r\n if file not in filenames:\r\n # file has not been processed yet\r\n\ttry:\r\n\t f = open(file, \"rt\")\r\n\texcept IOError:\r\n\t logger.warning(\"File '%s' not found.\", file)\r\n\t return \"*** ERROR *** File %s Not found***\\n\" % file\r\n\r\n\t# matches up to 5 chars at start of line followed the \"{{{\" \r\n\t# followed by tag name followed by up to five chars \r\n\t# with optional trailing white space.\r\n\tstarttag = '^(\\s*).{0,5}\\{{3}(\\S+).{0,5}\\s*$'\r\n\tstartre = re.compile(starttag)\r\n\t# matches up to 5 chars followed by \"}}}\" followed by up to 5 chars and\r\n\t# optional trailing white space.\r\n\tendtag = \"^\\s*.{0,5}\\}{3}.{0,5}\\s*$\"\r\n\tendre = re.compile(endtag)\r\n\tcapturing = False # are we capturing?\r\n\tcurtagname = \"\"\r\n\ttagvalue = \"\"\r\n\ttrim = 0\r\n\r\n\twhile True:\r\n\t l = f.readline()\r\n\t if not l: break\r\n\t if capturing:\r\n\t if endre.match(l):\r\n\t\t capturing = False\r\n\t\t tags[(file, curtagname)] = tagvalue\r\n\t\t tagvalue = ''\r\n\t\telse:\r\n\t\t tagvalue += l[trim:]\r\n\t\t tagvalue += '\\n'\r\n\r\n\r\n\t else:\r\n\t m = startre.match(l)\r\n\t\tif m: # we have a start tag\r\n trim = len(m.group(1))\r\n\t\t curtagname = m.group(2)\r\n\t\t capturing = True\r\n\r\n\tf.close()\r\n filenames.add(file)\r\n\r\n\r\n try:\r\n return tags[(file,tag)]\r\n except KeyError:\r\n\tlogger.warning(\"Tag '%(tag)s' not found in %(file)s\", \r\n\t\t\t{'file':file, 'tag':tag})\r\n\r\n\treturn \"*** ERROR *** Tag %(tag)s not found in file %(file)s ***\\n\" % \\\r\n\t\t\t\t{'file':file, 'tag':tag}",
"def parse_and_add_directory(self, list_of_root_tags, directory):\n\n # Check if directory exists and list_of_root_tags isn't empty\n\n if len(list_of_root_tags) == 0:\n raise Exception(\n \"{} : List of root tags empty in parse_and_add_directory!\".format(\n self.__schema_name\n )\n )\n\n if not os.path.isdir(directory):\n raise Exception(\n \"{} : Directory {} does not exist in parse_and_add_directory!\".format(\n self.__schema_name, directory\n )\n )\n\n for subdir, dirs, files in os.walk(directory):\n for file in files:\n if file.upper().endswith(\".XML\"):\n try:\n new_path = os.path.join(subdir, file)\n parsed = self.__get_parsed_relaxng(new_path)\n root_tag = parsed.getroot().tag\n if root_tag in list_of_root_tags:\n self.add_test(\n f\"Path Added: {file}\", new_path, None, parsed_xml=parsed\n )\n except:\n pass",
"async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO",
"def _add_tags(self):\n\n if self.version != 'live':\n return\n\n tags = [t.strip() for t in self.tags_text.split(',')]\n tags = list(set(tags))\n\n for tag_name in tags:\n tag_slug = slugify(tag_name)\n if tag_slug:\n try:\n tag = Tag.objects.get(blog=self.blog, slug=tag_slug)\n except Tag.DoesNotExist:\n tag = Tag( blog = self.blog,\n name = tag_name,\n slug = tag_slug)\n\n tag.increment()\n tag.save()\n\n self.tags.add(tag)",
"def tag(request, tag_name):\n raise NotImplementedError",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def tagged(tag = ''):\n\tresults = queries.tagged(tag)\n\ttags = queries.tags()\n\treturn render_template('index.html', packages=results, tags=tags, currentFilter=tag)",
"def parse_dir_replace(args, dirname, names):\n for name in names:\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n parse_file_replace(path, args)",
"def fromtag(tofind, tag):\n\n abstag = path.abspath(tag)\n speciespath = str(path.split(abstag)[0])\n species = str(path.split(speciespath)[1])\n conformers = str(path.join(speciespath, \"conformers\"))\n\n where = {\n \"species\" : species,\n \"speciespath\" : speciespath,\n \"tag\" : tag,\n \"geometry\" : path.join(speciespath, species + \".xyz\"),\n \"conformers\" : conformers,\n \"obconformer\" : path.join(conformers, species + \"-conformers.xyz\"),\n \"conformermopacins\" : \\\n [x for x in listdir(conformers) if x.endswith(\".mp\")],\n \"conformermopacouts\" : \\\n [x for x in listdir(conformers) if x.endswith(\".mp.out\")],\n \"conformermopacarcs\" : \\\n [x for x in listdir(conformers) if x.endswith(\".mp.arc\")],\n \"conformerqchemins\" : \\\n [x for x in listdir(conformers) if x.endswith(\".qc.in\")],\n \"conformerqchemouts\" : \\\n [x for x in listdir(conformers) if x.endswith(\".qc.out\")],\n \"optin\" : path.join(speciespath, species + \"-opt.in\"),\n \"optout\" : path.join(speciespath, species + \"-opt.out\"),\n \"freqin\" : path.join(speciespath, species + \"-freq.in\"),\n \"freqout\" : path.join(speciespath, species + \"-freq.out\"),\n \"pcmin\" : path.join(speciespath, species + \"-pcm.in\"),\n \"pcmout\" : path.join(speciespath, species + \"-pcm.out\"),\n \"embin\" : path.join(speciespath, species + \"-emb.in\"),\n \"embout\" : path.join(speciespath, species + \"-emb.out\"),\n \"embxml\" : path.join(speciespath, species + \"-emb.xml\"),\n }\n if not tofind in where:\n print(tofind, \" not a valid option for findfromtag\")\n return None\n\n\n return where[tofind]",
"def tags_tocl(d, tag_list, title):\r\n filtered_anchors = []\r\n for anc in sorted(d.keys(), key=str.lower):\r\n entry = d[anc]\r\n if not \"tags\" in entry: continue\r\n found = [t for t in tag_list if t in entry[\"tags\"]]\r\n if not found: continue\r\n filtered_anchors.append(anc)\r\n return TemplateData(t=title, e=filtered_anchors)",
"def uploadFile(self,path):\n\n response = requests.post('https://api.imagga.com/v1/content',\n auth=(self.apikey, self.secret),\n files={'image': open(path, 'r')})\n json_data = json.loads(response.text)\n uploadedData=json_data[u'uploaded'][0]\n resourceId=uploadedData[u'id']\n filename = uploadedData[u'filename']\n self.fileToIdMap[filename] = resourceId\n self.getTagsUsingId(resourceId)",
"def tagged(request,slug):\n\n tag = get_object_or_404(Tag, slug=slug)\n books = Book.objects.filter(tags=tag)\n \n for book in books:\n book\n\n context = {\n 'tag':tag,\n 'books':books,\n }\n return render(request, 'favorite.html', context)",
"def _HandleTag(self, input_line, tag, output_stream):\n if tag not in self._open_tags:\n self._OpenTag(input_line, tag, output_stream)\n else:\n self._CloseTag(input_line, tag, output_stream)",
"def parse_resources(self, soup):\n for res in soup.find_all('res'):\n if 'customlangpack' in res['id'].lower():\n self.find_langpack_path(res)\n else:\n rid = remove_xml(res['id'])\n self.resources[rid] = path_format(self.properties.substitute(res['src']))",
"def scanner(path=\".\", files=[]):\n\n if os.path.isfile(path):\n return files.append(path)\n for item in os.listdir(path):\n item = os.path.join(path, item)\n if os.path.isfile(item):\n files.append(item)\n else:\n files = scanner(item, files)\n return files",
"async def slashtag_add(\n self,\n ctx: commands.Context,\n tag_name: TagName(check_global=False),\n *,\n tagscript: TagScriptConverter,\n ):\n await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False)",
"def replace_tags(self, photo_id, tag_list):\n # get all the tags attached to the photo\n current_tags = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n print(current_tags)\n\n # remove the current tags\n self.db.make_query(\n '''\n delete from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n for tag in tag_list:\n # add tags in the tag_list\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n\n self.update_photo_count(tag)",
"def tags(docid):\n page = request.args.get('page')\n filename = SETTINGS.LABELED_LOCATION + '/' + docid\n page_text = get_document_page(docid, page)\n if not os.path.isfile(filename):\n return spanify(page_text, page)\n else:\n with open(filename) as tokens_file:\n labels = json.load(tokens_file)\n return spanify(page_text, page, labels)",
"def tag_sents(self, sents):\n # WORK HERE!!",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def search_tag(self, tag):\n self.driver.get(self.tag_url.format(tag))",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def apply(self, rewriter):\n for (tag, attributes, extent) in self.tags:\n start = extent.start\n start_line = start.line - 1\n start_col = start.column - 1\n if not rewriter.is_in_range(start_line, start_col):\n continue\n\n end = extent.end\n end_line = end.line - 1\n end_col = end.column - 1\n if not rewriter.is_in_range(end_line, end_col):\n continue\n\n start_tag = '<' + tag\n if attributes:\n attr = ' '.join([a[0] + '=\"' + a[1] + '\"' for a in attributes])\n start_tag += ' ' + attr\n start_tag += '>'\n\n end_tag = '</' + tag + '>'\n\n rewriter.insert_before(start_tag, start_line, start_col)\n rewriter.insert_after(end_tag, end_line, end_col)",
"def __call__(self, *paths):\n\n for item in self.site.items:\n if item.is_page() and item.match(*paths):\n yield item",
"def create_id2amendment_info(path, tag):\n d = {}\n for triple in os.walk(path):\n root, files = triple[0], triple[2]\n for filename in files:\n if filename.endswith('.json'):\n # trim its file extension \n amendment_id = n = filename[:-5]\n d[amendment_id] = (tag, root, os.path.join(root, filename))\n return d",
"def tag(self, text):\n\t\tpass",
"def paths(self, paths):\r\n self._paths = paths\r\n self._extract()",
"def is_tag(tag_name, user_path, current_user) -> bool:\n user = current_user[0]\n tag_list = os.listdir((user_path + '\\\\' + user).encode('unicode_escape'))\n temp = list(map(bytes.decode, tag_list))\n if tag_name in temp:\n return True\n else:\n return False",
"def search(wiki, pattern):\n wiki.search_tags(pattern)",
"def register_tag(self, txt, foreground, background):\r\n # self.tag_config(txt, foreground=foreground, background=background)\r\n self.known_tags.add(txt)",
"def traverse(name, furtherPath):",
"def update_tags(since=None, author=None):\n\n # in python 3.5+ we can use the following commented line to get a list of *.java files\n # glob.glob('./**/*.java', recursive=True):\n # but we keep the following for backward compatibility\n\n files = [path.join(dirpath, f)\n for dirpath, _, files in walk('.')\n for f in files if f.endswith('.java')]\n\n if len(files) == 0:\n print(\"Found no relevant *.java files in the project\");\n return\n\n # go over found *.java files and do actual javadoc updating\n for f in files:\n print(\"Processing file '{}'... \".format(f), end=\"\")\n\n jdoc = extract_javadoc(f)\n updated_since = None if has_since_tag(jdoc) else since\n updated_author = None if has_author_tag(jdoc) else author\n\n if updated_since is None and updated_author is None:\n print(\"skipping\")\n continue\n\n line = \"\"\n\n # user printed feedback\n if updated_since is not None:\n line += \"@since => {} \".format(updated_since)\n\n if updated_author is not None:\n line += \"@author => {} \".format(updated_author)\n\n print(line)\n\n insert_tags(f, jdoc, updated_since, updated_author)",
"def get_file_tags(self, fpath):\n\n # Get the dataset\n dataset = self.get_dataset(fpath)\n\n # Get the URIs for the datset\n uris = dataset.get_file_tags(filepath=fpath)\n\n # Turn uris into human readable tags\n tags = self.__facets.process_bag(uris)\n\n # Get DRS labels\n drs_facets = dataset.get_drs_labels(tags)\n\n # Generate DRS id\n drs = dataset.generate_ds_id(drs_facets, fpath)\n\n return TaggedDataset(drs, tags, uris)",
"def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))",
"def enrich(self, lazy_corpus_loader, fileids=None):\n tag_dict = self.tag_dict\n word_tag_dict = self.word_tag_dict\n for word_tag in lazy_corpus_loader.tagged_words(fileids=fileids):\n word = word_tag[0]\n tag = word_tag[1]\n if not tag_dict.get(tag, None):\n raise KeyError, \"Unknown pos-tag: %s\" % tag\n tag_dict = word_tag_dict.get(word, {})\n tag_dict[tag] = tag_dict.get(tag, 0) + 1\n word_tag_dict[word] = tag_dict\n self.word_tag_dict = word_tag_dict",
"def read_file(path: str, tags: list):\n if not os.path.exists(path):\n raise FileNotFoundError(\"file {} does not exists\".format(path))\n collection = {}\n doc_id = 0\n continue_read = True\n change_marqueur = False\n with open(path, 'r') as file:\n for l in file.readlines():\n\n if l[0] == \".\": # on est face à un marqueur\n marqueur = l[:2]\n if marqueur == \".I\":\n doc_id += 1\n collection[doc_id] = \"\"\n continue\n elif marqueur in tags:\n continue_read = True\n change_marqueur = True\n continue\n else:\n continue_read = False\n continue\n\n if continue_read:\n collection[doc_id] += l.strip()\n if change_marqueur:\n collection[doc_id] += \". \"\n change_marqueur = False\n else:\n collection[doc_id] += \" \"\n\n return collection",
"def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterPathList, self).__init__(*args, **kwargs)\n\n # Construct a regular expression tag evaluator.\n regextag = self.thistag.find('PathRegex')\n if regextag == None:\n raise ValueError('Required tag missing: PathRegex')\n self.regex = RegexTag(regextag)\n\n # Get the \"look for the first match\" flag.\n self.matchfirst = self.get_boolean('matchFirst')\n logger.debug('matchfirst = {0}'.format(self.matchfirst))\n\n # Get the list of path names.\n self.paths = self.context.tokens['Paths']\n logger.debug('paths = {0}'.format(self.paths))",
"def hook_tags_for_projects(task):\n if task['project'] in TAGS_FOR_PROJECTS.keys():\n for tag in TAGS_FOR_PROJECTS[task['project']]:\n task['tags'].add(tag)",
"def find(self, path, all=False):\n matches = []\n if path in self.sources:\n for match in self.loader.get_template_sources(path):\n if not all:\n return match\n matches.append(match)\n return matches",
"def search_tag(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.tags, f.tags.id, f.tags.name)\n q.where().equal(f.tags.name, _input)\n tag_data = j.executeQuery(q)\n\n if tag_data:\n tag_id, tag_name = tag_data[0]\n examples = _create_examples(j.list_word_by_tag, tag_name)\n return SelectorResult('tag', tag_id, tag_name, *examples)",
"def suggest_tags(content):\n suggested_keywords = _suggest_keywords(content)\n suggested_regexes = _suggest_regexes(content)\n suggested_tag_ids = suggested_keywords | suggested_regexes\n\n return Tag.objects.filter(id__in=suggested_tag_ids)",
"def remove_tag(args):",
"def pathfor( name, **matchdict ) :",
"def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)",
"def _make_tag_lookup(self):\n self.lookup = defaultdict(set)\n res = self.g.query(\n \"\"\"SELECT ?class ?tag WHERE {\n ?class rdfs:subClassOf+ brick:Class.\n ?class brick:hasAssociatedTag ?tag .\n ?tag rdf:type brick:Tag\n }\"\"\"\n )\n class2tag = defaultdict(set)\n for (cname, tag) in res:\n cname = cname.split(\"#\")[1]\n tag = tag.split(\"#\")[1]\n class2tag[cname].add(tag)\n for cname, tagset in class2tag.items():\n self.lookup[tuple(sorted(tagset))].add(cname)\n pickle.dump(self.lookup, open(\"taglookup.pickle\", \"wb\"))"
] | [
"0.6427988",
"0.6152635",
"0.59591275",
"0.5777887",
"0.5693095",
"0.5677073",
"0.5635031",
"0.5607307",
"0.55902433",
"0.5455293",
"0.5320511",
"0.5300698",
"0.527677",
"0.52514946",
"0.52197546",
"0.5191613",
"0.51899403",
"0.51578",
"0.515451",
"0.5131709",
"0.51221174",
"0.51094306",
"0.51031697",
"0.5074569",
"0.50594395",
"0.5057429",
"0.503827",
"0.5031781",
"0.50183624",
"0.50141305",
"0.499445",
"0.49786824",
"0.49759656",
"0.49515274",
"0.49500176",
"0.4941373",
"0.49390906",
"0.49355635",
"0.49347147",
"0.49243245",
"0.491461",
"0.4909768",
"0.49031416",
"0.4886498",
"0.48802242",
"0.48762324",
"0.4870364",
"0.48700777",
"0.485807",
"0.48515335",
"0.48497862",
"0.48478845",
"0.48414385",
"0.483076",
"0.48291653",
"0.48249504",
"0.48182645",
"0.48110005",
"0.48102722",
"0.4810224",
"0.48084265",
"0.48011094",
"0.47919428",
"0.47914404",
"0.47810867",
"0.47724524",
"0.47600305",
"0.47386226",
"0.47334215",
"0.47313404",
"0.47289425",
"0.47286525",
"0.47265196",
"0.47255415",
"0.47158605",
"0.47058967",
"0.47058967",
"0.47058967",
"0.47051072",
"0.46991333",
"0.4696298",
"0.46941176",
"0.46850187",
"0.46824616",
"0.46765968",
"0.46704516",
"0.46682274",
"0.46678004",
"0.46676573",
"0.46597725",
"0.4655022",
"0.4637995",
"0.4629898",
"0.46287793",
"0.46268007",
"0.46229807",
"0.461341",
"0.46060115",
"0.4604479",
"0.46028104",
"0.4601147"
] | 0.0 | -1 |
Will process the file; expected files are text and URL bookmarks... | def getContents(self, itemId, itemURI, *args):
if args:
actionId = self._db.addAction(args[0])
else:
actionId = -1
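# -1 is a sentinel meaning "no action" when the caller passed no extra args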
print('\t\t[%s] %s\t(%s)' % (itemId, itemURI, actionId))
# dissect the file
patURL = re.compile(r'URL=(?P<url>.*$)', re.IGNORECASE)
patHttp = re.compile(r'(?P<url>http.*$)', re.IGNORECASE)
patFtp = re.compile(r'(?P<url>ftp.*$)', re.IGNORECASE)
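# Matching cascade: try the explicit URL= field first, then fall back to
# bare http/ftp lines, so both .url bookmark files and plain text work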
f = open(itemURI, "r")
url = ''
idx = -1
for line in f:
idx += 1
m = patURL.match(line)
if not m:
m = patHttp.match(line)
if not m:
m = patFtp.match(line)
if m:
url = m.group('url')
itemIdRight = self._db.addItem(self._engine_id, url, datetime.datetime.now(), args)
self._db.addItemLink(self._engine_id, itemId, itemIdRight, 'Contains')
# we have a URI; now we want to action it, use the tail value to set the action:
self._db.addItemEvent(self._engine_id, actionId, itemIdRight)
self._db.addItemData(itemId, 'Contents', line, idx)
f.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_file(file_name):\n pass # delete this line and replace with your code here",
"def process_file(self, file, target_dir):\n raise NotImplementedError(\"Process file method not implemented\")",
"def process_file(filename):\n if cpp_style.can_handle(filename) or filename == '-':\n cpp_style.process_file(filename)\n elif text_style.can_handle(filename):\n text_style.process_file(filename)",
"def doTheJob(input_file):\n\n Parse.parseFile(input_file)\n Write.writeFile(input_file.replace(\".txt\", \".html\"))",
"def process_file(self, filename: str) -> None:\n try:\n with open(filename, mode='r', encoding='utf-8') as f:\n contents = f.read()\n except Exception as e: # pylint: disable=broad-exception-caught\n # log and ignore exceptions from read\n logging.exception('Error reading %s: %s', filename, e)\n else:\n self.process(filename, contents)",
"def process_raw_phrases(file_path):",
"def do_file (self, line) :\n\t\targ = line.split()\n\t\tfor a in arg :\n\t\t\tif self.exists( a ) :\n\t\t\t\tprint \"%s: %s\" % ( a, self.__wd['content'][a]['type'] )",
"def process_file(input, output):\r\n import re\r\n\r\n t = re.compile(\"^\\s*:(.*)<(.*)>:\\s*$\")\r\n while True:\r\n l= input.readline()\r\n\tif not l: break\r\n\tlogger.debug(\"Read '%s' from input file\", l) \r\n\tm = t.match(l)\r\n\tif m:\r\n\t logger.debug(\"Matched line = '%s'\", m.group(0))\r\n\t file, tag = m.group(1,2)\r\n\t logger.debug(\"Finding File = '%s', tag = '%s'\", file, tag)\r\n\t s = get_tag(file, tag)\r\n\t output.write(s)\r\n\r\n\telse:\r\n\t logger.debug(\"copying '%s' directly to the output.\", l)\r\n output.write(l)",
"def identify_file(self, file):",
"def process_file(file_path):\n\n enc = detect_bom_encoding(file_path)\n if enc is None:\n with open(file_path, 'r') as f:\n result = run_checks(file_path, f)\n else:\n with open(file_path, 'r', encoding=enc) as f:\n result = run_checks(file_path, f)\n print('Finished processing %s\\n' % file_path)\n return result",
"def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n decoded_lines = decoded_text.split('\\n')\n\n # Remove titles of Wikipedia articles if desired\n if self.remove_headers:\n filtered_lines = []\n for line in decoded_lines:\n line_strip = line.strip()\n if len(line_strip) > 0:\n if line_strip[0] != '=' and line_strip[-1] != '=':\n filtered_lines.append(line)\n decoded_lines = filtered_lines\n\n eol = self.eol or ''\n if self.split_by_line:\n text = [(line.lstrip() + eol,) for line in decoded_lines]\n else:\n text = [(eol.join(decoded_lines),)]\n\n return text",
"def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n # Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text",
"def process_file(_dir, _name, pkg_id):\n file_path = os.path.join(_dir, _name)\n if os.path.islink(file_path):\n return\n try:\n fp = open(file_path, 'r')\n first_line = fp.readline()\n if constants.PYTHON in first_line:\n _func = extract_python_kwds\n elif constants.BASH in first_line or constants.SHELL in first_line:\n _func = extract_shell_kwds\n else:\n # Return if file is not a python or a shell script.\n return\n # Populates the keywords in the file.\n _func(fp, pkg_id)\n if _name not in constants.STOP_WORDS:\n insert_keyword(_name.split('.')[0], pkg_id)\n except Exception as e:\n# @todo(Logging, level info)\n print file_path, e",
"def process_file(filename):\n print \"Reading and Parsing File: {}\".format(filename)\n parsed_entries = file_parser(filename)\n print \"Starting to Process Entries\"\n chunked_entires = chunk_entries(parsed_entries)\n return [process_entries(entry) for entry in chunked_entires]",
"def _process_file(self, path):\n\t\tself.getFailures(path)\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tticket = self.failManager.toBan()\n\t\t\t\tself.jail.putFailTicket(ticket)\n\t\texcept FailManagerEmpty:\n\t\t\tself.failManager.cleanup(MyTime.time())\n\t\tself.dateDetector.sortTemplate()\n\t\tself.__modified = False",
"def process(self):\r\n count = 0\r\n if (self.file_handle.closed):\r\n self.file_handle = open(self.file_handle.name)\r\n soup = BeautifulSoup(self.file_handle)\r\n if not soup.contents[0] == \"DOCTYPE NETSCAPE-Bookmark-file-1\":\r\n raise Exception(\"File is not a google bookmarks file\")\r\n\r\n urls = dict() # url:url_metadata\r\n\r\n # we don't want to just import all the available urls, since each url\r\n # occurs once per tag. loop through and aggregate the tags for each url\r\n for tag in soup.findAll('h3'):\r\n links = tag.findNextSibling('dl')\r\n\r\n if links is not None:\r\n links = links.findAll(\"a\")\r\n\r\n for link in links:\r\n url = link[\"href\"]\r\n if url.startswith('javascript:'):\r\n continue\r\n tag_text = tag.text.replace(\" \", \"-\")\r\n if url in urls:\r\n urls[url]['tags'].append(tag_text)\r\n else:\r\n tags = [tag_text] if tag_text != 'Unlabeled' else []\r\n\r\n # get extended description\r\n has_extended = (\r\n link.parent.nextSibling and\r\n link.parent.nextSibling.name == 'dd')\r\n if has_extended:\r\n extended = link.parent.nextSibling.text\r\n else:\r\n extended = \"\"\r\n\r\n # Must use has_key here due to the link coming from\r\n # the parser and it's not a true dict.\r\n if link.has_key('add_date'): # noqa\r\n if int(link['add_date']) < 9999999999:\r\n timestamp_added = int(link['add_date'])\r\n else:\r\n timestamp_added = float(link['add_date']) / 1e6\r\n else:\r\n link['add_date'] = time.time()\r\n\r\n urls[url] = {\r\n 'description': link.text,\r\n 'tags': tags,\r\n 'extended': extended,\r\n 'date_added': datetime.fromtimestamp(\r\n timestamp_added),\r\n }\r\n\r\n # save the bookmarks\r\n ids = []\r\n for url, metadata in urls.items():\r\n try:\r\n bmark = self.save_bookmark(\r\n unicode(url),\r\n unicode(metadata['description']),\r\n unicode(metadata['extended']),\r\n u\" \".join(metadata['tags']),\r\n dt=metadata['date_added'])\r\n DBSession.flush()\r\n except InvalidBookmark:\r\n bmark = None\r\n if bmark:\r\n ids.append(bmark.bid)\r\n if count % COMMIT_SIZE == 0:\r\n transaction.commit()\r\n # Start a new transaction for the next grouping.\r\n transaction.begin()\r\n\r\n # Commit any that are left since the last commit performed.\r\n transaction.commit()\r\n\r\n from bookie.bcelery import tasks\r\n # For each bookmark in this set that we saved, sign up to\r\n # fetch its content.\r\n for bid in ids:\r\n tasks.fetch_bmark_content.delay(bid)",
"def process_file(self, filepath, only_if_updated=True):\n raise NotImplementedError()",
"def process_file(self, filename, order=2):\n fp = open(filename)\n self.skip_gutenberg_header(fp)\n\n for line in fp:\n for word in line.rstrip().split():\n self.process_word(word, order)\n\n #print(\">>>DEBUG the suffix map\")\n #i = 0\n #for k,v in self.suffix_map.items():\n # print(\"key is {}, value is {}\".format(k, v))\n # i += 1\n # if i > 10:\n # break",
"def handle_file(self, f, settings):\n ext = splitext(f.dest_file)[1][1:]\n if not ext.lower() == \"xml\": # Not a XML file\n # Ignore the file. It is not an XML file.\n return\n\n path = f.get_patched_file_path()\n if not path: # Ignore the file.\n return\n\n notAllowedWords = settings['ReservedWords'].split(',')\n attributesToCheckLenght = settings['AttributesToCheck'].split(',')\n maxLength = settings['MaxLength']\n headerIdentifierRegex = settings['LiquibaseHeaderRegex']\n isLiquibaseFile = False\n\n with open(path, 'rb') as content_test:\n for line in content_test:\n if re.findall(headerIdentifierRegex, line):\n isLiquibaseFile = True\n if not isLiquibaseFile:\n # isNotALiquibaseFile\n return\n with open(path, 'rb') as content:\n line_num = 0\n for line in content:\n line_num += 1\n self.checkLine(f, line, line_num, attributesToCheckLenght,\n notAllowedWords, maxLength)",
"def process_based_on_type(file_path):\n\t# Is this a file?\n\tif os.path.isfile(file_path):\n\t\tprocess_file(file_path)\n\t# Or is it a directory?\n\telif os.path.isdir(file_path):\n\t\tprocess_directory(file_path)",
"def main():\n\targuments_sent = sys.argv\n\tif len(arguments_sent) > 1:\n\t\tfile_path = arguments_sent[1]\n\t\tprocess_based_on_type(file_path)",
"def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)",
"def fileparser(file_path,graph_path,keyword_path,nkeyword_path,anchor_path):\n\n\tf = open(file_path, 'r')\n\n\t#TODO perhaps a sanity check, see if file exists and is well formated, also checking dirs\n\n\t#main event\n\t#feed the file contents to our HTML parser, to process it\n\thtml_code = f.read().decode('utf-8')\n\thtml_parser = OurHTMLParser()\n\thtml_parser.feed(html_code)\n\n\t#keep the results\n\tanchors = html_parser.anchors\n\tkeyword_idx = html_parser.keyword_idx\n\tnkeyword_idx = html_parser.nkeyword_idx\n\n\t#get the main file name: /home/user/fileX.html -> fileX\n\tfullname = os.path.basename(file_path)\n\tname = os.path.splitext(fullname)[0]\n\n\t#converting graphs and indices to save\n\tgraph = gengraph(keyword_idx)\n\tkeyword_dic = idx2json(keyword_idx)\n\tnkeyword_dic = idx2json(nkeyword_idx)\n\n\tgraph_filepath = os.path.join(graph_path,name) + '_graph' + '.gml'\n\tkeyword_filepath = os.path.join(keyword_path,name) + '_keywords' + '.json'\n\tnkeyword_filepath = os.path.join(nkeyword_path,name) + '_non_keywords' + '.json'\n\tanchor_filepath = os.path.join(anchor_path,name) + '_anchors' + '.json'\n\n\n\tnx.write_gml(graph,graph_filepath)\n\t\t\n\twith open(keyword_filepath, 'w') as keyword_f:\n\t\tjson.dump(keyword_dic,keyword_f)\n\n\twith open(nkeyword_filepath, 'w') as nkeyword_f:\n\t\tjson.dump(nkeyword_dic,nkeyword_f)\n\n\twith open(anchor_filepath, 'w') as anchor_f:\n\t\tjson.dump(anchors,anchor_f)",
"def _analyze_file(self) -> None:\n\n md_reader = MarkdownReader.from_file(self.path)\n for md_line in md_reader.lines:\n if len(md_line.current_section) == 1:\n if self.title and self.title != md_line.current_section[0]:\n logger.warning(f\"{self.path} Warning: Multiple titles. \")\n self.title = md_line.current_section[0]\n if (\n not md_line.is_code_block\n and md_line.text.lower().strip().startswith(\"tags: \")\n ):\n if self.tags:\n logger.warning(\n f\"{self.path} Warning: Tags were already set.\"\n )\n self.tags = self._read_tags(md_line.text)\n if (\n len(md_line.current_section) >= 2\n and md_line.current_section[1].lower().strip() == \"backlinks\"\n ):\n pass\n else:\n self.links.extend(self.id_link_regex.findall(md_line.text))",
"def process_from_file():\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\"",
"def process_wiki_file(args: Tuple[str, str, int]) -> str:\n filepath, language, min_sent_word_count = args\n with bz2.open(filepath, \"rt\", encoding=\"utf8\") as bz2_file:\n\n # Extract text between <doc> xml tags\n soup = BeautifulSoup(bz2_file.read(), \"lxml\")\n docs = soup.find_all(\"doc\")\n wiki_dump_content = \"\"\n for i, doc in enumerate(docs):\n processed_text = process_wiki_doc_text(\n doc.text, language, min_sent_word_count\n )\n if len(processed_text) == 0:\n continue\n\n # Append to result\n if i > 0 and len(wiki_dump_content) > 0:\n wiki_dump_content += \"\\n\"\n wiki_dump_content += processed_text\n\n return wiki_dump_content",
"def parse_document(file):\n lines = file.read_text(encoding='utf-8').split('\\n')\n # If the \"#\" character is present, it means the line contains the\n # document original link. So, if the # is not present,\n # we have a normal paragraph to append to the list.\n return [line for line in lines if line != '' and '#' not in line]",
"def test_parse_file_source():\n from bs4 import BeautifulSoup\n from scraper import read_from_file, parse_source\n content, encoding = read_from_file(TEST_FILE)\n result = parse_source(content, encoding)\n assert isinstance(result, BeautifulSoup)",
"def index_file(self, file_name):\n self.contents = []\n article_text = \"\"\n article_annots = [] # for annot-only index\n\n f = open(file_name, \"r\")\n for line in f:\n line = line.replace(\"#redirect\", \"\")\n # ------ Reaches the end tag for an article ---------\n if re.search(r'</doc>', line):\n # ignores null titles\n if wiki_uri is None:\n print \"\\tINFO: Null Wikipedia title!\"\n # ignores disambiguation pages\n elif (wiki_uri.endswith(\"(disambiguation)>\")) or \\\n ((len(article_text) < 200) and (\"may refer to:\" in article_text)):\n print \"\\tINFO: disambiguation page \" + wiki_uri + \" ignored!\"\n # ignores list pages\n elif (wiki_uri.startswith(\"<wikipedia:List_of\")) or (wiki_uri.startswith(\"<wikipedia:Table_of\")):\n print \"\\tINFO: List page \" + wiki_uri + \" ignored!\"\n # adds the document to the index\n else:\n self.__add_to_contents(Lucene.FIELDNAME_ID, wiki_uri, Lucene.FIELDTYPE_ID)\n if self.annot_only:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_annots, Lucene.FIELDTYPE_ID_TV)\n else:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_text, Lucene.FIELDTYPE_TEXT_TVP)\n self.lucene.add_document(self.contents)\n self.contents = []\n article_text = \"\"\n article_annots = []\n\n # ------ Process other lines of article ---------\n tag_iter = list(self.tagRE.finditer(line))\n # adds line to content if there is no annotation\n if len(tag_iter) == 0:\n article_text += line\n continue\n # A tag is detected in the line\n for t in tag_iter:\n tag = t.group(3)\n if tag == \"doc\":\n doc_title = self.titleRE.search(t.group(2))\n wiki_uri = WikipediaUtils.wiki_title_to_uri(doc_title.group(1)) if doc_title else None\n if tag == \"a\":\n article_text += t.group(1) + t.group(4) # resolves annotations and replace them with mention\n # extracts only annotations\n if self.annot_only:\n link_title = self.linkRE.search(t.group(2))\n link_uri = WikipediaUtils.wiki_title_to_uri(unquote(link_title.group(1))) if link_title else None\n if link_uri is not None:\n article_annots.append(link_uri)\n else:\n print \"\\nINFO: link to the annotation not found in \" + file_name\n last_span = tag_iter[-1].span()\n article_text += line[last_span[1]:]\n f.close()",
"def preprocess (self, filecontents):\n\t\treturn filecontents",
"def check_file(filename):\n f = open(filename, 'r')\n html = f.read()\n f.close()\n\n tip = TIParser(filename, html)\n tip.feed(html)\n tip.finish()",
"def parse_plain_text_export(text_file):\n\n text_file.seek(0)\n for line in text_file.readlines():\n urls = re.findall(URL_REGEX, line) if line.strip() else ()\n for url in urls:\n yield {\n 'url': url,\n 'timestamp': str(datetime.now().timestamp()),\n 'title': None,\n 'tags': '',\n 'sources': [text_file.name],\n }",
"def processFile(directory: str, filepath: str, fname: str):\n\n fname = fname.split(\".\")[0]\n extension = \"html\"\n\n # Generate filename and directory of the files to be created\n save_path = os.path.join(directory, \"Modified_files\")\n save_path = os.path.join(save_path, fname)\n # Open a blank file to write translated HTML to\n f = open(save_path + \".\" + extension, \"w+\")\n\n try:\n # Opening the file\n with open(filepath) as fp:\n # Reading data line-by-line\n line = fp.readline()\n cnt = 1\n while line:\n # process the line extracted\n temp = processLine(line)\n line = fp.readline()\n cnt += 1\n # write the processed line to the newly created file\n f.write(temp)\n except IOError:\n logging.error('An error occurred trying to read the file.')\n finally:\n # Close the file to save changes\n f.close()\n\n logging.info(\"Succeeded.. Generated Modified_Files/\" + fname +\n \".\" + extension + \" in the directory passed.\")",
"def process_file(filename, skip_header=True):\n hist = {}\n fp = file(filename)\n fullwordlist=[]\n # if skip_header:\n # skip_gutenberg_header(fp)\n\n for line in fp:\n holder=process_line(line,hist)\n #print holder\n fullwordlist.extend(holder)\n return fullwordlist",
"def process(self, file=False):\n if file:\n doc = self.corpus.getDocument(file)\n print \"process Document:\", doc.path\n print\n self._process(doc)\n return\n\n count = 0\n\n while True:\n count += 1\n doc = self.corpus.getDocument()\n if doc == None or count > self.limit:\n break\n\n self._process(doc)",
"def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)",
"def do_single_file_preprocess(pdf_file):",
"def process(text, output_dir, file_name, json_output):\n\t\n\t# Process HTML\n\tprocessed_text_html = process_html(text)\n\t# Write processed HTML output \n\t#pre_proc.create_text_file(output_dir + \"/html_\" + file_name + \"_pre.html\", processed_text_html)\n\n\t# Convert HMTL to MD\n\ttext_md = pre_proc.extract_text_md(processed_text_html)\n\n\t# Process MD\n\tprocessed_text_md = process_md(text_md)\n\t\n\tif(json_output):\n\t\t# Convert MD to JSON\n\t\tprocessed_json = pre_proc.convert_md_to_json(processed_text_md, file_name)\n\t\t# Write processed JSON output \n\t\tpre_proc.create_binary_file(output_dir + \"/\" + file_name + \".json\", processed_json)\n\telse:\n\t\t# Write processed MD output \n\t\tpre_proc.create_text_file(output_dir + \"/\" + file_name + \".md\", processed_text_md)",
"def parse_file(file: CodeFile, config: dict):\n path = os.path.join(file.dir, f'{file.name}.{file.extension}')\n print(path)\n file_gen = (line for line in open(path))\n for line in file_gen:\n parse.apply_patterns(line.strip(), config['patterns'], file)",
"def process_f(filename, order=3):\n\n fp = open(filename) # open file\n\n for line in fp: # for each line of text in file: do something\n for word in line.rstrip().split(): # for each word in each line: strip \\r and split into tuple\n process_word(word, order) # for tuple, call process word()\n fp.close() # close file",
"def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def markup_file_contents(request, cfg, file_lines, filename, mime_type, encoding, colorize):\n\n # Nothing to mark up? So be it.\n if not file_lines:\n return []\n\n # Determine if we should (and can) use Pygments to highlight our\n # output. Reasons not to include a) being told not to by the\n # configuration, b) not being able to import the Pygments modules,\n # and c) Pygments not having a lexer for our file's format.\n pygments_lexer = None\n if colorize:\n from pygments import highlight\n from pygments.formatters import HtmlFormatter\n from pygments.lexers import (\n ClassNotFound,\n get_lexer_for_mimetype,\n get_lexer_for_filename,\n guess_lexer,\n )\n\n # First, see if there's a Pygments lexer associated with MIME_TYPE.\n if mime_type:\n try:\n pygments_lexer = get_lexer_for_mimetype(\n mime_type, tabsize=cfg.options.tabsize, stripnl=False\n )\n except ClassNotFound:\n pygments_lexer = None\n\n # If we've no lexer thus far, try to find one based on the FILENAME.\n if not pygments_lexer:\n try:\n pygments_lexer = get_lexer_for_filename(\n filename, tabsize=cfg.options.tabsize, stripnl=False\n )\n except ClassNotFound:\n pygments_lexer = None\n\n # Still no lexer? If we've reason to believe this is a text\n # file, try to guess the lexer based on the file's content.\n if not pygments_lexer and is_text(mime_type) and file_lines:\n try:\n pygments_lexer = guess_lexer(\n file_lines[0], tabsize=cfg.options.tabsize, stripnl=False\n )\n except (ClassNotFound, UnicodeDecodeError):\n pygments_lexer = None\n\n # If we aren't highlighting, just return FILE_LINES with URLs\n # manually marked up and tabs manually expanded.\n if not pygments_lexer:\n\n def _poor_mans_markup(line):\n line = line.expandtabs(cfg.options.tabsize)\n return markup_escaped_urls(sapi.escape(line))\n\n return [_poor_mans_markup(line) for line in file_lines]\n\n # If we get here, we're letting Pygments highlight syntax.\n ps = CustomPygmentsSink()\n highlight(\n \"\".join(file_lines),\n pygments_lexer,\n HtmlFormatter(nowrap=True, classprefix=\"pygments-\", encoding=None),\n ps,\n )\n return ps.colorized_file_lines",
"def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)",
"def process_textfile(inf):\n list_of_urls_to_check = [line.rstrip() for line in inf.readlines()]\n return list_of_urls_to_check",
"def process(self, filename: str, contents: str) -> None:\n self._current_file_decorators = set()\n self._current_file = filename\n try:\n parsed = ast.parse(contents, filename=filename)\n except Exception as e: # pylint: disable=broad-exception-caught\n # logging errors when parsing file\n logging.exception('Error parsing %s: %s', filename, e)\n else:\n self.visit(parsed)\n finally:\n self._current_file = None\n self._current_file_decorators = set()",
"def _fromFile(self,filepath, filename):\n pass",
"def main():\n parse_file(sys.argv[1])",
"def parse(filename):\n file_map = {\n '1995-1996.html': ninety_six,\n '2005-2006.html': twenty_six,\n '2014-2015.html': twenty_fifteen\n }\n func = file_map.get(filename, lambda: \"Invalid File\")\n func(filename)",
"def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)",
"def _parse(self, infile):\n raise NotImplementedError()",
"def analyze_file(self, filename):\n if self.exceeded_max():\n return\n\n if self.preprocess is not None:\n input = self.preprocess(filename)\n else:\n with open(filename, \"r\") as file:\n input = file.read()\n\n self.analyze_raw(input)",
"def apply(self, opened_file):",
"def process_doc_files(*files, add_new_line=True):\n for file in files:\n # Treat folders\n if os.path.isdir(file):\n files = [os.path.join(file, f) for f in os.listdir(file)]\n files = [f for f in files if os.path.isdir(f) or f.endswith(\".mdx\") or f.endswith(\".py\")]\n process_doc_files(*files, add_new_line=add_new_line)\n else:\n try:\n process_doc_file(file, add_new_line=add_new_line)\n except Exception:\n print(f\"There is a problem in {file}.\")\n raise",
"def process_file(filename, skip_header):\n hist = {}\n fp = open(filename)\n\n if skip_header:\n skip_gutenberg_header(fp)\n\n for line in fp:\n if line.startswith(''):\n break\n\n process_line(line, hist)\n\n return hist",
"def parse_file(self, file):\n return self.parse(file.read())",
"def parse_source_file(self, filepath):\n raise NotImplementedError('Not Implemented')",
"def complete_file(self, text, line, *_):\n leading = line[len(\"file \") :]\n curpath = os.path.join(os.path.curdir, leading)\n\n def isdql(parent, filename):\n \"\"\"Check if a file is .dql or a dir\"\"\"\n return not filename.startswith(\".\") and (\n os.path.isdir(os.path.join(parent, filename))\n or filename.lower().endswith(\".dql\")\n )\n\n def addslash(path):\n \"\"\"Append a slash if a file is a directory\"\"\"\n if path.lower().endswith(\".dql\"):\n return path + \" \"\n else:\n return path + \"/\"\n\n if not os.path.exists(curpath) or not os.path.isdir(curpath):\n curpath = os.path.dirname(curpath)\n return [\n addslash(f)\n for f in os.listdir(curpath)\n if f.startswith(text) and isdql(curpath, f)\n ]",
"def process(self, fulltext=None):\r\n raise NotImplementedError(\"Please implement this in your importer\")",
"def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')",
"def process(file_path):\n\n try:\n _process(file_path)\n except Exception as exc:\n print(\"UPS\")\n raise exc",
"def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type # print now only for degug\n\n for i in self.ignore:\n if i in event.src_path or os.path.isdir(event.src_path):\n print \"Ignoring...\"\n return\n\n mod_file = event.src_path.split(self.source)[1]\n for r in self.rules:\n mod_file = mod_file.replace(r[0], r[1])\n\n print \"Writing:\", (self.destination + mod_file)\n \n input_file = utils.readFile(event.src_path)\n\n file_type = mod_file.split(\".\")[-1]\n reverted = utils.revert( input_file, \"(*\", \"*)\" ) if file_type == \"thy\" else utils.revert( input_file, \"/*\", \"*/\" )\n \n if len( reverted ) == 0 and len( input_file ) != 0:\n print \"Something might be wrong??\"\n else: utils.writeFile( self.destination + mod_file, reverted )",
"def parse(self, filename):\n try:\n if 't' in self.FILE_OPEN_MODE:\n kw = {'encoding': self.FILE_ENCODING, 'errors': 'ignore'}\n else:\n kw = {}\n with open(filename, self.FILE_OPEN_MODE, **kw) as infile:\n self._parse(infile)\n except IOError:\n raise FileFormatError()",
"def analyze_urls(filename, topic):\n # Initialize an empty list. Note that I store my urls and references\n # in a sort of strange way. Each element in result_list is a list of two\n # elements, the first element being the url, and the second element\n # being a list of all the references to the url\n result_list = []\n\n # Using the with...as construct to open the file in read mode\n with open(filename, \"r\", encoding=\"utf-8\") as files:\n # Iterate over each line (each is a url)\n for line in files:\n # Use the try ... except construct\n try:\n # Try to open each url\n with urllib.request.urlopen(line) as url_file:\n # Read the page\n page = url_file.read()\n # Decode the page\n decoded_page = page.decode(\"UTF-8\")\n # Regex expression to find the places which open\n # with a > then have some stuff, then the topic, then\n # close with a <\n pattern = fr\">[^<]*\\b{topic}\\b.*?<\"\n\n # Use the findall method from re to find all of the\n # occurrences of pattern in decoded_page as a list\n # The flags are IGNORECASE and DOTALL\n my_list = re.findall(pattern, decoded_page,\n re.IGNORECASE | re.DOTALL)\n\n # If my_list is not empty\n if my_list:\n # Slice off the the closing and opening angle\n # brackets using a list comprehension\n new_list = [word[1:-1] for word in my_list]\n # Append a new list of two elements to result_list,\n # where the first element of the list is the url,\n # and the second element of the list is the list of\n # references\n result_list.append([line, new_list])\n # One possible error is the urllib.error.URLError\n except urllib.error.URLError as url_err: # Catch the error\n # Print a message, url, and the error\n print(\"Error opening url:\", line, url_err)\n # Another possible error is the UnicodeDecodeError\n except UnicodeDecodeError as dec_err: # Catch the error\n # Print a message, and url\n print(\"Error decoding url:\", line)\n # Print the error\n print(dec_err)\n # Except all other errors\n except:\n pass\n # Return the result_list\n return result_list",
"def parse(input_path, output_dir, func_validate = None, target_tag = None, size_limit_KB = None):\n if not os.path.exists(output_dir): os.makedirs(output_dir)\n\n if os.path.isfile(input_path):\n print (\"The input file is\", input_path)\n print (\"The output directory is\", output_dir)\n files = [[input_path, os.path.basename(input_path)]]\n else:\n input_dir = input_path\n print (\"The input directory is\", input_dir)\n print (\"The output directory is\", output_dir)\n print (\"Searching HTML files in the input directory...\")\n files = file_utils.get_filelist_in_path(\"html\", input_dir, True)\n\n def process(files):\n corpus_store = corpus_utils.CorpusStore(func_validate = func_validate)\n\n for idx, file in enumerate(files):\n f_abst = file[0] # absolute path\n f_rel = file[1] # relative path\n print (\"(\", idx, \"of\", len(files), \") file\", f_rel)\n file_content = file_utils.read_file_any_encoding(f_abst)\n if (len(file_content) == 0):\n continue\n\n # 1st, process the data with Atomic Parser\n parser = pat.Parser(corpus_store)\n parser.parse(file_content)\n\n # Process the same data with Atomic HeaderBody Parser\n # parser = pah.Parser(corpus_store)\n # parser.parse(file_content)\n\n # Process the same data with HeaderBody Parser\n parser = phb.Parser(corpus_store, target_tag)\n parser.parse(file_content)\n\n # Export the parsed data into file\n print (\"Exporting the result...\")\n return corpus_store.export_corpus(output_dir, size_limit_KB = size_limit_KB)\n\n print (\"Total\", len(files), \"files to process.\")\n exported_files = process(files)\n print (\"Exported:\", exported_files)",
"def clean(input_path, expected_encodings = ('ASCII', 'UTF-8', 'SHIFT_JIS', 'CP932')):\n \n if os.path.isfile(input_path):\n print (\"The input file is\", input_path)\n files = [[input_path, os.path.basename(input_path)]]\n else:\n input_dir = input_path\n print (\"The input directory is\", input_dir)\n print (\"Searching HTML files in the input directory...\")\n files = file_utils.get_filelist_in_path(\"html\", input_dir, True)\n\n # Parse the files and store the result into data store\n print (\"Total\", len(files), \"files to process. Loading...\")\n for idx, file in enumerate(files):\n f_abst = file[0] # absolute path\n f_rel = file[1] # relative path\n print (\"(\", idx, \"of\", len(files), \") file\", f_rel)\n file_content, enc = file_utils.read_filelist_any_encoding(f_abst)\n if (len(file_content) == 0 or not enc in expected_encodings):\n print(u\"File read failed. Attempting to download the original file...\")\n downloaded_file = download_html(f_rel)\n file_content, enc = file_utils.read_filelist_any_encoding(downloaded_file)\n if (len(file_content) == 0):\n log(u\"Failed to read file even with re-downloaded file\", f_abst)\n continue\n else:\n if (not enc in expected_encodings):\n log(u\"The file encoding\", enc, \"is not as expected. But proceed.\", f_abst)\n print(u\"Replaced with downloaded file\", f_rel)\n shutil.copyfile(downloaded_file, f_abst)\n\n # Delete file if it contains 404 File Not Found\n if check_contain(file_content, \"<title>404 File Not Found</title>\"):\n log(u\"Removed file containing \\\"404 File Not Found\\\"\", f_abst)\n os.remove(f_abst)\n\n print (\"Finished.\")",
"def parse_file(self, filepath):\n\n xml_file = open(filepath, \"r\")\n xml = xml_file.read()\n content = \"\"\n\n xml_file.close()\n\n for line in xml.replace(\"&\", \"&\").split(\"\\n\"):\n if content != \"\":\n content += \" \"\n content += re.sub(\"(<(P|F).*?>)|(<\\\\/P>)\", \"\", line).strip()\n # XML cleanning\n\n start_offset = \"<START_OFFSET_DUCFileRep>\"\n content = start_offset + content\n content = content.replace(\"</LP>\", \"</LP>%s\"%start_offset)\n content = content.replace(\"</TEXT>\", \"</TEXT>%s\"%start_offset)\n content = re.sub(\"%s.*?<LP>(.*?)<\\\\/LP>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*?<TEXT>(.*?)<\\\\/TEXT>\"%start_offset, \"\\\\1\", content)\n content = re.sub(\"%s.*\"%start_offset, \"\", content)\n\n self.set_content(content)",
"def process_full_file(self, code):\n return code",
"def _preprocess_file(file_name):\n raw_content = utils.run_on_main_thread(\n partial(utils.get_file_content, file_name, force_lf_endings=True))\n\n # replace all comments with spaces to not change the position\n # of the rest\n comments = [c for c in _RE_COMMENT.finditer(raw_content)]\n content = list(raw_content)\n for m in comments:\n for i in range(m.start(), m.end()):\n content[i] = ' '\n content = \"\".join(content)\n return raw_content, content",
"def _parse_file_path(self, input_path):\n pass",
"def main(filename: str, /) -> None:",
"def read_file(root, file_name, file_type='t'):\r\n\r\n page_dir = handle_path(main_directory + '/' + root + '/' + file_name)\r\n\r\n try:\r\n # if type != 'b' and type != 't':\r\n # raise\r\n with open(page_dir, 'r' + file_type) as page_reader:\r\n return str(page_reader.read())\r\n except FileNotFoundError:\r\n print(\"The file the user requested doesn't exist\")\r\n raise_http_error(\"Not Found\")\r\n except OSError:\r\n print(\"The server couldn't get the page file\")\r\n raise_http_error(\"Internal Server Error\")",
"def __scan_file(self, args, next_file):\n\n POGGER.info(\"Scanning file '$'.\", next_file)\n context = self.__plugins.starting_new_file(next_file)\n\n try:\n POGGER.info(\"Scanning file '$' token-by-token.\", next_file)\n source_provider = FileSourceProvider(next_file)\n if args.x_test_scan_fault:\n source_provider = None\n actual_tokens = self.__tokenizer.transform_from_provider(source_provider)\n\n if actual_tokens and actual_tokens[-1].is_pragma:\n self.__plugins.compile_pragmas(\n next_file, actual_tokens[-1].pragma_lines\n )\n actual_tokens = actual_tokens[:-1]\n\n POGGER.info(\"Scanning file '$' tokens.\", next_file)\n for next_token in actual_tokens:\n POGGER.info(\"Processing token: $\", next_token)\n self.__plugins.next_token(context, next_token)\n\n POGGER.info(\"Scanning file '$' line-by-line.\", next_file)\n source_provider = FileSourceProvider(next_file)\n line_number = 1\n next_line = source_provider.get_next_line()\n while next_line is not None:\n POGGER.info(\"Processing line $: $\", line_number, next_line)\n self.__plugins.next_line(context, line_number, next_line)\n line_number += 1\n next_line = source_provider.get_next_line()\n\n POGGER.info(\"Completed scanning file '$'.\", next_file)\n self.__plugins.completed_file(context, line_number)\n\n context.report_on_triggered_rules()\n except Exception:\n context.report_on_triggered_rules()\n raise",
"def process(self,line):\n\n pattern_str = f\"src=.?[\\s\\\"].*?[\\s\\\"]\"\n p = re.compile(pattern_str)\n for m in p.finditer(line):\n\n file = m.group(0).split(\"src=\")[1][1:-1]\n if file.startswith(\"http\"):\n continue\n\n new_file = self._copy_file(file)\n\n re.sub(file,new_file,line)\n\n return line",
"def process(self, event):\n # the file will be processed there\n print (event.src_path, event.event_type) # print now only for degug",
"def process_files(self):\n matcher = self.choose_algorithm()\n # process one file at the time for better memory management\n for i, element in enumerate(self.input):\n filepath, _ = element\n\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as readfile:\n for line in readfile:\n matcher.find_match(line, self.case_insensitive)\n\n # collect unreadeable files for error log\n except Exception:\n self.errors.append(str(filepath))\n\n # copy results and reset matcher for next file\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n matcher.reset()\n\n # output - print or json\n if self.results:\n self.output(element)\n\n # if json print progress bar\n if self.json:\n self.progress_bar(i+1, len(self.input), prefix=\"Matching:\",\n fixed_len=True, length=40)",
"def parse_file(self, path):\r\n return self._parse(antlr3.ANTLRFileStream(path))",
"def _handle_import(contents, use_tags, owner):\n \n lines = contents.decode(\"utf-8\").split(\"\\n\")\n \n title = re.compile(r\"<a.*?>(.+?)</a>\", re.I)\n url = re.compile(r\"\"\"<a.*href=['\"](.+?)['\"]\"\"\", re.I)\n tags = re.compile(r\"\"\"<a.*?tags=[\"'](.+?)[\"']\"\"\", re.I)\n addTime = re.compile(r\"\"\"<a.*?add_date=[\"'](\\d+?)[\"']\"\"\", re.I)\n \n for l in lines:\n if \"<a\" in l.lower() and \"</a>\" in l.lower():\n bookmark = {}\n \n bookmark[\"title\"] = title.search(l)\n if not bookmark[\"title\"]:\n continue\n bookmark[\"title\"] = _unescape(bookmark[\"title\"].group(1))\n \n bookmark[\"url\"] = url.search(l)\n if not bookmark[\"url\"]:\n continue\n bookmark[\"url\"] = _unescape(bookmark[\"url\"].group(1))\n \n bookmark[\"tags\"] = [];\n if use_tags:\n result = tags.search(l)\n if result:\n bookmark[\"tags\"] = map(_unescape, result.group(1).split(\",\"))\n \n bookmark[\"added\"] = addTime.search(l)\n if bookmark[\"added\"]:\n bookmark[\"added\"] = bookmark[\"added\"].group(1)\n \n if not Bookmark.objects.filter(owner=owner, url=bookmark[\"url\"]).exists():\n bm = Bookmark(owner=owner, url=bookmark[\"url\"], title=bookmark[\"title\"])\n \n bm.save()\n if bookmark[\"added\"]:\n bm.added = datetime.datetime.fromtimestamp(int(bookmark[\"added\"]))\n \n for t in bookmark[\"tags\"]:\n bm.tag(t)\n \n bm.save()\n bm.autotag_rules()",
"def interpret_file(self, file, source=None, process_percents=True):\n\n for pline in self.parser.parse_file(file,\n source=source,\n process_percents=process_percents):\n action = self.execute(pline)\n if action is None:\n pass\n elif action == 'emergency stop':\n return 'Emergency Stop'\n elif action == 'pause program':\n return 'Pause'\n elif action == 'optional pause program':\n return 'Optional Pause'\n elif action == 'end program':\n return 'End'\n else:\n raise core.GCodeException('unknown action: %r' % (action,))",
"def run(self):\n # FILE INPUT\n if self.text_type == \"file\":\n self.process_files()\n\n # STRING INPUT\n else:\n self.process_strings()\n\n if self.json:\n self.save_json()\n\n if self.errors:\n print(\"\\nThe following file(s) could not be opened:\")\n for error in self.errors:\n print(f\"\\t{error}\")",
"def process(self):\r\n if self.file_handle.closed:\r\n self.file_handle = open(self.file_handle.name)\r\n\r\n self.file_handle.seek(0)\r\n parsed = etree.parse(self.file_handle)\r\n count = 0\r\n\r\n ids = []\r\n for post in parsed.findall('post'):\r\n if 'javascript:' in post.get('href'):\r\n continue\r\n\r\n add_date = dateparser.parse(post.get('time'))\r\n\r\n try:\r\n bmark = self.save_bookmark(\r\n unicode(post.get('href')),\r\n unicode(post.get('description')),\r\n unicode(post.get('extended')),\r\n unicode(post.get('tag')),\r\n dt=add_date)\r\n count = count + 1\r\n if bmark:\r\n bmark.stored = bmark.stored.replace(tzinfo=None)\r\n DBSession.flush()\r\n except InvalidBookmark:\r\n bmark = None\r\n\r\n if bmark:\r\n ids.append(bmark.bid)\r\n\r\n if count % COMMIT_SIZE == 0:\r\n transaction.commit()\r\n\r\n # Commit any that are left since the last commit performed.\r\n transaction.commit()\r\n\r\n from bookie.bcelery import tasks\r\n # For each bookmark in this set that we saved, sign up to\r\n # fetch its content.\r\n for bid in ids:\r\n tasks.fetch_bmark_content.delay(bid)\r\n\r\n # Start a new transaction for the next grouping.\r\n transaction.begin()",
"def _handle_content(self, relpath, params):\r\n abspath = os.path.normpath(os.path.join(self._root, relpath))\r\n if os.path.isfile(abspath):\r\n with open(abspath, 'r') as infile:\r\n content = infile.read()\r\n else:\r\n content = 'No file found at %s' % abspath\r\n content_type = mimetypes.guess_type(abspath)[0] or 'text/plain'\r\n if not content_type.startswith('text/') and not content_type == 'application/xml':\r\n # Binary file. Display it as hex, split into lines.\r\n n = 120 # Display lines of this max size.\r\n content = repr(content)[1:-1] # Will escape non-printables etc, dropping surrounding quotes.\r\n content = '\\n'.join([content[i:i+n] for i in xrange(0, len(content), n)])\r\n prettify = False\r\n prettify_extra_langs = []\r\n else:\r\n prettify = True\r\n if self._settings.assets_dir:\r\n prettify_extra_dir = os.path.join(self._settings.assets_dir, 'js', 'prettify_extra_langs')\r\n prettify_extra_langs = [ {'name': x} for x in os.listdir(prettify_extra_dir) ]\r\n else:\r\n # TODO: Find these from our package, somehow.\r\n prettify_extra_langs = []\r\n linenums = True\r\n args = { 'prettify_extra_langs': prettify_extra_langs, 'content': content,\r\n 'prettify': prettify, 'linenums': linenums }\r\n self._send_content(self._renderer.render_name('file_content', args), 'text/html')",
"def parse_file(file_name, out):\n try:\n with open(file_name) as f:\n parse_string(f.read(), out)\n except Exception as e:\n logging.error(\"Error when opening and parsing file %s: %s\" % (file_name, e))\n print(\"Error occurred when parsing file. See logs for more details.\",file=sys.stderr)",
"def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been handled\n print file_title,\n print \"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))",
"def process_file(path):\r\n\ttokenset = {}\r\n\r\n\tfp = open(path, 'r')\r\n\temailMsg = email.message_from_file(fp)\r\n\tfp.close()\r\n\r\n\ttokenset = parse_body(emailMsg.get_payload().lower())\r\n\r\n\treturn tokenset",
"def handle_file(self, source_path, dest_path):\n raise NotImplemented",
"def processFile (filename, ostream):\n\n istream = open(filename)\n header = CHeader(istream)\n istream.close()\n\n processClassDocs(ostream, header.classDocs)\n processClasses(ostream, header.classes)\n processFunctions(ostream, header.functions)\n\n ostream.flush()",
"def parse_file(self, file_lines):\n # separate the file into chunks of text\n chunks, chunk = [], []\n # check to see what format the corpus is in, we assume that the headers are the same for all\n # texts in the file... (maybe not safe?)\n if re.match('Primary publication:', file_lines[0]):\n header = re.compile('Primary publication:')\n else:\n header = re.compile(r'&?P\\d{6}')\n for line in file_lines:\n if header.match(line):\n if len(chunk) > 0: # pylint: disable=len-as-condition\n chunks.append(chunk)\n chunk = [line]\n else:\n if len(line) > 0: # pylint: disable=len-as-condition\n chunk.append(line)\n chunks.append(chunk)\n self.chunks = chunks\n # create a rich catalog from the chunks\n re_translit = re.compile(r'(\\d+\\'?\\.) ?(.*)')\n re_normaliz = re.compile(r'(#tr\\.ts:) ?(.*)')\n re_translat = re.compile(r'(#tr\\.en:) ?(.*)')\n for chunk in self.chunks:\n text = chunk\n if chunk[0].startswith('Primary publication:'):\n # we've got full metadata, add additional parsing later\n metadata = chunk[:25]\n text = chunk[26:]\n else: # no metadata\n metadata = []\n pnum = ''.join([c for c in text[0].split('=')[0] if c != '&']).rstrip()\n edition = text[0].split('=')[1].lstrip()\n text = text[3:]\n translit = []\n normaliz = []\n translat = []\n for line in text:\n if re.match(r'\\d+\\'?\\.', line):\n translit.append(re_translit.match(line).groups()[1])\n if line.startswith('#tr.ts:'):\n normaliz.append(re_normaliz.match(line).groups()[1])\n if line.startswith('#tr.en:'):\n translat.append(re_translat.match(line).groups()[1])\n self.catalog[pnum] = {'metadata': metadata,\n 'pnum': pnum,\n 'edition': edition,\n 'raw_text': text,\n 'transliteration': translit,\n 'normalization': normaliz,\n 'translation': translat}",
"def parseFile(self, filename):\n\n f = open(filename, \"r\")\n s = f.read()\n f.close()\n\n logging.log(10, 'parsing filename %s: %d lines' % (filename, len(s)))\n\n self.parseString(s)",
"def process_file(file_name):\n f_in = open(file_name)\n return list(map(lambda s: s.strip(), f_in.readlines()))",
"def read(self, filename):\n pass",
"def read(self, filename):\n pass",
"def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))",
"def parse_file(self, file_name, **kwargs):\n with io.open(file_name, 'r', encoding='utf-8') as f:\n content = f.read()\n return self.parse(content, file_name=file_name, **kwargs)",
"def process_file(self, data, filename):\n\n if data:\n data = self.update_province_info(data)\n self.get_province_info(data, filename)",
"def parse_file(self, file_name, **kwargs):\n with codecs.open(file_name, 'r', 'utf-8') as f:\n content = f.read()\n return self.parse(content, file_name=file_name, **kwargs)",
"def read_input(input_file):\n \n logging.info(\"reading file {0}...this may take a while\".format(input_file))\n with gzip.open(input_file, 'rb') as f:\n for i, line in enumerate(f):\n \n if (i % 10000 == 0):\n logging.info(\"read {0} reviews\".format(i))\n # do some pre-processing and return list of words for each review\n # text\n yield gensim.utils.simple_preprocess(line)",
"def process_file(file_name):\n _,fn = os.path.split(file_name)\n pdb_id = get_pdb_id(fn)\n # Get files in pdb format even when at LBL, to avoid issues with phenix.reduce\n if ('_mirror' in file_name) or (file_name == pdb_id):\n file_name = pdb_id + '.pdb'\n file_to_clean = []\n if not os.path.isfile(file_name):\n # leave file in folder if it was already there\n # cmd = 'phenix.fetch_pdb {} --all'.format(pdb_id)\n cmd = 'phenix.fetch_pdb {}'.format(pdb_id)\n r = easy_run.go(cmd,join_stdout_stderr=False)\n for fn in r.stdout_lines:\n fn = os.path.split(fn)[-1]\n if '.pdb' in fn: file_name = fn\n file_to_clean.append(fn)\n fn = fn.replace('.pdd','_with_h.pdb')\n file_to_clean.append(fn)\n return file_name,pdb_id,file_to_clean",
"def parse_external_result(self, file):\n raise NotImplementedError",
"def fetch_and_extract(self, filename):\n # type: (Text) -> None\n raise NotImplementedError(\"\")",
"def process_link(self, inp):\n url = inp\n try:\n request = urllib2.Request(url)\n request.add_header('User-Agent', self.browsers[randint(0, 28)])\n request.add_header('Accept',\n ('text/html,application/xhtml+xml,'\n 'application/xml;q=0.9,*/*;q=0.8'))\n request.add_header('Accept-Language', 'en-us,en;q=0.5')\n soup = BeautifulSoup(urllib2.urlopen(request).read())\n content_div = soup.findAll(id=\"content\")[0]\n raw_text = clean_html(str(content_div))\n f = open('wiki_text2.txt', 'w')\n f.write(raw_text)\n f.close()\n return self.process_text(raw_text)\n except:\n traceback.print_exc()\n raise \"cant process link :traceback:%s\" % traceback.format_exc()"
] | [
"0.6773457",
"0.6624344",
"0.6464674",
"0.64460856",
"0.64447",
"0.64102936",
"0.6183915",
"0.61486447",
"0.61235183",
"0.609597",
"0.6086695",
"0.60528666",
"0.60464036",
"0.6021481",
"0.60116667",
"0.6004422",
"0.6004236",
"0.6000104",
"0.59662986",
"0.5915058",
"0.5889009",
"0.5875775",
"0.5875177",
"0.5857971",
"0.58262223",
"0.5810716",
"0.58082193",
"0.5804464",
"0.5799639",
"0.5788984",
"0.5782185",
"0.57417506",
"0.5738904",
"0.57336736",
"0.57318455",
"0.5725296",
"0.5709998",
"0.5681544",
"0.5680672",
"0.5679933",
"0.56785214",
"0.56776047",
"0.5676606",
"0.5674839",
"0.5640424",
"0.56402916",
"0.5632791",
"0.5621764",
"0.56201524",
"0.56143224",
"0.56016916",
"0.55934054",
"0.55862576",
"0.55749506",
"0.55721945",
"0.5560937",
"0.55505633",
"0.5545497",
"0.5541667",
"0.55408853",
"0.5540383",
"0.55398375",
"0.55122435",
"0.5500049",
"0.55000323",
"0.549388",
"0.54895025",
"0.5479312",
"0.5460833",
"0.54478145",
"0.5441097",
"0.5436467",
"0.5434344",
"0.5430422",
"0.54261476",
"0.5410264",
"0.5403002",
"0.54011077",
"0.53901935",
"0.5390111",
"0.53882134",
"0.53745854",
"0.5372318",
"0.5366161",
"0.5359563",
"0.53506345",
"0.5348711",
"0.5338423",
"0.53355104",
"0.53255033",
"0.53195846",
"0.53195846",
"0.53175324",
"0.53152025",
"0.5313685",
"0.5303756",
"0.53020525",
"0.5300512",
"0.5295822",
"0.5289673",
"0.52779526"
] | 0.0 | -1 |
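For reference, a minimal standalone sketch of the URL-matching cascade used in getContents above; the function name extract_url and the sample lines are hypothetical, not part of the original code:

import re

PAT_URL = re.compile(r'URL=(?P<url>.*$)', re.IGNORECASE)
PAT_HTTP = re.compile(r'(?P<url>http.*$)', re.IGNORECASE)
PAT_FTP = re.compile(r'(?P<url>ftp.*$)', re.IGNORECASE)

def extract_url(line):
    # Same first-match-wins order as getContents: URL=, then http, then ftp
    for pat in (PAT_URL, PAT_HTTP, PAT_FTP):
        m = pat.match(line)
        if m:
            return m.group('url')
    return None

# A .url bookmark line yields its target; unrelated lines yield None
assert extract_url('URL=http://example.com/') == 'http://example.com/'
assert extract_url('[InternetShortcut]') is None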
A naive factorization method. Take integer 'n', return list of factors. | def factorize_naive(n):
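# Examples: factorize_naive(12) -> [2, 2, 3]; factorize_naive(1) -> []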
if n < 2:
return []
factors = []
p = 2
while True:
if n == 1:
return factors
r = n % p
if r == 0:
factors.append(p)
n = n // p
elif p * p >= n:
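# no divisor p with p*p <= n divides n, so the remaining n is prime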
factors.append(n)
return factors
elif p > 2:
# Advance in steps of 2 over odd numbers
p += 2
else:
# If p == 2, get to 3
p += 1
assert False, "unreachable" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def factor_naive(n):\n factors = []\n\n for factor in range(2, n // 2):\n q, r = divmod(n, factor)\n power = 0\n while r == 0:\n power += 1\n n = q\n q, r = divmod(q, factor)\n if power != 0:\n factors.append((factor, power))\n\n if factors == []:\n factors = [(n, 1)]\n\n return factors",
"def factors(n):\n f = list(reduce(list.__add__, ([i, n // i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)))\n return sorted(f)",
"def _prime_factorization(n):\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors",
"def factors(n):\n _factors = []\n p = 1\n\n # Loop until half of n\n while p <= n // 2:\n p += 1\n if div_by(p, _factors):\n continue\n if not n % p:\n _factors.append(p)\n\n # Number given is a prime\n if not _factors:\n _factors.append(n)\n\n return _factors",
"def factors(n):\n\tif n<0: n=-n # Only deal with positive integers\n\tif (is_prime(n)):\n\t\treturn [n]\n\tfact = factorone(n)\n\tif ((abs(n) == 1) or (n == 0)): raise ValueError('Unable to factor \\\"{0}\\\"'.format(n))\n\tfacts = factors(n//fact) + factors(fact)\n\tfacts.sort()\n\treturn facts",
"def factors(n):\r\n\tif n<0: n=-n # Only deal with positive integers\r\n\tif (is_prime(n)):\r\n\t\treturn [n]\r\n\tfact = factorone(n)\r\n\tif (fact == 1): return \"Unable to factor \"+str(n) # Can't deal with units\r\n\tfacts = factors(n/fact) + factors(fact)\r\n\tfacts.sort()\r\n\treturn facts",
"def _factors(n):\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))",
"def prime_factors(n):\n\n prime_set = primes(n)\n factors = []\n for prime in prime_set:\n if n % prime == 0:\n factors.append(prime)\n return factors",
"def factors(n):\r\n output = []\r\n for i in range(1,n+1):\r\n if n % i == 0:\r\n output.append(i)\r\n output.append(-i)\r\n return output",
"def factorize(n):\n it = factorize._prime_iterator\n factors = []\n it.reset()\n for p in it:\n if n == 1 or n < p * p:\n break\n if n % p == 0:\n n //= p\n m = 1\n while n % p == 0 and n > 1:\n n //= p\n m += 1\n factors.append((p, m))\n if n > 1:\n factors.append((n, 1))\n return factors",
"def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf",
"def factorize(n):\n if n <= 1:\n return ValueError(\"Integer must be greater than one.\")\n r = n\n factors = []\n while r % 2 == 0:\n factors.append(2)\n r /= 2\n for i in range(3, int(r**.5) + 1, 2):\n while r % i == 0:\n factors.append(int(i))\n r /= i\n if r > 2:\n factors.append(int(r))\n return factors",
"def prime_factors(n) -> []:\n i = 2\n factors = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors",
"def primefactors(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])",
"def factor(n: int) -> List[Tuple[int, int]]:\n if n <= 1:\n raise ValueError\n\n factors = list()\n\n ml = 0\n p = 2\n while n % p == 0:\n n //= p\n ml += 1\n if ml > 0:\n factors.append((p, ml,))\n\n p = 3\n while p ** 2 <= n:\n ml = 0\n while n % p == 0:\n n //= p\n ml += 1\n if ml > 0:\n factors.append((p, ml,))\n p += 2\n\n if n > 2:\n factors.append((n, 1,))\n\n return factors",
"def factors(n: int) -> List[int]:\n k = 1\n while k**2 < n:\n if n % k == 0:\n yield k\n k += 1\n\n k = int(n**(1/2))\n while k > 0:\n if n % k == 0:\n yield n // k\n k -= 1",
"def factors(n):\n factors = []\n for x in range(1, int(sqrt(n)+1)):\n if (n % x) == 0:\n factors += [x, n/x]\n \n return sorted(set(factors))",
"def factorize(n):\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct",
"def factor(n):\n\n f = []\n\n for i in xrange(1, int(round(sqrt(n)))+1):\n if n%i == 0:\n f.append(i)\n f.append(n/i)\n\n return f",
"def prime_factorization(n):\n # Code taken directly from \"Prime factorization - list\" at\n # http://stackoverflow.com/a/16996439.\n primfac = []\n d = 2\n while d*d <= n:\n while (n % d) == 0:\n primfac.append(d) # supposing you want multiple factors repeated\n n //= d\n d += 1\n if n > 1:\n primfac.append(n)\n return Multiset(primfac)",
"def factor(n):\r\n\t# Rewritten to align with SAGE. Previous semantics available as factors(n).\r\n\tif (abs(n) == 1): return \"Unable to factor \"+str(n) # Can't deal with units\r\n\tfactspow = []\r\n\tcurrfact = None\r\n\tfor thefact in factors(n):\r\n\t\tif thefact != currfact:\r\n\t\t\tif currfact != None:\r\n\t\t\t\tfactspow += [(currfact,thecount)]\r\n\t\t\tcurrfact = thefact\r\n\t\t\tthecount = 1\r\n\t\telse:\r\n\t\t\tthecount += 1\r\n\tfactspow += [(thefact,thecount)]\r\n\treturn factspow",
"def factors(n, cache=None):\n if cache is None or max(cache) < n:\n potential_factors = primes(n + 1)\n else:\n potential_factors = cache\n prime_factors = []\n i = 0\n while n != 1:\n while n % potential_factors[i] == 0:\n n /= potential_factors[i]\n prime_factors.append(potential_factors[i])\n i += 1\n return prime_factors",
"def primefactors(n):\n seq = []\n val = 2\n while val <= n:\n if VERBOSE: print \"val: %s n: %s\" % (val, n)\n if n % val == 0:\n # Found a factor, shrink n by that factor \n # ie. n = 60, val = 2\n # Next pass n = 30, val = 2\n seq.append(val)\n n /= val\n else:\n # Not (or no longer) a factor\n val += 1\n\n return seq",
"def primefactors_with_multiplicity(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])",
"def factors2(n):\n\tfactors = []\n\ti = 1\n\twhile i <= math.sqrt(n):\n\t\tif n%i == 0:\n\t\t\tfactors.append(i)\n\t\t\tfactors.append(n/i)\n\t\ti += 1\n\treturn factors",
"def prime_factors(n):\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]",
"def factorize(n):\n\n if n in (0, 1):\n return [(n, 1)]\n\n factors = []\n\n if n < 0:\n factors.append((-1, 1))\n n = -n\n\n # check 2, 3, then all integers in form q = 6k +- 1\n for q in chain((2, 3), range(5, isqrt(n) + 1, 6)):\n # q = 6k - 1\n a = 0\n while n % q == 0:\n # q is prime because n already divided by its prime factors\n n //= q\n a += 1\n if a > 0:\n factors.append((q, a))\n\n # 6k + 1\n q += 2\n a = 0\n while n % q == 0:\n # q is prime because n already divided by its prime factors\n n //= q\n a += 1\n if a > 0:\n factors.append((q, a))\n\n if n != 1:\n factors.append((n, 1))\n\n return factors",
"def prime_factors(n):\n\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]",
"def prime_factorization(n):\r\n result = []\r\n for i in xrange(2, n+1):\r\n s = 0;\r\n while n / float(i) == floor(n/float(i)):\r\n n = n / float(i)\r\n s += 1\r\n if s > 0:\r\n for k in range(s):\r\n result.append(i)\r\n if n == 1:\r\n return result",
"def getallprimefactors(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n print(n)\n n /= d\n d += 1\n return factors",
"def prime_factorization(n):\n\t\n\tprimes = []\n\t\n\twhile not n % 2:\n\t\tprimes.append(2)\n\t\tn //= 2\n\t\n\tfor possible_factor in range(3, int(sqrt(n)) + 1, 2):\n\t\twhile not n % possible_factor:\n\t\t\tprimes.append(i)\n\t\t\tn //= possible_factor\n\t\n\tif n > 1:\n\t\tprimes.append(n)\n\treturn primes",
"def factor(n):\n\t# Rewritten to align with SAGE. Previous semantics available as factors(n).\n\tif ((abs(n) == 1) or (n == 0)): raise ValueError('Unable to factor {0}'.format(n))\n\tfactspow = []\n\tcurrfact = None\n\tfor thefact in factors(n):\n\t\tif thefact != currfact:\n\t\t\tif currfact != None:\n\t\t\t\tfactspow += [(currfact,thecount)]\n\t\t\tcurrfact = thefact\n\t\t\tthecount = 1\n\t\telse:\n\t\t\tthecount += 1\n\tfactspow += [(thefact,thecount)]\n\treturn tuple(factspow)",
"def prime_factors(n):\r\n factors = defaultdict(int)\r\n d = 2\r\n while n > 1:\r\n while n % d == 0:\r\n factors[d]+=1\r\n n /= d\r\n d = d + 1\r\n if d*d > n:\r\n if n > 1: factors[n]+=1\r\n break\r\n return factors",
"def factors(n):\n nfactors = 0 # the number of factors of n\n for divisor in range(1, n+1): # divisors: {1,2,3,4...,n}\n if n%divisor == 0: # divides with no remainder\n nfactors += 1 # i.e. one new factor found\n return nfactors",
"def factorize(primes, n):\n factor = []\n for prime in primes:\n ex = 0\n while n % prime == 0:\n ex += 1\n n = n // prime\n if ex != 0:\n factor.append((prime, ex))\n\n return factor if n == 1 else None",
"def prime_factors(n):\n if n < 2 or n - round(n) != 0:\n print('Numbers smaller than 2 and non-integers do not have prime',\n 'factors')\n L = []\n while n >= 2:\n i = low_prime(n)\n L.append(i)\n n //= i\n return L",
"def factor(N):\n\n factors = []\n sqrtN = math.sqrt(N)\n for x in range(2, int(sqrtN)+1):\n (d, r) = divmod(N, x)\n if r == 0:\n factors.append(x)\n if x != d: factors.append(d)\n return [1, N] + factors",
"def divisors(n):\n divs = [1]\n for p, e in factorization(n):\n divs += [x*p**k for k in range(1,e+1) for x in divs]\n return divs",
"def get_prime_factors(n):\n factors = {}\n if n <= 1: return {}\n \n while n != 1:\n if is_prime(n):\n factors[n] = 1\n break\n \n i = 2\n while i <= n:\n j = 0\n while n % i == 0 and n != 1:\n j += 1\n n //= i\n \n if j > 0:\n factors[i] = j\n break\n i += 1\n \n return factors",
"def prime_factors_set(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n n /= d\n d = d + 1\n if d*d > n:\n if n > 1: factors.append(n)\n break\n return list(set(factors))",
"def primeFactors(n):\n\n ps = primes(n)\n rest = n\n factors = {}\n for p in ps:\n if rest == 1:\n break\n\n if p ** 2 > n:\n if len(factors.keys()) > 0:\n factors[p] = 1\n else:\n factors[n] = 1\n break\n\n power = 0\n while rest % p == 0:\n power += 1\n rest = rest / p\n\n if power > 0:\n factors[p] = power\n\n return factors",
"def primish(n):\n\n factors = set()\n for i in range(n, 1, -1):\n\n # Find the smallest divisor of i.\n smallest = 2\n while (i % smallest) != 0:\n smallest += 1\n\n # Divide by that divisor until we have 1 or something else.\n remainder = i\n while (remainder % smallest) == 0:\n remainder /= smallest\n\n # Keep it if needed.\n if remainder == 1:\n factors.add(i)\n\n return factors",
"def count_factors(n):\n i, total= 1, 0\n while i <= n:\n if n % i == 0:\n total += 1\n i += 1\n return total",
"def factors(n):\n for x in range(1,n+1):\n if n % x == 0:\n print(x)",
"def GetNFactors(n, primes, n_pfactors, _):\n sqrtn = int(n ** 0.5) + 1\n\n for p in primes:\n if p > sqrtn:\n break\n if n % p == 0:\n n //= p\n if n % p == 0:\n return n_pfactors[n]\n else:\n return n_pfactors[n] + 1\n\n # n is primes\n primes.append(n)\n return 1",
"def factors_s(n, ret=False):\n f = set()\n if n < 4:\n return f\n limit = int(n / 2 + 1)\n for i in primeList:\n if i > limit:\n break\n while n != 1:\n if n % i:\n break\n else:\n n //= i\n f.add(i)\n else:\n break\n if ret:\n return (n, f)\n return f",
"def factorial_factorization(n):\n primes = sympy.primerange(2, n+1)\n factorization = {}\n for p in primes:\n exp = sum(int(n / p**k) for k in range(1, int(math.log(n,p))+1))\n factorization[p] = exp\n return factorization",
"def prime_factorisation(n):\n prime_numbers = []\n integers = []\n for i in range(n+1):\n if is_prime(i):\n prime_numbers.append(i)\n if n in prime_numbers:\n return f'{n} is prime'\n k = 0\n while k < len(prime_numbers):\n if n % prime_numbers[k] == 0:\n integers.append(prime_numbers[k])\n n //= prime_numbers[k]\n else:\n k += 1\n return integers",
"def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs",
"def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs",
"def factor(n):\n import math\n if not n >= 0:\n raise ValueError(\"n must be >= 0\")\n if math.floor(n) != n:\n raise ValueError(\"n must be exact integer\")\n if n + 1 == n:\n raise OverflowError(\"n too large\")\n result = 1\n factor = 2\n while factor <= n:\n result *= factor\n factor += 1\n return result",
"def factors(n, primes):\n\n for p in takewhile(lambda p: p*p < n, primes):\n exponent = 0\n\n while n % p == 0:\n exponent += 1\n n /= p\n\n if exponent > 0:\n yield p, exponent\n\n if n > 1:\n yield n, 1",
"def primeFactors(number):\n factorlist=[]\n loop=2\n while loop<=number:\n if number%loop==0:\n number/=loop\n factorlist.append(loop)\n else: \n loop+=1\n return factorlist",
"def landau2(n):\n\n factors = primish(n)\n\n # TODO: I have no idea here...\n\n #assert sum(factors) <= n\n return product(factors)",
"def factors(number):\n\n if not (isinstance(number, int)):\n raise TypeError(\n \"Incorrect number type provided. Only integers are accepted.\")\n\n factors = []\n for i in range(1, number + 1):\n if number % i == 0:\n factors.append(i)\n return factors",
"def fn(i, n):\n if len(stack) > 0 and stack[-1] <= n: ans.append(stack + [n])\n for ii in range(i, len(factors)):\n if n % factors[ii] == 0: \n stack.append(factors[ii])\n fn(ii, n//factors[ii])\n stack.pop()",
"def factor(number):\n\tdividing_primes = sieve(number/2 + 1)\n\tfactors = []\n\t\n\twhile number != 1:\t\n\t\tif not dividing_primes:\n\t\t\treturn [number]\n\n\t\tnext_divisor = min(dividing_primes)\n\n\t\tif not number % next_divisor:\n\t\t\tfactors.append(next_divisor)\n\t\t\tnumber /= next_divisor\n\t\telse:\n\t\t\tdividing_primes.remove(next_divisor)\n\n\treturn factors",
"def primefac(n, aprimes = []):\n if not aprimes: aprimes = primes(n)\n ps = list(filter(lambda x : x <= n, aprimes))\n facs = []\n for p in ps:\n nn = n\n d = 0\n while nn % p == 0:\n nn = nn // p\n d += 1\n if d != 0:\n facs.append((p, d))\n return facs",
"def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)",
"def get_factors(num):\n factors = []\n\n # Extend range by 1 to include num\n for i in range(1, num+1):\n if num % i == 0:\n factors.append(i)\n return factors",
"def prime_factors(number):\n factors = []\n\n if number == 0 : return factors\n\n # first round factors by two\n while number % 2 == 0:\n factors.append(2)\n number /= 2\n\n # other rounds goes by odd numbers only (no other even is prime)\n divisor = 3\n while divisor <= number:\n while number % divisor == 0:\n factors.append(divisor)\n number /= divisor\n divisor += 2\n\n return factors",
"def pairs_of_factors(n):\n seq = factor(n)\n # indexes into seq\n i = set(range(len(seq)))\n # create pairs of subsets indexes into seq and their complements\n ps = [(ss, i-ss) for ss in powerset(i) if 0 in ss and ss<i]\n return frozenset(\n tuple(sorted((prod(seq[i] for i in a), prod(seq[i] for i in b))))\n for a, b in ps)",
"def factor(cls, number):\n factors = []\n for prime in cls():\n if prime > number:\n break\n # print 'Checking to see if %d is a factor of %d' % (prime, number)\n # reduce the total iterations\n if prime > math.sqrt(number):\n factors.append(number)\n break\n while not number % prime:\n number /= prime\n factors.append(prime)\n return factors",
"def primeFactors(n: int) -> str:\n res = ''\n factors = find_prime_factors(n)\n seen = []\n for fac in factors:\n if fac not in seen:\n n = factors.count(fac)\n if n > 1:\n res += '({}**{})'.format(fac, n)\n else:\n res += '({})'.format(fac)\n seen.append(fac)\n return res",
"def problem3():\n def _prime_factorization(n):\n \"\"\"Returns the list of prime factors of a number n\"\"\"\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors\n\n return max(_prime_factorization(600851475143))",
"def FactorInList(N, List):\n a = 1\n b = N \n for num in List:\n if N % num == 0:\n a = num\n b = N//num\n break\n \n return [a, b]",
"def fac(n:int) -> int :\n\n factorial = 1\n while n >= 1:\n factorial *= n\n n -= 1\n return factorial",
"def calcularfactorial(n):\r\n fact = 1\r\n for i in range(1, n+1): ## El valor inicial 1 es para que no arranque desde 0 si no desde 1. El valor final es n+1 xq el valor final del range nunca esta incluido\r\n fact = fact * i ## Multiplicamos el fact por el i. I va a valer lo que devuelva el range: 1,2,3,4 etc. Vamos a multiplicar los valores fact partiendo de 1 por todos los valores a recorrer\r\n return fact",
"def f(n):\n\treturn gmpy.fac(n)",
"def factorial(n: int):\n # base case, reduce must have non-empty list\n if n <= 0:\n return 0\n # use reduce function to multiple elements\n return reduce(lambda x, y: x * y, range(1,n+1))",
"def _compute_factorial(n: int) -> None:\n\n fact_count = len(_factorial_sequence)\n\n # have the terms up to n! already been computed?\n if n < fact_count:\n return\n\n # compute numbers iteratively from existing sequence\n product = _factorial_sequence[-1]\n for i in range(fact_count, n + 1):\n product *= i\n _factorial_sequence.append(product)",
"def prime_factors(num):\n if prime_checker(num):\n return num\n if num > 10^5:\n maxPrime = round(num**0.5) + 1\n else:\n maxPrime = round(num/2)+1\n primelist = prime_generator(maxPrime)\n factors = []\n\n while num > 1 and num not in primelist:\n for prime in primelist:\n if num % prime == 0:\n factors.append(prime)\n num = int(num / prime)\n break\n if not num == 1:\n factors.append(num)\n \n return factors",
"def find_factors(number):\n \n i = 2\n prod = 1\n factors = []\n sqrt = math.sqrt(number)\n num = number\n \n while i < num:\n div = check_divisbility(number, i)\n if div == 'divisible':\n factors.append(i)\n number /= i\n prod *= i\n recurse = find_factors(number)\n \n #I recurse here because it prevents us wasting time playing with large numbers\n for fac in recurse:\n factors.append(fac)\n number /= fac\n prod *= fac\n #stop if we find a factor greater tha sqrt(number)\n if i >= sqrt:\n break\n #make sure we're not looking once we find all the factors \n if prod == num:\n break\n else:\n if i> sqrt:\n if len(factors)==0:\n factors.append(num)\n prod *= num\n else: \n print i\n recurse = find_factors(number)\n for fac in recurse:\n factors.append(fac)\n prod *= fac\n if prod == num:\n break\n i = i+1\n if prod != num:\n raise ValueError (\"This isn't right\")\n return factors",
"def facti(n):\n if n == 0: return 1\n f= 1\n for i in range(2,n):\n f= f*i\n return f",
"def prime_factors(n: int) -> Dict[int, int]:\n if SHOW_WORKING: print(f\"prime_factors({n})\")\n original_n = n\n factors = {}\n\n while n % 2 == 0:\n print(f\"\\tChecking if {n} divides 2\")\n print(f\"\\t\\tYes--Adding 2\")\n if 2 in factors.keys():\n factors[2] += 1\n else:\n factors[2] = 1\n n //= 2\n\n checklimit: int = math.ceil(math.sqrt(n)) + 1\n for d in range(3, checklimit, 2):\n if n % d:\n print(f\"\\tChecking if {n} divides {d}\")\n print(f\"\\t\\tNo--moving on\")\n d += 1\n else:\n while n % d == 0:\n print(f\"\\tChecking if {n} divides {d}\")\n print(f\"\\t\\tYes--Adding {d}\")\n if d in factors.keys():\n factors[d] += 1\n else:\n factors[d] = 1\n n //= d\n if n > 1:\n factors[n] = 1\n\n print(f\"\\t{original_n} has prime factorisation {' * '.join([str(p) + '^' + str(e) for p, e in factors.items()])}\")\n return factors",
"def primeFactorsGivenPrimes(n, primes):\n factors = {}\n for p in primes: \n while n % p == 0:\n n //= p\n factors[p] = factors.get(p,0)+1\n if n < p*p:\n if n > 1:\n factors[n] = factors.get(n,0)+1\n return factors\n return factors",
"def factors(num):\n\tif is_prime(num) == True:\n\t\tfactors = [1, num]\n\t\treturn factors\n\telse:\n\t\tfactors = [1]\n\t\tsquare_root = int(math.ceil(math.sqrt(num)))\n\t\t\n\t\tfor n in range(2, square_root+1):\n\t\t\tif num % n == 0:\n\t\t\t\tfactors.append(n)\n\n\t\tfor n in range(1, len(factors)):\n\t\t\tnew_n = num / factors[n]\n\t\t\tif new_n not in factors:\n\t\t\t\tfactors.append(num / factors[n])\n\n\t\tfactors.append(num)\n\t\treturn factors",
"def prime_divisors(n):\r\n\treturn list(set(factors(n)))",
"def facti(n: int) -> int:\n if n == 0:\n return 1\n f = 1\n for i in range(2, n):\n f = f*i\n return f",
"def fact_i(n):\n \n result = 1\n while n > 1:\n result *= n\n n -= 1\n return result",
"def factorPR(n):\n\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n)))\n\tfor additive in range(1,5):\n\t\tfast=slow=1; i=1\n\t\twhile i<numsteps:\n\t\t\tslow = (slow*slow + additive) % n\n\t\t\ti = i + 1\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tg = gcd(fast-slow,n)\n\t\t\tif (g != 1):\n\t\t\t\tif (g == n):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\treturn g\n\treturn 1",
"def sum_factors(n):\n return sum(f*n[f] for f in n)",
"def get_prime_factor(n):\n if n % 2 == 0:\n return 2\n for num in range(3, n + 1, 2):\n if n % num == 0:\n return num",
"def factorize(number, factors, result=None):\n if result is None:\n result = []\n factor = _max_factor(number, factors)\n amount = _num_factor(number, factor)\n remain = _remainder(number, factor)\n result.append((amount, factor))\n if remain == 0:\n return result\n return factorize(remain, factors, result)",
"def pfactorGen(N):\n\n n = N\n if n == 2:\n yield 2\n else:\n p = list(takewhile(lambda x: x < N, primes(N=N))) # gen_primes()))\n i = 0\n # divide out the lowest numbers first so that as long as the\n # reduced n is composite, it must be greater than the square of the\n # next largest number (n>i^2).\n\n while p[i] * p[i] <= n:\n while n % p[i] == 0:\n yield p[i] # n is divisible by i\n n /= p[i]\n i += 1\n\n # the final reduced n is the last and largest non-composite (prime)\n # factor of N.\n\n if n > 1:\n yield int(n)",
"def factorize(self,num):\n def sieveOfEratosthenes(N, s): \n prime = [False] * (N+1) \n for i in range(2, N+1, 2): \n s[i] = 2\n for i in range(3, N+1, 2): \n if (prime[i] == False): \n s[i] = i \n for j in range(i, int(N / i) + 1, 2): \n if (prime[i*j] == False): \n prime[i*j] = True\n s[i * j] = i \n\n\n def generatePrimeFactors(N): \n ans=[]\n s = [0] * (N+1) \n sieveOfEratosthenes(N, s) \n curr = s[N] \n cnt = 1\n while (N > 1): \n N //= s[N]\n if (curr == s[N]): \n cnt += 1\n continue\n\n ans.append((str(curr),str(cnt))) \n\n curr = s[N] \n cnt = 1\n return ans\n \n return generatePrimeFactors(num)",
"def factorize(num: int) -> [int, ]:\n # assert isinstance(num, int)\n primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29,\n 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,\n 73, 79, 83, 89, 97, 101, 103, 107, 109, 113,\n 127, 131, 137, 139, 149, 151, 157, 163, 167, 173,\n 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,\n 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,\n 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,\n 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,\n 467, 479, 487, 491, 499, 503, 509, 521, 523, 541)\n factors = []\n if num == 0:\n return [0, ]\n\n # Generate a list of prime factors:\n for prime in primes:\n if prime > num:\n break\n while num % prime == 0:\n factors.append(prime)\n num = int(round(num / prime))\n if num != 1:\n # TODO: make it find larger primes to avoid this problem.\n raise ArithmeticError(\n f'num is {num}. did not finish prime factorization.')\n return factors",
"def find_factors(num):\n factors = set()\n i = 1\n while i*i < num:\n if num % i == 0:\n factors.add(i)\n factors.add(int(num/i))\n i+=1\n factors = list(factors)\n factors.sort()\n return factors",
"def get_unique_factors(num):\n a = num\n m = int(num ** 0.5) if num > 100 else num\n factors = []\n primes = sieve(m)\n # Divide the number by compatible primes until it is 1\n # (or we run out of primes...)\n for p in primes:\n if a % p == 0:\n a = a / p\n factors.append(p)\n if a == 1:\n break\n return factors",
"def factorial(n):\n if n == 0:\n return 1\n else:\n return reduce((lambda x, y: x * y), range(1, n + 1))",
"def list_of_divisibles(n):\n def is_prime(x, L = []):\n if x in L or x == 2:\n return True\n elif x == 1 or x % 2 == 0:\n return False\n for divisor in range(1, round(x ** .5)):\n if is_prime(divisor, L):\n if x % divisor == 0:\n return False\n return True\n \n def largest_exponent(i, n):\n \"\"\"\n Given a limit n and a base i, finds the largest exponenet x such that i ^ x <= n, and outputs i ^ x.\n\n \"\"\"\n x = 1\n while i ** x <= n:\n x += 1\n x -= 1\n print(i, x, i**x)\n return i ** x\n \n L = []\n for i in range(2, n+1):\n if i in L:\n continue\n elif is_prime(i):\n L.append(largest_exponent(i, n))\n return L",
"def get_prime_factors(number):\n if number == 1:\n return []\n\n # We have to begin with 2 instead of 1 or 0\n # to avoid the calls infinite or the division by 0\n for i in range(2, number):\n # Get remainder and quotient\n rd, qt = divmod(number, i)\n if not qt: # if equal to zero\n return [i] + get_prime_factors(rd)\n\n return [number]",
"def factorPR(n):\r\n\tfor slow in [2,3,4,6]:\r\n\t\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n))); fast=slow; i=1\r\n\t\twhile i<numsteps:\r\n\t\t\tslow = (slow*slow + 1) % n\r\n\t\t\ti = i + 1\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tg = gcd(fast-slow,n)\r\n\t\t\tif (g != 1):\r\n\t\t\t\tif (g == n):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn g\r\n\treturn 1",
"def factorize(num):\n factors = []\n while num not in primes_list:\n for prime in primes_list:\n if num % prime == 0:\n factors.append(prime)\n num /= prime\n break\n factors.append(num)\n factors = sorted(factors)\n return factors",
"def factorial(n):\n\tf = 1\n\tfor i in range(1,n+1):\n\t\tf = f*i\n\n\treturn f",
"def linear_combination(n):\n weighs = (1, 3, 9, 27)\n\n for factors in factors_set():\n sum = 0\n for i in range(len(factors)):\n sum += factors[i] * weighs[i]\n if sum == n:\n return factors",
"def smallest_factor(n): \n if n == 1: return 1 \n for i in range(2, int(n**.5) + 1): \n if n % i == 0: return i",
"def factorone(n):\n\tif (is_prime(n)): return n\n\tfor fact in (2,3,5,7,11,13,17,19,23,29):\n\t\tif n%fact == 0: return fact\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned",
"def factorial(n):\n if not n>=0:\n \traise ValueError('n must be >=0')\n if math.floor(n)!=n:\n \traise ValueError('n must be exact integer')\n if n+1==n:\n \traise OverflowError(\"n too large\")\n result=1\n factor=2\n while factor<=n:\n \tresult*=factor\n \tfactor+=1\n return result",
"def landau1(n):\n\n i = 2\n sum_factors = 1\n factors = set()\n\n while i <= n: \n common = {j for j in factors if gcd(j, i) != 1}\n if len(common) == 0:\n factors = add_factor(i, n, factors)\n sum_factors = sum(factors)\n elif product(common) <= i:\n difference = factors.difference(common)\n new_factors = add_factor(i, n, difference)\n if product(new_factors) > product(factors):\n factors = new_factors\n sum_factors = sum(factors)\n i += 1\n\n print(n, product(factors), factors)\n return product(factors)"
] | [
"0.8692814",
"0.8681032",
"0.85969007",
"0.8498952",
"0.8478241",
"0.84262383",
"0.8397108",
"0.83875006",
"0.83585554",
"0.8355778",
"0.83539885",
"0.8348885",
"0.83466655",
"0.8333865",
"0.83293575",
"0.83183795",
"0.8317767",
"0.83094484",
"0.8283076",
"0.82130885",
"0.8210349",
"0.815054",
"0.8126397",
"0.81161386",
"0.80899245",
"0.80608433",
"0.8053054",
"0.8047244",
"0.8034153",
"0.8025938",
"0.80157",
"0.7933184",
"0.7891728",
"0.78909093",
"0.7879001",
"0.786119",
"0.781807",
"0.7718777",
"0.7619736",
"0.7607193",
"0.75864154",
"0.75722736",
"0.75242937",
"0.7523151",
"0.7445143",
"0.73652506",
"0.73505765",
"0.7345284",
"0.73446405",
"0.73446405",
"0.7331007",
"0.73220414",
"0.7282449",
"0.72333825",
"0.72049224",
"0.7160239",
"0.71559024",
"0.71435446",
"0.7100891",
"0.7089338",
"0.70653933",
"0.705347",
"0.70301914",
"0.70253366",
"0.7020523",
"0.70091134",
"0.7008096",
"0.70045674",
"0.69532275",
"0.69134754",
"0.68949574",
"0.6879052",
"0.68667334",
"0.68501306",
"0.6832884",
"0.6831958",
"0.6820586",
"0.67817605",
"0.6771915",
"0.6755312",
"0.6749511",
"0.6740087",
"0.67353404",
"0.6728019",
"0.67264897",
"0.6726257",
"0.6717526",
"0.6712247",
"0.6706458",
"0.6701881",
"0.66958755",
"0.6688504",
"0.6685507",
"0.66847795",
"0.6678567",
"0.66730124",
"0.6651202",
"0.6648019",
"0.66377014",
"0.6636539"
] | 0.818808 | 21 |
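
Multiplying the returned factors back together gives a cheap correctness check for the trial-division routine above (the threaded worker in the next record fills 'outdict' with exactly these factor lists). A minimal sketch, assuming factorize_naive is in scope as defined above; the sample inputs are arbitrary:

from functools import reduce

for n in [2, 12, 97, 360, 600851475143]:
    fs = factorize_naive(n)
    # The product of the returned prime factors must reconstruct n exactly.
    assert reduce(lambda a, b: a * b, fs, 1) == n
    print(n, "->", fs)
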
The worker function, invoked in a thread. 'nums' is a list of numbers to factor. The results are placed in 'outdict'. | def worker(nums, outdict):
print(threading.current_thread().name)
print ("pid:", os.getpid())
for n in nums:
outdict[n] = factorize_naive(n) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def worker(nums, out_q):\n outdict = {}\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n print (\"data size:\", nums)\n for n in nums:\n outdict[n] = factorize_naive(n)\n out_q.put(outdict)",
"def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)",
"def mock_workers(task, num_workers):\n results = [\n [{\n \"name\": \"tweet\",\n \"value\": \"%d. Trump Trump everywhere not a Hillary to see.\" % x\n }] for x in range(num_workers)]\n return results",
"def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)",
"def compute_metrics(self, results: list) -> dict:",
"def spawn_threads():\n t0 = threading.Thread(target=print_numbers, args=[10, 0.9, \"\"]) \n t1 = threading.Thread(target=print_numbers, args=[7, 1, \" \"])\n t0.start()\n t1.start()",
"def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = [worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)",
"def worker(file_paths, out_queue):\n\t\toutdict = {}\n\t\tfor path in file_paths:\n\t\t\toutdict[n] = run_muscle(path)\n\t\tout_queue.put(outdict)",
"def sweep_threading(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multithread preparation\n ##############################\n threads = 8\n points = points//threads*threads # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,threads)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... =1\n a = np.matrix(a)\n a = a.T\n\n thread_list = []\n for x in range(threads):\n thread_list.append(Sweep_Thread(self.result,job[x],prog,self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n))\n\n tStart = time.time() \n for t in thread_list:\n t.start()\n\n for t in thread_list:\n t.join()\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)",
"def worker(num_loops, cnt):\t\n\n\tglobal mutex\n\n\tfor i in range(num_loops):\n\t\ttotal = 0\n\t\tinside =0\n\n\t\tfor j in range(1000):\n\t\t\tx = random.random()\n\t\t\ty = random.random()\n\n\t\t\tif (x*x + y*y) <= 1:\n\t\t\t\t\tinside += 1\n\n\t\t\ttotal += 1\n\n\t\tmutex.acquire()\n\t\tcnt.add(total, inside)\n\t\tmutex.release()",
"def parallel(\n fn,\n workers=10,\n return_results=True,\n identifiers=None,\n args=None,\n kwargs=None,\n):\n # Check user input\n if args is not None and kwargs is not None:\n err = 'Amount of args must match those of kwargs'\n assert len(args) == len(kwargs), err\n\n if (args is not None or kwargs is not None) and identifiers is not None:\n err = 'Amount of identifier must match those of kw/args'\n n_args = len(args) if args is not None else len(kwargs)\n assert n_args == len(identifiers), err\n\n # Preprocessing for arguments lists\n identifiers = [] if identifiers is None else identifiers\n args = [] if args is None else args\n kwargs = [] if kwargs is None else kwargs\n\n if len(args) == 0 and len(kwargs) == 0:\n args = [None]\n kwargs = [None]\n else:\n if len(args) == 0:\n args = [[] for _ in range(len(kwargs))]\n if len(kwargs) == 0:\n kwargs = [dict() for _ in range(len(args))]\n\n # Initialize all the futures\n executor = futures.ThreadPoolExecutor(max_workers=workers)\n _futures = [\n executor.submit(fn, *args[i], **kwargs[i])\n for i in range(len(args))\n ]\n\n # Return only futures when requested\n if not return_results:\n return _futures\n\n # Block until we received all results\n if len(identifiers) > 0:\n results = {}\n else:\n results = []\n\n for i, future in enumerate(_futures):\n result = future.result()\n\n if len(identifiers) > 0:\n results[identifiers[i]] = result\n else:\n results.append(result)\n\n return results",
"def sweep_multiprocessing(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multiprocessing preparation\n ##############################\n core = 10\n points = points//core*core # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,core)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... =1\n a = np.matrix(a)\n a = a.T\n\n done_queue = multiprocessing.Queue()\n process_list = []\n for x in range(core):\n process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))\n\n tStart = time.time()\n print 'start'\n for p in process_list:\n p.start()\n\n stop_num = 0\n while stop_num != core:\n a = done_queue.get()\n if a == 'STOP':\n stop_num += 1\n else:\n self.result[a[0]] = a[1]\n prog.increment_amount()\n print prog, '\\r',\n sys.stdout.flush()\n\n print '\\n'\n for p in process_list:\n p.join()\n print \"%s.exitcode = %s\" %(p.name, p.exitcode)\n\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)",
"def evaluate(self, tick, task, inputs, nosend_ports=None, fail_on_unexpected_nosend=False):\n\n logger.debug(\"Transfers for job %s\" % tick)\n\n ports = []\n transfers = []\n transfer_results = {}\n for port, (valueid, worker) in inputs.iteritems():\n \n \n d = self.fetch_from(worker, valueid)\n \n def transfer_completed(transfer_result, valueid, port):\n if transfer_result: # `None` if the value was already present\n transfer_results[port] = transfer_result\n return self.get_value(valueid)\n \n\n d.addCallback(transfer_completed, valueid, port)\n ports.append(port)\n transfers.append(d)\n \n d = defer.DeferredList(transfers)\n \n def run(inputs):\n \"\"\"\n Runs in separate thread.\n \"\"\"\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)\n \n @twistit.yieldefer\n def got_all(results):\n \n logger.debug(\"Transfers for job %s finished\" % tick)\n \n values = []\n for success, result in results:\n if not success:\n if result.check(pickle.PickleError):\n raise pickle.PickleError(\"Failed to unpickle input of %r.%r: %s\" %(tick, port, result))\n else:\n result.raiseException()\n else:\n values.append(result)\n\n inputs = dict(zip(ports, values))\n \n evalresult = yield threads.deferToThread(run, inputs)\n \n if not isinstance(evalresult.result, dict) and not isinstance(evalresult.result, failure.Failure):\n raise ValueError(\"Evaluation of task %r did not produce a dict or a failure. Got %r.\" % (task, evalresult.result))\n \n defer.returnValue(evalresult)\n \n def task_completed(evalresult):\n if isinstance(evalresult.result, dict):\n \n # Injest values into our store and replace the eval results with ValueIds.\n outputs = evalresult.result\n outs = {}\n datasizes = {}\n for port, value in outputs.iteritems():\n valueid = ValueId(graph.Endpoint(tick, port))\n \n pickle_supported = True\n if nosend_ports and port in nosend_ports:\n pickle_supported = False\n \n try:\n size = self.set_value(valueid, \n value, \n pickle_supported, \n pickle_supported and fail_on_unexpected_nosend)\n except NoPickleError as e:\n e = NoPickleError(\"Value of output port %r cannot be pickled.\" % port,\n cause=e.cause)\n # TODO: memory leak. We should remove the values we've set in\n # previous loop iterations.\n raise e\n \n outs[port] = valueid\n if size is not None:\n datasizes[port] = size \n \n evalresult.result = outs\n evalresult.datasizes = datasizes\n evalresult.transfer_results = transfer_results\n return evalresult\n \n d.addCallback(got_all)\n d.addCallback(task_completed)\n return d",
"def manager(num_thrds, num_loops):\n\n\tmutex.acquire()\n\tcnt.reset()\n\tmutex.release()\n\n\t# initialize the thread pool\n\tthread_pool = []\n\n\tfor i in range(num_thrds):\n\t\tthrd = threading.Thread(target=worker, args=(num_loops, cnt))\n\t\tthread_pool.append(thrd)\n\n\t# start threads\n\tfor i in range(len(thread_pool)):\n\t\tthread_pool[i].start()\n\n\tfor i in range(len(thread_pool)):\n\t\tthreading.Thread.join(thread_pool[i])\n\n\t#cnt.display()",
"def __call__(self, q, threads = None):\n if threads is -1: threads = cpu_count()\n\n if threads is None:\n results = [self.evaluate(v) for v in q]\n elif type(threads) is int and threads > 0:\n workers = Pool(threads)\n results = workers.map(self.evaluate, q)\n else:\n raise ValueError('threads keyword must be either -1 or an integer greater than zero')\n\n mu = [ t[0] for t in results ]\n sig = [ t[1] for t in results ]\n return array(mu), array(sig)",
"def getResults(workers):\n results = []\n for worker in workers:\n results += worker.getResults()\n \n return results",
"def _passing_args_impl(self, pool_class_factory):\n DELTA = 12\n ITERATIONS = 100\n pool = pool_class_factory()\n\n pool.start(CoeffMultiplierWorker, {'coeff': DELTA})\n for i in range(ITERATIONS):\n pool.ventilate(message='Vent data {}'.format(i), value=i)\n\n all_results = [pool.get_results() for _ in range(ITERATIONS)]\n self.assertEqual({DELTA}, set(np.diff(sorted(all_results))))\n\n pool.stop()\n pool.join()",
"def receive_workers_output(node_request_map, results_list, free_nodes, command, idle_nodes):\n\n if dist.get_backend() == \"nccl\": # Async\n for node, req in node_request_map:\n if req.is_completed():\n result = build_metrics_dict(node) if command == COMMAND_TESTVAL else build_grads_dict(node)\n results_list.append(result)\n free_nodes.append(node)\n node_request_map.remove((node,req))\n print_rank(f\"Finished releasing the nodes {free_nodes}\", loglevel=logging.DEBUG)\n else: # Sync\n print_rank(f\"Waiting for a workers\", loglevel=logging.DEBUG)\n gather_objects = [(None,None,None) for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\" All workers have finished ... taking the remaining clients {len(output)}\", loglevel=logging.DEBUG)\n output = [e for i,e in enumerate(output) if i not in idle_nodes ] # Cleanup for idle workers\n results_list = results_list + output[1:]\n free_nodes = list(range(1, size()))\n \n return node_request_map, results_list, free_nodes",
"def worker_run():\n while True:\n print(\"worker: waiting for numdata_lock\")\n numdata_lock.acquire()\n print(\"worker: acquired numdata_lock\")\n print(\"The number {} is spelled '{}'\".format(numdata[\"int\"],numdata[\"name\"]))\n numdata_lock.release()\n time.sleep(1)",
"def _worker(self, results):\n keys = {\n \"test-certificate-verify\": {\n \"MD5 forced\": 2,\n \"TLSv1.1 signature in TLSv1.2 Certificate Verify\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-sig-algs\": {\"MD5 first\": 2, \"MITIGATION\": \"SLOTH\"},\n \"test-clienthello-md5\": {\n \"only-md5-rsa-signature_algorithm\": 1,\n \"unknown-signature_algorithm-numbers\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-tls13-pkcs-signature\": {\n \"rsa_pkcs1_md5 signature\": 1,\n \"MITIGATION\": \"SLOTH_MD5_SIGNATURE_TLS_1_3\",\n },\n }\n return self._obtain_results(results, keys)",
"def simulation(data_size : int,nbr_file : int, path : str, target):\n res = [0 for _ in range(data_size)]\n threads = []\n for i in range(data_size):\n threads.append(Thread(target = target, args = (nbr_file,path,res,i)))\n threads[i].start()\n\n for i in range(data_size):\n threads[i].join()\n\n return res",
"def iterate_mproc_map(wrap_func, iterate_vals, nb_workers=CPU_COUNT, desc='', ordered=True):\n iterate_vals = list(iterate_vals)\n nb_workers = 1 if not nb_workers else int(nb_workers)\n nb_workers = CPU_COUNT if nb_workers < 0 else nb_workers\n\n if desc is not None:\n pbar = tqdm.tqdm(total=len(iterate_vals), desc=str('%r @%i-threads' % (desc, nb_workers)))\n else:\n pbar = None\n\n if nb_workers > 1:\n logging.debug('perform parallel in %i threads', nb_workers)\n # Standard mproc.Pool created a demon processes which can be called\n # inside its children, cascade or multiprocessing\n # https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic\n\n # pool = mproc.Pool(nb_workers)\n # pool = NonDaemonPool(nb_workers)\n pool = ProcessPool(nb_workers)\n # pool = Pool(nb_workers)\n mapping = pool.imap if ordered else pool.uimap\n else:\n logging.debug('perform sequential')\n pool = None\n mapping = map\n\n for out in mapping(wrap_func, iterate_vals):\n pbar.update() if pbar else None\n yield out\n\n if pool:\n pool.close()\n pool.join()\n pool.clear()\n\n pbar.close() if pbar else None",
"def reduce_run():",
"def run_numbers():\n if run_nos:\n # Get task names\n tasks = []\n for rn in dcm_dict.keys():\n tasks.append(dcm_dict[rn]['task_name'])\n # Assign run numbers\n for tsk in set(tasks):\n n_runs = sum(i == tsk for i in tasks)\n if n_runs == 1:\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n # Add in the 'task' prefix required by BIDS format if missing from name\n if not tsk[0:4] == 'task':\n dcm_dict[rn]['out_name'] = 'task-'+tsk+'_run-01'\n else:\n dcm_dict[rn]['out_name'] = tsk+'_run-01'\n elif n_runs > 1:\n task_runs = []\n run_times = []\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n task_runs.append(rn)\n run_times.append(dcm_dict[rn]['start_time'].timestamp())\n idx_order = sorted(range(len(run_times)), key=lambda k: run_times[k])\n for i in idx_order:\n if not tsk[0:4] == 'task':\n dcm_dict[task_runs[i]]['out_name'] = 'task-'+tsk+'_run-0'+str(i+1)\n else:\n dcm_dict[task_runs[i]]['out_name'] = tsk+'_run-0'+str(i+1)\n else:\n for rn in dcm_dict.keys():\n dcm_dict[rn]['out_name'] = dcm_dict[rn]['task_name']",
"def exec(list_req, wb,write,Total):\n ret = None\n\n if write==True:\n for tick in list_req:\n retrieve_score(wb,tick,increase=True,write = write)\n retrieve_score(wb,tick,increase=False,write = write) \n \n else:\n if Total == True:\n ret_inc = retrieve_score(wb,list_req[0],increase=True,write = write)\n ret_score = retrieve_score(wb,list_req[0],increase=False,write = write)\n for tick in list_req[1:]:\n ret_inc = ret_inc.append(retrieve_score(wb,tick,increase=True,write = write))\n ret_score = ret_score.append(retrieve_score(wb,tick,increase=False,write = write))\n \n else:\n ret_inc = []\n ret_score = []\n for tick in list_req[1:]:\n ret_inc.append(retrieve_score(wb,tick,increase=True,write = write))\n ret_score.append(retrieve_score(wb,tick,increase=False,write = write))\n\n\n ret = (ret_score,ret_inc)\n\n \n return ret",
"def stats_freq():\n\n # Get a worker number to position the progress bar\n global idxQueue\n thr_idx = idxQueue.get()\n\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} stats_freq()\")\n\n # Initialize a Counter object for each family\n freqs = {}\n for f in famlist:\n freqs[f] = Counter()\n\n # List all nt_names happening within a RNA family and store the counts in the Counter\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n counts = dict(sql_ask_database(conn, f\"SELECT nt_name, COUNT(nt_name) FROM (SELECT chain_id from chain WHERE rfam_acc='{f}') NATURAL JOIN nucleotide GROUP BY nt_name;\", warn_every=0))\n freqs[f].update(counts)\n \n # Create a pandas DataFrame, and save it to CSV.\n df = pd.DataFrame()\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n tot = sum(freqs[f].values())\n df = pd.concat([ df, pd.DataFrame([[ format_percentage(tot, x) for x in freqs[f].values() ]], columns=list(freqs[f]), index=[f]) ])\n df = df.fillna(0)\n df.to_csv(runDir + \"/results/frequencies.csv\") \n idxQueue.put(thr_idx) # replace the thread index in the queue\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} finished\")\n # notify(\"Saved nucleotide frequencies to CSV file.\")",
"def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}",
"def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)",
"def RUN(numTrials, rateMap, numPhotons=48, angularSize=10.0, outputSize=300, mcList='MCOut.pickle',HESS=False, Sig = -1 ,numProcs = 10):\r\n print 'Beginning MC Series\\nProgress'\r\n \r\n import FermiPSF, ParseFermi\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n \r\n partial_MC_THREAD = partial( MC_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize,Sig = Sig)\r\n mcOut = p.map(partial_MC_THREAD, mcOut)\r\n \r\n# for i in range(numTrials): \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS= HESS,angularSize = angularSize)\r\n# # Compute number of source photons\r\n# numMC = numPhotons - len(background[0])\r\n# # Run MC for source photons \r\n# data = MC(map,numMC,angularSize,outputSize,PSFTableFront, PSFTableBack,HESS=HESS)\r\n# # Append data\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut",
"def solution(nums):\n solution = Solution()\n output = solution.threeSum(nums)\n\n print(output)",
"def transformResults(threadCounts, values, function):\n res = {}\n for bm in list(values.keys()):\n res[bm] = []\n for (nThreads, v) in zip(threadCounts, values[bm]):\n res[bm].append(None if v == None else function(v, nThreads))\n return res",
"def worker_func(worker_id, w2t_m_queue, events, t2w_d_manager):\n average_iteration_time = 0\n worker_nn = create_neural_network()\n iteration_time = time.time()\n for i in range(ITERATIONS):\n data_point = create_data_point(worker_nn)\n events[\"Workers_can_proceed\"].clear()\n w2t_m_queue.put(data_point)\n # Signal trainer that this worker has placed its data point this iteration\n events[worker_id].set()\n average_iteration_time += (time.time() - iteration_time)\n # Have worker wait until trainer is done processing this iteration\n events[\"Workers_can_proceed\"].wait()\n iteration_time = time.time()\n # Obtain data trainer has placed into shared manager (data is weights of network)\n shared_data = t2w_d_manager[0]\n worker_nn.set_weights(shared_data)\n\n average_iteration_time /= ITERATIONS\n print(\"Worker \" + str(worker_id) + \" average put time: \" + str.format('{0:.6f}', (average_iteration_time*1000)) + \"ms\")",
"def runWork(self,benches):\r\n\r\n self.finished = 0\r\n\r\n# callbacks = []\r\n# callbacks.append(self.fetchBenchResults)\r\n# callbacks.append(self.setToWork)\r\n# callbacks.append(self.fetchResults)\r\n\r\n self.log('Signal','deferring runBenches to thread','work')\r\n\r\n# for fetcher in fetchers:\r\n# threads.deferToThread(fetcher())\r\n# self.log('Signal','Started fetcher: %s' % repr(fetcher),'runWork')\r\n\r\n# self.runBenches(callbacks)\r\n self.setToWork(benches)",
"def loop_threaded():\n nonlocal index, total\n nonlocal d_tree\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal str_desc\n\n def thread_createOnFunction(path, data, str_namePrefix, fn_thread):\n \"\"\"\n Simply create a thread function and return it.\n \"\"\"\n nonlocal index\n ta = threading.Thread(\n name = '%s-%04d.%d' % (str_namePrefix, index, self.numThreads),\n target = fn_thread,\n args = (path, data, index),\n kwargs = kwargs\n )\n return ta\n\n def threadsInBatches_run(l_threadAnalysis):\n \"\"\"\n Run threads in batches of self.numThreads\n and also handle any remaining threads.\n \"\"\"\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)\n\n if int(self.verbosityLevel) and self.toConsole():\n iterator = tqdm( self.d_inputTree.items(),\n desc = str_desc)\n else:\n iterator = self.d_inputTree.items()\n\n # Read\n if fn_inputReadCallback:\n index = 1\n for path, data in iterator:\n dret_inputSet = inputSet_read(path, data)\n # filesRead += dret_inputSet['filesRead']\n index += 1\n\n # Analyze\n if fn_analysisCallback:\n index = 1\n l_threadAnalysis = []\n for path, data in iterator:\n l_threadAnalysis.append(thread_createOnFunction(\n path, data,\n 'analysisThread',\n # t_analyze\n analysis_do\n )\n )\n index += 1\n\n # And now batch them in groups\n threadsInBatches_run(l_threadAnalysis)\n tree_removeDeadBranches()\n # Write\n if fn_outputWriteCallback:\n index = 1\n for path, data in iterator:\n dret_outputSet = outputSet_write(path, d_tree[path])\n # filesSaved += dret_outputSet['filesSaved']\n index += 1",
"def run_threads(individuals):\n\n with Pool(os.cpu_count()-1) as p:\n new_individuals = p.map(evo.generate_phenotype, individuals)\n p.close()\n return new_individuals",
"def run_sim(self, dictionary):\n\t\tsim_start = time.time()\n\t\tglobal HAS_RUN_ITEM_ROUTING, ROUTING_ARRAY\n\t\tself.sim_num += 1 # indicate that we've begun another simulation\n\t\tpassedItems = []\n\t\titemsDoneArray = [0]\n\t\tswitch = 0\n\t\teddyTimes = []\n\t\ttaskTimes = []\n\t\tworkerDoneTimes = []\n\t\tnoTasks = 0\n\t\tscores = []\n\t\tticketNums = []\n\t\tselectivities = []\n\n\t\ttime_proxy = 0\n\t\torig_active_tasks = toggles.ACTIVE_TASKS_SIZE # saves the initial size of the array\n\t\tactive_tasks_size = orig_active_tasks # keeps track of the current size of the array\n\t\ttps_start = 3\n\t\tsecs = 0 # used to count time steps when tasks per second is less than 1\n\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\tfor count in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_selectivities.append([])\n\n\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tfor predNum in range(len(CHOSEN_PREDS)):\n\t\t\t\t\tscores.append([])\n\t\t\telse:\n\t\t\t\tfor count in range(NUM_QUESTIONS):\n\t\t\t\t\tscores.append([])\n\n\t\ttotalWorkTime = 0\n\t\ttasksArray = []\n\n\t\t# array of workers who are busy\n\t\tb_workers = [0]\n\n\t\t# array of tasks currently in process\n\t\tactive_tasks = []\n\n\t\t#time counter\n\t\ttime_clock = 0\n\n\t\t# set up a dictionary to hold counts of active tasks_out\n\t\tif toggles.REAL_DATA:\n\t\t\tfor pred in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_active_tasks[pred+1] = []\n\t\t\t\tself.pred_queues[pred+1] = []\n\t\t\t\tself.ticket_nums[pred+1] = []\n\t\telse:\n\t\t\tfor pred in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_active_tasks[pred+1] = []\n\t\t\t\tself.pred_queues[pred+1] = []\n\t\t\t\tself.ticket_nums[pred+1] = []\n\n\t\t# add an entry to save the numbers of placeholder tasks\n\t\tself.pred_active_tasks[0] = []\n\n\t\t#Setting up arrays to count tickets for ticketing counting graphs\n\t\t# if toggles.COUNT_TICKETS:\n\t\t# \tif toggles.REAL_DATA:\n\t\t# \t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t# \t\t\tself.ticketNums.append([])\n\t\t# \telse:\n\t\t# \t\tfor count in toggles.CHOSEN_PREDS:\n\t\t# \t\t\tself.ticketNums.append([])\n\n\t\t# Setting up arrays for TRACK_SIZE\n\t\tif toggles.TRACK_SIZE:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\tself.consensus_size.append([])\n\t\t\telse:\n\t\t\t\tfor count in toggles.CHOSEN_PREDS:\n\t\t\t\t\tself.consensus_size.append([])\n\n\t\t# If running Item_routing, setup needed values\n\t\tif ((not HAS_RUN_ITEM_ROUTING) and toggles.RUN_ITEM_ROUTING) or toggles.RUN_MULTI_ROUTING:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tpredicates = [Predicate.objects.get(pk=pred+1) for pred in toggles.CHOSEN_PREDS]\n\t\t\telse:\n\t\t\t\tpredicates = [Predicate.objects.get(pk=pred+1) for pred in range(toggles.NUM_QUESTIONS)]\n\t\t\troutingC, routingL, seenItems = [], [], set()\n\t\t\tfor i in range(len(predicates)):\n\t\t\t\troutingC.append(0)\n\t\t\t\troutingL.append([0])\n\n\t\tip_pair = IP_Pair()\n\t\ttotal_ip_pairs = IP_Pair.objects.all().count()\n\n\t\tif toggles.SIMULATE_TIME:\n\t\t\tprev_time = 0\n\n\t\t\twhile (IP_Pair.objects.filter(isDone=False).exists() or active_tasks) :\n\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tif (time_clock % 60 == 0) or (time_clock - prev_time > 1):\n\t\t\t\t\t\tprint \"$\"*43 + \" t = \" + str(time_clock) + \" \" + \"$\"*(47-len(str(time_clock)))\n\n\t\t\t\t\t\tprint \"$\"*96\n\n\t\t\t\t\t\tprint \"Incomplete IP Pairs: \" + str(IP_Pair.objects.filter(isDone=False).count()) + \" | Tasks completed: \" + 
str(self.num_tasks)\n\t\t\t\t\t\tprint \"\"\n\t\t\t\t\t\tfor ip in IP_Pair.objects.filter(inQueue=True):\n\t\t\t\t\t\t\tprint \"IP Pair \" + str(ip.pk) + \" | Predicate: \" + str(ip.predicate.id) + \" ||| Tasks out: \" + str(ip.tasks_out) + \" | Num yes: \" + str(ip.num_yes) + \" | Num no: \" + str(ip.num_no) + \" | isDone: \" + str(ip.isDone)\n\n\t\t\t\t\t\t\tif ip.num_no + ip.num_yes > toggles.CONSENSUS_SIZE_LIMITS[1]:\n\t\t\t\t\t\t\t\tprint \"Total votes: \" + str(ip.num_no+ip.num_yes)\n\t\t\t\t\t\t\t\traise Exception (\"Too many votes cast for IP Pair \" + str(ip.id))\n\n\t\t\t\t\t\t\tif (ip.tasks_out == 0) and ip.isDone and ip.inQueue:\n\t\t\t\t\t\t\t\traise Exception (\"IP Pair \" + str(ip.id) + \" has no tasks out and is done, still in queue\")\n\t\t\t\t\t\tif toggles.EDDY_SYS == 2:\n\t\t\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t\t\tprint \"Task for IP Pair \" + str(task.ip_pair.id)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint \"Placeholder\"\n\t\t\t\t\t\tplaceholders = 0\n\t\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\t\tif task.ip_pair == None:\n\t\t\t\t\t\t\t\tplaceholders += 1\n\t\t\t\t\t\tprint \"\"\n\t\t\t\t\t\tif len(active_tasks) == 0:\n\t\t\t\t\t\t\tprint \"Active tasks is empty.\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \"Active tasks: \" + str(len(active_tasks)) + \" | Placeholders: \" + str(placeholders)\n\n\t\t\t\t\t\t\tprint \"IP pairs in queue: \" + str(IP_Pair.objects.filter(inQueue=True).count())\n\t\t\t\t\t\t# print \"\"\n\t\t\t\t\t\t# for p in Predicate.objects.filter(pk__in=[pred+1 for pred in toggles.CHOSEN_PREDS]) :\n\t\t\t\t\t\t# \tprint \"Predicate \" + str(p.pk) + \" ||| Queue full: \" + str(p.queue_is_full) + \" | Queue length: \" + str(p.queue_length) + \" | Tickets: \" + str(p.num_tickets)\n\n\t\t\t\t\t\tprint \"$\"*96\n\n\t\t\t\t# throw some errors for debugging purposes\n\t\t\t\tif not (Item.objects.filter(inQueue=True).count() == IP_Pair.objects.filter(inQueue=True).count()):\n\t\t\t\t\tprint \"inQueue items: \" + str(Item.objects.filter(inQueue=True).count())\n\t\t\t\t\tprint \"inQueue IPs: \" + str(IP_Pair.objects.filter(inQueue=True).count())\n\t\t\t\t\traise Exception(\"IP and item mismatch\")\n\n\t\t\t\tfor p in Predicate.objects.filter(queue_is_full = True):\n\t\t\t\t\tif not p.num_pending >= p.queue_length:\n\t\t\t\t\t\traise Exception (\"Queue for predicate \" + str(p.id) + \" isn't actually full\")\n\n\t\t\t\t\tif IP_Pair.objects.filter(predicate=p, inQueue=True).count() < p.queue_length:\n\t\t\t\t\t\traise Exception (\"Not enough IP_Pairs in queue for predicate \" + str(p.id) + \" for it to be full\")\n\n\t\t\t\t\t# if IP_Pair.objects.filter(predicate=p, inQueue=True).count() > p.queue_length:\n\t\t\t\t\t# \traise Exception(\"The queue for predicate \" + str(p.id) + \" is over-full\")\n\n\t\t\t\t\tif not IP_Pair.objects.filter(predicate=p, inQueue=True).count() == p.num_pending:\n\t\t\t\t\t\tprint \"IP objects in queue for pred \" + str(p.id) + \": \" + str(IP_Pair.objects.filter(predicate=p, inQueue=True).count())\n\t\t\t\t\t\tprint \"Number pending for pred \" + str(p.id) + \": \" + str(p.num_pending)\n\t\t\t\t\t\traise Exception(\"WHEN REMOVING Mismatch num_pending and number of IPs in queue for pred \" + str(p.id))\n\n\t\t\t\tself.time_steps_array.append(time_clock)\n\n\t\t\t\t# increment seconds for when tasks per second less than 1\n\t\t\t\tsecs += 1\n\t\t\t\tratio=IP_Pair.objects.filter(isDone=True).count()/float(total_ip_pairs)\n\t\t\t\tif 
toggles.TASKS_PER_SECOND:\n\t\t\t\t\t# change the rate of task requests\n\t\t\t\t\ttps = self.set_tps(ratio, tps_start)\n\n\t\t\t\tif toggles.RESIZE_ACTIVE_TASKS:\n\t\t\t\t\tratio = IP_Pair.objects.filter(isDone=True).count()/float(total_ip_pairs)\n\t\t\t\t\tactive_tasks_size = self.set_active_size(ratio, orig_active_tasks)\n\n\n\t\t\t\tif toggles.TRACK_ACTIVE_TASKS:\n\t\t\t\t\t# append a new counter for the next time step\n\t\t\t\t\tfor pred in self.pred_active_tasks:\n\t\t\t\t\t\tself.pred_active_tasks[pred].append(0)\n\n\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t_id = task.ip_pair.predicate.id\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t_id = 0\n\t\t\t\t\t\t# add one to the most recent counter\n\t\t\t\t\t\tself.pred_active_tasks[_id][-1] += 1\n\n\t\t\t\tprev_time = time_clock\n\t\t\t\tendTimes = []\n\n\t\t\t\tif toggles.TRACK_IP_PAIRS_DONE:\n\t\t\t\t\tself.ips_done_array.append(IP_Pair.objects.filter(isDone=True).count())\n\t\t\t\t\tself.ips_times_array.append(time_clock)\n\t\t\t\t\tself.ips_tasks_array.append(self.num_tasks)\n\n\t\t\t\tif toggles.TRACK_QUEUES:\n\t\t\t\t\tfor pred in self.pred_queues:\n\t\t\t\t\t\tself.pred_queues[pred].append(IP_Pair.objects.filter(predicate__id=pred, inQueue=True).count())\n\n\t\t\t\tif toggles.COUNT_TICKETS:\n\t\t\t\t\tfor pred in self.ticket_nums:\n\t\t\t\t\t\tself.ticket_nums[pred].append(Predicate.objects.get(pk=pred).num_tickets)\n\n\t\t\t\t# check if any tasks have reached completion, update bookkeeping\n\t\t\t\t# print \"Removing tasks\"\n\t\t\t\t# iterate over a copy of active_tasks, since finished tasks are removed inside the loop\n\t\t\t\tfor task in active_tasks[:]:\n\t\t\t\t\tif (task.end_time <= time_clock):\n\t\t\t\t\t\tupdateCounts(task, task.ip_pair)\n\t\t\t\t\t\t#task.refresh_from_db()\n\t\t\t\t\t\tactive_tasks.remove(task)\n\t\t\t\t\t\tb_workers.remove(task.workerID)\n\t\t\t\t\t\tself.num_tasks += 1\n\n\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\tif not IP_Pair.objects.filter(predicate=task.ip_pair.predicate, inQueue=True).count() == task.ip_pair.predicate.num_pending:\n\t\t\t\t\t\t\t\tprint \"IP objects in queue for pred \" + str(task.ip_pair.predicate.id) + \": \" + str(IP_Pair.objects.filter(predicate=task.ip_pair.predicate, inQueue=True).count())\n\t\t\t\t\t\t\t\tprint \"Number pending for pred \" + str(task.ip_pair.predicate.id) + \": \" + str(task.ip_pair.predicate.num_pending)\n\t\t\t\t\t\t\t\traise Exception(\"WHEN REMOVING Mismatch num_pending and number of IPs in queue for pred \" + str(task.ip_pair.predicate.id))\n\t\t\t\t\telse:\n\t\t\t\t\t\tendTimes.append(task.end_time)\n\n\t\t\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t\t\t# \tif task.ip_pair is None:\n\t\t\t\t\t# \t\tprint \"Task removed ||| Placeholder\"\n\t\t\t\t\t# \telse:\n\t\t\t\t\t# \t\tprint \"Task removed ||| Item: \" + str(task.ip_pair.item.id) + \" | Predicate: \" + str(task.ip_pair.predicate.id) + \" | IP Pair: \" + str(task.ip_pair.id)\n\n\n\t\t\t\t# decides whether to give out more tasks if tasks per second is less than 1\n\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\ttask_limit = tps\n\t\t\t\t\tcount = 0\n\t\t\t\t\tif tps < 1:\n\t\t\t\t\t\ttask_limit = 1\n\t\t\t\t\t\trefill = False\n\t\t\t\t\t\tif secs >= 1.0/tps:\n\t\t\t\t\t\t\trefill = True\n\t\t\t\t\t\t\tsecs = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\trefill = True\n\t\t\t\telse:\n\t\t\t\t\t# set up variables to function properly in case fixed active tasks size is being used\n\t\t\t\t\trefill = True\n\t\t\t\t\tcount = len(active_tasks)\n\t\t\t\t\ttask_limit = active_tasks_size\n\t\t\t\t# fill the active task array with new tasks as long as some IPs need eval\n\t\t\t\tif 
refill:\n\t\t\t\t\twhile (count < task_limit) and IP_Pair.objects.filter(isDone=False).exists(): # and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, isDone=False).exists()): #or IP_Pair.objects.filter(inQueue=True, tasks_remaining__gt=0).exists()):\n\t\t\t\t\t# while (count < tps) and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, tasks_out__lt=toggles.MAX_TASKS_OUT).extra(where=[\"tasks_out + tasks_collected < \" + str(toggles.MAX_TASKS_COLLECTED)]).exists() or toggles.EDDY_SYS == 2):\n\t\t\t\t\t# while (len(active_tasks) < active_tasks_size) and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, tasks_out__lt=toggles.MAX_TASKS_OUT).extra(where=[\"tasks_out + tasks_collected < \" + str(toggles.MAX_TASKS_COLLECTED)]).exists() or toggles.EDDY_SYS == 2):\n\n\t\t\t\t\t\ttask, worker = self.issueTask(active_tasks, b_workers, time_clock, dictionary, switch)\n\n\t\t\t\t\t\tif task is not None:\n\n\t\t\t\t\t\t\t# TODO if we're in \"placeholder task\" mode, task should never be None\n\n\n\t\t\t\t\t\t\tactive_tasks.append(task)\n\t\t\t\t\t\t\tb_workers.append(worker)\n\n\t\t\t\t\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\t# \tif task.ip_pair is None:\n\t\t\t\t\t\t\t# \t\tprint \"Task added ||| Placeholder\"\n\t\t\t\t\t\t\t# \telse:\n\t\t\t\t\t\t\t# \t\tprint \"Task added ||| Item: \" + str(task.ip_pair.item.id) + \" | Predicate: \" + str(task.ip_pair.predicate.id) + \" | IP Pair: \" + str(task.ip_pair.id)\n\n\t\t\t\t\t\t\t# ITEM ROUTING DATA COLLECTION\n\t\t\t\t\t\t\t# If we should be running a routing test\n\t\t\t\t\t\t\t# this is true in two cases: 1) we hope to run a single\n\t\t\t\t\t\t\t# item_routing test and this is the first time we've run\n\t\t\t\t\t\t\t# run_sim or 2) we're running multiple routing tests, and\n\t\t\t\t\t\t\t# so should take this data every time we run.\n\n\t\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t\tif (toggles.RUN_ITEM_ROUTING and (not HAS_RUN_ITEM_ROUTING)) or toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\t\t\t\t\t# if this is a \"new\" item\n\t\t\t\t\t\t\t\t\tif task.ip_pair.item.item_ID not in seenItems:\n\t\t\t\t\t\t\t\t\t\tseenItems.add(task.ip_pair.item.item_ID)\n\t\t\t\t\t\t\t\t\t\t# increment the count of that item's predicate\n\t\t\t\t\t\t\t\t\t\tfor i in range(len(predicates)):\n\t\t\t\t\t\t\t\t\t\t\tif task.ip_pair.predicate == predicates[i]:\n\t\t\t\t\t\t\t\t\t\t\t\troutingC[i]+=1\n\t\t\t\t\t\t\t\t\t\t\t\t# and add this \"timestep\" to the running list\n\t\t\t\t\t\t\t\t\t\t\t\troutingL[i].append(routingC[i])\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# we couldn't give ANYONE a task; fast-forward to next task expiry\n\t\t\t\t\t\t\tself.no_tasks_to_give += 1\n\t\t\t\t\t\t\tif endTimes:\n\t\t\t\t\t\t\t\ttime_clock = min(endTimes) - 1\n\t\t\t\t\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcount = len(active_tasks)\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcount = len(active_tasks)\n\n\t\t\t\tmove_window()\n\n\t\t\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\t\t\tself.placeholder_change_count.append(DummyTask.objects.all().count())\n\t\t\t\t\tself.num_tasks_change_count.append(Task.objects.all().count())\n\t\t\t\ttime_clock += 1\n\n\n\n\t\t\t\t#the tuples in switch_list are of the form (time, pred1, pred2 ....),\n\t\t\t\t#so we need index 0 of the tuple to get the time at which the switch should 
occur\n\t\t\t\tif (switch + 1) < len(toggles.switch_list) and toggles.switch_list[switch + 1][0] <= time_clock:\n\t\t\t\t\tswitch += 1\n\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Simulation completed ||| Simulated time = \" + str(time_clock) + \" | number of tasks: \" + str(self.num_tasks)\n\t\t\t\tprint \"Time steps: \" + str(len(self.time_steps_array))\n\t\t\t\tprint \"Predicates saved in active tasks dict: \" + str(self.pred_active_tasks.keys())\n\t\t\t\tprint \"Size of predicates' arrays: \" + str([len(self.pred_active_tasks[key]) for key in self.pred_active_tasks])\n\n\n\n\t\telse:\n\t\t\twhile ip_pair is not None:\n\n\t\t\t\tif toggles.TRACK_IP_PAIRS_DONE:\n\t\t\t\t\tself.ips_done_array.append(IP_Pair.objects.filter(isDone=True).count())\n\t\t\t\t\tself.ips_tasks_array.append(self.num_tasks)\n\n\t\t\t\tif toggles.COUNT_TICKETS:\n\t\t\t\t\tfor pred in self.ticket_nums:\n\t\t\t\t\t\tself.ticket_nums[pred].append(Predicate.objects.get(pk=pred).num_tickets)\n\n\t\t\t\tif toggles.TRACK_QUEUES:\n\t\t\t\t\tfor pred in self.pred_queues:\n\t\t\t\t\t\tself.pred_queues[pred].append(IP_Pair.objects.filter(predicate__id=pred, inQueue=True).count())\n\n\t\t\t\t# only increment if worker is actually doing a task\n\t\t\t\tworkerID = self.pick_worker([0], [0]) # array needed to make pick_worker run\n\t\t\t\tworkerDone, workerDoneTime = worker_done(workerID)\n\t\t\t\tself.worker_done_time += workerDoneTime\n\n\t\t\t\tif not IP_Pair.objects.filter(isDone=False).exists():\n\t\t\t\t\tip_pair = None\n\n\t\t\t\telif (workerDone):\n\t\t\t\t\tif not toggles.DUMMY_TASKS:\n\t\t\t\t\t\tself.num_placeholders += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\td = DummyTask(workerID=workerID)\n\t\t\t\t\t\td.save()\n\t\t\t\t\t\tself.num_tasks += 1\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"worker \" + str(workerID) + \" has no tasks to do\"\n\n\t\t\t\telse:\n\t\t\t\t\tip_pair = pending_eddy(workerID)\n\n\t\t\t\t\t# If we should be running a routing test\n\t\t\t\t\t# this is true in two cases: 1) we hope to run a single\n\t\t\t\t\t# item_routing test and this is the first time we've run\n\t\t\t\t\t# run_sim or 2) we're running multiple routing tests, and\n\t\t\t\t\t# so should take this data every time we run.\n\n\t\t\t\t\tif (toggles.RUN_ITEM_ROUTING and (not HAS_RUN_ITEM_ROUTING)) or toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\t\tif ip_pair is not None: # if this is a real ip pair\n\t\t\t\t\t\t\t# if this is a \"new\" item\n\t\t\t\t\t\t\tif ip_pair.item.item_ID not in seenItems:\n\t\t\t\t\t\t\t\tseenItems.add(ip_pair.item.item_ID)\n\t\t\t\t\t\t\t\t# increment the count of that item's predicate\n\t\t\t\t\t\t\t\tfor i in range(len(predicates)):\n\t\t\t\t\t\t\t\t\tif ip_pair.predicate == predicates[i]:\n\t\t\t\t\t\t\t\t\t\troutingC[i]+=1\n\t\t\t\t\t\t\t\t\t\t# and add this \"timestep\" to the running list\n\t\t\t\t\t\t\t\t\t\troutingL[i].append(routingC[i])\n\n\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\ttask = self.simulate_task(ip_pair, workerID, 0, dictionary)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttask = self.syn_simulate_task(ip_pair, workerID, 0, switch, self.num_tasks)\n\n\t\t\t\t\tmove_window()\n\t\t\t\t\tself.num_tasks += 1\n\n\n\n\t\t\t\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=toggles.CHOSEN_PREDS[predNum]+1)\n\t\t\t\t\t\t\t\tpredicate.refresh_from_db()\n\t\t\t\t\t\t\t\tscores[predNum].append(predicate.score)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor count in range(toggles.NUM_QUESTIONS):\n\t\t\t\t\t\t\t\tpredicate = 
Predicate.objects.get(pk=count+1)\n\t\t\t\t\t\t\t\tpredicate.refresh_from_db()\n\t\t\t\t\t\t\t\tscores[count].append(predicate.score)\n\t\t\t\t\tif toggles.TRACK_SIZE:\n\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=toggles.CHOSEN_PREDS[predNum]+1)\n\t\t\t\t\t\t\t\tself.consensus_size[predNum].append(predicate.consensus_max)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor i, predNum in enumerate(toggles.CHOSEN_PREDS):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=predNum+1)\n\t\t\t\t\t\t\t\tself.consensus_size[i].append(predicate.consensus_max)\n\n\t\t\t\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\t\t\t\tfor i, predNum in enumerate(toggles.CHOSEN_PREDS):\n\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=predNum+1)\n\t\t\t\t\t\t\tpredicate.refresh_from_db(fields=['trueSelectivity'])\n\t\t\t\t\t\t\t#print \"true selectivity: \", str(predicate.trueSelectivity)\n\t\t\t\t\t\t\tself.pred_selectivities[i].append(predicate.trueSelectivity)\n\n\t\t\t\t\t#the tuples in switch_list are of the form (time, pred1, pred2 ....),\n\t\t\t\t\t#so we need index 0 of the tuple to get the time at which the switch should occur\n\t\t\t\t\tif (switch + 1) < len(toggles.switch_list) and toggles.switch_list[switch + 1][0] == self.num_tasks:\n\t\t\t\t\t\tswitch += 1\n\n\n\n\t\tif toggles.DUMMY_TASKS:\n\t\t\tself.num_placeholders = DummyTask.objects.all().count()\n\t\t\tself.num_real_tasks = self.num_tasks - self.num_placeholders\n\n\t\t# TODO add cumulative work time and cumulative placeholder time separately\n\t\t# TODO make sure all graphs use appropriate information -- new data members\n\t\t# TODO change return stuff of run_sim to be none of the things it is now\n\n\t\t# save relevant values\n\t\tself.num_tasks_array.append(self.num_tasks)\n\n\t\tif toggles.SIMULATE_TIME:\n\t\t\tself.simulated_time = time_clock\n\t\t\tself.simulated_time_array.append(self.simulated_time)\n\t\t\tself.cum_work_time_array.append(self.cum_work_time)\n\t\t\tself.cum_placeholder_time_array.append(self.cum_placeholder_time)\n\n\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\tself.num_real_tasks_array.append(self.num_real_tasks)\n\t\t\tself.num_placeholders_array.append(self.num_placeholders)\n\n\t\tif toggles.TEST_ACCURACY:\n\t\t\tself.get_incorrects()\n\t\t\tself.num_incorrect_array.append(self.num_incorrect)\n\n\t\t# if toggles.TRACK_IP_PAIRS_DONE:\n\t\t# \tdest = toggles.OUTPUT_PATH + \"ip_done_vs_tasks_q_\" + str(toggles.PENDING_QUEUE_SIZE) + \"_activeTasks_\" + str(toggles.ACTIVE_TASKS_SIZE) + \"_eddy_\" + str(toggles.EDDY_SYS) + \"\"\n\t\t# \tcsv_dest = dest_resolver(dest+\".csv\")\n\t\t#\n\t\t# \tdataToWrite = [self.ips_tasks_array, self.time_steps_array, self.ips_done_array]\n\t\t# \tgeneric_csv_write(csv_dest, dataToWrite) # saves a csv\n\t\t# \tif toggles.DEBUG_FLAG:\n\t\t# \t\tprint \"Wrote File: \" + csv_dest\n\t\t# \tif toggles.GEN_GRAPHS:\n\t\t# \t\tif (IP_Graph_2 == False and toggles.EDDY_SYS==2) or (IP_Graph_5==False and toggles.EDDY_SYS==5):\n\t\t# 
\t\t\tline_graph_gen(dataToWrite[0], dataToWrite[2], dest + \".png\",\n\t\t# \t\t\t\t\t\tlabels = (\"Number Tasks Completed\", \"Number IP Pairs Completed\"),\n\t\t# \t\t\t\t\t\ttitle = \"Number IP Pairs Done vs. Number Tasks Completed\")\n\t\t# \t\t\tif toggles.SIMULATE_TIME:\n\t\t# \t\t\t\tdest1 = toggles.OUTPUT_PATH + \"ip_done_vs_time_q_\" + str(toggles.PENDING_QUEUE_SIZE) + \"_activeTasks_\" + str(toggles.ACTIVE_TASKS_SIZE) + \"_eddy_\" + str(toggles.EDDY_SYS) + \"\"\n\t\t# \t\t\t\tline_graph_gen(dataToWrite[1], dataToWrite[2], dest1+'.png',\n\t\t# \t\t\t\tlabels = (\"Time Steps\", \"Number IP Pairs Completed\"),\n\t\t# \t\t\t\ttitle = \"Number IP Pairs Done vs. Time\")\n\t\t# \t\t\tif toggles.EDDY_SYS == 2:\n\t\t# \t\t\t\tIP_Graph_2 = True\n\t\t# \t\t\telif toggles.EDDY_SYS == 5:\n\t\t# \t\t\t\tIP_Graph_5 = True\n\n\t\t# TODO figure out this no_tasks thingie\n\t\t# produces/appends to CSVs\n\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\t# dest = toggles.OUTPUT_PATH + \"noTasks.csv\"\n\t\t\t# with open(dest, 'a') as f:\n\t\t\t# \tf.write(str(no_tasks_to_give) + \",\")\n\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t# \tprint \"Wrote file: \" + dest\n\n\t\t\tdest = toggles.OUTPUT_PATH + \"placeholderTasks.csv\"\n\t\t\twith open(dest, 'a') as f1:\n\t\t\t\tf1.write(str(self.num_placeholders) + ',')\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote file: \" + dest\n\n\t\tif toggles.OUTPUT_SELECTIVITIES:\n\t\t\toutput_selectivities(toggles.RUN_NAME) # TODO make sure this still works\n\n\t\tif toggles.OUTPUT_COST:\n\t\t\toutput_cost(toggles.RUN_NAME)\n\n\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\tif toggles.SIMULATE_TIME:\n\t\t\t\ttime_proxy = self.simulated_time\n\t\t\telse:\n\t\t\t\ttime_proxy = self.num_tasks\n\t\t\tpredScoresLegend = []\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\txMultiplier = len(toggles.CHOSEN_PREDS)\n\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\tpredScoresLegend.append(\"Pred \" + str(predNum))\n\t\t\telse:\n\t\t\t\txMultiplier = toggles.NUM_QUESTIONS\n\t\t\t\tfor predNum in range(toggles.NUM_QUESTIONS):\n\t\t\t\t\tpredScoresLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(time_proxy)]*xMultiplier, scores, predScoresLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"predScores\" + str(self.sim_num) + \".png\",\n\t\t\t\t\t\t\t\tlabels = (\"time proxy\", \"scores\"))\n\n\n\n\t\tif toggles.COUNT_TICKETS:\n\n\t\t\tif toggles.SIMULATE_TIME:\n\t\t\t\ttime_proxy = self.simulated_time\n\t\t\telse:\n\t\t\t\ttime_proxy = self.num_tasks\n\t\t\tticketCountsLegend = []\n\t\t\txMultiplier = len(toggles.CHOSEN_PREDS)\n\t\t\t\n\t\t\tticket_nums_shifted = [] # ticket_nums doesn't start at index 0, so create array to hold counts for each pred\n\t\t\tfor pred in self.ticket_nums:\n\t\t\t\tlengthdiff = len(self.ticket_nums[pred]) - time_proxy # how many more entries are there in ticket counts than time proxy\n\t\t\t\tif lengthdiff > 0:\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Warning: trimmed last \"+ str(lengthdiff) + \" entries off ticket counts, graph may not be accurate\"\n\t\t\t\t\tself.ticket_nums[pred] = self.ticket_nums[pred][:-lengthdiff] # trim to make lengths equal for plotting\n\t\t\t\tticket_nums_shifted.append(self.ticket_nums[pred]) # append in the new array\n\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\tticketCountsLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(time_proxy)]*xMultiplier, ticket_nums_shifted, ticketCountsLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"ticketCounts\" + str(self.sim_num) + 
\".png\",\n\t\t\t\t\t\t\t\tlabels = (\"time proxy\", \"Ticket counts\"))\n\n\t\tif toggles.TRACK_SIZE:\n\t\t\tif not toggles.SIMULATE_TIME:\n\t\t\t\ttasks = range(len(self.consensus_size[0]))\n\t\t\t\tlegend = []\n\t\t\t\tdest = toggles.OUTPUT_PATH + \"consensus_size\"+str(self.sim_num)\n\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\tlegend.append(\"Pred \" + str(predNum))\n\n\t\t\t\telse:\n\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\tlegend.append(\"Pred \" + str(predNum))\n\t\t\t\tgeneric_csv_write(dest+'.csv',self.consensus_size)\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tgraphGen.consensus_over_time(tasks, legend, self.consensus_size, dest)\n\t\t\t\tself.consensus_size=[]\n\n\t\t# TODO have this graph use the correct arrays\n\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\tselectivitiesLegend = []\n\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\tselectivitiesLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(self.num_tasks)]*len(toggles.CHOSEN_PREDS), self.pred_selectivities, selectivitiesLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"selectivities\" + str(self.sim_num) + \".png\",\n\t\t\t\t\t\t\t\tlabels = (\"Number of tasks completed in single simulation\", \"Predicate selectivities\"), scatter=True)\n\n\t\t# if this is the first time running a routing test\n\t\tif toggles.RUN_ITEM_ROUTING and not HAS_RUN_ITEM_ROUTING:\n\t\t\tHAS_RUN_ITEM_ROUTING = True\n\n\t\t\t# setup vars to save a csv + graph\n\t\t\tdest = toggles.OUTPUT_PATH+'_item_routing'+ str(toggles.SIMULATE_TIME)\n\t\t\tlabels = (str(predicates[0].question), str(predicates[1].question))\n\t\t\tdataToWrite = [labels,routingL[0],routingL[1]]\n\t\t\tgeneric_csv_write(dest+'.csv',dataToWrite) # saves a csv\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \"+dest+'.csv'\n\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\tgraphGen.item_routing(routingL[0],routingL[1], labels, dest)\n\n\n\n\t\t# if we're multi routing\n\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\tROUTING_ARRAY.append(routingC) #add the new counts to our running list of counts\n\n\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\tself.num_tasks_array.append(self.num_tasks)\n\n\t\tsim_end = time.time()\n\t\tsim_time = sim_end - sim_start\n\t\tself.run_sim_time = sim_time\n\t\treturn",
"def do_multiply(self, mul: int, *nums: int):\n for num in nums:\n print(mul * num, file=self.stdout)",
"def num_func_mapper(nums, funs):\n pass",
"def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials",
"def product(*nums):\n\treturn reduce((lambda x, y: x * y), nums)",
"def run_and_join():\n daemon = threading.Thread(target=print_numbers, args=[10, 1, \"\"])\n daemon.daemon = True\n daemon.start()\n daemon.join()\n print(\"After the join\")",
"def run_daemon_thread():\n daemon = threading.Thread(target=print_numbers, args=[100, 1, \"\"])\n daemon.daemon = True\n daemon.start()",
"def main():\n pool = Pool(processes=50)\n results = pool.imap_unordered(experiment, range(50), chunksize=1)\n\n # Output\n offset = 1\n # for i, (data_surv, data_order, data_ctrl) in enumerate(results):\n for i, (data_surv, data_ctrl) in enumerate(results):\n with open(f'../data/reproductive_barrier/hybrid_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_surv:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n with open(f'../data/reproductive_barrier/order_of_incompatibility/experiment_{i+offset}.csv', 'w') as fp:\n for x in data_order:\n fp.write('%d,' % int(x[0]) + ','.join(map(str, x[1:])) + '\\n')\n\n with open(f'../data/reproductive_barrier/control_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_ctrl:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n return",
"def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):\n with Timer('\\t{0} ({1}) completed in'.format(process_name, str(func))):\n pool = Pool(cpus)\n vals = pool.map(func, iterable)\n pool.close()\n return vals",
"def parallel_map(\n task,\n values,\n task_args=None,\n task_kwargs=None,\n num_cpus=None,\n progress_bar=None,\n):\n # TODO: if QuTiP's parallel_map catches up, we can remove this function,\n # and put QuTiP's parallel_map into __all__ to maintain krotov's interface.\n if task_args is None:\n task_args = ()\n if task_kwargs is None:\n task_kwargs = {}\n\n if num_cpus is None:\n num_cpus = multiprocessing.cpu_count()\n\n if progress_bar is None:\n progress_bar = BaseProgressBar()\n if progress_bar is True:\n progress_bar = TextProgressBar()\n\n progress_bar.start(len(values))\n nfinished = [0]\n\n def _update_progress_bar(x):\n nfinished[0] += 1\n progress_bar.update(nfinished[0])\n\n if USE_LOKY:\n Executor = LokyReusableExecutor\n if USE_THREADPOOL_LIMITS:\n Executor = partial(\n LokyReusableExecutor,\n initializer=_process_threadpool_limits_initializier,\n )\n else:\n Executor = ProcessPoolExecutor\n\n _threadpool_limits = _no_threadpool_limits\n if USE_THREADPOOL_LIMITS:\n _threadpool_limits = threadpool_limits\n\n with _threadpool_limits(limits=1):\n with Executor(max_workers=num_cpus) as executor:\n jobs = []\n try:\n for value in values:\n args = (value,) + tuple(task_args)\n job = executor.submit(task, *args, **task_kwargs)\n job.add_done_callback(_update_progress_bar)\n jobs.append(job)\n res = [job.result() for job in jobs]\n except KeyboardInterrupt as e:\n raise e\n\n progress_bar.finished()\n return res",
"def calc_stats(results, number, concurrency):\n\n all_res = results.all_res\n count = len(all_res)\n\n amax = np.amax(all_res)\n amin = np.amin(all_res)\n\n return {\n \"rps\": len(all_res) / float(results.total_time),\n \"mean\": np.mean(all_res),\n \"min\": amin,\n \"max\": amax,\n \"amp\": float(amax - amin),\n \"median\": np.median(all_res),\n \"stdev\": np.std(all_res),\n \"perc_95\": np.percentile(all_res, 95),\n \"perc_80\": np.percentile(all_res, 80),\n \"failed\": number - count,\n \"total_time\": results.total_time,\n \"count\": count,\n \"number\": number,\n \"concurrency\": concurrency,\n \"server\": results.server,\n }",
"def main():\n accessCount = int(input(\"Enter the number of accesses: \"))\n numWriters = int(input(\"Enter the number of writers: \"))\n numReaders = int(input(\"Enter the number of readers: \"))\n\n sleepMax = 4\n\n counter = Counter(0)\n print(\"counter defined at:\", str(counter.count))\n cell = SharedCell(counter)\n print(\"shared counter data:\", str(cell.data.count))\n\n writerList = []\n readerList = []\n for cnt in range(numWriters):\n writerList.append(Writer(cell, accessCount, sleepMax, cnt + 1))\n for cnt in range(numReaders):\n readerList.append(Reader(cell, accessCount, sleepMax, cnt + 1))\n\n print(\"Starting the threads\")\n for writer in writerList:\n writer.start()\n for reader in readerList:\n reader.start()",
"def primenumbers(number, recurv, templist):\n for j in recurv:\n templist = [i for i in templist if i % j != 0]\n\n for x in templist:\n if not recurv:\n if number % x == 0:\n recurv.append(x)\n primenumbers(number, recurv, templist)\n if number % x == 0 and x > recurv[-1]:\n recurv.append(x)\n '''while tempnumber != 1 and tempnumber in recurv:\n if tempnumber % x == 0:\n tempnumber = number / x\n recurv.append(x)'''\n answer = functools.reduce(lambda x, y: x * y, recurv)\n print('answer:')\n print(answer)\n print(number)\n if answer == number:\n print('worked')\n break\n else:\n print('hi')\n primenumbers(number, recurv, templist)\n return recurv",
"def test_worker_produces_some_results(self):\n # 10000 is an interesting case as in the original implementation it caused stack overflow\n VENTILATE_COUNT = 4\n for pool in [DummyPool(), ThreadPool(1)]:\n pool.start(PreprogrammedReturnValueWorker, [[], [], [42], []])\n for _ in range(VENTILATE_COUNT):\n pool.ventilate('not_important')\n\n self.assertEqual(42, pool.get_results())\n with self.assertRaises(EmptyResultError):\n pool.get_results()\n\n pool.stop()\n pool.join()",
"def _parallel_fit_eval(process_number, data, clfs, evaluators, scoring):\n clfs[process_number].fit(data, process_number)\n\n results = dict()\n\n results[\"train_score_\" + str(process_number)] = (clfs[process_number].returnTrainingScores())\n\n evaluators[process_number].evaluate(data, process_number, scoring)\n\n for key in evaluators[process_number].results.keys():\n results[key + '_test_score_' + str(process_number)] = evaluators[process_number].results[key]\n\n return results",
"def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return",
"def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):\n # proc_pool = Local variable proc_pool for Pool of processes\n # log_level = log_level\n # count_total = Total counter of items to distribute/play/indicate progress\n # len(itemslist)\n\n log_level = logging.getLogger().getEffectiveLevel()\n logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',\n __name__, a_fn.__name__, nprocs)\n # if log_level <= logging.WARNING:\n # if args is not None:\n # for i, arg in enumerate(args):\n # logging.info('===mprocessing f():[%s] arg[%s]={%s}',\n # a_fn.__name__, i, arg)\n\n # if __name__ == '__main__':\n logging.debug('===Multiprocessing=== Setting up logger!')\n # CODING No need for such low level debugging to stderr\n # multiprocessing.log_to_stderr()\n logger = multiprocessing.get_logger()\n logger.setLevel(log_level)\n\n logging.debug('===Multiprocessing=== Logging defined!')\n\n # ---------------------------------------------------------\n # chunk\n #\n # Divides an iterable in slices/chunks of size size\n #\n def chunk(iter_list, size):\n \"\"\"\n Divides an iterable in slices/chunks of size size\n\n >>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):\n ... len(a)\n 3\n 3\n 3\n 1\n \"\"\"\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())\n\n proc_pool = []\n lockdb = multiprocessing.Lock()\n running = multiprocessing.Value('i', 0)\n mutex = multiprocessing.Lock()\n count_total = len(itemslist)\n\n size = (len(itemslist) // int(nprocs)) \\\n if ((len(itemslist) // int(nprocs)) > 0) \\\n else 1\n\n logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',\n len(itemslist), int(nprocs), size)\n\n # Split itemslist in chunks to distribute accross Processes\n for splititemslist in chunk(itemslist, size):\n logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',\n len(splititemslist), size)\n logging.debug('===type(splititemslist)=[%s]', type(splititemslist))\n logging.debug('===Job/Task Process: Creating...')\n proc_task = multiprocessing.Process(\n target=a_fn, # argument function\n args=(lockdb,\n running,\n mutex,\n splititemslist,\n count_total,\n cur,))\n proc_pool.append(proc_task)\n logging.debug('===Job/Task Process: Starting...')\n proc_task.start()\n NPR.niceprint('===Job/Task Process: [{!s}] Started '\n 'with pid:[{!s}]'\n .format(proc_task.name,\n proc_task.pid),\n verbosity=3,\n logalso=logging.DEBUG)\n\n # Check status of jobs/tasks in the Process Pool\n if log_level <= logging.DEBUG:\n NPR.niceprint('===Checking Processes launched/status:',\n verbosity=3, logalso=logging.DEBUG)\n for j in proc_pool:\n NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),\n verbosity=3, logalso=logging.DEBUG)\n\n # Regularly print status of jobs/tasks in the Process Pool\n # Prints status while there are processes active\n # Exits when all jobs/tasks are done.\n while True:\n if not any(multiprocessing.active_children()):\n logging.debug('===No active children Processes.')\n break\n for prc in multiprocessing.active_children():\n logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())\n proc_task_active = prc\n NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n proc_task_active.join(timeout=60)\n 
NPR.niceprint('===Waited for 60s on '\n '{!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n # Wait for join all jobs/tasks in the Process Pool\n # All should be done by now!\n for j in proc_pool:\n j.join()\n NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'\n .format(j.name, j.is_alive(), j.exitcode),\n verbosity=2)\n\n logging.warning('===Multiprocessing=== pool joined! '\n 'All processes finished.')\n\n # Will release (set to None) the lockdb lock control\n # this prevents subsequent calls to\n # use_lock( nuLockDB, False)\n # to raise exception:\n # ValueError('semaphore or lock released too many times')\n logging.info('===Multiprocessing=== pool joined! '\n 'Is lockdb None? [%s]. Setting lockdb to None anyhow.',\n lockdb is None)\n lockdb = None\n\n # Show number of total files processed\n NPR.niceprocessedfiles(running.value, count_total, True)\n\n return True",
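The chunk helper buried inside mprocessing above is worth isolating: the two-argument iter(callable, sentinel) form keeps calling the lambda until it returns the empty tuple, which yields fixed-size slices of any iterable:

    from itertools import islice

    def chunk(iter_list, size):
        # yield tuples of `size` items; the final tuple may be shorter
        iter_list = iter(iter_list)
        return iter(lambda: tuple(islice(iter_list, size)), ())

    print([len(c) for c in chunk(range(10), 3)])  # [3, 3, 3, 1]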
"def PARALLEL_worker_mc_inv(procnum, num_samples_per_processor, inversion_type, M_amplitude, green_func_array, real_data_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, return_dict_MTs, return_dict_similarity_values_all_samples, return_dict_shift_idxs, return_dict_MT_single_force_rel_amps, return_dict_medium_1_medium_2_rel_amp_ratios, invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels, num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n print(\"Processing for process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")\n \n # Define temp data stores for current process:\n tmp_MTs = np.zeros((len(green_func_array[0,:,0]), num_samples_per_processor), dtype=float)\n tmp_similarity_values_all_samples = np.zeros(num_samples_per_processor, dtype=float)\n tmp_shift_idxs_all_samples = []\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_MT_single_force_rel_amps = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n tmp_medium_1_medium_2_rel_amp_ratios = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_frac_medium_2_diff_phases_dict = {} # Dictionary for temp storing of phase fractions of medium 1\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = np.zeros((num_samples_per_processor, 3), dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = []\n \n # Sort greens function storage if processing for multiple media:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n green_func_array_total_both_media = green_func_array.copy()\n \n # 3. Loop over samples, checking how well a given MT sample synthetic wavefrom from the forward model compares to the real data:\n for i in range(num_samples_per_processor):\n # Generate random medium amplitude ratio and associated greens functions (if required):\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n # If want to invert for ratio of meduim 1 to medium 2 separately for different phases:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_2_diff_phases_dict[\"P\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"S\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"surface\"] = np.random.uniform(0.0, 1.0)\n # Generate associated greens functions:\n green_func_array = np.zeros(np.shape(green_func_array_total_both_media[:,:,:,0]), dtype=float)\n # Loop over greens function for each station-phase:\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_2 = tmp_frac_medium_2_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array[j, :, :] = (1. - tmp_frac_medium_2)*green_func_array_total_both_media[j,:,:,0] + tmp_frac_medium_2*green_func_array_total_both_media[j,:,:,1] \n # Otherwise generate single fraction value and associated greens functions:\n else:\n frac_medium_2 = np.random.uniform(0.0, 1.0)\n green_func_array = (1. 
- frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # 4. Generate synthetic waveform for current sample:\n # Vary moment amplitude randomly if specified:\n if invert_for_relative_magnitudes_switch:\n M_amplitude_exp_factor = np.random.uniform(low=rel_exp_mag_range[0], high=rel_exp_mag_range[1])\n M_amplitude = 10.**M_amplitude_exp_factor\n # And generate waveform from source mechanism tensor:\n if inversion_type==\"full_mt\":\n MT_curr_sample = generate_random_MT()*M_amplitude # Generate a random MT sample\n elif inversion_type==\"full_mt_Lune_samp\":\n MT_curr_sample = generate_random_MT_Lune_samp()*M_amplitude # Generate a random MT sample, sampled uniformly in Lune space\n elif inversion_type==\"DC\":\n MT_curr_sample = generate_random_DC_MT()*M_amplitude # Generate a random DC sample\n elif inversion_type==\"single_force\":\n MT_curr_sample = generate_random_single_force_vector()*M_amplitude # Generate a random single force sample\n elif inversion_type == \"DC_single_force_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_coupled_tensor() # Generate a random DC-single-force coupled sample, with associated relative amplitude of DC to single force\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_single_force_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_crack_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_crack_coupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"single_force_crack_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_single_force_crack_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n synth_waveform_curr_sample = forward_model(green_func_array, MT_curr_sample) # Note: Greens functions must be of similar amplitude units going into here...\n \n # 5. Compare real data to synthetic waveform (using variance reduction or other comparison metric), to assign probability that data matches current model:\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_waveform_curr_sample, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n \n # 6. 
Append results to data store:\n tmp_MTs[:,i] = MT_curr_sample[:,0]\n tmp_similarity_values_all_samples[i] = similarity_curr_sample\n tmp_shift_idxs_all_samples.append(list(shift_idxs))\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps[i] = random_DC_to_single_force_amp_frac\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,0] = tmp_frac_medium_2_diff_phases_dict[\"P\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,1] = tmp_frac_medium_2_diff_phases_dict[\"S\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,2] = tmp_frac_medium_2_diff_phases_dict[\"surface\"]\n else:\n tmp_medium_1_medium_2_rel_amp_ratios[i] = frac_medium_2\n \n if i % 10000 == 0:\n print(\"Processor number:\", procnum, \"- Processed for\",i,\"samples out of\",num_samples_per_processor,\"samples\")\n \n # 7. And convert misfit measure to likelihood function probability:\n tmp_similarity_values_all_samples = np.exp(-(1.-tmp_similarity_values_all_samples)/2.)\n \n # And return values back to script:\n return_dict_MTs[procnum] = tmp_MTs\n return_dict_similarity_values_all_samples[procnum] = tmp_similarity_values_all_samples\n return_dict_shift_idxs[procnum] = tmp_shift_idxs_all_samples\n return_dict_MT_single_force_rel_amps[procnum] = tmp_MT_single_force_rel_amps\n if num_phase_types_for_media_ratios>0:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios_multi_phases\n else:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios\n print(\"Finished processing process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")",
"def run(self):\n\t\t#Go through each number in the list\n\t\tfor number in self.numList:\n\t\t\t#record the count for this number\n\t\t\tself.permutationCountArray[self.count.value]=len(number)\n\t\t\t#increment total of numbers processed by this process\n\t\t\tself.count.value+=1",
"def multiply(numbers):\n counter = 0\n for num in numbers:\n counter *= num\n return counter",
"def test_get_submissions():\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each submission\n for x in threads:\n print(x.d_)",
"def double_nums(num_list):",
"def aliveworkers(workers):\n \n #ping everyone using threads\n threads=[]\n results={}\n output=threading.Lock()\n \n def threadcode(worker):\n worker=worker[:]\n logging.info(\"Pinging %r\" % (worker,))\n results[worker]=sshping(worker)\n logging.info (\"Worker %r is %s.\" % (worker, [\"down\",\"up\"][results[worker]]))\n \n for i,worker in enumerate(workers):\n threads.append(threading.Thread())\n threads[i].run=lambda: threadcode(worker)\n threads[i].start()\n threads[i].join(0.1)\n \n #wait for threads to finish\n for thread in threads:\n thread.join()\n \n aliveworkers=[worker for worker,result in results.items() if result==True]\n return aliveworkers",
"def _process_data(f, work_queue, results_queue):\n for element in iter(work_queue.get, FINISHED):\n try:\n results_queue.put(f(element))\n except Exception, work_error:\n LOG.critical('parallel_pc Error: {0}\\n\\n\\tconfig settings {1}\\n'.format(work_error, element))\n results_queue.put(FINISHED)",
"def worker(self, q, return_dict):\n pid = os.getpid()\n while True:\n qqq = q.get()\n if qqq == 'DONE':\n # print('proc =', os.getpid())\n break\n\n (idx, d) = qqq\n mol_id = d[0]\n smi = d[1]\n # print screening processing in every pout step\n if self.pout != 0:\n if idx % self.pout == self.pout-1:\n print(\"processing: \", idx+1, flush=True)\n result_dict = self.simulation_process(idx, mol_id, smi, pid)\n return_dict[idx] = result_dict",
"def _learn_individual_mixture_weights(n_users, alpha, multinomials, max_iter, tol, val_mat, prior_strength, num_proc):\n lls = np.ones(n_users)\n pis = np.tile(alpha, n_users).reshape(n_users, len(multinomials))\n pis = normalize(pis, 'l1', axis=1) # pi's for each user.\n\n log.info('Doing individual weights with %d proc' % num_proc)\n mix_weights = []\n alpha *= prior_strength\n if any(alpha < 1):\n alpha += 1\n\n # multi-process. Essentially calls _mp_learn_user_mix for a set of users.\n batch_size = int(np.ceil(1. * n_users / num_proc)) # how many users per process\n args = (alpha, multinomials, val_mat, max_iter, tol)\n uids = range(n_users)\n queue = Queue()\n num_eof = 0\n proc_pool = []\n\n # set-up the processes\n for i in range(num_proc):\n p_uids = uids[i * batch_size:(i + 1) * batch_size] # define which users this process will handle.\n if len(p_uids) == 0:\n break\n proc = Process(target=_mp_learn_user_mix, args=(queue, p_uids, args))\n proc_pool.append(proc)\n\n # start the processes\n [proc.start() for proc in proc_pool]\n\n # collect end tokens\n while num_eof < len(proc_pool):\n resp = queue.get()\n if type(resp) == str:\n num_eof += 1\n else:\n mix_weights.append(resp)\n [proc.join() for proc in proc_pool]\n queue.close()\n # end multi-process\n\n for id, u_mix_weights, u_ll in mix_weights:\n pis[id] = np.array(u_mix_weights)\n lls[id] = u_ll\n\n mask = np.where(lls != 1)\n\n lls = lls[mask] * np.squeeze(np.array(val_mat.sum(axis=1)))[mask]\n event_ll = np.sum(lls) / np.sum(val_mat)\n\n return pis, event_ll",
"def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)",
"def run_calculation():\n\n print(\"Creating %d-process pool\" % mp.cpu_count())\n\n pool = mp.Pool(mp.cpu_count())\n\n f = h5py.File('/testdata/mandelbrot.hdf5', 'w')\n\n print(\"Creating output dataset with shape %s x %s\" % (NX, NY))\n\n dset = f.create_dataset('mandelbrot', (NX, NY), 'i')\n dset.attrs['XSTART'] = XSTART\n dset.attrs['YSTART'] = YSTART\n dset.attrs['XEXTENT'] = XEXTENT\n dset.attrs['YEXTENT'] = YEXTENT\n\n result = pool.imap(compute_row, (x * xincr for x in range(NX)))\n\n for idx, arr in enumerate(result):\n if idx % 25 == 0: print(\"Recording row %s\" % idx)\n dset[idx] = arr\n\n print(\"Closing HDF5 file\")\n\n f.close()\n\n print(\"Shutting down process pool\")\n\n pool.close()\n pool.join()",
"def main():\n\tusers = deque([])\n\tthreads = []\n\tprint(\"Starting with: %d \" % SO_FAR)\n\ttry:\n\t\tcursor.execute(\"SET SESSION net_read_timeout = 3600\")\n\t\tcursor.execute(\"SELECT user_id, screen_name FROM `test`.`new_temp` WHERE listed_count > 10 LIMIT %d OFFSET %d\" % (NUM_USERS, SO_FAR))\n\t\tfor row in cursor:\n\t\t\tusers.append((int(row[0]), row[1]))\n\t\tfor t in range(0, NO_THREADS):\n\t\t\tt = Worker(users)\n\t\t\tthreads.append(t)\n\t\t\tt.start()\n\t\tfor t in threads:\n\t\t\tt.join()\n\t\twith open('twitter_get_lists_for_user.txt', 'w') as f:\n\t\t\tf.write(str(count.value))\n\t\tf.close()\n\t\tsys.exit(0)\n\texcept Exception as e:\n\t\tprint e\n\tfinally:\n\t\tcnx.close()",
"def tmap(f, seq_args, num_workers=20, worker_queue=None, wait=True, stop_on_error=True):\n\n if worker_queue:\n wq = worker_queue\n else:\n # see if we have a global queue to work with.\n if _wq:\n wq = _wq\n else:\n if num_workers == 0:\n return map(f, seq_args)\n\n wq = WorkerQueue(num_workers)\n\n # we short cut it here if the number of workers is 0.\n # normal map should be faster in this case.\n if len(wq.pool) == 0:\n return map(f, seq_args)\n\n # print(\"queue size:%s\" % wq.queue.qsize())\n\n # TODO: divide the data (seq_args) into even chunks and\n # then pass each thread a map(f, equal_part(seq_args))\n # That way there should be less locking, and overhead.\n\n results = []\n for sa in seq_args:\n results.append(FuncResult(f))\n wq.do(results[-1], sa)\n\n # wq.stop()\n\n if wait:\n # print(\"wait\")\n wq.wait()\n # print(\"after wait\")\n # print(\"queue size:%s\" % wq.queue.qsize())\n if wq.queue.qsize():\n raise RuntimeError(\"buggy threadmap\")\n # if we created a worker queue, we need to stop it.\n if not worker_queue and not _wq:\n # print(\"stopping\")\n wq.stop()\n if wq.queue.qsize():\n um = wq.queue.get()\n if not um is STOP:\n raise RuntimeError(\"buggy threadmap\")\n\n # see if there were any errors. If so raise the first one. This matches map behaviour.\n # TODO: the traceback doesn't show up nicely.\n # NOTE: TODO: we might want to return the results anyway? This should be an option.\n if stop_on_error:\n error_ones = list(filter(lambda x: x.exception, results))\n if error_ones:\n raise error_ones[0].exception\n\n return map(lambda x: x.result, results)\n return [wq, results]",
"def test_compute_workload(self):\r\n\r\n spread = [1.23, 0.5, 1.27]\r\n num_cores = 3\r\n num_flows = 11\r\n result = compute_workload(num_cores, num_flows, spread)\r\n self.assertEqual(result, [4, 2, 5])",
"def _multitasking_fake(task_iter, **kwargs):\n time_list = []\n if isinstance(task_iter, dict):\n out_iter = {}\n iter_type = 'dict'\n elif isinstance(task_iter, list):\n out_iter = [None] * len(task_iter)\n iter_type = 'list'\n else:\n raise ValueError('Param `task_iter` must be a list or a dict object.')\n if iter_type == 'dict':\n iter_items = task_iter.items()\n else:\n iter_items = enumerate(task_iter)\n for k, v in iter_items:\n assert len(\n v) <= 3, 'Length of list as the value in dict cant be longer than 3.'\n v = {\n 1: list(v) + [(), {}],\n 2: list(v) + [{}],\n 3: v,\n }.get(len(v))\n func, args, kws = v\n start = datetime.datetime.now()\n out_iter[k] = func(*args, **kws)\n end = datetime.datetime.now()\n time_list.append((k, (end - start).microseconds))\n time_list.sort(key=operator.itemgetter(1), reverse=True)\n all_time = float(sum([_[1] for _ in time_list]))\n print '*' * 10, 'cost:', all_time / 1e6, '(S)', '*' * 10\n for t in time_list:\n print t[0], ':', t[1], '>', str(round(t[1] / all_time * 100, 2)) + '%'\n return out_iter",
"def parallel_stats_pairs(f): \n\n if path.isfile(runDir + \"/data/\"+f+\"_pairs.csv\") and path.isfile(runDir + \"/data/\"+f+\"_counts.csv\"):\n return\n\n # Get a worker number to position the progress bar\n global idxQueue\n thr_idx = idxQueue.get()\n\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} p_stats_pairs({f})\")\n\n chain_id_list = mappings_list[f]\n data = []\n sqldata = []\n for cid in tqdm(chain_id_list, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: {f} basepair types\", unit=\"chain\",leave=False):\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n # Get comma separated lists of basepairs per nucleotide\n interactions = pd.DataFrame(\n sql_ask_database(conn, f\"SELECT nt_code as nt1, index_chain, paired, pair_type_LW FROM nucleotide WHERE chain_id='{cid}';\"), \n columns = [\"nt1\", \"index_chain\", \"paired\", \"pair_type_LW\"]\n )\n # expand the comma-separated lists in real lists\n expanded_list = pd.concat([ pd.DataFrame({ 'nt1':[ row[\"nt1\"] for x in row[\"paired\"].split(',') ],\n 'index_chain':[ row['index_chain'] for x in row[\"paired\"].split(',') ],\n 'paired':row['paired'].split(','), \n 'pair_type_LW':row['pair_type_LW'].split(',') \n }) \n for _, row in interactions.iterrows() \n ]).reset_index(drop=True)\n\n # Add second nucleotide\n nt2 = []\n for _, row in expanded_list.iterrows():\n if row.paired in ['', '0']:\n nt2.append('')\n else:\n try:\n n = expanded_list[expanded_list.index_chain == int(row.paired)].nt1.tolist()[0]\n nt2.append(n)\n except IndexError:\n print(cid, flush=True)\n try:\n expanded_list[\"nt2\"] = nt2\n except ValueError:\n print(cid, flush=True)\n print(expanded_list, flush=True)\n return 0,0\n\n # keep only intra-chain interactions\n expanded_list = expanded_list[ ~expanded_list.paired.isin(['0','']) ]\n expanded_list[\"nts\"] = expanded_list[\"nt1\"] + expanded_list[\"nt2\"]\n \n # Get basepair type\n expanded_list[\"basepair\"] = np.where(expanded_list.nts.isin([\"AU\",\"UA\"]), \"AU\",\n np.where(expanded_list.nts.isin([\"GC\",\"CG\"]), \"GC\",\n np.where(expanded_list.nts.isin([\"GU\",\"UG\"]), \"Wobble\",\"Other\")\n )\n )\n expanded_list = expanded_list[[\"basepair\", \"pair_type_LW\"]]\n\n # Update the database\n vlcnts = expanded_list.pair_type_LW.value_counts()\n sqldata.append( ( vlcnts.at[\"cWW\"]/2 if \"cWW\" in vlcnts.index else 0, \n vlcnts.at[\"cWH\"] if \"cWH\" in vlcnts.index else 0, \n vlcnts.at[\"cWS\"] if \"cWS\" in vlcnts.index else 0, \n vlcnts.at[\"cHH\"]/2 if \"cHH\" in vlcnts.index else 0, \n vlcnts.at[\"cHS\"] if \"cHS\" in vlcnts.index else 0, \n vlcnts.at[\"cSS\"]/2 if \"cSS\" in vlcnts.index else 0, \n vlcnts.at[\"tWW\"]/2 if \"tWW\" in vlcnts.index else 0, \n vlcnts.at[\"tWH\"] if \"tWH\" in vlcnts.index else 0, \n vlcnts.at[\"tWS\"] if \"tWS\" in vlcnts.index else 0, \n vlcnts.at[\"tHH\"]/2 if \"tHH\" in vlcnts.index else 0, \n vlcnts.at[\"tHS\"] if \"tHS\" in vlcnts.index else 0, \n vlcnts.at[\"tSS\"]/2 if \"tSS\" in vlcnts.index else 0, \n int(sum(vlcnts.loc[[ str(x) for x in vlcnts.index if \".\" in str(x)]])/2), \n cid) )\n\n data.append(expanded_list)\n\n # Update the database\n with sqlite3.connect(runDir + \"/results/RNANet.db\", isolation_level=None) as conn:\n conn.execute('pragma journal_mode=wal') # Allow multiple other readers to ask things while we execute this writing query\n sql_execute(conn, \"\"\"UPDATE chain SET pair_count_cWW = ?, pair_count_cWH = ?, pair_count_cWS = ?, pair_count_cHH = ?,\n pair_count_cHS = ?, 
pair_count_cSS = ?, pair_count_tWW = ?, pair_count_tWH = ?, pair_count_tWS = ?, \n pair_count_tHH = ?, pair_count_tHS = ?, pair_count_tSS = ?, pair_count_other = ? WHERE chain_id = ?;\"\"\", many=True, data=sqldata, warn_every=0)\n\n # merge all the dataframes from all chains of the family\n expanded_list = pd.concat(data)\n\n # Count each pair type\n vcnts = expanded_list.pair_type_LW.value_counts()\n\n # Add these new counts to the family's counter\n cnt = Counter()\n cnt.update(dict(vcnts))\n\n # Create an output DataFrame\n f_df = pd.DataFrame([[ x for x in cnt.values() ]], columns=list(cnt), index=[f])\n f_df.to_csv(runDir + f\"/data/{f}_counts.csv\")\n expanded_list.to_csv(runDir + f\"/data/{f}_pairs.csv\")\n \n idxQueue.put(thr_idx) # replace the thread index in the queue\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} finished\")",
"def split_calculation_to_threads(iterable, func, args):\n args_list = []\n batches = list(split_iterable_to_batches(iterable))\n for batch in batches:\n temp = list(args)\n temp.insert(0, batch)\n args_list.append(tuple(temp))\n with Pool(NUM_THREADS) as p:\n results = p.starmap(func, args_list)\n return results",
"def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results",
"def multiplication_total_of(num_list):",
"def tickets(people):\n people= [100, 50, 25]",
"def run_multiprocessing(args, function):\n vcf_fn = args.data_file\n num_processes = args.num_threads\n if num_processes > 1:\n # Split the VCF into chunks\n callset = allel.read_vcf(vcf_fn, fields=[\"variants/CHROM\", \"variants/POS\"])\n pos_list = callset[\"variants/POS\"]\n chroms = callset[\"variants/CHROM\"]\n assert np.all(chroms == chroms[0])\n chrom = str(chroms[0])\n\n def get_chromosome_chunks(lst, num_processes):\n length = len(lst)\n n = math.ceil(length / num_processes)\n chunks = list()\n for index, i in enumerate(range(0, length, n)):\n if index != num_processes - 1:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[i + n])),\n )\n )\n else:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[-1])),\n )\n )\n return chunks\n\n chunks = get_chromosome_chunks(pos_list, num_processes)\n chunks_iter = iter(chunks)\n reports = list()\n completed_files = list()\n with multiprocessing.Pool(processes=num_processes, maxtasksperchild=10) as pool:\n for index, row in enumerate(pool.map(function, chunks_iter)):\n reports.append(row)\n print(\n \"Processed Chunk {}: {} with {} sites added.\".format(\n index, chunks[index][2], row[\"num_sites\"]\n )\n )\n if row[\"num_sites\"] > 0:\n completed_files.append(index)\n else:\n os.remove(args.output_file + str(index) + \"-lock\")\n\n # Combine reports and print\n master_report = reports[0]\n for report in reports[1:]:\n for var_type, val in report.items():\n master_report[var_type] += val\n print(master_report)\n\n # Combine sampledata files\n filenames = completed_files\n all_samples = []\n for name in filenames:\n all_samples.append(tsinfer.load(args.output_file + str(name)))\n os.remove(args.output_file + str(name))\n\n samples = all_samples[0].copy(args.output_file)\n samples.append_sites(*all_samples[1:])\n samples.finalise()\n assert np.all(np.diff(samples.sites_position[:]) > 0)\n\n else:\n raise ValueError",
"def prjEuler():\r\n #Constants\r\n NUMSTRING = ( \"73167176531330624919225119674426574742355349194934\"\r\n \"96983520312774506326239578318016984801869478851843\"\r\n \"85861560789112949495459501737958331952853208805511\"\r\n \"12540698747158523863050715693290963295227443043557\"\r\n \"66896648950445244523161731856403098711121722383113\"\r\n \"62229893423380308135336276614282806444486645238749\"\r\n \"30358907296290491560440772390713810515859307960866\"\r\n \"70172427121883998797908792274921901699720888093776\"\r\n \"65727333001053367881220235421809751254540594752243\"\r\n \"52584907711670556013604839586446706324415722155397\"\r\n \"53697817977846174064955149290862569321978468622482\"\r\n \"83972241375657056057490261407972968652414535100474\"\r\n \"82166370484403199890008895243450658541227588666881\"\r\n \"16427171479924442928230863465674813919123162824586\"\r\n \"17866458359124566529476545682848912883142607690042\"\r\n \"24219022671055626321111109370544217506941658960408\"\r\n \"07198403850962455444362981230987879927244284909188\"\r\n \"84580156166097919133875499200524063689912560717606\"\r\n \"05886116467109405077541002256983155200055935729725\"\r\n \"71636269561882670428252483600823257530420752963450\" )\r\n \r\n #defined items\r\n greatest_prod = 1\r\n euler_queue = fiveQueue()\r\n \r\n #code\r\n for numIter in NUMSTRING:\r\n if( euler_queue.push( numIter ) ):\r\n temp_prod = euler_queue.product()\r\n if( temp_prod > greatest_prod ):\r\n greatest_prod = temp_prod\r\n \r\n print \"The greatest product is %d\" % greatest_prod\r\n return",
"def parallelize(cores=None, fork=True, flatten=False, info=False, infoclass=InfoThreadProgressBar, init=None, *args, **kwargs):\n\tif cores == None:\n\t\tcores = multiprocessing.cpu_count()\n\tdef wrapper(f):\n\t\tdef execute(*multiargs):\n\t\t\tresults = []\n\t\t\tlen(list(zip(*multiargs)))\n\t\t\tN = len(multiargs[0])\n\t\t\tif info:\n\t\t\t\tprint(\"running %i jobs on %i cores\" % (N, cores))\n\t\t\ttaskQueue = queue.Queue(len(multiargs[0]))\n\t\t\t#for timenr in range(times):\n\t\t\t#\ttaskQueue.put(timenr)\n\t\t\tfor tasknr, _args in enumerate(zip(*multiargs)):\n\t\t\t\ttaskQueue.put((tasknr, list(_args)))\n\t\t\t#for timenr in range(times):\n\t\t\t#\tresult = f(*args, **kwargs)\n\t\t\t#\tresults.append(result)\n\t\t\texecutions = [Execution(taskQueue, fork, f, init, corenr, args, kwargs) for corenr in range(cores)]\n\t\t\tif info:\n\t\t\t\tinfoobj = infoclass(len(multiargs[0]), executions)\n\t\t\t\tinfoobj.start()\n\t\t\tfor i, execution in enumerate(executions):\n\t\t\t\texecution.setName(\"T-%d\" % i)\n\t\t\t\texecution.start()\n\t\t\t#if 1:\n\t\t\t#\twatchdog = Watchdog(executions)\n\t\t\t#\twatchdog.start()\n\t\t\terror = False\n\t\t\tfor execution in executions:\n\t\t\t\tlog(\"joining:\",execution.getName())\n\t\t\t\ttry:\n\t\t\t\t\texecution.join()\n\t\t\t\texcept BaseException:\n\t\t\t\t\terror = True\n\t\t\t\tresults.extend(execution.results)\n\t\t\t\tif execution.error:\n\t\t\t\t\terror = True \n\t\t\tif info:\n\t\t\t\tinfoobj.join()\n\t\t\tif error:\n\t\t\t\tprint(\"error\", file=sys.stderr)\n\t\t\t\tresults = None\n\t\t\t\traise Exception(\"error in one or more of the executors\")\n\t\t\telse:\n\t\t\t\tresults.sort(cmp=lambda a, b: cmp(a[0], b[0]))\n\t\t\t\tresults = [k[1] for k in results]\n\t\t\t\t#print \"bla\", results\n\t\t\t\tif flatten:\n\t\t\t\t\tflatresults = []\n\t\t\t\t\tfor result in results:\n\t\t\t\t\t\tflatresults.extend(result)\n\t\t\t\t\tresults = flatresults\n\t\t\treturn results\n\t\treturn execute\n\treturn wrapper",
"def thread_map(f, args_list, n_threads=None):\n if n_threads is None:\n n_threads = int(multiprocessing.cpu_count() / 2)\n pool = multiprocessing.pool.ThreadPool(processes=n_threads)\n return pool.map(f, args_list)",
"def main() -> None:\n\n task_results = {}\n for task in (Task.SINGLE_SEQUENCE, Task.MULTI_SEQUENCE):\n task_results[task] = []\n for category in CO3D_CATEGORIES[: (20 if task == Task.SINGLE_SEQUENCE else 10)]:\n for single_sequence_id in (\n (0, 1) if task == Task.SINGLE_SEQUENCE else (None,)\n ):\n category_result = evaluate_dbir_for_category(\n category, task=task, single_sequence_id=single_sequence_id\n )\n print(\"\")\n print(\n f\"Results for task={task}; category={category};\"\n + (\n f\" sequence={single_sequence_id}:\"\n if single_sequence_id is not None\n else \":\"\n )\n )\n pretty_print_nvs_metrics(category_result)\n print(\"\")\n\n task_results[task].append(category_result)\n _print_aggregate_results(task, task_results)\n\n for task in task_results:\n _print_aggregate_results(task, task_results)",
"def parallel_work(jobs, nr_of_threads):\n work_queue = Queue()\n result_queue = Queue()\n result = {}\n\n for job in jobs:\n work_queue.put(job)\n\n if nr_of_threads > len(jobs):\n nr_of_threads = len(jobs)\n\n for i in range(nr_of_threads):\n worker = Process(target=check_plugin, args=(work_queue,result_queue))\n worker.start()\n\n while len(result.keys()) < len(jobs):\n data = result_queue.get()\n\n if \" | \" in data[1]:\n (status, output) = data[1].split(\" | \")\n else:\n status = \"UNKNOWN\"\n output = data[1]\n\n result[data[0]] = {\"status\": status, \"output\": output}\n #print \"Host \" + data[0] + \" \" + status\n\n return result",
"def TNBP(nthreads, stopList = []):\n store = {}\n threads = []\n # create the threads\n for i in range(nthreads):\n stops = stopList[i::nthreads]\n t = Thread(target=NBP_list, args=(stops,store))\n threads.append(t)\n\n # start the threads\n [ t.start() for t in threads ]\n # wait for the threads to finish\n [ t.join() for t in threads ]\n return store",
"def compute_parallel(self, inputs, communicator):\n self.compute_sequential([inputs], [communicator])",
"def workersNeeded(k, m):\n # formula: k/m\n from math import ceil\n return ceil(float(k)/float(m))",
"def _worker(self, args):\n pass",
"def compute_penalty(self, spec_nums: Union[float, Iterable[float]], spec_kwrd: str) \\\n -> Union[float, List[float]]:\n raise NotImplementedError",
"def getResults(solver, minBit, maxBit, saveFile, noResults):\n\n for k in range(minBit, maxBit + 1, 2):\n for i in range(noResults):\n\n keys = generate_RSA.KeyGen(k) # initialise keys\n keys.generateKeys() # generate keys\n\n solver.setN(keys.n) # setup solver\n solver.setE(keys.e)\n\n solver.solve() # solve problem\n\n if solver.d == keys.d: # if we got it right\n resTime = resTime_C # update correct dictionaries\n resCount = resCount_C\n resSpace = resSpace_C\n else:\n resTime = resTime_W # else update wrong dictionaries\n resCount = resCount_W\n resSpace = resSpace_W\n\n if k not in resTime: # if we've not yet had a result for k\n resTime[k] = [solver.time, 1] # then set\n resSpace[k] = [solver.space, 1] # then set\n resCount[k] = [solver.count, 1]\n else:\n oldT, oldC = resTime[k] # keeps a running average\n newC = oldC + 1 # increment count\n newT = ((oldT * oldC) + solver.time) / newC # get new averagae\n resTime[k] = [newT, newC] # without storing all variables\n\n oldS, oldC = resSpace[k] # keeps a running average\n newS = ((oldS * oldC) + solver.space) / newC\n resSpace[k] = [newS, newC] # without storing all variables\n\n oldCount, oldC = resCount[k] # keeps a running average\n newCount = ((oldCount * oldC) + solver.count) / newC\n resCount[k] = [newCount, newC] # without storing all variables\n\n if i % 10 == 0:\n saveResults(saveFile) # every ten results save again",
"def test_thread_pool():\r\n thread_pool = ThreadPool()\r\n result = []\r\n\r\n def populate_result_task():\r\n result.extend([i for i in range(0, 10)])\r\n return\r\n\r\n thread_pool.add_task(populate_result_task)\r\n thread_pool.tasks.join()\r\n thread_pool.terminate_all_workers()\r\n assert result == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]",
"def create_and_launch_subprocesses(num_cpu, classify_warnings_fn, arg_groups,\n group_results):\n pool = multiprocessing.Pool(num_cpu)\n for cpu in range(num_cpu):\n proc_result = pool.map(classify_warnings_fn, arg_groups[cpu])\n if proc_result is not None:\n group_results.append(proc_result)\n return group_results",
"def model_multiprocess(reservoir_dicts, dual_lists, root, run_dict,\n perm_tups=None, cores=2, machine='laptop',\n parallel=False):\n sys.setrecursionlimit(5000000)\n if parallel:\n Parallel(n_jobs=cores)(\n delayed(NM08_model_loop)(root, run_dict, res_dict, dual_list,\n perm_tup, machine, 100, k+j+m)\n for j, res_dict in enumerate(reservoir_dicts)\n for k, dual_list in enumerate(dual_lists)\n for m, perm_tup in enumerate(perm_tups)\n )\n else:\n for r_dict in reservoir_dicts:\n NM08_model_loop(root, run_dict, r_dict, machine)\n return",
"def simplify_functions_parallel(expressions):\n manager = multiprocessing.Manager()\n return_dict = manager.dict()\n\n processes = []\n factorised_expressions = []\n for index, item in enumerate(expressions):\n # print(\"item \", item, \"index\", index)\n processes.append(multiprocessing.Process(target=bar, args=(item, index, return_dict)))\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n ## Showing the difference in an example:\n # print(return_dict)\n for i in sorted(return_dict.keys()):\n factorised_expressions.append(return_dict[i])\n return factorised_expressions",
"def process_file_metrics(root_dir, in_file_names, file_processors):\n manager = mp.Manager()\n file_metrics = manager.dict()\n\n parameters = [(root_dir, key, file_metrics, file_processors) for key in in_file_names]\n\n # main loop\n p = mp.Pool(max(1, mp.cpu_count() - 1))\n p.starmap(_process_file_metrics_parallel, parameters)\n p.close()\n p.join()\n\n return file_metrics",
"def run(self):\n\t\t## findMinError\n\t\tself.count = 0\n\t\tprint 'Starting Process type', self.ftype\n\t\tself.min_error = 1\n\t\t# it = 0\n\t\t#self.threadnum = min(500, len(self.pool))\n\t\trows = 3\n\t\tself.threadnum = (len(self.pool)+2)/3\n\t\trows = len(self.pool)/self.threadnum\n\t\tlist_rowlists = [self.pool[x:x+rows] for x in xrange(0, len(self.pool), rows)]\n\t\tmapper = SimpleMapReduce(self.MapFind, self.Reduce, num_workers=self.threadnum)\n\t\tprint 'before mapper'\n\t\tresult = mapper(list_rowlists)\n\t\tprint result\n\t\t#self.min_row, error_infor\n\n\t\tself.min_threshold = error_infor[0]\n\t\tself.min_error = error_infor[1]\n\t\tself.min_flag = error_infor[2]\n\t\t# it += 1\n\t\t# if it%10==0:\n\t\t# \tprint 'type'+str(self.ftype),\"{0:.1%}\".format(float(it)/len(self.pool)), ' search completed'\n\t\treturn",
"def main():\n model = sys.argv[1]\n maxfun = int(sys.argv[2])\n n_threads = int(sys.argv[3])\n\n # Validate input.\n assert maxfun >= 0, \"Maximum number of function evaluations cannot be negative.\"\n assert n_threads >= 1 or n_threads == -1, (\n \"Use -1 to impose no restrictions on maximum number of threads or choose a \"\n \"number higher than zero.\"\n )\n\n # Set number of threads\n os.environ[\"NUMBA_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"MKL_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"OMP_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"NUMEXPR_NUM_THREADS\"] = f\"{n_threads}\"\n\n # Late import of respy to ensure that environment variables are read by Numpy, etc..\n import respy as rp\n\n # Get model\n params, options = rp.get_example_model(model, with_data=False)\n\n # Simulate the data\n simulate = rp.get_simulate_func(params, options)\n df = simulate(params)\n\n # Get the criterion function and the parameter vector.\n crit_func = rp.get_log_like_func(params, options, df)\n\n # Run the estimation\n start = dt.datetime.now()\n\n for _ in range(maxfun):\n crit_func(params)\n\n end = dt.datetime.now()\n\n # Aggregate information\n output = {\n \"model\": model,\n \"maxfun\": maxfun,\n \"n_threads\": n_threads,\n \"start\": str(start),\n \"end\": str(end),\n \"duration\": str(end - start),\n }\n\n # Save time to file\n with open(\"scalability_results.txt\", \"a+\") as file:\n file.write(json.dumps(output))\n file.write(\"\\n\")",
"def rerank_mp(x2ys, x2cnt, x2xs, width, n_trans, num_workers):\n from multiprocessing import Pool\n\n shared_inputs = x2ys, x2cnt, x2xs, width, n_trans\n print(f\"Entering multiprocessing with {num_workers} workers...\"\n f\" (#words={len(x2ys)})\")\n with Pool(num_workers) as p:\n x2ys_cpe = dict(p.starmap(\n _rerank_mp,\n zip(x2ys.items(), it.repeat(shared_inputs)),\n ))\n return x2ys_cpe",
"def worker_func(queue_in, queue_out, model_type, hidden_size, novelty_use, env_name, noise_std, action_type):\r\n env = gym.make(env_name)\r\n \r\n cache = {} # to store population / networks\r\n \r\n while True:\r\n parents_seeds = queue_in.get()\r\n if parents_seeds == None:\r\n break\r\n new_cache = {}\r\n # for each network seeds \r\n for seeds in parents_seeds:\r\n # if seed history exist\r\n if len(seeds) > 1:\r\n net = cache.get(seeds[:-1])#\r\n # check if network already exists\r\n if net is not None:\r\n # if exist mutate on the new given seed -> the last in the list\r\n net = mutate(net, seeds[-1], noise_std)\r\n else:\r\n # if not exist build the net with the seed history\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, action_type)\r\n else:\r\n # since no seed history exist -> build network\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, action_type)\r\n \r\n # saves the networks in a cache \r\n new_cache[seeds] = net\r\n # evaluate new network mutation\r\n reward, steps, bc = evaluate(env, net)\r\n queue_out.put(OutputItem(seeds=seeds, reward=reward, steps=steps, bc=bc))\r\n # after evaluating all seeds the worker sets the new_cache with saved nets to the current cache\r\n cache = new_cache",
"def _create_workers(self, start=True):\n\n bearer = api_client.get_bearer_token()\n account = api_client.account_id_from_jwt(bearer.value)\n LOGGER.info(\"account: %s\", account)\n\n project = self.args.get(\"project\") or None\n LOGGER.info(\"project: %s\", project)\n\n location = self.args.get(\"location\") or None\n LOGGER.info(\"location: %s\", location)\n\n thread_count = self.args.get(\"thread_count\") or 1\n LOGGER.info(\"thread_count: %s\", thread_count)\n\n # CREATE WORKER PROCESSES\n workers = {}\n\n # Create DownloadWorker processes\n for _ in range(thread_count):\n\n # Create a process-safe run_state object for controlling process\n # run_state = multiprocessing.Array('c', \"stoppingorstuff\")\n global RUN_STATE\n wrk = UploaderWorker(\n RUN_STATE,\n self._results_queue,\n account=account,\n project=project,\n location=location)\n workers[wrk] = RUN_STATE\n\n log_history_wrk = self.create_log_history()\n\n workers[log_history_wrk] = RUN_STATE\n\n if start:\n for wrkr in workers:\n wrkr.start()\n time.sleep(.5)\n\n return workers",
"def compute(self, dset1, dset2, bins1, bins2, sites=None, max_workers=None,\n chunks_per_worker=5):\n if max_workers is None:\n max_workers = os.cpu_count()\n\n slices = self._get_slices(dset1, dset2, sites,\n chunks_per_slice=chunks_per_worker)\n if len(slices) == 1:\n max_workers = 1\n\n jpd = {}\n if max_workers > 1:\n msg = ('Computing the joint probability distribution between {} '\n 'and {} in parallel using {} workers'\n .format(dset1, dset2, max_workers))\n logger.info(msg)\n\n loggers = [__name__, 'rex']\n with SpawnProcessPool(max_workers=max_workers,\n loggers=loggers) as exe:\n futures = []\n for sites_slice in slices:\n future = exe.submit(self.compute_joint_pd,\n self.res_h5, dset1, dset2,\n bins1, bins2,\n res_cls=self.res_cls,\n hsds=self._hsds,\n sites_slice=sites_slice)\n futures.append(future)\n\n for i, future in enumerate(as_completed(futures)):\n jpd.update(future.result())\n logger.debug('Completed {} out of {} workers'\n .format((i + 1), len(futures)))\n\n else:\n msg = ('Computing the joint probability distribution between {} '\n 'and {} in serial.'\n .format(dset1, dset2))\n logger.info(msg)\n for i, sites_slice in enumerate(slices):\n jpd.update(self.compute_joint_pd(\n self.res_h5, dset1, dset2,\n bins1, bins2,\n res_cls=self.res_cls,\n hsds=self._hsds,\n sites_slice=sites_slice))\n logger.debug('Completed {} out of {} sets of sites'\n .format((i + 1), len(slices)))\n\n gc.collect()\n log_mem(logger)\n bins1 = self._make_bins(*bins1)\n bins2 = self._make_bins(*bins2)\n index = np.meshgrid(bins1[:-1], bins2[:-1], indexing='ij')\n index = np.array(index).T.reshape(-1, 2).astype(np.int16)\n index = pd.MultiIndex.from_arrays(index.T, names=(dset1, dset2))\n jpd = pd.DataFrame({k: v.flatten(order='F') for k, v\n in jpd.items()}, index=index).sort_index(axis=1)\n\n return jpd",
"def pool_job(self, func, inputs):\n\n if self.flag_use_mp:\n output = zip(*self._pool.map(func, inputs))\n self._consolidate_mp_logs()\n else:\n logger.info(\"Performing task serially\")\n output = self.serial_job(func, inputs)\n\n return output",
"def _threaded(self, *args, **kwargs):\n\n for target in self.targets:\n result = target(*args, **kwargs)\n self.queue.put(result)",
"def main():\r\n algos = [merge_sort, quick_sort, heap_sort, radix_sort, bucket_sort_general]\r\n array_sizes = [5000, 10000, 15000, 20000, 50000, 75000, 100000, 150000]\r\n results = {algo.__name__: [] for algo in algos}\r\n for algo in algos:\r\n result = []\r\n for size in array_sizes:\r\n time = test(algo, size)\r\n result.append(time)\r\n results[algo.__name__] = result\r\n\r\n display_results(results, array_sizes)",
"async def run_mpc(self) -> Dict[str, Dict[Metric, int]]:\n pass",
"def run_serially(sorted_nodes, fun):\n results = {}\n for node in sorted_nodes:\n result = fun(node)\n results[node] = result\n return results"
] | [
"0.79976565",
"0.7521205",
"0.56689334",
"0.56155014",
"0.5509319",
"0.5476741",
"0.53603804",
"0.53469855",
"0.53294677",
"0.53245115",
"0.53092897",
"0.53065777",
"0.53054893",
"0.5302376",
"0.5263724",
"0.5257976",
"0.51859593",
"0.5184734",
"0.5166715",
"0.51412565",
"0.5122664",
"0.51180834",
"0.5105005",
"0.50843304",
"0.5064053",
"0.5057585",
"0.50565886",
"0.5044967",
"0.5044508",
"0.5011129",
"0.500742",
"0.5005744",
"0.49976656",
"0.49708337",
"0.4967882",
"0.49638972",
"0.49607188",
"0.49558154",
"0.4945572",
"0.4945323",
"0.49452454",
"0.49365488",
"0.49192",
"0.4905375",
"0.48935845",
"0.48664263",
"0.4858595",
"0.4848955",
"0.48393023",
"0.48233333",
"0.48231003",
"0.48206204",
"0.48095798",
"0.4808717",
"0.47984678",
"0.4797679",
"0.47927007",
"0.47873732",
"0.4785818",
"0.47734335",
"0.4745518",
"0.4745014",
"0.47371072",
"0.47289205",
"0.47180682",
"0.47154692",
"0.47140446",
"0.4702929",
"0.47019264",
"0.4700661",
"0.47005975",
"0.46905324",
"0.46886903",
"0.4684601",
"0.4682939",
"0.467521",
"0.46694213",
"0.46629876",
"0.4661418",
"0.46609288",
"0.46554247",
"0.46507826",
"0.46487808",
"0.46445653",
"0.46411318",
"0.46405688",
"0.46317804",
"0.46302631",
"0.46280035",
"0.4620838",
"0.46114945",
"0.46097344",
"0.46078652",
"0.46071768",
"0.46063834",
"0.46047077",
"0.46038836",
"0.4602838",
"0.45997566",
"0.4595259"
] | 0.8538493 | 0 |
The worker function, invoked in a process. 'nums' is a list of numbers to factor. The results are placed in a dictionary that's pushed to a queue. | def worker(nums, out_q):
    outdict = {}
    print(threading.current_thread().name)
    print("pid:", os.getpid())
    print("data size:", len(nums))
    for n in nums:
        outdict[n] = factorize_naive(n)  # factorize_naive: assumed defined at module level
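    # The whole dict is pushed at once so the parent can merge per-chunk results.
    # Minimal driver sketch (an illustrative assumption, not part of the original
    # snippet; 'nprocs' and the striped chunking are hypothetical):
    #
    #   out_q = multiprocessing.Queue()
    #   procs = [multiprocessing.Process(target=worker, args=(nums[i::nprocs], out_q))
    #            for i in range(nprocs)]
    #   for p in procs:
    #       p.start()
    #   resultdict = {}
    #   for _ in procs:
    #       resultdict.update(out_q.get())  # drain results before join() to avoid blocking
    #   for p in procs:
    #       p.join()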
    out_q.put(outdict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def worker(nums, outdict):\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n for n in nums:\n outdict[n] = factorize_naive(n)",
"def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)",
"def worker(file_paths, out_queue):\n\t\toutdict = {}\n\t\tfor path in file_paths:\n\t\t\toutdict[n] = run_muscle(path)\n\t\tout_queue.put(outdict)",
"def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return",
"def putting_on_queue(*args):\n results.put(main_func(*args))",
"def __init__ (self, *funcs_workers):\n self.numpools = len(funcs_workers)\n self.numworkerslist = []\n self.queues = [Queue() for _ in xrange(self.numpools+1)]\n for i, (func, numworkers) in enumerate(funcs_workers):\n self.numworkerslist.append(numworkers)\n for _ in xrange(numworkers):\n Process(target=worker, args=(\n func, self.queues[i], self.queues[i+1]\n )).start()",
"def sweep_multiprocessing(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multiprocessing preparation\n ##############################\n core = 10\n points = points//core*core # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,core)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... =1\n a = np.matrix(a)\n a = a.T\n\n done_queue = multiprocessing.Queue()\n process_list = []\n for x in range(core):\n process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))\n\n tStart = time.time()\n print 'start'\n for p in process_list:\n p.start()\n\n stop_num = 0\n while stop_num != core:\n a = done_queue.get()\n if a == 'STOP':\n stop_num += 1\n else:\n self.result[a[0]] = a[1]\n prog.increment_amount()\n print prog, '\\r',\n sys.stdout.flush()\n\n print '\\n'\n for p in process_list:\n p.join()\n print \"%s.exitcode = %s\" %(p.name, p.exitcode)\n\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)",
"def worker(self, q, return_dict):\n pid = os.getpid()\n while True:\n qqq = q.get()\n if qqq == 'DONE':\n # print('proc =', os.getpid())\n break\n\n (idx, d) = qqq\n mol_id = d[0]\n smi = d[1]\n # print screening processing in every pout step\n if self.pout != 0:\n if idx % self.pout == self.pout-1:\n print(\"processing: \", idx+1, flush=True)\n result_dict = self.simulation_process(idx, mol_id, smi, pid)\n return_dict[idx] = result_dict",
"def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)",
"def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = [worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)",
"def worker_func(worker_id, w2t_m_queue, events, t2w_d_manager):\n average_iteration_time = 0\n worker_nn = create_neural_network()\n iteration_time = time.time()\n for i in range(ITERATIONS):\n data_point = create_data_point(worker_nn)\n events[\"Workers_can_proceed\"].clear()\n w2t_m_queue.put(data_point)\n # Signal trainer that this worker has placed its data point this iteration\n events[worker_id].set()\n average_iteration_time += (time.time() - iteration_time)\n # Have worker wait until trainer is done processing this iteration\n events[\"Workers_can_proceed\"].wait()\n iteration_time = time.time()\n # Obtain data trainer has placed into shared manager (data is weights of network)\n shared_data = t2w_d_manager[0]\n worker_nn.set_weights(shared_data)\n\n average_iteration_time /= ITERATIONS\n print(\"Worker \" + str(worker_id) + \" average put time: \" + str.format('{0:.6f}', (average_iteration_time*1000)) + \"ms\")",
"def worker_run():\n while True:\n print(\"worker: waiting for numdata_lock\")\n numdata_lock.acquire()\n print(\"worker: acquired numdata_lock\")\n print(\"The number {} is spelled '{}'\".format(numdata[\"int\"],numdata[\"name\"]))\n numdata_lock.release()\n time.sleep(1)",
"def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_item is None:\n # Wake up queue management thread\n result_queue.put(os.getpid())\n return\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException as e:\n exc = _ExceptionWithTraceback(e, e.__traceback__)\n result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n logger.exception(e) # 主要是直接显示错误。\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))",
"def worker_function(taskQ, resultQ):\n \n while True:\n try: ivel = taskQ.get(block=True, timeout=10)# try to get the next task, allow some time for process clash (ivel number)\n except queue.Empty: break# kill process if no more tasks left\n example = generate_example(ivel)\n resultQ.put(example)# push the example to the results queue",
"def _process_data(f, work_queue, results_queue):\n for element in iter(work_queue.get, FINISHED):\n try:\n results_queue.put(f(element))\n except Exception, work_error:\n LOG.critical('parallel_pc Error: {0}\\n\\n\\tconfig settings {1}\\n'.format(work_error, element))\n results_queue.put(FINISHED)",
"def main(config):\n all_procs = []\n result_q = mp.Queue()\n for seed in config[\"seeds\"]:\n config[\"seed\"] = seed\n p = mp.Process(target=run, args=(config, result_q))\n p.start()\n all_procs.append(p)\n\n for p in all_procs:\n p.join()\n\n all_returns = [result_q.get() for p in all_procs]\n mean_per_restart = np.mean(all_returns, axis=1)\n mean, std = np.mean(mean_per_restart), np.std(mean_per_restart)\n\n # Return the negative since we're minimizing the function\n # .. the metric minimized is suggested from Duan et al. (2016)\n return -(mean - std)",
"def worker1() -> None:\n x = 10\n while x > 0:\n logging.info('Info from Process1 {0}'.format(x))\n time.sleep(0.25)\n x -= 1",
"def mock_workers(task, num_workers):\n results = [\n [{\n \"name\": \"tweet\",\n \"value\": \"%d. Trump Trump everywhere not a Hillary to see.\" % x\n }] for x in range(num_workers)]\n return results",
"def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):\n # proc_pool = Local variable proc_pool for Pool of processes\n # log_level = log_level\n # count_total = Total counter of items to distribute/play/indicate progress\n # len(itemslist)\n\n log_level = logging.getLogger().getEffectiveLevel()\n logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',\n __name__, a_fn.__name__, nprocs)\n # if log_level <= logging.WARNING:\n # if args is not None:\n # for i, arg in enumerate(args):\n # logging.info('===mprocessing f():[%s] arg[%s]={%s}',\n # a_fn.__name__, i, arg)\n\n # if __name__ == '__main__':\n logging.debug('===Multiprocessing=== Setting up logger!')\n # CODING No need for such low level debugging to stderr\n # multiprocessing.log_to_stderr()\n logger = multiprocessing.get_logger()\n logger.setLevel(log_level)\n\n logging.debug('===Multiprocessing=== Logging defined!')\n\n # ---------------------------------------------------------\n # chunk\n #\n # Divides an iterable in slices/chunks of size size\n #\n def chunk(iter_list, size):\n \"\"\"\n Divides an iterable in slices/chunks of size size\n\n >>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):\n ... len(a)\n 3\n 3\n 3\n 1\n \"\"\"\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())\n\n proc_pool = []\n lockdb = multiprocessing.Lock()\n running = multiprocessing.Value('i', 0)\n mutex = multiprocessing.Lock()\n count_total = len(itemslist)\n\n size = (len(itemslist) // int(nprocs)) \\\n if ((len(itemslist) // int(nprocs)) > 0) \\\n else 1\n\n logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',\n len(itemslist), int(nprocs), size)\n\n # Split itemslist in chunks to distribute accross Processes\n for splititemslist in chunk(itemslist, size):\n logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',\n len(splititemslist), size)\n logging.debug('===type(splititemslist)=[%s]', type(splititemslist))\n logging.debug('===Job/Task Process: Creating...')\n proc_task = multiprocessing.Process(\n target=a_fn, # argument function\n args=(lockdb,\n running,\n mutex,\n splititemslist,\n count_total,\n cur,))\n proc_pool.append(proc_task)\n logging.debug('===Job/Task Process: Starting...')\n proc_task.start()\n NPR.niceprint('===Job/Task Process: [{!s}] Started '\n 'with pid:[{!s}]'\n .format(proc_task.name,\n proc_task.pid),\n verbosity=3,\n logalso=logging.DEBUG)\n\n # Check status of jobs/tasks in the Process Pool\n if log_level <= logging.DEBUG:\n NPR.niceprint('===Checking Processes launched/status:',\n verbosity=3, logalso=logging.DEBUG)\n for j in proc_pool:\n NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),\n verbosity=3, logalso=logging.DEBUG)\n\n # Regularly print status of jobs/tasks in the Process Pool\n # Prints status while there are processes active\n # Exits when all jobs/tasks are done.\n while True:\n if not any(multiprocessing.active_children()):\n logging.debug('===No active children Processes.')\n break\n for prc in multiprocessing.active_children():\n logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())\n proc_task_active = prc\n NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n proc_task_active.join(timeout=60)\n 
NPR.niceprint('===Waited for 60s on '\n '{!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n # Wait for join all jobs/tasks in the Process Pool\n # All should be done by now!\n for j in proc_pool:\n j.join()\n NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'\n .format(j.name, j.is_alive(), j.exitcode),\n verbosity=2)\n\n logging.warning('===Multiprocessing=== pool joined! '\n 'All processes finished.')\n\n # Will release (set to None) the lockdb lock control\n # this prevents subsequent calls to\n # use_lock( nuLockDB, False)\n # to raise exception:\n # ValueError('semaphore or lock released too many times')\n logging.info('===Multiprocessing=== pool joined! '\n 'Is lockdb None? [%s]. Setting lockdb to None anyhow.',\n lockdb is None)\n lockdb = None\n\n # Show number of total files processed\n NPR.niceprocessedfiles(running.value, count_total, True)\n\n return True",
"def start_workers(w2t_m_queue, events, t2w_d_manager):\n start_time = time.time()\n print(\"*********************************************************************\")\n print(\"Initializing workers...\")\n workers = []\n for i in range(NUM_WORKERS):\n worker = mp.Process(target=worker_func, args=(i, w2t_m_queue, events, t2w_d_manager))\n worker.start()\n workers.append(worker)\n print(\"Workers initialized.\")\n print(\"Initialization time elapsed: \" + str.format('{0:.6f}', (time.time() - start_time)*1000) + \"ms\")\n print(\"*********************************************************************\")\n return workers",
"def evaluate(self, tick, task, inputs, nosend_ports=None, fail_on_unexpected_nosend=False):\n\n logger.debug(\"Transfers for job %s\" % tick)\n\n ports = []\n transfers = []\n transfer_results = {}\n for port, (valueid, worker) in inputs.iteritems():\n \n \n d = self.fetch_from(worker, valueid)\n \n def transfer_completed(transfer_result, valueid, port):\n if transfer_result: # `None` if the value was already present\n transfer_results[port] = transfer_result\n return self.get_value(valueid)\n \n\n d.addCallback(transfer_completed, valueid, port)\n ports.append(port)\n transfers.append(d)\n \n d = defer.DeferredList(transfers)\n \n def run(inputs):\n \"\"\"\n Runs in separate thread.\n \"\"\"\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)\n \n @twistit.yieldefer\n def got_all(results):\n \n logger.debug(\"Transfers for job %s finished\" % tick)\n \n values = []\n for success, result in results:\n if not success:\n if result.check(pickle.PickleError):\n raise pickle.PickleError(\"Failed to unpickle input of %r.%r: %s\" %(tick, port, result))\n else:\n result.raiseException()\n else:\n values.append(result)\n\n inputs = dict(zip(ports, values))\n \n evalresult = yield threads.deferToThread(run, inputs)\n \n if not isinstance(evalresult.result, dict) and not isinstance(evalresult.result, failure.Failure):\n raise ValueError(\"Evaluation of task %r did not produce a dict or a failure. Got %r.\" % (task, evalresult.result))\n \n defer.returnValue(evalresult)\n \n def task_completed(evalresult):\n if isinstance(evalresult.result, dict):\n \n # Injest values into our store and replace the eval results with ValueIds.\n outputs = evalresult.result\n outs = {}\n datasizes = {}\n for port, value in outputs.iteritems():\n valueid = ValueId(graph.Endpoint(tick, port))\n \n pickle_supported = True\n if nosend_ports and port in nosend_ports:\n pickle_supported = False\n \n try:\n size = self.set_value(valueid, \n value, \n pickle_supported, \n pickle_supported and fail_on_unexpected_nosend)\n except NoPickleError as e:\n e = NoPickleError(\"Value of output port %r cannot be pickled.\" % port,\n cause=e.cause)\n # TODO: memory leak. We should remove the values we've set in\n # previous loop iterations.\n raise e\n \n outs[port] = valueid\n if size is not None:\n datasizes[port] = size \n \n evalresult.result = outs\n evalresult.datasizes = datasizes\n evalresult.transfer_results = transfer_results\n return evalresult\n \n d.addCallback(got_all)\n d.addCallback(task_completed)\n return d",
"def parallel_work(jobs, nr_of_threads):\n work_queue = Queue()\n result_queue = Queue()\n result = {}\n\n for job in jobs:\n work_queue.put(job)\n\n if nr_of_threads > len(jobs):\n nr_of_threads = len(jobs)\n\n for i in range(nr_of_threads):\n worker = Process(target=check_plugin, args=(work_queue,result_queue))\n worker.start()\n\n while len(result.keys()) < len(jobs):\n data = result_queue.get()\n\n if \" | \" in data[1]:\n (status, output) = data[1].split(\" | \")\n else:\n status = \"UNKNOWN\"\n output = data[1]\n\n result[data[0]] = {\"status\": status, \"output\": output}\n #print \"Host \" + data[0] + \" \" + status\n\n return result",
"def stats_freq():\n\n # Get a worker number to position the progress bar\n global idxQueue\n thr_idx = idxQueue.get()\n\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} stats_freq()\")\n\n # Initialize a Counter object for each family\n freqs = {}\n for f in famlist:\n freqs[f] = Counter()\n\n # List all nt_names happening within a RNA family and store the counts in the Counter\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n counts = dict(sql_ask_database(conn, f\"SELECT nt_name, COUNT(nt_name) FROM (SELECT chain_id from chain WHERE rfam_acc='{f}') NATURAL JOIN nucleotide GROUP BY nt_name;\", warn_every=0))\n freqs[f].update(counts)\n \n # Create a pandas DataFrame, and save it to CSV.\n df = pd.DataFrame()\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n tot = sum(freqs[f].values())\n df = pd.concat([ df, pd.DataFrame([[ format_percentage(tot, x) for x in freqs[f].values() ]], columns=list(freqs[f]), index=[f]) ])\n df = df.fillna(0)\n df.to_csv(runDir + \"/results/frequencies.csv\") \n idxQueue.put(thr_idx) # replace the thread index in the queue\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} finished\")\n # notify(\"Saved nucleotide frequencies to CSV file.\")",
"def Worker(queue, out_queue):\n while not queue.empty() and Worker.running:\n item = queue.get(False)\n if not item:\n break\n results = RunGCC(item[0], item[1])\n out_queue.put(results)",
"def __call__(self, q, threads = None):\n if threads is -1: threads = cpu_count()\n\n if threads is None:\n results = [self.evaluate(v) for v in q]\n elif type(threads) is int and threads > 0:\n workers = Pool(threads)\n results = workers.map(self.evaluate, q)\n else:\n raise ValueError('threads keyword must be either -1 or an integer greater than zero')\n\n mu = [ t[0] for t in results ]\n sig = [ t[1] for t in results ]\n return array(mu), array(sig)",
"def process():",
"def worker_func(queue_in, queue_out, model_type, hidden_size, novelty_use, env_name, noise_std, action_type):\r\n env = gym.make(env_name)\r\n \r\n cache = {} # to store population / networks\r\n \r\n while True:\r\n parents_seeds = queue_in.get()\r\n if parents_seeds == None:\r\n break\r\n new_cache = {}\r\n # for each network seeds \r\n for seeds in parents_seeds:\r\n # if seed history exist\r\n if len(seeds) > 1:\r\n net = cache.get(seeds[:-1])#\r\n # check if network already exists\r\n if net is not None:\r\n # if exist mutate on the new given seed -> the last in the list\r\n net = mutate(net, seeds[-1], noise_std)\r\n else:\r\n # if not exist build the net with the seed history\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, action_type)\r\n else:\r\n # since no seed history exist -> build network\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, action_type)\r\n \r\n # saves the networks in a cache \r\n new_cache[seeds] = net\r\n # evaluate new network mutation\r\n reward, steps, bc = evaluate(env, net)\r\n queue_out.put(OutputItem(seeds=seeds, reward=reward, steps=steps, bc=bc))\r\n # after evaluating all seeds the worker sets the new_cache with saved nets to the current cache\r\n cache = new_cache",
"def worker(my_idx, inq, outq):\n print(\"worker %d: starting\" % my_idx)\n backoff = .001\n while True:\n cmd = inq.get()\n if cmd is None:\n break\n ridx, creds, cmds = cmd\n backoff = max(backoff / 2, 0.001)\n while True:\n try:\n responses = Gmail.batch_executor(creds, cmds)\n except Gmail.UserRateException:\n print(f'worker {my_idx}: backoff {backoff} sec')\n sleep(backoff)\n backoff = min(backoff * 2, 1.0)\n except Exception as ex:\n outq.put([ridx, ex])\n break\n else:\n outq.put([ridx, responses])\n break\n inq.task_done()\n print(\"worker %d stoping\" % my_idx)",
"def worker(**kwargs):\n\t\tident = kwargs[\"ident\"]\n\t\twhile True:\n\t\t\titem = worker_queue.get()\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\tworker_function(ident, item)\n\t\t\tworker_queue.task_done()",
"def _create_workers(self, start=True):\n\n bearer = api_client.get_bearer_token()\n account = api_client.account_id_from_jwt(bearer.value)\n LOGGER.info(\"account: %s\", account)\n\n project = self.args.get(\"project\") or None\n LOGGER.info(\"project: %s\", project)\n\n location = self.args.get(\"location\") or None\n LOGGER.info(\"location: %s\", location)\n\n thread_count = self.args.get(\"thread_count\") or 1\n LOGGER.info(\"thread_count: %s\", thread_count)\n\n # CREATE WORKER PROCESSES\n workers = {}\n\n # Create DownloadWorker processes\n for _ in range(thread_count):\n\n # Create a process-safe run_state object for controlling process\n # run_state = multiprocessing.Array('c', \"stoppingorstuff\")\n global RUN_STATE\n wrk = UploaderWorker(\n RUN_STATE,\n self._results_queue,\n account=account,\n project=project,\n location=location)\n workers[wrk] = RUN_STATE\n\n log_history_wrk = self.create_log_history()\n\n workers[log_history_wrk] = RUN_STATE\n\n if start:\n for wrkr in workers:\n wrkr.start()\n time.sleep(.5)\n\n return workers",
"def worker2() -> None:\n x = 10\n while x > 0:\n logging.info('Info from Process2 {0}'.format(x))\n time.sleep(0.25)\n x -= 1",
"def run(self):\n\t\t#Go through each number in the list\n\t\tfor number in self.numList:\n\t\t\t#record the count for this number\n\t\t\tself.permutationCountArray[self.count.value]=len(number)\n\t\t\t#increment total of numbers processed by this process\n\t\t\tself.count.value+=1",
"def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\\\n num_of_workers=8):\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n\n # uses a pool of 'curl' workers\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\\\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, \\\n ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)",
"def predict(self, smiles_list):\n data = list(enumerate(smiles_list))\n num_data = len(data)\n num_sub_proc = min(self.num_sub_proc, num_data)\n\n q1 = Queue()\n manager = Manager()\n return_dict = manager.dict()\n proc_master = Process(target=self.creator,\n args=(q1, data, num_sub_proc))\n proc_master.start()\n\n # create slave process\n procs = []\n for sub_id in range(0, num_sub_proc):\n proc = Process(target=self.worker, args=(q1, return_dict))\n procs.append(proc)\n proc.start()\n\n q1.close()\n q1.join_thread()\n proc_master.join()\n for proc in procs:\n proc.join()\n keys = sorted(return_dict.keys())\n\n result_dict = dict()\n docking_score_list = list()\n if self.rescoring:\n docking_re_list = list()\n\n for key in range(num_data):\n if key in keys:\n result_dict0 = return_dict[key]\n if 'docking' in result_dict0:\n docking_score = result_dict0['docking']\n else:\n docking_score = np.array([99.999], dtype=np.float32)\n\n if self.rescoring:\n if 'docking_re' in result_dict0:\n docking_re = result_dict0['docking_re']\n else:\n docking_re = np.array([99.999], dtype=np.float32)\n\n else:\n docking_score = np.array([99.999], dtype=np.float32)\n if self.rescoring:\n docking_re = np.array([99.999], dtype=np.float32)\n\n docking_score_list += [docking_score]\n if self.rescoring:\n docking_re_list += [docking_re]\n\n result_dict['docking'] = docking_score_list\n if self.rescoring:\n result_dict['docking_re'] = docking_re_list\n\n if self.use_my_module:\n self.my_class.predict(self, smiles_list, result_dict, return_dict)\n\n return result_dict",
"def get_cliques_data(th_object, start, end, filename, path, num):\n last = start\n delta = int((end + 1 - start) / num)\n points = []\n\n while last < end + 1:\n points.append(int(last))\n last += delta\n points.append(end)\n\n cliques_num = []\n max_cliques = []\n for i in range(num):\n cliques_num.append(mp.Manager().dict())\n max_cliques.append(mp.Manager().Value('i', 2))\n\n processes = []\n for i in range(num):\n p = mp.Process(target=th_object.get_cliques_data, args=(points[i], points[i + 1], filename, cliques_num[i],\n max_cliques[i]))\n processes.append(p)\n print('Starting process', i)\n p.start()\n print('Process', i, ' started')\n\n for i in processes:\n i.join()\n\n max_clique = 0\n for i in max_cliques:\n if i.value > max_clique:\n max_clique = i.value\n\n print('All processes are over')\n with open(path, 'w') as f:\n f.write(\"Time\")\n for i in range(2, max_clique + 1):\n f.write(\",\" + str(i))\n f.write(\"\\n\")\n for cc in cliques_num:\n for k, v in cc.items():\n f.write(str(k.time()))\n counter = 1\n for vv in v:\n f.write(\",\" + str(vv))\n counter += 1\n while counter < max_clique:\n f.write(\",0\")\n counter += 1\n f.write(\"\\n\")",
"def spawn_threads():\n t0 = threading.Thread(target=print_numbers, args=[10, 0.9, \"\"]) \n t1 = threading.Thread(target=print_numbers, args=[7, 1, \" \"])\n t0.start()\n t1.start()",
"def run_sim(self, dictionary):\n\t\tsim_start = time.time()\n\t\tglobal HAS_RUN_ITEM_ROUTING, ROUTING_ARRAY\n\t\tself.sim_num += 1 # indicate that we've begun another simulation\n\t\tpassedItems = []\n\t\titemsDoneArray = [0]\n\t\tswitch = 0\n\t\teddyTimes = []\n\t\ttaskTimes = []\n\t\tworkerDoneTimes = []\n\t\tnoTasks = 0\n\t\tscores = []\n\t\tticketNums = []\n\t\tselectivities = []\n\n\t\ttime_proxy = 0\n\t\torig_active_tasks = toggles.ACTIVE_TASKS_SIZE # saves the initial size of the array\n\t\tactive_tasks_size = orig_active_tasks # keeps track of the current size of the array\n\t\ttps_start = 3\n\t\tsecs = 0 # used to count time steps when tasks per second is less than 1\n\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\tfor count in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_selectivities.append([])\n\n\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tfor predNum in range(len(CHOSEN_PREDS)):\n\t\t\t\t\tscores.append([])\n\t\t\telse:\n\t\t\t\tfor count in range(NUM_QUESTIONS):\n\t\t\t\t\tscores.append([])\n\n\t\ttotalWorkTime = 0\n\t\ttasksArray = []\n\n\t\t# array of workers who are busy\n\t\tb_workers = [0]\n\n\t\t# array of tasks currently in process\n\t\tactive_tasks = []\n\n\t\t#time counter\n\t\ttime_clock = 0\n\n\t\t# set up a dictionary to hold counts of active tasks_out\n\t\tif toggles.REAL_DATA:\n\t\t\tfor pred in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_active_tasks[pred+1] = []\n\t\t\t\tself.pred_queues[pred+1] = []\n\t\t\t\tself.ticket_nums[pred+1] = []\n\t\telse:\n\t\t\tfor pred in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_active_tasks[pred+1] = []\n\t\t\t\tself.pred_queues[pred+1] = []\n\t\t\t\tself.ticket_nums[pred+1] = []\n\n\t\t# add an entry to save the numbers of placeholder tasks\n\t\tself.pred_active_tasks[0] = []\n\n\t\t#Setting up arrays to count tickets for ticketing counting graphs\n\t\t# if toggles.COUNT_TICKETS:\n\t\t# \tif toggles.REAL_DATA:\n\t\t# \t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t# \t\t\tself.ticketNums.append([])\n\t\t# \telse:\n\t\t# \t\tfor count in toggles.CHOSEN_PREDS:\n\t\t# \t\t\tself.ticketNums.append([])\n\n\t\t# Setting up arrays for TRACK_SIZE\n\t\tif toggles.TRACK_SIZE:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\tself.consensus_size.append([])\n\t\t\telse:\n\t\t\t\tfor count in toggles.CHOSEN_PREDS:\n\t\t\t\t\tself.consensus_size.append([])\n\n\t\t# If running Item_routing, setup needed values\n\t\tif ((not HAS_RUN_ITEM_ROUTING) and toggles.RUN_ITEM_ROUTING) or toggles.RUN_MULTI_ROUTING:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tpredicates = [Predicate.objects.get(pk=pred+1) for pred in toggles.CHOSEN_PREDS]\n\t\t\telse:\n\t\t\t\tpredicates = [Predicate.objects.get(pk=pred+1) for pred in range(toggles.NUM_QUESTIONS)]\n\t\t\troutingC, routingL, seenItems = [], [], set()\n\t\t\tfor i in range(len(predicates)):\n\t\t\t\troutingC.append(0)\n\t\t\t\troutingL.append([0])\n\n\t\tip_pair = IP_Pair()\n\t\ttotal_ip_pairs = IP_Pair.objects.all().count()\n\n\t\tif toggles.SIMULATE_TIME:\n\t\t\tprev_time = 0\n\n\t\t\twhile (IP_Pair.objects.filter(isDone=False).exists() or active_tasks) :\n\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tif (time_clock % 60 == 0) or (time_clock - prev_time > 1):\n\t\t\t\t\t\tprint \"$\"*43 + \" t = \" + str(time_clock) + \" \" + \"$\"*(47-len(str(time_clock)))\n\n\t\t\t\t\t\tprint \"$\"*96\n\n\t\t\t\t\t\tprint \"Incomplete IP Pairs: \" + str(IP_Pair.objects.filter(isDone=False).count()) + \" | Tasks completed: \" + 
str(self.num_tasks)\n\t\t\t\t\t\tprint \"\"\n\t\t\t\t\t\tfor ip in IP_Pair.objects.filter(inQueue=True):\n\t\t\t\t\t\t\tprint \"IP Pair \" + str(ip.pk) + \" | Predicate: \" + str(ip.predicate.id) + \" ||| Tasks out: \" + str(ip.tasks_out) + \" | Num yes: \" + str(ip.num_yes) + \" | Num no: \" + str(ip.num_no) + \" | isDone: \" + str(ip.isDone)\n\n\t\t\t\t\t\t\tif ip.num_no + ip.num_yes > toggles.CONSENSUS_SIZE_LIMITS[1]:\n\t\t\t\t\t\t\t\tprint \"Total votes: \" + str(ip.num_no+ip.num_yes)\n\t\t\t\t\t\t\t\traise Exception (\"Too many votes cast for IP Pair \" + str(ip.id))\n\n\t\t\t\t\t\t\tif (ip.tasks_out == 0) and ip.isDone and ip.inQueue:\n\t\t\t\t\t\t\t\traise Exception (\"IP Pair \" + str(ip.id) + \" has no tasks out and is done, still in queue\")\n\t\t\t\t\t\tif toggles.EDDY_SYS == 2:\n\t\t\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t\t\tprint \"Task for IP Pair \" + str(task.ip_pair.id)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint \"Placeholder\"\n\t\t\t\t\t\tplaceholders = 0\n\t\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\t\tif task.ip_pair == None:\n\t\t\t\t\t\t\t\tplaceholders += 1\n\t\t\t\t\t\tprint \"\"\n\t\t\t\t\t\tif len(active_tasks) == 0:\n\t\t\t\t\t\t\tprint \"Active tasks is empty.\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \"Active tasks: \" + str(len(active_tasks)) + \" | Placeholders: \" + str(placeholders)\n\n\t\t\t\t\t\t\tprint \"IP pairs in queue: \" + str(IP_Pair.objects.filter(inQueue=True).count())\n\t\t\t\t\t\t# print \"\"\n\t\t\t\t\t\t# for p in Predicate.objects.filter(pk__in=[pred+1 for pred in toggles.CHOSEN_PREDS]) :\n\t\t\t\t\t\t# \tprint \"Predicate \" + str(p.pk) + \" ||| Queue full: \" + str(p.queue_is_full) + \" | Queue length: \" + str(p.queue_length) + \" | Tickets: \" + str(p.num_tickets)\n\n\t\t\t\t\t\tprint \"$\"*96\n\n\t\t\t\t# throw some errors for debugging purposes\n\t\t\t\tif not (Item.objects.filter(inQueue=True).count() == IP_Pair.objects.filter(inQueue=True).count()):\n\t\t\t\t\tprint \"inQueue items: \" + str(Item.objects.filter(inQueue=True).count())\n\t\t\t\t\tprint \"inQueue IPs: \" + str(IP_Pair.objects.filter(inQueue=True).count())\n\t\t\t\t\traise Exception(\"IP and item mismatch\")\n\n\t\t\t\tfor p in Predicate.objects.filter(queue_is_full = True):\n\t\t\t\t\tif not p.num_pending >= p.queue_length:\n\t\t\t\t\t\traise Exception (\"Queue for predicate \" + str(p.id) + \" isn't actually full\")\n\n\t\t\t\t\tif IP_Pair.objects.filter(predicate=p, inQueue=True).count() < p.queue_length:\n\t\t\t\t\t\traise Exception (\"Not enough IP_Pairs in queue for predicate \" + str(p.id) + \" for it to be full\")\n\n\t\t\t\t\t# if IP_Pair.objects.filter(predicate=p, inQueue=True).count() > p.queue_length:\n\t\t\t\t\t# \traise Exception(\"The queue for predicate \" + str(p.id) + \" is over-full\")\n\n\t\t\t\t\tif not IP_Pair.objects.filter(predicate=p, inQueue=True).count() == p.num_pending:\n\t\t\t\t\t\tprint \"IP objects in queue for pred \" + str(p.id) + \": \" + str(IP_Pair.objects.filter(predicate=p, inQueue=True).count())\n\t\t\t\t\t\tprint \"Number pending for pred \" + str(p.id) + \": \" + str(p.num_pending)\n\t\t\t\t\t\traise Exception(\"WHEN REMOVING Mismatch num_pending and number of IPs in queue for pred \" + str(p.id))\n\n\t\t\t\tself.time_steps_array.append(time_clock)\n\n\t\t\t\t# increment seconds for when tasks per second less than 1\n\t\t\t\tsecs += 1\n\t\t\t\tratio=IP_Pair.objects.filter(isDone=True).count()/float(total_ip_pairs)\n\t\t\t\tif 
toggles.TASKS_PER_SECOND:\n\t\t\t\t\t# change the rate of task requests\n\t\t\t\t\ttps = self.set_tps(ratio, tps_start)\n\n\t\t\t\tif toggles.RESIZE_ACTIVE_TASKS:\n\t\t\t\t\tratio = IP_Pair.objects.filter(isDone=True).count()/float(total_ip_pairs)\n\t\t\t\t\tactive_tasks_size = self.set_active_size(ratio, orig_active_tasks)\n\n\n\t\t\t\tif toggles.TRACK_ACTIVE_TASKS:\n\t\t\t\t\t# append a new counter for the next time step\n\t\t\t\t\tfor pred in self.pred_active_tasks:\n\t\t\t\t\t\tself.pred_active_tasks[pred].append(0)\n\n\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t_id = task.ip_pair.predicate.id\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t_id = 0\n\t\t\t\t\t\t# add one to the most recent counter\n\t\t\t\t\t\tself.pred_active_tasks[_id][-1] += 1\n\n\t\t\t\tprev_time = time_clock\n\t\t\t\tendTimes = []\n\n\t\t\t\tif toggles.TRACK_IP_PAIRS_DONE:\n\t\t\t\t\tself.ips_done_array.append(IP_Pair.objects.filter(isDone=True).count())\n\t\t\t\t\tself.ips_times_array.append(time_clock)\n\t\t\t\t\tself.ips_tasks_array.append(self.num_tasks)\n\n\t\t\t\tif toggles.TRACK_QUEUES:\n\t\t\t\t\tfor pred in self.pred_queues:\n\t\t\t\t\t\tself.pred_queues[pred].append(IP_Pair.objects.filter(predicate__id=pred, inQueue=True).count())\n\n\t\t\t\tif toggles.COUNT_TICKETS:\n\t\t\t\t\tfor pred in self.ticket_nums:\n\t\t\t\t\t\tself.ticket_nums[pred].append(Predicate.objects.get(pk=pred).num_tickets)\n\n\t\t\t\t# check if any tasks have reached completion, update bookkeeping\n\t\t\t\t# print \"Removing tasks\"\n\t\t\t\tfor task in active_tasks:\n\t\t\t\t\tif (task.end_time <= time_clock):\n\t\t\t\t\t\tupdateCounts(task, task.ip_pair)\n\t\t\t\t\t\t#task.refresh_from_db()\n\t\t\t\t\t\tactive_tasks.remove(task)\n\t\t\t\t\t\tb_workers.remove(task.workerID)\n\t\t\t\t\t\tself.num_tasks += 1\n\n\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\tif not IP_Pair.objects.filter(predicate=task.ip_pair.predicate, inQueue=True).count() == task.ip_pair.predicate.num_pending:\n\t\t\t\t\t\t\t\tprint \"IP objects in queue for pred \" + str(task.ip_pair.predicate.id) + \": \" + str(IP_Pair.objects.filter(predicate=task.ip_pair.predicate, inQueue=True).count())\n\t\t\t\t\t\t\t\tprint \"Number pending for pred \" + str(task.ip_pair.predicate.id) + \": \" + str(task.ip_pair.predicate.num_pending)\n\t\t\t\t\t\t\t\traise Exception(\"WHEN REMOVING Mismatch num_pending and number of IPs in queue for pred \" + str(p.id))\n\t\t\t\t\telse:\n\t\t\t\t\t\tendTimes.append(task.end_time)\n\n\t\t\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t\t\t# \tif task.ip_pair is None:\n\t\t\t\t\t# \t\tprint \"Task removed ||| Placeholder\"\n\t\t\t\t\t# \telse:\n\t\t\t\t\t# \t\tprint \"Task removed ||| Item: \" + str(task.ip_pair.item.id) + \" | Predicate: \" + str(task.ip_pair.predicate.id) + \" | IP Pair: \" + str(task.ip_pair.id)\n\n\n\t\t\t\t# decides whether to give out more tasks if tasks per second is less than 1\n\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\ttask_limit = tps\n\t\t\t\t\tcount = 0\n\t\t\t\t\tif tps < 1:\n\t\t\t\t\t\ttask_limit = 1\n\t\t\t\t\t\trefill = False\n\t\t\t\t\t\tif secs >= 1.0/tps:\n\t\t\t\t\t\t\trefill = True\n\t\t\t\t\t\t\tsecs = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\trefill = True\n\t\t\t\telse:\n\t\t\t\t\t# set up variables to function properly in case fixed active tasks size is being used\n\t\t\t\t\trefill = True\n\t\t\t\t\tcount = len(active_tasks)\n\t\t\t\t\ttask_limit = active_tasks_size\n\t\t\t\t# fill the active task array with new tasks as long as some IPs need eval\n\t\t\t\tif 
refill:\n\t\t\t\t\twhile (count < task_limit) and IP_Pair.objects.filter(isDone=False).exists(): # and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, isDone=False).exists()): #or IP_Pair.objects.filter(inQueue=True, tasks_remaining__gt=0).exists()):\n\t\t\t\t\t# while (count < tps) and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, tasks_out__lt=toggles.MAX_TASKS_OUT).extra(where=[\"tasks_out + tasks_collected < \" + str(toggles.MAX_TASKS_COLLECTED)]).exists() or toggles.EDDY_SYS == 2):\n\t\t\t\t\t# while (len(active_tasks) < active_tasks_size) and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, tasks_out__lt=toggles.MAX_TASKS_OUT).extra(where=[\"tasks_out + tasks_collected < \" + str(toggles.MAX_TASKS_COLLECTED)]).exists() or toggles.EDDY_SYS == 2):\n\n\t\t\t\t\t\ttask, worker = self.issueTask(active_tasks, b_workers, time_clock, dictionary, switch)\n\n\t\t\t\t\t\tif task is not None:\n\n\t\t\t\t\t\t\t# TODO if we're in \"placeholder task\" mode, task should never be None\n\n\n\t\t\t\t\t\t\tactive_tasks.append(task)\n\t\t\t\t\t\t\tb_workers.append(worker)\n\n\t\t\t\t\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\t# \tif task.ip_pair is None:\n\t\t\t\t\t\t\t# \t\tprint \"Task added ||| Placeholder\"\n\t\t\t\t\t\t\t# \telse:\n\t\t\t\t\t\t\t# \t\tprint \"Task added ||| Item: \" + str(task.ip_pair.item.id) + \" | Predicate: \" + str(task.ip_pair.predicate.id) + \" | IP Pair: \" + str(task.ip_pair.id)\n\n\t\t\t\t\t\t\t# ITEM ROUTING DATA COLLECTION\n\t\t\t\t\t\t\t# If we should be running a routing test\n\t\t\t\t\t\t\t# this is true in two cases: 1) we hope to run a single\n\t\t\t\t\t\t\t# item_routing test and this is the first time we've run\n\t\t\t\t\t\t\t# run_sim or 2) we're runing multiple routing tests, and\n\t\t\t\t\t\t\t# so should take this data every time we run.\n\n\t\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t\tif (toggles.RUN_ITEM_ROUTING and (not HAS_RUN_ITEM_ROUTING)) or toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\t\t\t\t\t# if this is a \"new\" item\n\t\t\t\t\t\t\t\t\tif task.ip_pair.item.item_ID not in seenItems:\n\t\t\t\t\t\t\t\t\t\tseenItems.add(task.ip_pair.item.item_ID)\n\t\t\t\t\t\t\t\t\t\t# increment the count of that item's predicate\n\t\t\t\t\t\t\t\t\t\tfor i in range(len(predicates)):\n\t\t\t\t\t\t\t\t\t\t\tif task.ip_pair.predicate == predicates[i]:\n\t\t\t\t\t\t\t\t\t\t\t\troutingC[i]+=1\n\t\t\t\t\t\t\t\t\t\t\t# and add this \"timestep\" to the running list\n\t\t\t\t\t\t\t\t\t\t\troutingL[i].append(routingC[i])\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# we couldn't give ANYONE a task; fast-forward to next task expiry\n\t\t\t\t\t\t\tself.no_tasks_to_give += 1\n\t\t\t\t\t\t\tif endTimes:\n\t\t\t\t\t\t\t\ttime_clock = min(endTimes) - 1\n\t\t\t\t\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcount = len(active_tasks)\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcount = len(active_tasks)\n\n\t\t\t\tmove_window()\n\n\t\t\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\t\t\tself.placeholder_change_count.append(DummyTask.objects.all().count())\n\t\t\t\t\tself.num_tasks_change_count.append(Task.objects.all().count())\n\t\t\t\ttime_clock += 1\n\n\n\n\t\t\t\t#the tuples in switch_list are of the form (time, pred1, pred2 ....),\n\t\t\t\t#so we need index 0 of the tuple to get the time at which the switch should 
occur\n\t\t\t\tif (switch + 1) < len(toggles.switch_list) and toggles.switch_list[switch + 1][0] <= time_clock:\n\t\t\t\t\tswitch += 1\n\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Simulation completed ||| Simulated time = \" + str(time_clock) + \" | number of tasks: \" + str(self.num_tasks)\n\t\t\t\tprint \"Time steps: \" + str(len(self.time_steps_array))\n\t\t\t\tprint \"Predicates saved in active tasks dict: \" + str(self.pred_active_tasks.keys())\n\t\t\t\tprint \"Size of predicates' arrays: \" + str([len(self.pred_active_tasks[key]) for key in self.pred_active_tasks])\n\n\n\n\t\telse:\n\t\t\twhile(ip_pair != None):\n\n\t\t\t\tif toggles.TRACK_IP_PAIRS_DONE:\n\t\t\t\t\tself.ips_done_array.append(IP_Pair.objects.filter(isDone=True).count())\n\t\t\t\t\tself.ips_tasks_array.append(self.num_tasks)\n\n\t\t\t\tif toggles.COUNT_TICKETS:\n\t\t\t\t\tfor pred in self.ticket_nums:\n\t\t\t\t\t\tself.ticket_nums[pred].append(Predicate.objects.get(pk=pred).num_tickets)\n\n\t\t\t\tif toggles.TRACK_QUEUES:\n\t\t\t\t\tfor pred in self.pred_queues:\n\t\t\t\t\t\tself.pred_queues[pred].append(IP_Pair.objects.filter(predicate__id=pred, inQueue=True).count())\n\n\t\t\t\t# only increment if worker is actually doing a task\n\t\t\t\tworkerID = self.pick_worker([0], [0]) # array needed to make pick_worker run\n\t\t\t\tworkerDone, workerDoneTime = worker_done(workerID)\n\t\t\t\tself.worker_done_time += workerDoneTime\n\n\t\t\t\tif not IP_Pair.objects.filter(isDone=False):\n\t\t\t\t\tip_pair = None\n\n\t\t\t\telif (workerDone):\n\t\t\t\t\tif not toggles.DUMMY_TASKS:\n\t\t\t\t\t\tself.num_placeholders += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\td = DummyTask(workerID=workerID)\n\t\t\t\t\t\td.save()\n\t\t\t\t\t\tself.num_tasks += 1\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"worker \" + workerID + \" has no tasks to do\"\n\n\t\t\t\telse:\n\t\t\t\t\tip_pair = pending_eddy(workerID)\n\n\t\t\t\t\t# If we should be running a routing test\n\t\t\t\t\t# this is true in two cases: 1) we hope to run a single\n\t\t\t\t\t# item_routing test and this is the first time we've run\n\t\t\t\t\t# run_sim or 2) we're running multiple routing tests, and\n\t\t\t\t\t# so should take this data every time we run.\n\n\t\t\t\t\tif (toggles.RUN_ITEM_ROUTING and (not HAS_RUN_ITEM_ROUTING)) or toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\t\tif ip_pair is not None: # if this is a real ip pair\n\t\t\t\t\t\t\t# if this is a \"new\" item\n\t\t\t\t\t\t\tif ip_pair.item.item_ID not in seenItems:\n\t\t\t\t\t\t\t\tseenItems.add(ip_pair.item.item_ID)\n\t\t\t\t\t\t\t\t# increment the count of that item's predicate\n\t\t\t\t\t\t\t\tfor i in range(len(predicates)):\n\t\t\t\t\t\t\t\t\tif ip_pair.predicate == predicates[i]:\n\t\t\t\t\t\t\t\t\t\troutingC[i]+=1\n\t\t\t\t\t\t\t\t\t\t# and add this \"timestep\" to the running list\n\t\t\t\t\t\t\t\t\t\troutingL[i].append(routingC[i])\n\n\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\ttask = self.simulate_task(ip_pair, workerID, 0, dictionary)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttask = self.syn_simulate_task(ip_pair, workerID, 0, switch, self.num_tasks)\n\n\t\t\t\t\tmove_window()\n\t\t\t\t\tself.num_tasks += 1\n\n\n\n\t\t\t\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\tfor predNum in range(len(CHOSEN_PREDS)):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=CHOSEN_PREDS[predNum]+1)\n\t\t\t\t\t\t\t\tpredicate.refresh_from_db()\n\t\t\t\t\t\t\t\tscores[predNum].append(predicate.score)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor count in range(NUM_QUESTIONS):\n\t\t\t\t\t\t\t\tpredicate = 
Predicate.objects.get(pk=count+1)\n\t\t\t\t\t\t\t\tscores[count].append(predicate.score)\n\t\t\t\t\tif toggles.TRACK_SIZE:\n\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=toggles.CHOSEN_PREDS[predNum]+1)\n\t\t\t\t\t\t\t\tself.consensus_size[predNum].append(predicate.consensus_max)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=predNum+1)\n\t\t\t\t\t\t\t\tself.consensus_size[predNum].append(predicate.consensus_max)\n\n\t\t\t\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=predNum+1)\n\t\t\t\t\t\t\tpredicate.refresh_from_db(fields=['trueSelectivity'])\n\t\t\t\t\t\t\t#print \"true selectivity: \", str(predicate.trueSelectivity)\n\t\t\t\t\t\t\tself.pred_selectivities[predNum].append(predicate.trueSelectivity)\n\n\t\t\t\t\t#the tuples in switch_list are of the form (time, pred1, pred2 ....),\n\t\t\t\t\t#so we need index 0 of the tuple to get the time at which the switch should occur\n\t\t\t\t\tif (switch + 1) < len(toggles.switch_list) and toggles.switch_list[switch + 1][0] == self.num_tasks:\n\t\t\t\t\t\tswitch += 1\n\n\n\n\t\tif toggles.DUMMY_TASKS:\n\t\t\tself.num_placeholders = DummyTask.objects.all().count()\n\t\t\tself.num_real_tasks = self.num_tasks - self.num_placeholders\n\n\t\t# TODO add cumulative work time and cumulative placeholder time separately\n\t\t# TODO make sure all graphs use appropriate information -- new data members\n\t\t# TODO change return stuff of run_sim to be none of the things it is now\n\n\t\t# save relevant values\n\t\tself.num_tasks_array.append(self.num_tasks)\n\n\t\tif toggles.SIMULATE_TIME:\n\t\t\tself.simulated_time = time_clock\n\t\t\tself.simulated_time_array.append(self.simulated_time)\n\t\t\tself.cum_work_time_array.append(self.cum_work_time)\n\t\t\tself.cum_placeholder_time_array.append(self.cum_placeholder_time)\n\n\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\tself.num_real_tasks_array.append(self.num_real_tasks)\n\t\t\tself.num_placeholders_array.append(self.num_placeholders)\n\n\t\tif toggles.TEST_ACCURACY:\n\t\t\tself.get_incorrects()\n\t\t\tself.num_incorrect_array.append(self.num_incorrect)\n\n\t\t# if toggles.TRACK_IP_PAIRS_DONE:\n\t\t# \tdest = toggles.OUTPUT_PATH + \"ip_done_vs_tasks_q_\" + str(toggles.PENDING_QUEUE_SIZE) + \"_activeTasks_\" + str(toggles.ACTIVE_TASKS_SIZE) + \"_eddy_\" + str(toggles.EDDY_SYS) + \"\"\n\t\t# \tcsv_dest = dest_resolver(dest+\".csv\")\n\t\t#\n\t\t# \tdataToWrite = [self.ips_tasks_array, self.time_steps_array, self.ips_done_array]\n\t\t# \tgeneric_csv_write(csv_dest, dataToWrite) # saves a csv\n\t\t# \tif toggles.DEBUG_FLAG:\n\t\t# \t\tprint \"Wrote File: \" + csv_dest\n\t\t# \tif toggles.GEN_GRAPHS:\n\t\t# \t\tif (IP_Graph_2 == False and toggles.EDDY_SYS==2) or (IP_Graph_5==False and toggles.EDDY_SYS==5):\n\t\t# 
\t\t\tline_graph_gen(dataToWrite[0], dataToWrite[2], dest + \".png\",\n\t\t# \t\t\t\t\t\tlabels = (\"Number Tasks Completed\", \"Number IP Pairs Completed\"),\n\t\t# \t\t\t\t\t\ttitle = \"Number IP Pairs Done vs. Number Tasks Completed\")\n\t\t# \t\t\tif toggles.SIMULATE_TIME:\n\t\t# \t\t\t\tdest1 = toggles.OUTPUT_PATH + \"ip_done_vs_time_q_\" + str(toggles.PENDING_QUEUE_SIZE) + \"_activeTasks_\" + str(toggles.ACTIVE_TASKS_SIZE) + \"_eddy_\" + str(toggles.EDDY_SYS) + \"\"\n\t\t# \t\t\t\tline_graph_gen(dataToWrite[1], dataToWrite[2], dest1+'.png',\n\t\t# \t\t\t\tlabels = (\"Time Steps\", \"Number IP Pairs Completed\"),\n\t\t# \t\t\t\ttitle = \"Number IP Pairs Done vs. Time\")\n\t\t# \t\t\tif toggles.EDDY_SYS == 2:\n\t\t# \t\t\t\tIP_Graph_2 = True\n\t\t# \t\t\telif toggles.EDDY_SYS == 5:\n\t\t# \t\t\t\tIP_Graph_5 = True\n\n\t\t# TODO figure out this no_tasks thingie\n\t\t# produces/appends to CSVs\n\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\t# dest = toggles.OUTPUT_PATH + \"noTasks.csv\"\n\t\t\t# with open(dest, 'a') as f:\n\t\t\t# \tf.write(str(no_tasks_to_give) + \",\")\n\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t# \tprint \"Wrote file: \" + dest\n\n\t\t\tdest = toggles.OUTPUT_PATH + \"placeholderTasks.csv\"\n\t\t\twith open(dest, 'a') as f1:\n\t\t\t\tf1.write(str(self.num_placeholders) + ',')\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote file: \" + dest\n\n\t\tif toggles.OUTPUT_SELECTIVITIES:\n\t\t\toutput_selectivities(toggles.RUN_NAME) # TODO make sure this still works\n\n\t\tif toggles.OUTPUT_COST:\n\t\t\toutput_cost(toggles.RUN_NAME)\n\n\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\tif toggles.SIMULATE_TIME:\n\t\t\t\ttime_proxy = self.simulated_time\n\t\t\telse:\n\t\t\t\ttime_proxy = self.num_tasks\n\t\t\tpredScoresLegend = []\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\txMultiplier = len(toggles.CHOSEN_PREDS)\n\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\tpredScoresLegend.append(\"Pred \" + str(predNum))\n\t\t\telse:\n\t\t\t\txMultiplier = toggles.NUM_QUESTIONS\n\t\t\t\tfor predNum in range(toggles.NUM_QUESTIONS):\n\t\t\t\t\tpredScoresLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(time_proxy)]*xMultiplier, scores, predScoresLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"predScores\" + str(self.sim_num) + \".png\",\n\t\t\t\t\t\t\t\tlabels = (\"time proxy\", \"scores\"))\n\n\n\n\t\tif toggles.COUNT_TICKETS:\n\n\t\t\tif toggles.SIMULATE_TIME:\n\t\t\t\ttime_proxy = self.simulated_time\n\t\t\telse:\n\t\t\t\ttime_proxy = self.num_tasks\n\t\t\tticketCountsLegend = []\n\t\t\txMultiplier = len(toggles.CHOSEN_PREDS)\n\t\t\t\n\t\t\tticket_nums_shifted = [] # ticket_nums doesn't start at index 0, so create array to hold counts for each pred\n\t\t\tfor pred in self.ticket_nums:\n\t\t\t\tlengthdiff = len(self.ticket_nums[pred]) - time_proxy # how many more entries are there in ticket counts than time proxy\n\t\t\t\tif lengthdiff > 0:\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Warning: trimmed last \"+ str(lengthdiff) + \" entries off ticket counts, graph may not be accurate\"\n\t\t\t\t\tself.ticket_nums[pred] = self.ticket_nums[pred][:-lengthdiff] # trim to make lengths equal for plotting\n\t\t\t\tticket_nums_shifted.append(self.ticket_nums[pred]) # append in the new array\n\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\tticketCountsLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(time_proxy)]*xMultiplier, ticket_nums_shifted, ticketCountsLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"ticketCounts\" + str(self.sim_num) + 
\".png\",\n\t\t\t\t\t\t\t\tlabels = (\"time proxy\", \"Ticket counts\"))\n\n\t\tif toggles.TRACK_SIZE:\n\t\t\tif not toggles.SIMULATE_TIME:\n\t\t\t\ttasks = range(len(self.consensus_size[0]))\n\t\t\t\tlegend = []\n\t\t\t\tdest = toggles.OUTPUT_PATH + \"consensus_size\"+str(self.sim_num)\n\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\tlegend.append(\"Pred \" + str(predNum))\n\n\t\t\t\telse:\n\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\tlegend.append(\"Pred \" + str(predNum))\n\t\t\t\tgeneric_csv_write(dest+'.csv',self.consensus_size)\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tgraphGen.consensus_over_time(tasks, legend, self.consensus_size, dest)\n\t\t\t\tself.consensus_size=[]\n\n\t\t# TODO have this graph use the correct arrays\n\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\tselectivitiesLegend = []\n\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\tselectivitiesLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(self.num_tasks)]*len(toggles.CHOSEN_PREDS), self.pred_selectivities, selectivitiesLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"selectivities\" + str(self.sim_num) + \".png\",\n\t\t\t\t\t\t\t\tlabels = (\"Number of tasks completed in single simulation\", \"Predicate selectivities\"), scatter=True)\n\n\t\t# if this is the first time running a routing test\n\t\tif toggles.RUN_ITEM_ROUTING and not HAS_RUN_ITEM_ROUTING:\n\t\t\tHAS_RUN_ITEM_ROUTING = True\n\n\t\t\t# setup vars to save a csv + graph\n\t\t\tdest = toggles.OUTPUT_PATH+'_item_routing'+ str(toggles.SIMULATE_TIME)\n\t\t\tlabels = (str(predicates[0].question), str(predicates[1].question))\n\t\t\tdataToWrite = [labels,routingL[0],routingL[1]]\n\t\t\tgeneric_csv_write(dest+'.csv',dataToWrite) # saves a csv\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \"+dest+'.csv'\n\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\tgraphGen.item_routing(routingL[0],routingL[1], labels, dest)\n\n\n\n\t\t# if we're multi routing\n\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\tROUTING_ARRAY.append(routingC) #add the new counts to our running list of counts\n\n\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\tself.num_tasks_array.append(self.num_tasks)\n\n\t\tsim_end = time.time()\n\t\tsim_time = sim_end - sim_start\n\t\tself.run_sim_time = sim_time\n\t\treturn",
"def _worker(self, args):\n pass",
"def PARALLEL_worker_mc_inv(procnum, num_samples_per_processor, inversion_type, M_amplitude, green_func_array, real_data_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, return_dict_MTs, return_dict_similarity_values_all_samples, return_dict_shift_idxs, return_dict_MT_single_force_rel_amps, return_dict_medium_1_medium_2_rel_amp_ratios, invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels, num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n print(\"Processing for process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")\n \n # Define temp data stores for current process:\n tmp_MTs = np.zeros((len(green_func_array[0,:,0]), num_samples_per_processor), dtype=float)\n tmp_similarity_values_all_samples = np.zeros(num_samples_per_processor, dtype=float)\n tmp_shift_idxs_all_samples = []\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_MT_single_force_rel_amps = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n tmp_medium_1_medium_2_rel_amp_ratios = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_frac_medium_2_diff_phases_dict = {} # Dictionary for temp storing of phase fractions of medium 1\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = np.zeros((num_samples_per_processor, 3), dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = []\n \n # Sort greens function storage if processing for multiple media:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n green_func_array_total_both_media = green_func_array.copy()\n \n # 3. Loop over samples, checking how well a given MT sample synthetic wavefrom from the forward model compares to the real data:\n for i in range(num_samples_per_processor):\n # Generate random medium amplitude ratio and associated greens functions (if required):\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n # If want to invert for ratio of meduim 1 to medium 2 separately for different phases:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_2_diff_phases_dict[\"P\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"S\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"surface\"] = np.random.uniform(0.0, 1.0)\n # Generate associated greens functions:\n green_func_array = np.zeros(np.shape(green_func_array_total_both_media[:,:,:,0]), dtype=float)\n # Loop over greens function for each station-phase:\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_2 = tmp_frac_medium_2_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array[j, :, :] = (1. - tmp_frac_medium_2)*green_func_array_total_both_media[j,:,:,0] + tmp_frac_medium_2*green_func_array_total_both_media[j,:,:,1] \n # Otherwise generate single fraction value and associated greens functions:\n else:\n frac_medium_2 = np.random.uniform(0.0, 1.0)\n green_func_array = (1. 
- frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # 4. Generate synthetic waveform for current sample:\n # Vary moment amplitude randomly if specified:\n if invert_for_relative_magnitudes_switch:\n M_amplitude_exp_factor = np.random.uniform(low=rel_exp_mag_range[0], high=rel_exp_mag_range[1])\n M_amplitude = 10.**M_amplitude_exp_factor\n # And generate waveform from source mechanism tensor:\n if inversion_type==\"full_mt\":\n MT_curr_sample = generate_random_MT()*M_amplitude # Generate a random MT sample\n elif inversion_type==\"full_mt_Lune_samp\":\n MT_curr_sample = generate_random_MT_Lune_samp()*M_amplitude # Generate a random MT sample, sampled uniformly in Lune space\n elif inversion_type==\"DC\":\n MT_curr_sample = generate_random_DC_MT()*M_amplitude # Generate a random DC sample\n elif inversion_type==\"single_force\":\n MT_curr_sample = generate_random_single_force_vector()*M_amplitude # Generate a random single force sample\n elif inversion_type == \"DC_single_force_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_coupled_tensor() # Generate a random DC-single-force coupled sample, with associated relative amplitude of DC to single force\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_single_force_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_crack_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_crack_coupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"single_force_crack_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_single_force_crack_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n synth_waveform_curr_sample = forward_model(green_func_array, MT_curr_sample) # Note: Greens functions must be of similar amplitude units going into here...\n \n # 5. Compare real data to synthetic waveform (using variance reduction or other comparison metric), to assign probability that data matches current model:\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_waveform_curr_sample, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n \n # 6. 
Append results to data store:\n tmp_MTs[:,i] = MT_curr_sample[:,0]\n tmp_similarity_values_all_samples[i] = similarity_curr_sample\n tmp_shift_idxs_all_samples.append(list(shift_idxs))\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps[i] = random_DC_to_single_force_amp_frac\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,0] = tmp_frac_medium_2_diff_phases_dict[\"P\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,1] = tmp_frac_medium_2_diff_phases_dict[\"S\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,2] = tmp_frac_medium_2_diff_phases_dict[\"surface\"]\n else:\n tmp_medium_1_medium_2_rel_amp_ratios[i] = frac_medium_2\n \n if i % 10000 == 0:\n print(\"Processor number:\", procnum, \"- Processed for\",i,\"samples out of\",num_samples_per_processor,\"samples\")\n \n # 7. And convert misfit measure to likelihood function probability:\n tmp_similarity_values_all_samples = np.exp(-(1.-tmp_similarity_values_all_samples)/2.)\n \n # And return values back to script:\n return_dict_MTs[procnum] = tmp_MTs\n return_dict_similarity_values_all_samples[procnum] = tmp_similarity_values_all_samples\n return_dict_shift_idxs[procnum] = tmp_shift_idxs_all_samples\n return_dict_MT_single_force_rel_amps[procnum] = tmp_MT_single_force_rel_amps\n if num_phase_types_for_media_ratios>0:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios_multi_phases\n else:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios\n print(\"Finished processing process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")",
"def run(self):\n\t\t## findMinError\n\t\tself.count = 0\n\t\tprint 'Starting Process type', self.ftype\n\t\tself.min_error = 1\n\t\t# it = 0\n\t\t#self.threadnum = min(500, len(self.pool))\n\t\trows = 3\n\t\tself.threadnum = (len(self.pool)+2)/3\n\t\trows = len(self.pool)/self.threadnum\n\t\tlist_rowlists = [self.pool[x:x+rows] for x in xrange(0, len(self.pool), rows)]\n\t\tmapper = SimpleMapReduce(self.MapFind, self.Reduce, num_workers=self.threadnum)\n\t\tprint 'before mapper'\n\t\tresult = mapper(list_rowlists)\n\t\tprint result\n\t\t#self.min_row, error_infor\n\n\t\tself.min_threshold = error_infor[0]\n\t\tself.min_error = error_infor[1]\n\t\tself.min_flag = error_infor[2]\n\t\t# it += 1\n\t\t# if it%10==0:\n\t\t# \tprint 'type'+str(self.ftype),\"{0:.1%}\".format(float(it)/len(self.pool)), ' search completed'\n\t\treturn",
"def reduce_run():",
"def paralll_worker(rank, size,\n target_function=None,\n batch=None,\n fixed_args=None,\n output_queue=None):\n for input in batch:\n print(\"This is process {} out of {} operating on {}\".format(rank, size, input))\n result = target_function(*input, *fixed_args)\n if output_queue is not None:\n output_queue.put((input, result))",
"def compute_metrics(self, results: list) -> dict:",
"def processed(items, func, max_processes=5, max_queue=200, join=True,\n daemon=True):\n input_queue = Queue(maxsize=max_queue)\n output_queue = Queue(maxsize=max_queue)\n for item in items:\n input_queue.put(item, True)\n\n def wrapped_func(output_queue, item):\n try:\n func(item)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n output_queue.put(e)\n\n processes = []\n while not input_queue.empty():\n try:\n log.exception(output_queue.get_nowait())\n except Empty:\n pass\n while sum(1 for process in processes if process.is_alive()) >= max_processes:\n pass\n item = input_queue.get(True)\n processes.append(Process(target=wrapped_func, args = (output_queue, item,), daemon = daemon))\n processes[-1].start()\n input_queue.task_done()\n\n if join:\n while any(process.is_alive() for process in processes):\n pass",
"def run(self, to_process, duplicates):\n self.db_m = database_manager.DatabaseManager(self.settings)\n try:\n # Process queue while is not empty\n while True:\n data = to_process.get(True, 1)\n duplicate_count = self.consume_data(data)\n with duplicates.get_lock():\n duplicates.value += duplicate_count\n except queue.Empty:\n pass",
"def worker(scenes, cap_templates, ques_templates, worker_id, out_q):\n\n dialogs = []\n for index, scene in enumerate(scenes):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' % \\\n (cur_time, worker_id, index, len(scenes), scene['image_index']))\n try:\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(json.loads(json.dumps(gen_dialog)))\n except:\n print('NOTE: Missing data for %d' % scene['image_index'])\n out_q.put({worker_id: dialogs})",
"def prjEuler():\r\n #Constants\r\n NUMSTRING = ( \"73167176531330624919225119674426574742355349194934\"\r\n \"96983520312774506326239578318016984801869478851843\"\r\n \"85861560789112949495459501737958331952853208805511\"\r\n \"12540698747158523863050715693290963295227443043557\"\r\n \"66896648950445244523161731856403098711121722383113\"\r\n \"62229893423380308135336276614282806444486645238749\"\r\n \"30358907296290491560440772390713810515859307960866\"\r\n \"70172427121883998797908792274921901699720888093776\"\r\n \"65727333001053367881220235421809751254540594752243\"\r\n \"52584907711670556013604839586446706324415722155397\"\r\n \"53697817977846174064955149290862569321978468622482\"\r\n \"83972241375657056057490261407972968652414535100474\"\r\n \"82166370484403199890008895243450658541227588666881\"\r\n \"16427171479924442928230863465674813919123162824586\"\r\n \"17866458359124566529476545682848912883142607690042\"\r\n \"24219022671055626321111109370544217506941658960408\"\r\n \"07198403850962455444362981230987879927244284909188\"\r\n \"84580156166097919133875499200524063689912560717606\"\r\n \"05886116467109405077541002256983155200055935729725\"\r\n \"71636269561882670428252483600823257530420752963450\" )\r\n \r\n #defined items\r\n greatest_prod = 1\r\n euler_queue = fiveQueue()\r\n \r\n #code\r\n for numIter in NUMSTRING:\r\n if( euler_queue.push( numIter ) ):\r\n temp_prod = euler_queue.product()\r\n if( temp_prod > greatest_prod ):\r\n greatest_prod = temp_prod\r\n \r\n print \"The greatest product is %d\" % greatest_prod\r\n return",
"def exec(list_req, wb,write,Total):\n ret = None\n\n if write==True:\n for tick in list_req:\n retrieve_score(wb,tick,increase=True,write = write)\n retrieve_score(wb,tick,increase=False,write = write) \n \n else:\n if Total == True:\n ret_inc = retrieve_score(wb,list_req[0],increase=True,write = write)\n ret_score = retrieve_score(wb,list_req[0],increase=False,write = write)\n for tick in list_req[1:]:\n ret_inc = ret_inc.append(retrieve_score(wb,tick,increase=True,write = write))\n ret_score = ret_score.append(retrieve_score(wb,tick,increase=False,write = write))\n \n else:\n ret_inc = []\n ret_score = []\n for tick in list_req[1:]:\n ret_inc.append(retrieve_score(wb,tick,increase=True,write = write))\n ret_score.append(retrieve_score(wb,tick,increase=False,write = write))\n\n\n ret = (ret_score,ret_inc)\n\n \n return ret",
"def write_reps():\n global maxcount\n\n # Process the next set.\n for count, req in enumerate(req_queue):\n\n rep = {}\n\n if req['type'] == 'check':\n\n if req['body']['type'] == 'standard':\n job_set = standard_job_set(req['body']['msg'])\n run_num = req['body']['msg']['run']\n rep['result'] = check_job_set(run_num, job_set)\n\n else:\n rep['result'] = False\n\n if req['type'] == 'var':\n\n if req['body'] == 'nworkers':\n rep['result'] = nworkers\n\n elif req['body'] == 'njobs':\n rep['result'] = len(job_queue)\n\n else:\n rep['result'] = None\n\n if req['type'] == 'status':\n\n status = ['gm2-nmr-crunchd is running as process %i' % os.getpid()]\n jobs = ' '.join(['(%s, %s)' % (w[1], w[2]['name']) for w in workers])\n status.append(' running jobs: %s' % jobs)\n status.append(' queue has %i jobs' % len(job_queue))\n\n req['result'] = '\\n'.join(status)\n\n try:\n status_sck.send_json(rep)\n\n except(zmq.error.ZMQError):\n pass\n\n req_queue.remove(req)\n\n if count > maxcount:\n break",
"def _worker_main(self, task_queue, data_queue):\r\n while True:\r\n task = task_queue.get()\r\n sample = self._task_func(task)\r\n if sample is None:\r\n continue\r\n data_queue.put(sample)",
"def worker(num_loops, cnt):\t\n\n\tglobal mutex\n\n\tfor i in range(num_loops):\n\t\ttotal = 0\n\t\tinside =0\n\n\t\tfor j in range(1000):\n\t\t\tx = random.random()\n\t\t\ty = random.random()\n\n\t\t\tif (x*x + y*y) <= 1:\n\t\t\t\t\tinside += 1\n\n\t\t\ttotal += 1\n\n\t\tmutex.acquire()\n\t\tcnt.add(total, inside)\n\t\tmutex.release()",
"def check_prime_worker(job_queue):\n while True:\n # your code here\n # 1. get next available number from queue\n try:\n number = job_queue.get(block=False)\n print(f\"Process {current_process()} checks number {number}\")\n except Empty:\n break\n\n # 2. print the number and whether it\n # is prime or not, use is_prime()\n if is_prime(number):\n print(f\"{number} is prime\")\n else:\n print(f\"{number} is not prime\")\n\n # 3. use try/except to catch Empty exception\n # and quit the loop if no number remains in queue\n # done in step 1",
"def manager(num_thrds, num_loops):\n\n\tmutex.acquire()\n\tcnt.reset()\n\tmutex.release()\n\n\t# initialize the thread pool\n\tthread_pool = []\n\n\tfor i in range(num_thrds):\n\t\tthrd = threading.Thread(target=worker, args=(num_loops, cnt))\n\t\tthread_pool.append(thrd)\n\n\t# start threads\n\tfor i in range(len(thread_pool)):\n\t\tthread_pool[i].start()\n\n\tfor i in range(len(thread_pool)):\n\t\tthreading.Thread.join(thread_pool[i])\n\n\t#cnt.display()",
"def submit_multiplication():\n body = request.get_json(force=True)\n task_id = uuid.uuid4() # new unique id\n queue.enqueue(\n slow_multiply,\n task_id, body['x'], body['y']\n )\n return jsonify({'task_id': str(task_id)}), 202",
"def worker(queue, run):\n for args in iter(queue.get, None):\n try:\n run(*args)\n except Exception as e: # catch exceptions to avoid exiting the thread prematurely\n print('{} failed: {}'.format(args, e), file=sys.stderr)",
"def run_sc(no_prods, prev_ledg_update, list_of_workers, no_prod):\n \n list_of_rands = []\n\n for worker_info in reversed(list_of_workers):\n print(worker_info[0])\n if check_fees(worker_info[3]) == True:\n print(\"Worker \", worker_info[0], \"paid their fees\")\n\n elif check_fees(worker_info[3]) == False:\n \n print(\"Worker \", worker_info[0], \"did not pay their fees\")\n list_of_workers.remove(worker_info)\n \n continue \n \n if check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == True:\n print(\"Worker \", worker_info[0], \"has a well formed random\")\n \n\n elif check_corr_rando(worker_info[1], worker_info[2], prev_ledg_update) == False:\n print(\"Worker \", worker_info[0], \"failed to produce a well formed random\")\n list_of_workers.remove(worker_info)\n\n continue\n \n\n list_of_rands.append(worker_info[1])\n\n global_rand = gen_big_rand(list_of_rands)\n\n if global_rand == 0:\n print(\"Something went wrong global_rand was 0\")\n\n dist_list = get_dist_from_big_rand(global_rand, list_of_workers) \n PIDs = find_prod_ids(dist_list, no_prod)\n\n for producer in PIDs:\n print (\"Worker -->\", producer, \"has been selected as a producer for this cycle\")",
"def __init__(self, mk_func, *args,\n processes = 1,\n verbose = False,\n queue = SimpleQueue,\n pass_pid = False):\n # Create basic queues:\n todo, done = SimpleQueue (), SimpleQueue ()\n \n pool_size = processes if processes > 0 else max (1, cpu_count () - 1)\n pool = tuple (Process (target = worker_process,\n args = (pid, verbose, mk_func, todo, done,) + args,\n kwargs = dict (pass_pid = pass_pid))\n for pid in range (pool_size))\n\n self.verbose = verbose\n self.todo, self.done, self.pool = todo, done, pool",
"def find_trip_based_paths_process_worker(iteration, worker_num, input_network_dir, input_demand_dir,\n output_dir, todo_pathset_queue, done_queue, hyperpath, bump_wait_df, stop_times_df):\n worker_str = \"_worker%02d\" % worker_num\n\n from .FastTrips import FastTrips\n setupLogging(infoLogFilename = None,\n debugLogFilename = os.path.join(output_dir, FastTrips.DEBUG_LOG % worker_str), \n logToConsole = False,\n append = True if iteration > 1 else False)\n FastTripsLogger.info(\"Iteration %d Worker %2d starting\" % (iteration, worker_num))\n\n # the child process doesn't have these set to read them\n Assignment.read_configuration(override_input_network_dir=output_dir,\n override_input_demand_dir=input_demand_dir,\n config_file=Assignment.CONFIGURATION_OUTPUT_FILE)\n\n # this passes those read parameters and the stop times to the C++ extension\n Assignment.initialize_fasttrips_extension(worker_num, output_dir, stop_times_df)\n\n # the extension has it now, so we're done\n stop_times_df = None\n\n if iteration > 1:\n Assignment.set_fasttrips_bump_wait(bump_wait_df)\n\n while True:\n # go through my queue -- check if we're done\n todo = todo_pathset_queue.get()\n if todo == 'DONE':\n done_queue.put( (worker_num, 'DONE') )\n FastTripsLogger.debug(\"Received DONE from the todo_pathset_queue\")\n return\n\n # do the work\n pathset = todo\n\n FastTripsLogger.info(\"Processing person %20s path %d\" % (pathset.person_id, pathset.trip_list_id_num))\n # communicate it to the parent\n done_queue.put( (worker_num, \"STARTING\", pathset.person_id, pathset.trip_list_id_num ))\n\n trace_person = False\n if pathset.person_id in Assignment.TRACE_PERSON_IDS:\n FastTripsLogger.debug(\"Tracing assignment of person %s\" % pathset.person_id)\n trace_person = True\n\n try:\n (pathdict, perf_dict) = Assignment.find_trip_based_pathset(iteration, pathset, hyperpath, trace=trace_person)\n done_queue.put( (worker_num, \"COMPLETED\", pathset.trip_list_id_num, pathdict, perf_dict) )\n except:\n FastTripsLogger.exception(\"Exception\")\n # call it a day\n done_queue.put( (worker_num, \"EXCEPTION\", str(sys.exc_info()) ) )\n return",
"def _parallel_fit_eval(process_number, data, clfs, evaluators, scoring):\n clfs[process_number].fit(data, process_number)\n\n results = dict()\n\n results[\"train_score_\" + str(process_number)] = (clfs[process_number].returnTrainingScores())\n\n evaluators[process_number].evaluate(data, process_number, scoring)\n\n for key in evaluators[process_number].results.keys():\n results[key + '_test_score_' + str(process_number)] = evaluators[process_number].results[key]\n\n return results",
"def submit(num):\n task = FactorizationTask(num)\n q.put(task)\n return task.id",
"def _passing_args_impl(self, pool_class_factory):\n DELTA = 12\n ITERATIONS = 100\n pool = pool_class_factory()\n\n pool.start(CoeffMultiplierWorker, {'coeff': DELTA})\n for i in range(ITERATIONS):\n pool.ventilate(message='Vent data {}'.format(i), value=i)\n\n all_results = [pool.get_results() for _ in range(ITERATIONS)]\n self.assertEqual({DELTA}, set(np.diff(sorted(all_results))))\n\n pool.stop()\n pool.join()",
"def iterate_mproc_map(wrap_func, iterate_vals, nb_workers=CPU_COUNT, desc='', ordered=True):\n iterate_vals = list(iterate_vals)\n nb_workers = 1 if not nb_workers else int(nb_workers)\n nb_workers = CPU_COUNT if nb_workers < 0 else nb_workers\n\n if desc is not None:\n pbar = tqdm.tqdm(total=len(iterate_vals), desc=str('%r @%i-threads' % (desc, nb_workers)))\n else:\n pbar = None\n\n if nb_workers > 1:\n logging.debug('perform parallel in %i threads', nb_workers)\n # Standard mproc.Pool created a demon processes which can be called\n # inside its children, cascade or multiprocessing\n # https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic\n\n # pool = mproc.Pool(nb_workers)\n # pool = NonDaemonPool(nb_workers)\n pool = ProcessPool(nb_workers)\n # pool = Pool(nb_workers)\n mapping = pool.imap if ordered else pool.uimap\n else:\n logging.debug('perform sequential')\n pool = None\n mapping = map\n\n for out in mapping(wrap_func, iterate_vals):\n pbar.update() if pbar else None\n yield out\n\n if pool:\n pool.close()\n pool.join()\n pool.clear()\n\n pbar.close() if pbar else None",
"def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):\n with Timer('\\t{0} ({1}) completed in'.format(process_name, str(func))):\n pool = Pool(cpus)\n vals = pool.map(func, iterable)\n pool.close()\n return vals",
"async def run_mpc(self) -> Dict[str, Dict[Metric, int]]:\n pass",
"def receive_workers_output(node_request_map, results_list, free_nodes, command, idle_nodes):\n\n if dist.get_backend() == \"nccl\": # Async\n for node, req in node_request_map:\n if req.is_completed():\n result = build_metrics_dict(node) if command == COMMAND_TESTVAL else build_grads_dict(node)\n results_list.append(result)\n free_nodes.append(node)\n node_request_map.remove((node,req))\n print_rank(f\"Finished releasing the nodes {free_nodes}\", loglevel=logging.DEBUG)\n else: # Sync\n print_rank(f\"Waiting for a workers\", loglevel=logging.DEBUG)\n gather_objects = [(None,None,None) for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\" All workers have finished ... taking the remaining clients {len(output)}\", loglevel=logging.DEBUG)\n output = [e for i,e in enumerate(output) if i not in idle_nodes ] # Cleanup for idle workers\n results_list = results_list + output[1:]\n free_nodes = list(range(1, size()))\n \n return node_request_map, results_list, free_nodes",
"def runWork(self,benches):\r\n\r\n self.finished = 0\r\n\r\n# callbacks = []\r\n# callbacks.append(self.fetchBenchResults)\r\n# callbacks.append(self.setToWork)\r\n# callbacks.append(self.fetchResults)\r\n\r\n self.log('Signal','deferring runBenches to thread','work')\r\n\r\n# for fetcher in fetchers:\r\n# threads.deferToThread(fetcher())\r\n# self.log('Signal','Started fetcher: %s' % repr(fetcher),'runWork')\r\n\r\n# self.runBenches(callbacks)\r\n self.setToWork(benches)",
"def add(x, y, Q):\n # the first worker adds their shares together\n # the second worker adds their shares together\n # the third worker adds their shares together\n\n shares = [(x[0] + y[0]) % Q,\n (x[1] + y[1]) % Q,\n (x[2] + y[2]) % Q]\n\n return shares",
"def multiproduce():\r\n if not producers:\r\n return []\r\n\r\n callback = producers.pop(0)\r\n\r\n if isinstance(callback, types.FunctionType):\r\n try:\r\n value = callback()\r\n except RPCError as inst:\r\n value = {'faultCode':inst.code, 'faultString':inst.text}\r\n\r\n if value is NOT_DONE_YET:\r\n # push it back in the front of the queue because we\r\n # need to finish the calls in requested order\r\n producers.insert(0, callback)\r\n return NOT_DONE_YET\r\n else:\r\n value = callback\r\n\r\n results.append(value)\r\n\r\n if producers:\r\n # only finish when all producers are finished\r\n return NOT_DONE_YET\r\n\r\n return results",
"def parallel(\n fn,\n workers=10,\n return_results=True,\n identifiers=None,\n args=None,\n kwargs=None,\n):\n # Check user input\n if args is not None and kwargs is not None:\n err = 'Amount of args must match those of kwargs'\n assert len(args) == len(kwargs), err\n\n if (args is not None or kwargs is not None) and identifiers is not None:\n err = 'Amount of identifier must match those of kw/args'\n n_args = len(args) if args is not None else len(kwargs)\n assert n_args == len(identifiers), err\n\n # Preprocessing for arguments lists\n identifiers = [] if identifiers is None else identifiers\n args = [] if args is None else args\n kwargs = [] if kwargs is None else kwargs\n\n if len(args) == 0 and len(kwargs) == 0:\n args = [None]\n kwargs = [None]\n else:\n if len(args) == 0:\n args = [[] for _ in range(len(kwargs))]\n if len(kwargs) == 0:\n kwargs = [dict() for _ in range(len(args))]\n\n # Initialize all the futures\n executor = futures.ThreadPoolExecutor(max_workers=workers)\n _futures = [\n executor.submit(fn, *args[i], **kwargs[i])\n for i in range(len(args))\n ]\n\n # Return only futures when requested\n if not return_results:\n return _futures\n\n # Block until we received all results\n if len(identifiers) > 0:\n results = {}\n else:\n results = []\n\n for i, future in enumerate(_futures):\n result = future.result()\n\n if len(identifiers) > 0:\n results[identifiers[i]] = result\n else:\n results.append(result)\n\n return results",
"def simplify_functions_parallel(expressions):\n manager = multiprocessing.Manager()\n return_dict = manager.dict()\n\n processes = []\n factorised_expressions = []\n for index, item in enumerate(expressions):\n # print(\"item \", item, \"index\", index)\n processes.append(multiprocessing.Process(target=bar, args=(item, index, return_dict)))\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n ## Showing the difference in an example:\n # print(return_dict)\n for i in sorted(return_dict.keys()):\n factorised_expressions.append(return_dict[i])\n return factorised_expressions",
"def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)",
"def main(order_count):\n for id in range(MAX_ORDERS):\n while active_count() > MAX_QUEUE:\n print(\"..All permitted threads running: waiting\")\n sleep(LOOP_TIMEOUT)\n print(\"..Finished waiting\")\n o = Thread(target=order_gen, kwargs={\"id\": id})\n o.start()",
"def worker(problemDir, configDir, work_queue, done_queue):\n while True:\n problemID, configID = work_queue.get()\n print(\"received\")\n if problemID == STOP:\n # Poison pill\n print(\"Exiting worker process.\")\n done_queue.put(STOP)\n break\n testproblemList = TestProblem.get_all_from_file(problemID, problemDir)\n solverconfig = SolverConfiguration.from_file(configID, configDir)\n for testproblem in testproblemList:\n test_instance = TestInstance(testproblem, solverconfig)\n result = test_instance.run()\n done_queue.put(result)\n return",
"def start(self):\n\n # Producers.\n prod_objs = []\n prod_pipes = []\n\n for id in range(0, len(self.prod_kwargs)):\n args = self.prod_kwargs[id]\n\n if self.prod_use_threads:\n prod_obj = ProducerThread(self.prod_func, args, self.queue)\n else:\n prod_pipes.append(Pipe())\n prod_obj = ProducerProcess(self.prod_func, args, self.queue, prod_pipes[id][0])\n\n prod_objs.append(prod_obj)\n prod_obj.start()\n\n # Consumers.\n cons_objs = []\n cons_pipes = []\n\n for id in range(0, len(self.cons_kwargs)):\n args = self.cons_kwargs[id]\n\n if self.cons_use_threads:\n cons_obj = ConsumerThread(self.cons_func, args, self.queue)\n else:\n cons_pipes.append(Pipe())\n cons_obj = ConsumerProcess(self.cons_func, args, self.queue, cons_pipes[id][0])\n\n cons_objs.append(cons_obj)\n cons_obj.start()\n\n # Join on the producers.\n for prod_obj in prod_objs:\n prod_obj.join()\n\n # Shut down theconsumers.\n for cons_obj in cons_objs:\n cons_obj.shutdown()\n\n # Join on the consumers.\n for cons_obj in cons_objs:\n cons_obj.join()\n\n # Collect result values.\n result = {\"producers\": [],\n \"consumers\": []}\n\n for counter, prod_obj in enumerate(prod_objs):\n if isinstance(prod_obj, ProducerProcess):\n res = prod_pipes[counter][1].recv()\n else:\n res = prod_obj.result\n\n result[\"producers\"] = result[\"producers\"] + [res]\n\n for counter, cons_obj in enumerate(cons_objs):\n if isinstance(cons_obj, ConsumerProcess):\n res = cons_pipes[counter][1].recv()\n else:\n res = cons_obj.result\n\n result[\"consumers\"] = result[\"consumers\"] + [res]\n\n return result",
"def parallel_worker(jobs_queue):\n jobs = (functools.partial(get_and_format, **job) for job in jobs_queue)\n res = helpers.run_chunks_parallel(jobs, chunksize = 20, workers = 20)\n return res",
"def serial_worker(jobs_queue):\n return (get_and_format(**job) for job in jobs_queue)",
"def workflow(func, args, n_jobs=4, unit=\"sample(s)\"):\n if not isinstance(args, Sized):\n ValueError(\"`args` must have a length.\")\n\n results = []\n processes = []\n q = Queue()\n\n for arg in tqdm(args, unit=unit):\n p = Process(target=_process, args=(func, arg, q))\n p.start()\n processes.append(p)\n if len(processes) >= n_jobs:\n results, processes = _consume(processes, q, results, n_jobs)\n results, processes = _consume(processes, q, results, n_jobs)\n return results",
"def primes():\r\n try:\r\n args = request.args\r\n start_num, end_num = validate_request(args)\r\n # cache key\r\n key = f'primes:{start_num}:{end_num}'\r\n rv = cache.get(key)\r\n if rv is None: # not in cache\r\n job = get_primes_list.queue(start_num, end_num)\r\n print(job.get_id())\r\n cache.set(key, job.get_id(), timeout=3600)\r\n return jsonify(job.get_id()), 200\r\n else:\r\n return jsonify(rv), 200\r\n except Exception as e:\r\n raise InvalidUsage(\"Error Processing request {}\".format(e))",
"def RUN(numTrials, rateMap, numPhotons=48, angularSize=10.0, outputSize=300, mcList='MCOut.pickle',HESS=False, Sig = -1 ,numProcs = 10):\r\n print 'Beginning MC Series\\nProgress'\r\n \r\n import FermiPSF, ParseFermi\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n \r\n partial_MC_THREAD = partial( MC_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize,Sig = Sig)\r\n mcOut = p.map(partial_MC_THREAD, mcOut)\r\n \r\n# for i in range(numTrials): \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS= HESS,angularSize = angularSize)\r\n# # Compute number of source photons\r\n# numMC = numPhotons - len(background[0])\r\n# # Run MC for source photons \r\n# data = MC(map,numMC,angularSize,outputSize,PSFTableFront, PSFTableBack,HESS=HESS)\r\n# # Append data\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut",
"def worker(inQueue, outQueue):\n for i in iter(inQueue.get, 'STOP'):\n\n status = run(i)\n\n outQueue.put(( status ))",
"def getResults(workers):\n results = []\n for worker in workers:\n results += worker.getResults()\n \n return results",
"def parallel_map(work_func, *sequences, **kwargs):\n # kwargs\n cores = kwargs.get('cores', None)\n ordered = kwargs.get('ordered', False)\n buffer_factor = kwargs.get('buffer_factor', 2.0)\n use_multiprocessing = kwargs.get('use_multiprocessing', False)\n heart_beat = kwargs.get('heart_beat', 0.001)\n fill_activate = 'fill_void' in kwargs\n fill_value = kwargs.get('fill_void', None)\n name = kwargs.get('name', None)\n\n if name:\n log = logging.getLogger(__name__ + '[%s]' % name)\n else:\n log = logging.getLogger(__name__)\n\n if heart_beat <= 0:\n raise ValueError(\"heart_beat must be >0.\")\n\n if cores is None or cores <= 0:\n cores = multiprocessing.cpu_count()\n log.debug(\"Using all cores (%d)\", cores)\n else:\n log.debug(\"Only using %d cores\", cores)\n\n # Choose parallel types\n if use_multiprocessing:\n queue_t = multiprocessing.Queue\n worker_t = _WorkerProcess\n else:\n queue_t = queue.Queue\n worker_t = _WorkerThread\n\n queue_work = queue_t(int(cores * buffer_factor))\n queue_results = queue_t(int(cores * buffer_factor))\n\n log.log(1, \"Constructing worker processes\")\n workers = [worker_t(name, i, work_func, queue_work, queue_results,\n heart_beat)\n for i in range(cores)]\n\n log.log(1, \"Constructing feeder thread\")\n feeder_thread = _FeedQueueThread(name, sequences, queue_work,\n len(workers), heart_beat, fill_activate,\n fill_value)\n\n return ParallelResultsIterator(name, ordered, use_multiprocessing,\n heart_beat, queue_work,\n queue_results, feeder_thread, workers)",
"def _process_run(queue: Queue, func: Callable[[Any], Any] = None,\n *args, **kwargs):\n queue.put(func(*args, **kwargs))",
"def _spawn_workers(self):\n self._event.set()\n self._workers = [ClassifierWorker(self._event, self._queue, self._results) for x in range(self._NUM_WORKERS)]\n [worker.start() for worker in self._workers]",
"def run_numbers():\n if run_nos:\n # Get task names\n tasks = []\n for rn in dcm_dict.keys():\n tasks.append(dcm_dict[rn]['task_name'])\n # Assign run numbers\n for tsk in set(tasks):\n n_runs = sum(i == tsk for i in tasks)\n if n_runs == 1:\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n # Add in the 'task' prefix required by BIDS format if missing from name\n if not tsk[0:4] == 'task':\n dcm_dict[rn]['out_name'] = 'task-'+tsk+'_run-01'\n else:\n dcm_dict[rn]['out_name'] = tsk+'_run-01'\n elif n_runs > 1:\n task_runs = []\n run_times = []\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n task_runs.append(rn)\n run_times.append(dcm_dict[rn]['start_time'].timestamp())\n idx_order = sorted(range(len(run_times)), key=lambda k: run_times[k])\n for i in idx_order:\n if not tsk[0:4] == 'task':\n dcm_dict[task_runs[i]]['out_name'] = 'task-'+tsk+'_run-0'+str(i+1)\n else:\n dcm_dict[task_runs[i]]['out_name'] = tsk+'_run-0'+str(i+1)\n else:\n for rn in dcm_dict.keys():\n dcm_dict[rn]['out_name'] = dcm_dict[rn]['task_name']",
"def start_workers(self, window_size):\n input_q = mp.Queue(maxsize=self.processes)\n output_q = mp.Queue()\n workers = []\n for _ in range(self.processes):\n accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)\n worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)\n worker.start()\n workers.append(worker)\n\n return workers, input_q, output_q",
"def process():\n pass",
"def run(self):\n numbers = range(5)\n global queue\n while True:\n condition.acquire()\n if len(queue) == MAX_ITEMS:\n print(\"Queue is Full, producer is in Waiting state\")\n condition.wait() # This actually releases the lock and notifies other threads waiting on it - consumer in this case\n # if queue has space\n print(\"Space in Queue, Producer is adding numbers to queue\")\n number = random.choice(numbers)\n queue.append(number)\n print(\"Produced {}\".format(number))\n condition.notify()\n condition.release()\n time.sleep(random.random())",
"def run(self):\n metrics = Metrics()\n\n count = 0\n while not self.queue.empty():\n count += 1\n try:\n key = self.queue.get(timeout=1)\n except queue.Empty:\n continue\n\n try:\n self.copy_key(key)\n metrics.count()\n except Exception as err:\n self.log.error(f\"Error for key '{key}'\")\n self.log.debug(err, exc_info=True)\n metrics.error()\n\n self.log.info(f\"Thread completed. {count} keys processed.\")",
"def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())",
"def _worker(self, results):\n keys = {\n \"test-certificate-verify\": {\n \"MD5 forced\": 2,\n \"TLSv1.1 signature in TLSv1.2 Certificate Verify\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-sig-algs\": {\"MD5 first\": 2, \"MITIGATION\": \"SLOTH\"},\n \"test-clienthello-md5\": {\n \"only-md5-rsa-signature_algorithm\": 1,\n \"unknown-signature_algorithm-numbers\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-tls13-pkcs-signature\": {\n \"rsa_pkcs1_md5 signature\": 1,\n \"MITIGATION\": \"SLOTH_MD5_SIGNATURE_TLS_1_3\",\n },\n }\n return self._obtain_results(results, keys)",
"def calculate_pi_processes(nb_processes, nb_trials):\n # Launch a determined number (nb_processes) processes\n # This approach allow to manipulate process individually\n # However, results has to be managed by processes themselves\n #\n # In order to globally manage result\n # There's a necessity to use some shared resource\n # And because of its shared behaviour, Locks must be used in order to\n # avoid concurrrency problems (If multiple processes trying to write in\n # the variable at the same time, only data from one will be saved!)\n #\n # WARNING: Lock / Write / Unlock process is time-consuming.\n # This should be used only when required.\n lock = Lock()\n res = Value('i', 0)\n processes = []\n for i in range(nb_processes):\n p = Process(target=monte_carlo_trials,\n args=(round(nb_trials / nb_processes), res, lock))\n processes.append(p)\n\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n return estimated_pi(res.value, nb_trials)",
"def dispatch(self, queue):\n context = zmq.Context()\n socket = noBlockREQ(context)\n \n seedsQ1 = Queue()\n seedsQ2 = Queue()\n for address in self.seeds:\n seedsQ1.put(address)\n\n connectT = Thread(target=connectToSeeds, name=\"Connect to Seeds\", args=(socket, seedsQ1))\n connectT.start()\n\n toDisconnectQ = Queue()\n disconnectT = Thread(target=disconnectToSeeds, name=\"Disconnect to Seeds\", args=(socket, toDisconnectQ))\n disconnectT.start()\n\n pFindSeeds = Process(target=findSeeds, name=\"Find Seeds\", args=(set(self.seeds), [seedsQ1], [toDisconnectQ], log, 2000, 10, seedsQ2))\n pFindSeeds.start()\n\n pInput = Process(target=getSeedFromFile, name=\"Get seed from file\", args=(seedsQ1, seedsQ2))\n pInput.start()\n\n graph = {}\n depth = 1\n data = {}\n url_mapper = {url:f\"url_{i}\" for i, url in enumerate(self.urls)}\n \n src = set()\n while True: \n new_data = {}\n while len(self.urls):\n try:\n url = self.urls[0]\n self.urls.pop(0)\n self.urls.append(url)\n with counterSocketReq:\n socket.send_json((\"URL\", self.uuid, url))\n log.debug(f\"Send {url}\", \"dispatch\")\n response = socket.recv_pyobj()\n assert isinstance(response, tuple), f\"Bad response, expected <tuple> find {type(response)}\"\n assert len(response) == 2, \"bad response size\"\n assert response[0] == 'RESPONSE', \"Unexpected response format\"\n _, package = response\n log.debug(f\"Received a package with size: {len(package)}\", \"dispatch\")\n for recv_url, html in package.items():\n try:\n idx = self.urls.index(recv_url)\n log.info(f\"{recv_url} {GREEN}OK{RESET}\", \"dispatch\")\n new_data[recv_url] = html\n self.urls.pop(idx)\n except ValueError:\n log.debug(f'Unnecesary {recv_url}', 'dispatch')\n except AssertionError as e:\n log.error(e, \"dispatch\")\n except zmq.error.Again as e:\n log.debug(e, \"dispatch\")\n except Exception as e:\n log.error(e, \"dispatch\")\n time.sleep(0.8)\n \n log.info(f'Depth {depth} done', 'dispatch')\n for url, html in new_data.items():\n graph[url] = set()\n try:\n text = html.decode()\n soup = BeautifulSoup(html, 'html.parser')\n tags = soup.find_all(valid_tags)\n new_urls = [['src', 'href'][tag.has_attr('href')] for tag in tags]\n changes = []\n for i, attr in enumerate(new_urls):\n url_dir = urljoin(url, tags[i][attr])\n graph[url].add(url_dir)\n if url_dir not in url_mapper:\n url_mapper[url_dir] = f'url_{len(url_mapper)}'\n changes.append((tags[i][attr], url_mapper[url_dir]))\n if attr == 'src' or tags[i].name == 'link':\n src.add(url_dir)\n continue\n self.urls.append(url_dir)\n html = change_html(text, changes).encode()\n except UnicodeDecodeError:\n log.debug(f'{url} is not decodeable', 'dispatch')\n except: # BeautifulSoup strange exceptions related with his's logger\n pass\n new_data[url] = html\n data.update(new_data)\n self.urls = set(self.urls)\n self.urls.difference_update(self.old)\n self.old.update(self.urls)\n self.urls = list(self.urls)\n \n if depth > self.depth:\n break\n if depth == self.depth:\n src.difference_update(self.old)\n self.old.update(src)\n self.urls = list(src)\n depth += 1\n log.info(f\"Number of URLs to be requested for download: {RED}{len(self.urls)}{RESET}\", \"dispatch\")\n \n log.info(f\"Starting to write data\", \"dispatch\")\n for i, url in enumerate(self.originals):\n try:\n res = HtmlResponse(url=url, body=data[url], encoding='utf8')\n base = res.css('title::text')[0].get()\n except:\n base = f\"web_page_{i}\"\n try:\n os.makedirs(f'downloads/{base}-data')\n except:\n pass\n writer(f'downloads/{base}-data', url, set(), data, 
url_mapper, graph) \n \n html = data[url]\n if len(graph[url]) > 0:\n text = data[url].decode()\n changes = []\n for dep in graph[url]:\n name = url_mapper[dep]\n changes.append((name, f'{base}-data/{name}'))\n html = change_html(text, changes).encode()\n with open(f'downloads/{base}', 'wb') as fd:\n fd.write(html)\n \n log.info(f\"Dispatcher:{self.uuid} has completed its URLs successfully\", \"dispatch\")\n log.debug(f\"Dispatcher:{self.uuid} disconnecting from system\", \"dispatch\")\n #disconnect\n\n queue.put(True)\n pFindSeeds.terminate()\n pInput.terminate()",
"def rq_worker():\n setup_experiment(log)\n with Connection(db.redis_conn):\n # right now we care about low queue for bots\n worker = Worker(\"low\")\n worker.work()",
"def createProcesses(arguments, file_list, start, end):\r\n NUM_PROCESS = 1 # Check variables\r\n NUM_FILE = len(file_list)\r\n NUM_ARGS = len(arguments)\r\n\r\n proc_info = {}\r\n\r\n sem.acquire()\r\n processNumber.value += 1\r\n sem.release()\r\n\r\n for file in file_list:\r\n statinfo = os.stat(file)\r\n size = str(statinfo.st_size)\r\n proc_info.update({os.getpid():[file, size[:-1]]})\r\n\r\n result_list = [] # List that keeps all the results for each file\r\n PSTRING = \"\" # String that will be printed with the result of each read file\r\n\r\n date_beg = datetime.datetime.now()\r\n\r\n if \"-w\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(countWords(file, start, end)))\r\n else:\r\n result_list.append(str(countWords(file, None, None)))\r\n\r\n elif \"-l\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(countLines(file, start, end)))\r\n else:\r\n result_list.append(str(countLines(file, None, None)))\r\n\r\n if \"-L\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(biggestLine(file, start, end)))\r\n else:\r\n result_list.append(str(biggestLine(file, None, None)))\r\n\r\n elif \"-c\" in arguments:\r\n if nProcessesBiggerThanFiles:\r\n result_list.append(str(countCharacters(file, start, end)))\r\n else:\r\n result_list.append(str(countCharacters(file, None, None)))\r\n\r\n if int(NUM_PROCESS) < int(NUM_FILE):\r\n if NUM_FILE == 1:\r\n if NUM_ARGS > 1:\r\n for number in range(len(result_list)):\r\n division = int(number) % 2\r\n\r\n if division == 0:\r\n total[0] += int(result_list[number])\r\n else:\r\n if total[1] > int(result_list[number]):\r\n continue\r\n else:\r\n total[1] = int(result_list[number])\r\n\r\n else:\r\n if NUM_ARGS > 1:\r\n total[0] += int(result_list[-2])\r\n result_number = int(result_list[-1])\r\n\r\n if total[1] > int(result_number):\r\n pass\r\n else:\r\n total[1] = int(result_number)\r\n\r\n else:\r\n total[0] += int(result_list[-1])\r\n\r\n elif int(NUM_PROCESS) == int(NUM_FILE):\r\n if NUM_ARGS > 1:\r\n for number in range(len(result_list)):\r\n division = int(number) % 2\r\n\r\n if division == 0:\r\n total[0] += int(result_list[number])\r\n else:\r\n if total[1] > int(result_list[number]):\r\n continue\r\n else:\r\n total[1] = int(result_list[number])\r\n\r\n else:\r\n for number in result_list:\r\n total[0] += int(number)\r\n\r\n for number in result_list:\r\n PSTRING += str(number) + \" \"\r\n\r\n if \"-l\" in arguments:\r\n proc_info[os.getpid()].append([\"linhas\", result_list[0]])\r\n elif \"-c\" in arguments:\r\n proc_info[os.getpid()].append([\"caracteres\", result_list[0]])\r\n elif \"-w\" in arguments:\r\n proc_info[os.getpid()].append([\"palavras\", result_list[0]])\r\n\r\n PSTRING += file\r\n print(PSTRING)\r\n\r\n date_end = datetime.datetime.now()\r\n duration = date_end - date_beg\r\n hours, minutes, seconds, microseconds = convert_timedelta(duration)\r\n time_str = str(hours) + \":\" + str(minutes) + \":\" + str(seconds) + \":\" + str(microseconds)\r\n\r\n proc_info[os.getpid()].append(time_str)\r\n\r\n queue_proc.put(proc_info)",
"def rerank_mp(x2ys, x2cnt, x2xs, width, n_trans, num_workers):\n from multiprocessing import Pool\n\n shared_inputs = x2ys, x2cnt, x2xs, width, n_trans\n print(f\"Entering multiprocessing with {num_workers} workers...\"\n f\" (#words={len(x2ys)})\")\n with Pool(num_workers) as p:\n x2ys_cpe = dict(p.starmap(\n _rerank_mp,\n zip(x2ys.items(), it.repeat(shared_inputs)),\n ))\n return x2ys_cpe",
"def get_worker_processes(f, args, nproc=None, allow_scalar=False):\n\n import multiprocessing\n num_procs = get_num_processors(nproc)\n\n workers = [\n multiprocessing.Process(target=f, args=args) for _ in range(num_procs)\n ]\n if allow_scalar and len(workers) == 1:\n return workers[0]\n else:\n return workers",
"def trainer_func(w2t_m_queue, events, t2w_d_manager):\n average_iteration_time = 0\n trainer_start_time = time.time()\n trainer_nn = create_neural_network()\n t2w_d_manager.append(trainer_nn.get_weights())\n for i in range(ITERATIONS):\n # Wait for all workers to send their ready signals\n for worker_num in range(NUM_WORKERS):\n events[worker_num].wait()\n events[worker_num].clear()\n iteration_time = time.time()\n data = []\n # Dequeue batch of data from message queue and process it\n for _ in range(NUM_WORKERS):\n data_point = w2t_m_queue.get()\n data.append(data_point[0])\n message_to_share = do_something_with_data(data, trainer_nn)\n # Put weight data into manager\n t2w_d_manager[0] = message_to_share\n # Signal to workers they are allow to proceed\n events[\"Workers_can_proceed\"].set()\n average_iteration_time += time.time() - iteration_time\n\n average_iteration_time /= ITERATIONS*NUM_WORKERS\n print(\"-------------------------------------\")\n print(\"Trainer total dequeue time: \" + str.format('{0:.6f}', (time.time() - trainer_start_time)) + \"ms\")\n print(\"Trainer average dequeue time: \" + str.format('{0:.6f}', average_iteration_time*1000) + \"ms\")\n print(\"-------------------------------------\")",
"def run(self, worker, evaluator=None):\n pass",
"def run_multiprocessing(args, function):\n vcf_fn = args.data_file\n num_processes = args.num_threads\n if num_processes > 1:\n # Split the VCF into chunks\n callset = allel.read_vcf(vcf_fn, fields=[\"variants/CHROM\", \"variants/POS\"])\n pos_list = callset[\"variants/POS\"]\n chroms = callset[\"variants/CHROM\"]\n assert np.all(chroms == chroms[0])\n chrom = str(chroms[0])\n\n def get_chromosome_chunks(lst, num_processes):\n length = len(lst)\n n = math.ceil(length / num_processes)\n chunks = list()\n for index, i in enumerate(range(0, length, n)):\n if index != num_processes - 1:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[i + n])),\n )\n )\n else:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[-1])),\n )\n )\n return chunks\n\n chunks = get_chromosome_chunks(pos_list, num_processes)\n chunks_iter = iter(chunks)\n reports = list()\n completed_files = list()\n with multiprocessing.Pool(processes=num_processes, maxtasksperchild=10) as pool:\n for index, row in enumerate(pool.map(function, chunks_iter)):\n reports.append(row)\n print(\n \"Processed Chunk {}: {} with {} sites added.\".format(\n index, chunks[index][2], row[\"num_sites\"]\n )\n )\n if row[\"num_sites\"] > 0:\n completed_files.append(index)\n else:\n os.remove(args.output_file + str(index) + \"-lock\")\n\n # Combine reports and print\n master_report = reports[0]\n for report in reports[1:]:\n for var_type, val in report.items():\n master_report[var_type] += val\n print(master_report)\n\n # Combine sampledata files\n filenames = completed_files\n all_samples = []\n for name in filenames:\n all_samples.append(tsinfer.load(args.output_file + str(name)))\n os.remove(args.output_file + str(name))\n\n samples = all_samples[0].copy(args.output_file)\n samples.append_sites(*all_samples[1:])\n samples.finalise()\n assert np.all(np.diff(samples.sites_position[:]) > 0)\n\n else:\n raise ValueError"
] | [
"0.7811158",
"0.7189279",
"0.5911946",
"0.58161217",
"0.5799994",
"0.5654354",
"0.56341344",
"0.5627689",
"0.55830336",
"0.55784506",
"0.5456631",
"0.54565054",
"0.5428179",
"0.53641945",
"0.5327899",
"0.53268",
"0.53215057",
"0.5303377",
"0.527345",
"0.5256457",
"0.52415097",
"0.5232646",
"0.52181774",
"0.52178943",
"0.5192748",
"0.5176073",
"0.5170956",
"0.5165329",
"0.51647747",
"0.51600015",
"0.5144522",
"0.5135352",
"0.51254284",
"0.50946414",
"0.5085383",
"0.50827104",
"0.5076731",
"0.50630164",
"0.50360256",
"0.50356936",
"0.5035149",
"0.5029924",
"0.5025959",
"0.50213045",
"0.50203425",
"0.5018007",
"0.5017",
"0.5015986",
"0.50093645",
"0.5002392",
"0.50004274",
"0.49921387",
"0.4992109",
"0.49912146",
"0.4974509",
"0.49679774",
"0.49539724",
"0.4950555",
"0.49411973",
"0.4938974",
"0.49379537",
"0.4937419",
"0.49326915",
"0.4928268",
"0.49281663",
"0.49269322",
"0.49255338",
"0.4923667",
"0.4919142",
"0.49156475",
"0.49078372",
"0.4905177",
"0.489926",
"0.48923942",
"0.48866144",
"0.48859057",
"0.48772365",
"0.48615602",
"0.485915",
"0.48457444",
"0.48437056",
"0.4840393",
"0.48400727",
"0.4833113",
"0.48327687",
"0.4825394",
"0.48214346",
"0.48206997",
"0.48152265",
"0.48012313",
"0.47998062",
"0.479701",
"0.47927624",
"0.47927567",
"0.47908446",
"0.47900507",
"0.47858873",
"0.478108",
"0.47810048",
"0.4777091"
] | 0.7874289 | 0 |
Convert DottedRef expressions contained inside statemod variables. | def rewrite_statemods(statemods, from_args, base_offsets):
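    # Each statemod is a StateVar(name, init, update) triple; only the update
    # expression is rewritten here (rewrite_refs converts any DottedRef into a
    # positional attribute reference), while name and init pass through unchanged.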
assert all(isinstance(sm, StateVar) for sm in statemods)
return [StateVar(name, init, rewrite_refs(update, from_args, base_offsets))
for name, init, update in statemods] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rewrite_refs(sexpr, from_args, base_offsets):\n\n def rewrite_node(sexpr):\n # Push unboxing into the state variables of distributed aggregates\n if isinstance(sexpr, expression.AggregateExpression):\n if sexpr.is_decomposable():\n ds = sexpr.get_decomposable_state()\n lsms = rewrite_statemods(ds.get_local_statemods(), from_args, base_offsets) # noqa\n rsms = rewrite_statemods(ds.get_remote_statemods(), from_args, base_offsets) # noqa\n\n if lsms or rsms:\n sexpr.set_decomposable_state(\n expression.DecomposableAggregateState(\n ds.get_local_emitters(), lsms,\n ds.get_remote_emitters(), rsms,\n ds.get_finalizer()))\n return sexpr\n\n if not isinstance(sexpr, expression.DottedRef):\n return sexpr\n elif sexpr.table_alias not in from_args:\n raise NoSuchRelationException(sexpr.table_alias)\n else:\n op = from_args[sexpr.table_alias]\n scheme = op.scheme()\n\n debug_info = None\n if not sexpr.field:\n offset = 0\n elif isinstance(sexpr.field, int):\n if sexpr.field >= len(scheme):\n raise ColumnIndexOutOfBounds(str(sexpr))\n offset = sexpr.field\n else:\n assert isinstance(sexpr.field, basestring)\n offset = scheme.getPosition(sexpr.field)\n debug_info = sexpr.field\n\n offset += base_offsets[sexpr.table_alias]\n return expression.UnnamedAttributeRef(offset, debug_info)\n\n def recursive_eval(sexpr):\n \"\"\"Rewrite a node and all its descendents\"\"\"\n newexpr = rewrite_node(sexpr)\n newexpr.apply(recursive_eval)\n return newexpr\n\n return recursive_eval(sexpr)",
"def translate(expr):\n return from_python(ast.parse(expr))",
"def normalise_ref(ref):\n if ref.startswith((\"builtins.\", \"__main__\")):\n return ref\n try:\n mod_name, name = ref.rsplit(\".\", maxsplit=1)\n mod = __import__(mod_name)\n for sub in mod_name.split(\".\")[1:]:\n mod = getattr(mod, sub)\n obj = getattr(mod, name)\n if isinstance(obj, ModuleType):\n return ref\n if getattr(obj, \"__name__\", None) is None:\n return ref\n\n return obj.__module__ + \".\" + obj.__name__\n except Exception:\n pass\n return ref",
"def do_subs(self, e):\n for expr, var in self.items():\n e = e.xreplace({var: expr})\n return e",
"def __replaceArrRefs(self, tnode, replace_table):\n\n if isinstance(tnode, ast.NumLitExp):\n return tnode\n\n elif isinstance(tnode, ast.StringLitExp):\n return tnode\n\n elif isinstance(tnode, ast.IdentExp):\n return tnode\n\n elif isinstance(tnode, ast.ArrayRefExp):\n aref_str = str(tnode)\n if aref_str in replace_table:\n iname = replace_table[aref_str]\n return ast.IdentExp(iname)\n else:\n return tnode\n\n elif isinstance(tnode, ast.FunCallExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n tnode.args = [self.__replaceArrRefs(a, replace_table) for a in tnode.args]\n return tnode\n\n elif isinstance(tnode, ast.UnaryExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.BinOpExp):\n tnode.lhs = self.__replaceArrRefs(tnode.lhs, replace_table)\n tnode.rhs = self.__replaceArrRefs(tnode.rhs, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.ParenthExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.ExpStmt):\n if tnode.exp:\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.CompStmt):\n tnode.stmts = [self.__replaceArrRefs(s, replace_table) for s in tnode.stmts]\n return tnode\n\n elif isinstance(tnode, ast.IfStmt):\n tnode.test = self.__replaceArrRefs(tnode.test, replace_table)\n tnode.true_stmt = self.__replaceArrRefs(tnode.true_stmt, replace_table)\n if tnode.false_stmt:\n tnode.false_stmt = self.__replaceArrRefs(\n tnode.false_stmt, replace_table\n )\n return tnode\n\n elif isinstance(tnode, ast.ForStmt):\n if tnode.init:\n tnode.init = self.__replaceArrRefs(tnode.init, replace_table)\n if tnode.test:\n tnode.test = self.__replaceArrRefs(tnode.test, replace_table)\n if tnode.iter:\n tnode.iter = self.__replaceArrRefs(tnode.iter, replace_table)\n tnode.stmt = self.__replaceArrRefs(tnode.stmt, replace_table)\n return tnode\n\n else:\n err(\n \"orio.module.ortildriver.transformation internal error:OrTilDriver: unknown type of AST: %s\"\n % tnode.__class__.__name__\n )",
"def parse_depend_expr(parse_state):\n return _parse_depend_expr(parse_state)",
"def reduceFreeVariableMemberAccessChains(self,\n expr,\n freeVariableMemberAccessChainToImplValMap):\n renamedVariableMapping = {}\n for chain, implval in freeVariableMemberAccessChainToImplValMap.iteritems():\n assert isinstance(chain, str)\n chain = chain.split('.')\n\n if len(chain) == 1:\n renamedVariableMapping[chain[0]] = implval\n else:\n newName = Expression.freshVarname(\n '_'.join(chain),\n set(expr.mentionedVariables)\n )\n renamedVariableMapping[newName] = implval\n expr = expr.rebindFreeVariableMemberAccessChain(\n chain,\n newName\n )\n\n return expr, renamedVariableMapping",
"def reconstructY(self, inputs):\n if self.act_dec is None:\n act_dec = lambda x: x\n else:\n act_dec = self.act_dec\n return act_dec(self.decodeY(inputs))",
"def subst(s, x):\n if isinstance(x, list):\n return [subst(s, xi) for xi in x]\n elif isinstance(x, tuple):\n return tuple([subst(s, xi) for xi in x])\n elif not isinstance(x, Expr):\n return x\n elif is_var_symbol(x.op):\n return s.get(x, x)\n else:\n return Expr(x.op, *[subst(s, arg) for arg in x.args])",
"def _transform_derivatives_on_rhs(self):\n for expr in self.model.search_for_assignments():\n self._process_operator(list(expr.operands())[1], u'diff', self._transform_derivative_on_rhs)",
"def expr_to_obj(s, name=None):\n\n # import re\n\n # Is our job already done?\n if isinstance(s, (RegimeElement)):\n return s\n\n # strip surrounding whitespace\n s = s.strip()\n\n # Do we have a alias?\n if StrToExpr.is_alias(s):\n return StrToExpr.alias(s)\n\n # re for an expression -> groups into lhs, op, rhs\n p_eqn = re.compile(\n r\"(?P<lhs>[a-zA-Z_]+[a-zA-Z_0-9]*(/?[a-zA-Z_]+[a-zA-Z_0-9]*)?)\"\n r\"\\s*(?P<op>[+\\-*/:]?=)\\s*(?P<rhs>.*)\")\n m = p_eqn.match(s)\n if not m:\n raise ValueError(\"Not a valid nineml expression: %s\" % s)\n\n # get lhs, op, rhs\n lhs, op, rhs = [m.group(x) for x in ['lhs', 'op', 'rhs']]\n\n # do we have an TimeDerivative?\n # re for lhs for TimeDerivative\n p_ode_lhs = re.compile(r\"(?:d)([a-zA-Z_]+[a-zA-Z_0-9]*)/(?:d)([a-zA-Z_]+\"\n r\"[a-zA-Z_0-9]*)\")\n m = p_ode_lhs.match(lhs)\n if m:\n if op != \"=\":\n raise ValueError(\"TimeDerivative lhs, but op not '=' in %s\" % s)\n\n dep_var = m.group(1)\n indep_var = m.group(2)\n return TimeDerivative(dep_var, indep_var, rhs, name=name)\n\n # Do we have an Inplace op?\n # if op in Inplace.op_name_map.keys():\n # return Inplace(lhs,op,rhs, name = name)\n\n # Do we have an assignment?\n if op == \"=\":\n return StateAssignment(lhs, rhs, name=name)\n\n # If we get here, what do we have?\n raise ValueError(\"Cannot map expr '%s' to a nineml Expression\" % s)",
"def _transform_derivative_on_rhs(self, expr):\n # Find the variable to use\n dep_var = expr.diff.dependent_variable.get_source_variable(recurse=True)\n indep_var = expr.diff.independent_variable.get_source_variable(recurse=True)\n ode = dep_var.get_ode_dependency(indep_var)\n rhs_var = ode.eq.rhs.variable.get_source_variable(recurse=True)\n # Ensure there's something mapped to it in this component\n rhs_var = self.connect_variables(rhs_var, (expr.component.name, rhs_var.name))\n # Update this expression\n parent = expr.xml_parent\n parent.xml_insert_after(expr, mathml_ci.create_new(parent, rhs_var.name))\n parent.safe_remove_child(expr)",
"def _map_state_vars_and_eqs(self):\n\n def get_used_eqs_and_state_vars(eq_to_expand, equations):\n \"\"\" Returns used equations and state vars for a given equation\n\n :param eq_to_expand: list containing equations to recurse over and expand definitions for\n note: expecting equations in [(lhs, rhs)] form.\n :param equations: set of equations to look for definitions in.\n :return: set of equations and set of used state vars.\n \"\"\"\n used_state_vars = set()\n for eq in eq_to_expand:\n for v in eq[1].atoms(Derivative) | eq[1].free_symbols:\n if v in self._model.state_vars:\n used_state_vars.add(v)\n elif v not in [e[0] for e in eq_to_expand]:\n eq_to_expand.extend(filter(lambda e: e[0] == v, equations))\n return set(eq_to_expand), used_state_vars\n\n for i, deriv in enumerate(self._model.y_derivatives):\n equations, used_state_vars = \\\n get_used_eqs_and_state_vars([(d.lhs, d.rhs) for d in self._derivative_equations if d.lhs == deriv],\n set(map(lambda e: (e.lhs, e.rhs), self._derivative_equations)))\n\n # get all the variables used in jacobian matrix entry and all variables used to define them\n used_jacobian_vars, used_jacobian_state_vars = \\\n get_used_eqs_and_state_vars([(None, self._jacobian_matrix[i, i])], set(self._jacobian_equations))\n\n for sv in self._formatted_state_vars:\n sv.setdefault('in_evaluate_y_derivative', []).append(sv['sympy_var'] in used_state_vars)\n sv.setdefault('in_evaluate_partial_derivative', []).append(sv['sympy_var'] in used_jacobian_state_vars)\n\n for eq in self._vars_for_template['y_derivative_equations']:\n self.eq_in_evaluate_y_derivative(eq, equations)\n\n for je in self._vars_for_template['jacobian_equations']:\n self.eq_in_evaluate_partial_derivative(je, used_jacobian_vars)",
"def _expandVariables (self, st : String) -> String:\n\n Logging.trace(\">>: %r\", st)\n cls = self.__class__\n\n # collect identifiers embedded in value and replace them by\n # their value\n ParseState_inLimbo = 0\n ParseState_inString = 1\n ParseState_inEscape = 2\n ParseState_inIdentifier = 3\n parseStateToString = { 0 : \"-\", 1 : \"S\",\n 2 : cls._escapeCharacter, 3 : \"I\" }\n\n parseState = ParseState_inLimbo\n result = \"\"\n identifier = \"\"\n fsaTrace = \"\"\n\n for ch in st:\n # process finite state automaton with three states based\n # on next character in string\n fsaTrace += (iif(fsaTrace == \"\", \"\", \" \")\n + \"[%s] %s\" % (parseStateToString[parseState], ch))\n\n if parseState == ParseState_inLimbo:\n if cls._identifierCharRegExp.search(ch):\n identifier = ch\n parseState = ParseState_inIdentifier\n else:\n result += ch\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inString\n elif parseState == ParseState_inString:\n result += ch\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inLimbo\n elif ch == cls._escapeCharacter:\n parseState = ParseState_inEscape\n elif parseState == ParseState_inEscape:\n result += ch\n parseState = ParseState_inString\n elif parseState == ParseState_inIdentifier:\n if cls._identifierCharRegExp.search(ch):\n identifier += ch\n else:\n identifierValue = self._findIdentifierValue(identifier)\n result += identifierValue\n result += ch\n parseState = iif(ch == cls._doubleQuoteCharacter,\n ParseState_inString, ParseState_inLimbo)\n\n if parseState == ParseState_inIdentifier:\n identifierValue = self._findIdentifierValue(identifier)\n result += identifierValue\n \n Logging.trace(\"--: accumulatedFSATrace = %s\", fsaTrace)\n Logging.trace(\"<<: %r\", result)\n return result",
"def _eval_shallow(servicedef, obj, need_copy=False):\n\n # _eval_shallow() resolves $ref and $merge to their values in\n # source and with_. This is a *shallow* evaluation in that embedded\n # $ref or $merge at deeper levels are *not* resolved.\n #\n # For example, the following will be resolved:\n # { $ref: ... }\n # { $merge: ... }\n #\n # But the following will *not* be resolved\n # { type: object,\n # properties: { x: { $ref: ... } } }\n #\n # Need to loop in the event that a $ref resolves to another $ref\n # or a $ref to a $merge:\n #\n # { $ref: <target1> } --> { $ref: <target2> } --> { <value2> }\n #\n\n # Minimize copies so that we don't bloat memory\n done = False\n is_copy = False\n while not done:\n if '$merge' in obj:\n with Parser(obj['$merge'], 'eval_shallow') as merge_parser:\n merge_source = merge_parser.parse('source', save=False,\n required=True)\n merge_with = merge_parser.parse('with', save=False,\n required=True)\n\n # This always returns a copy\n obj = json_merge_patch(servicedef, merge_source, merge_with)\n is_copy = True\n\n elif '$ref' in obj:\n if len(list(obj.keys())) != 1:\n raise ParseError(\n \"$ref object may not have any other properties\", obj)\n\n sch = servicedef.find(obj['$ref'])\n obj = sch.input\n is_copy = False\n\n else:\n done = True\n\n if not is_copy and need_copy:\n obj = copy.copy(obj)\n\n return obj",
"def _replace_forward_references(t, context):\n if isinstance(t, str):\n return context[t]\n elif isinstance(t, Type):\n return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access\n else:\n return t",
"def polyrelsimp(expr):\n return expr.replace(lambda rel: isinstance(rel, Rel),\n lambda rel: expand_polyeq(rel))",
"def lang_postprocessing(variables):\n return variables",
"def promote_live_variables(paths):\n for path in paths:\n symbol_table = {} # We build a new symbol table for each path\n for block in path:\n if isinstance(block, BasicBlock):\n new_statements = []\n for statement in block.statements:\n # Replace any symbols currently in the symbol table\n statement = replace_symbols(statement, symbol_table, ctx=ast.Load)\n # Fold constants\n statement = constant_fold(statement)\n # Update symbol table if the statement is an assign\n if is_assign_to_name(statement):\n symbol_table[statement.targets[0].id] = statement.value\n new_statements.append(statement)\n block.statements = new_statements\n elif isinstance(block, Branch):\n # For branches we just promote in the condition\n block.cond = replace_symbols(block.cond, symbol_table, ctx=ast.Load)\n block.cond = constant_fold(block.cond)\n return paths",
"def convert_state_dict(state_dict):\n\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n state_dict[name] = v\n del state_dict[k]\n return state_dict",
"def __compileVariables(self):\n state_variables = []\n state_diffs = []\n networks = []\n frames = []\n vars = []\n\n language = self.language\n\n instansiate_variables = {}\n\n self.obj_vars = self.object_variables()\n for key, var in self.variables.items(): # Grouping the variables\n size = self.size_of_variable(var)\n variable_type = var.type\n if not size or key in self.variables_not_included: # Skip variable\n print('Variable not included:\\tvar: {}'.format(var.label))\n continue\n if var.equation_list and variable_type in ['constant', \"network\"]:\n self.constantEquations.append(*var.equation_list)\n continue\n if variable_type in ['constant', 'frame', 'state']:\n # print(dir(var.units))\n units_pp = var.units.prettyPrint()\n doc_pp = var.doc\n if units_pp:\n units_doc_str = '{}, {}'.format(units_pp, doc_pp)\n else:\n units_doc_str = '{}'.format('Empty', doc_pp)\n instansiate_variables[key] = self.variable_dict(var)\n if var.compiled_index_list: # Index sets need compilation\n string_version = self.matrix_string_zeros(size, prefix = ' ')\n mat = np.zeros(size)\n index = str(var.index_structures) # Convert to string\n rep = self.mod_index[var.index_structures[0]].printable()\n string_w_comments = self.matrix_to_str_w_line_comments(mat, rep,\n prefix = ' ')\n width = 79 - 16 - len(index) - len(var.compiled) # 16 is others\n width2 = 79 - 8 - len(units_pp) - len(doc_pp)\n cons_str = '\\n{0} = {ar}({v: <{msg_box}} {com} {ind}\\n {st} ){v: <{w2}} {com} {udoc}'\n cons_var_str = cons_str.format(var.compiled,\n com = CODE[language][\"comment\"],\n ar = CODE[language][\"list\"],\n st = string_w_comments,\n udoc = units_doc_str,\n ind = index,\n w2 = width2,\n w = width,\n v = '')\n # cons_str = '{0} = {ar}({v: <{msg_box}} {com} {ind}\\n {st})'\n # cons_var_str = cons_str.format(var.compiled,\n # ar = CODE[language]['list'],\n # st = string_w_comments,\n # ind = index,\n # msg_box = width,\n # v = '')\n else:\n string_version = self.matrix_string_zeros(size, prefix = ' ')\n index = 'none'\n width = 79 - 16 - len(index) - len(var.compiled) # 16 is others\n width2 = 79 - 9 - len(units_pp) - len(doc_pp)\n cons_str = '\\n{0} = {array}({val: <{msg_box}} # {ind}\\n {st}\\n ) {h: <{w2}} {com} {udoc}'\n cons_var_str = cons_str.format(var.compiled,\n array = CODE[language][\"list\"],\n com = CODE[language][\"comment\"],\n udoc = units_doc_str,\n st = string_version,\n ind = index,\n w = width,\n w2 = width2,\n h = '',\n val = '')\n if variable_type in ['frame']:\n frames.append(cons_var_str)\n elif variable_type in ['state']:\n if var.label in self.state_variables:\n state_variables.append(cons_var_str)\n else:\n pass\n else:\n vars.append(cons_var_str)\n elif variable_type in [\"network\"]:\n mat = self.populateNetworkVariable(var)\n string_mat = self.matrix_to_string(mat, prefix = ' ')\n index = str(var.index_structures) # Convert to string\n width = 79 - 20 - len(index) - len(var.compiled) # 16 is others\n if width < 1:\n width = 1\n nt_str = '{0} = {array}({val: <{msg_box}} # {ind}\\n {st})'\n netw_var_str = nt_str.format(var.compiled,\n array = CODE[language][\"list\"],\n st = string_mat,\n ind = index,\n w = width,\n val = '')\n networks.append(netw_var_str)\n putData(instansiate_variables, self.variable_instantiate_file)\n return [state_variables, state_diffs, networks, frames, vars]",
"def _convert_reference_fields_to_strings(self, xblock, jsonfields):\r\n assert isinstance(jsonfields, dict)\r\n for field_name, value in jsonfields.iteritems():\r\n if value:\r\n if isinstance(xblock.fields[field_name], Reference):\r\n jsonfields[field_name] = value.to_deprecated_string()\r\n elif isinstance(xblock.fields[field_name], ReferenceList):\r\n jsonfields[field_name] = [\r\n ele.to_deprecated_string() for ele in value\r\n ]\r\n elif isinstance(xblock.fields[field_name], ReferenceValueDict):\r\n for key, subvalue in value.iteritems():\r\n assert isinstance(subvalue, Location)\r\n value[key] = subvalue.to_deprecated_string()\r\n return jsonfields",
"def dotps(y):\n def isdot(e):\n return type(e) is T.TensorVariable and str(e).startswith('dot')\n return parms(y, isdot)",
"def _normalize_variable_recurrent_scope(scope: 'Scope'):\n ret_scope = scope.copy()\n for scope_element in ret_scope:\n if scope_element.calling_module_class_name in [\"Recurrent\", \"VariableRecurrent\",\n \"VariableRecurrentReverse\"]:\n scope_element.calling_module_class_name = \"NormalizedName_Recurrent\"\n return ret_scope",
"def test_dotexpr_lhs():\n a = Var(ArrayLiteral([1]).find(lambda v: v == 1))\n b = Var(Let(lambda b=[1, 2]: b).find(lambda v: v == 1))\n c = Var(String(\"hello\").concat(String(\" world\")))\n ignore(b)\n ignore(c)\n return a",
"def to_ak_expr(expr, aliases=dict(), transformer=Transformer()):\n transformer.aliases = aliases\n parsed = ast.parse(expr)\n transformer.visit(parsed)\n source = astor.to_source(parsed).strip()\n return source",
"def compile_expressions(self):\n if getattr(self, \"_attrs\", None):\n for k, v in self._attrs.items():\n try:\n Expression.compile_cache(v)\n except:\n pass\n if \"${\" in v and \"}\" in v:\n Expression.extract(v)\n if getattr(self, \"_let\", None):\n for k, v in self._let.items():\n try:\n Expression.compile_cache(v)\n except:\n pass\n if getattr(self, \"text\", None):\n Expression.extract(self.text)",
"def _convert_variables(self, abbr, candidates):\n if abbr is None:\n return [self.CI, self.F, self.R]\n if abbr == \"all\":\n return self._ensure_list(candidates, name=\"candidates\")\n abbr_dict = {\"C\": self.C, \"I\": self.CI, \"F\": self.F, \"R\": self.R, }\n variables = list(abbr) if isinstance(abbr, str) else abbr\n variables = [abbr_dict.get(v, v) for v in variables]\n return self._ensure_list(variables, candidates=candidates, name=\"variables\")",
"def unfix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].unfix(var_value)\r\n\r\n return m",
"def mag_postprocessing(variables):\n return variables",
"def decompose_expressions(sv): \r\n done=False # iterate until no more change \r\n while not done:\r\n done=True # set to False whenever there is a change \r\n # build expressions from clauses\r\n for nam in list(sv.Object_list): # list is modified in loop: use a copy\r\n nod=sv.Object[nam]\r\n li=[]\r\n for c,v in nod.clauses: # explore clauses \r\n k,w=c,v # copy of condition and value (may change)\r\n \r\n # cache condition \r\n if k:\r\n if not (k[0] in [Always, Start]+Glitch_list): # add 'begin' except for [Begin, End, Change, Always, Start]\r\n k=(Begin, k, None) \r\n k=(k[0], create_expression(sv, k[1]), None) # skip one level\r\n if k!=c: done=False # a change has occurred\r\n \r\n #cache value \r\n if w and tree_join(w)!=nam: # do not create circular ref \r\n if w[0] in Glitch_list: # do not cache [Begin, End, Change] \r\n w=(w[0], create_expression(sv, w[1]), None) \r\n elif w[0]==Comma:\r\n w=create_expression(sv, w) # process list \r\n elif ( w[1] and ( w[1][1] or w[1][2]) ) or \\\r\n ( w[2] and ( w[2][1] or w[2][2]) ): # do not cache a single operation \r\n w=(w[0], create_expression(sv, w[1]), create_expression(sv, w[2])) \r\n if w!=v: done=False # a change has occurred\r\n # store result\r\n li+=[(k,w)] # store one clause\r\n \r\n nod.clauses=li # store list of clauses\r",
"def _rename_bound_variables(self, formula, variables):\n new_vars = [self._bound_symbol(x) for x in variables]\n old_vars = [self.walk_symbol(x) for x in variables]\n new_formula = yicespy.yices_subst_term(len(variables), yicespy.make_term_array(new_vars),\n yicespy.make_term_array(old_vars), formula)\n return (new_formula, new_vars)",
"def test_references(self):\n a = DummyObject()\n d = {'a.a.a':1, 'a.b.a':3, 'b':a}\n # Check dict single level keys don't lose reference\n self.assertEqual( dottedDict(d).data['b'], d['b'] )\n self.assertEqual( dottedDict(d).data, dottedDict(dottedDict(d)).data )",
"def _parse_instance_reference(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.Class, registry_expr: str,\n auto_id: mapry.py.generate.AutoID) -> str:\n uid = auto_id.next_identifier()\n\n return _PARSE_CLASS_REF_TPL.render(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n uid=uid,\n class_name=a_type.name,\n registry_expr=registry_expr)",
"def infer_constants(formula, variables):\n if isinstance(variables, dict):\n for var in variables:\n other_vars = dict(variables)\n other_vars.pop(var)\n _check_var_conflicts({var}, other_vars)\n else:\n logger.error('infer constants does not know the variable domains.')\n warnings.warn(\n 'infer_constants can give an incorrect result '\n 'depending on the variable domains.\\n'\n 'If you give the variable domain definitions as dict, '\n 'then infer_constants will check for ambiguities.')\n tree = parser.parse(formula)\n old2new = dict()\n for u in tree:\n if u.type != 'var':\n continue\n if str(u) in variables:\n continue\n # Var (so NAME token) but not a variable\n # turn it into a string constant\n old2new[u] = nodes.Const(str(u))\n nx.relabel_nodes(tree, old2new, copy=False)\n return str(tree)",
"def repackage_var(x):\n if type(x) == Variable:\n return Variable(x.data)\n else:\n return tuple(repackage_var(v) for v in x)",
"def fix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].fix(var_value)\r\n\r\n return m",
"def _do_dots(self, value, *dots):\n for dot in dots:\n try:\n value = getattr(value, dot)\n except AttributeError:\n try:\n value = value[dot]\n except (TypeError, KeyError) as exc:\n raise TempliteValueError(\n f\"Couldn't evaluate {value!r}.{dot}\"\n ) from exc\n if callable(value):\n value = value()\n return value",
"def reflect_state(self, s):\n s[2:8] = reflect_control_vector(s[2:8])\n s[11:17] = reflect_control_vector(s[11:17])\n return s",
"def substitute_with_bindings(self,bindings):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in bindings:\n term[i] = bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))",
"def reconstructX(self, inputs):\n if self.act_dec is None:\n act_dec = lambda x: x\n else:\n act_dec = self.act_dec\n return act_dec(self.decodeX(inputs))",
"def resolve_variables(self, service, environment, extra_variables=None,\n require_all_replaced=True):\n all_vars = self.load_all_variables(service, environment, extra_variables)\n self.resolved_vars = recursive_replace_vars(\n all_vars, require_all_replaced,\n all_vars[EXCONF_VAR_TEMPLATE_COMMENT_BEGIN],\n all_vars[EXCONF_VAR_STR_TEMPLATE_PREFIX],\n all_vars[EXCONF_VAR_STR_TEMPLATE_SUFFIX])\n return self.resolved_vars",
"def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot",
"def to_symbolic(self):\n transformer = SymbolicAgents()\n self.expression = transformer.transform(self.expression)",
"def _subs(self, exp, p, seen):\n p, new = ASParameters._subs(self, exp, p, seen)\n if new:\n if self._has(\"theta\"):\n p._.theta = tuple(subs(th, *exp) for th in self._.theta)\n if self._has(\"omega\"):\n p._.omega = self._.omega.subs(*exp)\n return (p, new)",
"def unify(self,term,fact,bindings):\n\n n = len(term.split('(')[1][:-1].split(','))\n term_args = term.split('(')[1][:-1].split(',')\n fact_args = fact.split('(')[1][:-1].split(',')\n for i in range(n):\n if (not Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])):\n if term_args[i] != fact_args[i]:\n return False\n elif (Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])):\n bindings[term_args[i]] = fact_args[i]\n elif (not Prover.is_var(term_args[i])) and (Prover.is_var(fact_args[i])):\n bindings[fact_args[i]] = term_args[i]\n return bindings",
"def nacs_postprocessing(variables):\n return variables",
"def chain_rule(cls, ctx, inputs, d_output):\n d_inputs = cls.backward(ctx, d_output)\n d_inputs = wrap_tuple(d_inputs)\n res = []\n for inp, d_input in zip(inputs, d_inputs):\n if not isinstance(inp, Variable) or inp.history is None:\n continue\n res.append(VariableWithDeriv(inp, d_input))\n return res",
"def test_domain_py_xrefs(app, status, warning):\n app.builder.build_all()\n\n def assert_refnode(node, module_name, class_name, target, reftype=None,\n domain='py'):\n attributes = {\n 'refdomain': domain,\n 'reftarget': target,\n }\n if reftype is not None:\n attributes['reftype'] = reftype\n if module_name is not False:\n attributes['py:module'] = module_name\n if class_name is not False:\n attributes['py:class'] = class_name\n assert_node(node, **attributes)\n\n doctree = app.env.get_doctree('roles')\n refnodes = list(doctree.traverse(addnodes.pending_xref))\n assert_refnode(refnodes[0], None, None, 'TopLevel', 'class')\n assert_refnode(refnodes[1], None, None, 'top_level', 'meth')\n assert_refnode(refnodes[2], None, 'NestedParentA', 'child_1', 'meth')\n assert_refnode(refnodes[3], None, 'NestedParentA', 'NestedChildA.subchild_2', 'meth')\n assert_refnode(refnodes[4], None, 'NestedParentA', 'child_2', 'meth')\n assert_refnode(refnodes[5], False, 'NestedParentA', 'any_child', domain='')\n assert_refnode(refnodes[6], None, 'NestedParentA', 'NestedChildA', 'class')\n assert_refnode(refnodes[7], None, 'NestedParentA.NestedChildA', 'subchild_2', 'meth')\n assert_refnode(refnodes[8], None, 'NestedParentA.NestedChildA',\n 'NestedParentA.child_1', 'meth')\n assert_refnode(refnodes[9], None, 'NestedParentA', 'NestedChildA.subchild_1', 'meth')\n assert_refnode(refnodes[10], None, 'NestedParentB', 'child_1', 'meth')\n assert_refnode(refnodes[11], None, 'NestedParentB', 'NestedParentB', 'class')\n assert_refnode(refnodes[12], None, None, 'NestedParentA.NestedChildA', 'class')\n assert len(refnodes) == 13\n\n doctree = app.env.get_doctree('module')\n refnodes = list(doctree.traverse(addnodes.pending_xref))\n assert_refnode(refnodes[0], 'module_a.submodule', None,\n 'ModTopLevel', 'class')\n assert_refnode(refnodes[1], 'module_a.submodule', 'ModTopLevel',\n 'mod_child_1', 'meth')\n assert_refnode(refnodes[2], 'module_a.submodule', 'ModTopLevel',\n 'ModTopLevel.mod_child_1', 'meth')\n assert_refnode(refnodes[3], 'module_a.submodule', 'ModTopLevel',\n 'mod_child_2', 'meth')\n assert_refnode(refnodes[4], 'module_a.submodule', 'ModTopLevel',\n 'module_a.submodule.ModTopLevel.mod_child_1', 'meth')\n assert_refnode(refnodes[5], 'module_b.submodule', None,\n 'ModTopLevel', 'class')\n assert_refnode(refnodes[6], 'module_b.submodule', 'ModTopLevel',\n 'ModNoModule', 'class')\n assert_refnode(refnodes[7], False, False, 'int', 'class')\n assert_refnode(refnodes[8], False, False, 'tuple', 'class')\n assert_refnode(refnodes[9], False, False, 'str', 'class')\n assert_refnode(refnodes[10], False, False, 'float', 'class')\n assert_refnode(refnodes[11], False, False, 'list', 'class')\n assert_refnode(refnodes[11], False, False, 'list', 'class')\n assert_refnode(refnodes[12], False, False, 'ModTopLevel', 'class')\n assert_refnode(refnodes[13], False, False, 'index', 'doc', domain='std')\n assert len(refnodes) == 14\n\n doctree = app.env.get_doctree('module_option')\n refnodes = list(doctree.traverse(addnodes.pending_xref))\n print(refnodes)\n print(refnodes[0])\n print(refnodes[1])\n assert_refnode(refnodes[0], 'test.extra', 'B', 'foo', 'meth')\n assert_refnode(refnodes[1], 'test.extra', 'B', 'foo', 'meth')\n assert len(refnodes) == 2",
"def test_01():\n text = \"a = 2 + 3 * (4 + 5)\"\n\n c = _ast.parse(text)\n print(_ast.dump(c))",
"def convert(osis_refs):\n is_string = False\n if isinstance(osis_refs, str):\n osis_refs = [osis_refs]\n is_string = True\n\n usfm_refs = []\n\n for ref in osis_refs:\n parts = ref.split('.')\n\n # If the book part is different, convert otherwise just uppercase it\n if parts[0].lower() in BOOKS.keys():\n parts[0] = BOOKS[parts[0].lower()]\n else:\n parts[0] = parts[0].upper()\n\n usfm_refs.append('.'.join(parts))\n\n return usfm_refs if is_string is False else usfm_refs[0]",
"def test_regref_transform(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float alpha = 0.5\\nfloat Delta=sqrt(2)\\nCoherent(alpha*q0, Delta*sqrt(pi), 0.2*10) | 0\\n\"\n )\n\n p = sym.Symbol(\"q0\")\n assert isinstance(bb.operations[0][\"args\"][0], RegRefTransform)\n assert bb.operations[0][\"args\"][0].func_str == str(0.5 * p)",
"def to_arrayref(u):\n if u.__class__ is node.funcall:\n try:\n if u.func_expr.props in \"UR\": # upd,ref\n u.__class__ = node.arrayref\n except:\n pass # FIXME",
"def _substitute(self, formula, subs):\n\n return subs.get(formula, formula)",
"def translate_dot_regime(\n regime: int,\n) -> Tuple[str, bool]:\n\n rev_mapping = {}\n for str_regime in [\"singledot\", \"doubledot\"]:\n idx = DOT_LABEL_MAPPING[str_regime]\n rev_mapping[idx[0]] = (str_regime, False)\n rev_mapping[idx[1]] = (str_regime, True)\n\n return rev_mapping[regime]",
"def testStateVariables(self):\n\n class S1(ClassWithCollections):\n v1 = StateVariable(enabled=True, doc=\"values1 is ...\")\n v1XXX = StateVariable(enabled=False, doc=\"values1 is ...\")\n\n\n class S2(ClassWithCollections):\n v2 = StateVariable(enabled=True, doc=\"values12 is ...\")\n\n class S1_(S1):\n pass\n\n class S1__(S1_):\n v1__ = StateVariable(enabled=False)\n\n class S12(S1__, S2):\n v12 = StateVariable()\n\n s1, s2, s1_, s1__, s12 = S1(), S2(), S1_(), S1__(), S12()\n\n self.failUnlessEqual(s1.states.isEnabled(\"v1\"), True)\n s1.v1 = 12\n s12.v1 = 120\n s2.v2 = 100\n\n self.failUnlessEqual(len(s2.states.listing), 1)\n\n self.failUnlessEqual(s1.v1, 12)\n try:\n tempvalue = s1__.v1__\n self.fail(\"Should have puked since values were not enabled yet\")\n except:\n pass",
"def expand_vars(args, diff=()):\n replacer = Placeholders(diff=diff)\n return replacer.expand_vars(args)",
"def deconstruct(self):\n c = self.__class__\n path = \"{}.{}\".format(c.__module__, c.__name__)\n return path, [self.value], {}",
"def CreateEBNFTransformer() -> TransformerFunc:\n return partial(traverse, transformation_table=EBNF_AST_transformation_table.copy())",
"def get_numbers_operators(text: str, var_dict: dict, svar_dict:dict) -> (list, list, dict):\n\n\n # Define regex to extract all numbers in a string, as well as placeholders for intermediate results.\n # These placeholders start with a character, followed by a sequence of characters and numbers.\n # Use re.findall method to get a list of all numbers from the string.\n variables_regex = r\"((?<=[\\+\\-\\*\\/\\^\\,])|^)\\s*[\\+\\-]?\\s*(\\d+\\.?\\d*(e-?\\d+)?|[A-Za-z]+[A-Za-z0-9]*)\"\n var_list = re.findall(variables_regex, text)\n var_list = [i[1] for i in var_list]\n\n # Create dynamic view objects of the keys in var_dict and svar_dict.\n var_dict_keys = var_dict.keys() # returns DYNAMIC view object\n svar_dict_keys = svar_dict.keys()\n\n # Loop over var_list to assign variables to numbers and to copy saved variables from svar_dict to var_dict.\n for idx, entry in enumerate(var_list):\n # Do nothing if an entry is already stored in var_dict\n if not entry in var_dict_keys:\n # Check if entry is contained in svar_dict\n if not entry in svar_dict_keys:\n var_list[idx] = float(entry)\n else:\n var_list[idx] = svar_dict[entry]\n else:\n var_list[idx] = var_dict.pop(entry)\n\n \n operator_string = re.sub(variables_regex, '', text)\n operator_list = [i for i in operator_string if i !=' ']\n\n # Return both lists and the dictionairy.\n return var_list, operator_list, var_dict",
"def convert_dependencies(dependencies, rule_in_pos_0=r\"\\n\"):\n rule_in_pos_0 = lex_bases.rule([\"JUMP_LINE\", False], rule_in_pos_0)\n converted_dependencies = None\n\n if (\n isinstance(dependencies, dict)\n and \"rules\" in dependencies.keys()\n and len(dependencies[\"rules\"]) >= 1\n ):\n converted_dependencies = dependencies\n # Load the jump line rule.\n converted_dependencies[\"rules\"].insert(0, rule_in_pos_0)\n\n elif isinstance(dependencies, list):\n converted_dependencies = dependencies\n\n converted_dependencies.insert(0, rule_in_pos_0)\n\n return converted_dependencies",
"def _apply_special_conversion_for_nested_expr(self, expr, defn_units, desired_units):\n for from_units, to_units in self.special_conversions.iterkeys():\n if (from_units.dimensionally_equivalent(defn_units)\n and to_units.dimensionally_equivalent(desired_units)):\n # We can apply this conversion\n expr = self.special_conversions[(from_units, to_units)](expr)\n DEBUG('units-converter', \"Used nested special conversion from\", repr(from_units), \"to\", repr(to_units))#, \"giving\", expr.xml())\n break\n# else:\n# print \"No on nested conv from\", repr(from_units), \"to\", repr(to_units)\n return expr",
"def to_eval(self):\n for _m in self.modules.values():\n _m.eval()",
"def substitute_variable_names(variables_binds, from_clause, to_clause):\n new_binds = []\n substitute = {}\n for index, param in enumerate(from_clause.parameters):\n if not param.is_constant() and not to_clause.parameters[index].is_constant():\n from_var_name = param.name\n to_var_name = to_clause.parameters[index].name\n substitute[from_var_name] = to_var_name\n\n for b in variables_binds:\n new_bind = {}\n for name, value in b.items():\n if name in substitute:\n new_bind[substitute[name]] = value\n if len(new_bind):\n new_binds.append(new_bind)\n\n return new_binds",
"def recursive_eval(sexpr):\n newexpr = rewrite_node(sexpr)\n newexpr.apply(recursive_eval)\n return newexpr",
"def substitute(expression, subs=None):\n if subs is None:\n subs = {}\n if isNumber(expression):\n return expression\n if isSymbol(expression):\n if expression.name in subs:\n return subs[expression.name]\n elif expression in subs:\n return subs[expression]\n else:\n return expression\n expr = expression.copy()\n # Must be an expression\n symbolDct = {s.name: s for s in expression.free_symbols}\n # Update entry in substitution to be the same as the expression\n newSubs = dict(subs)\n for key, value in subs.items():\n if key.name in symbolDct.keys():\n del newSubs[key]\n newSubs[symbolDct[key.name]] = value\n expr = expr.subs(newSubs)\n return sympy.simplify(expr)",
"def expand_bis(equation:sp.Eq):\n\n assert isinstance(equation,sp.Eq)\n symbols = equation.lhs.free_symbols | equation.rhs.free_symbols\n subs = []\n for symbol in symbols:\n if isinstance(symbol,BisSymbol):\n subs.append((symbol,symbol.parent_SI_symbol.bis_eq.rhs))\n\n expanded_equation = equation.subs(subs)\n return expanded_equation",
"def translate_f(self, formula):\n eng_formula = re.sub('x\\[\\d,\\d,\\d\\]', lambda match: self.fid_to_var(match.group()), str(formula))\n return eng_formula",
"def _canon_cg(expr):\n return expr.replace(CG, _canon_cg_core)",
"def idm_postprocessing(variables):\n return variables",
"def _processReusedPy(self, specnames, specdict, specials=[],\n dovars=True, dopars=True, doinps=True, illegal=[]):\n\n reused, specupdated, new_protected, order = _processReused(specnames,\n specdict,\n self.fspec.reuseterms,\n _indentstr)\n self.fspec._protected_reusenames = new_protected\n # symbols to parse are at indices 2 and 4 of 'reused' dictionary\n reusedParsed = self._parseReusedTermsPy(reused, [2, 4],\n specials=specials, dovars=dovars,\n dopars=dopars, doinps=doinps,\n illegal=illegal)\n reusedefs = {}.fromkeys(new_protected)\n for _, deflist in reusedParsed.items():\n for d in deflist:\n reusedefs[d[2]] = d\n return (concatStrDict(reusedefs, intersect(order, reusedefs.keys())),\n specupdated)",
"def process_name(self, stack):\n dot_op = self._toks(stack)\n toks = [t.value for t in Stack.flatten(dot_op)]\n # always remove the final dot\n assert toks[-1] == \".\"\n expr = \"\".join(toks[:-1])\n yield from self.dot.complete(expr)",
"def decode_reference(self, model, x, out_lens):\n model = getattr(model, 'module', model)\n with torch.no_grad():\n # Apply optional preprocessing\n logits, out_lens = model.encode(x, out_lens)\n output = []\n for batch_idx in range(logits.size(0)):\n inseq = logits[batch_idx, :, :].unsqueeze(1)\n logitlen = out_lens[batch_idx]\n sentence = self._greedy_decode(model, inseq, logitlen)\n output.append(sentence)\n\n return output",
"def test_dotted_named_entities_circular_references():\n from tests.dottedname.foo.bar.bop import Property\n\n p = Property(\n name='outer',\n nested={\n 'properties': [\n Property(name='inner')\n ]\n }\n )\n assert p\n assert isinstance(p.nested.properties, list)\n assert p.nested.properties[0].name == 'inner'",
"def transform():",
"def let():\n def from_many(*kv_pairs):\n new_bindings = {}\n for entry in kv_pairs:\n with match(entry) as case:\n with case('Quoted(Sexpr(Name(name), expr))') as [m]:\n new_bindings[m.name] = m.expr\n\n def _from_many(quoted_body):\n return EvaluateInContext(\n push_subscope_with(new_bindings),\n pop_subscope,\n quoted_body.subexpression\n )\n\n return e.Function({parse_fn(\"(λ &[any] . any)\"): _from_many})\n yield (\"(λ ...&[(name any)] . (λ &[any] . any))\", from_many)\n\n def from_one(key, value, quoted_body):\n return EvaluateInContext(\n push_subscope_with({key.subexpression.name: value}),\n pop_subscope,\n quoted_body.subexpression\n )\n yield (\"(λ &[name] any &[any] . any)\", from_one)",
"def _convert_ext_attrs(self, ast):\n self.ext_attrs = IDLExtAttrs(ast)",
"def makeSourceRefs(refs):\n s = ''\n if refs:\n if isiterable(refs):\n for ref in refs:\n s += '<SourceRef>B%s-%s</SourceRef>' % (NODEID, ref)\n else: s += '<SourceRef>B%s-%s</SourceRef>' % (NODEID, refs)\n return s",
"def convert_to_model(self, *args):\n state_xref_service_data, *_ = args\n return [StateXRefService(**state_xref_service) for state_xref_service in state_xref_service_data]",
"def substitute(self):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in self.bindings:\n term[i] = self.bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))",
"def _unify_variables(self, variables):\n variables = [self._lookup(i) if isinstance(i, str) else i\n for i in variables]\n return variables",
"def getXRefsFrom(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n\r\n\r\n # normalFlow = True\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n\r\n # needed to identify pool variables. drefs accessing the pool may access pointers\r\n # in the pool. the pointers should be retrieved instead\r\n size_pool = self.getSize(withPool=True)\r\n # for each instruction\r\n for i in idautils.FuncItems(self.func_ea):\r\n for xref in idautils.XrefsFrom(i, 0):\r\n # if the xref is to a far or near called function\r\n if xref.type == idc.fl_CN or xref.type == idc.fl_CF:\r\n if xref.to not in crefs:\r\n crefs.append(xref.to)\r\n # if the xref is to a read or write data access\r\n if xref.type == idc.dr_W or xref.type == idc.dr_R:\r\n if xref.to not in drefs:\r\n # if xref.to is in the pool, then retrieve content if it's a pointer\r\n if xref.to < self.func_ea + size_pool:\r\n # those are the references found at the pool location\r\n iteratedOnce = False\r\n for poolRef in idautils.XrefsFrom(xref.to, 0):\r\n if iteratedOnce:\r\n raise(FunctionException(\"%08X: there should only be one data xref in pool variable\"\r\n % (self.func_ea)))\r\n # there should only be one in the pool refernce\r\n if poolRef.to not in drefs:\r\n drefs.append(poolRef.to)\r\n iteratedOnce = True\r\n else:\r\n drefs.append(xref.to)\r\n\r\n # for ref in idautils.DataRefsFrom(self.func_ea):\r\n # drefs.append(ref)\r\n # for ref in idautils.DataRefsFrom(self.func_ea - 1):\r\n # drefs.append(ref)\r\n return crefs, drefs",
"def rst_dollars_to_math(rst_str,\n protector=rst_protector,\n dollar_repl=r\":math:`\\1`\"):\n if rst_str.find(\"$\") == -1:\n return rst_str\n out_str = protector.protect(rst_str)\n # matches $...$\n dollars = re.compile(r\"(?<!\\$)(?<!\\\\)\\$([^\\$]+?)\\$\")\n # regular expression for \\$\n slashdollar = re.compile(r\"\\\\\\$\")\n out_str = dollars.sub(dollar_repl, out_str)\n out_str = slashdollar.sub(r\"$\", out_str)\n return rst_protector.restore(out_str)",
"def _convert_children_to_literals(special_atom, bool_varlist, bool_var_to_special_atoms):\n new_args = [special_atom.args[0]]\n new_statements = []\n need_new_expression = False\n for child in special_atom.args[1:]:\n if type(child) in native_types or not child.is_expression_type():\n # Child is a literal. Simply append to new argument list.\n new_args.append(child)\n else:\n # We need to do a substitution\n need_new_expression = True\n new_indicator = bool_varlist.add()\n if type(child) in special_logical_atom_types:\n child_cnf = _convert_children_to_literals(child, bool_varlist, bool_var_to_special_atoms)\n bool_var_to_special_atoms[new_indicator] = child_cnf[0]\n else:\n child_cnf = to_cnf(new_indicator.equivalent_to(child), bool_varlist, bool_var_to_special_atoms)\n new_statements.append(child_cnf[0])\n new_args.append(new_indicator)\n new_statements.extend(child_cnf[1:])\n if need_new_expression:\n new_atom_with_literals = special_atom.__class__(new_args)\n return [new_atom_with_literals] + new_statements\n else:\n return [special_atom]",
"def __call__(self, text):\n return extract_references(text, self.preprocessor, self.model)",
"def vefi_postprocessing(variables):\n return variables",
"def _from_components(self, components):\n bijector = components.pop('bijector', self.transform_or_spec)\n return TransformedVariable(\n **components, initial_value=None, bijector=bijector,\n dtype=self.dtype, name=self.name)",
"def _parse_opt_out(\n tn,\n constant_tags,\n to_constant,\n):\n tn_ag = tn.copy()\n variables = []\n\n for t in tn_ag:\n\n if t.tags & constant_tags:\n t.modify(apply=to_constant)\n continue\n\n # append the raw data but mark the corresponding tensor\n # for reinsertion\n data = t.get_params()\n variables.append(data)\n t.add_tag(_VARIABLE_TAG.format(len(variables) - 1))\n\n return tn_ag, variables",
"def _expand_variables(input_str, cmake_vars):\n def replace(match):\n if match.group(1) in cmake_vars:\n return cmake_vars[match.group(1)]\n return \"\"\n return _CMAKE_ATVAR_REGEX.sub(replace,_CMAKE_VAR_REGEX.sub(replace, input_str))",
"def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation",
"def getattr_ref(self, interp, name, contextclass=None):\n interp.warn('Attempt to modify property of non-object')\n return interp.space.empty_ref()",
"def substitute(self, args, lvars):\n if is_String(args) and not isinstance(args, CmdStringHolder):\n args = str(args) # In case it's a UserString.\n try:\n def sub_match(match):\n return self.conv(self.expand(match.group(1), lvars))\n result = _dollar_exps.sub(sub_match, args)\n except TypeError:\n # If the internal conversion routine doesn't return\n # strings (it could be overridden to return Nodes, for\n # example), then the 1.5.2 re module will throw this\n # exception. Back off to a slower, general-purpose\n # algorithm that works for all data types.\n args = _separate_args.findall(args)\n result = []\n for a in args:\n result.append(self.conv(self.expand(a, lvars)))\n if len(result) == 1:\n result = result[0]\n else:\n result = ''.join(map(str, result))\n return result\n else:\n return self.expand(args, lvars)",
"def variabels_to_restore(scope=None, strip_scope=False):\n if scope:\n variable_map = {}\n variables_to_restore = slim.get_variables_to_restore(include=[scope])\n for var in variables_to_restore:\n if strip_scope:\n var_name = var.op.name[len(scope) + 1:]\n else:\n var_name = var.op.name\n variable_map[var_name] = var\n return variable_map\n else:\n return {var.op.name: var for var in slim.get_variables_to_restore()}",
"def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state.item(0)\n pe = state.item(1)\n pd = state.item(2)\n u = state.item(3)\n v = state.item(4)\n w = state.item(5)\n e0 = state.item(6)\n e1 = state.item(7)\n e2 = state.item(8)\n e3 = state.item(9)\n p = state.item(10)\n q = state.item(11)\n r = state.item(12)\n # extract forces/moments\n fx = forces_moments.item(0)\n fy = forces_moments.item(1)\n fz = forces_moments.item(2)\n l = forces_moments.item(3)\n m = forces_moments.item(4)\n n = forces_moments.item(5)\n\n # position kinematics\n pn_dot =\n pe_dot =\n pd_dot =\n\n # position dynamics\n u_dot =\n v_dot =\n w_dot =\n\n # rotational kinematics\n e0_dot =\n e1_dot =\n e2_dot =\n e3_dot =\n\n # rotatonal dynamics\n p_dot =\n q_dot =\n r_dot = \n\n # collect the derivative of the states\n x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,\n e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T\n return x_dot",
"def _unify_exprs(self, exprs):\n if isinstance(exprs, (str, unicode)):\n # We are only being given a single string expression.\n exprs = self.exprs[exprs]\n elif isinstance(exprs, theano.tensor.basic.TensorVariable):\n # TODO: does this work in case of the GPU?\n exprs = exprs\n else:\n # We have several, either string or variable, thus make it a list\n # and substitute the strings.\n exprs = list(exprs)\n exprs = [self.exprs[i] if isinstance(i, str) else i for i in exprs]\n\n return exprs",
"def unquote():\n def _unquote(quoted):\n return quoted.subexpression\n yield (\"(λ &[any] . any)\", _unquote)",
"def subexpr_to_smtlib(expr, pre, suff='', fun_annotate_subexpr = None):\n if fun_annotate_subexpr is not None and pre in PythonOperators.logic_ops:\n return '(! (' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + \\\n ') :named ' + fun_annotate_subexpr() + ')'\n else:\n return '(' + pre + ' ' + ExprTranslator.concatenate_args(expr, fun_annotate_subexpr) + suff + ')'",
"def _dealias(self, obj, path=None):\n if path is None:\n path = set()\n\n def _path_string(p):\n return \", \".join([str(entry) for entry in p])\n\n # recurse in basic structures\n if isinstance(obj, list):\n return [self._dealias(o, path=path) for o in obj]\n if isinstance(obj, dict):\n return {k: self._dealias(v, path=path) for k, v in obj.items()}\n if isinstance(obj, tuple):\n return tuple([self._dealias(o, path=path) for o in obj])\n\n if not isinstance(obj, Cacheable):\n raise PipelineException(\"pipeline targets and their dependencies should be cacheable: %s (path=%s)\" % (repr(obj), _path_string(path)))\n\n # dealias object based on its uid\n\n if [o for o in path if o is obj]:\n # produce ordered proof of cycle\n raise PipelineException(\"Cycle in dependency graph detected: %s\", _path_string(path))\n\n path.add(obj)\n\n # fixme modularity -- need a \"getDeps\" interface\n if isinstance(obj, Transform):\n # recurse\n dealiased_deps = [self._dealias(obj.inputs[k].node, path=path) for k in obj.inputs]\n else:\n dealiased_deps = []\n\n path.remove(obj)\n\n build_node = BuildNode(obj)\n dealiased = self.by_uid.get(build_node.uid, None)\n\n if not dealiased:\n # first instance\n dealiased = build_node\n dealiased.deps = dealiased_deps\n\n self.by_uid[dealiased.uid] = dealiased\n self._log_progress(\"building graph\")\n return dealiased",
"def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"relationalExpr: \", tok)\n\tleft = addExpr( )\n\texpr = \"\"\n\ttok = tokens.peek( )\n\tif tok in relations:\n\t\trel = relation( ) # expecting a relation to start off \n\t\tright = expression( ) # if there is a relation we expect there to be an expression to the right of the relation\n\t\texpr = BinaryExpr( rel, left, right )\n\t\treturn expr #fix this for syntax tree maybe\n\n\treturn left",
"def var_or_atomics():\n return Parse.plus(var_or_atomic())"
] | [
"0.5741872",
"0.5383682",
"0.5184983",
"0.5023414",
"0.5015211",
"0.49590716",
"0.4947012",
"0.49084386",
"0.48703378",
"0.4868576",
"0.48671764",
"0.4807401",
"0.4786829",
"0.47578138",
"0.47443202",
"0.471549",
"0.4715061",
"0.47128236",
"0.47061858",
"0.4674625",
"0.46744514",
"0.46668914",
"0.46650735",
"0.46643716",
"0.46144196",
"0.46078226",
"0.45977104",
"0.45894334",
"0.4577999",
"0.45770046",
"0.4569684",
"0.45677623",
"0.45457342",
"0.45456788",
"0.45369",
"0.4529431",
"0.45193276",
"0.45190644",
"0.45149708",
"0.45143944",
"0.4509812",
"0.44981974",
"0.44947395",
"0.44913423",
"0.44882905",
"0.44737473",
"0.44600925",
"0.44526988",
"0.44473243",
"0.44243535",
"0.44242993",
"0.44212335",
"0.44169718",
"0.4413418",
"0.44003424",
"0.43990463",
"0.43949997",
"0.43876913",
"0.4385551",
"0.43792683",
"0.43618253",
"0.43609318",
"0.43460113",
"0.4343115",
"0.43380108",
"0.4335988",
"0.4333804",
"0.43309548",
"0.43296328",
"0.4329509",
"0.43290403",
"0.43189538",
"0.4318885",
"0.43116477",
"0.4308704",
"0.43085518",
"0.43055332",
"0.4304978",
"0.43043342",
"0.43040234",
"0.4303186",
"0.43024617",
"0.4302316",
"0.43020475",
"0.4299101",
"0.42946103",
"0.42941132",
"0.4279912",
"0.42782816",
"0.4275798",
"0.42741847",
"0.42714968",
"0.4271058",
"0.4269687",
"0.4269587",
"0.42695177",
"0.42524412",
"0.42521328",
"0.42488238",
"0.4242682"
] | 0.5378944 | 2 |
Convert all DottedRef expressions into raw indexes. | def rewrite_refs(sexpr, from_args, base_offsets):
    def rewrite_node(sexpr):
        # Push unboxing into the state variables of distributed aggregates
        if isinstance(sexpr, expression.AggregateExpression):
            if sexpr.is_decomposable():
                ds = sexpr.get_decomposable_state()
                lsms = rewrite_statemods(ds.get_local_statemods(), from_args, base_offsets) # noqa
                rsms = rewrite_statemods(ds.get_remote_statemods(), from_args, base_offsets) # noqa
                if lsms or rsms:
                    sexpr.set_decomposable_state(
                        expression.DecomposableAggregateState(
                            ds.get_local_emitters(), lsms,
                            ds.get_remote_emitters(), rsms,
                            ds.get_finalizer()))
            return sexpr
        if not isinstance(sexpr, expression.DottedRef):
            return sexpr
        elif sexpr.table_alias not in from_args:
            raise NoSuchRelationException(sexpr.table_alias)
        else:
            op = from_args[sexpr.table_alias]
            scheme = op.scheme()
            debug_info = None
            if not sexpr.field:
                offset = 0
            elif isinstance(sexpr.field, int):
                if sexpr.field >= len(scheme):
                    raise ColumnIndexOutOfBounds(str(sexpr))
                offset = sexpr.field
            else:
                assert isinstance(sexpr.field, basestring)
                offset = scheme.getPosition(sexpr.field)
                debug_info = sexpr.field
            offset += base_offsets[sexpr.table_alias]
            return expression.UnnamedAttributeRef(offset, debug_info)

    def recursive_eval(sexpr):
        """Rewrite a node and all its descendants"""
        newexpr = rewrite_node(sexpr)
        newexpr.apply(recursive_eval)
        return newexpr

    return recursive_eval(sexpr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_indices(self):\n\n def query(rel): \n return \"\"\"SELECT pg_class.relname, pg_index.indkey\n FROM pg_class, pg_index\n WHERE (pg_index.indexrelid = pg_class.oid)\n AND (pg_index.indrelid = (SELECT pg_class.oid FROM pg_class WHERE pg_class.relname = \\'{}\\'));\n \"\"\".format(rel)\n\n rels = tpch.schema.keys()\n idxs = dict.fromkeys(rels)\n\n with self.tpch_cxn.cursor() as curs:\n for rel in rels:\n curs.execute(query(rel))\n idxs_ = curs.fetchall()\n idxs_ = dict(idxs_) # index -> index keys \n \n # TODO this can be done cleanly in query\n # pg_index.indkey is a SQL array of attributes indices in their respective tables\n split=lambda attrs: attrs.split() \n cast=lambda attrs: list(map(lambda attr: int(attr)-1, attrs))\n invertindex=lambda attrs: list(np.array(schema[rel])[attrs])\n\n attrs = idxs_.values() \n attrs = list(map(split, attrs))\n attrs = list(map(cast, attrs))\n attrs = list(map(invertindex, attrs))\n\n idxs_ = {key : attrs[i] for i, key in enumerate(idxs_.keys())}\n idxs[rel] = idxs_\n return idxs",
"def get_indexes(cls):\n if cls.indexes:\n return cls.indexes\n indexes = []\n\n for name in cls.__dict__.keys():\n value = getattr(cls, name)\n if isinstance(value, Index):\n indexes.append(value)\n value.name = cls.__name__ + '#' + name\n\n cls.indexes = indexes\n return cls.indexes",
"def index_object(idxs=None):",
"def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)",
"def get_raw_indexes(self) -> List[Dict[str, Any]]:\n return self.http.get(self.config.paths.index)",
"def _do_index_fields(self, doc, generator, obj, obj_weight):\n for field in self.fields + self.tags:\n # Trying to resolve field value or skip it\n # Отладочка:\n # print(field, field.resolve(obj))\n try:\n value = field.resolve(obj)\n if value is None:\n continue\n except AttributeError:\n continue\n if field.prefix:\n fvalue = field.convert(value)\n doc.add_value(field.number, fvalue)\n prefix = smart_text(field.get_tag())\n value = smart_text(value)\n generator.index_text_without_positions(value, field.weight*obj_weight, prefix)\n if prefix: # if prefixed then also index without prefix\n generator.index_text_without_positions(value, field.weight*obj_weight)",
"def reconstruct_input(self, ix):",
"def _normalize_index(oid, index, queried_oid):\n prefix = oid[:len(queried_oid)]\n # TODO: This is hacky and should probably be fixed in the normalize_oid function of easysnmp\n if oid != prefix:\n index = oid[len(queried_oid) + 1:] + '.' + index\n\n return index",
"def test_index_reflection(self):\n import warnings\n def capture_warnings(*args, **kw):\n capture_warnings._orig_showwarning(*args, **kw)\n capture_warnings.warnings.append(args)\n capture_warnings._orig_showwarning = warnings.warn\n capture_warnings.warnings = []\n\n m1 = MetaData(testing.db)\n t1 = Table('party', m1,\n Column('id', String(10), nullable=False),\n Column('name', String(20), index=True), \n Column('aname', String(20))\n )\n m1.create_all()\n \n testing.db.execute(\"\"\"\n create index idx1 on party ((id || name))\n \"\"\") \n testing.db.execute(\"\"\"\n create unique index idx2 on party (id) where name = 'test'\n \"\"\")\n \n testing.db.execute(\"\"\"\n create index idx3 on party using btree\n (lower(name::text), lower(aname::text))\n \"\"\")\n \n try:\n m2 = MetaData(testing.db)\n\n warnings.warn = capture_warnings\n t2 = Table('party', m2, autoload=True)\n \n wrn = capture_warnings.warnings\n assert str(wrn[0][0]) == (\n \"Skipped unsupported reflection of expression-based index idx1\")\n assert str(wrn[1][0]) == (\n \"Predicate of partial index idx2 ignored during reflection\")\n assert len(t2.indexes) == 2\n # Make sure indexes are in the order we expect them in\n tmp = [(idx.name, idx) for idx in t2.indexes]\n tmp.sort()\n \n r1, r2 = [idx[1] for idx in tmp]\n\n assert r1.name == 'idx2'\n assert r1.unique == True\n assert r2.unique == False\n assert [t2.c.id] == r1.columns\n assert [t2.c.name] == r2.columns\n finally:\n warnings.warn = capture_warnings._orig_showwarning\n m1.drop_all()",
"def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices",
"def _convert_spec_to_indexes(self, merge_specification):\n new_merge_specification = []\n for combine_spec in merge_specification:\n new_combine_spec = []\n for column_identifier, percentage in combine_spec:\n for column_index in \\\n self.column_translator.id_to_indexes(column_identifier):\n new_combine_spec.append((column_index, percentage))\n new_merge_specification.append(new_combine_spec)\n return new_merge_specification",
"def index_to_expr(self, index):\n if isinstance(index, ast.Index):\n return index.value\n elif isinstance(index, ast.Slice):\n if index.lower is None and index.step is None:\n args = [ index.upper ]\n elif index.step is None:\n args = [ index.lower, index.upper ]\n else:\n args = [ index.lower, index.upper, index.step ]\n args = [ to_name_constant(None) if arg is None else arg\n for arg in args ]\n return to_call(to_name('slice'), args)\n elif isinstance(index, ast.ExtSlice):\n indexes = list(map(self.index_to_expr, index.dims))\n return ast.Tuple(elts=indexes, ctx=ast.Load())\n elif isinstance(index, ast.Tuple):\n elts = list(map(self.index_to_expr, index.elts))\n return ast.Tuple(elts=elts, ctx=ast.Load())\n else:\n return index",
"def dot_index(index, data):\n if index:\n for key in index.split('.'):\n if isinstance(data, list):\n data = [x[key] for x in data]\n else:\n data = data[key]\n if isinstance(data, list):\n return data\n if isinstance(data, dict):\n return data.items()\n else:\n return [data]",
"def _processUnhashableIndex(self, idx):\n from pyomo.core.expr import current as EXPR\n #\n # Iterate through the index and look for slices and constant\n # components\n #\n fixed = {}\n sliced = {}\n ellipsis = None\n _found_numeric = False\n #\n # Setup the slice template (in fixed)\n #\n if normalize_index.flatten:\n idx = normalize_index(idx)\n if idx.__class__ is not tuple:\n idx = (idx,)\n\n for i,val in enumerate(idx):\n if type(val) is slice:\n if val.start is not None or val.stop is not None:\n raise IndexError(\n \"Indexed components can only be indexed with simple \"\n \"slices: start and stop values are not allowed.\")\n if val.step is not None:\n logger.warning(\n \"DEPRECATION WARNING: The special wildcard slice \"\n \"(::0) is deprecated. Please use an ellipsis (...) \"\n \"to indicate '0 or more' indices\")\n val = Ellipsis\n else:\n if ellipsis is None:\n sliced[i] = val\n else:\n sliced[i-len(idx)] = val\n continue\n\n if val is Ellipsis:\n if ellipsis is not None:\n raise IndexError(\n \"Indexed components can only be indexed with simple \"\n \"slices: the Pyomo wildcard slice (Ellipsis; \"\n \"e.g., '...') can only appear once\")\n ellipsis = i\n continue\n\n if hasattr(val, 'is_expression_type'):\n _num_val = val\n # Attempt to retrieve the numeric value .. if this\n # is a template expression generation, then it\n # should raise a TemplateExpressionError\n try:\n val = EXPR.evaluate_expression(val, constant=True)\n _found_numeric = True\n\n except TemplateExpressionError:\n #\n # The index is a template expression, so return the\n # templatized expression.\n #\n from pyomo.core.expr import current as EXPR\n return EXPR.GetItemExpression(tuple(idx), self)\n\n except EXPR.NonConstantExpressionError:\n #\n # The expression contains an unfixed variable\n #\n raise RuntimeError(\n\"\"\"Error retrieving the value of an indexed item %s:\nindex %s is not a constant value. This is likely not what you meant to\ndo, as if you later change the fixed value of the object this lookup\nwill not change. If you understand the implications of using\nnon-constant values, you can get the current value of the object using\nthe value() function.\"\"\" % ( self.name, i ))\n\n except EXPR.FixedExpressionError:\n #\n # The expression contains a fixed variable\n #\n raise RuntimeError(\n\"\"\"Error retrieving the value of an indexed item %s:\nindex %s is a fixed but not constant value. This is likely not what you\nmeant to do, as if you later change the fixed value of the object this\nlookup will not change. If you understand the implications of using\nfixed but not constant values, you can get the current value using the\nvalue() function.\"\"\" % ( self.name, i ))\n #\n # There are other ways we could get an exception such as\n # evaluating a Param / Var that is not initialized.\n # These exceptions will continue up the call stack.\n #\n\n # verify that the value is hashable\n hash(val)\n if ellipsis is None:\n fixed[i] = val\n else:\n fixed[i - len(idx)] = val\n\n if sliced or ellipsis is not None:\n return _IndexedComponent_slice(self, fixed, sliced, ellipsis)\n elif _found_numeric:\n if len(idx) == 1:\n return fixed[0]\n else:\n return tuple( fixed[i] for i in range(len(idx)) )\n else:\n raise DeveloperError(\n \"Unknown problem encountered when trying to retrieve \"\n \"index for component %s\" % (self.name,) )",
"def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')",
"def to_tokens(self, idx: Union[int, Tuple[int], List[int], np.ndarray])\\\n -> Union[Hashable, List[Hashable]]:\n if isinstance(idx, (list, tuple)):\n return [self.all_tokens[i] for i in idx]\n elif isinstance(idx, np.ndarray):\n if idx.ndim == 0:\n return self.all_tokens[idx]\n elif idx.ndim == 1:\n return [self.all_tokens[i] for i in idx]\n else:\n raise ValueError('Unsupported numpy ndarray ndim={}'.format(idx.ndim))\n else:\n return self.all_tokens[idx]",
"def gen_array_index(self, expr: expressions.ArrayIndex):\n # Load base as an rvalue, to make sure we load pointers values.\n base = self.gen_expr(expr.base, rvalue=True)\n index = self.gen_expr(expr.index, rvalue=True)\n\n # Calculate offset:\n element_size = self.sizeof(expr.base.typ.element_type)\n index = self.builder.emit_cast(index, ir.ptr)\n offset = self.builder.emit_mul(index, element_size, ir.ptr)\n\n # Calculate address:\n return self.builder.emit_add(base, offset, ir.ptr)",
"def indexes_to_objects(self, index_vector: np.ndarray) -> Sequence[Any]:\n return [self.idx_to_obj[idx] for idx in index_vector if idx in self.idx_to_obj]",
"def indexes(self):\n return getattr(self, '_indexes', None)",
"def _get_sql_indexes(self, table_attr):\n index_template = 'CREATE INDEX %s_%s_x ON %s (\"%s\");\\n'\n indexes = '\\n';\n\n for index_name, index_attrs in table_attr['indexes'].iteritems():\n columns = list()\n for index_column_name in index_attrs['columns']:\n columns.append(table_attr['columns'][index_column_name]['name'])\n indexes += index_template % (table_attr['name'], index_attrs['name'], table_attr['name'], '\" ,\"'.join(columns))\n\n return indexes",
"def _idxs_postformat_null(self):\n pass",
"def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes",
"def _terms_to_idxs(self,\n level: str,\n term_to_idx: Dict[str, int]\n ) -> None:\n if level == 'token':\n path_in = self.path_token_terms\n path_out = self.path_idx_token_terms\n elif level == 'lemma':\n path_in = self.path_lemma_terms\n path_out = self.path_idx_lemma_terms\n else:\n raise Exception('Error! Level not known.')\n\n terms = set()\n\n with open(path_in, 'r', encoding='utf8') as fin:\n for line in fin:\n terms.add(line.strip('\\n'))\n\n term_cmd = []\n with open(path_out, 'w', encoding='utf8') as fout:\n for t in terms:\n term_cmd.append(term_to_idx[t])\n fout.write(str(term_to_idx[t]) + '\\n')",
"def objects_to_indexes(self, object_seq: Sequence[Any]) -> np.ndarray:\n res = np.zeros(len(object_seq))\n for i, obj in enumerate(object_seq):\n if obj in self.obj_to_idx:\n res[i] = self.obj_to_idx[obj]\n else:\n res[i] = self.start-1\n return res",
"def inverse_transform(self, indices):\n return self._to_words(obj=indices)",
"def transform(self,list_array):\n transformed_list = []\n for instance in list_array:\n transformed_list.append([self.notes_to_index[note] for note in instance])\n return np.array(transformed_list, dtype=np.int32)",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def return_tag_tokens(self, tags_indexes, observations):\n tag_pred = []\n for tag_index in tags_indexes:\n tag_pred.append(observations.T.index[tag_index])\n return tag_pred",
"def indexes(self):\r\n\r\n\r\n if not self.usesequence:\r\n\r\n if len(self.get_all_indexes()) != len(self.sortedindexes) \\\r\n or self.indexchanged or not self.sortedindexes:\r\n self.indexchanged = False\r\n self.sortedindexes = sorted(self.get_all_indexes(),\r\n key=lambda x_temp: Index(x_temp))\r\n return self.sortedindexes\r\n return self.sortedindexes\r\n else:\r\n if self.indexchanged:\r\n self.sortedindexes = self.default_dict['indexlist'].strings()\r\n return self.sortedindexes\r\n else:\r\n return self.sortedindexes",
"def idx2tokens(eval_file, ids, start_idxs, end_idxs):\n predictions = dict()\n for _id, start_idx, end_idx in zip(ids, start_idxs, end_idxs):\n context = eval_file[str(_id.item())]['context']\n spans = eval_file[str(_id.item())]['spans']\n quid = eval_file[str(_id.item())]['uuid']\n if start_idx >= len(spans) or end_idx >= len(spans):\n answer = \"\"\n else:\n start = spans[start_idx][0]\n # print(end_idx)\n # print(spans[end_idx])\n end = spans[end_idx][1]\n answer = context[start:end]\n predictions[quid] = answer\n return predictions",
"def generate_reverse_index(self):",
"def _index_and_mapping(self, namespace):\n index, doc_type = namespace.split('.', 1)\n return index.lower(), doc_type",
"def _resolve_index(self, cls):\n # If we have just a string, it's a simple index\n if isinstance(self.index, basestring):\n return self._resolve_name(cls, self.index)\n\n # Otherwise it must be an iterable\n for i in xrange(len(self.index)):\n # Of 2-tuples\n pair = self.index[i]\n if len(pair) != 2:\n raise TypeError(\"Invalid index: {!r}\".format(self.index))\n # Where the first is the key, and the second the direction\n self.index[i] = (self._resolve_name(cls, pair[0]), pair[1])\n\n return self.index",
"def _group_indexes(self, indexes):\n for index_name, (cardinality, link_names) in indexes:\n for link_name in link_names:\n yield link_name, index_name",
"def index(self, path):\n try:\n indices = [int(x) if x.isdigit() else x for x in split(r'[\\/\\[\\]]+', path[1:])]\n return reduce(lambda x, y: x[y], indices, self.document)\n except:\n return None",
"def _get_indices_from_payload(self):\n for _, value in self.s_namespaces.items():\n for index in value['indexes'].items():\n yield index",
"def _tree_field_indices(self):\n\n if self._tfi is not None:\n return self._tfi\n\n self.arbor._grow_tree(self)\n self._tfi = np.array([node.tree_id for node in self._tree_nodes])\n return self._tfi",
"def to_flat_index(self) -> Index: # type: ignore[override]\n return Index(self._values, tupleize_cols=False)",
"def indexes(self):\n indexes = self.execute(self.commands.get_indexes(self.name))\n return [Index(*tup) for tup in indexes]",
"def get_index_list(self, attached=\"main\"):\n self._check_connection()\n if attached == \"main\":\n request = \"\"\" SELECT name,tbl_name,sql\n FROM (SELECT * FROM sqlite_master UNION ALL SELECT * FROM sqlite_temp_master) AS temptbl\n WHERE type='index' ORDER BY name;\"\"\"\n else:\n request = \"\"\" SELECT name,tbl_name,sql\n FROM (SELECT * FROM %s.sqlite_master) AS temptbl\n WHERE type='index' ORDER BY name;\"\"\" % attached\n select = self._connection.execute(request)\n\n exp = re.compile(\"[(]([a-zA-Z0-9_,]+)[)]\")\n res = []\n for a, b, c in select:\n fi = exp.findall(c)\n if len(fi) != 1:\n raise DBException( # pragma: no cover\n \"Unable to extract index fields from %r\" % c)\n fi = tuple(s.strip() for s in fi[0].split(\",\"))\n res.append((a, b, c, fi))\n select.close()\n #self.LOG (\"number of indices \", len (res))\n select = res\n\n res = []\n if attached == \"main\":\n res = select\n else:\n for el in select:\n res.append((el[0], attached + \".\" + el[1], el[2], el[3]))\n #self.LOG (\"number of indices \", len (res))\n\n if attached == \"main\":\n attach = self.get_attached_database_list()\n for a in attach:\n if a in (\"main\", \"temp\"):\n continue\n r = self.get_index_list(a)\n res.extend(r)\n\n return res",
"def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]",
"def build_invert_relation(query_formatted, tree):\n match_dict = {} # Has structure {node object : [list of tuples of index]}\n tokens = tokenize_query(query_formatted)\n for node in PreOrderIter(tree):\n if getattr(node, 'id') not in node_types.KEY_PROPERTY:\n continue\n else:\n for field in node_types.KEY_PROPERTY[getattr(node, 'id')]:\n if not hasattr(node, field):\n continue\n value = getattr(node, field)\n matched_pos = search_query(value, tokens, query_formatted)\n if matched_pos is not None:\n if node in match_dict:\n match_dict[node] = match_dict[node] + matched_pos\n else:\n match_dict[node] = matched_pos\n return match_dict",
"def listtypeindexes(self):\n\n indexes = {}\n for dtype,value in sorted(self._allowed_patterns.items()):\n if value.has_key('index'):\n indexes[dtype] = value['index']\n\n return indexes",
"def get_descriptor_index_impls(reload_modules=False):\n this_dir = osp.abspath(osp.dirname(__file__))\n env_var = 'DESCRIPTOR_INDEX_PATH'\n helper_var = 'DESCRIPTOR_INDEX_CLASS'\n return plugin.get_plugins(__name__, this_dir, env_var, helper_var,\n DescriptorIndex, reload_modules=reload_modules)",
"def load(cls):\n df = Operator_Table.df\n df.operator = df.operator.apply(sp.normalize)\n df.operator_alias = df.operator_alias.apply(sp.normalize)\n df = df.rename(columns={\"operator_alias\": \"alias\"})\n return SQLIndex(data=df).set_index(\"operator\")",
"def indexes(self, fields):\r\n\r\n indexes = [self.index(field) for field in fields]\r\n\r\n return tuple(indexes)",
"def rebuild_all_indexes():\n response = _get_lambda_client().invoke(\n FunctionName=indexer_function_name,\n InvocationType=\"Event\",\n )",
"def index_relations(gold_view):\n\n rel_lookup = {}\n for rel in gold_view.select(rel_type):\n arg1 = rel.arg1.argument\n arg2 = rel.arg2.argument\n if rel.category == 'CONTAINS':\n rel_lookup[(arg1, arg2)] = rel.category\n\n return rel_lookup",
"def indexer(expression, stream):\n def throw(node, item):\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__,\n item.__class__.__name__,\n )\n )\n\n def mkint(expression):\n if expression.data == 'integer':\n return int(expression.children[0])\n elif expression.data == 'float':\n idx = float(expression.children[0])\n if not idx.is_integer():\n idx = int(idx) + 1\n return idx\n else:\n assert False, 'bad number expression {}'.format(\n expression\n )\n\n def mkslice(expression):\n s, e = None, None\n for idx in expression.children:\n if idx.data == 'start':\n s = mkint(idx.children[0])\n elif idx.data == 'end':\n e = mkint(idx.children[0])\n yield slice(s, e)\n\n def mkindex(expression):\n if expression.data == 'expression':\n return evaluate(expression, stream)\n elif expression.data == 'slice':\n return mkslice(expression)\n elif expression.data == 'cname':\n return expression.children\n elif expression.data == 'string':\n return [expression.children[0][1:-1]]\n elif expression.data in ('integer', 'float'):\n return [mkint(expression)]\n else:\n assert False, 'bad index expression {}'.format(expression)\n\n for item in mkindex(expression.children[0]):\n for node in stream:\n if isinstance(node, Object):\n if isinstance(item, Primitive):\n item = str(item)[1:-1]\n if isinstance(item, basestring):\n yield node.get(item, null)\n continue\n\n if isinstance(node, List):\n if isinstance(item, Primitive):\n item = int(str(item))\n if isinstance(item, (int, slice)):\n try:\n yield node[item]\n except IndexError:\n yield null\n continue\n\n if not optional(expression):\n throw(node, item)",
"def _index(self):\n annotations = IAnnotations(self.portal)\n # create the error reference storage\n if annotations.get(INDEX_KEY) is None:\n annotations[INDEX_KEY] = OOBTree()\n return annotations[INDEX_KEY]",
"def _forest_field_indices(self):\n return self._ffi",
"def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)",
"def decodeindexes(self, idx):\n text = \"\"\n for elem in idx:\n char = self.index2char[elem]\n if char not in SPCHARS:\n text += char\n\n return text",
"def test_directly_indexed_expression(self, fa, ti0, t0, exprs):\n eqs = EVAL(exprs, ti0.base, t0)\n op = Operator(eqs, dse='noop', dle='noop')\n trees = retrieve_iteration_tree(op)\n assert len(trees) == 2\n assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs\n assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs",
"def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)",
"def inverse_transform_labels(self, indices):\n classes = self.classes_()\n return [classes[ind] for ind in indices]",
"def getXRefsFrom(self):\r\n # type: () -> (list[int], list[int])\r\n crefs = []\r\n drefs = []\r\n\r\n\r\n # normalFlow = True\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n # for ref in idautils.CodeRefsFrom(self.func_ea-1, not normalFlow): # XrefsFrom\r\n # crefs.append(ref)\r\n\r\n # needed to identify pool variables. drefs accessing the pool may access pointers\r\n # in the pool. the pointers should be retrieved instead\r\n size_pool = self.getSize(withPool=True)\r\n # for each instruction\r\n for i in idautils.FuncItems(self.func_ea):\r\n for xref in idautils.XrefsFrom(i, 0):\r\n # if the xref is to a far or near called function\r\n if xref.type == idc.fl_CN or xref.type == idc.fl_CF:\r\n if xref.to not in crefs:\r\n crefs.append(xref.to)\r\n # if the xref is to a read or write data access\r\n if xref.type == idc.dr_W or xref.type == idc.dr_R:\r\n if xref.to not in drefs:\r\n # if xref.to is in the pool, then retrieve content if it's a pointer\r\n if xref.to < self.func_ea + size_pool:\r\n # those are the references found at the pool location\r\n iteratedOnce = False\r\n for poolRef in idautils.XrefsFrom(xref.to, 0):\r\n if iteratedOnce:\r\n raise(FunctionException(\"%08X: there should only be one data xref in pool variable\"\r\n % (self.func_ea)))\r\n # there should only be one in the pool refernce\r\n if poolRef.to not in drefs:\r\n drefs.append(poolRef.to)\r\n iteratedOnce = True\r\n else:\r\n drefs.append(xref.to)\r\n\r\n # for ref in idautils.DataRefsFrom(self.func_ea):\r\n # drefs.append(ref)\r\n # for ref in idautils.DataRefsFrom(self.func_ea - 1):\r\n # drefs.append(ref)\r\n return crefs, drefs",
"def process_and_dispatch(self):\n references = []\n for raw_block_references in self.raw_references:\n bibcode = raw_block_references['bibcode']\n block_references = raw_block_references['block_references']\n item_nums = raw_block_references.get('item_nums', [])\n\n parsed_references = []\n for i, reference in enumerate(block_references):\n reference = self.latex_reference.cleanup(reference)\n logger.debug(\"confTEX: parsing %s\" % reference)\n parsed_references.append(self.merge({'refstr': reference, 'refraw': reference}, self.any_item_num(item_nums, i)))\n\n references.append({'bibcode': bibcode, 'references': parsed_references})\n logger.debug(\"%s: parsed %d references\" % (bibcode, len(references)))\n\n return references",
"def _get_transformations(self, current_text, indices_to_modify):\n transformed_texts = []\n words = current_text.words\n for idx in indices_to_modify:\n word = words[idx]\n # expend when word in map\n if word in EXTENSION_MAP:\n expanded = EXTENSION_MAP[word]\n transformed_text = current_text.replace_word_at_index(idx, expanded)\n transformed_texts.append(transformed_text)\n\n return transformed_texts",
"def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]",
"def covariate_to_index(self):\n covariate_df = self.dismod_file.covariate\n return dict(covariate_df[[\"covariate_name\", \"covariate_id\"]].to_records(index=False))",
"def _vectorise_bag_of_pos_with_dependency(words, indexes):\n matrixes = []\n for i, index in enumerate(indexes):\n poss = _get_bag_of_pos_with_dependency(words, index)\n matrixes.append(\" \".join(poss))\n return cv_dependencies.transform(matrixes).toarray().flatten()",
"def index_nodes(self):\n index_nodes = []\n for node in self.nodes:\n if 'indexnode' == node.get('infos').get('type'):\n index_nodes.append(node)\n return index_nodes",
"def contextToIndices(self, context):\n return self.__valuesToIndices(self._settings.CONTEXT_VALUE_TO_INDEX, context)",
"def get_data_as_indices(self, file_name):\n X, Y = [],[]\n org_X, org_Y = [], []\n\n for (words, tags) in read_conll_file(file_name):\n word_indices, word_char_indices = self.get_features(words)\n tag_indices = [self.tag2idx.get(tag) for tag in tags]\n X.append((word_indices,word_char_indices))\n Y.append(tag_indices)\n org_X.append(words)\n org_Y.append(tags)\n return X, Y #, org_X, org_Y - for now don't use",
"def tokens_to_idxs(self, token_seqs, lexicon):\n idx_seqs = [[lexicon[token] if token in lexicon else lexicon['<UNK>'] for \n token in token_seq] for token_seq in token_seqs]\n return idx_seqs",
"def simple_reflection(self, i):\n return self.reflection(self.simple_root(i), self.simple_coroot(i))",
"def vectorize(self, ordering: SortedList, definitions: dict) -> list:\n vec = Vectorizer(ordering, definitions)\n self.expression = vec.transform(self.expression)\n return vec.visited",
"def simplify_expressions(lines):\n\n code_len = len(lines)\n # Simplify array index expressions\n for pos in range(code_len):\n line = lines[pos]\n line = re.sub('\\[(.+?)\\]', index_simplify, line)\n lines[pos] = line\n\n # Simplify mod expressions\n for pos in range(code_len):\n line = lines[pos]\n line = re.sub('\\((.+?)\\) %', mod_simplify, line)\n lines[pos] = line\n\n return lines",
"def indexes(self) -> list:\n return self._indexes",
"def DataRefsTo(ea):\n xref = ida_xref.xrefblk_t()\n yield from xref.drefs_to(ea)",
"def sentences_2_idxs(self):\n fo_pos = open(self.config.parsed_train_file_pos, 'w')\n fo_neg = open(self.config.parsed_train_file_neg, 'w')\n self.load_dicts()\n labels = pd.read_csv(self.config.train_file, usecols=[\"target\"])\n\n labels = list(labels.values[:, 0])\n questions = pd.read_csv(self.config.train_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx.get(self.config.unknown_token)\n\n for label, quest in zip(labels, questions.question_text):\n tokens = preprocess_text(quest)\n\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n out_line = (str(\" \".join(str(num) for num in idxs)) + \"\\n\")\n if label == 1:\n fo_pos.write(out_line)\n else:\n fo_neg.write(out_line)",
"def compile_expressions(self):\n if getattr(self, \"_attrs\", None):\n for k, v in self._attrs.items():\n try:\n Expression.compile_cache(v)\n except:\n pass\n if \"${\" in v and \"}\" in v:\n Expression.extract(v)\n if getattr(self, \"_let\", None):\n for k, v in self._let.items():\n try:\n Expression.compile_cache(v)\n except:\n pass\n if getattr(self, \"text\", None):\n Expression.extract(self.text)",
"def indices(self):\n return self.index.indices",
"def get_bases():\n\tbss = []\n\tfor es in MV.index:\n\t\tbs = []\n\t\tif es == ():\n\t\t\tbs.append(_1)\n\t\telse:\n\t\t\tfor js in es:\n\t\t\t\tbmv = reduce(operator.mul, map(lambda j: e[j], js))\n\t\t\t\tbs.append(bmv)\n\t\t\t\t\n\t\tbss.append(bs)\n\t\n\treturn bss",
"def _get_query_representation(self, query, index):\n term_frequencies = {term: query.count(term) for term in query}\n vec = np.zeros(shape=(index.num_terms,), dtype=np.float64)\n for i, term in enumerate(sorted(index.get_index_terms())):\n vec[i] = self._tfidf(\n term_frequencies.get(term, 0),\n index.get_document_frequency(term),\n index.get_document_count()\n )\n return vec",
"def cat_to_num(self):\n\n for col in self.conf_dict:\n indexer = StringIndexer(inputCol=col, outputCol=\"{}_indexed\".format(col))\n self.df = indexer.fit(self.df).transform(self.df)",
"def __expr_finalize__(self):\n self._functions = tuple(filter_ordered(flatten(detect_io(self.expr, relax=True))))\n self._dimensions = flatten(i.indices for i in self.functions if i.is_Indexed)\n self._dimensions = tuple(filter_ordered(self._dimensions))",
"def simplify_indices(kernel):\n from loopy.symbolic import SubstitutionRuleMappingContext as SRMC\n rule_mapping_context = SRMC(kernel.substitutions,\n kernel.get_var_name_generator())\n idx_simplifier = IndexSimplifier(rule_mapping_context, kernel)\n return rule_mapping_context.finish_kernel(idx_simplifier.map_kernel(kernel))",
"def index_terms(self, terms):\n index = dict()\n for term in terms:\n links = [cell.metadata[\"nbpages\"][\"link\"] for nb in self.notebooks\n for cell in nb.content.cells if re.search(term, cell.source) if \"nbpages\" in cell.metadata.keys()]\n index[term] = list(dict.fromkeys(links))\n return index",
"def reindex_subcomponent_taxa():\n pass",
"def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index",
"def index_to_selector(cls, idx):\n\n if isinstance(idx, pd.MultiIndex):\n return idx.tolist()\n else:\n return [(i,) for i in idx.tolist()]",
"def get_ndcs_gen(rec_ndcs_ls, lep_genindex):\n return [lep_genindex[ndx] for ndx in rec_ndcs_ls]",
"def index_matrix_to_objects(\n self, index_matrix: np.ndarray) -> Sequence[Sequence[Any]]:\n return [[self.idx_to_obj[idx] for idx in idxs if idx in self.idx_to_obj] for idxs in index_matrix]",
"def embed(raw_seq, index_dict):\n return np.asarray([index_dict[word.lower()]\n if word.lower() in index_dict\n else index_dict[OOV_TOKEN] for word in raw_seq])",
"def apply_to_exprs(self, *args):\n return _ida_hexrays.ctree_visitor_t_apply_to_exprs(self, *args)",
"def build_index(ast):\n html = []\n\n blocks = ast['blocks']\n for block in blocks:\n block_type = block['t']\n\n if block_type == 'Header':\n html.append(build_heading(block))\n elif block_type == 'Para':\n html.append(build_paragraph(block))\n\n return ''.join(html)",
"def to_listing(self) -> List[Dict]:\n return [obj.descriptor for ns in self.index.values() for obj in ns.values()]",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )",
"def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes",
"def _translate_indices_to_node_names(self, d, names):\t\n\n\t\tdef __translate(obj, names):\n\t\t\t\"\"\" Recursive translate indices into node names \"\"\"\n\t\t\tif isinstance(obj, int):\n\t\t\t\treturn names[obj]\n\t\t\telif isinstance(obj, list):\n\t\t\t\treturn [__translate(x, names) for x in obj]\n\t\t\telif isinstance(obj, dict):\n\t\t\t\tnew_obj = {}\n\t\t\t\tfor k,v in obj.items():\n\t\t\t\t\tnew_obj[__translate(k, names)] = __translate(v, names)\n\t\t\t\treturn new_obj\n\t\t\telse:\n\t\t\t\treturn obj\n\n\t\tnew_dict = __translate(d, names)\n\n\t\treturn new_dict",
"def entities_index_list(schema=None, expand=True):\n for schema in schema_scope(schema, expand=expand):\n for version in SETTINGS.INDEX_READ:\n yield schema_index(schema, version)",
"def get_formula_in_list(self):\n return tree_to_string(self.expression)",
"def _read_expression_direct(cls):\n\n expression_data = {}\n expression_columns = cls._get_columns(EXPRESSION_MANIFEST)\n expression_psvs = cls._get_component_psvs(EXPRESSION_MANIFEST)\n\n for expression_psv in expression_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(expression_psv))):\n row_dict = dict(zip(expression_columns, row.strip().split(b'|')))\n expression_data.setdefault(\n row_dict[\"cellkey\"].decode(), {})[row_dict[\"featurekey\"].decode()] = \\\n float(row_dict[\"exrpvalue\"])\n\n return expression_data",
"def get_exon_expr(gene, vstart, vstop, countinfo, Idx, seg_counts):\n out_shape = (seg_counts.shape[1] + 1) if len(seg_counts.shape) > 1 else 2\n # Todo: deal with absense of count file\n if vstart is np.nan or vstop is np.nan: # isolated exon case\n return np.zeros((0, out_shape), dtype='float')\n if countinfo is None or Idx.sample is None:\n return np.zeros((0, out_shape), dtype='float') #[np.nan]\n\n segments = gene.segmentgraph.segments\n\n sv1_id = bisect.bisect(segments[0], vstart) - 1\n sv2_id = bisect.bisect(segments[0], vstop) - 1\n if sv1_id == sv2_id:\n if len(seg_counts.shape) > 1:\n expr_list = np.c_[np.array([vstop - vstart]), [seg_counts[sv1_id, :]]]\n else:\n expr_list = np.array([(vstop - vstart, seg_counts[sv1_id])])\n else:\n if len(seg_counts.shape) > 1:\n expr_list = np.c_[segments[1, sv1_id:sv2_id + 1] - segments[0, sv1_id:sv2_id + 1], seg_counts[sv1_id:sv2_id + 1, :]]\n else:\n expr_list = np.c_[segments[1, sv1_id:sv2_id + 1] - segments[0, sv1_id:sv2_id + 1], seg_counts[sv1_id:sv2_id + 1, np.newaxis]]\n expr_list[0, 0] -= (vstart - segments[0, sv1_id])\n expr_list[-1, 0] -= (segments[1, sv2_id] - vstop)\n if gene.strand == '-': # need to reverse epression list to match the order of translation\n expr_list = expr_list[::-1]\n return expr_list",
"def get_index_data():\n indexTickers = ['^DJI', '^RUA', '^GSPC', '^IXIC', '^SZSA', '^XCI', '^MSH']",
"def __replaceArrRefs(self, tnode, replace_table):\n\n if isinstance(tnode, ast.NumLitExp):\n return tnode\n\n elif isinstance(tnode, ast.StringLitExp):\n return tnode\n\n elif isinstance(tnode, ast.IdentExp):\n return tnode\n\n elif isinstance(tnode, ast.ArrayRefExp):\n aref_str = str(tnode)\n if aref_str in replace_table:\n iname = replace_table[aref_str]\n return ast.IdentExp(iname)\n else:\n return tnode\n\n elif isinstance(tnode, ast.FunCallExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n tnode.args = [self.__replaceArrRefs(a, replace_table) for a in tnode.args]\n return tnode\n\n elif isinstance(tnode, ast.UnaryExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.BinOpExp):\n tnode.lhs = self.__replaceArrRefs(tnode.lhs, replace_table)\n tnode.rhs = self.__replaceArrRefs(tnode.rhs, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.ParenthExp):\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.ExpStmt):\n if tnode.exp:\n tnode.exp = self.__replaceArrRefs(tnode.exp, replace_table)\n return tnode\n\n elif isinstance(tnode, ast.CompStmt):\n tnode.stmts = [self.__replaceArrRefs(s, replace_table) for s in tnode.stmts]\n return tnode\n\n elif isinstance(tnode, ast.IfStmt):\n tnode.test = self.__replaceArrRefs(tnode.test, replace_table)\n tnode.true_stmt = self.__replaceArrRefs(tnode.true_stmt, replace_table)\n if tnode.false_stmt:\n tnode.false_stmt = self.__replaceArrRefs(\n tnode.false_stmt, replace_table\n )\n return tnode\n\n elif isinstance(tnode, ast.ForStmt):\n if tnode.init:\n tnode.init = self.__replaceArrRefs(tnode.init, replace_table)\n if tnode.test:\n tnode.test = self.__replaceArrRefs(tnode.test, replace_table)\n if tnode.iter:\n tnode.iter = self.__replaceArrRefs(tnode.iter, replace_table)\n tnode.stmt = self.__replaceArrRefs(tnode.stmt, replace_table)\n return tnode\n\n else:\n err(\n \"orio.module.ortildriver.transformation internal error:OrTilDriver: unknown type of AST: %s\"\n % tnode.__class__.__name__\n )",
"def index_data_annots(annots_df, daids, words, with_internals=True,\n aggregate=False, alpha=3, thresh=0):\n if utool.VERBOSE:\n print('[smk_index] index_data_annots')\n flann_params = {}\n _words = pdh.ensure_values(words)\n wordflann = nntool.flann_cache(_words, flann_params=flann_params,\n appname='smk')\n _daids = pdh.ensure_values(daids)\n _vecs_list = pdh.ensure_2d_values(annots_df['vecs'][_daids])\n _idx2_dvec, _idx2_daid, _idx2_dfx = nntool.invertable_stack(_vecs_list, _daids)\n\n # Pandasify\n if WITH_PANDAS:\n idx_series = pdh.IntIndex(np.arange(len(_idx2_daid)), name='idx')\n idx2_dfx = pdh.IntSeries(_idx2_dfx, index=idx_series, name='fx')\n idx2_daid = pdh.IntSeries(_idx2_daid, index=idx_series, name='aid')\n idx2_dvec = pd.DataFrame(_idx2_dvec, index=idx_series, columns=VEC_COLUMNS)\n else:\n idx2_dfx = _idx2_dfx\n idx2_daid = _idx2_daid\n idx2_dvec = _idx2_dvec\n pass\n\n invindex = InvertedIndex(words, wordflann, idx2_dvec, idx2_daid, idx2_dfx, daids)\n if with_internals:\n compute_data_internals_(invindex, aggregate, alpha, thresh) # 99%\n return invindex"
] | [
"0.54138833",
"0.5409497",
"0.5210687",
"0.5174154",
"0.51426494",
"0.51243144",
"0.50065845",
"0.49686384",
"0.4958775",
"0.49344054",
"0.4921746",
"0.48844215",
"0.48784012",
"0.48583516",
"0.48501158",
"0.48439074",
"0.4833522",
"0.48223832",
"0.48090807",
"0.4801545",
"0.47946066",
"0.47849804",
"0.47603294",
"0.47517923",
"0.47181013",
"0.4714163",
"0.47104216",
"0.46943673",
"0.4691256",
"0.46756276",
"0.46711364",
"0.4653564",
"0.46507326",
"0.46487108",
"0.46272725",
"0.46243355",
"0.4612802",
"0.4590436",
"0.45853844",
"0.45800692",
"0.45795876",
"0.45765954",
"0.45496118",
"0.45471644",
"0.4546087",
"0.45405185",
"0.45262185",
"0.45204017",
"0.45060405",
"0.45018235",
"0.45010296",
"0.44989052",
"0.4495824",
"0.44939652",
"0.44936767",
"0.4489846",
"0.44846556",
"0.44804904",
"0.44596735",
"0.4459516",
"0.44564453",
"0.4447208",
"0.44372326",
"0.4435674",
"0.44245163",
"0.4422615",
"0.4421228",
"0.44163972",
"0.44009683",
"0.4400171",
"0.43885222",
"0.43765166",
"0.43696284",
"0.43674162",
"0.43625048",
"0.43563354",
"0.43510672",
"0.43494844",
"0.43492898",
"0.43352818",
"0.43293324",
"0.43146774",
"0.43143627",
"0.43142453",
"0.4314122",
"0.43129468",
"0.43126264",
"0.4308291",
"0.43011025",
"0.43009713",
"0.4300355",
"0.42930788",
"0.42907894",
"0.42893887",
"0.42782396",
"0.42782325",
"0.42772424",
"0.42737547",
"0.4272433",
"0.42719525"
] | 0.5068832 | 6 |
Rewrite a node and all its descendants | def recursive_eval(sexpr):
    newexpr = rewrite_node(sexpr)
    newexpr.apply(recursive_eval)
    return newexpr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def traverse(self, node):\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)",
"def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)",
"def _mutate_node(self, node):\n self.idx += 1\n\n if self.idx != self.r:\n return\n\n # Exclude some things like signatures, etc.\n exclusions = ['signature', 'crc']\n for ex in exclusions:\n if ex in node._pfp__name.lower():\n return\n\n if type(node) == pfp.fields.Dom:\n return\n elif self._base_name(node) == 'Struct':\n # This is a container, interested in\n # its children nodes\n return\n elif self._base_name(node) == 'Array':\n print(\"%s is an Array of %s (%s)\" % (node._pfp__name,\n node.field_cls, node.width))\n # I can change the data at once:\n node.raw_data = \"cacaca\"\n\n # Or iterate through its elements:\n # for e in node:\n # e._pfp__set_value(e._pfp__value + 1)\n else:\n # CORE TYPE\n # This is supposed to cast\n print('CORE TYPE?')\n node._pfp__set_value(1337)",
"def Rearrange(self, node):\n nnode = Node(node, \"%si\" % node.tag);\n nnode.children = node.children[1:];\n node.children[1:] = [nnode];",
"def visit(self, node):",
"def visit(self, node):",
"def concretise_ast(node):\n node.children = list(node.get_children())\n\n for child in node.children:\n counter = concretise_ast(child)",
"def _update_with_node(self, node: Node) -> None:\n\t\t# Get and test name\n\t\tname = node.name\n\t\tif name not in self.node_names:\n\t\t\t# Add if not added\n\t\t\tself.node_names.append(name)\n\t\t\t# Modify attributes to say \"Attribute - \" in the front\n\t\t\tattrs: List[str] = []\n\t\t\tfor attr in node.attributes:\n\t\t\t\tattrs.append(\"Attribute - \" + attr.title())\n\t\t\t# Create set, use Node attributes as base\n\t\t\tself.subnode_names[name] = set(attrs)\n\n\t\t# Iterate over SubNodes\n\t\tfor subnode in node.subnodes:\n\t\t\t# Set and test name\n\t\t\ts_name = subnode.name\n\t\t\tself.subnode_names[name].add(s_name)\n\n\t\t# Iterate over nodes\n\t\tfor nested_node in node.nodes:\n\t\t\tself._update_with_node(nested_node)",
"def traverse(self,node):\n self.ancestors[self.descendants[node]] = node\n for child in self.children[node]:\n self.traverse(child)\n self.descendants.union(child,node)\n self.ancestors[self.descendants[node]] = node\n self.visited.add(node)\n for query in self[node]:\n if query in self.visited:\n lca = self.ancestors[self.descendants[query]]\n self[node][query] = self[query][node] = lca",
"def _mutate_file(self, node, visited = set([])):\n for ch in self._get_children(node):\n\n if ch not in visited:\n visited.add(ch)\n\n try:\n self._mutate_node(ch)\n except Exception as e:\n print(e)\n\n # Recursion is a bitch\n self._mutate_file(ch, visited)",
"def __call__(self, node):\n if node.children:\n if len(node.children) == 1:\n if self.TagEqual(node.children[0], node):\n #print node.ToString()\n node.tag = self.Tag(node, node.children[0]);\n lst = node.children[0].children;\n node.children = lst;",
"def transform(self, node):\r\n\r\n raise utils.MethodNotDefined(\"transform\",\r\n type(self), self.__class__.__name__)",
"def recursive_visit(self, node):\n node = self.generic_visit(node)\n\n # walk through the children: either iterate the node or look up the keys\n if hasattr(node, '_dict_keys'):\n for v in node._dict_keys:\n self.recursive_visit(getattr(node, v))\n\n if hasattr(node, '_list_keys'):\n for v in node._list_keys:\n self.recursive_visit(getattr(node, v))\n else:\n iter_target = None\n # need special handling of node.data or node_list in order to walk through all formatting node, e.g. endl\n if hasattr(node, 'node_list'): # use the unproxy list to get all formatting\n iter_target = node.node_list\n elif hasattr(node, 'data'):\n iter_target = node.data\n elif hasattr(node, '__iter__'):\n iter_target = node\n\n if iter_target:\n change_list = []\n for child in iter_target:\n new_node = self.recursive_visit(child)\n if new_node is not child:\n change_list.append((child, new_node))\n\n for original_child, new_child in change_list:\n i = original_child.index_on_parent\n iter_target.remove(original_child)\n iter_target.insert(i, new_child)\n\n return node",
"def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents",
"def mutate(cls, node):\n if node not in config.visited_nodes:\n if node.__class__ in [ast.Raise, ast.Assign, ast.AugAssign, ast.Call, ast.Expr] and node in config.nodes_to_remove:\n config.mutated = True\n original_node = deepcopy(node)\n parent = config.parent_dict[node]\n del config.parent_dict[node]\n config.nodes_to_remove.remove(node)\n node = ast.Pass()\n config.parent_dict[node] = parent\n config.node_pairs[node] = original_node\n config.current_mutated_node = node\n\n return node",
"def substituteNodes(node, replacements):\n p = node.parentNode\n for r in replacements:\n p.insertBefore(r, node)\n p.removeChild(node)",
"def visit_Node(self, node):\n pass",
"def _un_onnode(visitor, node, namespace):\n namespace.refresh(node._qname, node)\n visitor.mapacc(node._children, node._namespace)\n return node, namespace",
"def dfs_ast(func):\n def wrapper(self, node):\n new_node = func(self, node)\n for child in ast.iter_child_nodes(new_node):\n self.visit(child)\n return new_node\n\n return wrapper",
"def _replace_node(self, nxt, node):\n nxt.left = node.left\n nxt.right = node.right\n nxt.parent = node.parent\n if node is self.root:\n self.root = nxt\n if nxt.left:\n nxt.left.parent = nxt\n if nxt.right:\n nxt.right.parent = nxt\n if nxt.parent:\n if nxt.parent.right is node:\n nxt.parent.right = nxt\n else:\n nxt.parent.left = nxt",
"def _redirect(self, node1, node2):\n if node1.parent.right is node1:\n node1.parent.right = node2\n else:\n node1.parent.left = node2",
"def mutate(cls, node):\n if node not in config.visited_nodes:\n if node.__class__ is ast.Break:\n config.mutated = True\n original_node = deepcopy(node)\n parent = config.parent_dict[node]\n del config.parent_dict[node]\n node = ast.Continue()\n config.parent_dict[node] = parent\n config.node_pairs[node] = original_node\n config.current_mutated_node = node\n\n elif node.__class__ is ast.Continue:\n config.mutated = True\n original_node = deepcopy(node)\n parent = config.parent_dict[node]\n del config.parent_dict[node]\n node = ast.Break()\n config.parent_dict[node] = parent\n config.node_pairs[node] = original_node\n config.current_mutated_node = node\n\n return node",
"def _clean_graph_visit(self, node, visited):\n visited[node] = True\n\n while True:\n rp_node = None\n rp_id = -1\n for n_id, n in enumerate(node.get_children()):\n if n.get_type() == CFGNodeType.END_IF:\n rp_node = n\n rp_id = n_id\n break\n\n # end node points to only one child,\n # so replace it\n if rp_node is not None and rp_node.get_children() != []:\n node.get_children()[rp_id] = rp_node.get_children()[0]\n\n # END-IF can be replaced by another, so continue until there's none\n if rp_node == None:\n break\n\n if node.get_type() == CFGNodeType.PSEUDO:\n self._clean_graph_visit(node.get_refnode(), visited)\n\n for child in node.get_children():\n if child not in visited:\n self._clean_graph_visit(child, visited)",
"def rewrite(self, dag: saldag.OpDag):\n ordered = dag.top_sort()\n if self.reverse:\n ordered = ordered[::-1]\n\n for node in ordered:\n print(type(self).__name__, \"rewriting\", node.out_rel.name)\n if isinstance(node, saldag.Aggregate):\n self._rewrite_aggregate(node)\n elif isinstance(node, saldag.Divide):\n self._rewrite_divide(node)\n elif isinstance(node, saldag.Project):\n self._rewrite_project(node)\n elif isinstance(node, saldag.Filter):\n self._rewrite_filter(node)\n elif isinstance(node, saldag.Multiply):\n self._rewrite_multiply(node)\n elif isinstance(node, saldag.RevealJoin):\n self._rewrite_reveal_join(node)\n elif isinstance(node, saldag.HybridJoin):\n self._rewrite_hybrid_join(node)\n elif isinstance(node, saldag.Join):\n self._rewrite_join(node)\n elif isinstance(node, saldag.Concat):\n self._rewrite_concat(node)\n elif isinstance(node, saldag.Close):\n self._rewrite_close(node)\n elif isinstance(node, saldag.Open):\n self._rewrite_open(node)\n elif isinstance(node, saldag.Create):\n self._rewrite_create(node)\n elif isinstance(node, saldag.Distinct):\n self._rewrite_distinct(node)\n else:\n msg = \"Unknown class \" + type(node).__name__\n raise Exception(msg)",
"def replace(self, element):\n for i, sibling in enumerate(self.parent._children):\n if sibling is self:\n self.parent._children[i] = element\n element.parent_docid = self.parent_docid",
"def _traverse_1_0_0(item):\n if 'child_nodes' in item.keys():\n for child_node in item['child_nodes']:\n _traverse_1_0_0(child_node)\n item['content'] = item['child_nodes']\n del item['child_nodes']",
"def save_node(self, node: Node):",
"def _rewrite_concat(self, node: saldag.Concat):\n\n if node.is_lower_boundary():\n\n out_stored_with = node.out_rel.stored_with\n for par in node.parents:\n if not par.is_root():\n par.out_rel.stored_with = copy.copy(out_stored_with)\n node.is_mpc = False",
"def update_node(self, node):\n return node.update()",
"def fn(node):\n if not node: return \n node.left, node.right = fn(node.right), fn(node.left)\n return node",
"def replace_node(self, node, new_nodes):\n parent = node.parent\n position = parent.childNodes.index(node)\n parent.removeChild(node)\n\n for n in new_nodes:\n parent.insertChild(position, n)\n position += 1",
"def walk(node):\r\n from collections import deque\r\n todo = deque([node])\r\n while todo:\r\n node = todo.popleft()\r\n todo.extend(iter_child_nodes(node))\r\n yield node",
"def mutate_expand_node(\n child, node=None, pb_en_out_link=config.MUTPB_EN_OUT_LINK):\n # TODO: can maybe be improved by sparqling\n if not node:\n nodes = list(child.nodes)\n node = random.choice(nodes)\n new_triple, _, _ = _mutate_expand_node_helper(node, pb_en_out_link)\n return child + (new_triple,)",
"def preprocessNode(self):\n while self.node.firstChild():\n self.node.firstChild().doDelete(self.node)",
"def preprocessNode(self):\n pass",
"def _iter_child_nodes_in_order(node):\n return _flatten_ast_nodes(_iter_child_nodes_in_order_internal_1(node))",
"def merge_nodes(self, parent, child):\n parent.key += child.key\n parent.real = child.real\n parent.value = child.value\n parent.children = child.children",
"def replace_node(self, node,new_node):\n #Special Case: Replace the root.\n if node == self.root :\n self.root = new_node\n return\n parent = node.parent\n if parent.left and parent.left == node:\n parent.left = new_node\n elif parent.right and parent.right == node:\n parent.right = new_node\n else:\n print(\"Incorrect Parent-Child relation!\")\n raise RuntimeError",
"def saveNodeAndSiblings(self, node):\n self.save(node)\n self.save(node.pref)\n self.save(node.nref)",
"def reweight(node, weights):\n logger.debug(\"Reweighting node %s\" % node)\n parents = node.parents()\n #1. Get the parents of the node to be reweighted\n pars = copy.copy(parents)\n\n #2. Remove any current parents from the reweighted node\n node.delParents(parents)\n\n for w in weights:\n\n logger.debug(\"Adding parents to weight node %s\" % w)\n #Add the previous parents from 1. as the parents of the WeightNode\n w.addParents(pars)\n\n #Add this WeightNode as the parent of the input node\n node.addParents([w])\n return node",
"def transform_all(self, node):\n # don't traverse, only handle field lists that are immediate children\n summary = []\n data = {}\n name, uid = _get_desc_data(node.parent)\n for child in node:\n if isinstance(child, remarks):\n remarks_string = transform_node(child)\n data['remarks'] = remarks_string\n elif isinstance(child, addnodes.desc):\n if child.get('desctype') == 'attribute':\n attribute_map = {} # Used for detecting duplicated attributes in intermediate data and merge them\n\n for item in child:\n if isinstance(item, desc_signature) and any(isinstance(n, addnodes.desc_annotation) for n in item):\n # capture attributes data and cache it\n data.setdefault('added_attribute', [])\n\n item_ids = item.get('ids', [''])\n\n if len(item_ids) == 0: # find a node with no 'ids' attribute\n curuid = item.get('module', '') + '.' + item.get('fullname', '')\n # generate its uid by module and fullname\n else:\n curuid = item_ids[0]\n\n if len(curuid) > 0:\n parent = curuid[:curuid.rfind('.')]\n name = item.children[0].astext()\n\n if curuid in attribute_map:\n if len(item_ids) == 0: # ensure the order of docstring attributes and real attributes is fixed\n attribute_map[curuid]['syntax']['content'] += (' ' + item.astext())\n # concat the description of duplicated nodes\n else:\n attribute_map[curuid]['syntax']['content'] = item.astext() + ' ' + attribute_map[curuid]['syntax']['content']\n else:\n if _is_desc_of_enum_class(node):\n addedData = {\n 'uid': curuid,\n 'id': name,\n 'parent': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': item.parent.get('desctype'),\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext(),\n 'return': {\n 'type': [parent]\n }\n }\n }\n else:\n addedData = {\n 'uid': curuid,\n 'class': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': 'attribute',\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext()\n }\n }\n\n attribute_map[curuid] = addedData\n else:\n raise Exception('ids of node: ' + repr(item) + ' is missing.')\n # no ids and no duplicate or uid can not be generated.\n if 'added_attribute' in data:\n data['added_attribute'].extend(attribute_map.values()) # Add attributes data to a temp list\n\n # Don't recurse into child nodes\n continue\n elif isinstance(child, nodes.field_list):\n (entries, types) = _hacked_transform(self.typemap, child)\n _data = get_data_structure(entries, types, child)\n data.update(_data)\n elif isinstance(child, addnodes.seealso):\n data['seealso'] = transform_node(child)\n elif isinstance(child, nodes.admonition) and 'Example' in child[0].astext():\n # Remove the admonition node\n child_copy = child.deepcopy()\n child_copy.pop(0)\n data['example'] = transform_node(child_copy)\n else:\n content = transform_node(child)\n\n # skip 'Bases' in summary\n if not content.startswith('Bases: '):\n summary.append(content)\n\n if \"desctype\" in node.parent and node.parent[\"desctype\"] == 'class':\n data.pop('exceptions', '') # Make sure class doesn't have 'exceptions' field.\n\n if summary:\n data['summary'] = '\\n'.join(summary)\n # Don't include empty data\n for key, val in data.copy().items():\n if not val:\n del data[key]\n data['type'] = PatchedDocFieldTransformer.type_mapping(node.parent[\"desctype\"]) if \"desctype\" in node.parent else 'unknown'\n self.directive.env.docfx_info_field_data[uid] = data\n super(PatchedDocFieldTransformer, self).transform_all(node)",
"def convert(self, node):\n # get the conversion lut\n node_type = self.get_node_type(node)\n conversion_specs = self.conversion_spec_sheet.get(node_type)\n if not conversion_specs:\n print('No conversion_specs for: %s' % node_type)\n return\n\n # call any call_before\n call_before = conversion_specs.get('call_before')\n if call_before and callable(call_before):\n call_before(node)\n\n # some conversion specs doesn't require a new node to be created\n # so return early if this is the case\n if 'node_type' not in conversion_specs:\n return node\n\n node_creator = self.node_creator_factory(conversion_specs)\n rs_node = node_creator.create()\n\n # rename the material to have a similar name with the original\n if rs_node is not None:\n node_type_name = conversion_specs['node_type'] \\\n if isinstance(conversion_specs['node_type'], str) else \\\n conversion_specs['secondary_type'].replace(' ', '_')\n\n self.rename_node(\n rs_node,\n self.get_node_name(node).replace(\n node_type, node_type_name\n )\n )\n else:\n rs_node = node\n\n # set attributes\n attributes = conversion_specs.get('attributes')\n if attributes:\n for source_attr, target_attr in attributes.items():\n # value can be a string\n if isinstance(target_attr, basestring):\n # check incoming connections\n incoming_connections = \\\n self.get_node_inputs(node, source_attr)\n if incoming_connections:\n # connect any textures to the target node\n for input_ in incoming_connections:\n # input_ >> rs_node.attr(target_attr)\n self.connect_attr(\n input_,\n rs_node,\n target_attr\n )\n else:\n # just read and set the value directly\n self.set_attr(\n rs_node,\n target_attr,\n self.get_attr(node, source_attr)\n )\n\n elif isinstance(target_attr, list):\n # or a list\n # where we set multiple attributes in the rs_node to the\n # same value\n # source_attr_value = node.getAttr(source_attr)\n source_attr_value = self.get_attr(node, source_attr)\n for attr in target_attr:\n self.set_attr(rs_node, attr, source_attr_value)\n # for input_ in node.attr(source_attr).inputs(p=1):\n for input_ in self.get_node_inputs(node, source_attr):\n self.connect_attr(input_, rs_node, attr)\n elif isinstance(target_attr, dict):\n # or another dictionary\n # where we have a converter\n source_attr_value = self.get_attr(node, source_attr)\n for attr, converter in target_attr.items():\n if callable(converter):\n try:\n attr_value = converter(source_attr_value)\n except TypeError:\n # it should use two parameters, also include\n # the node itself\n try:\n attr_value = converter(\n source_attr_value,\n node\n )\n except TypeError:\n # so this is the third form that also\n # includes the rs node\n attr_value = converter(\n source_attr_value,\n node,\n rs_node\n )\n else:\n attr_value = converter\n self.set_attr(rs_node, attr, attr_value)\n\n # call any call_after\n call_after = conversion_specs.get('call_after')\n if call_after and callable(call_after):\n call_after(node, rs_node)\n\n return rs_node",
"def walk(node):\n\n traversed_nodes.append(node)\n \n # Do something with node value...\n print node.value\n\n # Recurse on each child node\n for child_node in node.child_nodes:\n if child_node not in traversed_nodes:\n walk(child_node)",
"def addChild(node):",
"def node_command(ctx, old, new):\n try:\n with ctx.obj[\"reader\"] as reader, ctx.obj[\"writer\"] as writer:\n writer.copy_schema(reader)\n writer.prepare_encode_cache()\n writer.rename_node(old.encode(\"utf-8\"), new.encode(\"utf-8\"))\n writer.write(reader)\n except Exception:\n click.secho(\"Failed!\", fg=\"red\", bold=True, err=True)\n raise\n else:\n click.secho(\"Done!\", fg=\"green\", err=True, bold=True)",
"def process(self, node, format):\n\n tab_spaces = int(node.attrib.get(\"tabspaces\", \"4\"))\n\n if format.endswith(\"latex\"):\n self.verbatimString = self.LaTeX_verbatimString\n else:\n self.verbatimString = self.XHTML_verbatimString\n #end if\n\n for child in node.iter():\n if child.text:\n child.text = self.verbatimString(child.text, tab_spaces)\n if child != node:\n if child.tail:\n child.tail = self.verbatimString(child.tail, tab_spaces)\n #end if\n #end for\n\n return node",
"def _mutate(self, p_mutate, mutation):\n self.children = mutation(self.children, p_mutate)",
"def _switch_nodes(self, walker):\n walker.prev.next = walker.next \n walker.next = walker.next.next \n walker.next.prev = walker\n walker.prev.next.prev = walker.prev\n walker.prev.next.next = walker \n walker.prev = walker.prev.next",
"def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(Node, self).add_node(node)",
"def remap_nodes(self, new_node_mapping):\n # because all nodes are SchemaNodeIDs (i.e. objects), we only need to reassign nodes one way\n # changes propagate to chains, chain root_nodes, and parents automatically\n for chain in self.chains:\n for edge in chain:\n head, tail = edge\n if head in new_node_mapping.keys():\n head.value = new_node_mapping[head]\n if tail in new_node_mapping.keys():\n tail.value = new_node_mapping[tail]",
"def postorder_iterator(node):\n for child in node.children:\n yield from postorder_iterator(child)\n yield node",
"def prepare_node(self, node):\n # Every change at the position of node will be recognized\n aexpr(lambda: node.position, globals(), locals())\\\n .on_change(lambda obs, oldv, newv: self.set_node_position(node, *newv))",
"def sanitize(tree, namespaces):\n # Prepare the common structures for find and check for uniqueness\n nodes = {}\n error = False\n for node in tree.iterfind('./*[@NodeId]'):\n nid = node.get('NodeId')\n if nid in nodes:\n print('Sanitize Error: NodeId {} found twice'.format(nid), file=sys.stderr)\n error = True\n nodes[nid] = node\n if error:\n return False\n\n # Add reciprocal References\n # References are a tuple (SourceNode, ReferenceType, TargetNode) (Part 3, §4.3.4)\n # Only the SourceNode is required to be in the address space.\n # All References should be unique.\n # Note that if there is (a, type0, b) and (a, type1, b), they describe the same Reference\n # if type0 and type1 are subclasses of the same concrete ReferenceType.\n # When the reference a -> b exists, and when browsing b, b <- a also exists in the inverse direction.\n # We add reciprocal References to avoid their computations at browse-time.\n\n # First, compute the a -> b and b <- a sets of references\n # If no reference is missing, refs_fwd == refs_inv\n # In the Address Space, b <- a References are stored in b, hence the difficulty\n refs_fwd = {node: set() for node in nodes} # {a: {(type, b), ...}}\n refs_inv = {node: set() for node in nodes} # {a: {(type, b), ...}}, already existing inverse references b <- a are stored in refs_inv[a]\n for node in tree.iterfind('./*[uanodeset:References]', namespaces):\n nids = node.get('NodeId') # The starting node of the references below\n refs, = node.iterfind('uanodeset:References', namespaces)\n for ref in list(refs): # Make a list so that we can remove elements while iterating\n type_ref = ref.get('ReferenceType')\n nidt = ref.text.strip() # The destination node of this reference\n is_fwd = ref.get('IsForward') != 'false'\n if is_fwd:\n # We are in the case a -> b,\n # so a = nids, and b = nidt\n fwds = refs_fwd[nids]\n if (type_ref, nidt) in fwds:\n print('Sanitize: duplicate forward Reference {} -> {} (type {})'.format(nids, nidt, type_ref), file=sys.stderr)\n refs.remove(ref)\n fwds.add((type_ref, nidt))\n else:\n # We are in the case b <- a,\n # so b = nids, and a = nidt\n # and nids <- nidt will be stored in refs_inv[nidt]\n if nidt not in nodes:\n print('Sanitize: inverse Reference from unknown node, cannot add forward reciprocal ({} -> {}, type {})'\n .format(nids, nidt, type_ref), file=sys.stderr)\n continue\n invs = refs_inv[nidt]\n if (type_ref, nids) in invs:\n print('Sanitize: duplicate inverse Reference {} <- {} (type {})'.format(nidt, nids, type_ref), file=sys.stderr)\n refs.remove(ref)\n invs.add((type_ref, nids))\n\n # Now add inverse refs b <- a for which a -> b exists\n trs_fwd = set((a,t,b) for a,ltr in refs_fwd.items() for t,b in ltr)\n trs_inv = set((a,t,b) for a,ltr in refs_inv.items() for t,b in ltr)\n for a, t, b in trs_fwd - trs_inv:\n if b not in nodes:\n print('Sanitize: Reference to unknown node, cannot add inverse reciprocal ({} -> {}, type {})'.format(a, b, t), file=sys.stderr)\n else:\n print('Sanitize: add inverse reciprocal Reference {} <- {} (type {})'.format(b, a, t), file=sys.stderr)\n node = nodes[b]\n _add_ref(node, t, a, is_forward=False)\n # Add forward refs (there should be less)\n for a, t, b in trs_inv - trs_fwd:\n if a not in nodes:\n print('Sanitize: inverse Reference from unknown node, cannot add forward reciprocal ({} -> {}, type {})'.format(a, b, t), file=sys.stderr)\n else:\n print('Sanitize: add forward reciprocal Reference {} -> {} (type {})'.format(a, b, t), file=sys.stderr)\n node = nodes[a]\n _add_ref(node, t, b, 
is_forward=True)\n\n # Note: ParentNodeId is an optional attribute. It refers to the parent node.\n # In case the ParentNodeId is present, but the reference to the parent is not, the attribute is removed.\n # The reference to the ParentNodeId should be typed \"HasComponent\" (not verified)\n for node in tree.iterfind('./*[@ParentNodeId]'):\n # There may be no reference at all\n refs_nodes = node.findall('uanodeset:References', namespaces)\n if len(refs_nodes) < 1:\n print('Sanitize: child Node without references (Node {} has an attribute ParentNodeId but no reference)'\n .format(node.get('NodeId')), file=sys.stderr)\n # Note: the attrib member may be an interface, so this is not portable; however the ET lib does not provide other means to do this.\n del node.attrib['ParentNodeId']\n continue\n refs, = refs_nodes\n pnid = node.get('ParentNodeId')\n parent_refs = refs.findall('*[@IsForward=\"false\"]')\n if not any(parent.text.strip() == pnid for parent in parent_refs):\n print('Sanitize: child Node without reference to its parent (Node {}, which parent is {})'\n .format(node.get('NodeId'), pnid), file=sys.stderr)\n # Type is unknown in fact\n #refs.append(ET.Element('Reference', {'ReferenceType': 'HasComponent', 'IsForward': 'false'}, text=pnid))\n # Note: the attrib member may be an interface, so this is not portable; however the ET lib does not provide other means to do this.\n del node.attrib['ParentNodeId']\n\n # Note: we don't check that the Address Space Model specified in Part 3 is valid.\n\n # TODO: Remove empty <References />\n\n return True",
"def leaf_replace(self, node):\r\n if self.label is not None: # return if leaf node\r\n return\r\n left, right = self.left, self.right\r\n left.parents.remove(self) if self in left.parents else left.parents\r\n right.parents.remove(self) if self in right.parents else right.parents\r\n if node.label is None:\r\n internal = [node]\r\n else:\r\n internal = []\r\n while len(internal) > 0:\r\n l = internal.pop(0)\r\n if l.left.label is not None: # leaf\r\n if l.left.label == 0:\r\n l.left = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.left.label == 1:\r\n l.left = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.left)\r\n\r\n if l.right.label is not None: # leaf\r\n if l.right.label == 0:\r\n l.right = left\r\n left.parents.append(l) if l not in left.parents else left.parents\r\n elif l.right.label == 1:\r\n l.right = right\r\n right.parents.append(l) if l not in right.parents else right.parents\r\n else:\r\n internal.append(l.right)",
"def transform_all_subnodes(self, node, warn= False, skip_empty= False, ignored = None):\n assert isinstance(node, ET.Element), type(node)\n return self.transform_all(node, warn, skip_empty, ignored, node)",
"def swapChildren(self, *args):\n return _libsbml.ASTNode_swapChildren(self, *args)",
"def in_place_substitute(self):\r\n if self.substitute is not None:\r\n node = self.convert_type()\r\n self.leaf_replace(node) # for internals only\r\n self.root_replace(node)",
"def _rewrite_filter(self, node: saldag.Filter):\n\n out_rel_cols = node.out_rel.columns\n\n for in_col, out_col in zip(node.get_in_rel().columns, out_rel_cols):\n out_col.coll_sets |= copy.deepcopy(in_col.coll_sets)",
"def _alter_node(node):\n if isinstance(node, (de.TFRecordDataset, de.TextFileDataset)) and node.shuffle_level == de.Shuffle.GLOBAL:\n # Remove the connection between the parent's node to the current node because we are inserting a node.\n if node.output:\n node.output.pop()\n # Perform a fast scan for average rows per file\n if isinstance(node, de.TFRecordDataset):\n avg_rows_per_file = node.get_dataset_size(True) // len(node.dataset_files)\n else:\n avg_rows_per_file = node.get_dataset_size() // len(node.dataset_files)\n\n # Shuffle between 4 files with a minimum size of 10000 rows\n new_shuffle = node.shuffle(max(avg_rows_per_file * 4, 10000))\n return new_shuffle\n\n if isinstance(node, de.MapDataset):\n if node.python_multiprocessing:\n # Bootstrap can only be performed on a copy of the original dataset node.\n # Bootstrap on original dataset node will make all iterators share the same process pool\n node.iterator_bootstrap()\n if node.columns_order is not None:\n # Remove the connection between the parent's node to the current node because we are inserting a node.\n if node.output:\n node.output.pop()\n\n return node.project(node.columns_order)\n return node",
"def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()",
"def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(__class__, self).add_node(node)",
"def __decorate_nodes(nodes, space):\n for n in nodes:\n if n not in self.__node_spaces:\n self.__node_spaces[n] = set([])\n self.__node_spaces[n].add(space)\n pred_nodes = self.__plan_graph.subjects(AGORA.next, n)\n __decorate_nodes(pred_nodes, space)",
"def map_postorder(lamark_ast, visit_func):\n new_children = []\n for node in lamark_ast.get_children():\n new_child = map_postorder(node, visit_func)\n if new_child is not None:\n new_children.append(new_child)\n lamark_ast.set_children(new_children)\n return visit_func(lamark_ast)",
"def update_node(node, attribute, value):\n node.set(attribute, value)\n return",
"def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)",
"def bury(node, *subElements):\n for name in subElements:\n nextNode = None\n for child in node.childNodes:\n if child.nodeType == child.ELEMENT_NODE and child.nodeName == name:\n nextNode = child\n break\n if nextNode:\n node = nextNode\n else:\n node = addElement(node, name)\n return node",
"def update_node(self, node, updating_node):\n out_edges = list(self.source_net.edges(node, data=True))\n self.remove_node(node)\n self.source_net.add_node(node, attr_dict=self.source_net.nodes[updating_node]['attr_dict'])\n self.source_net.add_edges_from(out_edges)\n\n # Transfer incoming edges\n for u, v, data in self.source_net.in_edges(updating_node, data=True):\n self.source_net.add_edge(u, node, **data)\n\n self.remove_node(updating_node)",
"def transform(self, ast_nodes: List[ast.ASTNode]) -> List[ast.ASTNode]:\n pass",
"def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node",
"def readd_node(self, u_node_id):\n child_node = util.return_element_from_list(int(u_node_id), self.node_memory)\n child_node.parent.children.append(child_node)\n self.nodes.insert(child_node.node_id, child_node)\n child_node.parent_formula.node_children.append(child_node)\n print(len(child_node.formulas))\n for f in child_node.formulas:\n self.undelete_formula_helper(f.unique_id)",
"def _replace(self, p, e):\n node = self._validate(p)\n old = node._element\n node._element = e\n return old",
"def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids",
"def changeAlias(self, alias, node):",
"def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()",
"def reverse_path_iterator(node):\n while node:\n yield node\n node = node.parent",
"def _update_ordering(self):\n self._descendants = sorted(self.unordered_descendants(), key=lambda node: node.ord)\n for (new_ord, node) in enumerate(self._descendants, 1):\n node.ord = new_ord",
"def replace_by_etree(self, root_el, el_idx=0):\n el = self.get_element_by_name(root_el.tag, el_idx)\n el[:] = list(root_el)\n el.attrib = root_el.attrib",
"def _process_children(self, node):\n for kid in node.children:\n self._process_node(kid)",
"def unpack(node):\n if node.is_root():\n raise ValueError('Cannot unpack root.')\n parent = node.parent\n blen = (node.length or 0.0)\n for child in node.children:\n clen = (child.length or 0.0)\n child.length = (clen + blen or None)\n parent.remove(node)\n parent.extend(node.children)",
"def __expandNodes(self, node):\n for childNode in node.children():\n if childNode.expanded:\n idx = self.__bookmarksModel.nodeIndex(childNode)\n idx = self.__proxyModel.mapFromSource(idx)\n self.bookmarksTree.setExpanded(idx, True)\n self.__expandNodes(childNode)",
"def traverse_tree(pid,nodes):\n\n for child in get_children(pid):\n nodes.update(traverse_tree(child,nodes))\n nodes.add(pid)\n\n return nodes",
"def exercise(xml):\n ns = {\"t\": \"http://martin.hoppenheit.info/code/generic-tree-xml\",\n \"e\": \"http://purl.org/dc/elements/1.1/\"}\n root = ET.fromstring(xml)\n for t in root.iter(\"{%s}title\" % ns[\"e\"]):\n t.text = t.text.upper()\n for prefix, uri in ns.items():\n ET.register_namespace(prefix, uri)\n return ET.tostring(root, encoding=\"unicode\")",
"def propagate_lineage(nodes):\n for name in nodes:\n # Called just to populate lineages, both way\n get_node_lineage(nodes, name, 'up', 'ancestors')\n get_node_lineage(nodes, name, 'down', 'descendants')",
"def _walk_ast_nodes_in_order(node):\n # The implementation is basically the same as ``ast.walk``, but:\n # 1. Use a stack instead of a deque. (I.e., depth-first search instead\n # of breadth-first search.)\n # 2. Use _iter_child_nodes_in_order instead of ``ast.iter_child_nodes``.\n todo = [node]\n while todo:\n node = todo.pop()\n yield node\n todo.extend(reversed(list(_iter_child_nodes_in_order(node))))",
"def prepare_node_attrs(self):",
"def remove_node(self, node):\n node.pre.post = node.post\n node.post.pre = node.pre",
"def swap_nodes(tree) -> None:\n if tree is None:\n raise ValueError('Empty tree')\n tmp = tree.left\n tree.left = tree.right\n tree.right = tmp",
"def update(self) -> None:\n\t\t# Clear attributes that will be updates\n\t\tself.node_names: List[str] = []\n\t\tself.subnode_names: Dict[str, Set[str]] = {}\n\t\t# Iterate over RootNodes\n\t\tname: str\n\t\ts_name: str\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Iterate over Nodes\n\t\t\tfor node in rootnode.nodes:\n\t\t\t\tself._update_with_node(node)\n\t\t\tif len(rootnode.subnodes):\n\t\t\t\t# Create Set in subnode_names for the RootNode's SubNodes\n\t\t\t\tself.subnode_names[rootnode.name] = set()\n\t\t\t\t# Iterate over SubNodes\n\t\t\t\tfor subnode in rootnode.subnodes:\n\t\t\t\t\tself.subnode_names[rootnode.name].add(subnode.name)",
"def _writeNode (self, node, parent=None):\n\t\t## Main:\n\t\tif (self._src_tree.is_node_tip (node)):\n\t\t\t# a simple (terminal) node\n\t\t\tname = node.get ('title') or node.get ('name')\n\t\t\t# if the name is not quoted and contains spaces, quote it\n\t\t\tif (not _quotedNameRegex.search (name)):\n\t\t\t\tif (_spacesInNameRegex.search (name)):\n\t\t\t\t\tname = \"'%s'\" % name\n\t\t\tself._dest_strm.write (name)\n\t\telse:\n\t\t\t# complex (internal) node\n\t\t\tself._dest_strm.write ('(')\n\t\t\tchildren = self._src_tree.node_children(node)\n\t\t\tfirst_node = True\n\t\t\tfor child in children:\n\t\t\t\tif (first_node):\n\t\t\t\t\tfirst_node = False\n\t\t\t\telse:\n\t\t\t\t\tself._dest_strm.write (', ')\n\t\t\t\tself._writeNode (child, node)\n\t\t\tself._dest_strm.write (')')\n\t\t\t# do support value\n\t\t\tsupval = node.get ('support', None)\n\t\t\tif (supval is not None):\n\t\t\t\tself._dest_strm.write (self._support_format % supval)\n\t\t# do the distance\n\t\tif parent:\n\t\t\tbr = self._src_tree.get_branch (node, parent)\n\t\t\t#dist = self._src_tree.get_distance (node, parent)\n\t\t\tdist = br.distance\n\t\telse:\n\t\t\tdist = node.get ('distance', None)\n\t\tif (dist is not None):\n\t\t\tself._dest_strm.write (':' + self._dist_format % dist)",
"def migrate_doc(doc: DocCursor) -> DocCursor:\n for transform in transforms:\n doc = transform(doc)\n doc.nested_set_renumber(bulk_create=False)\n for node in doc.walk():\n node.save()\n return doc",
"def replaceChild(self, *args):\n return _libsbml.ASTNode_replaceChild(self, *args)",
"def replaceNode(self, cur):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n ret = libxml2mod.xmlReplaceNode(self._o, cur__o)\n if ret is None:raise treeError('xmlReplaceNode() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp",
"def deep_replace(node, keys, value):\n if not isinstance(node, (Array, Document, Object)):\n raise TypeError(\"Expected Core API container type.\")\n\n if not keys:\n return value\n elif len(keys) == 1:\n return replace(node, keys[0], value)\n\n key = keys[0]\n next = node[key]\n child = deep_replace(next, keys[1:], value)\n return replace(node, key, child)",
"def rewrite_refs(sexpr, from_args, base_offsets):\n\n def rewrite_node(sexpr):\n # Push unboxing into the state variables of distributed aggregates\n if isinstance(sexpr, expression.AggregateExpression):\n if sexpr.is_decomposable():\n ds = sexpr.get_decomposable_state()\n lsms = rewrite_statemods(ds.get_local_statemods(), from_args, base_offsets) # noqa\n rsms = rewrite_statemods(ds.get_remote_statemods(), from_args, base_offsets) # noqa\n\n if lsms or rsms:\n sexpr.set_decomposable_state(\n expression.DecomposableAggregateState(\n ds.get_local_emitters(), lsms,\n ds.get_remote_emitters(), rsms,\n ds.get_finalizer()))\n return sexpr\n\n if not isinstance(sexpr, expression.DottedRef):\n return sexpr\n elif sexpr.table_alias not in from_args:\n raise NoSuchRelationException(sexpr.table_alias)\n else:\n op = from_args[sexpr.table_alias]\n scheme = op.scheme()\n\n debug_info = None\n if not sexpr.field:\n offset = 0\n elif isinstance(sexpr.field, int):\n if sexpr.field >= len(scheme):\n raise ColumnIndexOutOfBounds(str(sexpr))\n offset = sexpr.field\n else:\n assert isinstance(sexpr.field, basestring)\n offset = scheme.getPosition(sexpr.field)\n debug_info = sexpr.field\n\n offset += base_offsets[sexpr.table_alias]\n return expression.UnnamedAttributeRef(offset, debug_info)\n\n def recursive_eval(sexpr):\n \"\"\"Rewrite a node and all its descendents\"\"\"\n newexpr = rewrite_node(sexpr)\n newexpr.apply(recursive_eval)\n return newexpr\n\n return recursive_eval(sexpr)",
"def _process_node(nodes, variables, model_context, error_info):\n # iterate over copy to avoid concurrent change for add/delete\n if isinstance(nodes, OrderedDict):\n nodes_iterator = OrderedDict(nodes)\n else:\n nodes_iterator = dict(nodes)\n for key in nodes_iterator:\n value = nodes[key]\n\n # if the key changes with substitution, remove old key and map value to new key\n new_key = _substitute(key, variables, model_context, error_info)\n if new_key is not key:\n del nodes[key]\n nodes[new_key] = value\n\n if isinstance(value, dict):\n _process_node(value, variables, model_context, error_info)\n\n elif isinstance(value, list):\n for member in value:\n if type(member) in [str, unicode]:\n index = value.index(member)\n value[index] = _substitute(member, variables, model_context, error_info, key)\n\n elif type(value) in [str, unicode]:\n nodes[key] = _substitute(value, variables, model_context, error_info, key)",
"def copyRelNode(self,node):\n G = self.interactions\n \n newNode = self.newElement(RelNode,node.__dict__)\n # out-going edges never interfere between copies\n for (p1,p2,u) in G.out_edges(node):\n newEdge = self.newElement(RelEdge,u.__dict__)\n newEdge.bgn = newNode\n G.add_edge(newNode,p2,newEdge)\n\n # collect in-coming edges by type\n inCopy = {}\n for b in G.in_edges(node):\n typ = b[2].type\n if not inCopy.has_key(typ):\n inCopy[typ] = set()\n inCopy[typ].add(b[0])\n if inCopy.has_key('agpat'):\n agpats = inCopy.pop('agpat')\n else:\n agpats = []\n\n # non-agpat in-coming edges can be just copied\n for (k,v) in inCopy.items():\n for p1 in v:\n newEdge = self.newElement(RelEdge)\n newEdge.type = k\n newEdge.bgn = p1\n newEdge.end = newNode\n G.add_edge(p1,newNode,newEdge)\n\n # make copies of agpat parents (to avoid agpat between aliases)\n for a in agpats:\n newNode2 = self.copyRelNode(a)\n for b in [x for x in G.out_edges(newNode2) if x[1]==node]:\n G.delete_edge(b)\n b[2].end = newNode\n G.add_edge(newNode2,newNode,b[2])\n\n return(newNode)",
"def change_node(self, node: dict):\n # check if it is not overriding existing node\n if node.get('id') is not None:\n if node['id'] not in self._nodes:\n raise ValueError('tried to change non-existing node %s' % node['id'])\n else:\n raise ValueError('no id for node provided')\n\n # change attributes\n id_ = node['id']\n del node['id']\n for attribute in node:\n self._nodes[id_][attribute] = node[attribute]",
"def add_node(self, node):",
"def preprocess_xml(xml):\n logger.info(\"Preprocessing XML %s\", xml)\n for path, replacement in content.Macros():\n replacement = etree.fromstring('<ROOT>' + replacement + '</ROOT>')\n for node in xml.xpath(path):\n parent = node.getparent()\n idx = parent.index(node)\n parent.remove(node)\n for repl in replacement:\n parent.insert(idx, repl)\n idx += 1",
"def _replace(self, p, e):\n node = self._validate(p)\n old = node.element\n node.element = e\n return old",
"def depthFirstAddOne(node, notOkTree, isRoot=False):\n if not isRoot:\n e = notOkTree\n prev = e\n for label in node.heirarchy:\n label = cleanLabel(label)\n e = prev.find(label)\n if e is None:\n e = newElement(prev, label)\n prev = e\n addOne(e, 'transcripts')\n for c in node.children:\n depthFirstAddOne(c, notOkTree)"
] | [
"0.69255537",
"0.6654335",
"0.6639701",
"0.6633387",
"0.63784707",
"0.63784707",
"0.6252041",
"0.62423354",
"0.6227609",
"0.61130893",
"0.61077225",
"0.6052538",
"0.59229606",
"0.59091115",
"0.59052867",
"0.58922935",
"0.5864748",
"0.5787195",
"0.57765543",
"0.57686204",
"0.57519925",
"0.5724784",
"0.569295",
"0.56711584",
"0.56563896",
"0.5645269",
"0.5643263",
"0.5640294",
"0.5638905",
"0.56272507",
"0.56239295",
"0.5597623",
"0.5569545",
"0.5558781",
"0.5550059",
"0.55351037",
"0.5524665",
"0.5512268",
"0.5509608",
"0.54932",
"0.5467394",
"0.5451231",
"0.54500663",
"0.54410625",
"0.5434364",
"0.54315567",
"0.5419039",
"0.54172766",
"0.54160833",
"0.54112834",
"0.5410805",
"0.53661495",
"0.5359964",
"0.5352855",
"0.53528196",
"0.53455216",
"0.5339525",
"0.53391266",
"0.53376836",
"0.533583",
"0.5332981",
"0.5324875",
"0.5323976",
"0.5319263",
"0.5310005",
"0.5309436",
"0.5307331",
"0.5299872",
"0.5299714",
"0.5288301",
"0.52775025",
"0.5273016",
"0.5269573",
"0.5266835",
"0.5261017",
"0.5260836",
"0.5243412",
"0.52365524",
"0.52234614",
"0.52198154",
"0.5216498",
"0.52082455",
"0.51856136",
"0.51781297",
"0.5171552",
"0.51715374",
"0.51694",
"0.5158454",
"0.5147499",
"0.5145722",
"0.5139353",
"0.51302516",
"0.5118607",
"0.51142955",
"0.510851",
"0.51049095",
"0.51007164",
"0.5096563",
"0.5095574",
"0.5093836",
"0.5091948"
] | 0.0 | -1 |
Calculate the first column of each relation in the rollup schema. | def __calculate_offsets(from_args):
index = 0
offsets = {}
for _id in from_args.iterkeys():
offsets[_id] = index
index += len(from_args[_id].scheme())
return offsets | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _columns(cls, schema: dsl.Source.Schema) -> typing.Sequence[str]:\n return tuple(f.name for f in schema)",
"def get_schema(self):\n return ', '.join('%s:%s' % (col, self.schema[col]) for col in self.schema)",
"def get_schema(self):\n return ', '.join(\n '%s:%s' % (col, self.schema[col]) for col in self.schema)",
"def getrelations(self):\n return self.getfieldnames('ONE')",
"def create_first_column(df):\n return df.Driver.apply(str) + \"_\" + df.Trip.apply(str)",
"def dependent_cols():\n\n return ...",
"def origin_columns(self):\n return self.intersection + self.origin_renames",
"def schema(self):",
"def first(self):\n return self._reduce_for_stat_function(F.first, only_numeric=False)",
"def infer_at_col(column, table_list, table_json):\n\n def find_fk_cols(l_table, r_table, table_json, r_col=None):\n if r_col:\n for fk in table_json['foreign_keys']:\n if r_col == fk[0] and table_json['column_names'][fk[1]][0] == l_table:\n return True, fk[1], r_col\n elif r_col == fk[1] and table_json['column_names'][fk[0]][0] == l_table:\n return True, fk[0], r_col\n else:\n for fk in table_json['foreign_keys']:\n if table_json['column_names'][fk[0]][0] == r_table and table_json['column_names'][fk[1]][0] == l_table:\n return True, fk[1], fk[0]\n elif table_json['column_names'][fk[1]][0] == r_table and table_json['column_names'][fk[0]][\n 0] == l_table:\n return True, fk[0], fk[1]\n return False, 0, 0\n\n def find_col(l_tables, r_table, r_col, table_json):\n # foreign key search:\n r_col_idx = 0\n if r_col != \"*\":\n for i, col in enumerate(table_json['column_names_original']):\n if col[0] == r_table and r_col.lower() == col[1].lower():\n r_col_idx = i\n break\n\n for lt in l_tables[1]:\n sucess, left, right = find_fk_cols(lt, r_table, table_json, r_col_idx)\n if sucess:\n return left, right\n\n # same name key search:\n if r_col != \"*\":\n for lt in l_tables[1]:\n for i, col_o, col in zip(range(len(table_json['column_names_original'])),\n table_json['column_names_original'], table_json['column_names']):\n if col_o[0] == lt and (col_o[1] == r_col or col[1] == table_json['column_names'][r_col_idx][1]):\n return i, r_col_idx\n if table_json['column_names'][r_col_idx][1].count(\" \") == 2:\n # three match two to return\n col_r_names = table_json['column_names'][r_col_idx][1].split(\" \")\n for lt in l_tables[1]:\n for i, col in zip(range(len(table_json['column_names_original'])), table_json['column_names']):\n if col[0] == lt and (col[1].count(\" \") == 1):\n c_ls = col[1].split(\" \")\n if c_ls[0] in col_r_names and c_ls[1] in col_r_names:\n return i, r_col_idx\n for i, col in zip(range(len(table_json['column_names_original'])), table_json['column_names']):\n if col[0] == lt and (col[1].count(\" \") == 0):\n if col[1] in col_r_names and table_json[\"table_names\"][lt] in col_r_names and col[1] != \\\n table_json[\"table_names\"][lt]:\n return i, r_col_idx\n return r_col_idx, r_col_idx\n else:\n result = []\n for j, rcol_o, rcol in zip(range(len(table_json['column_names_original'])),\n table_json['column_names_original'], table_json['column_names']):\n if rcol[0] != r_table or rcol_o[1] == '*':\n continue\n for lt in l_tables[1]:\n for i, col_o, col in zip(range(len(table_json['column_names_original'])),\n table_json['column_names_original'], table_json['column_names']):\n if col_o[0] == lt and (col_o[1] == rcol_o[1] or col[1] == rcol[1]):\n result.append([i, j])\n if result:\n for r in result:\n if table_json['column_names_original'][r[0]][1] not in [\"name\", \"id\"] and \\\n table_json['column_names'][r[0]][1] not in [\"name\", \"id\"]:\n return r[0], r[1]\n return result[0][0], result[0][1]\n # if len(l_tables[1]) == 1:\n # # use the first table:\n # pl = primary_keys(table_json,l_tables[1][0])\n # return pl,pl\n if r_table >= 0:\n # use the right table:\n pl = primary_keys(table_json, r_table)\n if pl >= 0:\n return pl, pl\n if len(l_tables[1]) >= 1:\n pl = primary_keys(table_json, l_tables[1][0])\n if pl >= 0:\n return pl, pl\n return 0, 0\n\n col_right = column[3][1].split(\".\")\n table_right = col_right[0].lower()\n col_right = col_right[1].lower()\n table_right_idx = [n.lower() for n in table_json['table_names_original']].index(table_right)\n if table_right in table_list: # It 
will be the same column for both side\n if col_right == \"*\":\n primarykey = primary_keys(table_json, table_right_idx)\n if primarykey >= 0:\n column[3][1] = table_json['table_column_names_original'][primarykey][1]\n column[2][1][1] = column[3][1]\n else:\n if len(table_list) > 1:\n from_table_net, table_fk_list = get_table_network(table_json, table_list, None)\n if from_table_net and table_right_idx in from_table_net[1]:\n # There is same tables\n if col_right == \"*\":\n primarykey = primary_keys(table_json, table_right_idx)\n if primarykey >= 0:\n column[3][1] = table_json['table_column_names_original'][primarykey][1]\n column[2][1][1] = column[3][1]\n elif from_table_net:\n # There isn't same tables, so look for the foreign key relations.\n col_left_idx, col_right_idx = find_col(from_table_net, table_right_idx, col_right, table_json)\n column[3][1] = table_json['table_column_names_original'][col_right_idx][1]\n column[2][1][1] = table_json['table_column_names_original'][col_left_idx][1]\n else:\n # There isn't same tables, so look for the foreign key relations.\n table_left_idx = [n.lower() for n in table_json['table_names_original']].index(table_list[0].lower())\n col_left_idx, col_right_idx = find_col([[], [table_left_idx]], table_right_idx, col_right, table_json)\n column[3][1] = table_json['table_column_names_original'][col_right_idx][1]\n column[2][1][1] = table_json['table_column_names_original'][col_left_idx][1]\n return column",
"def get_relation(self, relation_name):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(sql.SQL(\"SELECT * FROM {} ORDER BY index\").format(sql.Identifier(relation_name)))\n relation = cur.fetchall()\n cur.close()\n return relation\n except Exception as e:\n print(e)",
"def ArrowSchema(self) -> pa.Schema:",
"def find_head(self, relation):\n\t\treturn re.search('(?<=\\().*(?=-[0-9]*,)',relation).group(0)",
"def compute_first(self):\n compute_first_sets(self, self.rules)",
"def columns(self):\n result = self.execute(self.commands.table_columns(self.name))\n return [x[0] for x in result]",
"def fetch_first(self, tablename):\n\n query = 'select * from ' + tablename + \" ASC LIMIT 1\"\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchall()\n if fetcheddata:\n fetcheddata = fetcheddata[0]\n fetcheddata = self.__helper._functions__rowtodict([fetcheddata])\n return fetcheddata[0]\n return None",
"def __get_relation_decl_template__(self, name):\n return None",
"def pivot_source(self):\n return self.container['pivot_source']",
"def normalize_columns_together(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmax=column_matrix.max()\n\tprint \"The maximum:\t \", max\n\tmin=column_matrix.min()\n\tprint \"The minimum:\t \", min\n\trange=max-min\n\tprint \"range: \", range\n\tcolumn_matrix=column_matrix-min\n\tnormalized=column_matrix/range\n\treturn normalized",
"def _get_columns(source):\n return _get_tuple(source)",
"def graphcols(self):\n columns = []\n table = self.__parent_table\n for col in self.__column_list:\n columns.append(table.table_column(col).title())\n return columns",
"def head(app, args):\n d = util.load(args.db, args.table)\n print('# data shape:', d.shape)\n print(d.head(args.no))",
"def member_names(self) -> Iterator[str]:\n return yield_column_names(self.schema)",
"def _first_raw_aggregate(self, *fields):\n for field in fields:\n if field in self._raw_fields_aggregate:\n return self._raw_fields_aggregate[field]",
"def _rewrite_concat(self, node: saldag.Concat):\n\n # Copy over columns from existing relation\n out_rel_cols = node.out_rel.columns\n\n # Combine per-column collusion sets\n for idx, col in enumerate(out_rel_cols):\n columns_at_idx = [in_rel.columns[idx] for in_rel in node.get_in_rels()]\n col.coll_sets = utils.coll_sets_from_columns(columns_at_idx)",
"def relation(self):\n # [(2,OBJ), (3,OBJ)])] => 2\n return len(self.relations) > 0 and self.relations[0][0] or None",
"def get_fields_relation(self):\n self.set_definition(sps21relation)\n return self.get_fields()",
"def correlation(row):\n return row['correlation']",
"def schema(self):\n return self.table_info.schema",
"def generate_1st_column(rows):\n\n values = [i for i in range(1, rows+1)]\n result = []\n result.append(['col1'])\n\n for i in range(0, len(values)):\n result.append([values[i]])\n display_indicator(ROWS, i, str(i) + \" numbers processed for column 1\")\n\n return result",
"def _get_cols_source(transforms: List[api.Transform]) -> List[str]:\n\n all_names = [transform.name for transform in transforms]\n all_cols = []\n for transform in transforms:\n for col in transform.cols_input:\n all_cols.append(col)\n\n # Dedup\n all_cols = sorted(list(set(all_cols)))\n cols_source = [col for col in all_cols if col not in all_names]\n cols_source = sorted(cols_source)\n\n return cols_source",
"def flat(self):\n if len(self.description) != 1:\n msg = \"Results set with %d cols cannot be treated as flat\"\n raise TypeError(msg % len(self.description))\n return [r[0] for r in self._rows]",
"def items(self):\r\n for column in self.table.columns:\r\n yield (column, self[column.name])",
"def _first_raw_value(self, *fields):\n aggregate = self._first_raw_aggregate(*fields)\n if aggregate:\n return aggregate[0]",
"def get_minimum_column(self):\n min_col = self.root.right\n current_col = min_col.right\n while current_col != self.root:\n if current_col.sum < min_col.sum:\n min_col = current_col\n # Move on to the next column\n current_col = current_col.right\n return min_col",
"def graph_fo_relation(self, universe):\n return FO_Relation([tuple(row) for row in self.table()], universe)",
"def schema(self):\n pass",
"def getUniversal(cls):\n temp = cls.A * cls.A\n l = []\n for i in temp:\n l.append(i)\n return Relation(*l,name = 'Universal Relation')",
"def get_header(conn, table):\r\n cursor = conn.cursor()\r\n header = f\"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '{table}'\"\r\n cursor.execute(header)\r\n table_header = cursor.fetchall() #returns list of tuples\r\n header_list = []\r\n for c_header in table_header:\r\n header_list.append(c_header[0])\r\n return header_list",
"def getColumnHeads(self, table=None):\n if table == None:\n table = self.tableName\n self._db._c.execute(\"PRAGMA table_info(\" + table + \")\")\n return [ col[1] for col in self._db._c.fetchall() ]",
"def column(self, model=None):\n try:\n schema = (self.__model or model).schema()\n except AttributeError:\n return None\n else:\n return schema.column(self.__column)",
"def get_source_fullname(col_name):\n src_dump = get_src_dump()\n info = src_dump.find_one({\"$where\":\"function() {if(this.upload) {for(var index in this.upload.jobs) {if(this.upload.jobs[index].step == \\\"%s\\\") return this;}}}\" % col_name})\n if info:\n name = info[\"_id\"]\n if name != col_name:\n # col_name was a sub-source name\n return \"%s.%s\" % (name,col_name)\n else:\n return name",
"def get_headers (self, table, schema = 'TABLES'):\n get_headers = (\"SELECT * FROM information_schema.columns WHERE \"\n \"table_schema = \" + schema + \" AND \"\n \"table_name = \" + table + \"\")\n b_sql, b_table, self.sql = self.sql, self.table, get_headers \n self.run()\n self.sql = b_sql\n headers = self.as_DataFrame()[3].tolist()\n self.table = b_table\n\n return headers",
"def derive(self, dataset, table_name):\n seen = set()\n\n for fk in dataset.metadata.get_foreign_keys(table_name):\n if fk[\"table\"] == table_name:\n # Skip this relationship if the target table is the child\n continue\n if not isinstance(fk[\"field\"], str):\n # Skip this relationship if it involves a composite key\n continue\n\n column_name = 'nb_rows_in_%s' % fk[\"table\"]\n if column_name in seen:\n continue\n seen.add(column_name)\n\n # Count the number of rows for each key.\n child_table = dataset.tables[fk[\"table\"]][[fk[\"field\"]]].copy()\n child_table = child_table.fillna(0.0)\n child_table[\"_dummy_\"] = 1.0\n child_counts = child_table.groupby(fk[\"field\"]).count()\n child_counts.columns = [column_name]\n\n # Merge the counts into the parent table\n parent_table = dataset.tables[table_name]\n parent_table = pd.merge(\n parent_table.reset_index(),\n child_counts.reset_index(),\n how='left',\n left_on=fk[\"ref_field\"],\n right_on=fk[\"field\"]\n ).set_index(fk[\"ref_field\"])\n\n # Set null counts to 0 and specify constraints\n values = parent_table[column_name].fillna(0.0).values\n\n # Build a derived column object\n derived_column = DerivedColumn()\n derived_column.table_name = table_name\n derived_column.values = values\n derived_column.field = {\n \"name\": column_name,\n \"data_type\": \"numerical\"\n }\n derived_column.constraint = {\n \"constraint_type\": \"lineage\",\n \"related_fields\": [\n {\"table\": fk[\"table\"], \"field\": fk[\"field\"]}\n ],\n \"fields_under_consideration\": [\n {\"table\": fk[\"ref_table\"], \"field\": column_name}\n ],\n \"expression\": \"datareactor.atoms.RowCountAtom\"\n }\n\n yield derived_column",
"def schema(self):\n return self._schema",
"def columns(self):\n\n return None",
"def _generate_sql_parts(self, node,i=0,colNames=None,sql=None):\n\t\treferencesPersonFact = False\n\t\tif i == 0:\n\t\t\tsql=[]\n\t\t\tcolNames=[]\n\t\t\t# print('\\nSELECT *\\nFROM {}'.format(node))\n\t\tfor edge in self.DiG.out_edges(node):\n\t\t\t# print('\\tedge: {}->{} {}'.format(*edge,self.DiG.get_edge_data(*edge)))\n\t\t\tcolNames.append('{}.{}'.format(edge[1],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\t# print('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tsql.append('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tself._generate_sql_parts(edge[1],i+1,colNames,sql)\n\t\t\t# if 'dbo.PersonFact' in edge[0] or 'dbo.PersonFact' in edge[1]:\n\t\t\t\t# referencesPersonFact = True\n\t\t# print('_generate_sql_parts')\n\t\t# print(colNames)\n\t\t# if referencesPersonFact and 'CommunityMart.dbo.PersonFact.PatientID' not in colNames:\n\t\t\t# colNames.append('CommunityMart.dbo.PersonFact.PatientID')\n\t\tnet_new_colNames = []\n\t\t# remove colNames of already in leaf table\n\t\tfor colName in colNames:\n\t\t\tif node not in colName:\n\t\t\t\tnet_new_colNames.append(colName)\n\t\treturn net_new_colNames,sql",
"def get_first(self):\n return self.A[1][0] if self.n > 0 else None",
"def process_relation(self, name, table_name, key, value, all_time=None):\r\n ret = (\"%d\\tTotal %s %s %s\\n\" % \r\n (self.r.total_relation(table_name, key, value=value, all_time=all_time),\r\n name, self.phrase, self.time))\r\n return ret",
"def _generate_expanded_column_names(self):\n\n names = []\n # Get names of the descriptors\n des_names = [column for column in self.descriptor_dataframe][1:]\n\n # Generate expanded descriptor names for each compound\n for i in range(self.total_compounds):\n for des_name in des_names:\n name = 'compund_{}_{}'.format(i, des_name)\n names.append(name)\n\n return names",
"def freedom_columns():\n\n # Use Pandas to perform the sql query\n stmt = db.session.query(Freedom_short).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df.columns)[2:])",
"def columnTitles(self):\n \n pass",
"def columnTitles(self):\n \n pass",
"def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out",
"def head(self, xes):\n return xes[0]",
"def get_relation_fields(name):\n fields = []\n opts = my.rectypes[name]._meta\n for rel in opts.get_fields():\n # print(rel, rel.one_to_many or rel.many_to_many)\n if rel.one_to_many or rel.many_to_many:\n try:\n fields.append((rel.name, rel.get_internal_type(), rel.max_length))\n except AttributeError:\n fields.append((rel.name, rel.get_internal_type(), -1))\n return fields",
"def col(self):\n\t\treturn self.__col",
"def resolve_ref(self, document):\n if isinstance(self.source, int):\n return self.source\n else:\n try:\n return document.title_xref[self.source.lower()]\n except KeyError:\n raise Exception(\"Could not find column '%s' in document %s. Possible columns are: %s.\"% (self.source.lower(), document.f, document.title_xref))\n except AttributeError:\n raise Exception(\"Can't refer to columns by name (you referred to '%s') in document %s. Does the CSV have a header?\" % (self.source.lower(), document.f))",
"def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp",
"def auto_agg(sco_type, prop, col_type):\n\n # Don't aggregate certain columns; ignore them\n last = get_last(prop)\n if last in ['x_root', 'x_contained_by_ref', 'type', 'id']:\n return None\n\n if prop == 'number_observed':\n return 'SUM(\"number_observed\") AS \"number_observed\"'\n elif prop in ['first_observed', 'start']:\n return f'MIN(\"{prop}\") AS \"{prop}\"'\n elif prop in ['last_observed', 'end']:\n return f'MAX(\"{prop}\") AS \"{prop}\"'\n\n if ((sco_type == 'network-traffic' and prop.endswith('_port'))\n or (sco_type == 'process' and prop.endswith('pid'))):\n agg = f'COUNT(DISTINCT \"{prop}\")'\n alias = f'\"unique_{prop}\"'\n elif col_type.lower() in ['integer', 'bigint']:\n agg = f'AVG(\"{prop}\")'\n alias = f'\"mean_{prop}\"'\n else:\n agg = f'COUNT(DISTINCT \"{prop}\")'\n alias = f'\"unique_{prop}\"'\n\n if len(alias) > 63:\n # PostgreSQL has a limit of 63 chars per identifier\n return None\n\n return f'{agg} AS {alias}'",
"def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")",
"def format_relation(relation: list):\n pattern = \"%1s%6s%8i%1i%1s%10.2f%10.2f%1i%5i%5i%1i%10.2f%10.2f%10.2f%1i\"\n return pattern % (\n relation[0],\n relation[1] if relation[1] is not None else 0,\n relation[2],\n relation[3],\n relation[4],\n relation[5],\n relation[6],\n relation[7],\n relation[8],\n relation[9],\n relation[10],\n relation[11],\n relation[12],\n relation[13],\n relation[14]\n )",
"def schema() -> None:\n pass",
"def schema(self):\n raise NotImplementedError",
"def merge_tables(tables):\n base = tables[0]\n for table in tables[1:]:\n for row_index, row in enumerate(table):\n # Chop off duplicate leftmost column\n base[row_index] += row[1:]\n return base",
"def header(self) -> List:\n return self.rows[0]",
"def _get_columns(cls, manifest_url):\n return [k[\"name\"] for k in\n json.loads(cls._read_s3_url(manifest_url))[\"schema\"][\"elements\"]]",
"def concat_columns(objs, validate_schema=True):\n if not objs:\n raise ValueError(\"No objects to concatenate\")\n\n table_name = \"\"\n\n logical_types = {}\n semantic_tags = {}\n col_descriptions = {}\n col_origins = {}\n col_metadata = {}\n table_metadata = {}\n use_standard_tags = {}\n\n index = None\n time_index = None\n\n # Record the typing information for all the columns that have Woodwork schemas\n col_names_seen = set()\n for obj in objs:\n ww_columns = {}\n if isinstance(obj.ww.schema, ww.table_schema.TableSchema):\n # Raise error if there's overlap between table metadata\n overlapping_keys = obj.ww.metadata.keys() & table_metadata.keys()\n if overlapping_keys:\n raise ValueError(\n f\"Cannot resolve overlapping keys in table metadata: {overlapping_keys}\"\n )\n\n table_metadata = {**obj.ww.metadata, **table_metadata}\n\n # Combine table names\n if obj.ww.name is not None:\n if table_name:\n table_name += \"_\"\n table_name += str(obj.ww.name)\n\n # Cannot have multiple tables with indexes or time indexes set\n if obj.ww.index is not None:\n if index is None:\n index = obj.ww.index\n else:\n raise IndexError(\n \"Cannot set the Woodwork index of multiple input objects. \"\n \"Please remove the index columns from all but one table.\"\n )\n if obj.ww.time_index is not None:\n if time_index is None:\n time_index = obj.ww.time_index\n else:\n raise IndexError(\n \"Cannot set the Woodwork time index of multiple input objects. \"\n \"Please remove the time index columns from all but one table.\"\n )\n\n ww_columns = obj.ww.schema.columns\n elif isinstance(obj.ww.schema, ww.column_schema.ColumnSchema):\n ww_columns = {obj.name: obj.ww.schema}\n\n # Compile the typing information per column\n for name, col_schema in ww_columns.items():\n if name in col_names_seen:\n raise ValueError(\n f\"Duplicate column '{name}' has been found in more than one input object. \"\n \"Please remove duplicate columns from all but one table.\"\n )\n logical_types[name] = col_schema.logical_type\n semantic_tags[name] = col_schema.semantic_tags - {\"time_index\"} - {\"index\"}\n col_metadata[name] = col_schema.metadata\n col_descriptions[name] = col_schema.description\n col_origins[name] = col_schema.origin\n use_standard_tags[name] = col_schema.use_standard_tags\n\n col_names_seen.add(name)\n\n # Perform concatenation with the correct library\n obj = objs[0]\n dd = import_or_none(\"dask.dataframe\")\n ks = import_or_none(\"databricks.koalas\")\n\n lib = pd\n if ww.accessor_utils._is_koalas_dataframe(\n obj\n ) or ww.accessor_utils._is_koalas_series(obj):\n lib = ks\n elif ww.accessor_utils._is_dask_dataframe(obj) or ww.accessor_utils._is_dask_series(\n obj\n ):\n lib = dd\n\n combined_df = lib.concat(objs, axis=1, join=\"outer\")\n\n # Initialize Woodwork with all of the typing information from the input objs\n # performing type inference on any columns that did not already have Woodwork initialized\n combined_df.ww.init(\n name=table_name or None,\n index=index,\n time_index=time_index,\n logical_types=logical_types,\n semantic_tags=semantic_tags,\n table_metadata=table_metadata or None,\n column_metadata=col_metadata,\n column_descriptions=col_descriptions,\n column_origins=col_origins,\n use_standard_tags=use_standard_tags,\n validate=validate_schema,\n )\n return combined_df",
"def get_colnames(self):\n\n cd = self.conn.execute('select * from atom')\n print('Possible column names are:')\n names = list(map(lambda x: x[0], cd.description))\n print('\\trowID')\n for n in names:\n print('\\t'+n)",
"def getRelation(self):\n objects_cls = self.getClass()\n if objects_cls:\n cldef = objects_cls._getClassDef()\n if cldef:\n return cldef.getRelation()",
"def _get_relationship_data(self):\n relationship_field = request.path.split('/')[-1]\n if current_app.config.get('DASHERIZE_API') == True:\n relationship_field = relationship_field.replace('-', '_')\n\n if relationship_field not in get_relationships(self.schema).values():\n raise RelationNotFound('', \"{} has no attribute {}\".format(self.schema.__name__, relationship_field))\n\n related_type_ = self.schema._declared_fields[relationship_field].type_\n related_id_field = self.schema._declared_fields[relationship_field].id_field\n model_relationship_field = get_model_field(self.schema, relationship_field)\n\n return relationship_field, model_relationship_field, related_type_, related_id_field",
"def intermediary_to_schema(tables, relationships, output):\n dot_file = _intermediary_to_dot(tables, relationships)\n #graph = AGraph()\n #graph = graph.from_string(dot_file)\n extension = output.split('.')[-1]\n #graph.draw(path=output, prog='dot', format=extension)\n #Source.from_file(filename, engine='dot', format=extension)\n return Source(dot_file, engine='dot', format=extension)",
"def first_principal_component(X):\n guess = [1 for _ in X[0]]\n unscaled_maximizer = maximize_batch(\n partial(directional_variance, X),\n partial(directional_variance_gradient, X),\n guess)\n return direction(unscaled_maximizer)",
"def recalculate_pivots(self):\n pass",
"def process_source(self):\n source_col = getattr(self.model_cls, self.source)\n return source_col",
"def user_col(self):\n if not self.col_name_mapping:\n return []\n user_sparse, user_dense = [], []\n if \"user_sparse_col\" in self.col_name_mapping:\n user_sparse = list(self.col_name_mapping[\"user_sparse_col\"].keys())\n if \"user_dense_col\" in self.col_name_mapping:\n user_dense = list(self.col_name_mapping[\"user_dense_col\"].keys())\n # The result columns will be sorted by key\n return user_sparse + user_dense",
"def columns(self):\r\n _columns = self.base_columns + self.veg_columns\r\n return _columns",
"def columns(self, model=None):\n for query in self.__queries:\n for column in query.columns(model=model):\n yield column",
"def _compute_columns(log: EventLog, prefix_length: int, padding: bool) -> list:\n return [\"trace_id\"] + \\\n sorted(list({\n event['concept:name']\n for trace in log\n for event in trace[:prefix_length]\n })) + \\\n ['0'] if padding else [] + \\\n ['label']",
"def first_visible_column(self):\n return self.container['first_visible_column']",
"def _ensure_schema_has_covariates(self, x_underscore_columns):\n previous_rename = self.covariate_rename\n if set(x_underscore_columns) == set(previous_rename.values()):\n return\n # Only rewrite schema if the x_<integer> list has changed.\n # because the schema depends on the number of covariates, not\n # their names.\n covariate_columns = list(x_underscore_columns)\n # ASCII sorting isn't correct b/c x_11 is before x_2.\n covariate_columns.sort(key=lambda x: int(x[2:]))\n for create_name in [\"data\", \"avgint\"]:\n empty = self.dismod_file.empty_table(create_name)\n without = [c for c in empty.columns if not c.startswith(\"x_\")]\n # The wrapper needs these columns to have a dtype of Real.\n empty = empty[without].assign(**{cname: np.empty((0,), dtype=np.float) for cname in covariate_columns})\n self.dismod_file.update_table_columns(create_name, empty)\n if getattr(self.dismod_file, create_name).empty:\n CODELOG.debug(f\"Writing empty {create_name} table with columns {covariate_columns}\")\n setattr(self.dismod_file, create_name, empty)\n else:\n CODELOG.debug(f\"Adding to {create_name} table schema the columns {covariate_columns}\")",
"def showSchema (self):\n\t\ts=[];add=s.append\n\t\tfor i in range(len(self.schema)):\n\t\t\tadd (\"%d. %s\" % (i+1, self.schema[i]))\n\t\treturn join (s, '\\n')",
"def focn(self):\n return self.table[1, 1] / (self.table[1, 0] + self.table[1, 1])",
"def get_schema(self):\r\n return self.__schema",
"def head_pumps(self):\n for name in self._head_pumps:\n yield name, self._data[name]",
"def first(self):\r\n if self.head == None: #check if first(head) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.head.data #return the data of head node\r",
"def rollup(self, name, keys, aggregators=None, callback=None) :\n\n class Wildcard(object) :\n def __repr__(self) :\n return '*'\n w = Wildcard()\n\n # get the records for this table\n #\n rows = [d.as_dict() for d in self]\n cols = [k.get_name() for k in keys]\n indx = {}\n for c in cols :\n indx[c] = {}\n for i in range(len(rows)) :\n r = rows[i]\n for c in cols :\n v = r[c]\n if v not in indx[c].keys() :\n indx[c][v] = set()\n\n indx[c][v].add(i)\n\n if not aggregators :\n aggregators = [datatable.selectors.count_aggregator()]\n agg_count = 1\n count_only = True\n else :\n agg_count = len(aggregators)\n count_only = False\n\n vals = []\n l = logging.getLogger('main')\n l.debug('Rolling up on keys %s' % str([k.get_name() for k in keys]))\n\n for i in range(len(cols)) :\n key = cols[i]\n kvs = list(indx[key].keys())\n kvs.append(w)\n vals.append(kvs)\n row_keys = list(itertools.product(*vals))\n\n rec = [None] * len(keys)\n if count_only :\n rec.append(0)\n else :\n rec.extend([None] * agg_count)\n cols.extend([a.get_name() for a in aggregators ])\n agg = []\n\n processed = 0\n skipped = 0\n total = len(row_keys)\n for rk in row_keys :\n if callback and not callback(rk) :\n skipped += 1\n continue\n\n row_indx = set(range(len(rows)))\n for i in range(len(rk)) :\n key = rk[i]\n if key == w :\n rec[i] = '***All***'\n else :\n rec[i] = key\n key_indx = indx[cols[i]][key]\n row_indx.intersection_update(key_indx)\n\n if count_only :\n rec[-1] = len(row_indx)\n else :\n sub_rows = [rows[i] for i in row_indx]\n agv = [a(sub_rows) for a in aggregators]\n rec[-agg_count:] = agv\n\n agg.append(rec[:])\n processed += 1\n\n l.info('DataTable.rollup: processed %d of %d keys; skipped %d' % (processed, total, skipped))\n return datatable.results.DataTableResults(name, cols, agg)",
"def test_get_relation_type(self):\n pass",
"def _get_column_name(df, name='agg'):\n while name in df.columns:\n name += '_'\n return name",
"def _get_table_columns(self):\n try:\n table_header = parse_table_head(self.table.value, version=self.version)\n merged_data = self.table.value[table_header.tdef_header_end:]\n if table_header.TDEF_header.next_page_ptr:\n merged_data = merged_data + self._merge_table_data(table_header.TDEF_header.next_page_ptr)\n\n parsed_data = parse_table_data(merged_data, table_header.real_index_count,\n table_header.column_count, version=self.version)\n\n # Merge Data back to table_header\n table_header['column'] = parsed_data['column']\n table_header['column_names'] = parsed_data['column_names']\n\n except ConstructError:\n logging.error(f\"Failed to parse table header {self.table.value}\")\n return\n col_names = table_header.column_names\n columns = table_header.column\n\n # Add names to columns metadata so we can use only columns for parsing\n for i, c in enumerate(columns):\n c.col_name_str = col_names[i].col_name_str\n\n # column_index is more accurate(id is always incremented so it is wrong when a column is deleted).\n # Some tables like the catalog don't have index, so if indexes are 0 use id.\n\n # create a dict of index to column to make it easier to access. offset is used to make this zero based\n offset = min(x.column_index for x in columns)\n column_dict = {x.column_index - offset: x for x in columns}\n # If column index is not unique try best effort\n if len(column_dict) != len(columns):\n # create a dict of id to column to make it easier to access\n column_dict = {x.column_id: x for x in columns}\n\n if len(column_dict) != table_header.column_count:\n logging.debug(f\"expected {table_header.column_count} columns got {len(column_dict)}\")\n return column_dict, table_header",
"def resolve(self):\n ind = [ i for i in combinations(range(self.nplex()),2) ]\n hi,lo = self.insertLevel(ind)\n lo.sort(axis=1)\n ind = sortByColumns(lo)\n return lo[ind]",
"def _get_fanout_columns(table_info):\n ret = []\n for t, cs in table_info.items():\n if t == PRIMARY_RELATION:\n continue\n if len(cs) == 1:\n ret.append(\"__fanout_{}\".format(t))\n else:\n for c in cs:\n ret.append(\"__fanout_{}__{}\".format(t, c))\n return ret",
"def header(self):\r\n # favour Column.header\r\n column_header = self.column.header\r\n if column_header:\r\n return column_header\r\n # fall back to automatic best guess\r\n return self.verbose_name",
"def COL(self) -> Column:\n fields_seq = self.SEQ\n col: Column = sql_funcs.col(fields_seq[0]) # pylint: disable=no-member\n for col_field_name in fields_seq[1:]:\n col = col[col_field_name]\n return col",
"def __call__(self, doc):\n name = self._rename if self._rename is not None else self._select\n if self._transform:\n col = Column(self._transform(x) for x in doc[self._select])\n else:\n col = doc[self._select]\n return (name, col)",
"def header(self):\n\n return [c.name for c in self.columns]",
"def __generate_table(package_response, primary_table_id):\n if isinstance(package_response, Package):\n primary_table = package_response.tables[primary_table_id]\n header_id = primary_table.definition.header_table_id\n header_table = package_response.tables[header_id]\n dimension_columns = list(filter(lambda column_obj: column_obj.is_dimension, primary_table.definition.columns))\n dimension_columns_count = len(dimension_columns)\n row_count = len(primary_table.data.rows)\n header_row_count = len(header_table.data.rows)\n\n headers = list(list())\n # Constructs the column headers by considering dimension columns and header rows\n for series_definition_column in header_table.definition.columns:\n header_row = list()\n for i in range(0, dimension_columns_count, 1):\n if dimension_columns[i].description is \"\":\n header_row.append(\" \")\n else:\n header_row.append(dimension_columns[i].description)\n\n for i in range(0, header_row_count, 1):\n header_row.append(str(_SeriesDataHelper.get_value_helper(header_table.data.columns[series_definition_column.id], series_definition_column.type, i, series_definition_column.format.null_format)))\n headers.append(header_row)\n\n data = list(list())\n # Constructs the column data\n for i in range(0, row_count, 1):\n data_row = list()\n for series_definition_column in primary_table.definition.columns:\n data_row.append(str(_SeriesDataHelper.get_value_helper(primary_table.data.columns[series_definition_column.id], series_definition_column.type, i, series_definition_column.format.null_format)))\n data.append(data_row)\n\n if len(header_table.definition.columns) > 1:\n data_frame = pd.DataFrame(data=data)\n data_frame.columns = pd.MultiIndex.from_arrays(headers)\n else:\n data_frame = pd.DataFrame(data=data, columns = headers[0])\n\n return data_frame\n\n else:\n ValueError(\"Response data passed should be of package type.\")",
"def pod(self):\n return self.table[0, 0] / (self.table[0, 0] + self.table[1, 0])",
"def get_initial_value(\n self, rel_name):\n return self._np_initval[rel_name].transpose()",
"def schema_ref(schema, table):\n return schema + '.' + table",
"def concat_columns(self, frame_list):\n frame_list = list(frame_list)\n if len(frame_list) <= 0:\n return None\n if len(frame_list) == 1:\n return frame_list[0]\n res = pl.concat(frame_list, how=\"horizontal\")\n return res"
] | [
"0.5502951",
"0.5398246",
"0.52835834",
"0.51542795",
"0.4965693",
"0.4851191",
"0.483501",
"0.47420785",
"0.47310165",
"0.47011814",
"0.46979633",
"0.46741876",
"0.46594706",
"0.46257573",
"0.45953274",
"0.45745704",
"0.45718452",
"0.45652014",
"0.4507311",
"0.44981587",
"0.4496589",
"0.44940507",
"0.44851866",
"0.447953",
"0.4474225",
"0.4469472",
"0.44691458",
"0.44500223",
"0.44462788",
"0.44448677",
"0.44425645",
"0.44313222",
"0.44145223",
"0.44074583",
"0.44000086",
"0.4381809",
"0.437024",
"0.436894",
"0.43687496",
"0.43610805",
"0.4358668",
"0.4357272",
"0.4353845",
"0.43497175",
"0.43456",
"0.43373966",
"0.43338397",
"0.43333256",
"0.43276614",
"0.43159372",
"0.4302837",
"0.4302369",
"0.4302369",
"0.4301056",
"0.43002978",
"0.42992353",
"0.42939332",
"0.42854816",
"0.42842653",
"0.42800948",
"0.4270862",
"0.42689675",
"0.42651576",
"0.4261842",
"0.42596123",
"0.42594385",
"0.4256664",
"0.42420655",
"0.42360505",
"0.42330676",
"0.4231143",
"0.42287233",
"0.42247242",
"0.4201969",
"0.42018571",
"0.4193008",
"0.41895628",
"0.41887844",
"0.41874963",
"0.41827002",
"0.41736162",
"0.4170675",
"0.41683167",
"0.4163647",
"0.4163028",
"0.41625494",
"0.41525593",
"0.415255",
"0.4152362",
"0.4151419",
"0.41508383",
"0.41501486",
"0.41476694",
"0.41468257",
"0.41448447",
"0.41419154",
"0.41419104",
"0.41394347",
"0.4133825",
"0.41314262",
"0.41309416"
] | 0.0 | -1 |
Merge a sequence of operations into a crossproduct tree. | def merge(from_args):
assert len(from_args) > 0
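# Helper for the fold below: wrap two plans in a CrossProduct node.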
def cross(x, y):
return algebra.CrossProduct(x, y)
from_ops = from_args.values()
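# Left-fold the operands, so reduce builds a left-deep cross-product tree.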
op = reduce(cross, from_ops)
return (op, __calculate_offsets(from_args)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compose(*ops):\n if len(ops) == 0:\n return [0, 1, 2, 3, 4, 5, 6, 7]\n if len(ops) == 1:\n return ops[0]\n if len(ops) == 2:\n op1, op2 = ops\n return [op2[op1[v]] for v in range(8)]\n op1 = ops[0]\n rest = ops[1:]\n return compose(op1, compose(*rest))",
"def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])",
"def FO_Operation_Product(operations, d_universes):\n @FO_Operation_decorator(list(product(*d_universes)), operations[0].arity())\n def product_op(*args):\n result = []\n for i, t in enumerate(zip(*args)):\n result.append(operations[i](*t))\n return tuple(result)\n\n return product_op",
"def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result",
"def flatten(self):\n to_remove = []\n for elem in self.operands:\n # if element belong to same class (nested And's, Or's)\n if isinstance(elem, self.__class__):\n # recursive flattening first\n elem.flatten()\n # remove from current list\n to_remove.append(elem)\n\n # add new elements\n for elem in to_remove:\n self.operands.remove(elem)\n self.operands.extend(elem.operands)",
"def _operation_tree(self):\n\n # initial state\n i = 0\n level = 0\n stack = []\n current = None\n\n def _create_operation(args):\n profile_stats = None\n name = args[0].strip()\n args.pop(0)\n if len(args) > 0 and \"Records produced\" in args[-1]:\n records_produced = int(\n re.search(\"Records produced: (\\\\d+)\", args[-1]).group(1)\n )\n execution_time = float(\n re.search(\"Execution time: (\\\\d+.\\\\d+) ms\", args[-1]).group(1)\n )\n profile_stats = ProfileStats(records_produced, execution_time)\n args.pop(-1)\n return Operation(\n name, None if len(args) == 0 else args[0].strip(), profile_stats\n )\n\n # iterate plan operations\n while i < len(self.plan):\n current_op = self.plan[i]\n op_level = current_op.count(\" \")\n if op_level == level:\n # if the operation level equal to the current level\n # set the current operation and move next\n child = _create_operation(current_op.split(\"|\"))\n if current:\n current = stack.pop()\n current.append_child(child)\n current = child\n i += 1\n elif op_level == level + 1:\n # if the operation is child of the current operation\n # add it as child and set as current operation\n child = _create_operation(current_op.split(\"|\"))\n current.append_child(child)\n stack.append(current)\n current = child\n level += 1\n i += 1\n elif op_level < level:\n # if the operation is not child of current operation\n # go back to it's parent operation\n levels_back = level - op_level + 1\n for _ in range(levels_back):\n current = stack.pop()\n level -= levels_back\n else:\n raise Exception(\"corrupted plan\")\n return stack[0]",
"def _operation_traverse(self, op, op_f, aggregate_f, combine_f): # noqa\n # apply op_f for each operation\n op_res = op_f(op)\n if len(op.children) == 0:\n return op_res # no children return\n else:\n # apply _operation_traverse recursively\n children = [\n self._operation_traverse(child, op_f, aggregate_f, combine_f)\n for child in op.children\n ]\n # combine the operation result with the children aggregated result\n return combine_f(op_res, aggregate_f(children))",
"def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def extract_operator_products(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operator_products(arg, independent=independent)\n\n elif isinstance(e, Mul):\n c, o = split_coeff_operator(e)\n if o != 1:\n ops.append(o)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n no_ops = []\n for op in ops:\n no_op = normal_ordered_form(op.expand(), independent=independent)\n if isinstance(no_op, (Mul, Operator, Pow)):\n no_ops.append(no_op)\n elif isinstance(no_op, Add):\n for sub_no_op in extract_operator_products(no_op, independent=independent):\n no_ops.append(sub_no_op)\n else:\n raise ValueError(\"Unsupported type in loop over ops: %s: %s\" %\n (type(no_op), no_op))\n\n return list(set(no_ops))",
"def Composite(oper, **kw_kernels):\n oplib = {\n '+': dict(\n ufunc=np.add, # associated numpy ufunc\n jfunc=lambda F, f, j: j, # Jacobian evaluator\n jgen=lambda F_expr, j_expr, i: j_expr, # Jacobian code generator\n opname='Addtive',\n ),\n '*': dict(\n ufunc=np.multiply,\n jfunc=lambda F, f, j: F / f * j,\n jgen=lambda F_expr, j_expr, i: Template('(${X * })').render(\n X=F_expr[:i] + (j_expr,) + F_expr[i + 1:]\n ),\n opname='Product'\n ),\n }\n\n if oper not in oplib:\n raise ValueError(f'Invalid reduction operator {repr(oper)}.')\n\n @cpptype([(key, ker.dtype) for key, ker in kw_kernels.items()])\n class CompositeKernel(MicroKernel):\n @property\n def name(self):\n return 'Composite'\n\n @property\n def opname(self):\n return self._opname\n\n def __init__(self, opstr, ufunc, jfunc, jgen, opname, **kw_kernels):\n self.opstr = opstr\n self.ufunc = ufunc\n self.jfunc = jfunc\n self.jgen = jgen\n self._opname = opname\n self.kw_kernels = kw_kernels\n\n def __repr__(self):\n return Template('${cls}(${opstr}, ${kwexpr, })').render(\n cls=self.name,\n opstr=repr(self.opstr),\n kwexpr=[f'{k}={repr(K)}' for k, K in self.kw_kernels.items()])\n\n def __call__(self, X, Y, jac=False):\n if jac is True:\n F, J = list(\n zip(*[kernel(X[key], Y[key], True)\n for key, kernel in self.kw_kernels.items()])\n )\n S = self.ufunc.reduce(F)\n jacobian = np.array([\n self.jfunc(S, f, j) for i, f in enumerate(F) for j in J[i]\n ])\n return S, jacobian\n else:\n return self.ufunc.reduce([\n f(X[k], Y[k]) for k, f in self.kw_kernels.items()\n ])\n\n def gen_expr(self, x, y, theta_scope=''):\n F, J = list(\n zip(*[kernel.gen_expr('%s.%s' % (x, key),\n '%s.%s' % (y, key),\n '%s%s.' % (theta_scope, key))\n for key, kernel in self.kw_kernels.items()])\n )\n f = Template('(${F ${opstr} })').render(opstr=self.opstr, F=F)\n jacobian = [\n self.jgen(F, j, i) for i, _ in enumerate(F) for j in J[i]\n ]\n return f, jacobian\n\n @property\n def theta(self):\n return pretty_tuple(\n self.name,\n self.kw_kernels.keys()\n )(*[k.theta for k in self.kw_kernels.values()])\n\n @theta.setter\n def theta(self, seq):\n for kernel, value in zip(self.kw_kernels.values(), seq):\n kernel.theta = value\n\n @property\n def bounds(self):\n return pretty_tuple(\n self.name,\n self.kw_kernels.keys()\n )(*[k.bounds for k in self.kw_kernels.values()])\n\n @property\n def minmax(self):\n return self.ufunc.reduce(\n [k.minmax for k in self.kw_kernels.values()],\n axis=0\n )\n\n # for the .state property of cpptype\n for key in kw_kernels:\n setattr(CompositeKernel, key,\n property(lambda self, key=key: self.kw_kernels[key]))\n\n return CompositeKernel(oper, **oplib[oper], **kw_kernels)",
"def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result",
"def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result",
"def compose_children(self):\n for l_symbol, l_info in self.matrix[self.i][self.k].items():\n l_rhs = Nonterminal(l_symbol)\n for r_symbol, r_info in self.matrix[self.k][self.j].items():\n r_rhs = Nonterminal(r_symbol)\n\n # check the subtrees in [i][k] and [k][j] to see if you can make a valid rhs\n potential_rules = [p for p in self.grammar.productions(rhs=l_rhs) if p.rhs()[1] == r_rhs]\n for potential_rule in sorted(potential_rules, key=lambda x: x.prob()):\n new_lhs = potential_rule.lhs().symbol()\n new_tree = Tree(new_lhs, [l_info[1], r_info[1]])\n new_prob = log(potential_rule.prob()) + l_info[0] + r_info[0]\n if new_lhs not in self.matrix[self.i][self.j] or new_prob > self.matrix[self.i][self.j][new_lhs][0]:\n self.matrix[self.i][self.j][new_lhs] = (new_prob, new_tree)",
"def _flatten(self, op):\n if isinstance(self, op):\n for i, arg in enumerate(self._args):\n if isinstance(arg, self.DUAL):\n others = self._args[:i] + self._args[i+1:]\n expr = op.DUAL(*[op(a, *others) for a in arg.args])\n if isinstance(expr, OrAnd):\n return expr._flatten(op)\n else:\n return expr\n else:\n return self\n else:\n nested, others = list(), list()\n for arg in self._args:\n if arg.depth > 1:\n nested.append(arg)\n else:\n others.append(arg)\n args = [arg._flatten(op) for arg in nested] + others\n return op.DUAL(*args)",
"def build(cls, ops, signals):\n\n logger.debug(\"===================\")\n logger.debug(\"BUILD %s\", ops)\n\n if ops not in cls.op_builds:\n raise BuildError(\"Operators build has not been initialized \"\n \"(missed pre-build step)\")\n\n output = cls.op_builds[ops].build_step(signals)\n\n if isinstance(output, (tf.Tensor, tf.Variable)):\n output = [output]\n elif isinstance(output, tuple):\n output = list(output)\n\n return output",
"def cartesian_product(G, H):\n GH = _init_product_graph(G, H)\n GH.add_nodes_from(_node_product(G, H))\n GH.add_edges_from(_edges_cross_nodes(G, H))\n GH.add_edges_from(_nodes_cross_edges(G, H))\n return GH",
"def run_all(operations=ops):\n for operation in operations:\n run(operation)",
"def _combine(self, other, operation):\n if getattr(other, 'empty'):\n return self\n\n if self.empty:\n return other\n\n return QCombination(operation, [self, other])",
"def compute_operator(self, snapshots):\n\n # To avoid recursion function, use FIFO list to simulate the tree\n # structure\n data_queue = [snapshots.copy()]\n\n current_bin = 0\n while data_queue:\n Xraw = data_queue.pop(0)\n\n n_samples = Xraw.shape[1]\n\n step = max(1, int(np.floor(old_div(n_samples, self._nyq))))\n Xsub = Xraw[:, ::step]\n Xc = Xsub[:, :-1]\n Yc = Xsub[:, 1:]\n\n Xc, Yc = compute_tlsq(Xc, Yc, self._tlsq_rank)\n\n rho = old_div(float(self._max_cycles), n_samples)\n sub_operator = SubMrDMDOperator(svd_rank=self._svd_rank,\n eigs_divider=2. * np.pi * step, rho=rho)\n sub_operator.compute_operator(Xc, Yc)\n\n modes = sub_operator.modes\n eigs = sub_operator.eigenvalues\n Atilde = sub_operator.as_numpy_array\n b = sub_operator.compute_sub_amplitudes(Xc, self._opt)\n\n #---------------------------------------------------------------\n # DMD Amplitudes and Dynamics\n #---------------------------------------------------------------\n Vand = np.vander(np.power(eigs, old_div(1., step)), n_samples, True)\n\n Psi = (Vand.T * b).T\n\n self._modes.append(modes)\n self._b.append(b)\n self._Atilde.append(Atilde)\n self._eigenvalues.append(eigs)\n self._nsamples.append(n_samples)\n self._steps.append(step)\n\n if Xraw.dtype == 'float64':\n Xraw -= modes.dot(Psi).real\n else:\n Xraw -= modes.dot(Psi)\n\n if current_bin < 2**(self._max_level - 1) - 1:\n current_bin += 1\n half = int(np.ceil(old_div(Xraw.shape[1], 2)))\n data_queue.append(Xraw[:, :half])\n data_queue.append(Xraw[:, half:])\n else:\n current_bin += 1",
"def test_commutator_expansion():\n hs = LocalSpace(\"0\")\n A = OperatorSymbol('A', hs=hs)\n B = OperatorSymbol('B', hs=hs)\n C = OperatorSymbol('C', hs=hs)\n D = OperatorSymbol('D', hs=hs)\n alpha = symbols('alpha')\n assert Commutator(A + B, C).expand() == Commutator(A, C) + Commutator(B, C)\n assert Commutator(A, B + C).expand() == Commutator(A, B) + Commutator(A, C)\n assert Commutator(A + B, C + D).expand() == (\n Commutator(A, C)\n + Commutator(A, D)\n + Commutator(B, C)\n + Commutator(B, D)\n )\n assert Commutator(A + B, C + D + alpha).expand() == (\n Commutator(A, C)\n + Commutator(A, D)\n + Commutator(B, C)\n + Commutator(B, D)\n )",
"def chain(ops: Sequence[Task]) -> List[Relation]:\n return [Relation(from_task_id=a.task_id, to_task_id=b.task_id) for a, b in zip(ops, ops[1::])]",
"def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)",
"def commute_operands(self, node):\n def is_assumption(n):\n \"\"\"Return whether a node is an assumption.\"\"\"\n if not isinstance(n, types.Symbol):\n return False\n symbol = self.symbol_table.lookup(n.name)\n if symbol and symbol.type_ == SymbolType.StackItem:\n return True\n return False\n\n def has_assumption(n):\n \"\"\"Return whether a BinOpCode contains an assumption.\"\"\"\n if not isinstance(n, types.BinOpCode):\n return False\n return any(is_assumption(i) for i in [n.left, n.right])\n\n def should_commute(n):\n return is_assumption(n) or has_assumption(n)\n\n # Commute operands of different operations.\n # e.g. 2 + assumption + 3 --> 2 + 3 + assumption\n if self.is_commutative(node) and has_assumption(node.left) and node.left.name == node.name:\n # Move the assumption so we can be sure it's in the attribute 'right'.\n if is_assumption(node.left.left):\n node.left.left, node.left.right = node.left.right, node.left.left\n\n self.debug('Commuting operations for %s and %s' % (format_structural_op(node.left), format_structural_op(node.right)), node.lineno)\n right = node.right\n node.right = node.left.right\n node.left.right = right\n\n if should_commute(node.left) or not should_commute(node.right):\n return\n\n if self.is_commutative(node):\n self.debug('Commuting operands for %s' % format_structural_op(node), node.lineno)\n node.left, node.right = node.right, node.left\n elif self.has_logical_equivalent(node):\n logmsg = 'Replacing %s with logical equivalent ' % format_structural_op(node)\n node.name = logical_equivalents[node.name]\n node.left, node.right = node.right, node.left\n logmsg += format_structural_op(node)\n self.debug(logmsg, node.lineno)",
"def traverse_postorder(operation):\n\n nodes_postorder = []\n def recurse(node):\n if isinstance(node, Operation):\n for input_node in node.input_nodes:\n recurse(input_node)\n nodes_postorder.append(node)\n\n recurse(operation)\n return nodes_postorder",
"def generate_operations(self):\n combinations = self.COMBINATIONS.items()[:self.limit]\n for (term1, term2), type in combinations:\n yield (term1, term2, type)",
"def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(\n op_ctor(op0, op1, precision=precision)\n )\n # assigning attributes to the resulting node\n result = local_list[0]\n result.set_attributes(**kw)\n return result",
"def traverse_postorder(operation):\n\n nodes_postorder = []\n def recurse(node):\n if isinstance(node, Operation):\n for input_node in node.input_nodes:\n recurse(input_node)\n nodes_postorder.append(node)\n\n recurse(operation)\n return nodes_postorder",
"def cross(A, B):\n return [a+b for a in A for b in B]",
"def cartesianProduct(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set([t for t in itertools.product(lhs, rhs)])",
"def test_concat_get_op_product_graph(self):\n\n tf.compat.v1.reset_default_graph()\n\n _ = concat_model()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['concat_model/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(2, conn_graph.branch_count)\n self.assertEqual(13, len(conn_graph.get_all_ops()))\n self.assertEqual(12 + len(tf.compat.v1.get_default_graph().get_collection('variables')),\n len(conn_graph.get_all_products()))\n\n # Check that the order of input products to the concat op matches the order of input tensors in the tf graph\n concat_tf_op = tf.compat.v1.get_default_graph().get_operation_by_name(\"concatenate/concat\")\n concat_op = conn_graph.get_all_ops()['concatenate/concat']\n for index, product in enumerate(concat_op.get_input_products()):\n self.assertTrue(len(product.consumers) == 1)\n self.assertEqual(product.tensor_dict[product.consumers[0]], concat_tf_op.inputs[index])",
"def product(self):\n return self.left[:self.i] + self.right[self.i:], self.right[:self.i] + self.left[self.i:]",
"def __cross(self,A, B):\n return [s+t for s in A for t in B]",
"def expansion(self, actions):\n for action in actions: \n self.children[action[0]] = TreeNode()",
"def cross(a, b):\n return [s + t for s in a for t in b]",
"def test_split_get_op_product_graph(self):\n\n tf.compat.v1.reset_default_graph()\n\n _ = split_and_concat_model()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['split_and_concat_model/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(1, conn_graph.branch_count)\n self.assertEqual(9, len(conn_graph.get_all_ops()))\n self.assertEqual(8 + len(tf.compat.v1.get_default_graph().get_collection('variables')),\n len(conn_graph.get_all_products()))",
"def product(self):\n return self.right[self.i:] + self.left[:self.i], self.left[self.i:] + self.right[:self.i]",
"def concatenate(expression, stream):\n # fork the stream for each subexpression\n streams = itertools.tee(stream, len(expression.children))\n return itertools.chain.from_iterable(\n evaluate(expression, stream)\n for expression, stream in zip(expression.children, streams)\n )",
"def prod(self, x, y):\n return (self.basic_operation.reduce(x.original+y.original),\n self.operation1.prod(x.left, y.left),\n self.operation2.prod(x.right, y.right))",
"def apply(self) -> Operation:\n op = self.popleft()\n op()\n return op",
"def prod(self, args):\n assert len(args) > 0, \"Cannot compute an empty product in a semigroup\"\n return prod(args[1:], args[0])",
"def compile(self, seq, registers):\n\n # Check which modes are actually being used\n used_modes = []\n for operations in seq:\n modes = [modes_label.ind for modes_label in operations.reg]\n used_modes.append(modes)\n\n used_modes = list(set(item for sublist in used_modes for item in sublist))\n\n # dictionary mapping the used modes to consecutive non-negative integers\n dict_indices = {used_modes[i]: i for i in range(len(used_modes))}\n nmodes = len(used_modes)\n\n # We start with an identity then sequentially update with the gate transformations\n T = np.identity(nmodes, dtype=np.complex128)\n\n # Now we will go through each operation in the sequence `seq` and apply it to T\n for operations in seq:\n name = operations.op.__class__.__name__\n params = par_evaluate(operations.op.p)\n modes = [modes_label.ind for modes_label in operations.reg]\n if name == \"Rgate\":\n G = np.exp(1j * params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"LossChannel\":\n G = np.sqrt(params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"Interferometer\":\n U = params[0]\n if U.shape == (1, 1):\n T = _apply_one_mode_gate(U[0, 0], T, dict_indices[modes[0]])\n elif U.shape == (2, 2):\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n U_expand = np.eye(nmodes, dtype=np.complex128)\n U_expand[np.ix_(modes, modes)] = U\n T = U_expand @ T\n elif name == \"PassiveChannel\":\n T0 = params[0]\n if T0.shape == (1, 1):\n T = _apply_one_mode_gate(T0[0, 0], T, dict_indices[modes[0]])\n elif T0.shape == (2, 2):\n T = _apply_two_mode_gate(T0, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n T0_expand = np.eye(nmodes, dtype=np.complex128)\n T0_expand[np.ix_(modes, modes)] = T0\n T = T0_expand @ T\n elif name == \"BSgate\":\n G = _beam_splitter_passive(params[0], params[1])\n T = _apply_two_mode_gate(G, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"MZgate\":\n v = np.exp(1j * params[0])\n u = np.exp(1j * params[1])\n U = 0.5 * np.array([[u * (v - 1), 1j * (1 + v)], [1j * u * (1 + v), 1 - v]])\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"sMZgate\":\n exp_sigma = np.exp(1j * (params[0] + params[1]) / 2)\n delta = (params[0] - params[1]) / 2\n U = exp_sigma * np.array(\n [[np.sin(delta), np.cos(delta)], [np.cos(delta), -np.sin(delta)]]\n )\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n\n ord_reg = [r for r in list(registers) if r.ind in used_modes]\n ord_reg = sorted(list(ord_reg), key=lambda x: x.ind)\n\n return [Command(ops.PassiveChannel(T), ord_reg)]",
"def Chain(A, B):\n return _prodOperator(B, A)",
"def dissociate(op, args):\n result = []\n def collect(subargs):\n for arg in subargs:\n if arg.op == op: collect(arg.args)\n else: result.append(arg)\n collect(args)\n return result",
"def _expand_combinations(kernel: gpflow.kernels.Kernel) -> List[gpflow.kernels.Kernel]:\n if isinstance(kernel, (gpflow.kernels.Sum, gpflow.kernels.Product)):\n return kernel.children.values()\n\n return []",
"def _concat_rows_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"ConcatRowsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.ConcatRowsNode\"\n )\n common_columns = [c for c in op.columns_produced() if c != op.id_column]\n inputs = [self._compose_polars_ops(s, data_map=data_map) for s in op.sources]\n assert len(inputs) == 2\n inputs = [input_i.select(common_columns) for input_i in inputs] # get columns in same order\n if op.id_column is not None:\n inputs[0] = inputs[0].with_columns([_build_lit(op.a_name).alias(op.id_column)])\n inputs[1] = inputs[1].with_columns([_build_lit(op.b_name).alias(op.id_column)])\n res = pl.concat(inputs, how=\"vertical\")\n return res",
"def crossProduct4( set1, set2 ):\n set1 = asarray( set1, _aformat(set1))\n set1 = reshape( set1, (-1, 4))\n set2 = asarray( set2, _aformat(set1))\n set2 = reshape( set2, (-1, 4))\n result = zeros( (len(set1),4), _aformat(set1))\n result[:,:3] = cross( set1[:,:3],set2[:,:3])\n result[:,3] = 1.0\n return result",
"def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)",
"def crossProduct( set1, set2):\n set1 = asarray( set1, _aformat(set1))\n set1 = reshape( set1, (-1, 3))\n set2 = asarray( set2, _aformat(set2))\n set2 = reshape( set2, (-1, 3))\n return cross( set1, set2 )",
"def ops_frac_to_cart(ops_flat, lattice):\n cart_ops = []\n for op in ops_flat:\n rot = [op[0:3], op[3:6], op[6:9]]\n trans = op[9:12]\n rot, trans = _operation_frac_to_cart(lattice, rot, trans)\n cart_ops.append(rot[0] + rot[1] + rot[2] + trans)\n return cart_ops",
"def tensor_product(G, H):\n GH = _init_product_graph(G, H)\n GH.add_nodes_from(_node_product(G, H))\n GH.add_edges_from(_directed_edges_cross_edges(G, H))\n if not GH.is_directed():\n GH.add_edges_from(_undirected_edges_cross_edges(G, H))\n return GH",
"def extract_operators(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n\n elif isinstance(e, Mul):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n return list(set(ops))",
"def _tree_getitem(cls, op):\n out_series = op.outputs[0]\n combine_size = options.combine_size\n chunks = op.inputs[0].chunks\n while len(chunks) > combine_size:\n new_chunks = []\n for i in range(0, len(chunks), combine_size):\n chks = chunks[i : i + combine_size]\n if len(chks) == 1:\n chk = chks[0]\n else:\n concat_op = DataFrameConcat(output_types=[OutputType.series])\n chk = concat_op.new_chunk(chks, dtype=chks[0].dtype)\n chk_op = SeriesIndex(labels=op.labels, is_intermediate=True)\n kw = {\"name\": out_series.name} if hasattr(out_series, \"name\") else {}\n chk = chk_op.new_chunk(\n [chk],\n shape=(np.nan,),\n dtype=chk.dtype,\n index_value=parse_index(pd.RangeIndex(-1)),\n **kw,\n )\n new_chunks.append(chk)\n chunks = new_chunks\n\n concat_op = DataFrameConcat(output_types=[OutputType.series])\n kw = {\"name\": out_series.name} if hasattr(out_series, \"name\") else {}\n kw[\"index\"] = (0,)\n chk = concat_op.new_chunk(chunks, dtype=chunks[0].dtype, **kw)\n index_op = SeriesIndex(labels=op.labels)\n chunk = index_op.new_chunk([chk], dtype=chk.dtype, **kw)\n new_op = op.copy()\n nsplits = ((len(op.labels),),) if isinstance(op.labels, list) else ()\n kw = out_series.params\n kw[\"nsplits\"] = nsplits\n kw[\"chunks\"] = [chunk]\n return new_op.new_tileables(op.inputs, kws=[kw])",
"def binary_ops(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n for type, expr_group in ctx.groupby_type():\n if type in (bool, Callable):\n continue\n # TODO: Allow tuple comparisons?\n if TypeAnnotation(type).iterable:\n continue\n\n for commutative_operator in self.commutative_operators:\n for left, right in combinations(expr_group, 2):\n yield AnnotatedExpression(\n ast.BinOp(\n left=left.expr, op=commutative_operator(), right=right.expr\n ),\n TypeAnnotation(type),\n )\n for dependent_operator in self.non_commutative_operators:\n for left, right in permutations(expr_group, 2):\n yield AnnotatedExpression(\n ast.BinOp(\n left=left.expr, op=dependent_operator(), right=right.expr\n ),\n TypeAnnotation(type),\n )",
"def extract_all_operators(e_orig):\n if debug:\n print(\"extract_all_operators: \", e_orig)\n\n if isinstance(e_orig, Operator):\n return [e_orig]\n\n e = drop_c_number_terms(normal_ordered_form(e_orig.expand(),\n independent=True))\n\n if isinstance(e, Pow) and isinstance(e.base, Operator):\n return [e]\n\n ops = []\n\n if isinstance(e, Add):\n for arg in e.args:\n ops += extract_all_operators(arg)\n\n if isinstance(e, Mul):\n op_f = [f for f in e.args if (isinstance(f, Operator) or\n (isinstance(f, Pow) and\n isinstance(f.base, Operator)))]\n ops.append(Mul(*op_f))\n ops += op_f\n\n unique_ops = list(set(ops))\n\n sorted_unique_ops = sorted(unique_ops, key=operator_order)\n\n return sorted_unique_ops",
"def flatten_exprseq_outside_computation(topconstruct):\n # expr seq are programs with parenthesis and\n # +/- sadly this is how parenthesis in computations end up\n for _, eseq_it in enumerate(\n query(\n #[is_layering([[TOP_LEVEL, syntax.PROGRAM], syntax.EXPR_SEQ])],\n [is_layering([syntax.EXPR_SEQ, syntax.PROGRAM])],\n TreeItem(topconstruct))\n ):\n eseq = eseq_it.construct\n eseq_it.replace_construct(eseq.args[0])",
"def test_pull_out_scalars():\n hs = LocalSpace(\"sys\")\n A = OperatorSymbol('A', hs=hs)\n B = OperatorSymbol('B', hs=hs)\n alpha, beta = symbols('alpha, beta')\n assert Commutator.create(alpha * A, B) == alpha * Commutator(A, B)\n assert Commutator.create(A, beta * B) == beta * Commutator(A, B)\n assert Commutator.create(alpha * A, beta * B) == alpha * beta * Commutator(\n A, B\n )",
"def union(cls, forms):\n \"\"\"This function must be recursive.\"\"\"\n if len(forms) == 2:\n return cls.unionTwoForms(forms[0], forms[1])\n else:\n pass\n\n result = forms[0]\n for form in forms[1:]:\n result.extend(cls.union(form, result))\n return result",
"def TensorProduct(**kw_kernels):\n return Composite('*', **kw_kernels)",
"def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)",
"def concat(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_concat = Automaton()\n nfa_concat.final = nfa2_star.final\n nfa_concat.q_0 = nfa1_star.q_0\n nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n for a in nfa1_star.final:\n key = a + ', .'\n if nfa_concat.transition.get(key, 0) == 0:\n nfa_concat.transition[key] = [nfa2_star.q_0]\n else:\n nfa_concat.transition[key].append(nfa2_star.q_0)\n\n self.aut_stack.append(nfa_concat)",
"def unary_op(self):\n return plist([op(x) for x in self], root=self.__root__)",
"def cu_for_merge(self, best1, best2, undo=True):\n\t\t#TODO - Might want to consider adding the instance to the merged node.\n\t\tfirst = best1\n\t\tsecond = best2\n\n\t\tif second < first:\n\t\t\ttemp = first \n\t\t\tfirst = second \n\t\t\tsecond = temp\n\n\t\tfirst_c = self.tree.children[first]\n\t\tsecond_c = self.tree.children[second]\n\n\t\tnew_c = self.tree.makeTree(self.tree.root, self.tree)\n\t\tnew_c.utility.update_counts_from_node(first_c)\n\t\tnew_c.utility.update_counts_from_node(second_c)\n\n\t\tself.tree.children.pop(second)\n\t\tself.tree.children.pop(first)\n\t\tself.tree.children.append(new_c)\n\n\t\tcu = self.utility.category_utility()\n\n\t\tif undo:\n\t\t\tself.tree.children.pop()\n\t\t\tself.tree.children.insert(first,first_c)\n\t\t\tself.tree.children.insert(second,second_c)\n\t\telse:\n\t\t\t# If we aren't undoing the merge then we have to add the leaves\n\t\t\tnew_c.children.append(first_c)\n\t\t\tfirst_c.parent = new_c\n\t\t\tnew_c.children.append(second_c)\n\t\t\tsecond_c.parent = new_c\n\t\t\tself.tree.mergedNodes.append([first_c, second_c])\n\t\t\tself.tree.splitMergeOrder.append(\"m\")\n\t\t\tfor m in self.tree.mergedNodes:\n\t\t\t\tif len(m) > 2 and first_c in m and second_c in m:\n\t\t\t\t\tm.remove(first_c)\n\t\t\t\t\tm.remove(second_c)\n\t\t\t\t\tm.append(new_c)\n\t\t\tfor s in self.tree.splitNodes:\n\t\t\t\tif len(s) > 2 and first_c in s and second_c in s:\n\t\t\t\t\ts.remove(first_c)\n\t\t\t\t\ts.remove(second_c)\n\t\t\t\t\ts.append(new_c)\n\t\treturn cu",
"def correlation_4op_3t(self, rho0, oplist, signature, tau):\n\n\n if len(oplist) != 4:\n raise ValueError('Number of operators is not 4.')\n\n a = operator_to_superoperator(oplist[0], signature[0])\n b = operator_to_superoperator(oplist[1], signature[1])\n c = operator_to_superoperator(oplist[2], signature[2])\n d = operator_to_superoperator(oplist[3], signature[3])\n\n for _ in oplist:\n if issparse(_):\n _ = _.toarray()\n\n\n # nmax = max(len(tau3), len(tau2), len(tau1))\n\n # g = -1j * (tau > 0) * self.propagator(tau)\n # G = np.zeros((self.dim, self.dim, len(tau)))\n\n # for n, elem in enumerate(g):\n # G[:,:, n] = g[n]\n\n if self.G is None:\n self.propagator(tau)\n\n G = self.G\n\n # unit operator in Liouville space\n N = self.dim\n idm = self.idm(sp=False)\n\n # print(d.shape, dm2vec(rho0.toarray()).shape)\n if issparse(rho0):\n rho = d.dot(dm2vec(rho0.toarray()))\n else:\n rho = d.dot(dm2vec(rho0))\n\n # print(type(rho), rho.shape)\n\n if issparse(rho): rho = rho.toarray()\n\n tmp = np.tensordot(G, rho, axes=((1), (0)))\n tmp = c.dot(tmp)\n tmp = np.tensordot(G, tmp, axes=([1], [0])) # ajk\n\n # tmp = np.einsum('dcj, ca, abk, b -> djk', G, c, G, rho)\n\n\n '''\n Scipy sparse matrix does not support dimensions more than 2, so\n ndarray has to be used for the tensor products.\n\n This can be improved by using sparse package.\n '''\n tmp = np.tensordot(b.todense(), tmp, axes=([1], [0]))\n # tmp = b.todense().dot(tmp)\n\n tmp = np.tensordot(G, tmp, axes=([1], [0]))\n\n return oe.contract('a, ab, bijk -> ijk', idm, a.todense(), tmp)\n\n # corr = np.einsum('a, ab, bck, bc, cdj, de, efi, f ->kji', idm, \\\n # left(a), G, left(b), left(c), G, left(d).dot(dm2vec(rho0))\n\n # return",
"def _cartesian_add(xs):\n return sum(prefer_static.reshape(x, shape=[-1] + [1]*(len(xs) - 1 - i))\n for i, x in enumerate(xs))",
"def compose_many(*fs):\n return reduce(compose, fs)",
"def reduce(self, app, nodes, result):",
"def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')",
"def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError(\"Composition of empty sequence not supported.\")",
"def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(a) / 2):len(a)]\n\n a21 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a21):\n a21[index] = row[0:int(len(row) / 2)]\n\n a22 = a[int(len(a) / 2):len(a)]\n for index, row in enumerate(a22):\n a22[index] = row[int(len(a) / 2):len(a)]\n\n b11 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b11):\n b11[index] = row[0:int(len(row) / 2)]\n\n b12 = b[0:int(len(b) / 2)]\n for index, row in enumerate(b12):\n b12[index] = row[int(len(b) / 2):len(b)]\n\n b21 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b21):\n b21[index] = row[0:int(len(row) / 2)]\n\n b22 = b[int(len(b) / 2):len(b)]\n for index, row in enumerate(b22):\n b22[index] = row[int(len(b) / 2):len(b)]\n\n c11 = matrix_add(recursive_multiply(a11, b11), recursive_multiply(a12, b21)) # C11 = A11*B11 + A12*B21\n c12 = matrix_add(recursive_multiply(a11, b12), recursive_multiply(a12, b22)) # C12 = A11*B12 + A12*B22\n c21 = matrix_add(recursive_multiply(a21, b11), recursive_multiply(a22, b21)) # C21 = A21*B11 + A22*B21\n c22 = matrix_add(recursive_multiply(a21, b12), recursive_multiply(a22, b22)) # C22 = A21*B12 + A22*B22\n\n # Append c12 to c11\n for row_index, row in enumerate(c11):\n for col_index, col in enumerate(c12):\n row.append(c12[row_index][col_index])\n\n # Append c22 to c21\n for row_index, row in enumerate(c21):\n for col_index, col in enumerate(c12):\n row.append(c22[row_index][col_index])\n\n # Append c21 to c11\n for i in c21:\n c11.append(i)\n\n return c11",
"def __opExpand2(self,that,op, out=None):\n A = self\n B = that if isinstance(that,Factor) else Factor([],that)\n vall = A.v | B.v\n dA = list(map(lambda x:x.states if x in A.v else 1 ,vall))\n dB = list(map(lambda x:x.states if x in B.v else 1 ,vall))\n if ( (out is not None) and (out.v == vall) ):\n f = out # if out can be written to directly, do so\n else: \n f = Factor(vall) # otherwise, make storage for output function\n op( A.t.reshape(dA,order='A') , B.t.reshape(dB,order='A'), out=f.t ) # TODO: order=A necessary?\n if (out is not None and f is not out):\n out.__build(f.v,f.t) # if out requested but not used, write f's table into out\n return out\n return f",
"def flatten(node: ir.Node) -> ir.Node:\n\n def visitor(node: ir.Node, args=None) -> ir.Node:\n if isinstance(node, ir.BinaryOp):\n\n # Flatten singleton BinaryOp\n if len(node.operand) == 1:\n return flatten(node.operand[0])\n\n # Flatten BinaryOp with reduction operators\n new_operator: List[str] = []\n new_operand: List[ir.Expr] = []\n for child_operator, child_operand in zip((None, *node.operator),\n node.operand):\n if child_operator is not None:\n new_operator.append(child_operator)\n # The first operator can always be flattened if two operations has the\n # same type.\n if child_operator in (None, '||', '&&', *'|&+*') and \\\n type(child_operand) is type(node):\n new_operator.extend(child_operand.operator)\n new_operand.extend(child_operand.operand)\n else:\n new_operand.append(child_operand)\n # At least 1 operand is flattened.\n if len(new_operand) > len(node.operand):\n return flatten(type(node)(operator=new_operator, operand=new_operand))\n\n # Flatten compound Operand\n if isinstance(node, ir.Operand):\n for attr in node.ATTRS:\n val = getattr(node, attr)\n if val is not None:\n if isinstance(val, ir.Node):\n return flatten(val)\n break\n else:\n raise util.InternalError('undefined Operand')\n\n # Flatten identity unary operators\n if isinstance(node, ir.Unary):\n minus_count = node.operator.count('-')\n if minus_count % 2 == 0:\n plus_count = node.operator.count('+')\n if plus_count + minus_count == len(node.operator):\n return flatten(node.operand)\n not_count = node.operator.count('!')\n if not_count % 2 == 0 and not_count == len(node.operator):\n return flatten(node.operand)\n\n # Flatten reduction functions\n if isinstance(node, ir.Call):\n operator = getattr(node, 'name')\n if operator in ir.REDUCTION_FUNCS:\n operands: List[ir.Expr] = []\n for operand in getattr(node, 'arg'):\n if (isinstance(operand, ir.Call) and\n getattr(operand, 'name') == operator):\n operands.extend(getattr(operand, 'arg'))\n else:\n operands.append(operand)\n if len(operands) > len(getattr(node, 'arg')):\n return flatten(ir.Call(name=operator, arg=operands))\n\n return node\n\n if not isinstance(node, ir.Node):\n return node\n\n return node.visit(visitor)",
"def reproduce(self, parents):\n children = []\n for i, parent in enumerate(parents):\n if i == len(parents)-1:\n parent2 = parents[0]\n else:\n parent2 = parents[i+1] if i % 2 == 0 else parents[i-1]\n child = self.crossover(parent, parent2)\n child = self.mutation(child)\n children.append(child)\n return children",
"def __mul__(self,other):\n return compositeORGenerator(left = self, right = other)",
"def tree_collapse(tree):\n leaves = tree_leaves(tree)\n leaves = list(map(lambda params: params.flatten(), leaves))\n return jnp.concatenate(leaves, axis=0)",
"def compose(*funcs):\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')",
"def concat_succ(L):\n if len(L) < 2:\n return L\n res = []\n last = L.pop()\n othe = L.pop()\n for i in last:\n for j in othe:\n if type(i) is list:\n if type(j) is list:\n res.append(i+j)\n else:\n res.append(i+[j])\n elif type(j) is list:\n res.append([i] + j)\n else:\n res.append([i] + [j])\n L = [res] + L\n return concat_succ(L)",
"def _produce_child(self, parents, method=\"uniform_swap\"):\n crossover_binary_op = None\n if method == \"uniform_swap\":\n crossover_binary_op = self._uniform_swap\n elif method == \"single_swap\":\n crossover_binary_op = self._single_swap\n elif method == \"arithmetic\":\n crossover_binary_op = lambda p1, p2: (p1 + p2) / 2\n child = parents[0].clone().detach()\n for parent in parents[1:]:\n child = crossover_binary_op(child, parent)\n return child",
"def compose(*functions):\n return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)",
"def _kron_core(*ops, stype=None, coo_build=False, parallel=False):\n tmp_stype = \"coo\" if coo_build or stype == \"coo\" else None\n reducer = par_reduce if parallel else functools.reduce\n return reducer(functools.partial(kron_dispatch, stype=tmp_stype), ops)",
"def concat(xs, axis=1):\n return Concat(axis=axis)(*xs)",
"def products(q_1: Qs, q_2: Qs, kind: str = \"\", reverse: bool = False) -> Qs:\n\n q_1_copy = deepcopy(q_1)\n q_2_copy = deepcopy(q_2)\n qs_left, qs_right = Qs(), Qs()\n\n # Diagonalize if need be.\n if ((q_1.rows == q_2.rows) and (q_1.columns == q_2.columns)) or (\n \"scalar_q\" in [q_1.qs_type, q_2.qs_type]\n ):\n\n if q_1.columns == 1:\n qs_right = q_2_copy\n qs_left = diagonal(q_1_copy, qs_right.rows)\n\n elif q_2.rows == 1:\n qs_left = q_1_copy\n qs_right = diagonal(q_2_copy, qs_left.columns)\n\n else:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n # Typical matrix multiplication criteria.\n elif q_1.columns == q_2.rows:\n qs_left = q_1_copy\n qs_right = q_2_copy\n\n else:\n print(\n \"Oops, cannot multiply series with row/column dimensions of {}/{} to {}/{}\".format(\n q_1.rows, q_1.columns, q_2.rows, q_2.columns\n )\n )\n\n # Operator products need to be transposed.\n operator_flag = False\n if qs_left in [\"op\", \"operator\"] and qs_right in [\"op\", \"operator\"]:\n operator_flag = True\n\n outer_row_max = qs_left.rows\n outer_column_max = qs_right.columns\n shared_inner_max = qs_left.columns\n projector_flag = (\n (shared_inner_max == 1) and (outer_row_max > 1) and (outer_column_max > 1)\n )\n\n result = [\n [q0(q_type=\"\") for _i in range(outer_column_max)]\n for _j in range(outer_row_max)\n ]\n\n for outer_row in range(outer_row_max):\n for outer_column in range(outer_column_max):\n for shared_inner in range(shared_inner_max):\n\n # For projection operators.\n left_index = outer_row\n right_index = outer_column\n\n if outer_row_max >= 1 and shared_inner_max > 1:\n left_index = outer_row + shared_inner * outer_row_max\n\n if outer_column_max >= 1 and shared_inner_max > 1:\n right_index = shared_inner + outer_column * shared_inner_max\n\n result[outer_row][outer_column] = add(result[outer_row][outer_column],\n product(qs_left.qs[left_index],\n qs_right.qs[right_index], kind=kind, reverse=reverse\n )\n )\n\n # Flatten the list.\n new_qs = [item for sublist in result for item in sublist]\n new_states = Qs(new_qs, rows=outer_row_max, columns=outer_column_max)\n\n if projector_flag or operator_flag:\n return transpose(new_states)\n\n else:\n return new_states",
"def call_actions(self, node):\n def inner_call_actions(node):\n sem_action = node.symbol.action\n if node.is_term():\n if sem_action:\n try:\n result = sem_action(node.context, node.value,\n *node.additional_data)\n except TypeError as e:\n raise TypeError('{}: terminal={} action={} params={}'\n .format(\n str(e),\n node.symbol.name,\n repr(sem_action),\n (node.context, node.value,\n node.additional_data))) from e\n else:\n result = node.value\n else:\n subresults = []\n # Recursive right to left, bottom up. Simulate LR\n # reductions.\n for n in reversed(node):\n subresults.append(inner_call_actions(n))\n subresults.reverse()\n\n if sem_action:\n assignments = node.production.assignments\n if assignments:\n assgn_results = {}\n for a in assignments.values():\n if a.op == '=':\n assgn_results[a.name] = subresults[a.index]\n else:\n assgn_results[a.name] = \\\n bool(subresults[a.index])\n if type(sem_action) is list:\n if assignments:\n result = \\\n sem_action[\n node.production.prod_symbol_id](\n node, subresults, **assgn_results)\n else:\n result = \\\n sem_action[\n node.production.prod_symbol_id](\n node.context, subresults)\n else:\n if assignments:\n result = sem_action(node.context, subresults,\n **assgn_results)\n else:\n result = sem_action(node.context, subresults)\n else:\n if len(subresults) == 1:\n # Unpack if single subresult\n result = subresults[0]\n else:\n result = subresults\n\n return result\n\n return inner_call_actions(node)",
"def build_triples(x, y, op_str):\n if op_str not in EXPECTED_OPS:\n raise ValueError(f\"{op_str} should be in {EXPECTED_OPS}\")\n\n session = x.session\n shape_x = x.shape\n shape_y = y.shape\n conf = session.config\n min_val = conf.min_value\n max_val = conf.max_value\n\n # TODO: Move this to a library specific file\n a = torch.randint(min_val, max_val, shape_x).long()\n b = torch.randint(min_val, max_val, shape_y).long()\n\n cmd = getattr(operator, op_str)\n c = modulo(cmd(a, b).long(), session)\n\n from sympc.tensor import AdditiveSharingTensor\n\n session_copy = session.get_copy()\n session_copy.config.enc_precision = 0\n\n a_sh = AdditiveSharingTensor(secret=a, session=session_copy)\n b_sh = AdditiveSharingTensor(secret=b, session=session_copy)\n c_sh = AdditiveSharingTensor(secret=c, session=session_copy)\n\n return a_sh, b_sh, c_sh",
"def concatenate(sequence):\n\n return Profiles([x.data for y in sequence for x in y],\n [x.description for y in sequence for x in y])",
"def _create_concat(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.attrs[\"axis\"]\n if factor < 0:\n factor = len(inputs[0].shape\n ) + factor # in order to support the negative axis\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)",
"def _filter_execution_path_operations(self, operations, fetches):\n\n # If no fetch provided, then return all operations.\n if fetches is None:\n return set(operations)\n # Convert to list, if a single element is provided.\n if not isinstance(fetches, (list, tuple)):\n fetches = [fetches]\n # If a tensor is given as fetch, convert it to op.\n op_fetches = []\n for fetch in fetches:\n if isinstance(fetch, ops.Operation):\n op_fetches.append(fetch)\n elif isinstance(fetch, tensor_lib.Tensor):\n op_fetches.append(fetch.op)\n else:\n raise RuntimeError('Given fetch:%s is neither a tensor nor an op.'\n %fetch)\n\n execution_path_operations = set(op_fetches)\n traverse_stack = list(op_fetches)\n while True:\n if not traverse_stack:\n break\n head_op = traverse_stack.pop()\n input_ops = [tensor_input.op for tensor_input in head_op.inputs]\n input_ops.extend(head_op.control_inputs)\n\n for input_op in input_ops:\n if input_op not in execution_path_operations:\n # Filter out loop condition operations, tracing them causes a cycle.\n # Trace only the loop-body.\n if TensorTracer.loop_cond_op(input_op):\n continue\n execution_path_operations.add(input_op)\n traverse_stack.append(input_op)\n return execution_path_operations",
"def cartesian_graph(a):\n tile_a = tf.expand_dims(tf.tile(tf.expand_dims(a[0], 1), [1, tf.shape(a[1])[0]]), 2)\n tile_b = tf.expand_dims(tf.tile(tf.expand_dims(a[1], 0), [tf.shape(a[0])[0], 1]), 2)\n cart = tf.concat([tile_a, tile_b], axis=2)\n cart = tf.reshape(cart, [-1, 2])\n for c in a[2:]:\n tile_c = tf.tile(tf.expand_dims(c, 1), [1, tf.shape(cart)[0]])\n tile_c = tf.expand_dims(tile_c, 2)\n tile_c = tf.reshape(tile_c, [-1, 1])\n cart = tf.tile(cart, [tf.shape(c)[0], 1])\n cart = tf.concat([tile_c, cart], axis=1)\n return cart",
"def _iter_add(self, root):\n stack = [root]\n while stack:\n nodes = stack.pop()\n for node in nodes:\n if node in self._members:\n continue\n self._members.add(node)\n\n if isinstance(node, tf.Tensor):\n stack.append((node.op,))\n elif isinstance(node, tf.Operation):\n stack.append(node.inputs)",
"def calculate_sequence(e):\n result = []\n if isinstance(e, Dictation):\n # Manipulate the result by turning this expansion into its entire\n # parent chain so that it becomes a top level expansion in the sequence\n # with all relevant information denoted by the expansions wrapping it\n parent = e.parent\n while parent:\n e = type(parent)(e)\n\n # Go up to the next parent\n parent = parent.parent\n\n result.append(e)\n\n elif len(e.children) == 0:\n if isinstance(e, RuleRef) and dictation_in_expansion(e.rule.expansion):\n result.extend(calculate_sequence(e.rule.expansion))\n else:\n result.append(e)\n else:\n # Partition the children of the expansion so that dictation\n # expansions are placed after groups of normal expansions\n child_group = []\n\n # Remove and process each child from left to right\n while len(e.children) > 0:\n child_result = calculate_sequence(e.children.pop(0))\n\n # Process the child_result list\n for r in child_result:\n # Add child_group, the expansion r, and this expansion with\n # its remaining children to the result list appropriately\n if only_dictation_in_expansion(r): # fully processed\n # Add child_group to the result list appropriately\n new_expansion = generate_expansion_from_children(\n e, child_group\n )\n\n if new_expansion:\n result.append(new_expansion)\n\n # Reset child_group for the next partition\n child_group = []\n result.append(r)\n elif no_dictation_in_expansion(r): # no processing required\n child_group.append(r)\n\n elif dictation_and_literals_in_expansion(r):\n # Add child_group to the result list appropriately\n new_expansion = generate_expansion_from_children(\n e, child_group\n )\n\n if new_expansion:\n result.append(new_expansion)\n\n # Reset child_group for the next partition\n child_group = []\n\n # Append this expansion for further processing if it\n # still has children\n if len(e.children) >= 1:\n result.append(e)\n\n # If there are still children left in child_group, they must be\n # appended to the result appropriately\n if len(child_group) >= 1:\n new_expansion = generate_expansion_from_children(\n e, child_group\n )\n if new_expansion:\n result.append(new_expansion)\n\n return result",
"def poli_op(op, px, qx):\n if op == 1: # suma\n sol = [0 for _ in range(max(len(px), len(qx)))]\n for x in range(len(sol)): # això fa la suma. jo tampoc l'entendré d'aquí un mes, but it works\n if x < len(px):\n sol[-x-1] += px[-x-1]\n if x < len(qx):\n sol[-x-1] += qx[-x-1]\n\n elif op == 2: # resta\n sol = [0 for _ in range(max(len(px), len(qx)))]\n for x in range(len(sol)): # això fa la resta. jo tampoc l'entendré d'aquí un mes, but it works\n if x < len(px):\n sol[-x-1] += px[-x-1]\n if x < len(qx):\n sol[-x-1] -= qx[-x-1]\n\n elif op == 3: # multi\n sol = [0 for _ in range(len(px) + len(qx) - 1)]\n for x in range(len(px)):\n for y in range(len(qx)):\n if px[-x-1] and qx[-y-1]:\n sol[-(x+y)-1] += px[-x-1] * qx[-y-1]\n\n elif op == 4: # divi\n ...\n\n return sol",
"def convert_concat(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.ConcatenationOptions import ConcatenationOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) > 1, \"input tensors length should be greater than 1\"\n\n data_nodes = [self.tensor_tab[t.tensor_idx] for t in input_tensors]\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions\n op_options = op.BuiltinOptions()\n concat_options = ConcatenationOptions()\n concat_options.Init(op_options.Bytes, op_options.Pos)\n concat_dim = concat_options.Axis()\n fused_activation_fn = concat_options.FusedActivationFunction()\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Concat operator with fused activation is not supported yet.'\n\n out_nodes = self.nn_concat(concat_dim, data_nodes, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes",
"def _compile_ops(self, parameters, space_group):\n ke = self.wave_numbers[0]\n kw = self.wave_numbers[1]\n ki = self.wave_numbers[2:]\n mu = 1\n cavities = self.cavities\n ops = {}\n def add(i, j, op, key='default'):\n if (i, j) not in ops:\n ops[(i, j)] = {key: op}\n else:\n if key in ops[(i, j)]:\n raise ValueError(\"Duplicate key value provided in operator construction\")\n else:\n ops[(i, j)][key] = op\n\n # cavities\n for row, _ in enumerate(cavities):\n for col, _ in enumerate(cavities):\n if row == col:\n add(\n row, col,\n -1 * self.multitrace_operator(ki[row], mu, cavities[row], parameters=parameters, space_group=space_group)\n )\n add(\n row, col,\n -1 * self.multitrace_operator(kw, mu, cavities[row], parameters=parameters, space_group=space_group),\n key='wall'\n )\n else:\n add(\n row, col,\n -1 * self.multitrace_operator(kw, mu, cavities[col], target=cavities[row], parameters=parameters, space_group=space_group)\n ),\n # # self to wall\n add(\n row, col+1,\n self.multitrace_operator(kw, mu, self.main, target=cavities[row], parameters=parameters, space_group=space_group)\n )\n \n for col, cavity in enumerate(cavities):\n add(\n row+1, col,\n -1 * self.multitrace_operator(kw, mu, cavity, target=self.main, parameters=parameters, space_group=space_group)\n )\n \n # external boundary\n add(\n row+1, col+1,\n self.multitrace_operator(kw, mu, self.main, parameters=parameters, space_group=space_group),\n key='wall'\n\n )\n add(\n row+1, col+1,\n self.multitrace_operator(ke, mu, self.main, parameters=parameters, space_group=space_group),\n key='exterior'\n )\n # finished\n return ops",
"def test_merge_only(self):\r\n x, y, z = tensor.vectors('x', 'y', 'z')\r\n t = x * y\r\n self.check([\r\n (x, t, (({}, False), ({t: x}, True))),\r\n (t * 2, x * 2, (({}, False), ({t: x}, True), )),\r\n (x * x, x * y, (({}, False), ({y: x}, True), )),\r\n (x * x, x * y, (({}, False), ({y: x}, True), )),\r\n (x * x + z, x * y + t, (({}, False),\r\n ({y: x}, False),\r\n ({y: x, t: z}, True))),\r\n ],\r\n debug=False)",
"def collect_numbers_and_powers(op_sum):\n return OperatorSum([collect_numbers(collect_powers(op))\n for op in op_sum.operators])",
"def combine(self):\n # If the contents of this command should be hidden from the main .cfg,\n # discard them.\n if self.hide_children:\n return \"\"\n\n # Set the evaluation state of this instance to COMBINE, as its code has\n # been generated.\n self.eval_state = COMMAND_EVAL_COMBINE\n\n # output will store the contents of this instance; meaning its code and\n # the code of its children.\n output = []\n\n # Loop through children and evaluate them.\n for ch in self.children:\n # Only evaluate children if they haven't been yet (i.e., their eval\n # state is not COMMAND_EVAL_COMBINE)\n if ch.eval_state == COMMAND_EVAL_REGISTER:\n gen = ch.generate()\n if gen is not None:\n output.append('alias \"'+str(ch)+'\" \"'+gen+'\"')\n output.extend(ch.combine())\n\n return output",
"def _op(op):\n def _process(self, ty, args=None, result=None, **metadata):\n if args is None:\n args = []\n assert ty is not None\n assert isinstance(args, list), args\n assert not any(arg is None for arg in flatten(args)), args\n result = Op(op, ty, args, result)\n if metadata:\n result.add_metadata(metadata)\n self._insert_op(result)\n return result\n\n def _process_void(self, *args, **kwds):\n result = kwds.pop('result', None)\n op = _process(self, types.Void, list(args), result)\n if kwds:\n op.add_metadata(kwds)\n return op\n\n if ops.is_void(op):\n build_op = _process_void\n else:\n build_op = _process\n\n if config.op_verify:\n build_op = op_verifier(build_op)\n\n return build_op",
"def map_product(process):\n\n process_params1 = set_extra_values(process['arguments'])\n process_params2 = get_process_params(process['arguments'], {'ignore_nodata': 'bool'})\n \n return map_default(process, 'product', 'reduce', {**process_params1, **process_params2})",
"def compose(*funcs):\n return reduce(lambda f, g: lambda x: f(g(x)), funcs[::-1])",
"def logical_op(self, other):\n if isinstance(other, plist):\n if len(self) == len(other):\n try:\n return plist([op(x, o) for x, o in zip(self, other)])\n except Exception:\n pass\n self_flat = self.ungroup(-1)\n other_flat = other.ungroup(-1)\n ids = op(set([id(x) for x in self_flat]),\n set([id(x) for x in other_flat]))\n if op is operator.__and__ or op is operator.__iand__:\n return plist([x for x in self_flat if id(x) in ids]) # Don't pass root -- we are uprooting\n else:\n return plist(\n [ids.remove(id(x)) or x for x in self_flat if id(x) in ids] +\n [ids.remove(id(x)) or x for x in other_flat if id(x) in ids]\n ) # Don't pass root -- we are uprooting\n else:\n return plist([op(x, other) for x in self], root=self.__root__)",
"def assemble_operator(self, parameters, space_group='default'):\n N = len(self.cavity_grid.cavities) # system size\n operators = self.get_ops(parameters, space_group)\n\n A = assembly.BlockedOperator((N+1) * 2, (N+1) * 2)\n for (row, col), ops_dict in operators.items():\n ops = list(ops_dict.values())\n op_sum = sum(ops[1:], ops[0])\n assign_in_place_subblock(A, op_sum, row, col)\n\n return A"
] | [
"0.5873993",
"0.5623249",
"0.5586423",
"0.55603653",
"0.53981346",
"0.5373104",
"0.5341948",
"0.533148",
"0.5324388",
"0.5261074",
"0.5190329",
"0.5190329",
"0.51866263",
"0.5185752",
"0.51829857",
"0.517488",
"0.51386654",
"0.5113341",
"0.51113427",
"0.50928247",
"0.50926036",
"0.5075281",
"0.50709325",
"0.50672024",
"0.5062822",
"0.5050295",
"0.5047414",
"0.50362426",
"0.5031713",
"0.5031389",
"0.50128424",
"0.4996245",
"0.49954483",
"0.49913847",
"0.49836758",
"0.4982098",
"0.4979624",
"0.49500558",
"0.4942456",
"0.49412745",
"0.49312586",
"0.4924134",
"0.4903102",
"0.4886316",
"0.4883272",
"0.4866946",
"0.48554713",
"0.4849003",
"0.48420733",
"0.48371676",
"0.48273495",
"0.4820581",
"0.48202404",
"0.48161906",
"0.48094296",
"0.48052344",
"0.48013005",
"0.4787384",
"0.47869232",
"0.47811314",
"0.47761193",
"0.47731343",
"0.47575757",
"0.47568694",
"0.4756281",
"0.47559807",
"0.47455978",
"0.4745051",
"0.47405136",
"0.47386685",
"0.4729326",
"0.47278193",
"0.47268847",
"0.47266608",
"0.4725949",
"0.471958",
"0.47071743",
"0.47044054",
"0.46972302",
"0.46935877",
"0.46934497",
"0.4690647",
"0.46806133",
"0.46761817",
"0.46744257",
"0.46721664",
"0.4671105",
"0.46708274",
"0.4670687",
"0.4658654",
"0.46517673",
"0.46488813",
"0.46443954",
"0.46376348",
"0.46356845",
"0.4632885",
"0.4629195",
"0.46277776",
"0.46230504",
"0.46190172"
] | 0.605311 | 0 |
Set the test up. | def setup_class(cls):
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = cls.t / dir_path
src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
shutil.copytree(Path(CUR_PATH, "data", "dummy_aea"), Path(cls.t, "dummy_aea"))
os.chdir(Path(cls.t, "dummy_aea"))
cls.runner = CliRunner() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n logging.debug('setting up')",
"def setUp(self):\n logging.debug('setting up')",
"def setUp(self):\n\n self._set_up()",
"def setUp(self):\n MainTests.setUp(self)",
"def setUp(self):\n \n pass",
"def setUp(self):\n\n # setup init variables\n self.init_vars = {\n 'suppress_logfile': True,\n 'verbosity': 0,\n 'mothur_seed': 54321,\n }\n\n # setup directories for testing\n test_dir = os.path.join(os.getcwd(), 'tests')\n self.test_output_dir = os.path.join(test_dir, 'test_output')\n if not os.path.isdir(self.test_output_dir):\n os.makedirs(self.test_output_dir)\n self.test_input_dir = os.path.join(test_dir, 'test_data')\n\n return",
"def setUp(self):\n print(\"New test by Nikolay Melnik\")",
"def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')",
"def setUp(self):\n test_env_setup()",
"def setUp(self):\n\n pass",
"def setUp(self):\n\n pass",
"def setUp(self) :\n pass",
"def setUp(self):\n self.setup_beets()",
"def setUp(self):\n\n return",
"def setUp(self) -> None:\n pass",
"def setUp(self) -> None:\n pass",
"def setUp(self):\n pass #because we dont have anything to setup.",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def _set_up():\n repl._setUp = self.setUp",
"def setUp(self):\n setUp()",
"def setUp(self):\n print('Calling \\'setUp\\'')",
"def setUp(self):\n\n BaseTest.setUp(self)",
"def setUp(self):\n self",
"def setUp(self):\n self",
"def setUp(self):\r\n pass",
"def setup(self):\n # Have to wait for a server connection before we\n # can run the test\n self.wait_for_server_connections(10)",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\r\n pass # nothing used by all\r",
"def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()",
"def setUp(self):\r\n pass # nothing required by all\r",
"def setUp(self):\n # Used to initialize objects that should be re-initialized or\n # re-created for each individual test\n self.t = Task()\n\n self.t.config(\"alias.from\", \"to\")",
"def setUp(self):\n print(\"\\nIn setUp()...\")",
"def setUp(self):\n\t\tself.testCases = [\n\t\t\t{\n\t\t\t\t'show': \"House\",\n\t\t\t\t'episode': 11,\n\t\t\t\t'season': 3,\n\t\t\t\t'title': \"Words and Deeds\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Lost\",\n\t\t\t\t'episode': 21,\n\t\t\t\t'season': 2,\n\t\t\t\t'title': \"?\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Heroes\",\n\t\t\t\t'episode': 15,\n\t\t\t\t'season': 1,\n\t\t\t\t'title': \"Run!\"\n\t\t\t}\n\t\t]",
"def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass",
"def setUp(self):\n super(BasicTestCase, self).setUp()",
"def setUp(self):\n raise NotImplementedError",
"def setUp(self):\n self.db_fd, mainPyUnit.app.config['DATABASE'] = tempfile.mkstemp()\n mainPyUnit.app.config['TESTING'] = True\n self.app = mainPyUnit.app.test_client()\n #mainPyUnit.init_db()",
"def setUp(self):\n\n # Setup for all test cases.\n controllers = com.discover_controllers_on_network()\n self.controller, _, connected = com.connect_robot_with_ipaddr(controllers, '127.0.0.1')\n if not connected:\n print 'Couldn\\'t connect to controller. Test will not be run.'\n sys.exit()\n is_logged_in, _ = user_auth.logon_robot_controller_default(self.controller)\n if not is_logged_in:\n print 'Couldn\\'t log in. Test will not be run.'\n sys.exit()\n\n # Additional setup for some test cases.\n test_desc = self.shortDescription()\n if test_desc == 'Tests edit_and_write_rapid_data_property with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()\n elif test_desc == 'Tests edit_and_write_rapid_data with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()",
"def setup( self ):",
"def setUp(self) -> None:\n self.engine = EvalHPOA()",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setUp(self):\n self.example = Example()",
"def setUpTestCase(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setUp(self) -> None:\n\n self.checker = CheckerBase()",
"def setup(self) -> None:",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }",
"def setUp(self):\n self.t = Task()",
"def setUp(self):\n self.t = Task()",
"def setUp(self):\n super().setUp()\n self.runner = CliRunner()",
"def setUp(self):\r\n super(EETestCase, self).setUp()"
] | [
"0.82482773",
"0.82482773",
"0.81176686",
"0.800283",
"0.7907327",
"0.78918254",
"0.7887326",
"0.7848355",
"0.7842833",
"0.7832785",
"0.7832785",
"0.781454",
"0.78136706",
"0.7806924",
"0.78026885",
"0.78026885",
"0.77940094",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7766595",
"0.77608186",
"0.77478987",
"0.7743035",
"0.76929235",
"0.76929235",
"0.768341",
"0.7623276",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.75897497",
"0.75282216",
"0.7513549",
"0.7501416",
"0.7496145",
"0.7493589",
"0.7474445",
"0.7467448",
"0.7464891",
"0.7457519",
"0.7449974",
"0.7449959",
"0.74333304",
"0.7428299",
"0.7428299",
"0.7428299",
"0.7425823",
"0.74212027",
"0.74118286",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7376384",
"0.7364325",
"0.7359819",
"0.7359819",
"0.7359506",
"0.73563415",
"0.73563415",
"0.73493826",
"0.73490524"
] | 0.0 | -1 |
Test getting the agent name. | def test_get_agent_name(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "agent.agent_name"],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "Agent0\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def get_test_name(request):\n return request.node.name",
"def get_name():",
"def GetModernizedTestName(self, arg):\n return arg",
"def get_name() -> str:\n pass",
"def get_name() -> str:",
"def botname(self):\n return settings.AIM_USERNAME",
"def testbed_name(self): \n return \"C-Lab\"",
"def getAgentID(self):\n\t\treturn self.agentID",
"def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)",
"def agent(self):\n return self.__agent",
"def get_name():\n return \"Boss\"",
"def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_get_application_name():\n\n assert application_services.get_application_name() == 'tests.unit'",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def test_get_component_name(initialized_bmi):\n name = initialized_bmi.get_component_name()\n assert isinstance(name, str)\n\n return name",
"def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def test_get_github_name_positive(self):\n self.assertIsNotNone(app.get_github_name(\"dhh\")[\"user\"])",
"def test_detector_name(i07_nexus_object_01: I07Nexus):\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021",
"def name(self) -> Dict[str, str]:\n self.__logger.debug('Eva.name called')\n return self.__http_client.name()",
"def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"",
"def test_bnd_mate_name():\n\n mate = bnd_mate_name(ALT, \"2\")\n assert mate == \"17\"",
"def get_name(self):\n pass",
"def get_name(self):\n pass",
"def test_name(self):\n g = h5g.create(self.fid, '/foobar')\n self.assertEqual(h5i.get_name(g), '/foobar')",
"def get_name(self):",
"def get_name(self):",
"def test_getfriendlyname(\n fauxmo_server: pytest.fixture, simplehttpplugin_target: pytest.fixture\n) -> None:\n data = b'soapaction: \"urn:Belkin:service:basicevent:1#GetFriendlyName\"'\n\n resp = requests.post(\n \"http://127.0.0.1:12345/upnp/control/basicevent1\", data=data\n )\n assert resp.status_code == 200\n\n root = ET.fromstring(resp.text)\n assert root.find(\".//FriendlyName\").text == \"fake switch one\"",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def name(self) -> str:\n return self.inst['targetname']",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_getHostnameFromURL(self):\n agent = txrecaptcha._getAgent(self.reactor, self.url)\n contextFactory = agent._contextFactory\n self.assertRegexpMatches(contextFactory.hostname,\n '.*www\\.example\\.com')",
"def get_name(self) -> str:\n pass",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def _get_name(self):\n return self.name",
"def testGetName(self):\n\tself.assertEqual(self.emp.getName(),'Lin') # test getName() whether return correct answer\"\n\tself.assertNotEqual(self.emp2.getName(),'Lin')",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def given_name(self):\n if \"givenName\" in self._prop_dict:\n return self._prop_dict[\"givenName\"]\n else:\n return None",
"def given_name(self):\n if \"givenName\" in self._prop_dict:\n return self._prop_dict[\"givenName\"]\n else:\n return None",
"def get_name(self): #Doctests, pour tester directement les méthodes\n return self.__name",
"def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")",
"def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)",
"def get_name(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetName', self.handle)",
"def name():\n pass",
"def name():\n pass",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def test_get_name(self):\n\n self.metadata.create_or_update(data=self.create)\n\n res = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n self.assertEqual(res.name, self.entity.name)",
"def test_get_current_request_hostname(self):\r\n assert_is_none(get_current_request_hostname())",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name"
] | [
"0.6913163",
"0.69033694",
"0.6782439",
"0.6780959",
"0.6780959",
"0.6644797",
"0.6546011",
"0.63555205",
"0.6076936",
"0.6031",
"0.59842354",
"0.59433043",
"0.5923481",
"0.58773845",
"0.5849658",
"0.58205575",
"0.5818908",
"0.5801076",
"0.5792172",
"0.578954",
"0.575222",
"0.57455957",
"0.5730772",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.56884176",
"0.56689733",
"0.5660837",
"0.564893",
"0.56482047",
"0.56339383",
"0.5620987",
"0.5615942",
"0.5556728",
"0.5556728",
"0.55514944",
"0.55366784",
"0.55366784",
"0.55276746",
"0.55262285",
"0.55191886",
"0.55154043",
"0.55099106",
"0.54937285",
"0.5492086",
"0.5480317",
"0.54758",
"0.5472672",
"0.54684025",
"0.5464456",
"0.54573184",
"0.54573184",
"0.5451833",
"0.54513204",
"0.5442185",
"0.5435332",
"0.54254717",
"0.54254717",
"0.54198587",
"0.54166406",
"0.541476",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341"
] | 0.8780365 | 0 |
Test getting the agent name. | def test_get_agent_default_routing(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "agent.default_routing"],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "{}\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def get_test_name(request):\n return request.node.name",
"def get_name():",
"def GetModernizedTestName(self, arg):\n return arg",
"def get_name() -> str:\n pass",
"def get_name() -> str:",
"def botname(self):\n return settings.AIM_USERNAME",
"def testbed_name(self): \n return \"C-Lab\"",
"def getAgentID(self):\n\t\treturn self.agentID",
"def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)",
"def agent(self):\n return self.__agent",
"def get_name():\n return \"Boss\"",
"def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_get_application_name():\n\n assert application_services.get_application_name() == 'tests.unit'",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def test_get_component_name(initialized_bmi):\n name = initialized_bmi.get_component_name()\n assert isinstance(name, str)\n\n return name",
"def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def test_get_github_name_positive(self):\n self.assertIsNotNone(app.get_github_name(\"dhh\")[\"user\"])",
"def test_detector_name(i07_nexus_object_01: I07Nexus):\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021",
"def name(self) -> Dict[str, str]:\n self.__logger.debug('Eva.name called')\n return self.__http_client.name()",
"def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"",
"def test_bnd_mate_name():\n\n mate = bnd_mate_name(ALT, \"2\")\n assert mate == \"17\"",
"def get_name(self):\n pass",
"def get_name(self):\n pass",
"def test_name(self):\n g = h5g.create(self.fid, '/foobar')\n self.assertEqual(h5i.get_name(g), '/foobar')",
"def get_name(self):",
"def get_name(self):",
"def test_getfriendlyname(\n fauxmo_server: pytest.fixture, simplehttpplugin_target: pytest.fixture\n) -> None:\n data = b'soapaction: \"urn:Belkin:service:basicevent:1#GetFriendlyName\"'\n\n resp = requests.post(\n \"http://127.0.0.1:12345/upnp/control/basicevent1\", data=data\n )\n assert resp.status_code == 200\n\n root = ET.fromstring(resp.text)\n assert root.find(\".//FriendlyName\").text == \"fake switch one\"",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def name(self) -> str:\n return self.inst['targetname']",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_getHostnameFromURL(self):\n agent = txrecaptcha._getAgent(self.reactor, self.url)\n contextFactory = agent._contextFactory\n self.assertRegexpMatches(contextFactory.hostname,\n '.*www\\.example\\.com')",
"def get_name(self) -> str:\n pass",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def _get_name(self):\n return self.name",
"def testGetName(self):\n\tself.assertEqual(self.emp.getName(),'Lin') # test getName() whether return correct answer\"\n\tself.assertNotEqual(self.emp2.getName(),'Lin')",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def given_name(self):\n if \"givenName\" in self._prop_dict:\n return self._prop_dict[\"givenName\"]\n else:\n return None",
"def given_name(self):\n if \"givenName\" in self._prop_dict:\n return self._prop_dict[\"givenName\"]\n else:\n return None",
"def get_name(self): #Doctests, pour tester directement les méthodes\n return self.__name",
"def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")",
"def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)",
"def get_name(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetName', self.handle)",
"def name():\n pass",
"def name():\n pass",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def test_get_name(self):\n\n self.metadata.create_or_update(data=self.create)\n\n res = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n self.assertEqual(res.name, self.entity.name)",
"def test_get_current_request_hostname(self):\r\n assert_is_none(get_current_request_hostname())",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name",
"def _get_name(self):\n return self.__name"
] | [
"0.8780365",
"0.6913163",
"0.69033694",
"0.6782439",
"0.6780959",
"0.6780959",
"0.6644797",
"0.6546011",
"0.63555205",
"0.6076936",
"0.6031",
"0.59842354",
"0.59433043",
"0.5923481",
"0.58773845",
"0.5849658",
"0.58205575",
"0.5818908",
"0.5801076",
"0.5792172",
"0.578954",
"0.575222",
"0.57455957",
"0.5730772",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.56884176",
"0.56689733",
"0.5660837",
"0.564893",
"0.56482047",
"0.56339383",
"0.5620987",
"0.5615942",
"0.5556728",
"0.5556728",
"0.55514944",
"0.55366784",
"0.55366784",
"0.55276746",
"0.55262285",
"0.55191886",
"0.55154043",
"0.55099106",
"0.54937285",
"0.5492086",
"0.5480317",
"0.54758",
"0.5472672",
"0.54684025",
"0.5464456",
"0.54573184",
"0.54573184",
"0.5451833",
"0.54513204",
"0.5442185",
"0.5435332",
"0.54254717",
"0.54254717",
"0.54198587",
"0.54166406",
"0.541476",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341",
"0.5410341"
] | 0.0 | -1 |
Test getting the 'dummy' skill name. | def test_get_skill_name(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "skills.dummy.name"],
standalone_mode=False,
)
assert result.exit_code == 0
assert result.output == "dummy\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def fixture_microbial_sample_name():\n return \"microbial_name_test\"",
"def test_get_github_name_negative(self):\n self.assertIsNone(app.get_github_name(\"undefined_user12345\")[\"user\"])",
"def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)",
"def test_get_github_name_positive(self):\n self.assertIsNotNone(app.get_github_name(\"dhh\")[\"user\"])",
"def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)",
"def test_workon_name(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def testGetName(self):\n\tself.assertEqual(self.emp.getName(),'Lin') # test getName() whether return correct answer\"\n\tself.assertNotEqual(self.emp2.getName(),'Lin')",
"def test_with_only_names(self, do_student_launch, student_payload):\n del student_payload[\"email\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_2():\n\tname = \"Luke Skywalker\"\n\tassert name.lower() == api_call().json()['name'].lower()",
"def test_name(self):\n dtt = self.TDTT(when=self.txt_when)\n expected_name = self.txt_when\n self.assertEquals(expected_name, dtt.name)\n self.assertEquals(expected_name, '{}'.format(dtt))\n expected_logged = '{}({})'.format(dtt.typename(), self.txt_when)\n self.assertEquals(expected_logged, dtt.logged)",
"def name_test(item):\n return f\"{item['params']['interface']}:{item['expected']['state']}\"",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def test_names():\n first = get_name(\"As\")\n assert first == \"Arsenic\"\n\n second = get_name(\"Be\")\n assert second == \"Beryllium\"\n\n third = get_name(\"Li\")\n assert third == \"Lithium\"",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected",
"def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")",
"def test_skills(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['skills'] = [\n {'name': 'bot 1'},\n {'name': 'bot 2'},\n {'name': 'bot 3'},\n {'name': 'bot 4'},\n {'name': 'bot 5'},\n {'name': 'bot 6'},\n ]\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'bot 1')\n self.assertContains(response, 'bot 2')\n self.assertContains(response, 'bot 3')\n self.assertContains(response, 'bot 4')\n self.assertContains(response, 'bot 5')\n self.assertNotContains(response, 'bot 6')\n self.assertNotContains(response, 'Speed up your bot building process by '\n 'starting with one of our Templates from the store.')",
"def test_ask_yesno_german(self):\n skill = create_skill(lang='de-de')\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'ja'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_get_by_name1(self):\n pass",
"def test_get_player_names(self):\n INPUT.side_effect = ['A', 'M', 'Z', '']\n names = game.pig.get_player_names()\n self.assertEqual(names, ['A', 'M', 'Z'])",
"def test_get_tool_by_name(tmp_path, caplog, base_db):\r\n caplog.set_level(logging.DEBUG)\r\n tool = base_db.get_single_tool(FAKE_TOOL_INFO.get(\"name\"))\r\n assert tool.name == FAKE_TOOL_INFO.get(\"name\")\r\n tool = base_db.get_single_tool(\"non-existing\")\r\n assert not tool",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def fixture_other_case() -> str:\n return \"angrybird\"",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)",
"def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')",
"def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)",
"def test_api():\n # person id for one long time employee\n content = get_person(10050)\n assert content['preferredName'].endswith('immel')",
"def get_random_male_name ():\n return db_random_pop_default(DB_FIRST_MALE, \"John\")",
"def test_get_component_name(initialized_bmi):\n name = initialized_bmi.get_component_name()\n assert isinstance(name, str)\n\n return name",
"def get_name():\n return \"Boss\"",
"def name():\r\n return _random.choice([male_first(), female_first()])",
"def test_first_name(self) :\n\t\tformatted_name = get_formatted_name('janis','joplin')\n\t\tself.assertEqual(formatted_name,'Janis Joplin')",
"def test_dummy():",
"def test_full_name(self):\n current_resume = resume.objects.first()\n expected = 'Nicholas Bielinski'\n case = current_resume.full_name()\n self.assertEqual(case, expected)",
"def test_it_has_a_name():\n rob = Unicorn('Robert')\n assert rob.name == 'Robert'",
"def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')",
"def test_selection_name(self):\n skill = create_skill()\n skill.speak = mock.Mock()\n skill.get_response = mock.Mock()\n\n skill.get_response.return_value = 'octopus'\n\n options = ['a balloon', 'an octopus', 'a piano']\n response = skill.ask_selection(options, 'which is better')\n self.assertEqual(options[1], response)\n\n # Assert that the spoken sentence contains all options.\n spoken_sentence = skill.speak.call_args[0][0]\n for opt in options:\n self.assertTrue(opt in spoken_sentence)",
"def test_get_and_has__name_only():\n contract = ContractHandler.get(\"DataTokenTemplate\")\n assert contract.address[:2] == \"0x\"\n assert \"totalSupply\" in str(contract.abi)\n\n assert ContractHandler.has(\"DataTokenTemplate\")\n assert not ContractHandler.has(\"foo name\")",
"def test_legal_names(self):\n product_list = generate_products()\n for prod in product_list:\n noun = prod.name.split(\" \")[1]\n adjective = prod.name.split(\" \")[0]\n self.assertIn(noun, NOUNS)\n self.assertIn(adjective, ADJECTIVES)",
"def test_first_last(self):\r\n full_name = get_full_name('janis','joplin')\r\n # expected value\r\n self.assertEqual(full_name,'Janis Joplin')",
"def get_sample_name(self):\n\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['context_tags'].attrs['user_filename_input']\n\t\texcept Exception, e:\n\t\t\treturn None",
"def fixture_microbial_sample_id():\n return \"microbial_sample_test\"",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def __getitem__(self, skillName):\r\n return self.getSkill(skillName)",
"def get_name():",
"def get_test(arn=None):\n pass",
"def test_no_skill_request(self):\n actions.login(ADMIN_EMAIL)\n\n response = self.get(self.URL)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n result = transforms.loads(payload)\n\n self.assertEqual(['Date'], result['column_headers'])\n self.assertEqual([], result['data'])",
"def test_skill_created(self):\n\t\tself.skill.save()\n\t\tskill_instance = Skill.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tskill_instance.user,\n\t\t\tself.skill.user,\n\t\t\t'User don\\'t match.'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tskill_instance.tag,\n\t\t\tself.tag,\n\t\t\t'Skill tag\\'s don\\'t match.'\n\t\t)",
"def getSkill(self, skillName):\r\n if self.__contains__(skillName):\r\n return self.skills[skillName]\r\n return None",
"def test_table_name(self):\n obs = SampleTemplate._table_name(self.test_study.id)\n self.assertEqual(obs, \"sample_1\")",
"def test_hello_without_name(self, app_mock):\n\n # Call /api/hello using test client, storing the response in res \n res = app_mock.test_client().get('/api/hello')\n\n # Assert that the status code returned is 400\n assert res.status_code == 400\n \n # Assert that the response should be a json token as below\n assert json.loads(res.data) == {'message': 'No name specified', 'action': 'Please specify a name in query string'}",
"def nice_name():\n\n pass",
"def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_get_github_name_concrete(self):\n self.assertEqual(app.get_github_name(\"dhh\")[\"user\"][\"name\"], \"David Heinemeier Hansson\")",
"def give_names(x): \n if x == 0:\n return 'Lost'\n else:\n return 'Won/Broke Even'",
"def test_get_non_existent_skill_progress(self):\n self._build_sample_graph()\n student = models.Student(user_id='1')\n tracker = SkillCompletionTracker()\n result = tracker.get_skills_progress(student, [self.sc.id])\n self.assertEqual(SkillCompletionTracker.NOT_ATTEMPTED,\n result[self.sc.id][0])",
"def test_that_name_saved():\n custom_sum_name = \"custom_sum\"\n\n assert custom_sum.__name__ == custom_sum_name",
"def GetModernizedTestName(self, arg):\n return arg",
"def test_nice_name_returns_username(self):\n\n class UserNoName():\n username = 'my_username'\n\n def get_full_name(self):\n return None\n\n rendered = self.render_nice_name(UserNoName())\n\n self.assertEquals(rendered, 'my_username')",
"def get_name() -> str:\n pass",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"async def random_name(self):\r\n data_response = await self.http.endpoint(\"name\")\r\n return result.TextResult(data_response, target=\"name\")",
"def test_task_name(self, task_mock):\n from sosbeacon.event.message import get_student_broadcast_task\n\n student_key = Mock()\n student_key.urlsafe.return_value = \"STUDENTKEY\"\n\n event_key = Mock()\n event_key.urlsafe.return_value = \"EVENTKEY\"\n\n message_key = Mock()\n message_key.urlsafe.return_value = \"MESSAGEKEY\"\n\n batch_id = \"BATCHID\"\n\n get_student_broadcast_task(\n student_key, event_key, message_key, batch_id)\n\n task_name = task_mock.call_args[1]['name']\n self.assertIn('STUDENTKEY', task_name)\n self.assertNotIn('EVENTKEY', task_name)\n self.assertIn('MESSAGEKEY', task_name)\n self.assertIn('BATCHID', task_name)",
"def test_name_must_be_present(self):\n response = self.client.post(url_for('teams'),\n data={\n 'capacity': 10,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 400)",
"def test_get_name(self):\n provider = AWSLocalProvider()\n self.assertEqual(provider.name(), Provider.PROVIDER_AWS_LOCAL)",
"def generate_name(self):\n name = self._generate_test_name()\n while self.exists(name):\n name = self._generate_test_name()\n return name",
"def test_single_skill_request(self):\n self._add_aggregates()\n actions.login(ADMIN_EMAIL)\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': [self.skill_ids[0]]}, True))\n\n response = self.get(get_url)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n\n expected_header = ['Date', str(self.skill_ids[0])]\n expected_data = [[self.day1, 1], [self.day2, 2]]\n result = transforms.loads(payload)\n self.assertEqual(expected_header, result['column_headers'])\n self.assertEqual(len(expected_data), len(result['data']))\n for row in expected_data:\n self.assertIn(row, result['data'])",
"def test_title(names):",
"def test_first_last(self):\n\n full_name = get_full_name(\"pony\", \"cat\")\n self.assertEqual(full_name, \"Pony Cat\")\n\n full_name = get_full_name(\"goat\", \"cat\")\n self.assertEqual(full_name, \"Goat Cat\")",
"def test_is_AKs_preHandSimple_correct(self):\n self.assertEqual(self.hand.getPreHandSimple(), 'AKs')",
"def test_name_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_name('Missing ObservationType Name')",
"def get_name() -> str:",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def get_test_name(request):\n return request.node.name",
"def exp_name() -> str:\n return 'test-' + str(uuid.uuid4())",
"def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_get_response(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'ice creamr please'\n skill._wait_response.return_value = expected_response\n response = skill.get_response('what do you want')\n self.assertEqual(response, expected_response)\n self.assertTrue(skill.speak_dialog.called)",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)",
"def testName(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"name\")\n\n self.util.stringPropertyTest(self, project, \"name\")",
"def test_thingname_nokey(self, mock):\n self.assertRaises(\n KeyError,\n lf.lambda_handler, event=self.lambdaevent_nokey, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()",
"def get_trial_name(user, trial):\n return \"Suturing_{}00{}\".format(user, trial)",
"def test_name_search(self):\n # A name in the database\n search_string = \"Umut\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Check the name field of the result\n self.assertEqual(search_string,search_result[0]['name'],\"It doesn't return the user with the name {}\".format(search_string))",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def test_three(self):\n name = get_formatted_name('david', 'malan', 'j')\n self.assertEqual(name, 'David J Malan')",
"def test_get_by_name2(self):\n pass",
"def name():\n\n pass",
"def test_thingname_nostr(self, mock):\n mock.configure_mock(**(self.config_shadowget(ParamValidationError(\n report='UnitTest'))))\n self.assertRaises(\n ParamValidationError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()"
] | [
"0.73746306",
"0.6601982",
"0.6425845",
"0.62760645",
"0.6175571",
"0.6128249",
"0.61196023",
"0.6061607",
"0.60439354",
"0.60229063",
"0.5997637",
"0.5944493",
"0.59063286",
"0.58992296",
"0.5851394",
"0.58460176",
"0.58350754",
"0.5834596",
"0.58218324",
"0.5789334",
"0.5785506",
"0.57852894",
"0.5779153",
"0.5772882",
"0.57647854",
"0.57553804",
"0.57519287",
"0.57473546",
"0.5745198",
"0.5730602",
"0.5714662",
"0.57046264",
"0.57021606",
"0.56771255",
"0.56684214",
"0.5665953",
"0.56613266",
"0.5648243",
"0.56460696",
"0.56451946",
"0.5644681",
"0.56159335",
"0.56158185",
"0.5614738",
"0.5609857",
"0.5596904",
"0.55958706",
"0.55939835",
"0.5592028",
"0.5588615",
"0.558681",
"0.5578869",
"0.55710435",
"0.5565944",
"0.5564057",
"0.5553788",
"0.55482894",
"0.5542626",
"0.55411184",
"0.55265754",
"0.5521201",
"0.55155",
"0.5510704",
"0.5508071",
"0.55074936",
"0.55055445",
"0.5493689",
"0.54926264",
"0.5490276",
"0.54892564",
"0.5484596",
"0.5484379",
"0.54804486",
"0.5474542",
"0.54739547",
"0.5473042",
"0.54660696",
"0.546505",
"0.5458914",
"0.54587525",
"0.5456552",
"0.54552835",
"0.54501504",
"0.54492307",
"0.54490006",
"0.54485685",
"0.5444644",
"0.54441786",
"0.54431486",
"0.54431486",
"0.54403657",
"0.5433018",
"0.5428253",
"0.54251754",
"0.5413296",
"0.540479",
"0.5400732",
"0.5397845",
"0.53975713",
"0.5392619"
] | 0.864754 | 0 |
Test getting the 'dummy' skill name. | def test_get_nested_attribute(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"skills.dummy.behaviours.dummy.class_name",
],
standalone_mode=False,
)
assert result.exit_code == 0
assert result.output == "DummyBehaviour\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def fixture_microbial_sample_name():\n return \"microbial_name_test\"",
"def test_get_github_name_negative(self):\n self.assertIsNone(app.get_github_name(\"undefined_user12345\")[\"user\"])",
"def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)",
"def test_get_github_name_positive(self):\n self.assertIsNotNone(app.get_github_name(\"dhh\")[\"user\"])",
"def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)",
"def test_workon_name(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def testGetName(self):\n\tself.assertEqual(self.emp.getName(),'Lin') # test getName() whether return correct answer\"\n\tself.assertNotEqual(self.emp2.getName(),'Lin')",
"def test_with_only_names(self, do_student_launch, student_payload):\n del student_payload[\"email\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_2():\n\tname = \"Luke Skywalker\"\n\tassert name.lower() == api_call().json()['name'].lower()",
"def test_name(self):\n dtt = self.TDTT(when=self.txt_when)\n expected_name = self.txt_when\n self.assertEquals(expected_name, dtt.name)\n self.assertEquals(expected_name, '{}'.format(dtt))\n expected_logged = '{}({})'.format(dtt.typename(), self.txt_when)\n self.assertEquals(expected_logged, dtt.logged)",
"def name_test(item):\n return f\"{item['params']['interface']}:{item['expected']['state']}\"",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def test_names():\n first = get_name(\"As\")\n assert first == \"Arsenic\"\n\n second = get_name(\"Be\")\n assert second == \"Beryllium\"\n\n third = get_name(\"Li\")\n assert third == \"Lithium\"",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected",
"def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")",
"def test_skills(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['skills'] = [\n {'name': 'bot 1'},\n {'name': 'bot 2'},\n {'name': 'bot 3'},\n {'name': 'bot 4'},\n {'name': 'bot 5'},\n {'name': 'bot 6'},\n ]\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'bot 1')\n self.assertContains(response, 'bot 2')\n self.assertContains(response, 'bot 3')\n self.assertContains(response, 'bot 4')\n self.assertContains(response, 'bot 5')\n self.assertNotContains(response, 'bot 6')\n self.assertNotContains(response, 'Speed up your bot building process by '\n 'starting with one of our Templates from the store.')",
"def test_ask_yesno_german(self):\n skill = create_skill(lang='de-de')\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'ja'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_get_by_name1(self):\n pass",
"def test_get_player_names(self):\n INPUT.side_effect = ['A', 'M', 'Z', '']\n names = game.pig.get_player_names()\n self.assertEqual(names, ['A', 'M', 'Z'])",
"def test_get_tool_by_name(tmp_path, caplog, base_db):\r\n caplog.set_level(logging.DEBUG)\r\n tool = base_db.get_single_tool(FAKE_TOOL_INFO.get(\"name\"))\r\n assert tool.name == FAKE_TOOL_INFO.get(\"name\")\r\n tool = base_db.get_single_tool(\"non-existing\")\r\n assert not tool",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def fixture_other_case() -> str:\n return \"angrybird\"",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)",
"def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')",
"def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)",
"def test_api():\n # person id for one long time employee\n content = get_person(10050)\n assert content['preferredName'].endswith('immel')",
"def get_random_male_name ():\n return db_random_pop_default(DB_FIRST_MALE, \"John\")",
"def test_get_component_name(initialized_bmi):\n name = initialized_bmi.get_component_name()\n assert isinstance(name, str)\n\n return name",
"def get_name():\n return \"Boss\"",
"def name():\r\n return _random.choice([male_first(), female_first()])",
"def test_first_name(self) :\n\t\tformatted_name = get_formatted_name('janis','joplin')\n\t\tself.assertEqual(formatted_name,'Janis Joplin')",
"def test_dummy():",
"def test_full_name(self):\n current_resume = resume.objects.first()\n expected = 'Nicholas Bielinski'\n case = current_resume.full_name()\n self.assertEqual(case, expected)",
"def test_it_has_a_name():\n rob = Unicorn('Robert')\n assert rob.name == 'Robert'",
"def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')",
"def test_selection_name(self):\n skill = create_skill()\n skill.speak = mock.Mock()\n skill.get_response = mock.Mock()\n\n skill.get_response.return_value = 'octopus'\n\n options = ['a balloon', 'an octopus', 'a piano']\n response = skill.ask_selection(options, 'which is better')\n self.assertEqual(options[1], response)\n\n # Assert that the spoken sentence contains all options.\n spoken_sentence = skill.speak.call_args[0][0]\n for opt in options:\n self.assertTrue(opt in spoken_sentence)",
"def test_get_and_has__name_only():\n contract = ContractHandler.get(\"DataTokenTemplate\")\n assert contract.address[:2] == \"0x\"\n assert \"totalSupply\" in str(contract.abi)\n\n assert ContractHandler.has(\"DataTokenTemplate\")\n assert not ContractHandler.has(\"foo name\")",
"def test_legal_names(self):\n product_list = generate_products()\n for prod in product_list:\n noun = prod.name.split(\" \")[1]\n adjective = prod.name.split(\" \")[0]\n self.assertIn(noun, NOUNS)\n self.assertIn(adjective, ADJECTIVES)",
"def test_first_last(self):\r\n full_name = get_full_name('janis','joplin')\r\n # expected value\r\n self.assertEqual(full_name,'Janis Joplin')",
"def get_sample_name(self):\n\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['context_tags'].attrs['user_filename_input']\n\t\texcept Exception, e:\n\t\t\treturn None",
"def fixture_microbial_sample_id():\n return \"microbial_sample_test\"",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def __getitem__(self, skillName):\r\n return self.getSkill(skillName)",
"def get_name():",
"def get_test(arn=None):\n pass",
"def test_no_skill_request(self):\n actions.login(ADMIN_EMAIL)\n\n response = self.get(self.URL)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n result = transforms.loads(payload)\n\n self.assertEqual(['Date'], result['column_headers'])\n self.assertEqual([], result['data'])",
"def test_skill_created(self):\n\t\tself.skill.save()\n\t\tskill_instance = Skill.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tskill_instance.user,\n\t\t\tself.skill.user,\n\t\t\t'User don\\'t match.'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tskill_instance.tag,\n\t\t\tself.tag,\n\t\t\t'Skill tag\\'s don\\'t match.'\n\t\t)",
"def getSkill(self, skillName):\r\n if self.__contains__(skillName):\r\n return self.skills[skillName]\r\n return None",
"def test_table_name(self):\n obs = SampleTemplate._table_name(self.test_study.id)\n self.assertEqual(obs, \"sample_1\")",
"def test_hello_without_name(self, app_mock):\n\n # Call /api/hello using test client, storing the response in res \n res = app_mock.test_client().get('/api/hello')\n\n # Assert that the status code returned is 400\n assert res.status_code == 400\n \n # Assert that the response should be a json token as below\n assert json.loads(res.data) == {'message': 'No name specified', 'action': 'Please specify a name in query string'}",
"def nice_name():\n\n pass",
"def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_get_github_name_concrete(self):\n self.assertEqual(app.get_github_name(\"dhh\")[\"user\"][\"name\"], \"David Heinemeier Hansson\")",
"def give_names(x): \n if x == 0:\n return 'Lost'\n else:\n return 'Won/Broke Even'",
"def test_get_non_existent_skill_progress(self):\n self._build_sample_graph()\n student = models.Student(user_id='1')\n tracker = SkillCompletionTracker()\n result = tracker.get_skills_progress(student, [self.sc.id])\n self.assertEqual(SkillCompletionTracker.NOT_ATTEMPTED,\n result[self.sc.id][0])",
"def test_that_name_saved():\n custom_sum_name = \"custom_sum\"\n\n assert custom_sum.__name__ == custom_sum_name",
"def GetModernizedTestName(self, arg):\n return arg",
"def test_nice_name_returns_username(self):\n\n class UserNoName():\n username = 'my_username'\n\n def get_full_name(self):\n return None\n\n rendered = self.render_nice_name(UserNoName())\n\n self.assertEquals(rendered, 'my_username')",
"def get_name() -> str:\n pass",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"async def random_name(self):\r\n data_response = await self.http.endpoint(\"name\")\r\n return result.TextResult(data_response, target=\"name\")",
"def test_task_name(self, task_mock):\n from sosbeacon.event.message import get_student_broadcast_task\n\n student_key = Mock()\n student_key.urlsafe.return_value = \"STUDENTKEY\"\n\n event_key = Mock()\n event_key.urlsafe.return_value = \"EVENTKEY\"\n\n message_key = Mock()\n message_key.urlsafe.return_value = \"MESSAGEKEY\"\n\n batch_id = \"BATCHID\"\n\n get_student_broadcast_task(\n student_key, event_key, message_key, batch_id)\n\n task_name = task_mock.call_args[1]['name']\n self.assertIn('STUDENTKEY', task_name)\n self.assertNotIn('EVENTKEY', task_name)\n self.assertIn('MESSAGEKEY', task_name)\n self.assertIn('BATCHID', task_name)",
"def test_name_must_be_present(self):\n response = self.client.post(url_for('teams'),\n data={\n 'capacity': 10,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 400)",
"def test_get_name(self):\n provider = AWSLocalProvider()\n self.assertEqual(provider.name(), Provider.PROVIDER_AWS_LOCAL)",
"def generate_name(self):\n name = self._generate_test_name()\n while self.exists(name):\n name = self._generate_test_name()\n return name",
"def test_single_skill_request(self):\n self._add_aggregates()\n actions.login(ADMIN_EMAIL)\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': [self.skill_ids[0]]}, True))\n\n response = self.get(get_url)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n\n expected_header = ['Date', str(self.skill_ids[0])]\n expected_data = [[self.day1, 1], [self.day2, 2]]\n result = transforms.loads(payload)\n self.assertEqual(expected_header, result['column_headers'])\n self.assertEqual(len(expected_data), len(result['data']))\n for row in expected_data:\n self.assertIn(row, result['data'])",
"def test_title(names):",
"def test_first_last(self):\n\n full_name = get_full_name(\"pony\", \"cat\")\n self.assertEqual(full_name, \"Pony Cat\")\n\n full_name = get_full_name(\"goat\", \"cat\")\n self.assertEqual(full_name, \"Goat Cat\")",
"def test_is_AKs_preHandSimple_correct(self):\n self.assertEqual(self.hand.getPreHandSimple(), 'AKs')",
"def test_name_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_name('Missing ObservationType Name')",
"def get_name() -> str:",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def get_test_name(request):\n return request.node.name",
"def exp_name() -> str:\n return 'test-' + str(uuid.uuid4())",
"def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_get_response(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'ice creamr please'\n skill._wait_response.return_value = expected_response\n response = skill.get_response('what do you want')\n self.assertEqual(response, expected_response)\n self.assertTrue(skill.speak_dialog.called)",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)",
"def testName(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"name\")\n\n self.util.stringPropertyTest(self, project, \"name\")",
"def test_thingname_nokey(self, mock):\n self.assertRaises(\n KeyError,\n lf.lambda_handler, event=self.lambdaevent_nokey, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()",
"def get_trial_name(user, trial):\n return \"Suturing_{}00{}\".format(user, trial)",
"def test_name_search(self):\n # A name in the database\n search_string = \"Umut\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Check the name field of the result\n self.assertEqual(search_string,search_result[0]['name'],\"It doesn't return the user with the name {}\".format(search_string))",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def test_three(self):\n name = get_formatted_name('david', 'malan', 'j')\n self.assertEqual(name, 'David J Malan')",
"def test_get_by_name2(self):\n pass",
"def name():\n\n pass",
"def test_thingname_nostr(self, mock):\n mock.configure_mock(**(self.config_shadowget(ParamValidationError(\n report='UnitTest'))))\n self.assertRaises(\n ParamValidationError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()"
] | [
"0.864754",
"0.73746306",
"0.6601982",
"0.6425845",
"0.62760645",
"0.6175571",
"0.6128249",
"0.61196023",
"0.6061607",
"0.60439354",
"0.60229063",
"0.5997637",
"0.5944493",
"0.59063286",
"0.58992296",
"0.5851394",
"0.58460176",
"0.58350754",
"0.5834596",
"0.58218324",
"0.5789334",
"0.5785506",
"0.57852894",
"0.5779153",
"0.5772882",
"0.57647854",
"0.57553804",
"0.57519287",
"0.57473546",
"0.5745198",
"0.5730602",
"0.5714662",
"0.57046264",
"0.57021606",
"0.56771255",
"0.56684214",
"0.5665953",
"0.56613266",
"0.5648243",
"0.56460696",
"0.56451946",
"0.5644681",
"0.56159335",
"0.56158185",
"0.5614738",
"0.5609857",
"0.5596904",
"0.55958706",
"0.55939835",
"0.5592028",
"0.5588615",
"0.558681",
"0.5578869",
"0.55710435",
"0.5565944",
"0.5564057",
"0.5553788",
"0.55482894",
"0.5542626",
"0.55411184",
"0.55265754",
"0.5521201",
"0.55155",
"0.5510704",
"0.5508071",
"0.55074936",
"0.55055445",
"0.5493689",
"0.54926264",
"0.5490276",
"0.54892564",
"0.5484596",
"0.5484379",
"0.54804486",
"0.5474542",
"0.54739547",
"0.5473042",
"0.54660696",
"0.546505",
"0.5458914",
"0.54587525",
"0.5456552",
"0.54552835",
"0.54501504",
"0.54492307",
"0.54490006",
"0.54485685",
"0.5444644",
"0.54441786",
"0.54431486",
"0.54431486",
"0.54403657",
"0.5433018",
"0.5428253",
"0.54251754",
"0.5413296",
"0.540479",
"0.5400732",
"0.5397845",
"0.53975713",
"0.5392619"
] | 0.0 | -1 |
Test that the 'get' fails because the root is not recognized. | def test_no_recognized_root(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "wrong_root.agent_name"],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "The root of the dotted path must be one of: {}".format(
ALLOWED_PATH_ROOTS
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_root(self):\n self.skipTest(\"\")\n response = self.fetch('/')\n self.assertEqual(response.code, 404)",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_get(self):\n client = kazoo.client.KazooClient()\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123}', None)\n self.assertEqual({'xxx': 123}, zkutils.get(client, '/foo'))\n\n # parsing error\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123', None)\n self.assertEqual(\n '{xxx: 123',\n zkutils.get(client, '/foo', strict=False)\n )\n self.assertRaises(yaml.YAMLError, zkutils.get, client, '/foo')\n\n kazoo.client.KazooClient.get.return_value = (None, None)\n self.assertIsNone(zkutils.get(client, '/foo'))",
"def test_get_nonexistent_test(self):\n response = self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')",
"def test_getter_key_error(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'get_child_by_name', return_value=None)\n self.mock_object(root, 'has_attr', return_value=None)\n\n self.assertRaises(KeyError,\n netapp_api.NaElement.__getitem__,\n root, '123')",
"def test_error_html_using_get(self):\n pass",
"def test_safeGet(self):\n self.assertIs(\n BMConfigParser().safeGet('nonexistent', 'nonexistent'), None)\n self.assertEqual(\n BMConfigParser().safeGet('nonexistent', 'nonexistent', 42), 42)",
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_get_fantray_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Fantray.get, session, node)",
"def test_get(self):\n pass",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_get_item_not_found(self):\n resp = self.app.get('/items/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_lookup_missing(self):\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n ret = env.lookup('foo')\n self.assertIsNone(ret)",
"def test_get_root_html1(self):\n pass",
"def test_tree_with_one_node_root_exists(one_t):\n assert one_t.root",
"def test_missing_root_setting(self, settings):\n def _error(*args, **kwargs):\n raise AttributeError\n\n error = MagicMock()\n error.side_effect = _error\n\n settings.MADCAP_FLARE_ROOT.__get__ = error\n\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n {'help_key': 'test-flare'})",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_get_not_found(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': INVALID_UUID},\n )\n response = self.request_knox(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"wrong_root.agent_name\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def testNonExistentRootPath(self):\n\n file_defs = [\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1, 'mod_inc': 1},\n\n # Empty directories\n {'name': 'empty_dir1', 'path': '', 'size': -1},\n {'name': 'empty_dir2', 'path': 'empty_dir1', 'size': -1},\n {'name': 'empty_dir3', 'path': 'empty_dir1/empty_dir2', 'size': -1},\n ]\n\n # All new files\n self._setup_test_store(file_defs)\n self._sync_drives()\n\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n\n with self.assertRaises(ValueError):\n for res in drive.get_root_file_tree('empty_dir1/empty_dir45'):\n pass",
"def test_pod_invalid_parent(self):\n session = self.login_to_apic()\n parent = Node('1','101','Switch')\n self.assertRaises(TypeError, Pod.get, session, parent)",
"def test_get_document_inexistent(empty_index):\n with pytest.raises(Exception):\n empty_index().get_document(\"123\")",
"def test_get_root_html(self):\n pass",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_root_invalid_revision(self, mock_get_entity):\n mock_get_entity.side_effect = AzureMissingResourceHttpError(\"Not Found\", 404)\n\n url = '/?{0}=InvalidRevsion'.format(TestConfig.REVISION_PARAMETER)\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 404)",
"def test_gettem_using_get(self):\n pass",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_get_root_html3(self):\n pass",
"def test_root(self):\n rv = self.root()\n self.assertEquals(rv.status_code, 200)\n self.assertIn('Welcome to Word Play', rv.get_data(as_text=True))",
"def test_get_root_html2(self):\n pass",
"def test_invalid_path_get(self):\n static_path = self.finder.find('file.ext')\n self.assertIsNone(static_path)",
"def test_get_user_404(self):\n resp = self.app.get('/users/thisuserdoesntexist')\n assert resp.status_code == 404",
"def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')",
"def test_get_nonexistant_data(self):\n response = self.client.get(\"/api/elections/1\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"message\"], \"Could not find election with id 1\")",
"def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status",
"def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))",
"def test_small_tree_has_no_root(small_tree):\n assert small_tree.root.left is None",
"def test_get_powersupply_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Powersupply.get, session, node)",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_b_get_no_items(self):\n storage = FileStorage()\n get = storage.get(User, 123)\n self.assertEqual(None, get)",
"def test_get_empty_ring(self): \n cons_hash = ConsistentHash(2)\n\n threw_value_error = False\n try:\n cons_hash.get_node('192.168.1.1')\n except exceptions.ValueError:\n threw_value_error = True\n self.assertTrue(threw_value_error)",
"def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)",
"def test_05a_get_nonexistant_app(self):\r\n res = self.app.get('/app/nonapp', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status",
"def test_get_specific_pacient_not_found(self):\n url = '/api/v1/pacientes/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_non_existent_item(self):\n\n response = self.client.get('/api/v1/category/200',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id 200 does not exist',\n str(response.data))",
"def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)",
"def test_get_secret_invalid_path(self, mget):\n data = json.dumps({\"data\": {}})\n mget.return_value = self._mock_response(content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secret('this/path/does/not/exist', 'null')",
"def test_get_secrets_invalid_path(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets('this/path/does/not/exist')",
"def test_get_order_not_found(self):\n resp = self.app.get('/orders/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_order_not_found(self):\n resp = self.app.get('/orders/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_get_not_found(self):\n url = reverse('notification', kwargs={'way_id': 999, 'notification_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('notification', kwargs={'way_id': 100, 'notification_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_get1(self):\n pass",
"def test_cannot_get_service_from_store_that_does_not_exist(self):\n get_response = self.client.get('/navyget-api/v1/store/5a2bc733791e4bbc9a26f7a5/service/', headers=self.my_header)\n self.assertEqual(get_response.status, \"404 NOT FOUND\")\n self.assertIn(\"That Store does not exist.\", str(get_response.data))",
"def test_cache_get_non_existent_item(self):\n self.assertEqual(self.cache.get('ghost'), None)\n self.assertEqual(self.cache.get('ghost', 'never exists'), 'never exists')",
"def test_get2(self):\n pass",
"def test_resource_collection_get_missing_resource(self):\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n resource = collection.get('missing-uri')\n\n self.assertIsNone(resource)",
"def test_error404():\n response = echo_client(\"GET test/test HTTP/1.1\")\n assert '404' in response",
"def test_get_product_not_found(self):\n resp = self.app.get(\"/products/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_root01(self):\n result = self.init_test_app().get('/')\n self.assertEqual(\n loads(result.data), {\n '_links': [{\n 'rel': 'pollination',\n 'href': '/pollination'\n }, {\n 'rel': 'tester-ui',\n 'href': '/tester'\n }, {\n 'href': \"/estimate-runtime\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"estimate\"\n }, {\n 'href': \"/reveg-curve.png\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"reveg-curve\"\n }]\n })",
"def test_get_a_thing_that_doesnt_exist(self) -> None:\n with self.assertRaises(things.NoSuchThing):\n things.get_a_thing(2)",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def test_get_inventory_not_found(self):\n resp = self.app.get('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_search_key() -> None:\n # assert that having a wrong key at root level\n # in the json will raise an error\n key = \"toto\"\n d = {\"toto\": {\"a\": \"b\"}, \"c\": \"d\"}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n # Search when the key is in a deeper nested level\n key = \"nested_key\"\n d = {\"en\": {\"level1\": {\"level2\": {\"nested_key\": \"value\"}}}}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n return",
"def test_api_404(self):\n r = requests.get('{server}/api/0.1/sam'.format(\n server=self.get_server_url()),\n headers={'accept': 'application/json'})\n self.assertEquals(404, r.status_code)\n self.assertIn('error', r.json())",
"def test_get_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.get(\"key 1\")",
"def check_for_root(self):\n if self.root is None:\n raise ValueError(\"root is NoneType\")",
"def test_nosuch_detail(self):\n\t\tresponse = self.client.get(\"/post/2/\")\n\t\tself.assertEqual(response.status_code, 404)\n\t\t# We got an error before trying to use a template,\n\t\t# so no template was accessed\n\t\t###print(f\"@@@ {response}\")\n\t\t###print(f\"@@@ @@@ template name: {response.template_name}\")\n\t\t###self.assertTemplateUsed(response, None)\n\t\tself.assertFalse(hasattr(response, \"template_name\"))",
"def test_get_specific_office_not_found(self):\n url = '/api/v1/consultorios/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.get(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)",
"def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))",
"def test_graph_retrieve_bad(self):\n fuseki = GraphStore()\n with self.assertRaises(ConnectionError):\n fuseki._graph_retrieve(\"default\")",
"async def test_missing(cli):\n response = await cli.get(f'/result/nope')\n assert response.status == 404",
"def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")",
"def test_get_leader(self):\n self._mock_api(200, 'foo.example.com')\n self.assertEquals(self.client.election.get('/mysql'), 'foo.example.com')\n self._mock_api(200,'')\n self.assertRaises(etcd.EtcdException, self.client.election.get, '/mysql')",
"def test_non_existent_question(self):\n\n res = self.app.get('/api/v1/questions/'+str(56))\n self.assertEqual(res.status_code, 404)",
"def test_get_for_not_found_team(self):\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/Team_other/users',\n headers=self.login_headers(user),\n status=404\n )",
"def test_bst_single_node():\n assert BST(1).root is None",
"def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)",
"def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)",
"def testGet(self):\n response = self.runGet(self.root)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(len(data), 1)",
"def test_404(self):\n for path in ('/foo', '/abs', '/abs/'):\n response = self.client.get(path)\n self.assertEqual(response.status_code,\n status.HTTP_404_NOT_FOUND,\n f'should get 404 for {path}')\n self.assertIn('text/html', response.content_type)\n\n response = self.client.get('/abs/1307.0001v999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known paper ID with '\n 'nonexistent version')\n response = self.client.get('/abs/alg-geom/07059999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for valid old paper ID '\n 'with nonexistent paper number affix')\n response = self.client.get('/abs/astro-ph/0110242')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known deleted paper')\n response = self.client.get('/abs/foo-bar/11223344')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for bad paper ID')",
"def test_tree_intersection_error():\n with pytest.raises(AttributeError):\n assert tree_intersection(1, 2)",
"def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)",
"def test_get(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_get(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_05c_get_nonexistant_app_tutorial(self):\r\n res = self.app.get('/app/noapp/tutorial', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status",
"def test_get404(self):\n with self.assertRaises(Exception) as context:\n self.api.get(\"card_fake\", limit=2)\n self.assertTrue(\"card not found\" in context.exception.__str__())",
"def test_get(self):\n self.assertEqual(200, self.response.status_code)",
"def test_get(self):\n self.assertEqual(200, self.response.status_code)",
"def test_get_unexisting_book(self):\n\n response1 = self.client.get(\n '/api/v1/books/NJCF4057', content_type='application/json', headers=self.get_admin_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n 'Book Not Found')\n assert response1.status_code == 404",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_removing_root(item):\n item.root = None\n assert not item.has_root",
"def test_request_root(self):\n response = requests.get(self.url + '/')\n\n self.assertEqual(response.status_code, 200)\n\n json = response.json()\n self.assertIsInstance(json, dict)\n self.assertEqual(len(json.keys()), 2)\n self.assertIn('users', json.keys())\n self.assertIn('groups', json.keys())\n\n users = json.get('users')\n groups = json.get('groups')\n self.assertIsInstance(users, list)\n self.assertIsInstance(groups, list)\n self.assertEqual(len(users), 2)\n self.assertEqual(len(groups), 3)\n self.assertIn('John', users)\n self.assertIn('Jane', users)\n self.assertIn('Human', groups)\n self.assertIn('Male', groups)\n self.assertIn('Female', groups)"
] | [
"0.81144464",
"0.81144464",
"0.70011485",
"0.68381655",
"0.68031454",
"0.667773",
"0.6674683",
"0.6666335",
"0.6611255",
"0.65303046",
"0.6396875",
"0.6372637",
"0.6341329",
"0.634079",
"0.63209414",
"0.6298034",
"0.6295935",
"0.6282365",
"0.62819725",
"0.6272043",
"0.6221722",
"0.6214911",
"0.61907995",
"0.6163433",
"0.61542517",
"0.61445373",
"0.6136732",
"0.6135725",
"0.6131959",
"0.61318105",
"0.6129109",
"0.61206007",
"0.6115306",
"0.61005056",
"0.6093168",
"0.608727",
"0.6060806",
"0.6040993",
"0.60241604",
"0.6007799",
"0.59971756",
"0.59855205",
"0.5977899",
"0.59651375",
"0.59607345",
"0.596006",
"0.59570324",
"0.5952519",
"0.5951611",
"0.59398186",
"0.59396225",
"0.59368455",
"0.5935492",
"0.5932272",
"0.5932272",
"0.59221846",
"0.59221846",
"0.59201795",
"0.59177715",
"0.5915094",
"0.5905965",
"0.5904858",
"0.5889973",
"0.5873371",
"0.58691716",
"0.5868397",
"0.5862687",
"0.5862448",
"0.5855224",
"0.5849953",
"0.584978",
"0.5845066",
"0.5843223",
"0.5843177",
"0.5842172",
"0.5836407",
"0.5832554",
"0.58319634",
"0.5824606",
"0.58228976",
"0.5817463",
"0.5817089",
"0.58065975",
"0.5806151",
"0.58043253",
"0.5797344",
"0.5795132",
"0.57949924",
"0.578454",
"0.5783732",
"0.5781583",
"0.5781583",
"0.5767013",
"0.57663506",
"0.5764684",
"0.5764684",
"0.57631063",
"0.575528",
"0.57538813",
"0.5749696"
] | 0.65112627 | 10 |
Test that the 'get' fails because the path is too short but the root is correct. | def test_too_short_path_but_root_correct(self):
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "config", "get", "agent"], standalone_mode=False
)
assert result.exit_code == 1
assert (
result.exception.message
== "The path is too short. Please specify a path up to an attribute name."
)
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "skills.dummy"],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "The path is too short. Please specify a path up to an attribute name."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent\", \"data\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_invalid_path_get(self):\n static_path = self.finder.find('file.ext')\n self.assertIsNone(static_path)",
"def verify_root_path(self) -> None:\n path = \"/\"\n with self.assertRaises(AccessDeniedException):\n verify_file_path(path)",
"def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write",
"def test_get_secrets_invalid_path(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets('this/path/does/not/exist')",
"def test_get_secret_invalid_path(self, mget):\n data = json.dumps({\"data\": {}})\n mget.return_value = self._mock_response(content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secret('this/path/does/not/exist', 'null')",
"def test_root(self):\n self.skipTest(\"\")\n response = self.fetch('/')\n self.assertEqual(response.code, 404)",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_safeGet(self):\n self.assertIs(\n BMConfigParser().safeGet('nonexistent', 'nonexistent'), None)\n self.assertEqual(\n BMConfigParser().safeGet('nonexistent', 'nonexistent', 42), 42)",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)",
"def test_get_absolute_path():\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"../foo\"), \"/bar/foo\")\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"/foo\"), \"/foo\")",
"def validate_short_path(short_path):",
"def verify_restricted_path(self) -> None:\n path = \"/usr\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def testNonExistentRootPath(self):\n\n file_defs = [\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1, 'mod_inc': 1},\n\n # Empty directories\n {'name': 'empty_dir1', 'path': '', 'size': -1},\n {'name': 'empty_dir2', 'path': 'empty_dir1', 'size': -1},\n {'name': 'empty_dir3', 'path': 'empty_dir1/empty_dir2', 'size': -1},\n ]\n\n # All new files\n self._setup_test_store(file_defs)\n self._sync_drives()\n\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n\n with self.assertRaises(ValueError):\n for res in drive.get_root_file_tree('empty_dir1/empty_dir45'):\n pass",
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_set_get_incorrect_path(self):\n with pytest.raises(\n ClickException, match=\"Attribute `.*` for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.INCORRECT_PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n\n with pytest.raises(\n ClickException,\n match=\"Attribute `behaviours.dummy.args.behaviour_arg_100500` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n self.INCORRECT_PATH,\n str(self.NEW_VALUE),\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')",
"def test_client_id_path() -> None:\n assert indieauth._parse_client_id(\"http://ex.com\").path == \"/\"\n assert indieauth._parse_client_id(\"http://ex.com/hello\").path == \"/hello\"\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello/.world\").path == \"/hello/.world\"\n )\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello./.world\").path\n == \"/hello./.world\"\n )\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/.\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/./yo\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/../yo\")",
"def test_invalid_path(self):\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, 'foo')",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"wrong_root.agent_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)",
"def test_error_html_using_get(self):\n pass",
"def test_nonexistent_path(tmpdir_factory):\n folder = Path(tmpdir_factory.mktemp('git'))\n path = folder.joinpath('nonexistent')\n\n with pytest.raises(ValueError):\n gitb.pull(path)",
"def test_append_slash_slashless_unknown(self):\n request = self.rf.get(\"/unknown\")\n response = CommonMiddleware(get_response_404)(request)\n self.assertEqual(response.status_code, 404)",
"def test_invalid_pathname(self):\n self.assertFalse(Util.is_pathname_valid(''))",
"def test_get_file_with_remote_and_short_SHA1_error(self):\n with self.assertRaises(ShortSHA1Error):\n self.remote_tool.get_file('README', 'd7e96b3')",
"def test_get_path_not_exist(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n expected = None\n actual = Hash(self.file).get()\n self.assertEqual(expected, actual)",
"def test_404(self):\n for path in ('/foo', '/abs', '/abs/'):\n response = self.client.get(path)\n self.assertEqual(response.status_code,\n status.HTTP_404_NOT_FOUND,\n f'should get 404 for {path}')\n self.assertIn('text/html', response.content_type)\n\n response = self.client.get('/abs/1307.0001v999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known paper ID with '\n 'nonexistent version')\n response = self.client.get('/abs/alg-geom/07059999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for valid old paper ID '\n 'with nonexistent paper number affix')\n response = self.client.get('/abs/astro-ph/0110242')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known deleted paper')\n response = self.client.get('/abs/foo-bar/11223344')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for bad paper ID')",
"def test_get(self):\n client = kazoo.client.KazooClient()\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123}', None)\n self.assertEqual({'xxx': 123}, zkutils.get(client, '/foo'))\n\n # parsing error\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123', None)\n self.assertEqual(\n '{xxx: 123',\n zkutils.get(client, '/foo', strict=False)\n )\n self.assertRaises(yaml.YAMLError, zkutils.get, client, '/foo')\n\n kazoo.client.KazooClient.get.return_value = (None, None)\n self.assertIsNone(zkutils.get(client, '/foo'))",
"def test_parse_url_path() -> None:\n assert indieauth._parse_url(\"http://ex.com\").path == \"/\"",
"def test_github_path_purepath():\n p = github_api.GithubPath('/tensorflow/datasets/tree/master/')\n sub_p = p / 'some_folder'\n assert isinstance(sub_p, github_api.GithubPath)\n assert str(p) == '/tensorflow/datasets/tree/master'\n assert p == github_api.GithubPath.from_repo('tensorflow/datasets')",
"def test_00(self):\n result = resolve_path({'_id': '1'}, '')\n expected = '/index.html'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, '/')\n expected = '/index.html'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'foo.png')\n expected = '/foo.png'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'bar/foo.js')\n expected = '/bar/foo.js'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'main.js')\n expected = '/main.js'\n self.assertEqual(result, expected)",
"def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)",
"def test_get_nonexistent_test(self):\n response = self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')",
"def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_url_path(self):\n response = self.client.get('/planner/recipes/1/')\n self.assertEqual(response.status_code, 200)",
"def test_not_a_valid_fuzz_path(self):\n self.assertFalse(cifuzz.check_fuzzer_build('not/a/valid/path'))",
"def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)",
"def test_path(self):\n self.assertEqual(self.ftp_case.path, '/rfc/rfc1808.txt')\n self.assertEqual(self.ldap_case.path, '/c=GB')\n self.assertEqual(self.news_case.path, \n 'comp.infosystems.www.servers.unix')\n self.assertEqual(self.telnet_case.path, '/')\n self.assertEqual(self.urn_case.path, \n 'oasis:names:specification:docbook:dtd:xml:4.1.2')",
"def local_assert_empty(path):\n try:\n local = get_local(path)\n except ValueError:\n return\n raise ValueError(\"Something exists at %s\" % local.path)",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def test_root() -> Path:\n return TEST_ROOT",
"def test_docs_paths():\n assert os.path.exists('test/examples/docs/paths-root-api.md')\n assert os.path.exists('test/examples/docs/paths-subpath1.md')\n assert os.path.exists('test/examples/docs/paths-subpath1.md')",
"def path_home_mock():\n raise AttributeError()",
"def test_get_failure(self, mock_exists):\n static_path = self.finder.find('can/put/anything/here.js')\n self.assertEqual(static_path, ())",
"def verify_non_existing_path(self) -> None:\n path = \"/some/non/existing/path\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)",
"def test_verify_path_5(self):\n result = basic.verify_path(str(self.test_directory1))\n self.assertTrue(result)",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"wrong_root.agent_name\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def test_get_path_returns_none_for_bad_key(\n self, audio_store_and_expected_files, key):\n audio_store = audio_store_and_expected_files[0]\n assert audio_store.get_path(key) is None",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)",
"def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")",
"def test_nonexistent_path(tmpdir):\n with pytest.raises(IOError):\n checksum(tmpdir.join(\"does-not-exist.txt\").strpath)",
"def test_predicates_on_unsanitized_paths(self):\n self.mfs.add_entries({'/just/another/pythonista': ''})\n\n self.assertTrue(os.path.isdir('///just'))\n self.assertTrue(os.path.isdir('///just/////another'))\n self.assertTrue(os.path.exists('///just////another////////pythonista'))\n self.assertTrue(os.path.isfile('///just////another////////pythonista'))",
"def test_verify_path2_17(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def test_environment_path_subdir_leadingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"/keys\"\n )",
"def test_append_slash_have_slash(self):\n request = self.rf.get(\"/slash/\")\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)",
"def path_validate(path):\n # functionality to be added later\n return path",
"def test_error404():\n response = echo_client(\"GET test/test HTTP/1.1\")\n assert '404' in response",
"def test_collisions_file_path(self):\n self.assertRaises(ValueError, collisions_clean, \"not_a_file_path\")",
"def test_sha1_from_path(self):\n self.assertEqual(TEST_SHA1, _get_sha1_from_path(TEST_SHA1))\n self.assertEqual(TEST_SHA1, _get_sha1_from_path('/' + TEST_SHA1))\n self.assertEqual(TEST_SHA1, _get_sha1_from_path('/test/' + TEST_SHA1))",
"def test_find_path_bi():\n assert True",
"def assert_path(self, root: Node, path: str) -> None:\n\n if not self.__assert_path(root, path):\n raise Exception('Path \\'{}\\' not found in root node:\\n{}'.format(path, root))",
"def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False",
"def testInvalidPostPath(self):\n for path in ('framework', 'endpoint', 'invalid'):\n status, _ = self._http_post(path, \"some-data\")\n self.assertEqual(status, 404)",
"def test_get_base_url():\n eq_(get_base_url(\"http://foo.com/bar/baz\"), \"http://foo.com\")\n eq_(get_base_url(\"https://foo.com:443/foo/bar\"), \"https://foo.com:443\")",
"def test_computed_url(self):\n t = TwoHundredRequest()\n self.assertEqual(\"twohundred\", t.url_path())",
"def test_get_single_different(single_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n single_bucket.get(\"key 2\")",
"def test_BenchmarkSuite_invalid_path_access(benchmark_suite: typing.Callable):\n bs = benchmark_suite()\n with test.Raises(TypeError):\n _ = bs.path",
"def test_get_strict_no_sdb_in_uri():\n\n msg = 'SDB uri must start with \"sdb://\"'\n with pytest.raises(SaltInvocationError, match=msg) as cm:\n sdb.get(\"://salt/foo\", strict=True)",
"def test_verify_path_3(self):\n result = basic.verify_path(str(self.test_directory1), \"dir\")\n self.assertTrue(result)",
"def test_append_slash_disabled(self):\n request = self.rf.get(\"/slash\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)",
"def test_AlgorithmsIdHandler_GET_MalformedRequest(self):\n searchedId='xyz' + ' ' + '1'\n response = self.testapp.get('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(400, response.status_int, msg='Wrong answer code')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding='UTF-8'))",
"def test_local_path(nexus_base, path):\n assert nexus_base.local_path == path",
"def test_base_path(original_base_path, args):\n if args.skip_redirects:\n return original_base_path\n\n # WARNING: some redirects are hardcoded to production URLs.\n # Both staging and production will rate limit us.\n response = session.head(args.root_url + original_base_path, allow_redirects=True)\n\n if 200 <= response.status_code < 300:\n return response.url.replace('https://www.gov.uk', '').replace(args.root_url, '')\n elif response.status_code == 429:\n response.raise_for_status()\n else:\n if response.status_code not in (410,):\n sys.stderr.write(\"Unexpected response {} for {}\\n\".format(response.status_code, original_base_path))\n return None",
"def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)",
"def test_missing_root_setting(self, settings):\n def _error(*args, **kwargs):\n raise AttributeError\n\n error = MagicMock()\n error.side_effect = _error\n\n settings.MADCAP_FLARE_ROOT.__get__ = error\n\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n {'help_key': 'test-flare'})",
"def test_verify_path2_10(self):\n result, msg = basic.verify_path2(self.file, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status",
"def test_getter_key_error(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'get_child_by_name', return_value=None)\n self.mock_object(root, 'has_attr', return_value=None)\n\n self.assertRaises(KeyError,\n netapp_api.NaElement.__getitem__,\n root, '123')",
"def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())",
"def test_empty(self):\n self.assertFalse(os.path.exists('/'))",
"def test_url_path(self):\n response = self.client.get('/planner/recipes/')\n self.assertEqual(response.status_code, 200)",
"def test_gettem_using_get(self):\n pass",
"def test_get_storage_invalid_suffix(self):\r\n self.assertRaises(KeyError, self.profile.get_storage, ('testing.json,'))",
"def test_get(self):\n url, port = self.server.address\n\n #couple of basic GETs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n # GETs with params\n r = self.client.get(\"http://{0}:{1}/get_with_params\".format(url, port),\n params=self.params)\n self.assertEqual(200, r.status_code)\n self.assertEqual(str(self.params), r.text)\n\n # GETs with ...?",
"def test_get_query_subdir(): # ***Incomplete test\n ##########################\n # Arrange.\n outdir = \"outdir\"\n\n ##########################\n # Act.\n #x = get_query_subdir(outdir)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_get(self):\n pass",
"def test_environment_path_subdir_trailingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"keys/\"\n )",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_invalid_path(self, tmp_path):\n other_path = tmp_path / \"other\"\n other_path.mkdir()\n pattern = (\n \"Could not find any of configuration files '.kedro.yml, pyproject.toml'\"\n )\n with pytest.raises(KedroContextError, match=re.escape(pattern)):\n load_context(str(other_path))"
] | [
"0.6826721",
"0.6826721",
"0.6796174",
"0.6690637",
"0.6464489",
"0.63691777",
"0.6339634",
"0.631256",
"0.6309888",
"0.6286524",
"0.6273844",
"0.6235973",
"0.62328714",
"0.61507434",
"0.6129715",
"0.61153513",
"0.6104261",
"0.6071585",
"0.60438406",
"0.60402846",
"0.60037696",
"0.60023564",
"0.5958214",
"0.5913527",
"0.5907943",
"0.59011805",
"0.5889473",
"0.58736956",
"0.5872585",
"0.5870241",
"0.58701175",
"0.58625925",
"0.58447045",
"0.58407456",
"0.58354855",
"0.5819282",
"0.581641",
"0.5812207",
"0.58063424",
"0.58029115",
"0.5792709",
"0.57924855",
"0.5781331",
"0.57806504",
"0.5769531",
"0.57671887",
"0.57284725",
"0.5727917",
"0.57108563",
"0.57085335",
"0.57067925",
"0.5686902",
"0.56808966",
"0.56800365",
"0.56785333",
"0.56726444",
"0.56655425",
"0.5661862",
"0.5658465",
"0.5657874",
"0.5653589",
"0.5647715",
"0.563782",
"0.5637739",
"0.5636322",
"0.5625602",
"0.5623424",
"0.5618656",
"0.5612509",
"0.56097806",
"0.56077516",
"0.5606046",
"0.56057507",
"0.55984044",
"0.55977035",
"0.55950767",
"0.55926543",
"0.5588742",
"0.5587249",
"0.55853075",
"0.55782807",
"0.55775106",
"0.5577356",
"0.55749613",
"0.5569097",
"0.556846",
"0.5568219",
"0.55664307",
"0.5565153",
"0.5556719",
"0.5530424",
"0.5529576",
"0.55261016",
"0.5520476",
"0.5514394",
"0.5505059",
"0.5495622",
"0.5489434",
"0.54827",
"0.5475715"
] | 0.68506765 | 0 |
Test that the 'get' fails because the resource does not exist. | def test_resource_not_existing(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"connections.non_existing_connection.name",
],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "Resource connections/non_existing_connection does not exist."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_request_invalid_resource(self):\n response = requests.get(self.url + '/invalid')\n\n self.assertEqual(response.status_code, 404)",
"def test_get_nonexistent_test(self):\n response = self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')",
"def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404",
"def test_request_users_user_invalid_resource(self):\n response = requests.get(self.url + '/users/John/invalid')\n\n self.assertEqual(response.status_code, 404)",
"def test_resource_collection_get_missing_resource(self):\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n resource = collection.get('missing-uri')\n\n self.assertIsNone(resource)",
"async def test_missing(cli):\n response = await cli.get(f'/result/nope')\n assert response.status == 404",
"def test_read_non_existent(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.get(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 999},\n ),\n )\n\n content = {'detail': 'Not found.'}\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)",
"def test_get_item_not_found(self):\n resp = self.app.get('/items/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_task_not_found(self):\n task_id = \"foo\"\n\n rv = TEST_CLIENT.get(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = {\n \"message\": \"The specified task does not exist\",\n \"code\": \"TaskNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)",
"def test_get_not_found(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': INVALID_UUID},\n )\n response = self.request_knox(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_fail(self):\n response = self.second_client.get(self.url)\n self.assertEquals(response.status_code, 400)",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_get_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.get(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)",
"def test_get404(self):\n with self.assertRaises(Exception) as context:\n self.api.get(\"card_fake\", limit=2)\n self.assertTrue(\"card not found\" in context.exception.__str__())",
"def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)",
"def test_get_unexisting_book(self):\n\n response1 = self.client.get(\n '/api/v1/books/NJCF4057', content_type='application/json', headers=self.get_admin_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n 'Book Not Found')\n assert response1.status_code == 404",
"def testNotFound(self):\n response = requests.get(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.place == storage.get(Place, self.place.id))\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_non_existent_item(self):\n\n response = self.client.get('/api/v1/category/200',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id 200 does not exist',\n str(response.data))",
"def test_list_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def testNotFound(self):\n response = requests.get(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_cannot_get_service_from_store_that_does_not_exist(self):\n get_response = self.client.get('/navyget-api/v1/store/5a2bc733791e4bbc9a26f7a5/service/', headers=self.my_header)\n self.assertEqual(get_response.status, \"404 NOT FOUND\")\n self.assertIn(\"That Store does not exist.\", str(get_response.data))",
"def test_get_doesnotexist_exception(self):\r\n with self.assertRaises(TestModel.DoesNotExist):\r\n TestModel.objects.get(test_id=100)",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_get_car_invalid_id():\n response = client.get(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND",
"def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')",
"def test_get(self):\n self.assertEqual(403, self.response.status_code)",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def test_get_book_with_id_does_not_exist(self):\n\t\tlogin_data = self.register_and_login_in_user()\n\t\ttoken = login_data['auth_token']\n\n\t\t# get book id\n\t\tbook = self.client.get(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json'\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'book not found')\n\t\tself.assertEqual(book.status_code, 404)",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_invalid_resource_list_404(self):\n url = reverse(\"resources:resources\", (\"invalid\",))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_not_found(self):\n url = reverse('notification', kwargs={'way_id': 999, 'notification_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('notification', kwargs={'way_id': 100, 'notification_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def testNotFound(self):\n response = requests.post(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_user_404(self):\n resp = self.app.get('/users/thisuserdoesntexist')\n assert resp.status_code == 404",
"def test_non_existent_question(self):\n\n res = self.app.get('/api/v1/questions/'+str(56))\n self.assertEqual(res.status_code, 404)",
"def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)",
"def test_request_returns_404(client):\n assert client.get(\"/url_que_nao_existe\").status_code == 404",
"def test_get_object_not_found(self, employee_model):\n employee_model.DoesNotExist = Employee.DoesNotExist\n employee_model.objects.get.side_effect = employee_model.DoesNotExist\n\n with self.assertRaises(Http404):\n self.view.get_object(1)",
"def test_invalid_resource_endpoint_returns_error(self):\n self.add_tasks()\n response = self.app.get('api/v1/tasks/209', follow_redirects=True)\n self.assertEquals(response.status_code, 404)\n self.assertEquals(response.mimetype, 'application/json')\n self.assertIn(b'Element does not exist', response.data)",
"def test_get_a_thing_that_doesnt_exist(self) -> None:\n with self.assertRaises(things.NoSuchThing):\n things.get_a_thing(2)",
"def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status",
"def test_handle_non_existed_assignment_properly(user_client):\n response = user_client.get(\"/api/v1/assignments/999/\")\n assert response.data == {\"detail\": \"Not found.\"}",
"def test_not_found(self):\n\n url = '/%s/job-types/missing-job/1.0.0/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)",
"def test_get_specific_pacient_not_found(self):\n url = '/api/v1/pacientes/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_inventory_not_found(self):\n resp = self.app.get('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_api_404(self):\n r = requests.get('{server}/api/0.1/sam'.format(\n server=self.get_server_url()),\n headers={'accept': 'application/json'})\n self.assertEquals(404, r.status_code)\n self.assertIn('error', r.json())",
"def test_get_single_bad_item(test_client):\n\n response = test_client.get(BAD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND",
"def testNotFound(self):\n data = {'text': 'toto'}\n response = requests.put(url=self.invalid_url, json=data)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_get_movie_404(self):\n res = self.client().get('/api/movies/9000')\n self.assertEqual(res.status_code, 404)",
"def test_GET_fetcher_fail():\n bad_url = GET_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?",
"def test_not_found(self):\n\n url = '/%s/job-types/missing-job/1.0.0/revisions/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)\n\n # correct job type, bad version\n url = '/%s/job-types/my-job/9.9.9/revisions/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)",
"def test_beneficiaries_retrieve_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve')\n response = self.client.get(url)\n self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)",
"def test_get_nonexistant_data(self):\n response = self.client.get(\"/api/elections/1\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"message\"], \"Could not find election with id 1\")",
"def test_validate_get_single_resource(client):\n response = client.get('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def test_get_meals_with_invalid_url(test_client):\n response = test_client.get(\"/api/v2/menu/\")\n assert response.status_code == 404",
"def test_nonexisting_event(self):\n response = self.client.get(\"/events/1\")\n self.assertEqual(response.status_code, 404)",
"def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')",
"def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)",
"def test_cache_get_non_existent_item(self):\n self.assertEqual(self.cache.get('ghost'), None)\n self.assertEqual(self.cache.get('ghost', 'never exists'), 'never exists')",
"def test_get_image_exists_not(self):\n with self.assertRaises(errors.NotFound):\n self.docker.images.get(\"image_does_not_exists\")",
"def test_fetch_url_not_ok():\n with patch(\"cheddar.index.remote.get\") as mocked:\n mocked.return_value = MagicMock()\n mocked.return_value.status_code = codes.bad_request\n with assert_raises(NotFoundError):\n fetch_url(\"http://example.com\", TIMEOUT, getLogger())",
"def test_failed_get_resource(self, mock_spotify_api_class, invalid_request):\n with patch('bpm.spotify.requests.get') as mock_requests:\n mock_requests.return_value = invalid_request\n result = mock_spotify_api_class.get_resource(\"\")\n assert result == {}",
"def test_retrieve_not_found(self):\n\n # get a valid digest\n content = \"\"\"\\xe1\\xbc\\x84\\xce\\xbd\\xce\\xb4\\xcf\\x81\\xce\\xb1\n \\xce\\xbc\\xce\\xbf\\xce\\xb9\n \\xe1\\xbc\\x94\\xce\\xbd\\xce\\xbd\\xce\\xb5\\xcf\\x80\\xce\\xb5\"\"\"\n namespace = 'default'\n collection = generate_collection(namespace, [content])\n preupload_status = self.call_api(\n 'preupload', self.message_to_dict(collection), 200)\n message = preupload_status.json.get(u'items', [{}])[0]\n\n # get the digest\n request = preupload_status_to_request(message, content)\n embedded = validate(\n request.upload_ticket, handlers_endpoints_v1.UPLOAD_MESSAGES[0])\n\n # don't upload data; try to retrieve\n retrieve_request = handlers_endpoints_v1.RetrieveRequest(\n digest=embedded['d'], namespace=handlers_endpoints_v1.Namespace())\n with self.call_should_fail('404'):\n self.call_api('retrieve', self.message_to_dict(retrieve_request), 200)",
"def test_get_single_movie_incorrect_id(client):\n resp = client.get(f\"/api/movies/{30}/\")\n assert resp.status_code == 404",
"def test_nosuch_detail(self):\n\t\tresponse = self.client.get(\"/post/2/\")\n\t\tself.assertEqual(response.status_code, 404)\n\t\t# We got an error before trying to use a template,\n\t\t# so no template was accessed\n\t\t###print(f\"@@@ {response}\")\n\t\t###print(f\"@@@ @@@ template name: {response.template_name}\")\n\t\t###self.assertTemplateUsed(response, None)\n\t\tself.assertFalse(hasattr(response, \"template_name\"))",
"def test_patch_a_resource_that_does_not_exist():\n pass",
"def test_get_specific_office_not_found(self):\n url = '/api/v1/consultorios/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def test_not_found(self):\n with self.assertRaises(UserNotFoundException):\n self._storage.get_by_username(\"test\")",
"def test_returns_404_if_user_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/queries/non_existent/touched/\")\n self.assertEqual(response.status_code, 404)",
"async def test_api_get_non_existing_state(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(\"/api/states/does_not_exist\")\n assert resp.status == HTTPStatus.NOT_FOUND",
"def test_details_not_found(self):\n\n url = '/%s/job-types/missing-job/1.0.0/revisions/9/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)",
"def test_feeds_do_not_exist(self):\n rv = self.client.get('/user/who.xml')\n eq_(rv.status_code, 404)\n\n rv = self.client.get('/project/fake.xml')\n eq_(rv.status_code, 404)\n\n rv = self.client.get('/team/not-real.xml')\n eq_(rv.status_code, 404)",
"def test_get_for_not_found_team(self):\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/Team_other/users',\n headers=self.login_headers(user),\n status=404\n )",
"def test_get_doesnotexist_exception(self):\r\n with self.assertRaises(self.table.DoesNotExist):\r\n self.table.objects.get(test_id=100)",
"def test_invalid_route_is_status_404(self):\n response = self.client.get(\"/bad\")\n self.assertTrue(response.status_code == 404)",
"def testNotFound(self):\n response = requests.delete(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.place == storage.get(Place, self.place.id))\n self.assertIn('error', json_data.keys())\n self.assertEqual(json_data['error'], 'Not found')",
"def test_entity_doesnt_exist(self):\n key = ndb.Key(models.InstanceTemplateRevision, 'fake-key')\n urls = snapshots.fetch(key)\n self.failIf(urls)",
"def test_404_not_found(app, client):\n\n response = client.get(\"/notexistpage\")\n assert response.status_code == 404\n assert \"404 Not Found\" in str(response.data)",
"def assertHttpNotFound(self, resp):\r\n return self.assertEqual(resp.status_code, 404)",
"def test_neg_exists_with_non_existent_data(self, key, ex, ex_code):\n try:\n key, meta = self.as_connection.exists(key)\n assert meta is None\n \"\"\"\n We are making the api backward compatible. In case of RecordNotFound an\n exception will not be raised. Instead Ok response is returned withe the\n meta as None. This might change with further releases.\n \"\"\"\n except ex as exception:\n assert exception.code == ex_code",
"def _does_not_exist():\n response_payload = dict(\n message=\"Recipe does not exist!\"\n )\n response_payload = jsonify(response_payload)\n return make_response(response_payload, 404)",
"def test_get_actor_404(self):\n res = self.client().get('/api/actors/9000')\n self.assertEqual(res.status_code, 404)",
"def test_fake_get_url(self):\n resp = self.app.get('/api/v1/g?url=somefalseurl')\n self.assertEqual(resp.status_code, 500)",
"def test_retrieve_invalid_course(self):\n path = reverse('commerce_api:v1:courses:retrieve_update', args=['a/b/c'])\n response = self.client.get(path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 404",
"def test_get_product_not_found(self):\n resp = self.app.get(\"/products/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_404_page_not_found():\n with app.test_client() as c:\n response = c.get('/this_page_not_exists')\n assert response.status_code == 404",
"def test_nonexistent_user(self):\n self.client.login(username=self.global_staff.username, password=self.password)\n resp = self.client.get(self.get_url('IDoNotExist'))\n assert resp.status_code == status.HTTP_404_NOT_FOUND",
"def test_404(client, route):\n response = client.get(route)\n assert b'Page not found' in response.data\n assert \"404\" in response.status",
"def test_05d_get_nonexistant_app_results_json(self):\r\n res = self.app.get('/app/noapp/24/results.json', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status",
"def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)",
"def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)",
"def test_GET3(self):\n r = requests.get(self.address)\n self.assertEqual(r.status_code, 400)",
"async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)",
"def test_get_single_user_is_missing(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get('/users/999')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])",
"def test_get(self):\n self.assertEqual(\n self.attempts[0],\n self.resource.get(self.attempts[0][_ATTEMPT.attempt_id]))",
"def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")"
] | [
"0.8803426",
"0.87292",
"0.8429405",
"0.797745",
"0.797067",
"0.7951216",
"0.77274334",
"0.76254976",
"0.75959903",
"0.7590098",
"0.7587531",
"0.75486064",
"0.75073713",
"0.75045615",
"0.7498573",
"0.7495179",
"0.74433476",
"0.74420184",
"0.741603",
"0.74085885",
"0.7400039",
"0.7398629",
"0.7382048",
"0.73699325",
"0.73690605",
"0.73535556",
"0.7347148",
"0.73465496",
"0.7345764",
"0.7342662",
"0.7332499",
"0.73255116",
"0.7320085",
"0.73132056",
"0.73090756",
"0.73072743",
"0.73003703",
"0.7292498",
"0.7273389",
"0.7272762",
"0.7270031",
"0.72523135",
"0.7205318",
"0.71921545",
"0.7190604",
"0.71898127",
"0.7177927",
"0.716818",
"0.7155045",
"0.7140065",
"0.7138074",
"0.7135617",
"0.7134366",
"0.7133743",
"0.71322304",
"0.7128149",
"0.71277976",
"0.71191067",
"0.71043926",
"0.7103689",
"0.7083774",
"0.70784736",
"0.7052145",
"0.7051573",
"0.70460045",
"0.70452905",
"0.70378673",
"0.7034268",
"0.70306927",
"0.70269465",
"0.70253474",
"0.7023615",
"0.70200735",
"0.7019849",
"0.7019669",
"0.7017646",
"0.7017378",
"0.701283",
"0.70112884",
"0.7009305",
"0.7004491",
"0.6995937",
"0.6986967",
"0.69863486",
"0.6980379",
"0.6970595",
"0.69673157",
"0.6957711",
"0.6949208",
"0.6938101",
"0.69372314",
"0.6932026",
"0.6915868",
"0.69046074",
"0.68921274",
"0.6878727",
"0.68675065",
"0.6867281",
"0.6863891",
"0.6862653"
] | 0.6998007 | 81 |
Test that the 'get' fails because the attribute is not found. | def test_attribute_not_found(self):
with pytest.raises(
ClickException, match=r"Attribute `.* for .* config does not exist"
):
self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"skills.dummy.non_existing_attribute",
],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bad_get_property(self):\n s = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n with pytest.raises(AttributeError):\n s.bad_get",
"def test_attribute_not_found(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `non_existing_attribute` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute\",\n \"value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_get_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_classproperty_without_fget(self):\n p = classproperty()\n with self.assertRaises(AttributeError):\n p.__get__('x')",
"def test_untranslated_get(self):\n try:\n value = SimpleModel().tr_title\n except Exception as e:\n self.assertIsInstance(e, TranslationDoesNotExist)\n self.assertIsInstance(e, AttributeError)\n else:\n self.fail(f\"Expected exception from reading untranslated title, got {repr(value)}.\")\n\n # Raising attribute error gives some additional benefits:\n self.assertEqual(getattr(SimpleModel(), \"tr_title\", \"FOO\"), \"FOO\")\n self.assertFalse(hasattr(SimpleModel(), \"tr_title\"))",
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_staticproperty_without_fget(self):\n p = staticproperty()\n with self.assertRaises(AttributeError):\n p.__get__('x')",
"def test_page_getattr_should_not_exist(test_page):\n test_page.navigate()\n\n with pytest.raises(AttributeError):\n assert test_page.foobar()",
"def test_get_tag_fail(self):\n self.assertRaises(AttributeError, get_tag, None, \"h1\")\n self.assertRaises(\n AttributeError, get_tag, \"<h1>This is not a XML tag object</h1>\", \"h1\"\n )",
"def test_safeGet(self):\n self.assertIs(\n BMConfigParser().safeGet('nonexistent', 'nonexistent'), None)\n self.assertEqual(\n BMConfigParser().safeGet('nonexistent', 'nonexistent', 42), 42)",
"def test_get_fails_when_getting_non_dict_attribute_in_between(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.skills.some_attribute\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = \"Attribute 'skills' is not a dictionary.\"\n assert result.exception.message == s",
"def test_missing_attribute(self):\n with self.assertRaises(ImproperlyConfigured):\n import_from_setting('TEST_SETTING')",
"def test_get_property_missing(self):\r\n try:\r\n value = self.config.option2\r\n assert value\r\n except Exception as e:\r\n self.assertIsInstance(e, OptionValueNotSetError)\r\n self.assertNotIn('option2', self.config.values)",
"def test_getattr_error_attr_not_found():\n with pytest.raises(ImportError):\n from astropy.cosmology.flrw import this_is_not_a_variable # noqa: F401",
"def test_with_nonexisting_attr(create_file_with_text):\n test_class = KeyValueStorage(create_file_with_text)\n with pytest.raises(ValueError, match=\"No such key\"):\n test_class[\"wrong_attribute\"]",
"def test_attributenamenotfound(self):\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])",
"def test_get_attrib(self):\n self.assertEqual(\"true\", get_attrib(self.xml, \"exists\"))\n self.assertEqual(0, get_attrib(self.xml, \"default\", default=0))\n self.assertEqual(23, get_attrib(self.xml, \"integer\", cast=int))\n self.assertEqual(1.354, get_attrib(self.xml, \"float\", cast=float))\n self.assertRaises(ValueError, get_attrib, *(self.xml, \"noexist\", \"unittest\"))",
"def test_cache_get_non_existent_item(self):\n self.assertEqual(self.cache.get('ghost'), None)\n self.assertEqual(self.cache.get('ghost', 'never exists'), 'never exists')",
"def test_register_nonexisting_attr(self):\n pass",
"def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )",
"def __getattr__ (self, attr):\n try:\n return self.get_value (attr)\n except exc.x_not_found:\n try:\n return self.get_key (attr)\n except exc.x_not_found:\n raise AttributeError",
"def test_missing_attribute(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"attribute\": \"missing\",\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n self.opp.states.set(\"sensor.test_state\", \"State\", {\"attr\": \"2\"})\n self.opp.block_till_done()\n self.opp.states.set(\"sensor.test_state\", \"State\", {\"attr\": \"1\"})\n self.opp.block_till_done()\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"off\"",
"def test_no_password_getter(self):\n self.user.password = '123456'\n with self.assertRaises(AttributeError):\n self.user.password",
"def test_set_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute = \"value\"",
"def test_query_for_unknown_attribute_raise(test_store):\n query: Generator[Person, None, None] = test_store.get_by(not_the_droids=9000)\n\n with pytest.raises(AttributeError):\n next(query)",
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_get_fails_when_getting_non_dict_attribute(self):\n attribute = \"protocols\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", f\"skills.dummy.{attribute}.protocol\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{attribute}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_get_a_thing_that_doesnt_exist(self) -> None:\n with self.assertRaises(things.NoSuchThing):\n things.get_a_thing(2)",
"def test_getter_key_error(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'get_child_by_name', return_value=None)\n self.mock_object(root, 'has_attr', return_value=None)\n\n self.assertRaises(KeyError,\n netapp_api.NaElement.__getitem__,\n root, '123')",
"def test_search_missing_attribute(self):\n # prepare search mock\n self.mock_conn.search.return_value = True\n # bind operation\n self.mock_conn.bind.return_value = True\n # response to the search and bind calls\n self.mock_conn.response.__len__.return_value = 1\n # removed title attribute to cause error\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 'Baruser'],\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n self.mock_conn.response.__getitem__.return_value = fake_resp\n\n # perform action\n ldap_manager = ldap.MANAGER()\n\n # validate response\n with self.assertRaisesRegex(\n RuntimeError,\n 'User attribute title not found in server response'):\n ldap_manager.authenticate('baruser', 'barpwd')",
"def test_fetchParserUnknownAttribute(self):\n p = imap4._FetchParser()\n self.assertRaises(Exception, p.parseString, b\"UNKNOWN\")",
"def test_entities__Entity__getField__1(entity):\n with pytest.raises(KeyError):\n entity.getField('asdf')",
"def test_no_metaclass_get(self):\n obj = BadTestObject()\n with self.assertRaises(TypeError):\n x = obj.test_setting",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def test_get_value_missing(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n\r\n self.assertRaises(OptionValueNotSetError, self.config.get_value, name, option)\r\n self.assertNotIn(name, self.config.values)",
"def test_missing_attribute(self):\n params = {\n 'timestamp': '2018-11-01T12:01:01Z',\n 'parameters': {\n 'name': 'Test'\n }\n }\n signature = app.utils.generate_signed_data(\n params,\n settings.PRIVATE_KEY\n )\n headers = {\n 'X-Pot-Signature': base64.b64encode(signature).decode(\"utf-8\"),\n 'X-Pot-App': 'bar'\n }\n self._response = self._app.post_json('/fetch',\n params=params,\n headers=headers,\n expect_errors=True)\n\n self.assertMatchSnapshot(self._response.json_body)",
"def test_get_attribute_data(self):\n pass",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_get_non_existent_item(self):\n\n response = self.client.get('/api/v1/category/200',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id 200 does not exist',\n str(response.data))",
"def test_set_get_incorrect_path(self):\n with pytest.raises(\n ClickException, match=\"Attribute `.*` for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.INCORRECT_PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n\n with pytest.raises(\n ClickException,\n match=\"Attribute `behaviours.dummy.args.behaviour_arg_100500` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n self.INCORRECT_PATH,\n str(self.NEW_VALUE),\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_raise_if_no_attr(self):\n self.assertRaises(AttributeError, self.Model.set_primary_key, 'asdf')",
"def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")",
"def test_getitem_missing(self, env: yaenv.Env):\n with pytest.raises(yaenv.EnvError) as err:\n _ = env['MISSING']\n assert 'Missing' in str(err.value)",
"def test_get_nonexistent_runtime_property(self):\n script_path = self._create_script(\n linux_script='''#! /bin/bash -e\n ctx instance runtime-properties nonexistent\n ''',\n windows_script='''\n ctx instance runtime-properties nonexistent\n ''')\n\n with self.assertRaises(tasks.ProcessException) as cm:\n self._run(script_path=script_path)\n\n self.assertIn(os.path.basename(script_path), cm.exception.command)\n self.assertEqual(cm.exception.exit_code, 1)\n self.assertTrue(string_in_log('RequestError', self._caplog))\n self.assertTrue(string_in_log('nonexistent', self._caplog))",
"def test_get_coord_by_attr_invalid():\n pass",
"def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False",
"def test_init_attributes(self):\n t = self.Test({'id': 1, 'poop': 'abc'})\n\n self.assertEqual(t.id, 1)\n self.assertEqual(t.name, None)\n self.assertRaises(AttributeError, t.__getattribute__, 'poop')",
"def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AttributeAbility, 'Invalid')\n self.assertRaises(AbilityError, AttributeAbility, '', 3)",
"def test_get_attributes(self):\n pass",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='Dam')\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='')\n self.assertRaises(AbilityError, \n AmuletAbility, 'Control NPC', attr='ST')",
"def test_get_component_with_invalid_name():\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('missing_component')",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def _safe_getattr(value, attr, default):\n try:\n return getattr(value, attr)\n except Exception:\n return default",
"def test_entities__Entity__getRawField__1(entity):\n with pytest.raises(KeyError):\n entity.getRawField('asdf')",
"def test_get_doesnotexist_exception(self):\r\n with self.assertRaises(TestModel.DoesNotExist):\r\n TestModel.objects.get(test_id=100)",
"def test_get_method(self):\n field = self.base_field\n sch = SchemaField(field)\n self.assertTrue(hasattr(sch, 'get'))\n self.assertEqual(field.get('Name'), sch.get('Name'))\n self.assertEqual(field.get('constraints'), sch.get('constraints'))\n self.assertEqual(None, sch.get('bad_keys'))\n self.assertEqual('default', sch.get('bad_keys', 'default'))",
"def test_attribute_missing_validation():\n\n @attr.s\n class Foo(object):\n something = attr.ib()\n\n with pytest.raises(UnextractableSchema):\n extract_jsonschema(Foo)",
"def __getattribute__(self,name):\n try:\n return object.__getattribute__(self,name)\n except AttributeError:\n extraPO = object.__getattribute__(self,'_extraPO')\n\n if hasattr(extraPO,name):\n return getattr(extraPO,name) # HIDDEN!\n\n _attr_err_msg = object.__getattribute__(self,'_attr_err_msg')\n\n raise AttributeError(_attr_err_msg(name,[self,extraPO]))",
"def __getattr__(self, name):\n raise MockException(\n \"Mock object %s has no attribute '%s'\" %\n (mock_object_names[id(self)], name)\n )",
"def attribute(self, key):\n attribute = self.getAttributes()[key]\n if attribute == None:\n MissingValueException(key)\n else:\n return attribute",
"def get(self, attr):\n try:\n return getattr(self, attr)\n except:\n print(\"%s is not an attribute of this instance\" % attr)\n return None",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)",
"def __tr_getattr__(self, name):\n raise AttributeError(name)",
"def __tr_getattr__(self, name):\n raise AttributeError(name)",
"def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))",
"def test_get_accessor_raises_exception_if_not_created_and_no_uname_password(\n RallyAccessor):\n assert_raises(Exception, get_accessor)\n assert_false(RallyAccessor.called)",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def attr(self, name):\r\n return Assert(getattr(self.obj, name))",
"def test_error_html_using_get(self):\n pass",
"async def test_get_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.get('x')",
"def test_getattr():\n atom = ATOMClassifier(X_class, y_class, random_state=1)\n atom.balance(strategy=\"smote\")\n atom.run(\"Tree\")\n assert isinstance(atom.tree.shape, tuple)\n assert isinstance(atom.tree.alcohol, pd.Series)\n assert isinstance(atom.tree.head(), pd.DataFrame)\n with pytest.raises(AttributeError, match=r\".*has no attribute.*\"):\n print(atom.tree.data)",
"def _fake_safe_get(self, value):\n try:\n val = getattr(self.configuration, value)\n except AttributeError:\n val = None\n return val",
"def testExceptionRaisedByGetattr(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ttry:\n\t\t\tx.g\n\t\t\tself.fail()\n\t\texcept Exception, e:\n\t\t\tpass",
"def test_public_attr(self):\n self.assertFalse(hasattr(self.file_storage, \"foo.json\"))",
"def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']",
"def checkattr(name):\n\n def check(obj):\n try:\n attrgetter(name)(obj)\n return True\n except AttributeError:\n return False\n\n return check",
"def test_get_fields_and_lookups_field_does_not_exist(self):\n with self.assertRaises(exceptions.FieldDoesNotExist):\n utils.get_fields_and_lookups(Protected, 'nofield__icontains')",
"def test_get_object_not_found(self, employee_model):\n employee_model.DoesNotExist = Employee.DoesNotExist\n employee_model.objects.get.side_effect = employee_model.DoesNotExist\n\n with self.assertRaises(Http404):\n self.view.get_object(1)",
"def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))",
"def test_fieldname_exc(self):\n ds = self.f.create_dataset('foo', (100,), 'f')\n self.assertRaises(ValueError, ds.__getitem__, (0, 'a'))",
"def test_existing_attribute(self):\n self.assertEqual(import_from_setting('TEST_SETTING'), 1)",
"def test_resource_collection_get_missing_resource(self):\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n resource = collection.get('missing-uri')\n\n self.assertIsNone(resource)",
"def test_non_existent_key(self):\n ttl = self.cache.ttl('does_not_exist')\n self.assertEqual(ttl, 0)",
"def test_other_user_kvs_get_failure(self):\r\n with self.assertRaises(AssertionError):\r\n self.kvs.get(self.other_key_factory(self.existing_field_name))",
"def _static_hasattr(value, attr):\n try:\n object.__getattribute__(value, attr)\n except AttributeError:\n return False\n else:\n return True",
"def _fget(self):\n # type: (...) -> Any\n try:\n return getattr(self, private_attr)\n except AttributeError:\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n _get_type_name(type_), attr\n )\n )",
"def test_properties_get(self):\n pass",
"def test_name_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.name = 'bar'\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Category') is None)",
"def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Category') is None)",
"def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']",
"def test_get_doesnotexist_exception(self):\r\n with self.assertRaises(self.table.DoesNotExist):\r\n self.table.objects.get(test_id=100)"
] | [
"0.78341216",
"0.76988006",
"0.76461685",
"0.7441677",
"0.7264206",
"0.7244423",
"0.71043795",
"0.7093183",
"0.70701784",
"0.70177764",
"0.6991154",
"0.69798416",
"0.69683987",
"0.69441843",
"0.6912952",
"0.6902508",
"0.68767726",
"0.6857589",
"0.6852551",
"0.6847994",
"0.6835904",
"0.682553",
"0.679566",
"0.6722924",
"0.67059094",
"0.669912",
"0.6673834",
"0.66701895",
"0.6668156",
"0.6644111",
"0.662125",
"0.65879846",
"0.6513669",
"0.6513185",
"0.65051216",
"0.64952916",
"0.64742744",
"0.64706975",
"0.6449944",
"0.64297557",
"0.64279723",
"0.64162797",
"0.64028716",
"0.63977927",
"0.63946825",
"0.63871086",
"0.6371932",
"0.6371929",
"0.63697684",
"0.6354132",
"0.6348463",
"0.63438046",
"0.6337905",
"0.6325874",
"0.6314525",
"0.63140327",
"0.63140327",
"0.6309462",
"0.62805426",
"0.62611324",
"0.62525505",
"0.6237637",
"0.62234634",
"0.62203133",
"0.6217764",
"0.6216701",
"0.62056446",
"0.62056446",
"0.6193367",
"0.6193367",
"0.6191696",
"0.6188934",
"0.6174475",
"0.6164789",
"0.6158105",
"0.6150277",
"0.6146299",
"0.6134793",
"0.6127868",
"0.61228955",
"0.6121579",
"0.61213726",
"0.61213726",
"0.6119958",
"0.6116961",
"0.60994005",
"0.60861117",
"0.60849077",
"0.6081319",
"0.60789317",
"0.6073407",
"0.6062154",
"0.60510635",
"0.6044942",
"0.6024602",
"0.6020873",
"0.6015024",
"0.6015024",
"0.60097086",
"0.60074085"
] | 0.7648934 | 2 |
Test that getting the 'dummy' skill behaviours works. | def test_get_whole_dict(self):
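# Fetch the entire 'behaviours' section of the dummy skill via the CLI.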
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "skills.dummy.behaviours"],
standalone_mode=False,
)
assert result.exit_code == 0
actual_object = json.loads(result.output)
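# Two behaviours are expected, both implemented by the same DummyBehaviour class.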
expected_object = {
"dummy": {
"args": {"behaviour_arg_1": 1, "behaviour_arg_2": "2"},
"class_name": "DummyBehaviour",
},
"dummy_behaviour_same_classname": {
"args": {"behaviour_arg_1": 1, "behaviour_arg_2": "2"},
"class_name": "DummyBehaviour",
"file_path": "dummy_subpackage/foo.py",
},
}
assert actual_object == expected_object | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"",
"def test_skills(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['skills'] = [\n {'name': 'bot 1'},\n {'name': 'bot 2'},\n {'name': 'bot 3'},\n {'name': 'bot 4'},\n {'name': 'bot 5'},\n {'name': 'bot 6'},\n ]\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'bot 1')\n self.assertContains(response, 'bot 2')\n self.assertContains(response, 'bot 3')\n self.assertContains(response, 'bot 4')\n self.assertContains(response, 'bot 5')\n self.assertNotContains(response, 'bot 6')\n self.assertNotContains(response, 'Speed up your bot building process by '\n 'starting with one of our Templates from the store.')",
"def test_get_response_no_dialog(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'ice creamr please'\n skill._wait_response.return_value = expected_response\n response = skill.get_response()\n self.assertEqual(response, expected_response)\n self.assertFalse(skill.speak_dialog.called)\n self.assertTrue(skill.bus.emit.called)\n sent_message = skill.bus.emit.call_args[0][0]\n self.assertEqual(sent_message.msg_type, 'mycroft.mic.listen')",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def test_get_response(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'ice creamr please'\n skill._wait_response.return_value = expected_response\n response = skill.get_response('what do you want')\n self.assertEqual(response, expected_response)\n self.assertTrue(skill.speak_dialog.called)",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def test_dummy():",
"def test_ask_yesno_german(self):\n skill = create_skill(lang='de-de')\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'ja'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_intent_classifier_get_testing_samples(self):\n pass",
"def test_handle(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.act()",
"def test_model_initialization():\n MyModel(\"model\", SkillContext())",
"def test_intent_classifier_add_testing_samples(self):\n pass",
"def test_theft_and_stealing(self):",
"def test_sounds_get(self):\n pass",
"def testNoSpecialties(self):\n self.failUnlessEqual(self.person.getSpecialties(), [])",
"def test_get_scenario(self):\n pass",
"def test_skills(self):\n yield self.nodes[0].overlay.trustchain.add_skill('test')\n yield self.deliver_messages()\n peer1_pub_key = self.nodes[0].overlay.trustchain.my_peer.public_key.key_to_bin()\n self.assertTrue(self.nodes[0].overlay.trustchain.persistence.get_skills(peer1_pub_key))\n\n skills = self.nodes[1].overlay.trustchain.persistence.get_skills(peer1_pub_key)\n self.assertTrue(skills)\n\n # Peer 2 endorses peer 1 now\n block, _ = yield self.nodes[1].overlay.trustchain.endorse_skill(peer1_pub_key, skills[0]['block_num'])\n yield self.deliver_messages()\n self.assertTrue(self.nodes[1].overlay.trustchain.persistence.did_endorse_skill(block))\n\n skills = self.nodes[0].overlay.trustchain.persistence.get_skills(peer1_pub_key)\n self.assertEqual(skills[0]['endorsements'], 1)",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def sample_action(self, obs):\n pass",
"def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()",
"def sample_action(self):\n raise NotImplementedError",
"def test_no_skill_request(self):\n actions.login(ADMIN_EMAIL)\n\n response = self.get(self.URL)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n result = transforms.loads(payload)\n\n self.assertEqual(['Date'], result['column_headers'])\n self.assertEqual([], result['data'])",
"def test_ai_undefined(self, mock_get, mock_get_categories):\n\n # We mock ai_list\n mock_get.return_value = {\n 'ai_list': [\n factory.build(\n dict,\n FACTORY_CLASS=AiFactory,\n ai_status='ai_undefined'\n )\n ]\n }\n response = self.client.get(reverse('studio:summary'))\n self.assertContains(response, 'Not Started')",
"def test_testing():\n Pendulum = pu.Pendulum()\n ans = Pendulum.dummytest()\n assert ans",
"def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")",
"def test_indicate(self):\n self.objective.Indicate()",
"def test_indicate(self):\n self.objective.Indicate()",
"def test_skill_created(self):\n\t\tself.skill.save()\n\t\tskill_instance = Skill.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tskill_instance.user,\n\t\t\tself.skill.user,\n\t\t\t'User don\\'t match.'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tskill_instance.tag,\n\t\t\tself.tag,\n\t\t\t'Skill tag\\'s don\\'t match.'\n\t\t)",
"def test_history_SinglePlayer_NoHistory(test_name):\n\n env = build_test_env(\n test_name,\n \"tests/gdy/test_step_SinglePlayer_SingleActionType.yaml\",\n enable_history=False\n )\n\n obs, reward, done, info = env.step(1)\n\n assert 'History' not in info\n\n sample = env.action_space.sample()\n assert isinstance(sample, int)",
"def test_get_non_existent_skill_progress(self):\n self._build_sample_graph()\n student = models.Student(user_id='1')\n tracker = SkillCompletionTracker()\n result = tracker.get_skills_progress(student, [self.sc.id])\n self.assertEqual(SkillCompletionTracker.NOT_ATTEMPTED,\n result[self.sc.id][0])",
"def test_get_scenarios(self):\n pass",
"def test_matcher_called(self):\n\n skill = _TestSkill(None, None)\n message = Mock()\n skill.hello_skill(message)\n\n self.assertTrue(message.respond.called_once)",
"def test_skills_updated(self):\n assert self.agent_config.skills == {self.new_skill_id}",
"def test_dummy_test():\n pass",
"def test_fake_health_get(self):\n pass",
"def test_get_skill_with_questions(self):\n\n # map a skill to two questions\n skill_graph = SkillGraph.load()\n skill = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n q1 = self._create_mc_question('description 1')\n q2 = self._create_mc_question('description 2')\n q1.dict[SKILLS_KEY] = [skill.id]\n q2.dict[SKILLS_KEY] = [skill.id]\n models.QuestionDAO.save_all([q1, q2])\n\n # get skills\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n\n # assert that it's linked to two questions\n self.assertEqual(2, len(skills[0]['questions']))",
"def test_default_sound_system(self):\n\n self.assertFalse(self.mc.machine_config['sound_system']['enabled'])\n self.assertIsNone(self.mc.sound_system)",
"def test_matcher_on_instance(self):\n\n skill = _TestSkill(None, None)\n self.assertTrue(hasattr(skill.hello_skill, \"matchers\"))",
"def test_out_of_order(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"tan\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')",
"def sample_action(self, obs, explore_prob):\n raise NotImplementedError",
"def test_no_skill_aggregate(self):\n actions.login(ADMIN_EMAIL)\n\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': [1]}, True))\n response = self.get(get_url)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n result = transforms.loads(payload)\n\n self.assertEqual(['Date'], result['column_headers'])\n self.assertEqual([], result['data'])",
"def test_lti20_get_no_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\"})",
"def test_get_goal(self):\n pass",
"def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])",
"def dummy(self):\n pass",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def lua_test():\n pass",
"def test_skills_updated(self):\n assert self.skill_config.skills == {self.new_skill_id}",
"def testBeliefs1sk(self):",
"def test_not_supported():\n assert get_accessory(None, State('demo.demo', 'on'), 2, config=None) \\\n is None",
"def test_get_response_text(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'green'\n skill._wait_response.return_value = expected_response\n response = skill.get_response('tell me a color')\n self.assertEqual(response, expected_response)\n self.assertTrue(skill.speak_dialog.called)\n skill.speak_dialog.assert_called_with('tell me a color',\n {},\n expect_response=True,\n wait=True)",
"def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"",
"def test_get_goals(self):\n pass",
"def test(self, state):\n\n # manual dice should have been typed in by this point, if they don't\n # exist exit\n if state.dice == \"manual\" and (\n state.rolls is None or state.rolls == []):\n return state\n\n test_dict = {\"attr\": self._test_1dice,\n \"fight_talent\": self._test_1dice,\n \"advantage\": self._test_1dice,\n \"skill\": self._test_3dice,\n \"spell\": self._test_3dice,\n \"misc\": self._test_misc}\n\n state = test_dict[state.selection.category](state)\n\n return state",
"def test_setup(self):\n assert self.transaction_behaviour.setup() is None\n self.assert_quantity_in_outbox(0)",
"def sample(self):",
"def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()",
"def test_work_without_activity(human):\n with pytest.raises(AttributeError):\n human.work()",
"def get_first_sample() -> Sample:\n print(get_intro_message())\n\n user_satisfied = False\n while not user_satisfied:\n sample = get_single_sample()\n\n print(\"Sample recorded : \\\"\" + sample.string + \"\\\"\")\n user_satisfied = get_binary_validation(\n \"Do you want to keep this sample ?\", True\n )\n\n sample.impostor = False\n\n return sample",
"def test():\n pass",
"def test_nothing(self):",
"def startTestHook(self):",
"def testPlaybackMechanism(self):\n\t\tx = BaseAction('x')\n\t\tself.failIf(x.playbackPolicy.hasBeenPlayedBack)\n\t\tself.failIf(x.playbackPolicy.isReadyForRemoval)\n\t\tx.playback()\n\t\tself.failUnless(x.playbackPolicy.hasBeenPlayedBack)\n\t\tself.failUnless(x.playbackPolicy.isReadyForRemoval)",
"def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))",
"def test_empty_functions():",
"def test_intent_support(self):\n dispatcher = self.get_dispatcher()\n for intent in self.get_intents():\n self.assertIsNot(dispatcher(intent), None)",
"def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)",
"def setUp(self):\n self.game = BuildGame()\n self.context = BuildPlayerContext()\n self.effects = [DummyEffect() for i in range(10)]\n \n self.calledUnregisterActivatable = False\n self.context.owner.unregisterActivatable = self.unregisterActivatable",
"def test_get_skills_multiple_lessons(self):\n skill_graph = SkillGraph.load()\n\n skill_1 = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n unit = self.course.add_unit()\n unit.title = 'Test Unit'\n lesson1 = self.course.add_lesson(unit)\n lesson1.title = 'Test Lesson 1'\n lesson2 = self.course.add_lesson(unit)\n lesson2.title = 'Test Lesson 2'\n self.course.save()\n lesson1.properties[SKILLS_KEY] = [skill_1.id]\n lesson2.properties[SKILLS_KEY] = [skill_1.id]\n self.course.save()\n\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n # All lessons listed\n self.assertEqual(2, len(skills[0]['lessons']))",
"def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_plays_get(self):\n pass",
"async def test_default(self, dm):\n request = create_request(\"other\", \"other\")\n result = await dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"default\"",
"def test_single_skill_request(self):\n self._add_aggregates()\n actions.login(ADMIN_EMAIL)\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': [self.skill_ids[0]]}, True))\n\n response = self.get(get_url)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n\n expected_header = ['Date', str(self.skill_ids[0])]\n expected_data = [[self.day1, 1], [self.day2, 2]]\n result = transforms.loads(payload)\n self.assertEqual(expected_header, result['column_headers'])\n self.assertEqual(len(expected_data), len(result['data']))\n for row in expected_data:\n self.assertIn(row, result['data'])",
"def test_bardspeechinteractor(setup_qt): # pylint: disable=redefined-outer-name,unused-argument\n config = {\n \"timeout for command\" : 1,\n \"sensitivities\" : [1.0],\n \"interval\": 10,\n \"recogniser\" : \"testing\",\n \"test signals\" : [\n [\"start_listen\", False],\n [\"google_api_not_understand\", False],\n [\"google_api_request_failure\", \"the internet is broken\"],\n [\"voice_command\", \"next\"],\n [\"voice_command\", \"map\"],\n [\"voice_command\", \"clear\"],\n [\"start_processing_request\", False],\n [\"unknown_command\", \"what's this\"],\n [\"voice_command\", \"quit\"]],\n }\n\n bard_speech = configure_speech_interaction(config,\n _FakeVisualisationControl())\n\n # I think the following slots should be fired by the bard_speech, but\n # I can't make it work. So let's just run them here.\n with pytest.raises(NextTargetEvent):\n bard_speech._on_voice_signal('next') # pylint: disable=protected-access\n with pytest.raises(TurnOnAllEvent):\n bard_speech._on_voice_signal('clear') # pylint: disable=protected-access\n with pytest.raises(CycleAnatomyEvent):\n bard_speech._on_voice_signal('map') # pylint: disable=protected-access\n\n _on_google_api_not_understand()\n _on_google_api_request_failure()\n _on_start_processing_request()\n _on_start_listen()\n\n sleep(0.5)\n\n bard_speech.stop_listener()",
"def test_intent_classifier_get_training_samples(self):\n pass",
"def test_actor_matches_activity(self):",
"def test_roll_or_hold(self):\n INPUT.side_effect = ['R', 'H', 'h', 'z', '12345', 'r']\n pig = game.pig.Pig('PlayerA', 'PlayerB')\n self.assertEqual(pig.roll_or_hold(), 'roll')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'roll')",
"def test_win(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"ant\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')",
"def starting_tests(self):\n# disable menus during testing, because their message loop seems to interfere\n# with the natlink message loop which waits for recognitionMimic to\n# finish\n self.testing = 1",
"def test_03_visit_special(self):",
"def test_get_bios_policy_by_moid(self):\n pass",
"def test_get_game(self):\n pass",
"def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)",
"def test_no_audio():\n # This file doesn't exist\n no_audio_file_struct = FileStruct(\"fixtures/chirp_noaudio.mp3\")\n no_audio_file_struct.features_file = \"features/chirp_noaudio.json\"\n feat_type = FeatureTypes.framesync\n CQT(no_audio_file_struct, feat_type, sr=22050).features\n assert (os.path.isfile(no_audio_file_struct.features_file))\n with open(no_audio_file_struct.features_file) as f:\n data = json.load(f)\n assert(CQT.get_id() in data.keys())",
"def test(self):\n pass",
"def test_register_context_error(self):\n @self.skill.register('test_logic')\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['test_logic']()\n self.assertRaises(RuntimeError, sample_func)",
"def test_intent_classifier_del_testing_samples_all(self):\n pass",
"def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)",
"def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def testDefaultSettingOfOnePlayack(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)\n\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failUnless(policy.isReadyForRemoval)",
"def sample(self):\n # This method is set in __init__.\n pass",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def test_noop(self):\n base_env = _DiscreteEnvironmentOneReward(\n action_dtype=np.int64,\n reward_spec=specs.Array(dtype=np.float32, shape=()))\n wrapped_env = wrappers.DelayedRewardWrapper(base_env, accumulation_period=1)\n base_episode_reward = _episode_reward(base_env)\n wrapped_episode_reward = _episode_reward(wrapped_env)\n self.assertEqual(base_episode_reward, wrapped_episode_reward)",
"def test_registred(\n self, mock_get_ai_details, mock_get_ai, mock_get_purchased, mock_get_categories\n ):\n\n # We mock API calls\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n mock_get_purchased.return_value.json.return_value = [\n factory.build(dict, FACTORY_CLASS=AiFactory),\n factory.build(dict, FACTORY_CLASS=AiFactory),\n factory.build(dict, FACTORY_CLASS=AiFactory)\n ]\n\n response = self.client.get(reverse(\n 'studio:skills',\n kwargs={\n 'aiid': self.ai['aiid']\n }\n ))\n self.assertEqual(response.status_code, 200)",
"def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]"
] | [
"0.66428226",
"0.65308845",
"0.63746595",
"0.62595034",
"0.62110424",
"0.6191015",
"0.61859745",
"0.6046809",
"0.6027827",
"0.60012347",
"0.5986034",
"0.59540683",
"0.59339106",
"0.5929021",
"0.5864972",
"0.5860361",
"0.5837059",
"0.5821178",
"0.5818226",
"0.5801682",
"0.5796743",
"0.57712",
"0.574137",
"0.5738717",
"0.5729282",
"0.5728524",
"0.571203",
"0.57113206",
"0.57113206",
"0.57102317",
"0.5700376",
"0.5685873",
"0.56810135",
"0.5677234",
"0.56725186",
"0.56717527",
"0.56433785",
"0.5618484",
"0.56083846",
"0.5590336",
"0.55791724",
"0.55658096",
"0.55628085",
"0.55437547",
"0.5543571",
"0.5543334",
"0.5532003",
"0.5531536",
"0.5522385",
"0.5520108",
"0.5520055",
"0.5515509",
"0.54905045",
"0.5476034",
"0.5458725",
"0.5456383",
"0.54497004",
"0.5449549",
"0.5436756",
"0.5436395",
"0.5430529",
"0.5429927",
"0.5427716",
"0.54053503",
"0.5397012",
"0.5392791",
"0.53897125",
"0.53839356",
"0.53833157",
"0.5382692",
"0.5374044",
"0.53707755",
"0.53637785",
"0.53637654",
"0.53627527",
"0.5358358",
"0.5356168",
"0.5351928",
"0.53505147",
"0.5350405",
"0.5348886",
"0.5344149",
"0.53440213",
"0.534356",
"0.53330326",
"0.532803",
"0.5326718",
"0.5326489",
"0.53251547",
"0.5319407",
"0.53178215",
"0.5312478",
"0.52993095",
"0.5294554",
"0.5294554",
"0.5294554",
"0.5294554",
"0.5294554",
"0.5293983",
"0.5291706",
"0.528938"
] | 0.0 | -1 |
Test that getting a list attribute of a connection configuration works. | def test_get_list(self):
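# The entry_peers list of the p2p_libp2p connection defaults to an empty list.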
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"vendor.fetchai.connections.p2p_libp2p.config.entry_peers",
],
standalone_mode=False,
)
assert result.exit_code == 0
assert result.output == "[]\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"",
"def test_skills(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['skills'] = [\n {'name': 'bot 1'},\n {'name': 'bot 2'},\n {'name': 'bot 3'},\n {'name': 'bot 4'},\n {'name': 'bot 5'},\n {'name': 'bot 6'},\n ]\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'bot 1')\n self.assertContains(response, 'bot 2')\n self.assertContains(response, 'bot 3')\n self.assertContains(response, 'bot 4')\n self.assertContains(response, 'bot 5')\n self.assertNotContains(response, 'bot 6')\n self.assertNotContains(response, 'Speed up your bot building process by '\n 'starting with one of our Templates from the store.')",
"def test_get_response_no_dialog(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'ice creamr please'\n skill._wait_response.return_value = expected_response\n response = skill.get_response()\n self.assertEqual(response, expected_response)\n self.assertFalse(skill.speak_dialog.called)\n self.assertTrue(skill.bus.emit.called)\n sent_message = skill.bus.emit.call_args[0][0]\n self.assertEqual(sent_message.msg_type, 'mycroft.mic.listen')",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def test_get_response(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'ice creamr please'\n skill._wait_response.return_value = expected_response\n response = skill.get_response('what do you want')\n self.assertEqual(response, expected_response)\n self.assertTrue(skill.speak_dialog.called)",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def test_dummy():",
"def test_ask_yesno_german(self):\n skill = create_skill(lang='de-de')\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'ja'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_intent_classifier_get_testing_samples(self):\n pass",
"def test_handle(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.act()",
"def test_model_initialization():\n MyModel(\"model\", SkillContext())",
"def test_intent_classifier_add_testing_samples(self):\n pass",
"def test_theft_and_stealing(self):",
"def test_sounds_get(self):\n pass",
"def testNoSpecialties(self):\n self.failUnlessEqual(self.person.getSpecialties(), [])",
"def test_get_scenario(self):\n pass",
"def test_skills(self):\n yield self.nodes[0].overlay.trustchain.add_skill('test')\n yield self.deliver_messages()\n peer1_pub_key = self.nodes[0].overlay.trustchain.my_peer.public_key.key_to_bin()\n self.assertTrue(self.nodes[0].overlay.trustchain.persistence.get_skills(peer1_pub_key))\n\n skills = self.nodes[1].overlay.trustchain.persistence.get_skills(peer1_pub_key)\n self.assertTrue(skills)\n\n # Peer 2 endorses peer 1 now\n block, _ = yield self.nodes[1].overlay.trustchain.endorse_skill(peer1_pub_key, skills[0]['block_num'])\n yield self.deliver_messages()\n self.assertTrue(self.nodes[1].overlay.trustchain.persistence.did_endorse_skill(block))\n\n skills = self.nodes[0].overlay.trustchain.persistence.get_skills(peer1_pub_key)\n self.assertEqual(skills[0]['endorsements'], 1)",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def sample_action(self, obs):\n pass",
"def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()",
"def sample_action(self):\n raise NotImplementedError",
"def test_no_skill_request(self):\n actions.login(ADMIN_EMAIL)\n\n response = self.get(self.URL)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n result = transforms.loads(payload)\n\n self.assertEqual(['Date'], result['column_headers'])\n self.assertEqual([], result['data'])",
"def test_ai_undefined(self, mock_get, mock_get_categories):\n\n # We mock ai_list\n mock_get.return_value = {\n 'ai_list': [\n factory.build(\n dict,\n FACTORY_CLASS=AiFactory,\n ai_status='ai_undefined'\n )\n ]\n }\n response = self.client.get(reverse('studio:summary'))\n self.assertContains(response, 'Not Started')",
"def test_testing():\n Pendulum = pu.Pendulum()\n ans = Pendulum.dummytest()\n assert ans",
"def test_skill_created(self):\n\t\tself.skill.save()\n\t\tskill_instance = Skill.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tskill_instance.user,\n\t\t\tself.skill.user,\n\t\t\t'User don\\'t match.'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tskill_instance.tag,\n\t\t\tself.tag,\n\t\t\t'Skill tag\\'s don\\'t match.'\n\t\t)",
"def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")",
"def test_indicate(self):\n self.objective.Indicate()",
"def test_indicate(self):\n self.objective.Indicate()",
"def test_history_SinglePlayer_NoHistory(test_name):\n\n env = build_test_env(\n test_name,\n \"tests/gdy/test_step_SinglePlayer_SingleActionType.yaml\",\n enable_history=False\n )\n\n obs, reward, done, info = env.step(1)\n\n assert 'History' not in info\n\n sample = env.action_space.sample()\n assert isinstance(sample, int)",
"def test_get_non_existent_skill_progress(self):\n self._build_sample_graph()\n student = models.Student(user_id='1')\n tracker = SkillCompletionTracker()\n result = tracker.get_skills_progress(student, [self.sc.id])\n self.assertEqual(SkillCompletionTracker.NOT_ATTEMPTED,\n result[self.sc.id][0])",
"def test_get_scenarios(self):\n pass",
"def test_matcher_called(self):\n\n skill = _TestSkill(None, None)\n message = Mock()\n skill.hello_skill(message)\n\n self.assertTrue(message.respond.called_once)",
"def test_skills_updated(self):\n assert self.agent_config.skills == {self.new_skill_id}",
"def test_dummy_test():\n pass",
"def test_fake_health_get(self):\n pass",
"def test_get_skill_with_questions(self):\n\n # map a skill to two questions\n skill_graph = SkillGraph.load()\n skill = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n q1 = self._create_mc_question('description 1')\n q2 = self._create_mc_question('description 2')\n q1.dict[SKILLS_KEY] = [skill.id]\n q2.dict[SKILLS_KEY] = [skill.id]\n models.QuestionDAO.save_all([q1, q2])\n\n # get skills\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n\n # assert that it's linked to two questions\n self.assertEqual(2, len(skills[0]['questions']))",
"def test_default_sound_system(self):\n\n self.assertFalse(self.mc.machine_config['sound_system']['enabled'])\n self.assertIsNone(self.mc.sound_system)",
"def test_matcher_on_instance(self):\n\n skill = _TestSkill(None, None)\n self.assertTrue(hasattr(skill.hello_skill, \"matchers\"))",
"def test_out_of_order(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"tan\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')",
"def sample_action(self, obs, explore_prob):\n raise NotImplementedError",
"def test_no_skill_aggregate(self):\n actions.login(ADMIN_EMAIL)\n\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': [1]}, True))\n response = self.get(get_url)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n result = transforms.loads(payload)\n\n self.assertEqual(['Date'], result['column_headers'])\n self.assertEqual([], result['data'])",
"def test_get_goal(self):\n pass",
"def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])",
"def test_lti20_get_no_score_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'GET')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(response.json, {\"@context\": \"http://purl.imsglobal.org/ctx/lis/v2/Result\",\r\n \"@type\": \"Result\"})",
"def dummy(self):\n pass",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def test_skills_updated(self):\n assert self.skill_config.skills == {self.new_skill_id}",
"def lua_test():\n pass",
"def testBeliefs1sk(self):",
"def test_not_supported():\n assert get_accessory(None, State('demo.demo', 'on'), 2, config=None) \\\n is None",
"def test_get_response_text(self):\n skill = create_skill()\n skill._wait_response = mock.Mock()\n skill.speak_dialog = mock.Mock()\n\n expected_response = 'green'\n skill._wait_response.return_value = expected_response\n response = skill.get_response('tell me a color')\n self.assertEqual(response, expected_response)\n self.assertTrue(skill.speak_dialog.called)\n skill.speak_dialog.assert_called_with('tell me a color',\n {},\n expect_response=True,\n wait=True)",
"def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"",
"def test_get_goals(self):\n pass",
"def test(self, state):\n\n # manual dice should have been typed in by this point, if they don't\n # exist exit\n if state.dice == \"manual\" and (\n state.rolls is None or state.rolls == []):\n return state\n\n test_dict = {\"attr\": self._test_1dice,\n \"fight_talent\": self._test_1dice,\n \"advantage\": self._test_1dice,\n \"skill\": self._test_3dice,\n \"spell\": self._test_3dice,\n \"misc\": self._test_misc}\n\n state = test_dict[state.selection.category](state)\n\n return state",
"def test_setup(self):\n assert self.transaction_behaviour.setup() is None\n self.assert_quantity_in_outbox(0)",
"def sample(self):",
"def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()",
"def test_work_without_activity(human):\n with pytest.raises(AttributeError):\n human.work()",
"def get_first_sample() -> Sample:\n print(get_intro_message())\n\n user_satisfied = False\n while not user_satisfied:\n sample = get_single_sample()\n\n print(\"Sample recorded : \\\"\" + sample.string + \"\\\"\")\n user_satisfied = get_binary_validation(\n \"Do you want to keep this sample ?\", True\n )\n\n sample.impostor = False\n\n return sample",
"def test():\n pass",
"def test_nothing(self):",
"def startTestHook(self):",
"def testPlaybackMechanism(self):\n\t\tx = BaseAction('x')\n\t\tself.failIf(x.playbackPolicy.hasBeenPlayedBack)\n\t\tself.failIf(x.playbackPolicy.isReadyForRemoval)\n\t\tx.playback()\n\t\tself.failUnless(x.playbackPolicy.hasBeenPlayedBack)\n\t\tself.failUnless(x.playbackPolicy.isReadyForRemoval)",
"def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))",
"def test_empty_functions():",
"def test_intent_support(self):\n dispatcher = self.get_dispatcher()\n for intent in self.get_intents():\n self.assertIsNot(dispatcher(intent), None)",
"def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)",
"def setUp(self):\n self.game = BuildGame()\n self.context = BuildPlayerContext()\n self.effects = [DummyEffect() for i in range(10)]\n \n self.calledUnregisterActivatable = False\n self.context.owner.unregisterActivatable = self.unregisterActivatable",
"def test_get_skills_multiple_lessons(self):\n skill_graph = SkillGraph.load()\n\n skill_1 = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n unit = self.course.add_unit()\n unit.title = 'Test Unit'\n lesson1 = self.course.add_lesson(unit)\n lesson1.title = 'Test Lesson 1'\n lesson2 = self.course.add_lesson(unit)\n lesson2.title = 'Test Lesson 2'\n self.course.save()\n lesson1.properties[SKILLS_KEY] = [skill_1.id]\n lesson2.properties[SKILLS_KEY] = [skill_1.id]\n self.course.save()\n\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n # All lessons listed\n self.assertEqual(2, len(skills[0]['lessons']))",
"def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_single_skill_request(self):\n self._add_aggregates()\n actions.login(ADMIN_EMAIL)\n get_url = '%s?%s' % (self.URL, urllib.urlencode({\n 'ids': [self.skill_ids[0]]}, True))\n\n response = self.get(get_url)\n self.assertEqual(200, response.status_int)\n payload = transforms.loads(response.body)['payload']\n\n expected_header = ['Date', str(self.skill_ids[0])]\n expected_data = [[self.day1, 1], [self.day2, 2]]\n result = transforms.loads(payload)\n self.assertEqual(expected_header, result['column_headers'])\n self.assertEqual(len(expected_data), len(result['data']))\n for row in expected_data:\n self.assertIn(row, result['data'])",
"def test_plays_get(self):\n pass",
"async def test_default(self, dm):\n request = create_request(\"other\", \"other\")\n result = await dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"default\"",
"def test_bardspeechinteractor(setup_qt): # pylint: disable=redefined-outer-name,unused-argument\n config = {\n \"timeout for command\" : 1,\n \"sensitivities\" : [1.0],\n \"interval\": 10,\n \"recogniser\" : \"testing\",\n \"test signals\" : [\n [\"start_listen\", False],\n [\"google_api_not_understand\", False],\n [\"google_api_request_failure\", \"the internet is broken\"],\n [\"voice_command\", \"next\"],\n [\"voice_command\", \"map\"],\n [\"voice_command\", \"clear\"],\n [\"start_processing_request\", False],\n [\"unknown_command\", \"what's this\"],\n [\"voice_command\", \"quit\"]],\n }\n\n bard_speech = configure_speech_interaction(config,\n _FakeVisualisationControl())\n\n # I think the following slots should be fired by the bard_speech, but\n # I can't make it work. So let's just run them here.\n with pytest.raises(NextTargetEvent):\n bard_speech._on_voice_signal('next') # pylint: disable=protected-access\n with pytest.raises(TurnOnAllEvent):\n bard_speech._on_voice_signal('clear') # pylint: disable=protected-access\n with pytest.raises(CycleAnatomyEvent):\n bard_speech._on_voice_signal('map') # pylint: disable=protected-access\n\n _on_google_api_not_understand()\n _on_google_api_request_failure()\n _on_start_processing_request()\n _on_start_listen()\n\n sleep(0.5)\n\n bard_speech.stop_listener()",
"def test_intent_classifier_get_training_samples(self):\n pass",
"def test_actor_matches_activity(self):",
"def test_roll_or_hold(self):\n INPUT.side_effect = ['R', 'H', 'h', 'z', '12345', 'r']\n pig = game.pig.Pig('PlayerA', 'PlayerB')\n self.assertEqual(pig.roll_or_hold(), 'roll')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'roll')",
"def test_win(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = list(\"ant\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('Yes! The secret word is \"ant\"! '\n 'You have won!')",
"def starting_tests(self):\n# disable menus during testing, because their message loop seems to interfere\n# with the natlink message loop which waits for recognitionMimic to\n# finish\n self.testing = 1",
"def test_03_visit_special(self):",
"def test_get_game(self):\n pass",
"def test_get_bios_policy_by_moid(self):\n pass",
"def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)",
"def test_register_context_error(self):\n @self.skill.register('test_logic')\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['test_logic']()\n self.assertRaises(RuntimeError, sample_func)",
"def test_no_audio():\n # This file doesn't exist\n no_audio_file_struct = FileStruct(\"fixtures/chirp_noaudio.mp3\")\n no_audio_file_struct.features_file = \"features/chirp_noaudio.json\"\n feat_type = FeatureTypes.framesync\n CQT(no_audio_file_struct, feat_type, sr=22050).features\n assert (os.path.isfile(no_audio_file_struct.features_file))\n with open(no_audio_file_struct.features_file) as f:\n data = json.load(f)\n assert(CQT.get_id() in data.keys())",
"def test(self):\n pass",
"def test_intent_classifier_del_testing_samples_all(self):\n pass",
"def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)",
"def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def testDefaultSettingOfOnePlayack(self):\n\t\tpolicy = FixedCountPolicy()\n\t\tself.failIf(policy.hasBeenPlayedBack)\n\t\tself.failIf(policy.isReadyForRemoval)\n\t\tpolicy.playback()\n\t\tself.failUnless(policy.hasBeenPlayedBack)\n\t\tself.failUnless(policy.isReadyForRemoval)",
"def sample(self):\n # This method is set in __init__.\n pass",
"def test_noop(self):\n base_env = _DiscreteEnvironmentOneReward(\n action_dtype=np.int64,\n reward_spec=specs.Array(dtype=np.float32, shape=()))\n wrapped_env = wrappers.DelayedRewardWrapper(base_env, accumulation_period=1)\n base_episode_reward = _episode_reward(base_env)\n wrapped_episode_reward = _episode_reward(wrapped_env)\n self.assertEqual(base_episode_reward, wrapped_episode_reward)",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def test_registred(\n self, mock_get_ai_details, mock_get_ai, mock_get_purchased, mock_get_categories\n ):\n\n # We mock API calls\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n mock_get_purchased.return_value.json.return_value = [\n factory.build(dict, FACTORY_CLASS=AiFactory),\n factory.build(dict, FACTORY_CLASS=AiFactory),\n factory.build(dict, FACTORY_CLASS=AiFactory)\n ]\n\n response = self.client.get(reverse(\n 'studio:skills',\n kwargs={\n 'aiid': self.ai['aiid']\n }\n ))\n self.assertEqual(response.status_code, 200)",
"def skills():\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]"
] | [
"0.6645116",
"0.65316606",
"0.6374316",
"0.62608534",
"0.62119484",
"0.6192046",
"0.6187593",
"0.6046467",
"0.60248756",
"0.6003139",
"0.598586",
"0.5953502",
"0.5934477",
"0.5929376",
"0.58642447",
"0.5859839",
"0.583532",
"0.5821343",
"0.58196056",
"0.5803544",
"0.5798271",
"0.57705647",
"0.5742915",
"0.57363856",
"0.5727159",
"0.5726594",
"0.5712541",
"0.57124615",
"0.571235",
"0.571235",
"0.5700195",
"0.568642",
"0.56801",
"0.56784517",
"0.5675194",
"0.566852",
"0.56411463",
"0.56193495",
"0.5606923",
"0.5590803",
"0.5579359",
"0.55672264",
"0.5561169",
"0.5543988",
"0.5543208",
"0.5542301",
"0.5530336",
"0.5529594",
"0.55224335",
"0.55219954",
"0.55190086",
"0.55133355",
"0.5490961",
"0.54769987",
"0.54581285",
"0.5456305",
"0.5450347",
"0.54499906",
"0.54397535",
"0.54354453",
"0.54302865",
"0.54279065",
"0.54244137",
"0.54049695",
"0.539657",
"0.5395191",
"0.5385024",
"0.5384175",
"0.5382951",
"0.538285",
"0.5374076",
"0.5371984",
"0.53641003",
"0.53636783",
"0.5362411",
"0.5357715",
"0.5356876",
"0.53522545",
"0.5351879",
"0.53517973",
"0.5347892",
"0.5343434",
"0.5343288",
"0.5343027",
"0.53334224",
"0.5328122",
"0.53266007",
"0.5324767",
"0.5323348",
"0.53206897",
"0.531632",
"0.5310954",
"0.5300142",
"0.5293788",
"0.5292586",
"0.5292586",
"0.5292586",
"0.5292586",
"0.5292586",
"0.5292334",
"0.5290163"
] | 0.0 | -1 |
Test that getting a nested object in 'dummy' skill fails because path is not valid. | def test_get_fails_when_getting_nested_object(self):
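# Reading a path through a non-existent attribute must raise a ClickException.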
with pytest.raises(
ClickException, match=r"Attribute `.* for .* config does not exist"
):
self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"skills.dummy.non_existing_attribute.dummy",
],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)",
"def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))",
"def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"",
"def test_get_powersupply_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Powersupply.get, session, node)",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_pod_invalid_parent(self):\n session = self.login_to_apic()\n parent = Node('1','101','Switch')\n self.assertRaises(TypeError, Pod.get, session, parent)",
"def _get_object(self, path):\n if path == \"/\":\n return self.target\n\n parts = path[1:].split(\"/\")\n last = self.target\n for part in parts:\n if type(last) == dict:\n last = last[part]\n else:\n last = getattr(last, \"get_\" + part)()\n return last",
"def test_load_path(parser):\n doc = parser.load(pathlib.Path('jsonexamples') / 'small' / 'demo.json')\n doc.at_pointer('/Image/Width')",
"def test_add_path(self):\n path = 'C:\\\\test\\\\'\n info = self.api.add_path(path, tags=['asd'])\n self.assertEqual(info['value'], path)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def nested_get(\n d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True\n) -> t.Optional[t.Any]:\n for name, key in path:\n d = d.get(key) # type: ignore\n if d is None:\n if raise_on_missing:\n name = \"table\" if name == \"this\" else name\n raise ValueError(f\"Unknown {name}: {key}\")\n return None\n\n return d",
"def test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None",
"def _getattr_path(obj: Any, path: str) -> Any:\n if not path:\n return None\n\n for attr in path.split('.'):\n obj = getattr(obj, attr, None)\n return obj",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def test_descriptor_with_nopath(self):\r\n\r\n self._get_descriptor_with_invalid_link(NoPathToItem)",
"def test_no_path():\n test = [{'key': 'val'}, []]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'path list empty' in str(t_result.failure())",
"def test_nested(cls, value, res):\n\tobj = cls(value, DEFAULT_POD)\n\tassert obj == res",
"def access_path(data: dict or any, path: list[str]) -> any:\n if path:\n first = path[0]\n rest = path[1:]\n return access_path(data[first], rest)\n return data",
"def test_dotwiz_plus_get_item():\n dd = DotWizPlus()\n dd.a = [{'one': 1, 'two': {'key': 'value'}}]\n\n item = dd['a'][0]\n assert isinstance(item, DotWizPlus)\n assert item['one'] == 1\n\n assert item['two']['key'] == 'value'",
"def test_search_key() -> None:\n # assert that having a wrong key at root level\n # in the json will raise an error\n key = \"toto\"\n d = {\"toto\": {\"a\": \"b\"}, \"c\": \"d\"}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n # Search when the key is in a deeper nested level\n key = \"nested_key\"\n d = {\"en\": {\"level1\": {\"level2\": {\"nested_key\": \"value\"}}}}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n return",
"def test_nested_dict(self):\n nested = self.TEI.nested_dict(exclude=[\"tei:note\"])\n self.assertEqual(nested[\"1\"][\"pr\"][\"1\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Check that dictionary path is well done\")\n self.assertEqual(nested[\"1\"][\"12\"][\"1\"], \"Itur ad Herculeas gelidi qua Tiburis arces \",\n \"Check that dictionary path works on more than one passage\")\n self.assertEqual(nested[\"2\"][\"pr\"][\"1\"], \"'Quid nobis' inquis 'cum epistula? parum enim tibi \",\n \"Check that different fist level works as well\")\n self.assertEqual(nested[\"1\"][\"3\"][\"8\"], \"Ibis ab excusso missus in astra sago. \",\n \"Check that notes are removed \")\n self.assertEqual(\n [list(nested.keys()), list(nested[\"1\"].keys())[:3], list(nested[\"2\"][\"pr\"].keys())[:3]],\n [[\"1\", \"2\"], [\"pr\", \"1\", \"2\"], [\"sa\", \"1\", \"2\"]],\n \"Ensure that text keeps its order\")",
"def test_embedded_json(self):\n json_data = '{\"a\": {\"b\" : true } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a.b\" : true}'))",
"def testIsInterestingPath(self):\n # pylint: disable=protected-access\n self.assertTrue(self.turbinia_processor._isInterestingPath(TEST_TASK_PATH))",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def _get_from_nest(nest, path):\n if not path or not nest:\n return nest\n return _get_from_nest(nest.get(path[0], None), path[1:])",
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_get_chain_by_id(self):\n pass",
"def test_find_path_bi():\n assert True",
"def test_get_linecard_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Linecard.get, session, node)",
"def test_invoke_invalid_object(mock_boto3_client, mock_boto3_resource):\n from odl_datalake_ingestion import lambda_handler\n mock_context = MockContext()\n mock_event[\"Records\"][0][\"s3\"][\"object\"][\"key\"] = \"this/path/doesnt/exist.ext\"\n lambda_handler(mock_event, mock_context)",
"def test_seeds_seed_path_get_nested(client, single_nested_seed):\n name, value = single_nested_seed\n\n respose = client.get(f\"/v1/seeds/{parse.quote_plus(name)}\")\n\n assert respose.status_code == 200\n assert respose.data.decode() == value",
"def test_get_next_section_linear_nested(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n section1 = create_section(title=\"Background and context\",\n story=story,\n root=True)\n section2 = create_section(title=\"Decisions to be made\", story=story)\n section3 = create_section(title=\"Who has been involved\", \n story=story)\n section4 = create_section(title=\"Next steps\", story=story)\n SectionRelation.objects.create(parent=section1, child=section2)\n SectionRelation.objects.create(parent=section2, child=section3)\n SectionRelation.objects.create(parent=section3, child=section4)\n self.assertEqual(story.structure.get_next_section(section1), \n section2)\n self.assertEqual(story.structure.get_next_section(section2),\n section3)\n self.assertEqual(story.structure.get_next_section(section3),\n section4)\n self.assertEqual(story.structure.get_next_section(section4),\n None)",
"def traverse(object, path, default=None, request=None):",
"def test_json_error(self):\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_Path",
"def testInitialize(self):\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n data_stream=u'test', location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=u'/test', parent=None)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(inode=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec, bogus=u'BOGUS')",
"def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_get_part(self):\n pass",
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):",
"def test_level_depth(chikin):\n assert chikin.depth == 0\n assert str(chikin.section) == 'Chikin Tales'\n assert chikin.section.depth == 1\n assert chikin.section.subsection.depth == 2",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent\", \"data\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_empty_path_list(mock_hvac_client_read, mock_load, localhost_client, gen_input_config, gen_vault_response_kv1):\n mock_hvac_client_read.return_value = gen_vault_response_kv1()\n mock_load.return_value = gen_input_config(vault_secrets={\"\": \"some_secret\"})\n\n with pytest.raises(RuntimeError):\n localhost_client.load(\"in.json\")\n\n mock_hvac_client_read.assert_called_with(\"some_secret\")\n mock_load.assert_called_with(\"in.json\")",
"def test_spector_init_error_no_survey(obj_dirobj):\n\tobj = obj_dirobj\n\n\twith pytest.raises(Exception) as e:\n\t\ts = spector.Spector(obj=obj)",
"def test_get_fantray_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Fantray.get, session, node)",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def test_getChild(self):\n request = _FakeRequest([\"a\", \"b\"], [\"x\", \"y\"])\n child = resource.getChildForRequest(self.resource, request)\n self.assertEqual(child, self.leaf)\n\n self.assertEqual(request.prepath, [\"a\", \"b\"])\n self.assertEqual(request.postpath, [\"x\", \"y\"])",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def test_get_next_section_linear_nested(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Background and context\",\n story=story, layout=layout,\n root=True)\n section2 = create_section(title=\"Decisions to be made\", story=story, layout=layout)\n section3 = create_section(title=\"Who has been involved\", \n story=story, layout=layout)\n section4 = create_section(title=\"Next steps\", story=story, layout=layout)\n SectionRelation.objects.create(parent=section1, child=section2)\n SectionRelation.objects.create(parent=section2, child=section3)\n SectionRelation.objects.create(parent=section3, child=section4)\n self.assertEqual(story.structure.get_next_section(section1), \n section2)\n self.assertEqual(story.structure.get_next_section(section2),\n section3)\n self.assertEqual(story.structure.get_next_section(section3),\n section4)\n self.assertEqual(story.structure.get_next_section(section4),\n None)",
"def test_load_str(parser):\n with pytest.raises(ValueError):\n parser.load('jsonexamples/invalid.json')\n\n doc = parser.load(\"jsonexamples/small/demo.json\")\n doc.at_pointer('/Image/Width')",
"def test_getitem():\n atom = ATOMClassifier(X_class, y_class, random_state=1)\n atom.run(\"Tree\")\n assert atom.tree[\"alcohol\"].equals(atom.dataset[\"alcohol\"])\n with pytest.raises(TypeError, match=r\".*subscriptable with type str.*\"):\n print(atom.tree[2])",
"def test_parent_does_not_exist(self):\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')",
"def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')",
"def test_team_template_folders_find_one_get(self):\n pass",
"def test_first_level_from_bids_no_derivatives(tmp_path):\n bids_path = create_fake_bids_dataset(\n base_dir=tmp_path,\n n_sub=1,\n n_ses=1,\n tasks=[\"main\"],\n n_runs=[1],\n with_derivatives=False,\n )\n with pytest.raises(ValueError, match=\"derivatives folder not found\"):\n first_level_from_bids(\n dataset_path=bids_path, task_label=\"main\", space_label=\"MNI\",\n slice_time_ref=None,\n )",
"def test_invalid_session_populate_children(self):\n pod1 = Pod('1')\n node = Node('1', '2', 'Spine1', 'spine', pod1)\n self.assertRaises(TypeError, node.populate_children)",
"def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)",
"def test_bad_structures(bad_structures, mapper):\n for index, structure in enumerate(bad_structures):\n # This is for helping devs finding any errors that may occur\n print(f\"Trying structure number {index} from 'test_bad_structures.json'\")\n with pytest.raises(ValidationError):\n StructureResource(**mapper(MAPPER).map_back(structure))",
"def test_entities__Entity__getField__1(entity):\n with pytest.raises(KeyError):\n entity.getField('asdf')",
"def testStudyPath(self):\n study_path = dicom_path.FromString(tdpu.STUDY_PATH_STR)\n self._AssertStoreAttributes(study_path)\n self.assertEqual(study_path.study_uid, tdpu.STUDY_UID)\n self.assertIsNone(study_path.series_uid)\n self.assertIsNone(study_path.instance_uid)\n self.assertEqual(study_path.type, dicom_path.Type.STUDY)\n self.assertEqual(study_path.dicomweb_path_str, tdpu.DICOMWEB_PATH_STR)\n self.assertEqual(str(study_path), tdpu.STUDY_PATH_STR)\n self.assertEqual(str(study_path.GetStorePath()), tdpu.STORE_PATH_STR)\n self.assertEqual(str(study_path.GetStudyPath()), tdpu.STUDY_PATH_STR)",
"def test_github_path_purepath():\n p = github_api.GithubPath('/tensorflow/datasets/tree/master/')\n sub_p = p / 'some_folder'\n assert isinstance(sub_p, github_api.GithubPath)\n assert str(p) == '/tensorflow/datasets/tree/master'\n assert p == github_api.GithubPath.from_repo('tensorflow/datasets')",
"def test_addPath_obviousCycle(self):\n g = Garden()\n self.assertRaises(CycleError, g.addPath, 'foo', 'v1', [\n ('foo', 'v1'),\n ])",
"def _test_single_prerecorded_api_call(app, path, prerecorded, contexts={}):\n rv = app.get(path)\n assert rv.status_code == 200\n response = json.loads(rv.get_data().decode('utf8'))\n if type(prerecorded) is list:\n response = response['items']\n compare_objects(contexts, '', prerecorded, response)\n return False",
"def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')",
"def test_team_template_folders_id_parent_get(self):\n pass",
"def testGetTagPathsForObjectIDsWithoutData(self):\n self.assertEqual([], list(getTagPathsForObjectIDs([])))",
"def test_invalid_path() -> None:\n path = rsc / \"does-not-exist.ods\"\n with pytest.raises(FileNotFoundError, match=\"does not exist\"):\n read_ods(path)",
"def test_Tree():",
"def constrained_lens_object_test():\n return # TODO",
"def getPath(obj):",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_first_level_from_bids_no_tr(tmp_path_factory):\n bids_dataset = _new_bids_dataset(tmp_path_factory.mktemp(\"no_events\"))\n json_files = get_bids_files(main_path=bids_dataset,\n file_tag=\"bold\",\n file_type=\"json\")\n for f in json_files:\n os.remove(f)\n\n with pytest.warns(\n UserWarning,\n match=\"'t_r' not provided and cannot be inferred\"):\n first_level_from_bids(\n dataset_path=bids_dataset,\n task_label=\"main\",\n space_label=\"MNI\",\n slice_time_ref=None, t_r=None\n )",
"def test_pod_valid_parent(self):\n session = self.login_to_apic()\n parent = PhysicalModel()\n pod = Pod.get(session, parent)\n children = parent.get_children()\n self.assertEqual(pod, children)",
"def test_get_previous_section_linear_nested(self):\n title = (\"Neighborhood Outreach for I-70 Alignment Impacting \"\n \"Elyria, Globeville and Swansea\")\n summary = \"\"\"\n The City of Denver and Colorado Department of Transportation \n (CDOT) are working together to do neighborhood outreach\n regarding the I-70 alignment between Brighton Boulevard and\n Colorado. For detailed information on the neighborhood outreach\n efforts please visit www.DenverGov.org/ccdI70.\n \"\"\"\n byline = \"Denver Public Works and CDOT\"\n story = create_story(title=title, summary=summary, byline=byline)\n section1 = create_section(title=\"Background and context\",\n story=story,\n root=True)\n section2 = create_section(title=\"Decisions to be made\", story=story)\n section3 = create_section(title=\"Who has been involved\", \n story=story)\n section4 = create_section(title=\"Next steps\", story=story)\n SectionRelation.objects.create(parent=section1, child=section2)\n SectionRelation.objects.create(parent=section2, child=section3)\n SectionRelation.objects.create(parent=section3, child=section4)\n self.assertEqual(story.structure.get_previous_section(section1), \n None)\n self.assertEqual(story.structure.get_previous_section(section2),\n section1)\n self.assertEqual(story.structure.get_previous_section(section3),\n section2)\n self.assertEqual(story.structure.get_previous_section(section4),\n section3)",
"def test_read_json(self, magic_0, magic_1):\n expected = {\n 'key_1': [1, 2, 3, 4, 5],\n 'key_2': ['a', 'b', 'c', 'd', 'e']\n }\n result = helpers.read_json(r\"path\")\n self.assertEqual(expected, result)",
"def test_path(tmp_path: Path) -> None:\n path = tmp_path / \"repository\"\n repository = Repository.init(path)\n assert path == repository.path",
"def test_nested_query():\n schema = graphene.Schema(query=NestedQuery)\n response = schema.execute(\"{topLevel {name, leaf {value , leaflets {value} } } }\")\n assert to_dict(response.data) == {\n \"topLevel\": {\n \"name\": \"top level name\",\n \"leaf\": {\n \"value\": \"some leaf value\",\n \"leaflets\": [{\"value\": \"subleaf1\"}, {\"value\": \"subleaf2\"}],\n },\n }\n }",
"def test_nested():\n # pylint: disable=no-member\n assert issubclass(NestedSchema, graphene.ObjectType)\n assert isinstance(NestedSchema.name, graphene.String)\n assert isinstance(NestedSchema.leaf, graphene.Field)\n assert str(NestedSchema.leaf.type) == \"Leaf\"\n assert isinstance(NestedSchema.leaf.type.value, graphene.String)\n assert isinstance(NestedSchema.leaf.type.leaflets, graphene.List)",
"def test_pod_valid_session(self):\n session = 'bogus'\n parent = PhysicalModel()\n self.assertRaises(TypeError, Pod.get, session, parent)",
"def test_dotted_named_entities_circular_references():\n from tests.dottedname.foo.bar.bop import Property\n\n p = Property(\n name='outer',\n nested={\n 'properties': [\n Property(name='inner')\n ]\n }\n )\n assert p\n assert isinstance(p.nested.properties, list)\n assert p.nested.properties[0].name == 'inner'",
"def test_get_fails_when_getting_non_dict_attribute_in_between(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.skills.some_attribute\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = \"Attribute 'skills' is not a dictionary.\"\n assert result.exception.message == s",
"def test_example_json(self):\n json_data = '{ \"a\": 1, \"b\": true, \"c\": { \"d\": 3, \"e\": \"test\" } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened),\n json.loads('{ \"a\": 1, \"b\": true, \"c.d\": 3, \"c.e\": \"test\" }'))",
"def testGetTagPathsAndObjectIDsWithoutData(self):\n self.assertEqual([], list(getTagPathsAndObjectIDs([])))",
"def test_circular_nested(self):\n obj = {}\n obj[\"list\"] = [{\"obj\": obj}]\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)",
"def test_basic_prop(chikin):\n assert str(chikin) == '[document]' == chikin.name\n assert chikin.depth == 0\n assert len(chikin.branches) == 2\n assert isinstance(chikin.section, TreeOfContents)",
"def test_nested():\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.root.metadata.choose(lambda m: (m[\"name\"], m.uid))))\n assert \"type\" in res # from conditions\n assert \"reason\" in res # from conditions\n assert \"name\" in res # from metadata\n assert \"uid\" in res # from metadata",
"def test_load_data_base(self):\n pltp = get_object_or_404(Loaded_Pltp, name=\"test\")\n pl = pltp.loaded_pl_set.all()\n \n self.assertEqual(pltp.name, \"test\")\n #test json\n \n self.assertEqual(len(pl), 2)\n \n self.assertEqual(pl[0].name, \"test1\")\n #test dirname + json\n self.assertEqual(pl[1].name, \"test2\")\n #test dirname + json",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def testGetTagPathsAndObjectIDsWithUnknownObjectID(self):\n user = createUser(u'user', u'secret', u'User', u'[email protected]')\n user.namespaceID = createNamespace(user, user.username, None).id\n tag = createTag(user, user.namespace, u'name1')\n createTag(user, user.namespace, u'name2')\n createTagValue(user.id, tag.id, uuid4(), 42)\n self.assertEqual([], list(getTagPathsAndObjectIDs([uuid4()])))",
"def test_drs_get_object_failure(self, testapp, testing_download): # noQA fixture\n res = testapp.get(testing_download)\n drs_object_uri = res.json['uuid']\n\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/not_a_uri/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/{drs_object_uri}/accesss/https')",
"def test_get_file_object(self):\n pass",
"def test_tree_intersection_name_exists():\n assert tree_intersection",
"def testInitialize(self):\n path_spec = apm_path_spec.APMPathSpec(parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n location='/apm2', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n entry_index=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n entry_index=1, location='/apm2', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n with self.assertRaises(ValueError):\n apm_path_spec.APMPathSpec(parent=None)\n\n with self.assertRaises(ValueError):\n apm_path_spec.APMPathSpec(\n parent=self._path_spec, bogus='BOGUS')",
"def assertSelfReferential(self, obj):\r\n copy = self.get_json(obj[\"url\"])\r\n self.assertEqual(obj, copy)",
"def test_get_foods(self):\n pass",
"def test_invalid_path(self, tmp_path):\n other_path = tmp_path / \"other\"\n other_path.mkdir()\n pattern = (\n \"Could not find any of configuration files '.kedro.yml, pyproject.toml'\"\n )\n with pytest.raises(KedroContextError, match=re.escape(pattern)):\n load_context(str(other_path))",
"def test_banana_subjects_path(self):\n with self.assertRaises(ValueError) as exc:\n t = self.create_request_object(dataset_type=\"pineapple\")\n self.assertEqual(\"Dataset type not 'regular' or 'raw' is pineapple\",\n str(exc.exception))"
] | [
"0.608936",
"0.6087938",
"0.60613996",
"0.59348756",
"0.59342015",
"0.59085965",
"0.5834793",
"0.58287233",
"0.5761605",
"0.57341295",
"0.5677793",
"0.56325066",
"0.5624134",
"0.558056",
"0.55728745",
"0.5569612",
"0.55691016",
"0.5559063",
"0.5552797",
"0.5551889",
"0.5543451",
"0.553241",
"0.55232847",
"0.5497863",
"0.5474393",
"0.5466955",
"0.54574656",
"0.5449676",
"0.5449676",
"0.54269326",
"0.5425631",
"0.5419794",
"0.5418897",
"0.541867",
"0.5413921",
"0.53960633",
"0.5392045",
"0.53879625",
"0.5376783",
"0.53730863",
"0.53716594",
"0.53693086",
"0.53665423",
"0.536469",
"0.53588873",
"0.5327623",
"0.53266346",
"0.53233755",
"0.53218347",
"0.53016555",
"0.5295311",
"0.5295142",
"0.529509",
"0.5286119",
"0.52780104",
"0.5277815",
"0.52762806",
"0.52743495",
"0.527239",
"0.52720624",
"0.5255636",
"0.52494234",
"0.52400964",
"0.5237788",
"0.5232357",
"0.523008",
"0.52288586",
"0.52243936",
"0.522353",
"0.5222273",
"0.5217044",
"0.52096206",
"0.51993054",
"0.51984894",
"0.51935065",
"0.5192011",
"0.5191165",
"0.5187071",
"0.5183733",
"0.5165713",
"0.5162262",
"0.51505727",
"0.51483476",
"0.5144465",
"0.51430136",
"0.5137875",
"0.51334405",
"0.513093",
"0.51304317",
"0.5128711",
"0.51234907",
"0.51226854",
"0.51198393",
"0.51165414",
"0.51113147",
"0.51090777",
"0.51041657",
"0.5103725",
"0.5102895",
"0.50997764"
] | 0.6654142 | 0 |
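Several of the helpers quoted among the negatives above (e.g. `nested_get`, `get_dict_value_from_path`, `_get_from_nest`) share one tolerant-lookup pattern: walk a key path through nested dicts and bail out with `None` or a caller-supplied default the moment a segment is missing or non-dict. A minimal sketch of that pattern, assuming plain `dict` input; the helper name `safe_nested_get` is illustrative, not taken from any of the quoted codebases:

```python
from typing import Any, Iterable


def safe_nested_get(data: Any, path: Iterable[str], default: Any = None) -> Any:
    # Tolerant walk: return the default on the first missing or non-dict
    # segment instead of raising, like the lookup helpers quoted above.
    current = data
    for key in path:
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current


assert safe_nested_get({"foo": {"bar": "bar_value"}}, ["foo", "bar"]) == "bar_value"
assert safe_nested_get({"foo": {"bar": "bar_value"}}, ["foo", "baz"]) is None
assert safe_nested_get({"foo": "not-a-dict"}, ["foo", "bar"], default=0) == 0
```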
Test that the get fails because the path points to a non-dict object. | def test_get_fails_when_getting_non_dict_attribute(self):
attribute = "protocols"
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", f"skills.dummy.{attribute}.protocol"],
standalone_mode=False,
)
assert result.exit_code == 1
s = f"Attribute '{attribute}' is not a dictionary."
assert result.exception.message == s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None",
"def test_no_path():\n test = [{'key': 'val'}, []]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'path list empty' in str(t_result.failure())",
"def test_json_error(self):\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_Path",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def test_bad_get_property(self):\n s = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n with pytest.raises(AttributeError):\n s.bad_get",
"def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())",
"def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False",
"def testNotExistingPath(self):\n with h5py.File(self.h5_fname, 'a') as f:\n f['data'] = 1\n\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='ignore')\n self.assertFalse(ddict)\n\n with LoggingValidator(dictdump_logger, error=1):\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='log')\n self.assertFalse(ddict)\n\n with self.assertRaises(KeyError):\n h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='raise')",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def test_drs_get_object_failure(self, testapp, testing_download): # noQA fixture\n res = testapp.get(testing_download)\n drs_object_uri = res.json['uuid']\n\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/not_a_uri/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/{drs_object_uri}/accesss/https')",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)",
"def test_get_fails_when_getting_non_dict_attribute_in_between(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.skills.some_attribute\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = \"Attribute 'skills' is not a dictionary.\"\n assert result.exception.message == s",
"def test___getitem___invalid_index(self):\n with pytest.raises(TypeError):\n self.Person.objects()[\"a\"]",
"def nested_get(\n d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True\n) -> t.Optional[t.Any]:\n for name, key in path:\n d = d.get(key) # type: ignore\n if d is None:\n if raise_on_missing:\n name = \"table\" if name == \"this\" else name\n raise ValueError(f\"Unknown {name}: {key}\")\n return None\n\n return d",
"def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')",
"def test__init__keyerror(self):\n mocked_reconstructor = Mock()\n db_response = {}\n resp = GetResponse(db_response, mocked_reconstructor)\n assert resp.item == None",
"def test_getter_key_error(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'get_child_by_name', return_value=None)\n self.mock_object(root, 'has_attr', return_value=None)\n\n self.assertRaises(KeyError,\n netapp_api.NaElement.__getitem__,\n root, '123')",
"def test_safeGet(self):\n self.assertIs(\n BMConfigParser().safeGet('nonexistent', 'nonexistent'), None)\n self.assertEqual(\n BMConfigParser().safeGet('nonexistent', 'nonexistent', 42), 42)",
"def test_get_none(self):\n models.storage.close()\n models.storage = models.engine.db_storage.DBStorage()\n models.storage.reload()\n obj = self.populate()\n\n found = models.storage.get(type(obj[0]), None)\n self.assertEqual(found, None)\n with self.assertRaises(KeyError):\n models.storage.get(None, obj[0].id)\n with self.assertRaises(KeyError):\n models.storage.get(None, None)",
"def testDictDoesNotContain(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" not in d1:\n print d1[\"nonsense\"] # Dead code\n else:\n print d1[\"x\"]\n\n d2 = {}\n if \"x\" not in d2:\n pass\n else:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" not in d3:\n print d3[\"y\"]\n else:\n print d3[\"x\"]\n \"\"\")",
"def test_get_path_not_exist(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n expected = None\n actual = Hash(self.file).get()\n self.assertEqual(expected, actual)",
"def test_get_nonexistant_data(self):\n response = self.client.get(\"/api/elections/1\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"message\"], \"Could not find election with id 1\")",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def test_cache_get_non_existent_item(self):\n self.assertEqual(self.cache.get('ghost'), None)\n self.assertEqual(self.cache.get('ghost', 'never exists'), 'never exists')",
"def test_entities__Entity__getField__1(entity):\n with pytest.raises(KeyError):\n entity.getField('asdf')",
"def test_invalid_file_path(self):\n # Test with an invalid file path\n\n #setup\n filepath = \".data/kano/test.txt\"\n expected_result = {\n \"type\": \"\",\n \"city\": \"\",\n \"state\": \"\",\n \"coordinates\": [\"\", \"\"],\n '': {}\n }\n\n #result\n assert extractor.get_metadata_from_filepath(filepath) == expected_result",
"def test_badyvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {\"foo\": 1}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_other_user_kvs_get_failure(self):\r\n with self.assertRaises(AssertionError):\r\n self.kvs.get(self.other_key_factory(self.existing_field_name))",
"def test_dict_with_invalid_version(self):\n\n invalid_version_info = (-1, -1, -1)\n d = LexicalDictionary(invalid_version_info)\n\n with self.assertRaises(FileNotFoundError):\n lp = Lexpp(external_dict=d)",
"def test_get_secret_invalid_path(self, mget):\n data = json.dumps({\"data\": {}})\n mget.return_value = self._mock_response(content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secret('this/path/does/not/exist', 'null')",
"def test_get(self):\n client = kazoo.client.KazooClient()\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123}', None)\n self.assertEqual({'xxx': 123}, zkutils.get(client, '/foo'))\n\n # parsing error\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123', None)\n self.assertEqual(\n '{xxx: 123',\n zkutils.get(client, '/foo', strict=False)\n )\n self.assertRaises(yaml.YAMLError, zkutils.get, client, '/foo')\n\n kazoo.client.KazooClient.get.return_value = (None, None)\n self.assertIsNone(zkutils.get(client, '/foo'))",
"def test_no_such_key():\n test = [{'key': 'val1'}, ['missing']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'missing' in str(t_result.failure())",
"def test_entity_doesnt_exist(self):\n key = ndb.Key(models.InstanceTemplateRevision, 'fake-key')\n urls = snapshots.fetch(key)\n self.failIf(urls)",
"def test_quest_load_data_fail(testing_quest_page):\n testing_quest_page.save()\n\n # fetch the data\n doc = testing_quest_page.doc_ref.get()\n data = testing_quest_page.storage_model.parse_obj(doc.to_dict())\n\n # mess with the data\n data.serialized_data = json.dumps({\"this\": \"nonesense\"})\n testing_quest_page.doc_ref.set(data.dict())\n\n # try to load with the bad version\n with pytest.raises(QuestLoadError):\n testing_quest_page.load()\n\n # cleanup\n testing_quest_page.delete()",
"def test_get_invalid_line(self):\n ars = self.ar[2009][11]['general']\n self.assertRaises(KeyError, ars.__getitem__, 'invalid_section')",
"def test_get_fails_when_setting_non_dict_attribute(self):\n behaviour_arg_1 = \"behaviour_arg_1\"\n path = f\"skills.dummy.behaviours.dummy.args.{behaviour_arg_1}.over_the_string\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, \"new_value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{behaviour_arg_1}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')",
"def test_load_json_file_not_found_error() -> None:\n fname = \"invalid_file.json\"\n\n assert load_json(fname) == {}\n assert load_json(fname, default=\"\") == \"\"\n assert load_json_object(fname) == {}\n assert load_json_object(fname, default={\"Hi\": \"Peter\"}) == {\"Hi\": \"Peter\"}\n assert load_json_array(fname) == []\n assert load_json_array(fname, default=[\"Hi\"]) == [\"Hi\"]",
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def strict_path_lookup(data_obj, xj_path, force_type=None):\n\n value, exists = path_lookup(data_obj, xj_path)\n if exists:\n if force_type is not None:\n if not isinstance(value, force_type):\n raise XJPathError('Found value is a wrong type',\n (xj_path, force_type))\n return value\n else:\n raise XJPathError('Path does not exist', (xj_path,))",
"def test_get_invalid_section(self):\n arm = self.ar[2009][11]\n self.assertRaises(KeyError, arm.__getitem__, 'invalid_section')",
"def test_nonexistentObject(self):\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.remoteEntropyStore.getObject(objectId)\n response = self.agent.responses.pop()\n self.assertEqual([], self.agent.responses)\n response.code = http.NOT_FOUND\n response.respond('Not found')\n f = self.failureResultOf(d, NonexistentObject)\n self.assertEqual(f.value.objectId, objectId)",
"def test_b_get_no_items(self):\n storage = FileStorage()\n get = storage.get(User, 123)\n self.assertEqual(None, get)",
"def testNotFound(self):\n response = requests.get(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))",
"def test_throws_item_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n Item.Schema().loads(json.dumps(item_missing_key))",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']",
"def test_to_json_file_non_dict(self):\n\n output_file = \"this_file_is_a_ghost\"\n File(output_file).delete()\n\n self.assertRaises(TypeError, lambda: Dict(1).to_json_file(output_file))\n self.assertRaises(TypeError, lambda: Dict(\"100\").to_json_file(output_file))\n self.assertRaises(\n TypeError, lambda: Dict(\"{'hello': 'world'}\").to_json_file(output_file)\n )\n\n File(output_file).delete()",
"def test_get_fields_and_lookups_invalid_lookup(self):\n with self.assertRaises(exceptions.FieldError):\n utils.get_fields_and_lookups(Protected, 'protector__date__hour')",
"def test_entities__Entity__getRawField__1(entity):\n with pytest.raises(KeyError):\n entity.getRawField('asdf')",
"def testNotFound(self):\n response = requests.get(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.place == storage.get(Place, self.place.id))\n self.assertEqual(json_data['error'], 'Not found')",
"def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")",
"def test_get_or_440(self):\n self.Person.drop_collection()\n test_person = self.Person(name='Test')\n test_person.save()\n t2 = get_object_or_404(self.Person, id=test_person.id)\n self.assertEqual(test_person.id, t2.id)\n self.assertRaises(Http404, get_object_or_404, self.Person, id=\"4ba90141142bb528b1000001\")\n self.Person.drop_collection()",
"def test_get_nonexistent_runtime_property_json(self):\n script_path = self._create_script(\n linux_script='''#! /bin/bash -e\n ctx -j instance runtime-properties nonexistent\n ''',\n windows_script='''\n ctx -j instance runtime-properties nonexistent\n ''')\n\n with self.assertRaises(tasks.ProcessException) as cm:\n self._run(script_path=script_path)\n\n self.assertIn(os.path.basename(script_path), cm.exception.command)\n self.assertEqual(cm.exception.exit_code, 1)\n self.assertTrue(string_in_log('RequestError', self._caplog))\n self.assertTrue(string_in_log('nonexistent', self._caplog))",
"def test_get_key_not_defined_yet(self):\n storage = SessionStorage()\n\n self.assertNotIn('key1', storage)\n s1 = storage['key1']\n self.assertIn('key1', storage)\n\n self.assertNotIn('key2', storage)\n s2 = storage['key2']\n self.assertIn('key2', storage)\n\n self.assertIsNot(s1, s2)",
"def testNotFound(self):\n response = requests.post(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_other_typeerror_2(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, '1')",
"def test_update_to_non_json():\n starting_db = create_db(STARTING_DB_INPUT)\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n \"this isn't json :(\"\n )",
"def test_entities__EntityOrder__get__2(entityOrder, unknownEntity):\n with pytest.raises(KeyError):\n entityOrder.get(unknownEntity)",
"def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)",
"def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')",
"def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")",
"def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")",
"def test_get_property_missing(self):\r\n try:\r\n value = self.config.option2\r\n assert value\r\n except Exception as e:\r\n self.assertIsInstance(e, OptionValueNotSetError)\r\n self.assertNotIn('option2', self.config.values)",
"def test_get_secrets_invalid_path(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets('this/path/does/not/exist')",
"def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')",
"def test_get_users_from_invalid_json():\n with pytest.raises(ValueError):\n Users.from_json(file_path='{0}/json_input/invalid.json'.format(os.path.dirname(os.path.abspath(__file__))))",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def test_empty_dict(self):\n argument = {}\n with self.assertRaises(IndexError):\n file_io.top_ten(argument)",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_save_json_with_invalid_step(temp_dir):\n data = json.dumps({\"k\": \"v\", \"list\": [1, 2, 3]})\n\n with pytest.raises(ValueError):\n save_json(temp_dir, data, step={\"invalid\": \"dict\"})",
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_badxvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {\"foo\": 1}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_get_required_parameters_by_path_can_be_asserted(self) -> None:\n\n self.parameter_store.client.get_parameters_by_path.return_value = {\n \"Parameters\": [\n # Only one of the required parameters is returned.\n {\"Name\": \"/path/sub/key\", \"Value\": \"foo_ssm_value_1\"},\n {\"Name\": \"/path/sub/key2\", \"Value\": \"foo_ssm_value_2\"},\n ]\n }\n\n expected_msg = \"Missing parameters [baz, foo/bar] on path /path/sub/\"\n with self.assertRaises(MissingParameterError, msg=expected_msg) as exc_info:\n self.parameter_store.get_parameters_by_path(\n \"/path/sub/\", required_parameters={\"baz\", \"foo/bar\", \"key\"}\n )\n assert exc_info.exception.parameter_path == \"/path/sub/\"\n assert len(exc_info.exception.parameter_names) == 2\n assert sorted(exc_info.exception.parameter_names) == sorted([\"baz\", \"foo/bar\"])",
"def test_invalid_path_get(self):\n static_path = self.finder.find('file.ext')\n self.assertIsNone(static_path)",
"def test_list_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_traversal_invalid_string(traversal_test_trie):\n with pytest.raises(KeyError):\n gen = traversal_test_trie.traversal('invalid')\n next(gen)",
"def test_get_single_bad_item(test_client):\n\n response = test_client.get(BAD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND",
"def test_nonexistentObject(self):\n objectId = u'sha256:NOSUCHOBJECT'\n d = self.contentStore.getObject(objectId)\n return self.assertFailure(d, NonexistentObject\n ).addCallback(lambda e: self.assertEquals(e.objectId, objectId))",
"def test_fetch_nonexist_pdbid(self):\n pdbid = '1000'\n with self.assertRaisesRegex(ValueError, 'PDB ID not exist'):\n fetch(pdbid)",
"def get_safe(dict_instance, keypath, default=None):\n try:\n obj = dict_instance\n keylist = keypath if type(keypath) is list else keypath.split('.')\n for key in keylist:\n obj = obj[key]\n return obj\n except Exception, ex:\n return default",
"def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_validate_bad_data(self, value):\n opt = scheme.DictOption('test-opt', scheme.Scheme())\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def test_empty_dict(self):\n read_on_template = field_template_read({})\n self.assertFalse(read_on_template)\n self.assertEqual(read_on_template, {})",
"def test_failure(t):\n objmap = ObjectMap({}, modname=\"py.module.name\", classname=\"ClassName\")\n ret = _create_object(objmap)\n t.assertIsNone(ret)",
"def __getitem__(self, path):\n\n items = self.__dict__\n\n for key in self.__check_path__(path):\n if not isinstance(items, (dict, PlaneDict)):\n raise KeyError(key)\n items = items[key]\n\n if isinstance(items, dict):\n items = PlaneDict(items)\n\n return items",
"def test_properties_get(self):\n pass",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_no_data():\n test = [{}, ['keys']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'keys' in str(t_result.failure())",
"def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)",
"def test_getitem(self):\n for name, det in self.sampler.detectors.items():\n fromGetItem = self.sampler[name]\n self.assertIs(det, fromGetItem, msg=name)\n with self.assertRaises(KeyError):\n self.sampler['this should fail']",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)"
] | [
"0.7242658",
"0.69208175",
"0.6540039",
"0.6447734",
"0.64288974",
"0.63505775",
"0.63080657",
"0.6304861",
"0.6304807",
"0.6277467",
"0.62695235",
"0.6268575",
"0.62654454",
"0.6243012",
"0.62048596",
"0.6178751",
"0.6153575",
"0.61444944",
"0.6129947",
"0.60959595",
"0.60739017",
"0.60687387",
"0.60613734",
"0.6054829",
"0.6047759",
"0.60417306",
"0.60377896",
"0.59949595",
"0.598962",
"0.598962",
"0.595189",
"0.59468967",
"0.5946779",
"0.5939544",
"0.5935956",
"0.5929859",
"0.59223896",
"0.59172904",
"0.5912087",
"0.591027",
"0.59076905",
"0.5905516",
"0.5895364",
"0.5894207",
"0.5881866",
"0.5881749",
"0.5855901",
"0.58352625",
"0.5803372",
"0.5802861",
"0.57943773",
"0.57880944",
"0.5787942",
"0.5778917",
"0.5778917",
"0.5775004",
"0.5769612",
"0.5758512",
"0.5758144",
"0.5756897",
"0.5756585",
"0.5751329",
"0.57403743",
"0.5740084",
"0.5738257",
"0.5732827",
"0.57232976",
"0.57176846",
"0.5716234",
"0.57100594",
"0.57100594",
"0.5709764",
"0.570838",
"0.5703335",
"0.5701509",
"0.5696041",
"0.56887037",
"0.5687835",
"0.5686418",
"0.56840706",
"0.5682785",
"0.56812644",
"0.568087",
"0.56791687",
"0.5670201",
"0.5667884",
"0.5661445",
"0.5661421",
"0.5659765",
"0.56547666",
"0.56529266",
"0.5647439",
"0.56413585",
"0.56399757",
"0.5636667",
"0.5630768",
"0.56305844",
"0.5630137",
"0.5625985",
"0.5623372"
] | 0.5976294 | 30 |
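The query in the preceding record, like the one that follows, describes the strict counterpart to the tolerant lookup sketched earlier: a dotted-path `config get` that raises with "Attribute '...' is not a dictionary." when an intermediate segment resolves to a non-dict value. A minimal sketch of that guard, assuming plain nested dicts; `get_by_dotted_path` is a hypothetical name, not the CLI's actual implementation:

```python
from typing import Any, Dict


def get_by_dotted_path(data: Dict[str, Any], dotted_path: str) -> Any:
    # Strict walk: refuse to descend through a non-dict value, reporting the
    # attribute that holds it -- the failure mode the CLI tests assert.
    current: Any = data
    holder = "<root>"
    for part in dotted_path.split("."):
        if not isinstance(current, dict):
            raise ValueError(f"Attribute '{holder}' is not a dictionary.")
        if part not in current:
            raise KeyError(f"Attribute '{part}' does not exist.")
        current = current[part]
        holder = part
    return current


config = {"skills": {"dummy": {"protocols": ["default"]}}}
assert get_by_dotted_path(config, "skills.dummy.protocols") == ["default"]
try:
    # 'protocols' holds a list, so descending into it must fail loudly.
    get_by_dotted_path(config, "skills.dummy.protocols.protocol")
except ValueError as exc:
    assert str(exc) == "Attribute 'protocols' is not a dictionary."
```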
Test that the get fails because an object in between is not a dictionary. | def test_get_fails_when_getting_non_dict_attribute_in_between(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "agent.skills.some_attribute"],
standalone_mode=False,
)
assert result.exit_code == 1
s = "Attribute 'skills' is not a dictionary."
assert result.exception.message == s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_badyvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {\"foo\": 1}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)",
"def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())",
"def check_for_dict(check):",
"def test_badxvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {\"foo\": 1}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_get_cases_for_dict(self):\n pass",
"def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")",
"def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")",
"def test_obj_dict(self):\n obj = storage.all()\n self.assertIsInstance(obj, dict)",
"def test_toomanyargsfortodict(self):\n with self.assertRaises(TypeError) as e:\n b1 = BaseModel()\n b1.to_dict(\"foo\")\n self.assertEqual(str(e.exception), \"to_dict() takes 1 positional\" +\n \" argument but 2 were given\")",
"def test_invalid_dict(self):\r\n data = '\"\\\\\"Test\\\\tTesting\"'\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('JSON should be dict', response.content)\r\n self.assertEqual(response.status_code, 400)",
"def test_creation_dict():\n with pytest.raises(ValueError) as __:\n value = dict()\n __ = param.Integer(value=value)",
"def testDictDoesNotContain(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" not in d1:\n print d1[\"nonsense\"] # Dead code\n else:\n print d1[\"x\"]\n\n d2 = {}\n if \"x\" not in d2:\n pass\n else:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" not in d3:\n print d3[\"y\"]\n else:\n print d3[\"x\"]\n \"\"\")",
"def test_errors_dict_interface():\n validator = Object(properties={\"example\": Integer()})\n value, error = validator.validate_or_error({\"example\": \"abc\"})\n assert dict(error) == {\"example\": \"Must be a number.\"}\n\n validator = Object(properties={\"example\": Integer()})\n value, error = validator.validate_or_error({\"example\": \"abc\"})\n assert error[\"example\"] == \"Must be a number.\"\n\n validator = Object(additional_properties=Object(additional_properties=Integer()))\n value, error = validator.validate_or_error({\"example\": {\"nested\": \"abc\"}})\n assert dict(error) == {\"example\": {\"nested\": \"Must be a number.\"}}\n\n validator = Integer()\n value, error = validator.validate_or_error(\"abc\")\n assert error[\"\"] == \"Must be a number.\"\n\n validator = Integer()\n value, error = validator.validate_or_error(\"abc\")\n assert dict(error) == {\"\": \"Must be a number.\"}",
"def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')",
"def test_empty_dict_coerce():\n\n @type_checked\n def _run_test(thing:{}):\n assert isinstance(thing, dict)\n\n _run_test([(\"something\", \"is_true\")])",
"def test_issue_74():\n patient = Patient(active=True, address=[])\n assert \"address\" not in patient.dict()\n assert patient.dict(exclude_none=False)[\"address\"] == []",
"def test___getitem___invalid_index(self):\n with pytest.raises(TypeError):\n self.Person.objects()[\"a\"]",
"def test__init__keyerror(self):\n mocked_reconstructor = Mock()\n db_response = {}\n resp = GetResponse(db_response, mocked_reconstructor)\n assert resp.item == None",
"def test_circular_dict(self):\n obj = {}\n obj[\"obj\"] = obj\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)",
"def test_dictionary_coerce():\n\n @type_checked\n def _run_test(something:{int: str}):\n for key, value in something.items():\n assert isinstance(key, int)\n assert isinstance(value, str)\n\n _run_test(something={123: \"abc\", 2314: 12312, \"123\": \"abc\"})",
"def test_get_object_dict(self):\n review = self.review[0].get_dict()\n self.assertIsNotNone(review['reviewer_id'])\n self.assertIsNotNone(review['book_id'])\n self.assertEqual(5, review['rate'])",
"def test_search_validator_bad_data():\n sval = helpers.search_validator()\n assert not sval.validate({})\n bad = dict(foo=\"bar\", baz=42)\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {}}')\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {\"vin\": \"\"}}')\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {\"foo\": \"bar\"}}')\n assert not sval.validate(bad)",
"def test_15_dict_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, {})\n self.assertEqual(\n \"height must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle({\"a\": 1, \"b\": 2, \"c\": 3}, 2)\n self.assertEqual(\n \"width must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, {\"a\": 1})\n self.assertEqual(\n \"x must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, 0, {\"hi\": None})\n self.assertEqual(\n \"y must be an integer\",\n str(x.exception))",
"def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty, dicty],\n Base.from_json_string(json.dumps([dicty, dicty])))",
"def test_if_it_accepts_dictionary(self):\n with self.assertRaises(TypeError):\n prime_numbers({})",
"def test_empty_dict():\n\n @type_checked\n def _run_test(thing:{}):\n assert isinstance(thing, dict)\n\n _run_test({\"foo\": \"bar\"})",
"def test_example():\n with pytest.raises(\n AssertionError,\n match=expected_error_match,\n ):\n actual = {\n \"test1\": 1,\n \"test2\": \"foo\",\n \"bar\": {\"cheese\": \"parrot\", \"rabbit\": [\"black\", \"knight\"], \"other\": \"oops\"},\n }\n assert actual == Alike(\n {\n \"something\": A.is_missing,\n \"test2\": \"foo\",\n \"test1\": A < 2,\n \"bar\": {\n \"cheese\": A.is_present,\n \"rabbit\": [\"black\", \"wrong\"],\n \"other\": A.is_missing,\n },\n }\n )",
"def test_empty_dict_by_name():\n\n @type_checked\n def _run_test(thing:dict):\n assert isinstance(thing, dict)\n\n _run_test({\"baz\": True})",
"def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)",
"def test_empty_dict_failure():\n\n @type_checked\n def _run_test(thing:{}): pass\n\n with pytest.raises(TypeError):\n _run_test(1)",
"def test_getitem_required(self):\n self.assertEqual(self.tester['physical_location'], 'ANL')\n self.assertEqual(self.tester['collection_timestamp'],\n datetime(2011, 11, 11, 13, 00, 00))\n self.assertTrue(self.tester['has_physical_specimen'])",
"def test_two_keys():\n test = [{'key1': {'key2': 'val1'}}, ['key1', 'key2']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'",
"def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty], Base.from_json_string(json.dumps([dicty])))",
"def check_dict(dic, validator, messages):\n check_dict_alg(dic, validator, [], messages, validator, \"NoObject\")",
"def test_dict_keys_time_err(self):\n val = datetime.time(12, 15, 59, 111, tzinfo=pytz.timezone(\"Asia/Shanghai\"))\n with pytest.raises(orjson.JSONEncodeError):\n orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS)",
"def test_dict(self, obj: dict) -> None:\r\n properties = read_properties(obj)\r\n for key, value in properties.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if read_type(value) == 'object':\r\n logger.debug('dict -> dict')\r\n self.test_dict(obj=value)\r\n elif read_type(value) == 'array':\r\n logger.debug('dict -> list')\r\n self.test_list(array=value)",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_str_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(\n lambda: inner_test(param='{\"json\": \"Not allowed.\"}'), 3113\n )",
"def test_dict(self):\n self.assertValue(\n {'foo': 'foo', 'bar': 43, 'zippy': 'zoo'},\n 'bar: 43 foo: foo zippy: zoo\\n'\n )",
"def test_fails_on_dict(self):\n invalid_credentials_dict_not_array_twine = \"\"\"\n {\n \"credentials\": {\n \"name\": \"MY_API_SECRET_KEY\",\n \"purpose\": \"Token for accessing a 3rd party API service\"\n }\n }\n \"\"\"\n\n with self.assertRaises(exceptions.InvalidTwine):\n Twine(source=invalid_credentials_dict_not_array_twine)",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def expect_obj(expected, actual):\n return {k: v for k, v in expected.iteritems() if actual.get(k) != v}",
"def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')",
"def test_empty_dict(self):\n argument = {}\n with self.assertRaises(IndexError):\n file_io.top_ten(argument)",
"def testDictContains(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" in d1:\n print d1[\"x\"]\n else:\n print d1[\"nonsense\"] # Dead code\n\n d2 = {}\n if \"x\" in d2:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" in d3:\n print d3[\"x\"]\n else:\n print d3[\"y\"]\n \"\"\")",
"def test_badsizevaluedicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square({\"foo\": 1}, 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_iterate_arlequin_with_dict_return():\n for entry in iterate_arlequin(SNPS_TWO_POPS_TEXT):\n assert isinstance(entry, dict)",
"def testDictMaybeContains(self):\n ty = self.Infer(\"\"\"\\\n if __random__:\n x = {\"a\": 1, \"b\": 2}\n else:\n x = {\"b\": 42j}\n if \"a\" in x:\n v1 = x[\"b\"]\n if \"a\" not in x:\n v2 = x[\"b\"]\n \"\"\", deep=False)\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import Dict\n x = ... # type: Dict[str, int or complex]\n v1 = ... # type: int\n v2 = ... # type: complex\n \"\"\")",
"def test_response_json(self):\n response = self.client.search()\n self.assertTrue(isinstance(response.json, dict))\n\n\n # with invalid json\n from rubber import settings\n settings.RUBBER_MOCK_HTTP_RESPONSE = \"\"\";;;\"\"\"\n \n response = self.client.search()\n self.assertIsNone(response.json)",
"def test_empty_dict(self):\n read_on_template = field_template_read({})\n self.assertFalse(read_on_template)\n self.assertEqual(read_on_template, {})",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']",
"def test_get_fields_and_lookups_invalid_lookup(self):\n with self.assertRaises(exceptions.FieldError):\n utils.get_fields_and_lookups(Protected, 'protector__date__hour')",
"def test_get_book(session, client, book1_dict, book2_dict, book3_dict, expected_book3_fulldict):\n get_response = client.get(\"/books/1\")\n assert 404 == get_response.status_code\n\n \"\"\"Add data to db\"\"\"\n json_data = json.dumps(book1_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n json_data = json.dumps(book2_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n json_data = json.dumps(book3_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n \"\"\"get on non-empy resource\"\"\"\n get_response = client.get(\"/books/3\")\n assert 200 == get_response.status_code\n payload = get_response.get_json()\n assert expected_book3_fulldict == payload\n\n \"\"\"get with invalid input\"\"\"\n get_response = client.get(\"/books/L\")\n assert 400 == get_response.status_code\n\n \"\"\"get with non-existant resource\"\"\"\n get_response = client.get(\"books/7\")\n assert 404 == get_response.status_code\n\n \"\"\"get with out of range\"\"\"\n get_response = client.get(\"books/-1\")\n assert 400 == get_response.status_code",
"def test_get_nonexistant_data(self):\n response = self.client.get(\"/api/elections/1\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"message\"], \"Could not find election with id 1\")",
"def test_entities__EntityOrder__get__2(entityOrder, unknownEntity):\n with pytest.raises(KeyError):\n entityOrder.get(unknownEntity)",
"def test_isadict(self):\n # It is a dict-subclass, so this kind of pointless, but it doen't hurt.\n d, m = dict(a=5), ConfigDict(a=5)\n d['key'], m['key'] = 'value', 'value'\n d['k2'], m['k2'] = 'v1', 'v1'\n d['k2'], m['k2'] = 'v2', 'v2'\n self.assertEqual(d.keys(), m.keys())\n self.assertEqual(list(d.values()), list(m.values()))\n self.assertEqual(d.get('key'), m.get('key'))\n self.assertEqual(d.get('cay'), m.get('cay'))\n self.assertEqual(list(iter(d)), list(iter(m)))\n self.assertEqual([k for k in d], [k for k in m])\n self.assertEqual(len(d), len(m))\n self.assertEqual('key' in d, 'key' in m)\n self.assertEqual('cay' in d, 'cay' in m)\n self.assertRaises(KeyError, lambda: m['cay'])",
"def test_get_or_440(self):\n self.Person.drop_collection()\n test_person = self.Person(name='Test')\n test_person.save()\n t2 = get_object_or_404(self.Person, id=test_person.id)\n self.assertEqual(test_person.id, t2.id)\n self.assertRaises(Http404, get_object_or_404, self.Person, id=\"4ba90141142bb528b1000001\")\n self.Person.drop_collection()",
"def test_basedict2(self):\n tester = BaseModel()\n self.assertIn(\"id\", tester.to_dict())\n self.assertIn(\"created_at\", tester.to_dict())\n self.assertIn(\"updated_at\", tester.to_dict())",
"def test_generic_request_correct_params_type(self, api_instance):\n action = \"GenericRequestBadParamsException\"\n\n # Any dict should pass (including an empty one)\n assert api_instance.generic_request(action=action, params={})\n\n # Non-dict values should NOT pass\n param_values = [\n [\"Lists\", \"don't\", \"work\"],\n (\"Tuples\", \"don't\", \"ether\"),\n 3, # Integer? No thank you.\n \"No to a string!\",\n {\"You\", \"made\", \"a\", \"set,\", \"silly!\"},\n ]\n for val in param_values:\n with pytest.raises(ValueError):\n assert api_instance.generic_request(action, params=val)",
"def test_load_bad_datetime_arg(self):\n expected = {\n \"release_day\": 2,\n \"closing_date\": {\n \"bad_arg\": 12,\n \"month\": 10,\n \"year\": 2013,\n \"day\": 18,\n \"__type__\": \"datetime.date\"\n }\n }\n with open('tests/bad_datetime_arg.json', 'r') as json_file:\n self.assertEqual(expected, morejson.load(json_file))",
"def test_no_such_key():\n test = [{'key': 'val1'}, ['missing']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'missing' in str(t_result.failure())",
"def test4(self) -> None:\n dict_ = {\"key0\": \"value0\", \"key1\": None}\n actual_result = list(hdict.get_nested_dict_iterator(dict_))\n expected_result = [((\"key0\",), \"value0\"), ((\"key1\",), None)]\n self.assertListEqual(actual_result, expected_result)",
"def test_json_direct(self): \n response = client.result(True, 'json', 'unittest', test_data = self.test_data)\n response = json.loads(response)\n first_name = response['person'][0]['first_name']\n self.assertEqual(first_name,'John','Should print John')\n length = len(response['person'])\n for count in range(0,length):\n self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')",
"def test_invalid_values(self):\n self.assertEqual(dictsort([1, 2, 3], \"age\"), \"\")\n self.assertEqual(dictsort(\"Hello!\", \"age\"), \"\")\n self.assertEqual(dictsort({\"a\": 1}, \"age\"), \"\")\n self.assertEqual(dictsort(1, \"age\"), \"\")",
"def test_list_instead_of_dict() -> None:\n dict_ = {\n \"resourceType\": \"Observation\",\n \"status\": \"final\",\n \"code\": {\"coding\": [{\"system\": \"test\", \"code\": \"test\"}]},\n }\n subject = { # Dictionary -> OK\n \"reference\": \"Patient/475\",\n \"display\": \"REF\",\n }\n dict_[\"subject\"] = subject\n r4.from_dict(dict_)\n with pytest.raises(pydantic.ValidationError):\n dict_[\"subject\"] = [subject] # As a list -> not expected\n r4.from_dict(dict_)",
"def test_payment_accepted_invalid_dict(self):\r\n baseline = {\r\n 'orderNumber': '1',\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n }\r\n wrong = {\r\n 'orderNumber': 'k',\r\n }\r\n # tests for missing key\r\n for key in baseline:\r\n params = baseline.copy()\r\n del params[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)\r\n\r\n # tests for keys with value that can't be converted to proper type\r\n for key in wrong:\r\n params = baseline.copy()\r\n params[key] = wrong[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)",
"def test_drs_get_object_failure(self, testapp, testing_download): # noQA fixture\n res = testapp.get(testing_download)\n drs_object_uri = res.json['uuid']\n\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/not_a_uri/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/https')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access/')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/access')\n with pytest.raises(Exception):\n testapp.get(f'/ga4gh/drs/v1/objects/{drs_object_uri}/accesss/https')",
"def test_entities__EntityOrder__get__3(entityOrder, minimalEntity):\n with pytest.raises(KeyError):\n entityOrder.get(minimalEntity)",
"def test_get(self):\n client = kazoo.client.KazooClient()\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123}', None)\n self.assertEqual({'xxx': 123}, zkutils.get(client, '/foo'))\n\n # parsing error\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123', None)\n self.assertEqual(\n '{xxx: 123',\n zkutils.get(client, '/foo', strict=False)\n )\n self.assertRaises(yaml.YAMLError, zkutils.get, client, '/foo')\n\n kazoo.client.KazooClient.get.return_value = (None, None)\n self.assertIsNone(zkutils.get(client, '/foo'))",
"def test5(self) -> None:\n dict_ = {\"key0\": {\"key00\": None}, \"key1\": \"value1\"}\n actual_result = list(hdict.get_nested_dict_iterator(dict_))\n expected_result = [((\"key0\", \"key00\"), None), ((\"key1\",), \"value1\")]\n self.assertListEqual(actual_result, expected_result)",
"def test_other_user_kvs_get_failure(self):\r\n with self.assertRaises(AssertionError):\r\n self.kvs.get(self.other_key_factory(self.existing_field_name))",
"def test_update_to_non_json():\n starting_db = create_db(STARTING_DB_INPUT)\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n \"this isn't json :(\"\n )",
"def test_get_multipleobjects_exception(self):\r\n with self.assertRaises(self.table.MultipleObjectsReturned):\r\n self.table.objects.get(test_id=1)",
"def test_entities__Entity__getRawField__1(entity):\n with pytest.raises(KeyError):\n entity.getRawField('asdf')",
"def test2(self) -> None:\n dict_ = {\n \"key0\": {\"key00\": \"value00\", \"key01\": \"value01\"},\n \"key1\": \"value1\",\n }\n actual_result = list(hdict.get_nested_dict_iterator(dict_))\n expected_result = [\n ((\"key0\", \"key00\"), \"value00\"),\n ((\"key0\", \"key01\"), \"value01\"),\n ((\"key1\",), \"value1\"),\n ]\n self.assertListEqual(actual_result, expected_result)",
"def test_throws_item_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n Item.Schema().loads(json.dumps(item_missing_key))",
"def test():\n test = [{'key': 'val1'}, ['key']]\n assert fetch_data_by_keys(*test).unwrap() == 'val1'",
"def test_get_single_bad_item(test_client):\n\n response = test_client.get(BAD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND",
"def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None",
"def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')",
"def test_get_invalid_line(self):\n ars = self.ar[2009][11]['general']\n self.assertRaises(KeyError, ars.__getitem__, 'invalid_section')",
"def test_get_fails_when_getting_non_dict_attribute(self):\n attribute = \"protocols\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", f\"skills.dummy.{attribute}.protocol\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{attribute}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_RestrictingNodeTransformer__visit_NotIn_Dict():\n assert restricted_eval('2 not in {1: 1, 2: 2, 3: 3}') is False",
"def test_get_multipleobjects_exception(self):\r\n with self.assertRaises(TestModel.MultipleObjectsReturned):\r\n TestModel.objects.get(test_id=1)",
"def isdictinstance(obj):\n return isinstance(obj, dict) or isinstance(obj, DotDict)",
"def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)",
"def test_list_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def testArguments(self):\n with self.assertRaises(TypeError):\n Manager.Properties.Version.Get(get_object(TOP_OBJECT), {})",
"def test_get_other_typeerror_2(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, '1')",
"def test_get_invalid_section(self):\n arm = self.ar[2009][11]\n self.assertRaises(KeyError, arm.__getitem__, 'invalid_section')",
"def test__getitem__(self, in_, key, out_):\n if isinstance(out_, Exception):\n with pytest.raises(type(out_)) as excinfo:\n in_[key] # pylint: disable=W0104, pointless-statement\n assert excinfo.value.args[0] == out_.args[0]\n return\n assert in_[key] == out_",
"def test_getitem(self):\n for name, det in self.sampler.detectors.items():\n fromGetItem = self.sampler[name]\n self.assertIs(det, fromGetItem, msg=name)\n with self.assertRaises(KeyError):\n self.sampler['this should fail']",
"def test_Container_Get_Item_By_Key(self):\n test = ee.Dictionary({\"key1\": 1, \"key2\": 2})[\"key1\"]\n self.assertIsInstance(test, ee.computedobject.ComputedObject)",
"def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))",
"def quacks_like_dict(object):\n return isinstance(object, Mapping)",
"def test_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_protobuf_to_proto_plus, wrong_type\n )"
] | [
"0.67920804",
"0.6693874",
"0.6692907",
"0.65984184",
"0.65272987",
"0.6486978",
"0.6475015",
"0.6475015",
"0.6454139",
"0.642352",
"0.6370944",
"0.6345449",
"0.63148177",
"0.6294746",
"0.62850076",
"0.6246014",
"0.62105167",
"0.61512166",
"0.6147409",
"0.6141971",
"0.6139562",
"0.61337703",
"0.6119449",
"0.6118927",
"0.61166394",
"0.60936236",
"0.6093469",
"0.607177",
"0.6065208",
"0.60379165",
"0.60273737",
"0.6017907",
"0.59688073",
"0.59649044",
"0.5964272",
"0.5962722",
"0.59458625",
"0.5923877",
"0.5923711",
"0.5919734",
"0.59109664",
"0.5908094",
"0.5908094",
"0.5904287",
"0.59041756",
"0.5900411",
"0.58993256",
"0.5893402",
"0.588366",
"0.5877981",
"0.58494633",
"0.58479184",
"0.58212",
"0.58115715",
"0.58115715",
"0.58102304",
"0.57900983",
"0.5778451",
"0.5768068",
"0.57547665",
"0.5753182",
"0.5750103",
"0.5744972",
"0.57420164",
"0.5735868",
"0.5734188",
"0.5734044",
"0.5727423",
"0.57266724",
"0.5723759",
"0.5719865",
"0.57124346",
"0.57115287",
"0.5699018",
"0.56983536",
"0.5694993",
"0.5694571",
"0.56943536",
"0.569029",
"0.568574",
"0.5683277",
"0.5682188",
"0.5677577",
"0.56736016",
"0.5673206",
"0.5671565",
"0.56712323",
"0.5670062",
"0.56522864",
"0.5648614",
"0.56475633",
"0.5647028",
"0.56470275",
"0.56439185",
"0.56353664",
"0.5632744",
"0.56266814",
"0.5626489",
"0.5625431",
"0.5615521"
] | 0.64717823 | 8 |
Test that getting a vendor component with a wrong component type raises an error. | def test_get_fails_when_getting_vendor_dependency_with_wrong_component_type(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"vendor.fetchai.component_type_not_correct.error.non_existing_attribute",
],
standalone_mode=False,
)
assert result.exit_code == 1
s = "'component_type_not_correct' is not a valid component type. Please use one of ['protocols', 'connections', 'skills', 'contracts']."
assert result.exception.message == s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_component_with_invalid_name():\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('missing_component')",
"def test_register_component_with_invalid_type():\n\n with pytest.raises(InvalidComponentTypeError):\n component = CoreObject()\n application_services.register_component(component)",
"def test_remove_component_invalid():\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('missing_component_to_remove')",
"def test_register_component_with_invalid_type_only_component():\n\n with pytest.raises(InvalidComponentTypeError):\n component = OnlyComponentMock('only_component')\n application_services.register_component(component)",
"def test_register_component_with_invalid_name():\n\n with pytest.raises(InvalidComponentNameError):\n component = ComponentWithInvalidNameMock('')\n application_services.register_component(component)",
"def test_component_loading_module_not_found_error_framework_package_with_wrong_type(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"No module named 'packages.some_author.some_type'\"\n ),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=r\"No module named packages.some_author.some_type; 'some_type' is not a valid type name, choose one of \\['protocols', 'connections', 'skills', 'contracts'\\]\",\n ):\n load_component_from_config(component_configuration)",
"def test_get_component_with_invalid_custom_key():\n\n component = ComponentWithInvalidCustomKeyMock('component_with_invalid_key')\n custom_component = DuplicateComponentWithInvalidCustomKeyMock('component_with_invalid_key',\n component_custom_key=3000)\n application_services.register_component(component)\n application_services.register_component(custom_component)\n assert application_services.get_component('component_with_invalid_key',\n component_custom_key=999) == component\n\n application_services.remove_component(component.get_id())\n application_services.remove_component(custom_component.get_id())",
"def test_register_component_with_invalid_type_only_manager():\n\n with pytest.raises(InvalidComponentTypeError):\n component = OnlyManagerMock()\n application_services.register_component(component)",
"def test_component_loading_component_exception(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=AEAComponentLoadException(\"Generic exception\"),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"Package loading error: An error occurred while loading protocol an_author/a_protocol:0.1.0: Generic exception\",\n ):\n load_component_from_config(component_configuration)",
"def test_component_without_owner_is_trac_error(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n\n # we purposely forget to add component_owner to config\n # and run the plugin expecting a TracError\n admin_command = TicketFieldConfigCommand(self.env)\n self.assertRaises(TracError,admin_command.set_fields_from_config)",
"def test_remove_component():\n\n component = application_services.get_component('database.component')\n application_services.remove_component(component.get_id())\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('database.component')\n\n application_services.register_component(component)",
"def test_component_loading_module_not_found_error_non_framework_package(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'generic.package'\"),\n ):\n with pytest.raises(ModuleNotFoundError):\n load_component_from_config(component_configuration)",
"def test_invalid_device_type():\n _aws_device(wires=2, device_type=\"foo\", shots=None)",
"def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_component_loading_module_not_found_error_framework_package_with_wrong_author(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'packages.some_author'\"),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"No module named packages.some_author; No AEA package found with author name 'some_author'\",\n ):\n load_component_from_config(component_configuration)",
"def test_component_loading_generic_module_not_found_error(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"Package loading error: An error occurred while loading .*: Generic error\"\n ),\n ):\n with pytest.raises(ModuleNotFoundError, match=\"Generic error\"):\n load_component_from_config(component_configuration)",
"def test_component_loading_generic_exception(component_configuration):\n\n with mock.patch.object(\n Protocol, \"from_config\", side_effect=Exception(\"Generic exception\")\n ):\n with pytest.raises(\n Exception, match=\"Package loading error: An error occurred while loading\"\n ):\n load_component_from_config(component_configuration)",
"def test_component_loading_module_not_found_error_framework_package_with_wrong_name(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"No module named 'packages.some_author.protocols.some_name'\"\n ),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"No module named packages.some_author.protocols.some_name; No AEA package found with author name 'some_author', type 'protocols', name 'some_name'\",\n ):\n load_component_from_config(component_configuration)",
"def test_component_loading_module_not_found_error_framework_package(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'packages'\"),\n ):\n with pytest.raises(ModuleNotFoundError, match=\"No module named 'packages'\"):\n load_component_from_config(component_configuration)",
"def test_invalid_odata_version():\n\n with pytest.raises(PyODataException) as e_info:\n pyodata.Client(SERVICE_URL, requests, 'INVALID VERSION')\n\n assert str(e_info.value).startswith('No implementation for selected odata version')",
"def test_type_check(ExampleComponentClass):\n\n instance = ExampleComponentClass()\n\n configure(instance, {\"a\": 4.5}, name=\"x\")\n\n # Attempting to access the field should now raise a type error.\n with pytest.raises(\n TypeError,\n match=\"Field 'a' of component 'x' is annotated with type '<class 'int'>', which is not satisfied by value 4.5.\",\n ):\n instance.a",
"def test_get_software(self):\n pass",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def test_get_device_unknown():\n device = get_device(SERIAL, CREDENTIAL, \"unknown\")\n assert device is None",
"async def test_device_unknown_error(hass):\n with patch.object(axis.device, \"get_device\", side_effect=Exception):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}",
"def test_component_remove_error_bad_component(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component remove bad_component')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"async def test_select_errors(hass: HomeAssistant, vehicle_type: str):\n\n entity_registry = mock_registry(hass)\n device_registry = mock_device_registry(hass)\n\n invalid_upstream_exception = exceptions.InvalidUpstreamException(\n \"err.tech.500\",\n \"Invalid response from the upstream server (The request sent to the GDC is erroneous) ; 502 Bad Gateway\",\n )\n\n with patch(\"homeassistant.components.renault.PLATFORMS\", [SELECT_DOMAIN]):\n await setup_renault_integration_vehicle_with_side_effect(\n hass, vehicle_type, invalid_upstream_exception\n )\n await hass.async_block_till_done()\n\n mock_vehicle = MOCK_VEHICLES[vehicle_type]\n check_device_registry(device_registry, mock_vehicle[\"expected_device\"])\n\n expected_entities = mock_vehicle[SELECT_DOMAIN]\n assert len(entity_registry.entities) == len(expected_entities)\n for expected_entity in expected_entities:\n entity_id = expected_entity[\"entity_id\"]\n registry_entry = entity_registry.entities.get(entity_id)\n assert registry_entry is not None\n assert registry_entry.unique_id == expected_entity[\"unique_id\"]\n state = hass.states.get(entity_id)\n assert state.state == STATE_UNAVAILABLE\n for attr in FIXED_ATTRIBUTES:\n assert state.attributes.get(attr) == expected_entity.get(attr)\n # Check dynamic attributes:\n assert state.attributes.get(ATTR_ICON) == get_no_data_icon(expected_entity)\n assert ATTR_LAST_UPDATE not in state.attributes",
"def test_unknown_service(self):\n raise NotImplementedError # FIXME",
"def test_register_component_duplicate():\n\n component = DuplicateComponentMock('component_duplicate')\n application_services.register_component(component)\n\n with pytest.raises(DuplicateComponentIDError):\n application_services.register_component(component)\n\n application_services.remove_component(component.get_id())",
"def test_badComponentName(self):\n nPins = 12\n fuelDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 0.9, \"id\": 0.0, \"mult\": nPins}\n cladDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 1.1, \"id\": 1.0, \"mult\": nPins}\n fuel = Circle(\"fuel\", \"UZr\", **fuelDims)\n clad = Circle(\"clad_4.2.3\", \"HT9\", **cladDims)\n gapDims = {\n \"Tinput\": 25.0,\n \"Thot\": 430.0,\n \"od\": \"clad_4.2.3.id\",\n \"id\": \"fuel.od\",\n \"mult\": nPins,\n }\n gapDims[\"components\"] = {\"clad_4.2.3\": clad, \"fuel\": fuel}\n with self.assertRaises(ValueError):\n _gap = Circle(\"gap\", \"Void\", **gapDims)",
"def test_get_other_typeerror(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, 3.4)",
"def test_project_component_does_not_exist():\n project = MagicMock()\n project.source_directory = os.path.realpath(os.path.dirname(__file__))\n result = project_component.create(project, 'fake-include-path')\n assert project_component.COMPONENT([], []) == result",
"def test_get_software_bundle(self):\n pass",
"def test_prevent_wrong_type(self):\n self.assertRaises(cinv.host.Error, self.wrong_host_type)",
"def test_ccextractor_version_not_found(self):\n response = self.app.test_client().get('/test/ccextractor/0.8494')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')",
"def test_update_software_component_for_system_module(self):\n pass",
"def test_get_component_descriptors_by_type_using_get(self):\n pass",
"def test_avp_vendor(self):\n # Vendor specific flags means you need a non default vendor ID\n with self.assertRaises(CodecException):\n avp_val = avp.UnknownAVP(\n 0, b'',\n flags=avp.FLAG_VENDOR,\n vendor=avp.VendorId.DEFAULT,\n )\n out_buf = bytearray(avp_val.length)\n avp_val.encode(out_buf, 0)\n\n avp_val = avp.UnknownAVP(\n 0, b'',\n flags=avp.FLAG_VENDOR,\n vendor=1,\n )\n out_buf = bytearray(avp_val.length)\n avp_val.encode(out_buf, 0)\n self._compare_avp(avp_val, out_buf)\n\n avp_val = avp.UnknownAVP(\n 0, b'',\n flags=avp.FLAG_VENDOR,\n vendor=0x00FFFFFF,\n )\n out_buf = bytearray(avp_val.length)\n avp_val.encode(out_buf, 0)\n self._compare_avp(avp_val, out_buf)\n\n # Avp vendor in range\n with self.assertRaises(CodecException):\n avp_val = avp.UnknownAVP(\n 0, b'',\n flags=avp.FLAG_VENDOR,\n vendor=-1,\n )\n out_buf = bytearray(avp_val.length)\n avp_val.encode(out_buf, 0)\n\n # Avp vendor in range\n with self.assertRaises(CodecException):\n avp_val = avp.UnknownAVP(\n 0, b'',\n flags=avp.FLAG_VENDOR,\n vendor=0xFFFFFFFF + 1,\n )\n out_buf = bytearray(avp_val.length)\n avp_val.encode(out_buf, 0)",
"async def test_bad_trigger_platform(hass):\n with pytest.raises(vol.Invalid) as ex:\n await async_validate_trigger_config(hass, [{\"platform\": \"not_a_platform\"}])\n assert \"Invalid platform 'not_a_platform' specified\" in str(ex)",
"def test_replace_software_asset_for_software_component(self):\n pass",
"def test_component_loading_module_not_found_error_framework_package_with_wrong_suffix(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"No module named 'packages.some_author.protocols.some_name.some_subpackage'\"\n ),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"No module named packages.some_author.protocols.some_name.some_subpackage; The package 'packages/some_author' of type 'protocols' exists, but cannot find module 'some_subpackage'\",\n ):\n load_component_from_config(component_configuration)",
"def test_get_other_typeerror_2(self):\n v = versions.Version(name='foo', version='1.2.3')\n self.assertRaises(TypeError, v._get_other, '1')",
"def test_component_resolution_same_file_err():\n\n with pytest.raises(InterpStackTrace) as exc_info:\n snippet_eval(ComponentSnippet(modulea.ComponentResolutionSameFileErr()))\n assert 'DefinitelyNotExistingComponent' in str(exc_info.value)",
"def test_get_component_OFF(self):\n self._ucr({\n 'repository/online/component/b': 'no',\n 'repository/online/component/b/foo': 'bar',\n })\n c = self.u.get_component('b')\n self.assertEqual({'name': 'b', 'activated': False, 'foo': 'bar'}, c)",
"def test_wrong_type(self):\n msg = 'Widget type is not valid. Valid widget types are: ' + \\\n 'basic, default, formula, histogram, category, animation, time-series.'\n\n with pytest.raises(ValueError) as e:\n Widget({'type': 'xxx'}).get_info()\n assert str(e.value) == msg",
"def test_entities__entity_by_name__3(stubEntities, entityAdapters):\n with pytest.raises(ValueError):\n IEntity('icemac.addressbook.tests.stubs.Duck')",
"def test_entities__EntityOrder__get__5(entityOrder):\n person = IEntity(IPerson)\n with zope.component.hooks.site(None):\n with pytest.raises(zope.component.ComponentLookupError):\n entityOrder.get(person)",
"def test_register_component():\n\n component = ComponentMock('component1')\n application_services.register_component(component)\n assert application_services.get_component('component1') == component\n application_services.remove_component(component.get_id())",
"def test__get_component_version_empty(self):\n self._ucr({'repository/online/component/a/version': ''})\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): '',\n })\n ver = U.UCS_Version((MAJOR, MINOR, 0)) # comonent.erratalevel!\n comp_ver = self.u._get_component_versions('a', start=ver, end=ver)\n self.assertEqual(set((ver,)), comp_ver)",
"def test_get_component_ON(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/foo': 'bar',\n })\n c = self.u.get_component('a')\n self.assertEqual({'name': 'a', 'activated': True, 'foo': 'bar'}, c)",
"def test_get_simulator_device_type_by_platform_not_found(self, _, _2):\n with self.assertRaises(test_runner.SimulatorNotFoundError) as context:\n iossim_util.get_simulator_device_type_by_platform(\n iossim_util.get_simulator_list(), 'iPhone XI')\n expected_message = ('Simulator does not exist: Not found device '\n '\"iPhone XI\" in devicetypes')\n self.assertTrue(expected_message in str(context.exception))",
"def test_update_software_components_for_system_module(self):\n pass",
"def test_get_device_intf():\n\n cable = Cable(device_a_name=\"deva\", interface_a_name=\"inta\", device_z_name=\"devb\", interface_z_name=\"intb\")\n assert cable.get_device_intf(\"a\") == (\"deva\", \"inta\")\n assert cable.get_device_intf(\"z\") == (\"devb\", \"intb\")\n\n with pytest.raises(ValueError):\n cable.get_device_intf(\"v\")",
"def test_invalid_type_cr_spec(self):\n QPS_SPECS_NONAMES = {\n \"circuits\": [{\n \"quantum_registers\": [{\n \"size\": 3}],\n \"classical_registers\": [{\n \"name\": 1,\n \"size\": 3}]\n }]\n }\n\n self.assertRaises(QISKitError, QuantumProgram, specs=QPS_SPECS_NONAMES)",
"def test_entity_id() -> None:\n schema = vol.Schema(cv.entity_id)\n\n with pytest.raises(vol.MultipleInvalid):\n schema(\"invalid_entity\")\n\n assert schema(\"sensor.LIGHT\") == \"sensor.light\"",
"def test_component_loading_instantiation_exception(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=AEAInstantiationException(\"Generic exception\"),\n ):\n with pytest.raises(AEAInstantiationException):\n load_component_from_config(component_configuration)",
"def test_versionComponents(self):\n self.assertEqual(\n (int, int, int),\n tuple(\n type(info) for info\n in [nevow.version.major, nevow.version.minor, nevow.version.micro]))",
"def test_listVendorWithNoParams(self):\r\n result = self.client.listVendors({'i_customer': 1})\r\n assert result['result']=='OK'",
"def test_gpus_raises():\n gpus = \"1\"\n\n with pytest.raises(ValueError):\n cli._gpus(gpus)",
"def test_get_component_with_default_key():\n\n default_component = application_services.get_component('database.component')\n assert application_services.get_component('database.component',\n component_custom_key=DEFAULT_COMPONENT_KEY) \\\n == default_component",
"def test_entities__entity_by_name__4(stubEntities, entityAdapters):\n with pytest.raises(ValueError):\n IEntity('icemac.addressbook.tests.conftest.Duck')",
"def test_entities__entity_by_name__2(stubEntities, entityAdapters):\n with pytest.raises(ValueError):\n IEntity(u'asdf')",
"def test_get_device(self):\n pass",
"def test_get_device(self):\n pass",
"def test_will_not_get_instance_type_with_bad_id(self):\n self.assertRaises(exception.InstanceTypeNotFound,\n instance_types.get_instance_type, 'asdf')",
"def test_missing_version(self):\n\n params_82 = {'ReQuEsT': \"DescribeCoverage\", 'SeRvIcE': \"WCS\", \"BOGUS\": \"SSS\"}\n response = self.query_server(params_82)\n soup = BeautifulSoup(response.text, 'xml')\n self.assertTrue(\n soup.find('ServiceExceptionReport'),\n msg=\"The server should return an exception if the version is not included in a DescribeCoverage request.\")",
"def test_single_field_failure(self, client):\n with pytest.raises(CastorException) as e:\n client.single_field_dependency(2)\n assert str(e.value) == \"404 Entity not found.\"",
"def test_get_systemcontroller_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Systemcontroller.get, session, node)",
"def test_get_component_descriptors_by_types_using_get(self):\n pass",
"async def test_get_device_unknown_error(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.AxisException\n ), pytest.raises(axis.errors.AuthenticationRequired):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")",
"def test_need_client(self):\n self.assertRaises(TypeError, ACMEAccount)",
"async def test_configure_service_with_faulty_field(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n await setup_deconz_integration(hass, aioclient_mock)\n\n data = {SERVICE_FIELD: \"light/2\", SERVICE_DATA: {}}\n\n with pytest.raises(vol.Invalid):\n await hass.services.async_call(\n DECONZ_DOMAIN, SERVICE_CONFIGURE_DEVICE, service_data=data\n )\n await hass.async_block_till_done()",
"def test__get_component_version_short(self):\n self._ucr({'repository/online/component/a/version': '%d.%d' % (MAJOR, MINOR)})\n ver = self.u._get_component_versions('a', None, None)\n self.assertEqual(set((U.UCS_Version((MAJOR, MINOR, 0)),)), ver)",
"def test_get_latest_component_id_from_prefix_negative():\n agent_config = MagicMock()\n agent_config.package_dependencies = {}\n\n result = get_latest_component_id_from_prefix(\n agent_config, (ComponentType.PROTOCOL, \"author\", \"name\")\n )\n assert result is None",
"def test_import_string_missing_class_or_attribute(self):\n valid_module = 'ttgn.pokedex'\n invalid_class = 'NonexistentClass'\n with pytest.raises(ImportError) as error:\n utils.import_string('{}.{}'.format(valid_module, invalid_class))\n assert 'Module {} has no class or attribute {}'.format(\n valid_module, invalid_class) == str(error.value)",
"def test_component_add_error_already_exists(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component add component1 new_user')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))",
"def test_validate_c_tag_fail(self):\n\n s = Mock()\n s.c_tag = 111\n s.onu_device = \"BRCM1234\"\n\n self.models_decl.RCORDSubscriber_decl.objects.filter.return_value = [s, self.rcord_subscriber]\n\n with self.assertRaises(Exception) as e:\n self.rcord_subscriber.save()\n\n self.assertEqual(e.exception.message, \"The c_tag you specified (111) has already been used on device BRCM1234\")\n self.models_decl.RCORDSubscriber_decl.save.assert_not_called()",
"def test_invalid_version_value(self):\n self.assertRaises(TypeError, versions.Version, version=1, name='foo')",
"def test_version_type(self):\n self.assertIsInstance(get_version(), str)",
"def test_invalid_version(self):\n\n params_82 = {'ReQuEsT': \"DescribeCoverage\", 'SeRvIcE': \"WCS\", \"BOGUS\": \"SSS\", 'Version': \"0.0.0.0\"}\n response = self.query_server(params_82)\n soup = BeautifulSoup(response.text, 'xml')\n self.assertTrue(\n soup.find('ServiceExceptionReport'),\n msg=\"The server should return an exception if an invalid version is submitted with a DescribeCoverage request.\"\n )",
"def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME",
"def test_get_all_components():\n\n components = ['api.component',\n 'api.router.component',\n 'configuration.component',\n 'database.component',\n 'database.migration.component',\n 'globalization.locale.component',\n 'globalization.datetime.component',\n 'logging.component',\n 'converters.deserializer.component',\n 'security.component',\n 'security.authentication.component',\n 'security.authorization.component',\n 'security.encryption.component',\n 'security.hashing.component',\n 'security.permission.component',\n 'security.session.component',\n 'security.token.component',\n 'packaging.component',\n 'caching.component']\n\n assert all(application_services.get_component(component) is not None\n for component in components)",
"def test_entities__entity_by_name__1(stubEntities, entityAdapters):\n with pytest.raises(ValueError):\n IEntity('asdf')",
"def test_entities__Entity__getClass__2():\n e = Entity(None, IDummy, None)\n with pytest.raises(ValueError):\n e.getClass()",
"async def test_api_get_components(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(const.URL_API_COMPONENTS)\n result = await resp.json()\n assert set(result) == hass.config.components",
"def test_get_systems(self):\n pass",
"def test_subsystems(self):\n pass",
"def test_entities__Entity__name__2(address_book):\n entity = Entity(None, IDummy, None)\n with pytest.raises(ValueError):\n entity.name",
"def AssertDevice(self, device):\n cmd = ('getprop(\"ro.product.device\") == \"%s\" || '\n 'abort(\"E%d: This package is for \\\\\"%s\\\\\" devices; '\n 'this is a \\\\\"\" + getprop(\"ro.product.device\") + \"\\\\\".\");') % (\n device, common.ErrorCode.DEVICE_MISMATCH, device)\n self.script.append(cmd)",
"def test_component_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component remove component1')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_constructor_missing_config():\n with pytest.raises(TypeError):\n Unpacker()",
"def is_vendor(schema_obj):\n\n return isinstance(schema_obj, schema.Vendor)",
"def test_register_component_with_custom_key_duplicate():\n\n custom_component = DuplicateDatabaseComponentMock('database.component',\n component_custom_key=1000)\n application_services.register_component(custom_component)\n\n with pytest.raises(DuplicateComponentIDError):\n application_services.register_component(custom_component)\n\n application_services.remove_component(custom_component.get_id())",
"def test_import_software_asset(self):\n pass",
"def test_get_pricing_with_incorrect_instrument():\n res = oanda.get_pricing(CONFIG, 'XXX500_WRONG')\n assert res[0] == 400",
"def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule",
"def test_configure_non_interactive_missing_field_value(ExampleComponentClass):\n\n with pytest.raises(\n ValueError,\n match=r\"^No configuration value found for annotated field 'FAKE_NAME.a' of type 'int'.\",\n ):\n configure(ExampleComponentClass(), {\"b\": \"bar\"}, name=\"FAKE_NAME\")",
"def validate(self):\n Component.validate(self)\n kinds = (\"lib\", \"exe\")\n if self.kind not in kinds:\n raise Invalid(\"kind must be one of %s for component %s\" % (kinds,self.name))\n\n if self.kind == \"exe\" :\n if not self.exe_path:\n raise Invalid(\"exe_path must be defined for component %s\" % self.name)",
"def test_incompatible_subscription_and_tenant():\n pass"
] | [
"0.7484369",
"0.70648474",
"0.6751108",
"0.6693934",
"0.65564436",
"0.6499122",
"0.6351091",
"0.6348619",
"0.62459064",
"0.61908615",
"0.61505693",
"0.6141869",
"0.6136945",
"0.6136352",
"0.61140954",
"0.608789",
"0.60551083",
"0.60050845",
"0.59680504",
"0.59571373",
"0.59189194",
"0.5881391",
"0.5876208",
"0.5874755",
"0.58706355",
"0.5863667",
"0.5850145",
"0.58384687",
"0.58353895",
"0.58065677",
"0.5772793",
"0.57679766",
"0.5764232",
"0.5762633",
"0.5753507",
"0.57533896",
"0.574929",
"0.57474595",
"0.5743619",
"0.57346827",
"0.57310045",
"0.5720663",
"0.57098526",
"0.57065517",
"0.5697008",
"0.5664601",
"0.5664116",
"0.56482565",
"0.5643645",
"0.56412405",
"0.56407887",
"0.5640428",
"0.5638301",
"0.56310403",
"0.5619703",
"0.5613547",
"0.55981106",
"0.559219",
"0.5588805",
"0.55710506",
"0.5565554",
"0.556492",
"0.55586773",
"0.55586773",
"0.5553252",
"0.5547953",
"0.5538543",
"0.5536974",
"0.55292594",
"0.5505584",
"0.5505332",
"0.5497397",
"0.5490811",
"0.5490624",
"0.54885525",
"0.54872924",
"0.54863095",
"0.5485524",
"0.5464771",
"0.5456972",
"0.54538864",
"0.54493815",
"0.54440963",
"0.5427281",
"0.5426099",
"0.5425593",
"0.54211694",
"0.54124165",
"0.54090035",
"0.5406079",
"0.5405453",
"0.540108",
"0.53993684",
"0.5398648",
"0.5397962",
"0.538461",
"0.5382152",
"0.53800845",
"0.5378568",
"0.537795"
] | 0.7972388 | 0 |
Set the test up. | def setup_class(cls):
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = cls.t / dir_path
src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
shutil.copytree(Path(CUR_PATH, "data", "dummy_aea"), Path(cls.t, "dummy_aea"))
os.chdir(Path(cls.t, "dummy_aea"))
cls.runner = CliRunner() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n logging.debug('setting up')",
"def setUp(self):\n logging.debug('setting up')",
"def setUp(self):\n\n self._set_up()",
"def setUp(self):\n MainTests.setUp(self)",
"def setUp(self):\n \n pass",
"def setUp(self):\n\n # setup init variables\n self.init_vars = {\n 'suppress_logfile': True,\n 'verbosity': 0,\n 'mothur_seed': 54321,\n }\n\n # setup directories for testing\n test_dir = os.path.join(os.getcwd(), 'tests')\n self.test_output_dir = os.path.join(test_dir, 'test_output')\n if not os.path.isdir(self.test_output_dir):\n os.makedirs(self.test_output_dir)\n self.test_input_dir = os.path.join(test_dir, 'test_data')\n\n return",
"def setUp(self):\n print(\"New test by Nikolay Melnik\")",
"def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')",
"def setUp(self):\n test_env_setup()",
"def setUp(self):\n\n pass",
"def setUp(self):\n\n pass",
"def setUp(self) :\n pass",
"def setUp(self):\n self.setup_beets()",
"def setUp(self):\n\n return",
"def setUp(self) -> None:\n pass",
"def setUp(self) -> None:\n pass",
"def setUp(self):\n pass #because we dont have anything to setup.",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def _set_up():\n repl._setUp = self.setUp",
"def setUp(self):\n setUp()",
"def setUp(self):\n print('Calling \\'setUp\\'')",
"def setUp(self):\n\n BaseTest.setUp(self)",
"def setUp(self):\n self",
"def setUp(self):\n self",
"def setUp(self):\r\n pass",
"def setup(self):\n # Have to wait for a server connection before we\n # can run the test\n self.wait_for_server_connections(10)",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\r\n pass # nothing used by all\r",
"def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()",
"def setUp(self):\r\n pass # nothing required by all\r",
"def setUp(self):\n # Used to initialize objects that should be re-initialized or\n # re-created for each individual test\n self.t = Task()\n\n self.t.config(\"alias.from\", \"to\")",
"def setUp(self):\n print(\"\\nIn setUp()...\")",
"def setUp(self):\n\t\tself.testCases = [\n\t\t\t{\n\t\t\t\t'show': \"House\",\n\t\t\t\t'episode': 11,\n\t\t\t\t'season': 3,\n\t\t\t\t'title': \"Words and Deeds\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Lost\",\n\t\t\t\t'episode': 21,\n\t\t\t\t'season': 2,\n\t\t\t\t'title': \"?\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Heroes\",\n\t\t\t\t'episode': 15,\n\t\t\t\t'season': 1,\n\t\t\t\t'title': \"Run!\"\n\t\t\t}\n\t\t]",
"def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass",
"def setUp(self):\n super(BasicTestCase, self).setUp()",
"def setUp(self):\n raise NotImplementedError",
"def setUp(self):\n self.db_fd, mainPyUnit.app.config['DATABASE'] = tempfile.mkstemp()\n mainPyUnit.app.config['TESTING'] = True\n self.app = mainPyUnit.app.test_client()\n #mainPyUnit.init_db()",
"def setUp(self):\n\n # Setup for all test cases.\n controllers = com.discover_controllers_on_network()\n self.controller, _, connected = com.connect_robot_with_ipaddr(controllers, '127.0.0.1')\n if not connected:\n print 'Couldn\\'t connect to controller. Test will not be run.'\n sys.exit()\n is_logged_in, _ = user_auth.logon_robot_controller_default(self.controller)\n if not is_logged_in:\n print 'Couldn\\'t log in. Test will not be run.'\n sys.exit()\n\n # Additional setup for some test cases.\n test_desc = self.shortDescription()\n if test_desc == 'Tests edit_and_write_rapid_data_property with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()\n elif test_desc == 'Tests edit_and_write_rapid_data with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()",
"def setup( self ):",
"def setUp(self) -> None:\n self.engine = EvalHPOA()",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setUp(self):\n self.example = Example()",
"def setUpTestCase(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setUp(self) -> None:\n\n self.checker = CheckerBase()",
"def setup(self) -> None:",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }",
"def setUp(self):\n self.t = Task()",
"def setUp(self):\n self.t = Task()",
"def setUp(self):\n super().setUp()\n self.runner = CliRunner()",
"def setUp(self):\r\n super(EETestCase, self).setUp()"
] | [
"0.82482773",
"0.82482773",
"0.81176686",
"0.800283",
"0.7907327",
"0.78918254",
"0.7887326",
"0.7848355",
"0.7842833",
"0.7832785",
"0.7832785",
"0.781454",
"0.78136706",
"0.7806924",
"0.78026885",
"0.78026885",
"0.77940094",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7766595",
"0.77608186",
"0.77478987",
"0.7743035",
"0.76929235",
"0.76929235",
"0.768341",
"0.7623276",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.75897497",
"0.75282216",
"0.7513549",
"0.7501416",
"0.7496145",
"0.7493589",
"0.7474445",
"0.7467448",
"0.7464891",
"0.7457519",
"0.7449974",
"0.7449959",
"0.74333304",
"0.7428299",
"0.7428299",
"0.7428299",
"0.7425823",
"0.74212027",
"0.74118286",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7376384",
"0.7364325",
"0.7359819",
"0.7359819",
"0.7359506",
"0.73563415",
"0.73563415",
"0.73493826",
"0.73490524"
] | 0.0 | -1 |
Test setting the agent name. | def test_set_agent_logging_options(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"agent.logging_config.disable_existing_loggers",
"True",
"--type=bool",
],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"agent.logging_config.disable_existing_loggers",
],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "True\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_set_name_return(self) -> None:\n\n actual = self.helper.set_name(self.test_name)\n\n self.assertIsInstance(actual, EnvironmentVariableHelper)",
"def set_object_name(self, agent, Name):\n\n self.send_ObjectName(agent, agent.agent_id, agent.session_id, {1:[self.LocalID, Name]})",
"def __init__(self, agent_name):\n\n self._agent_name = agent_name",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_change_name_of_the_devicetrue():",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def test_change_name_of_the_devicefalse():",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def name(self, name: str):\n self.inst['targetname'] = name",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def is_java_agent(self):\r\n return self.has_label('java_agent')",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def set_name(self, name=\"\"):\n if isinstance(name, str):\n self.__name = name\n return 0\n print(\"type of nom is not STR\")\n return 1",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def set_name_for_actor(name, actor):\n key = StringKey.MakeKey(\"MeshName\", \"root\")\n i = vtk.vtkInformation()\n i.Set(key, name)\n actor.SetPropertyKeys(i)",
"def botname(self):\n return settings.AIM_USERNAME",
"def test_default_agent_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"agent-port\"], b'tcp:4524')",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def _check_name(self):\n\t\tpass",
"def test_set_library_name(self):\n s1 = System()\n s1.set_library_name(\"Andreson\")\n self.assertEqual(s1.get_library_name(), \"Andreson\")",
"def the_user_changes_the_name_of_the_device(name):\n web_app.change_property_softassert(\"name\",name)",
"def name_option(args, run):\n run.experiment_info[\"name\"] = args\n run.run_logger = run.root_logger.getChild(args)",
"def set_agent_env(self, param, value):\n logging.info(\"setting agent_env param:[%s] = value:[%s]\", param, value)\n self.agent_env[param] = value",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_jobset_autoname(self):\n name = os.path.join('indir', 'infile')\n self.assertEqual(self.jobset.name, name)",
"def _aa_host_name(self):\n self.is_option = True\n self.is_statement = False\n self.has_validator = True\n if not (self.value.startswith('\"') and self.value.endswith('\"')):\n self.value = '\"' + self.value + '\"'\n validate_name(self.value.strip('\"'))",
"async def name(self, ctx, *, name: str = None):\n plagueName = await self.config.plagueName()\n if not name:\n message = f\"The current plague's name is `{plagueName}`.\"\n else:\n await self.config.plagueName.set(name)\n message = f\"Set the current plague's name to `{name}`.\"\n await ctx.send(message)",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )",
"def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value",
"def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()",
"def test_custom_agent_port(self):\n options = ControlOptions()\n options.parseOptions([b\"--agent-port\", b\"tcp:1234\"])\n self.assertEqual(options[\"agent-port\"], b\"tcp:1234\")",
"def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)",
"def test_detector_name(i07_nexus_object_01: I07Nexus):\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021",
"def agent_set(bus):\n # TODO\n pass",
"def set_name(self, name):\n self.settings[\"name\"] = name",
"def test_name_false(self):\r\n self.name = False",
"def set_name(self, name):\n\t\tself.name_ = name",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def setName(self, newName):\n self.__username = newName",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"async def botname(ctx, *, new_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n bot_member = discord.utils.find(lambda m: m.id == amor_manager.user.id, ctx.message.server.members)\n await amor_manager.change_nickname(bot_member, new_name)",
"def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def GetModernizedTestName(self, arg):\n return arg",
"def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_agent():\n\n # default parameters\n print('Testing an agent with default parameters')\n uid = 'test_agent'\n params = {'use_checkpointer': False}\n verify.verify_agent(uid, params)\n verify.log_graph(uid, write_logs=False)\n print('\\n' + '#' * 65 + '\\n')\n\n # random parameters\n for _ in range(9):\n rand_params = utils.get_random_params()\n rand_params['use_checkpointer'] = False\n print(f'Testing an agent with parameters: {rand_params}')\n verify.verify_agent(uid, rand_params)\n verify.log_graph(uid, rand_params, False)\n print('\\n' + '#' * 65 + '\\n')\n\n # cleaning up\n path = os.path.join(configs.LOG_DIR, 'test_agent')\n shutil.rmtree(path)",
"def autoname(self):\n\t\tself.name = self.role_profile",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_story_submitter(self):\n self.assertEqual(self.story.submitter, 'karangoeluw')",
"def setMachineName(self, name):\n if type(name) != str:\n return None\n self.description.setName(name)",
"def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_set_value(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.set_value(\"Hello, World!\")\n\n expected = \"Hello, World!\"\n actual = self.helper.get_value()\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]",
"def setUA(self, useragent):\n\t\tpass",
"def test_name(self):\n self.assertEqual(ApiConfig.name, 'api')",
"def set_name(self,name):\r\n self._name = __name",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n if entry[\"kind\"] == trace_api.SpanKind.SERVER:\n entry[\"name\"] = span_name\n else:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[2:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, default_span_details=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_organization_name(self):\n insurgent = models.Organization(title='Insurgent')\n with self.assertRaises(ValueError):\n insurgent.name = '35453496*%&^$%^'\n with self.assertRaises(ValueError):\n insurgent.name = 'Insurgent'\n insurgent.name = 'insurgent'\n self.assertEqual(insurgent.name, 'insurgent')",
"def set_user_name_override(name: str) -> None:\r\n global _user_name_override\r\n _user_name_override = name",
"def set_name(name=False):\n if not name:\n name = name_generator()\n return name",
"def get_name(self):\n if self.ui.nick_line.text() and self.ui.pass_line.text():\n self.check_verify()\n self.set_name = True\n qApp.exit()",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def set_name(self, sNewVmName):\n\t\tcall_sdk_function('PrlVmCfg_SetName', self.handle, sNewVmName)",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_username(self):\n assert_equals(self.client.username, 'testuser')",
"def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name",
"def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')",
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[-1:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, span_details_callback=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_set_appsearch_engine_name(self):\n\n # Get the current engine name and store it\n original_engine_name = Car.get_appsearch_engine_name()\n\n # Set a new app search engine name\n Car.set_appsearch_engine_name('test_cars')\n\n # Test if its set successfully\n engine_name = Car.get_appsearch_engine_name()\n self.assertEqual(engine_name, 'test_cars')\n\n # Reset it back to the original\n Car.set_appsearch_engine_name(original_engine_name)",
"def setName(self, name):\n self.name = str(name)",
"def setName(self, *args):\n return _libsbml.Objective_setName(self, *args)",
"async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)",
"def test_state_name(self):\n state = State('test-state')\n self.assertEqual(state.name, 'test-state')",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def on_setting_myname(self, value):\n raise NotImplementedError()",
"def enter_name(self, name):\n self.name = name",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_local_agent_from_package_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n\n inputs = {\n 'resource_base': self.resource_base,\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue,\n 'file_server_port': self.fs.port\n }\n\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def set_experiment_name(self, experiment_name):\n self.experiment_name = experiment_name",
"def setName(self, *args):\n return _libsbml.Species_setName(self, *args)",
"def isSetName(self):\n return _libsbml.Objective_isSetName(self)"
] | [
"0.72085243",
"0.7100356",
"0.6366684",
"0.62835133",
"0.6257389",
"0.6257389",
"0.62132823",
"0.6131713",
"0.60440767",
"0.60348433",
"0.5927437",
"0.5916145",
"0.58946425",
"0.5865132",
"0.5844421",
"0.57391584",
"0.5722152",
"0.57040644",
"0.5700256",
"0.56795913",
"0.5665505",
"0.5655928",
"0.5632392",
"0.5624648",
"0.5613523",
"0.55860275",
"0.55763686",
"0.55761933",
"0.5556421",
"0.5545556",
"0.5524442",
"0.5512594",
"0.5481373",
"0.547442",
"0.546205",
"0.54620385",
"0.5456332",
"0.54486966",
"0.54285616",
"0.5425501",
"0.5424599",
"0.54212815",
"0.5411104",
"0.5397801",
"0.53956974",
"0.5389271",
"0.5380992",
"0.53781265",
"0.5372445",
"0.5369932",
"0.5351236",
"0.5347454",
"0.5343573",
"0.533603",
"0.5331701",
"0.532518",
"0.5313506",
"0.53130037",
"0.5309862",
"0.5297768",
"0.5296321",
"0.5290892",
"0.5289959",
"0.5283988",
"0.52811134",
"0.5264371",
"0.52566385",
"0.52563393",
"0.5253919",
"0.52434075",
"0.52428365",
"0.5239398",
"0.52352524",
"0.5231288",
"0.5229124",
"0.5229076",
"0.5225584",
"0.5223776",
"0.5208234",
"0.52070165",
"0.5202741",
"0.5202312",
"0.51952773",
"0.51944065",
"0.5181081",
"0.51794475",
"0.517559",
"0.51706153",
"0.516947",
"0.5169103",
"0.5162539",
"0.5156178",
"0.51513094",
"0.51496273",
"0.5144778",
"0.5144312",
"0.51377416",
"0.5130215",
"0.5125931",
"0.5124033",
"0.51237625"
] | 0.0 | -1 |
Test setting the agent name. | def test_set_agent_incorrect_value(self):
with pytest.raises(
ClickException,
match="Attribute `not_agent_name` is not allowed to be updated!",
):
self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "agent.not_agent_name", "new_name"],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_set_name_return(self) -> None:\n\n actual = self.helper.set_name(self.test_name)\n\n self.assertIsInstance(actual, EnvironmentVariableHelper)",
"def set_object_name(self, agent, Name):\n\n self.send_ObjectName(agent, agent.agent_id, agent.session_id, {1:[self.LocalID, Name]})",
"def __init__(self, agent_name):\n\n self._agent_name = agent_name",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_change_name_of_the_devicetrue():",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def test_change_name_of_the_devicefalse():",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def name(self, name: str):\n self.inst['targetname'] = name",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def is_java_agent(self):\r\n return self.has_label('java_agent')",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def set_name(self, name=\"\"):\n if isinstance(name, str):\n self.__name = name\n return 0\n print(\"type of nom is not STR\")\n return 1",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def set_name_for_actor(name, actor):\n key = StringKey.MakeKey(\"MeshName\", \"root\")\n i = vtk.vtkInformation()\n i.Set(key, name)\n actor.SetPropertyKeys(i)",
"def botname(self):\n return settings.AIM_USERNAME",
"def test_default_agent_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"agent-port\"], b'tcp:4524')",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def _check_name(self):\n\t\tpass",
"def test_set_library_name(self):\n s1 = System()\n s1.set_library_name(\"Andreson\")\n self.assertEqual(s1.get_library_name(), \"Andreson\")",
"def the_user_changes_the_name_of_the_device(name):\n web_app.change_property_softassert(\"name\",name)",
"def name_option(args, run):\n run.experiment_info[\"name\"] = args\n run.run_logger = run.root_logger.getChild(args)",
"def set_agent_env(self, param, value):\n logging.info(\"setting agent_env param:[%s] = value:[%s]\", param, value)\n self.agent_env[param] = value",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_jobset_autoname(self):\n name = os.path.join('indir', 'infile')\n self.assertEqual(self.jobset.name, name)",
"def _aa_host_name(self):\n self.is_option = True\n self.is_statement = False\n self.has_validator = True\n if not (self.value.startswith('\"') and self.value.endswith('\"')):\n self.value = '\"' + self.value + '\"'\n validate_name(self.value.strip('\"'))",
"async def name(self, ctx, *, name: str = None):\n plagueName = await self.config.plagueName()\n if not name:\n message = f\"The current plague's name is `{plagueName}`.\"\n else:\n await self.config.plagueName.set(name)\n message = f\"Set the current plague's name to `{name}`.\"\n await ctx.send(message)",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )",
"def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value",
"def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()",
"def test_custom_agent_port(self):\n options = ControlOptions()\n options.parseOptions([b\"--agent-port\", b\"tcp:1234\"])\n self.assertEqual(options[\"agent-port\"], b\"tcp:1234\")",
"def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)",
"def test_detector_name(i07_nexus_object_01: I07Nexus):\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021",
"def agent_set(bus):\n # TODO\n pass",
"def set_name(self, name):\n self.settings[\"name\"] = name",
"def test_name_false(self):\r\n self.name = False",
"def set_name(self, name):\n\t\tself.name_ = name",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def setName(self, newName):\n self.__username = newName",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"async def botname(ctx, *, new_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n bot_member = discord.utils.find(lambda m: m.id == amor_manager.user.id, ctx.message.server.members)\n await amor_manager.change_nickname(bot_member, new_name)",
"def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def GetModernizedTestName(self, arg):\n return arg",
"def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_agent():\n\n # default parameters\n print('Testing an agent with default parameters')\n uid = 'test_agent'\n params = {'use_checkpointer': False}\n verify.verify_agent(uid, params)\n verify.log_graph(uid, write_logs=False)\n print('\\n' + '#' * 65 + '\\n')\n\n # random parameters\n for _ in range(9):\n rand_params = utils.get_random_params()\n rand_params['use_checkpointer'] = False\n print(f'Testing an agent with parameters: {rand_params}')\n verify.verify_agent(uid, rand_params)\n verify.log_graph(uid, rand_params, False)\n print('\\n' + '#' * 65 + '\\n')\n\n # cleaning up\n path = os.path.join(configs.LOG_DIR, 'test_agent')\n shutil.rmtree(path)",
"def autoname(self):\n\t\tself.name = self.role_profile",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_story_submitter(self):\n self.assertEqual(self.story.submitter, 'karangoeluw')",
"def setMachineName(self, name):\n if type(name) != str:\n return None\n self.description.setName(name)",
"def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_set_value(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.set_value(\"Hello, World!\")\n\n expected = \"Hello, World!\"\n actual = self.helper.get_value()\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]",
"def setUA(self, useragent):\n\t\tpass",
"def test_name(self):\n self.assertEqual(ApiConfig.name, 'api')",
"def set_name(self,name):\r\n self._name = __name",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n if entry[\"kind\"] == trace_api.SpanKind.SERVER:\n entry[\"name\"] = span_name\n else:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[2:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, default_span_details=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_organization_name(self):\n insurgent = models.Organization(title='Insurgent')\n with self.assertRaises(ValueError):\n insurgent.name = '35453496*%&^$%^'\n with self.assertRaises(ValueError):\n insurgent.name = 'Insurgent'\n insurgent.name = 'insurgent'\n self.assertEqual(insurgent.name, 'insurgent')",
"def set_user_name_override(name: str) -> None:\r\n global _user_name_override\r\n _user_name_override = name",
"def set_name(name=False):\n if not name:\n name = name_generator()\n return name",
"def get_name(self):\n if self.ui.nick_line.text() and self.ui.pass_line.text():\n self.check_verify()\n self.set_name = True\n qApp.exit()",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def set_name(self, sNewVmName):\n\t\tcall_sdk_function('PrlVmCfg_SetName', self.handle, sNewVmName)",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_username(self):\n assert_equals(self.client.username, 'testuser')",
"def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name",
"def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')",
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[-1:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, span_details_callback=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_set_appsearch_engine_name(self):\n\n # Get the current engine name and store it\n original_engine_name = Car.get_appsearch_engine_name()\n\n # Set a new app search engine name\n Car.set_appsearch_engine_name('test_cars')\n\n # Test if its set successfully\n engine_name = Car.get_appsearch_engine_name()\n self.assertEqual(engine_name, 'test_cars')\n\n # Reset it back to the original\n Car.set_appsearch_engine_name(original_engine_name)",
"def setName(self, name):\n self.name = str(name)",
"def setName(self, *args):\n return _libsbml.Objective_setName(self, *args)",
"async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)",
"def test_state_name(self):\n state = State('test-state')\n self.assertEqual(state.name, 'test-state')",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def on_setting_myname(self, value):\n raise NotImplementedError()",
"def enter_name(self, name):\n self.name = name",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_local_agent_from_package_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n\n inputs = {\n 'resource_base': self.resource_base,\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue,\n 'file_server_port': self.fs.port\n }\n\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def set_experiment_name(self, experiment_name):\n self.experiment_name = experiment_name",
"def setName(self, *args):\n return _libsbml.Species_setName(self, *args)",
"def isSetName(self):\n return _libsbml.Objective_isSetName(self)"
] | [
"0.72085243",
"0.6366684",
"0.62835133",
"0.6257389",
"0.6257389",
"0.62132823",
"0.6131713",
"0.60440767",
"0.60348433",
"0.5927437",
"0.5916145",
"0.58946425",
"0.5865132",
"0.5844421",
"0.57391584",
"0.5722152",
"0.57040644",
"0.5700256",
"0.56795913",
"0.5665505",
"0.5655928",
"0.5632392",
"0.5624648",
"0.5613523",
"0.55860275",
"0.55763686",
"0.55761933",
"0.5556421",
"0.5545556",
"0.5524442",
"0.5512594",
"0.5481373",
"0.547442",
"0.546205",
"0.54620385",
"0.5456332",
"0.54486966",
"0.54285616",
"0.5425501",
"0.5424599",
"0.54212815",
"0.5411104",
"0.5397801",
"0.53956974",
"0.5389271",
"0.5380992",
"0.53781265",
"0.5372445",
"0.5369932",
"0.5351236",
"0.5347454",
"0.5343573",
"0.533603",
"0.5331701",
"0.532518",
"0.5313506",
"0.53130037",
"0.5309862",
"0.5297768",
"0.5296321",
"0.5290892",
"0.5289959",
"0.5283988",
"0.52811134",
"0.5264371",
"0.52566385",
"0.52563393",
"0.5253919",
"0.52434075",
"0.52428365",
"0.5239398",
"0.52352524",
"0.5231288",
"0.5229124",
"0.5229076",
"0.5225584",
"0.5223776",
"0.5208234",
"0.52070165",
"0.5202741",
"0.5202312",
"0.51952773",
"0.51944065",
"0.5181081",
"0.51794475",
"0.517559",
"0.51706153",
"0.516947",
"0.5169103",
"0.5162539",
"0.5156178",
"0.51513094",
"0.51496273",
"0.5144778",
"0.5144312",
"0.51377416",
"0.5130215",
"0.5125931",
"0.5124033",
"0.51237625"
] | 0.7100356 | 1 |
Test setting the agent name. | def test_set_type_bool(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"agent.logging_config.disable_existing_loggers",
"true",
"--type=bool",
],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_set_name_return(self) -> None:\n\n actual = self.helper.set_name(self.test_name)\n\n self.assertIsInstance(actual, EnvironmentVariableHelper)",
"def set_object_name(self, agent, Name):\n\n self.send_ObjectName(agent, agent.agent_id, agent.session_id, {1:[self.LocalID, Name]})",
"def __init__(self, agent_name):\n\n self._agent_name = agent_name",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_change_name_of_the_devicetrue():",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def test_change_name_of_the_devicefalse():",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def name(self, name: str):\n self.inst['targetname'] = name",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def is_java_agent(self):\r\n return self.has_label('java_agent')",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def set_name(self, name=\"\"):\n if isinstance(name, str):\n self.__name = name\n return 0\n print(\"type of nom is not STR\")\n return 1",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def set_name_for_actor(name, actor):\n key = StringKey.MakeKey(\"MeshName\", \"root\")\n i = vtk.vtkInformation()\n i.Set(key, name)\n actor.SetPropertyKeys(i)",
"def botname(self):\n return settings.AIM_USERNAME",
"def test_default_agent_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"agent-port\"], b'tcp:4524')",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def _check_name(self):\n\t\tpass",
"def test_set_library_name(self):\n s1 = System()\n s1.set_library_name(\"Andreson\")\n self.assertEqual(s1.get_library_name(), \"Andreson\")",
"def the_user_changes_the_name_of_the_device(name):\n web_app.change_property_softassert(\"name\",name)",
"def name_option(args, run):\n run.experiment_info[\"name\"] = args\n run.run_logger = run.root_logger.getChild(args)",
"def set_agent_env(self, param, value):\n logging.info(\"setting agent_env param:[%s] = value:[%s]\", param, value)\n self.agent_env[param] = value",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_jobset_autoname(self):\n name = os.path.join('indir', 'infile')\n self.assertEqual(self.jobset.name, name)",
"def _aa_host_name(self):\n self.is_option = True\n self.is_statement = False\n self.has_validator = True\n if not (self.value.startswith('\"') and self.value.endswith('\"')):\n self.value = '\"' + self.value + '\"'\n validate_name(self.value.strip('\"'))",
"async def name(self, ctx, *, name: str = None):\n plagueName = await self.config.plagueName()\n if not name:\n message = f\"The current plague's name is `{plagueName}`.\"\n else:\n await self.config.plagueName.set(name)\n message = f\"Set the current plague's name to `{name}`.\"\n await ctx.send(message)",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )",
"def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value",
"def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()",
"def test_custom_agent_port(self):\n options = ControlOptions()\n options.parseOptions([b\"--agent-port\", b\"tcp:1234\"])\n self.assertEqual(options[\"agent-port\"], b\"tcp:1234\")",
"def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)",
"def test_detector_name(i07_nexus_object_01: I07Nexus):\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021",
"def agent_set(bus):\n # TODO\n pass",
"def set_name(self, name):\n self.settings[\"name\"] = name",
"def test_name_false(self):\r\n self.name = False",
"def set_name(self, name):\n\t\tself.name_ = name",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def setName(self, newName):\n self.__username = newName",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"async def botname(ctx, *, new_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n bot_member = discord.utils.find(lambda m: m.id == amor_manager.user.id, ctx.message.server.members)\n await amor_manager.change_nickname(bot_member, new_name)",
"def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def GetModernizedTestName(self, arg):\n return arg",
"def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_agent():\n\n # default parameters\n print('Testing an agent with default parameters')\n uid = 'test_agent'\n params = {'use_checkpointer': False}\n verify.verify_agent(uid, params)\n verify.log_graph(uid, write_logs=False)\n print('\\n' + '#' * 65 + '\\n')\n\n # random parameters\n for _ in range(9):\n rand_params = utils.get_random_params()\n rand_params['use_checkpointer'] = False\n print(f'Testing an agent with parameters: {rand_params}')\n verify.verify_agent(uid, rand_params)\n verify.log_graph(uid, rand_params, False)\n print('\\n' + '#' * 65 + '\\n')\n\n # cleaning up\n path = os.path.join(configs.LOG_DIR, 'test_agent')\n shutil.rmtree(path)",
"def autoname(self):\n\t\tself.name = self.role_profile",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_story_submitter(self):\n self.assertEqual(self.story.submitter, 'karangoeluw')",
"def setMachineName(self, name):\n if type(name) != str:\n return None\n self.description.setName(name)",
"def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_set_value(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.set_value(\"Hello, World!\")\n\n expected = \"Hello, World!\"\n actual = self.helper.get_value()\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]",
"def setUA(self, useragent):\n\t\tpass",
"def test_name(self):\n self.assertEqual(ApiConfig.name, 'api')",
"def set_name(self,name):\r\n self._name = __name",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n if entry[\"kind\"] == trace_api.SpanKind.SERVER:\n entry[\"name\"] = span_name\n else:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[2:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, default_span_details=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_organization_name(self):\n insurgent = models.Organization(title='Insurgent')\n with self.assertRaises(ValueError):\n insurgent.name = '35453496*%&^$%^'\n with self.assertRaises(ValueError):\n insurgent.name = 'Insurgent'\n insurgent.name = 'insurgent'\n self.assertEqual(insurgent.name, 'insurgent')",
"def set_user_name_override(name: str) -> None:\r\n global _user_name_override\r\n _user_name_override = name",
"def set_name(name=False):\n if not name:\n name = name_generator()\n return name",
"def get_name(self):\n if self.ui.nick_line.text() and self.ui.pass_line.text():\n self.check_verify()\n self.set_name = True\n qApp.exit()",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def set_name(self, sNewVmName):\n\t\tcall_sdk_function('PrlVmCfg_SetName', self.handle, sNewVmName)",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_username(self):\n assert_equals(self.client.username, 'testuser')",
"def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name",
"def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')",
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[-1:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, span_details_callback=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_set_appsearch_engine_name(self):\n\n # Get the current engine name and store it\n original_engine_name = Car.get_appsearch_engine_name()\n\n # Set a new app search engine name\n Car.set_appsearch_engine_name('test_cars')\n\n # Test if its set successfully\n engine_name = Car.get_appsearch_engine_name()\n self.assertEqual(engine_name, 'test_cars')\n\n # Reset it back to the original\n Car.set_appsearch_engine_name(original_engine_name)",
"def setName(self, name):\n self.name = str(name)",
"def setName(self, *args):\n return _libsbml.Objective_setName(self, *args)",
"async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)",
"def test_state_name(self):\n state = State('test-state')\n self.assertEqual(state.name, 'test-state')",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def on_setting_myname(self, value):\n raise NotImplementedError()",
"def enter_name(self, name):\n self.name = name",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_local_agent_from_package_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n\n inputs = {\n 'resource_base': self.resource_base,\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue,\n 'file_server_port': self.fs.port\n }\n\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def set_experiment_name(self, experiment_name):\n self.experiment_name = experiment_name",
"def setName(self, *args):\n return _libsbml.Species_setName(self, *args)",
"def isSetName(self):\n return _libsbml.Objective_isSetName(self)"
] | [
"0.72085243",
"0.7100356",
"0.6366684",
"0.62835133",
"0.6257389",
"0.6257389",
"0.62132823",
"0.6131713",
"0.60440767",
"0.60348433",
"0.5927437",
"0.5916145",
"0.58946425",
"0.5865132",
"0.5844421",
"0.57391584",
"0.5722152",
"0.57040644",
"0.5700256",
"0.56795913",
"0.5665505",
"0.5655928",
"0.5632392",
"0.5624648",
"0.5613523",
"0.55860275",
"0.55763686",
"0.55761933",
"0.5556421",
"0.5545556",
"0.5524442",
"0.5512594",
"0.5481373",
"0.547442",
"0.546205",
"0.54620385",
"0.5456332",
"0.54486966",
"0.54285616",
"0.5425501",
"0.5424599",
"0.54212815",
"0.5411104",
"0.5397801",
"0.53956974",
"0.5389271",
"0.5380992",
"0.53781265",
"0.5372445",
"0.5369932",
"0.5351236",
"0.5347454",
"0.5343573",
"0.533603",
"0.5331701",
"0.532518",
"0.5313506",
"0.53130037",
"0.5309862",
"0.5297768",
"0.5296321",
"0.5290892",
"0.5289959",
"0.5283988",
"0.52811134",
"0.5264371",
"0.52566385",
"0.52563393",
"0.5253919",
"0.52434075",
"0.52428365",
"0.5239398",
"0.52352524",
"0.5231288",
"0.5229124",
"0.5229076",
"0.5225584",
"0.5223776",
"0.5208234",
"0.52070165",
"0.5202741",
"0.5202312",
"0.51952773",
"0.51944065",
"0.5181081",
"0.51794475",
"0.517559",
"0.51706153",
"0.516947",
"0.5169103",
"0.5162539",
"0.5156178",
"0.51513094",
"0.51496273",
"0.5144778",
"0.5144312",
"0.51377416",
"0.5130215",
"0.5125931",
"0.5124033",
"0.51237625"
] | 0.0 | -1 |
Test setting the agent name. | def test_set_type_none(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"agent.logging_config.some_value",
"",
"--type=none",
],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_set_name_return(self) -> None:\n\n actual = self.helper.set_name(self.test_name)\n\n self.assertIsInstance(actual, EnvironmentVariableHelper)",
"def set_object_name(self, agent, Name):\n\n self.send_ObjectName(agent, agent.agent_id, agent.session_id, {1:[self.LocalID, Name]})",
"def __init__(self, agent_name):\n\n self._agent_name = agent_name",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_change_name_of_the_devicetrue():",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def test_change_name_of_the_devicefalse():",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def name(self, name: str):\n self.inst['targetname'] = name",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def is_java_agent(self):\r\n return self.has_label('java_agent')",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def set_name(self, name=\"\"):\n if isinstance(name, str):\n self.__name = name\n return 0\n print(\"type of nom is not STR\")\n return 1",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def set_name_for_actor(name, actor):\n key = StringKey.MakeKey(\"MeshName\", \"root\")\n i = vtk.vtkInformation()\n i.Set(key, name)\n actor.SetPropertyKeys(i)",
"def botname(self):\n return settings.AIM_USERNAME",
"def test_default_agent_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"agent-port\"], b'tcp:4524')",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def _check_name(self):\n\t\tpass",
"def test_set_library_name(self):\n s1 = System()\n s1.set_library_name(\"Andreson\")\n self.assertEqual(s1.get_library_name(), \"Andreson\")",
"def the_user_changes_the_name_of_the_device(name):\n web_app.change_property_softassert(\"name\",name)",
"def name_option(args, run):\n run.experiment_info[\"name\"] = args\n run.run_logger = run.root_logger.getChild(args)",
"def set_agent_env(self, param, value):\n logging.info(\"setting agent_env param:[%s] = value:[%s]\", param, value)\n self.agent_env[param] = value",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_jobset_autoname(self):\n name = os.path.join('indir', 'infile')\n self.assertEqual(self.jobset.name, name)",
"def _aa_host_name(self):\n self.is_option = True\n self.is_statement = False\n self.has_validator = True\n if not (self.value.startswith('\"') and self.value.endswith('\"')):\n self.value = '\"' + self.value + '\"'\n validate_name(self.value.strip('\"'))",
"async def name(self, ctx, *, name: str = None):\n plagueName = await self.config.plagueName()\n if not name:\n message = f\"The current plague's name is `{plagueName}`.\"\n else:\n await self.config.plagueName.set(name)\n message = f\"Set the current plague's name to `{name}`.\"\n await ctx.send(message)",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )",
"def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value",
"def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()",
"def test_custom_agent_port(self):\n options = ControlOptions()\n options.parseOptions([b\"--agent-port\", b\"tcp:1234\"])\n self.assertEqual(options[\"agent-port\"], b\"tcp:1234\")",
"def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)",
"def test_detector_name(i07_nexus_object_01: I07Nexus):\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021",
"def agent_set(bus):\n # TODO\n pass",
"def set_name(self, name):\n self.settings[\"name\"] = name",
"def test_name_false(self):\r\n self.name = False",
"def set_name(self, name):\n\t\tself.name_ = name",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def setName(self, newName):\n self.__username = newName",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"async def botname(ctx, *, new_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n bot_member = discord.utils.find(lambda m: m.id == amor_manager.user.id, ctx.message.server.members)\n await amor_manager.change_nickname(bot_member, new_name)",
"def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def GetModernizedTestName(self, arg):\n return arg",
"def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_agent():\n\n # default parameters\n print('Testing an agent with default parameters')\n uid = 'test_agent'\n params = {'use_checkpointer': False}\n verify.verify_agent(uid, params)\n verify.log_graph(uid, write_logs=False)\n print('\\n' + '#' * 65 + '\\n')\n\n # random parameters\n for _ in range(9):\n rand_params = utils.get_random_params()\n rand_params['use_checkpointer'] = False\n print(f'Testing an agent with parameters: {rand_params}')\n verify.verify_agent(uid, rand_params)\n verify.log_graph(uid, rand_params, False)\n print('\\n' + '#' * 65 + '\\n')\n\n # cleaning up\n path = os.path.join(configs.LOG_DIR, 'test_agent')\n shutil.rmtree(path)",
"def autoname(self):\n\t\tself.name = self.role_profile",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_story_submitter(self):\n self.assertEqual(self.story.submitter, 'karangoeluw')",
"def setMachineName(self, name):\n if type(name) != str:\n return None\n self.description.setName(name)",
"def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_set_value(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.set_value(\"Hello, World!\")\n\n expected = \"Hello, World!\"\n actual = self.helper.get_value()\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]",
"def setUA(self, useragent):\n\t\tpass",
"def test_name(self):\n self.assertEqual(ApiConfig.name, 'api')",
"def set_name(self,name):\r\n self._name = __name",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n if entry[\"kind\"] == trace_api.SpanKind.SERVER:\n entry[\"name\"] = span_name\n else:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[2:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, default_span_details=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_organization_name(self):\n insurgent = models.Organization(title='Insurgent')\n with self.assertRaises(ValueError):\n insurgent.name = '35453496*%&^$%^'\n with self.assertRaises(ValueError):\n insurgent.name = 'Insurgent'\n insurgent.name = 'insurgent'\n self.assertEqual(insurgent.name, 'insurgent')",
"def set_user_name_override(name: str) -> None:\r\n global _user_name_override\r\n _user_name_override = name",
"def set_name(name=False):\n if not name:\n name = name_generator()\n return name",
"def get_name(self):\n if self.ui.nick_line.text() and self.ui.pass_line.text():\n self.check_verify()\n self.set_name = True\n qApp.exit()",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def set_name(self, sNewVmName):\n\t\tcall_sdk_function('PrlVmCfg_SetName', self.handle, sNewVmName)",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_username(self):\n assert_equals(self.client.username, 'testuser')",
"def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name",
"def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')",
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[-1:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, span_details_callback=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_set_appsearch_engine_name(self):\n\n # Get the current engine name and store it\n original_engine_name = Car.get_appsearch_engine_name()\n\n # Set a new app search engine name\n Car.set_appsearch_engine_name('test_cars')\n\n # Test if its set successfully\n engine_name = Car.get_appsearch_engine_name()\n self.assertEqual(engine_name, 'test_cars')\n\n # Reset it back to the original\n Car.set_appsearch_engine_name(original_engine_name)",
"def setName(self, name):\n self.name = str(name)",
"def setName(self, *args):\n return _libsbml.Objective_setName(self, *args)",
"async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)",
"def test_state_name(self):\n state = State('test-state')\n self.assertEqual(state.name, 'test-state')",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def on_setting_myname(self, value):\n raise NotImplementedError()",
"def enter_name(self, name):\n self.name = name",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_local_agent_from_package_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n\n inputs = {\n 'resource_base': self.resource_base,\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue,\n 'file_server_port': self.fs.port\n }\n\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def set_experiment_name(self, experiment_name):\n self.experiment_name = experiment_name",
"def setName(self, *args):\n return _libsbml.Species_setName(self, *args)",
"def isSetName(self):\n return _libsbml.Objective_isSetName(self)"
] | [
"0.72085243",
"0.7100356",
"0.6366684",
"0.62835133",
"0.6257389",
"0.6257389",
"0.62132823",
"0.6131713",
"0.60440767",
"0.60348433",
"0.5927437",
"0.5916145",
"0.58946425",
"0.5865132",
"0.5844421",
"0.57391584",
"0.5722152",
"0.57040644",
"0.5700256",
"0.56795913",
"0.5665505",
"0.5655928",
"0.5632392",
"0.5624648",
"0.5613523",
"0.55860275",
"0.55763686",
"0.55761933",
"0.5556421",
"0.5545556",
"0.5524442",
"0.5512594",
"0.5481373",
"0.547442",
"0.546205",
"0.54620385",
"0.5456332",
"0.54486966",
"0.54285616",
"0.5425501",
"0.5424599",
"0.54212815",
"0.5411104",
"0.5397801",
"0.53956974",
"0.5389271",
"0.5380992",
"0.53781265",
"0.5372445",
"0.5369932",
"0.5351236",
"0.5347454",
"0.5343573",
"0.533603",
"0.5331701",
"0.532518",
"0.5313506",
"0.53130037",
"0.5309862",
"0.5297768",
"0.5296321",
"0.5290892",
"0.5289959",
"0.5283988",
"0.52811134",
"0.5264371",
"0.52566385",
"0.52563393",
"0.5253919",
"0.52434075",
"0.52428365",
"0.5239398",
"0.52352524",
"0.5231288",
"0.5229124",
"0.5229076",
"0.5225584",
"0.5223776",
"0.5208234",
"0.52070165",
"0.5202741",
"0.5202312",
"0.51952773",
"0.51944065",
"0.5181081",
"0.51794475",
"0.517559",
"0.51706153",
"0.516947",
"0.5169103",
"0.5162539",
"0.5156178",
"0.51513094",
"0.51496273",
"0.5144778",
"0.5144312",
"0.51377416",
"0.5130215",
"0.5125931",
"0.5124033",
"0.51237625"
] | 0.0 | -1 |
Test setting the default routing. | def test_set_type_dict(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"agent.default_routing",
'{"fetchai/contract_api:any": "fetchai/ledger:any"}',
"--type=dict",
],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_default_router(self):\n assert self.rc_conf.has_key('defaultrouter')\n assert self.rc_conf['defaultrouter'] == '\"10.137.1.7\"'",
"def test_default_routing_updated(self):\n assert self.agent_config.default_routing == {\n self.new_protocol_id: self.new_connection_id\n }",
"def test_get_agent_default_routing(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.default_routing\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"{}\\n\"",
"def test_default_route(self):\n self.assertEqual(\n DogStatsd(use_default_route=True).host,\n \"172.17.0.1\"\n )",
"def test_default_routing_updated_correctly(self):\n result = self.run_cli_command(\n \"--skip-consistency-check\",\n \"config\",\n \"get\",\n \"agent.default_routing\",\n cwd=self._get_cwd(),\n )\n assert (\n result.stdout\n == f'{{\"{DefaultMessage.protocol_id}\": \"{StubConnection.connection_id}\"}}\\n'\n )",
"def test_route(self):\n\n # Set a global route for all items\n self.site.route(r\"(.*)\", lambda match, item: \"{}/index.html\".format(os.path.splitext(match.group(1))[0]))\n # Override the index item's route\n self.site.route(r\"index.html\", lambda match, item: \"index.html\")\n\n self.assertEqual(\"index.html\", self.site.items[\"index.html\"].file_route)\n self.assertEqual(\"test/test/index.html\", self.site.items[\"test/test.html\"].file_route)",
"def test_setting_default(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"base_site.html\")",
"def setDefaultRoute( self, intf ):\n self.cmd( 'ip route flush root 0/0' )\n return self.cmd( 'route add default ' + intf )",
"def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route",
"def test_api(self):\n new_route = self.route.api(\"new\")\n assert new_route != self.route\n assert new_route.route[\"api\"] == \"new\"",
"def test_default_handler(self):\n route = RouteFactory.build()\n handler = route.get_handler()\n self.assertIsInstance(handler, handlers.TemplateHandler)",
"def test_0010_simple(self):\n self.setup_defaults()\n app = self.get_app()\n\n with app.test_request_context('/'):\n self.assertEqual(url_for('nereid.website.home'), '/')",
"def test_init(self):\n route = mock.MagicMock()\n\n handler = BaseHandler(route)\n\n self.assertEqual(handler.route, route)",
"def test_setting_override(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"test.html\")",
"def test_create_route_for_all_namespaces(self):\n pass",
"def test_add_route(self):\n\n test_router = router.Router()\n\n command = smcommand.SMClientCommand.NSCAttack\n\n test_router.add_route(command, Controller1)\n self.assertEqual(test_router.routes[command], [Controller1])\n\n test_router.add_route(command, Controller2)\n self.assertEqual(test_router.routes[command], [Controller1, Controller2])",
"def test_defaults(self):\n serverFromString = serve.ServiceMaker._serverFromString\n self.assertIdentical(serverFromString, endpoints.serverFromString)\n self.assertIdentical(serve.ServiceMaker._buildSite, serve.buildSite)",
"def test_create_namespaced_route(self):\n pass",
"def test_replace_namespaced_route(self):\n pass",
"def test_patch_namespaced_route(self):\n pass",
"def test_main_overview_default_url(self):\n\n # change config\n set_main_overview('foobar')\n\n # login testuser\n self.client.login(\n username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'\n )\n # get reverse url\n url = reverse('main_overview')\n # compare url\n self.assertEqual(url, '/main_overview/')\n # create url\n destination = urllib.parse.quote('/system/')\n # get response\n response = self.client.get('/main_overview/')\n # compare redirect\n self.assertRedirects(\n response, destination, status_code=302, target_status_code=200\n )",
"def test_urls(self):\n assert reverse('main-index') == '/'",
"def setUp(self):\n class TestHandler(BaseHandler):\n urlconf = 'conman.routes.tests.urls'\n\n self.route = mock.Mock()\n self.request = mock.Mock()\n self.handler = TestHandler(self.route)\n self.view = 'conman.routes.tests.urls.dummy_view'",
"def test_url_root(self):\n url = reverse('index')\n response = self.get(url)\n self.assertEqual(response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_read_namespaced_route(self):\n pass",
"def test_no_routes(self):\n response = self.client.get(reverse('routes_app:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No routes are available.\")",
"def test_custom_route(self):\n\n # Create a human object\n Human.create(id=1, name='John')\n Dog.create(id=5, name='Johnny', owner='John')\n\n # Get the custom route\n rv = self.client.get('/humans/1/my_dogs')\n assert rv.status_code == 200\n assert rv.json['total'] == 1\n assert rv.json['dogs'][0] == {'age': 5, 'id': 5, 'name': 'Johnny', 'owner': 'John'}",
"def test_routing_policy_replace_path(api_client):\n response = api_client().get(\"/anything/anything\")\n assert response.status_code == 200\n\n echoed_request = EchoedRequest.create(response)\n assert echoed_request.path == \"/anything\"",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')",
"def test_path(self):\n base_handler_path = 'conman.routes.handlers.BaseHandler'\n self.assertEqual(BaseHandler.path(), base_handler_path)",
"def test_gourde_views(self):\n rv = self.app.get(\"/-/\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/threads\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/ready\")\n self.assertEqual(rv.status_code, 200)",
"def test_default(self):\r\n self.assertEqual(self.option.default, '/tmp')",
"def route(self):\n pass",
"def test_replace_namespaced_route_status(self):\n pass",
"def test_init_default(self):\n self._test_init_default()",
"def test_no_matching_routes(monkeypatch) -> None:\n monkeypatch.setattr(django_settings, 'SWAGGER_TESTER', {'PATH': yml_path})\n monkeypatch.setattr('django_swagger_tester.static_schema.loader.LoadStaticSchema.get_schema', ret_bad_schema)\n with pytest.raises(ValueError, match='Could not resolve path'):\n LoadStaticSchema('apsi/v1/trucks/correct', 'get', status_code=200)",
"def test_list_namespaced_route(self):\n pass",
"def get_default_route():\n # Discover the active/preferred network interface \n # by connecting to Google's public DNS server\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.settimeout(2)\n s.connect((\"8.8.8.8\", 80))\n iface_ip = s.getsockname()[0]\n except socket.error:\n sys.stderr.write('IoT Inspector cannot run without network connectivity.\\n')\n sys.exit(1)\n\n while True:\n routes = _get_routes()\n default_route = None\n for route in routes:\n if route[4] == iface_ip:\n # Reassign scapy's default interface to the one we selected\n sc.conf.iface = route[3]\n default_route = route[2:5]\n break\n if default_route:\n break\n\n log('get_default_route: retrying')\n time.sleep(1)\n \n\n # If we are using windows, conf.route.routes table doesn't update.\n # We have to update routing table manually for packets\n # to pick the correct route. \n if sys.platform.startswith('win'):\n for i, route in enumerate(routes):\n # if we see our selected iface, update the metrics to 0\n if route[3] == default_route[1]:\n routes[i] = (*route[:-1], 0)\n\n return default_route",
"def announce_default_routes(localhost, tbinfo):\n yield\n\n ptf_ip = tbinfo[\"ptf_ip\"]\n topo_name = tbinfo[\"topo\"][\"name\"]\n if topo_name not in ['t0', 'm0', 'mx']:\n return\n logger.info(\n \"withdraw and announce default ipv4 and ipv6 routes for {}\".format(topo_name))\n localhost.announce_routes(\n topo_name=topo_name, ptf_ip=ptf_ip, action=WITHDRAW, path=\"../ansible/\")\n localhost.announce_routes(\n topo_name=topo_name, ptf_ip=ptf_ip, action=ANNOUNCE, path=\"../ansible/\")",
"def test_http_we_provide_default_route_prefix_cls(serve_instance):\n with InputNode() as dag_input:\n m1 = Model.bind(1)\n m2 = Model.bind(1)\n m1_output = m1.forward.bind(dag_input[0])\n m2_output = m2.forward.bind(dag_input[0])\n combine_output = combine.bind(m1_output, m2_output)\n serve_dag = Driver.bind(combine_output)\n\n deployments = pipeline_build(serve_dag)\n ingress_deployment = get_and_validate_ingress_deployment(deployments)\n assert ingress_deployment.route_prefix == \"/\"\n for deployment in deployments[:-1]:\n assert deployment.route_prefix is None",
"def test_patch_namespaced_route_status(self):\n pass",
"def test_defaults(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def test_get_current_route_factory():\n\n assert application_services.get_current_route_factory() == create_route",
"def test_one_route(self):\n route = Route()\n route.save()\n response = self.client.get(reverse('routes_app:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, str(route))",
"def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)",
"def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)",
"def test_list_route_for_all_namespaces(self):\n pass",
"def test_view_url_accessible_by_name(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)",
"def test_website_not_set_if_not_match(self, template_override_mock):\n request = mock.Mock()\n request.resolver_match.kwargs.get.side_effect = Exception('something')\n request.path = '/'\n context_processors.decide_base_template(request)\n template_override_mock.assert_not_called()",
"def test_default(self):\r\n self.assertEqual(self.option.default, 1234)",
"def test_register_route_factory():\n\n current_factory = application_services.get_current_route_factory()\n application_services.register_route_factory(mock_route_factory)\n assert application_services.get_current_route_factory() == mock_route_factory\n application_services.register_route_factory(current_factory)",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'hello')",
"def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )",
"def test_home(self):\n response = self.app.get(\"/\")\n self.assertTrue(response.status_code, 200)",
"def __init__(self, root_handler, path_not_found):\n if self.__test_path(root_handler) and self.__test_path(path_not_found):\n self.route_trie = RouteTrie(root_handler) # Passes root handler to the initialised trie\n self.path_not_found = path_not_found # Stores 404 not found response",
"def test_http_we_provide_default_route_prefix_func(serve_instance):\n func_dag = func_deployment.bind()\n deployments = pipeline_build(func_dag)\n ingress_deployment = get_and_validate_ingress_deployment(deployments)\n assert ingress_deployment.route_prefix == \"/\"",
"def add_routes(self):\n\n # create a routegroup\n routegroup = MewloRouteGroup('testsite_routegroup')\n # overide the parent import-pack-directory for the urls in this group? if we don't it will use the controller root set in SITE config\n # routegroup.set_controllerroot(pkgdirimp_controllers)\n\n routegroup.append(\n MewloRoute(\n id = 'home',\n path = \"/\",\n controller = MewloController(function='requests.request_home')\n ))\n\n\n routegroup.append(\n MewloRoute(\n id = 'hello',\n path = '/test/hello',\n args = [\n MewloRouteArgString(\n id = 'name',\n required = True,\n help = \"name of person to say hello to\",\n ),\n MewloRouteArgInteger(\n id = 'age',\n required = False,\n help = \"age of person (optional)\",\n defaultval = 44,\n )\n ],\n controller = MewloController(function=\"requests.request_sayhello\"),\n # we can pass in any extra data which will just be part of the route that can be examined post-matching\n extras = { 'stuff': \"whatever we want\" },\n # we can force the route to simulate as if certain url call args were assigned (this works whether there are RouteArgs for these or not; no type checking is performed on them)\n # this could be useful in two scenarios: first, if we initially wrote code to handle an arg and then changed our mind and want to not let user set that arg; second, if we reuse a controller function in different places and simulate dif arg values for each\n forcedargs = { 'sign': u\"aries\" },\n ))\n\n\n\n from controllers import requests\n routegroup.append(\n MewloRoute(\n id = 'article',\n path = '/article',\n args = [\n MewloRouteArgString(\n id = 'title',\n required = False,\n positional = True,\n help = \"title of article to display\",\n )\n ],\n # another way to specify the controller is to pass in the actual function reference (rather than as a string)\n controller = MewloController(function=requests.request_article),\n ))\n\n routegroup.append(\n MewloRoute(\n id = 'help',\n path = '/user/help',\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_help'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'contact',\n path = '/help/contact',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_contact'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'about',\n path = '/help/about',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_about'),\n ))\n\n\n #static file server\n if (False):\n routegroup.append(\n MewloRoute_StaticFiles(\n id = 'static_files',\n path = '/static',\n controller = MewloController_StaticFiles(\n sourcepath = '${sitefilepath}/staticfilesource'\n ),\n ))\n\n\n # add routegroup we just created to the site\n self.comp('routemanager').append(routegroup)",
"def test_default_behavior_of_home_view(dummy_request):\n from ..views.default import get_home_view\n from pyramid.response import Response\n\n request = dummy_request\n response = get_home_view(request)\n # import pdb ; pdb.set_trace()\n assert isinstance(response, dict)",
"def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200",
"def test_default_behavior_of_base_view(dummy_request):\n from ..views.default import get_base_view\n from pyramid.response import Response\n\n request = dummy_request\n response = get_base_view(request)\n # import pdb ; pdb.set_trace()\n assert isinstance(response, Response)\n assert response.text == 'base view is functional'",
"def RoutingInterfaceInitialize(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def test_main(self):\n path = reverse(\"main\")\n request = RequestFactory().get(path)\n response = index(request)\n assert response.status_code == 200",
"def test_api_root_get_routes(\n self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory\n ):\n\n view = APIRootView()\n url = drf_reverse(\"accounts-api:api-root\")\n request = request_factory.get(url)\n request.user = user\n\n view.request = request\n routes = view.get_routes(request)\n\n assert isinstance(routes, dict)\n assert routes[\"users\"] == drf_reverse(\"accounts-api:user-list\", request=request)\n assert routes[\"check-username-availability\"] == drf_reverse(\n \"accounts-api:user-check-username-availability\", request=request\n )",
"def test_home_route_context_foo(self):\n response = self.client.get(\"/\")\n self.assertContains(response, 'Imager Site')",
"def test_default_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"port\"], b'tcp:%d' % (REST_API_PORT,))",
"def test_default(self):\r\n self.assertEqual(self.option.default, False)",
"def test_default_versions_path(self):\n t = self.create_request_object()\n self.assertEqual(\"Mediflex\", t.project_name)\n self.assertEqual(\"Prod\", t.environment_name)\n self.assertEqual(\"studies/Mediflex(Prod)/datasets/regular\", t.url_path())",
"def robot_is_wanting_default(giver, object, receiver, ctxt) :\n if receiver==\"compliant robot\" :\n raise ActionHandled()",
"def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data",
"def test_solo_route_init() -> None:\n # Test SoloRoute with VirtualClientConnection (ClientConnection) in constructor\n destination = SpecificLocation()\n virtual_server = VirtualServerConnection(node=Node())\n virtual_client = VirtualClientConnection(server=virtual_server)\n h_solo = SoloRoute(destination=destination, connection=virtual_client)\n\n assert h_solo.schema.destination is destination\n assert h_solo.connection is virtual_client",
"def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")",
"def test_get_with_default(self):\n self.assertEqual(self.config.get('basic','salutation'),None)\n self.assertEqual(self.config.get('basic','salutation','bonjour'),\n 'bonjour')",
"def test_make_pathways(self):\n basic_test_runner(self, 'pathways')",
"def test_home(self):\n result = self.app.get('/')\n self.assertEqual(result.status_code, 200)",
"def test_read_namespaced_route_status(self):\n pass",
"def test_defaults(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"max-age=0\"})",
"def test_home(self):\n\n response = self.client.get(reverse('home'))\n\n assert response.status_code == 200",
"def test_handle_basic(self):\n response = self.handler.handle(self.request, '/')\n\n self.view.assert_called_with(self.request, route=self.route)\n expected = self.view(self.request, route=self.route)\n self.assertEqual(response, expected)",
"def testRedirect(self):\n self.assertRaises(NotImplementedError, self.handler.redirect, '/')",
"def test_init_no_shortlist(self):\n # Create an empty routing table.\n self.node.routing_table = RoutingTable(self.node.network_id)\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n self.assertEqual(True, lookup.done())\n self.assertRaises(RoutingTableEmpty, lookup.result)",
"def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")",
"def test_get(self):\n obj = self.client.bus.routes.get(self.random_route.id)\n self.assertEqual(type(obj), BusRoute)\n self.assertEqual(obj.id, self.random_route.id)\n self.assertEqual(obj.name, self.random_route.name)",
"def use_routes(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_routes\")",
"def add_routes(self):\n pass",
"def test_index_view(self):\n response = self.client.get(url_for('main.index'))\n self.assertEqual(response.status_code, 200)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def route(self, env):\n return None",
"def configure_routing(config):\n # Static file access. Separate root for each subdirectory, because Pyramid\n # treats these as first-class routables rather than a last-ditch fallback\n config.add_static_view('/css', 'floof:assets/css')\n config.add_static_view('/files', 'floof:assets/files') # dummy file store\n config.add_static_view('/icons', 'floof:assets/icons')\n config.add_static_view('/images', 'floof:assets/images')\n config.add_static_view('/js', 'floof:assets/js')\n # TODO this doesn't actually work\n config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico')\n\n\n r = config.add_route\n\n # Miscellaneous root stuff\n r('root', '/')\n r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator)\n r('reproxy', '/reproxy')\n r('log', '/log')\n\n # Registration and auth\n r('account.login', '/account/login')\n r('account.login_begin', '/account/login_begin')\n r('account.login_finish', '/account/login_finish')\n r('account.register', '/account/register')\n r('account.add_identity', '/account/add_identity')\n r('account.persona.login', '/account/persona/login')\n r('account.logout', '/account/logout')\n\n r('account.profile', '/account/profile')\n\n # Regular user control panel\n r('controls.index', '/account/controls')\n r('controls.auth', '/account/controls/authentication')\n r('controls.persona', '/account/controls/persona')\n r('controls.persona.add', '/account/controls/persona/add')\n r('controls.persona.remove', '/account/controls/persona/remove')\n r('controls.openid', '/account/controls/openid')\n r('controls.openid.add', '/account/controls/openid/add')\n r('controls.openid.add_finish', '/account/controls/openid/add_finish')\n r('controls.openid.remove', '/account/controls/openid/remove')\n r('controls.rels', '/account/controls/relationships')\n r('controls.rels.watch', '/account/controls/relationships/watch')\n r('controls.rels.unwatch', '/account/controls/relationships/unwatch')\n r('controls.info', '/account/controls/user_info')\n\n r('controls.certs', '/account/controls/certificates')\n r('controls.certs.add', '/account/controls/certificates/add')\n r('controls.certs.generate_server',\n '/account/controls/certificates/gen/cert-{name}.p12')\n r('controls.certs.details',\n '/account/controls/certificates/details/{serial:[0-9a-f]+}')\n r('controls.certs.download',\n '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem')\n r('controls.certs.revoke',\n '/account/controls/certificates/revoke/{serial:[0-9a-f]+}')\n\n # User pages\n kw = sqla_route_options('user', 'name', model.User.name)\n r('users.view', '/users/{name}', **kw)\n r('users.art', '/users/{name}/art', **kw)\n r('users.art_by_album', '/users/{name}/art/{album}', **kw)\n r('users.profile', '/users/{name}/profile', **kw)\n r('users.watchstream', '/users/{name}/watchstream', **kw)\n r('albums.user_index', '/users/{name}/albums', **kw)\n\n r('api:users.list', '/users.json')\n\n # Artwork\n kw = sqla_route_options('artwork', 'id', model.Artwork.id)\n kw['pregenerator'] = artwork_pregenerator\n r('art.browse', '/art')\n r('art.upload', '/art/upload')\n r('art.view', r'/art/{id:\\d+}{title:(-.+)?}', **kw)\n r('art.add_tags', r'/art/{id:\\d+}/add_tags', **kw)\n r('art.remove_tags', r'/art/{id:\\d+}/remove_tags', **kw)\n r('art.rate', r'/art/{id:\\d+}/rate', **kw)\n\n # Tags\n # XXX what should the tag name regex be, if anything?\n # XXX should the regex be checked in the 'factory' instead? 
way easier that way...\n kw = sqla_route_options('tag', 'name', model.Tag.name)\n r('tags.list', '/tags')\n r('tags.view', '/tags/{name}', **kw)\n r('tags.artwork', '/tags/{name}/artwork', **kw)\n\n # Albums\n # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has\n user_router = SugarRouter(config, '/users/{user}', model.User.name)\n album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user)\n album_router.add_route('albums.artwork', '')\n\n # Administration\n r('admin.dashboard', '/admin')\n r('admin.log', '/admin/log')\n\n # Debugging\n r('debug.blank', '/debug/blank')\n r('debug.crash', '/debug/crash')\n r('debug.mako-crash', '/debug/mako-crash')\n r('debug.status.303', '/debug/303')\n r('debug.status.400', '/debug/400')\n r('debug.status.403', '/debug/403')\n r('debug.status.404', '/debug/404')\n\n # Comments; made complex because they can attach to different parent URLs.\n # Rather than hack around how Pyramid's routes works, we can just use our\n # own class that does what we want!\n\n # XXX 1: make this work for users as well\n # XXX 2: make the other routes work\n # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes\n parent_route_names = ('art.view', 'user.view')\n mapper = config.get_routes_mapper()\n parent_routes = [mapper.get_route(name) for name in parent_route_names]\n commentables = dict(\n users=model.User.name,\n art=model.Artwork.id,\n )\n\n def comments_factory(request):\n # XXX prefetching on these?\n type = request.matchdict['type']\n identifier = request.matchdict['identifier']\n\n try:\n sqla_column = commentables[type]\n entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one()\n except (NoResultFound, KeyError):\n # 404!\n raise NotFound()\n\n if 'comment_id' not in request.matchdict:\n return contextualize(entity.discussion)\n\n # URLs to specific comments should have those comments as the context\n try:\n return contextualize(\n model.session .query(model.Comment)\n .with_parent(entity.discussion)\n .filter(model.Comment.id == request.matchdict['comment_id'])\n .one())\n except NoResultFound:\n raise NotFound()\n\n\n def comments_pregenerator(request, elements, kw):\n resource = None\n comment = kw.get('comment', None)\n\n if comment:\n kw['comment_id'] = comment.id\n\n if 'resource' not in kw:\n resource = comment.discussion.resource\n\n if not resource:\n resource = kw['resource']\n\n # XXX users...\n entity = resource.member\n kw['type'] = 'art'\n kw['identifier'] = entity.id\n return elements, kw\n\n r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory)\n r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator)",
"def test_handle_basic(self):\n with mock.patch(self.view) as view:\n response = self.handler.handle(self.request, '/')\n\n view.assert_called_with(self.request, route=self.route)\n self.assertEqual(response, view(self.request, route=self.route))",
"def test_default_app_map_search(self):\n pass",
"def test_loader_build_urls_defaults(settings):\n settings.STATICPAGES = [\"index\", \"foo\"]\n settings.STATICPAGES_DEFAULT_TEMPLATEPATH = \"ping\"\n settings.STATICPAGES_DEFAULT_NAME_BASE = \"plop-\"\n settings.STATICPAGES_DEFAULT_URLPATH = \"moo\"\n\n loader = StaticpagesLoader()\n\n urls = loader.build_urls()\n\n index_url = urls[0]\n assert str(index_url.pattern) == \"moo/\"\n assert index_url.name == \"plop-index\"\n assert index_url.callback.view_initkwargs[\"template_name\"] == \"ping/index.html\"\n\n foo_url = urls[1]\n assert str(foo_url.pattern) == \"moo/foo/\"\n assert foo_url.name == \"plop-foo\"\n assert foo_url.callback.view_initkwargs[\"template_name\"] == \"ping/foo.html\"",
"def test_route_schema_init() -> None:\n destination = SpecificLocation()\n rschema = RouteSchema(destination)\n\n assert rschema.destination is not None\n assert rschema.destination._id == destination._id",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def test_root(self):\n response = self.app.test_client().get('/test/')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/index.html')",
"def test_handle(self):\n route = RouteFactory.build(url='/branch/')\n route.handler_class = mock.MagicMock()\n request = mock.Mock()\n\n result = route.handle(request, '/branch/leaf/')\n\n expected = route.handler_class(route).handle(request, '/leaf/')\n self.assertEqual(result, expected)",
"def test_default_app_map_search_0(self):\n pass",
"def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None:\n patch_src_m = f\"{TEST_DATA}/virtual-server-route/route-multiple.yaml\"\n patch_v_s_route_from_yaml(\n kube_apis.custom_objects,\n v_s_route_setup.route_m.name,\n patch_src_m,\n v_s_route_setup.route_m.namespace,\n )\n wait_before_test()",
"def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])",
"def test_mount_routes_with_middleware_url_path_for() -> None:\n assert mounted_routes_with_middleware.url_path_for(\"route\") == \"/http/\"",
"def testGet():\n bottle.response.set_header('content-type', 'text/plain')\n content = \"Web app file is located at %s\" % os.path.dirname(os.path.abspath(__file__))\n siteMap = \"\"\n\n for route in app.routes:\n siteMap = \"%s%s%s %s\" % (siteMap, '\\n' if siteMap else '', route.rule, route.method)\n target = route.config.get('mountpoint', {}).get('target')\n if target:\n for way in target.routes:\n siteMap = \"%s\\n %s %s\" % (siteMap, way.rule, way.method)\n\n content = \"%s\\n%s\" % (content, siteMap)\n return content"
] | [
"0.7380499",
"0.71554756",
"0.706406",
"0.6946817",
"0.6943349",
"0.67906195",
"0.66364366",
"0.6569018",
"0.6359504",
"0.6266595",
"0.619172",
"0.6175089",
"0.61632776",
"0.60943156",
"0.6091761",
"0.60507745",
"0.60375005",
"0.6034048",
"0.59746397",
"0.59736043",
"0.59731203",
"0.5953322",
"0.5937068",
"0.59347844",
"0.59265846",
"0.5909573",
"0.5900354",
"0.5899029",
"0.5868762",
"0.5828785",
"0.5826898",
"0.58179057",
"0.57821745",
"0.5746125",
"0.5724846",
"0.5721114",
"0.57159543",
"0.5689503",
"0.56884456",
"0.5682442",
"0.5674908",
"0.56583655",
"0.5640149",
"0.5610994",
"0.56032085",
"0.56032085",
"0.5594442",
"0.5576072",
"0.5574818",
"0.5558244",
"0.5552924",
"0.5550567",
"0.55475116",
"0.5533237",
"0.5511676",
"0.5507702",
"0.5494679",
"0.5483146",
"0.5468188",
"0.54512036",
"0.54428124",
"0.5425316",
"0.5421905",
"0.5421604",
"0.5417869",
"0.5417689",
"0.541477",
"0.5406376",
"0.5406209",
"0.5405335",
"0.54053235",
"0.54014367",
"0.5397384",
"0.539348",
"0.5386241",
"0.53744817",
"0.53730553",
"0.5371474",
"0.5366039",
"0.5341839",
"0.53323567",
"0.5332102",
"0.5331398",
"0.5328678",
"0.53217393",
"0.53068435",
"0.53068435",
"0.5301415",
"0.5291447",
"0.52897555",
"0.52894205",
"0.5285262",
"0.5271909",
"0.5269568",
"0.52661407",
"0.52560353",
"0.5249318",
"0.52323204",
"0.5231625",
"0.5226011",
"0.52250236"
] | 0.0 | -1 |
Test setting the default routing. | def test_set_type_list(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"vendor.fetchai.connections.p2p_libp2p.config.entry_peers",
'["peer1", "peer2"]',
"--type=list",
],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_default_router(self):\n assert self.rc_conf.has_key('defaultrouter')\n assert self.rc_conf['defaultrouter'] == '\"10.137.1.7\"'",
"def test_default_routing_updated(self):\n assert self.agent_config.default_routing == {\n self.new_protocol_id: self.new_connection_id\n }",
"def test_get_agent_default_routing(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.default_routing\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"{}\\n\"",
"def test_default_route(self):\n self.assertEqual(\n DogStatsd(use_default_route=True).host,\n \"172.17.0.1\"\n )",
"def test_default_routing_updated_correctly(self):\n result = self.run_cli_command(\n \"--skip-consistency-check\",\n \"config\",\n \"get\",\n \"agent.default_routing\",\n cwd=self._get_cwd(),\n )\n assert (\n result.stdout\n == f'{{\"{DefaultMessage.protocol_id}\": \"{StubConnection.connection_id}\"}}\\n'\n )",
"def test_route(self):\n\n # Set a global route for all items\n self.site.route(r\"(.*)\", lambda match, item: \"{}/index.html\".format(os.path.splitext(match.group(1))[0]))\n # Override the index item's route\n self.site.route(r\"index.html\", lambda match, item: \"index.html\")\n\n self.assertEqual(\"index.html\", self.site.items[\"index.html\"].file_route)\n self.assertEqual(\"test/test/index.html\", self.site.items[\"test/test.html\"].file_route)",
"def test_setting_default(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"base_site.html\")",
"def setDefaultRoute( self, intf ):\n self.cmd( 'ip route flush root 0/0' )\n return self.cmd( 'route add default ' + intf )",
"def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route",
"def test_api(self):\n new_route = self.route.api(\"new\")\n assert new_route != self.route\n assert new_route.route[\"api\"] == \"new\"",
"def test_default_handler(self):\n route = RouteFactory.build()\n handler = route.get_handler()\n self.assertIsInstance(handler, handlers.TemplateHandler)",
"def test_0010_simple(self):\n self.setup_defaults()\n app = self.get_app()\n\n with app.test_request_context('/'):\n self.assertEqual(url_for('nereid.website.home'), '/')",
"def test_init(self):\n route = mock.MagicMock()\n\n handler = BaseHandler(route)\n\n self.assertEqual(handler.route, route)",
"def test_setting_override(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"test.html\")",
"def test_create_route_for_all_namespaces(self):\n pass",
"def test_add_route(self):\n\n test_router = router.Router()\n\n command = smcommand.SMClientCommand.NSCAttack\n\n test_router.add_route(command, Controller1)\n self.assertEqual(test_router.routes[command], [Controller1])\n\n test_router.add_route(command, Controller2)\n self.assertEqual(test_router.routes[command], [Controller1, Controller2])",
"def test_defaults(self):\n serverFromString = serve.ServiceMaker._serverFromString\n self.assertIdentical(serverFromString, endpoints.serverFromString)\n self.assertIdentical(serve.ServiceMaker._buildSite, serve.buildSite)",
"def test_create_namespaced_route(self):\n pass",
"def test_replace_namespaced_route(self):\n pass",
"def test_patch_namespaced_route(self):\n pass",
"def test_main_overview_default_url(self):\n\n # change config\n set_main_overview('foobar')\n\n # login testuser\n self.client.login(\n username='testuser_main_overview', password='RYgxCfV2NRcfXlJvsSHP'\n )\n # get reverse url\n url = reverse('main_overview')\n # compare url\n self.assertEqual(url, '/main_overview/')\n # create url\n destination = urllib.parse.quote('/system/')\n # get response\n response = self.client.get('/main_overview/')\n # compare redirect\n self.assertRedirects(\n response, destination, status_code=302, target_status_code=200\n )",
"def test_urls(self):\n assert reverse('main-index') == '/'",
"def setUp(self):\n class TestHandler(BaseHandler):\n urlconf = 'conman.routes.tests.urls'\n\n self.route = mock.Mock()\n self.request = mock.Mock()\n self.handler = TestHandler(self.route)\n self.view = 'conman.routes.tests.urls.dummy_view'",
"def test_url_root(self):\n url = reverse('index')\n response = self.get(url)\n self.assertEqual(response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_read_namespaced_route(self):\n pass",
"def test_no_routes(self):\n response = self.client.get(reverse('routes_app:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No routes are available.\")",
"def test_custom_route(self):\n\n # Create a human object\n Human.create(id=1, name='John')\n Dog.create(id=5, name='Johnny', owner='John')\n\n # Get the custom route\n rv = self.client.get('/humans/1/my_dogs')\n assert rv.status_code == 200\n assert rv.json['total'] == 1\n assert rv.json['dogs'][0] == {'age': 5, 'id': 5, 'name': 'Johnny', 'owner': 'John'}",
"def test_routing_policy_replace_path(api_client):\n response = api_client().get(\"/anything/anything\")\n assert response.status_code == 200\n\n echoed_request = EchoedRequest.create(response)\n assert echoed_request.path == \"/anything\"",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')",
"def test_path(self):\n base_handler_path = 'conman.routes.handlers.BaseHandler'\n self.assertEqual(BaseHandler.path(), base_handler_path)",
"def test_gourde_views(self):\n rv = self.app.get(\"/-/\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/threads\")\n self.assertEqual(rv.status_code, 200)\n\n rv = self.app.get(\"/-/ready\")\n self.assertEqual(rv.status_code, 200)",
"def test_default(self):\r\n self.assertEqual(self.option.default, '/tmp')",
"def route(self):\n pass",
"def test_replace_namespaced_route_status(self):\n pass",
"def test_init_default(self):\n self._test_init_default()",
"def test_no_matching_routes(monkeypatch) -> None:\n monkeypatch.setattr(django_settings, 'SWAGGER_TESTER', {'PATH': yml_path})\n monkeypatch.setattr('django_swagger_tester.static_schema.loader.LoadStaticSchema.get_schema', ret_bad_schema)\n with pytest.raises(ValueError, match='Could not resolve path'):\n LoadStaticSchema('apsi/v1/trucks/correct', 'get', status_code=200)",
"def test_list_namespaced_route(self):\n pass",
"def get_default_route():\n # Discover the active/preferred network interface \n # by connecting to Google's public DNS server\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.settimeout(2)\n s.connect((\"8.8.8.8\", 80))\n iface_ip = s.getsockname()[0]\n except socket.error:\n sys.stderr.write('IoT Inspector cannot run without network connectivity.\\n')\n sys.exit(1)\n\n while True:\n routes = _get_routes()\n default_route = None\n for route in routes:\n if route[4] == iface_ip:\n # Reassign scapy's default interface to the one we selected\n sc.conf.iface = route[3]\n default_route = route[2:5]\n break\n if default_route:\n break\n\n log('get_default_route: retrying')\n time.sleep(1)\n \n\n # If we are using windows, conf.route.routes table doesn't update.\n # We have to update routing table manually for packets\n # to pick the correct route. \n if sys.platform.startswith('win'):\n for i, route in enumerate(routes):\n # if we see our selected iface, update the metrics to 0\n if route[3] == default_route[1]:\n routes[i] = (*route[:-1], 0)\n\n return default_route",
"def announce_default_routes(localhost, tbinfo):\n yield\n\n ptf_ip = tbinfo[\"ptf_ip\"]\n topo_name = tbinfo[\"topo\"][\"name\"]\n if topo_name not in ['t0', 'm0', 'mx']:\n return\n logger.info(\n \"withdraw and announce default ipv4 and ipv6 routes for {}\".format(topo_name))\n localhost.announce_routes(\n topo_name=topo_name, ptf_ip=ptf_ip, action=WITHDRAW, path=\"../ansible/\")\n localhost.announce_routes(\n topo_name=topo_name, ptf_ip=ptf_ip, action=ANNOUNCE, path=\"../ansible/\")",
"def test_http_we_provide_default_route_prefix_cls(serve_instance):\n with InputNode() as dag_input:\n m1 = Model.bind(1)\n m2 = Model.bind(1)\n m1_output = m1.forward.bind(dag_input[0])\n m2_output = m2.forward.bind(dag_input[0])\n combine_output = combine.bind(m1_output, m2_output)\n serve_dag = Driver.bind(combine_output)\n\n deployments = pipeline_build(serve_dag)\n ingress_deployment = get_and_validate_ingress_deployment(deployments)\n assert ingress_deployment.route_prefix == \"/\"\n for deployment in deployments[:-1]:\n assert deployment.route_prefix is None",
"def test_patch_namespaced_route_status(self):\n pass",
"def test_defaults(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Cache-Control\")",
"def test_get_current_route_factory():\n\n assert application_services.get_current_route_factory() == create_route",
"def test_one_route(self):\n route = Route()\n route.save()\n response = self.client.get(reverse('routes_app:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, str(route))",
"def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)",
"def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)",
"def test_list_route_for_all_namespaces(self):\n pass",
"def test_view_url_accessible_by_name(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)",
"def test_website_not_set_if_not_match(self, template_override_mock):\n request = mock.Mock()\n request.resolver_match.kwargs.get.side_effect = Exception('something')\n request.path = '/'\n context_processors.decide_base_template(request)\n template_override_mock.assert_not_called()",
"def test_default(self):\r\n self.assertEqual(self.option.default, 1234)",
"def test_register_route_factory():\n\n current_factory = application_services.get_current_route_factory()\n application_services.register_route_factory(mock_route_factory)\n assert application_services.get_current_route_factory() == mock_route_factory\n application_services.register_route_factory(current_factory)",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'hello')",
"def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )",
"def test_home(self):\n response = self.app.get(\"/\")\n self.assertTrue(response.status_code, 200)",
"def __init__(self, root_handler, path_not_found):\n if self.__test_path(root_handler) and self.__test_path(path_not_found):\n self.route_trie = RouteTrie(root_handler) # Passes root handler to the initialised trie\n self.path_not_found = path_not_found # Stores 404 not found response",
"def test_http_we_provide_default_route_prefix_func(serve_instance):\n func_dag = func_deployment.bind()\n deployments = pipeline_build(func_dag)\n ingress_deployment = get_and_validate_ingress_deployment(deployments)\n assert ingress_deployment.route_prefix == \"/\"",
"def add_routes(self):\n\n # create a routegroup\n routegroup = MewloRouteGroup('testsite_routegroup')\n # overide the parent import-pack-directory for the urls in this group? if we don't it will use the controller root set in SITE config\n # routegroup.set_controllerroot(pkgdirimp_controllers)\n\n routegroup.append(\n MewloRoute(\n id = 'home',\n path = \"/\",\n controller = MewloController(function='requests.request_home')\n ))\n\n\n routegroup.append(\n MewloRoute(\n id = 'hello',\n path = '/test/hello',\n args = [\n MewloRouteArgString(\n id = 'name',\n required = True,\n help = \"name of person to say hello to\",\n ),\n MewloRouteArgInteger(\n id = 'age',\n required = False,\n help = \"age of person (optional)\",\n defaultval = 44,\n )\n ],\n controller = MewloController(function=\"requests.request_sayhello\"),\n # we can pass in any extra data which will just be part of the route that can be examined post-matching\n extras = { 'stuff': \"whatever we want\" },\n # we can force the route to simulate as if certain url call args were assigned (this works whether there are RouteArgs for these or not; no type checking is performed on them)\n # this could be useful in two scenarios: first, if we initially wrote code to handle an arg and then changed our mind and want to not let user set that arg; second, if we reuse a controller function in different places and simulate dif arg values for each\n forcedargs = { 'sign': u\"aries\" },\n ))\n\n\n\n from controllers import requests\n routegroup.append(\n MewloRoute(\n id = 'article',\n path = '/article',\n args = [\n MewloRouteArgString(\n id = 'title',\n required = False,\n positional = True,\n help = \"title of article to display\",\n )\n ],\n # another way to specify the controller is to pass in the actual function reference (rather than as a string)\n controller = MewloController(function=requests.request_article),\n ))\n\n routegroup.append(\n MewloRoute(\n id = 'help',\n path = '/user/help',\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_help'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'contact',\n path = '/help/contact',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_contact'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'about',\n path = '/help/about',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_about'),\n ))\n\n\n #static file server\n if (False):\n routegroup.append(\n MewloRoute_StaticFiles(\n id = 'static_files',\n path = '/static',\n controller = MewloController_StaticFiles(\n sourcepath = '${sitefilepath}/staticfilesource'\n ),\n ))\n\n\n # add routegroup we just created to the site\n self.comp('routemanager').append(routegroup)",
"def test_default_behavior_of_home_view(dummy_request):\n from ..views.default import get_home_view\n from pyramid.response import Response\n\n request = dummy_request\n response = get_home_view(request)\n # import pdb ; pdb.set_trace()\n assert isinstance(response, dict)",
"def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200",
"def test_default_behavior_of_base_view(dummy_request):\n from ..views.default import get_base_view\n from pyramid.response import Response\n\n request = dummy_request\n response = get_base_view(request)\n # import pdb ; pdb.set_trace()\n assert isinstance(response, Response)\n assert response.text == 'base view is functional'",
"def RoutingInterfaceInitialize(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def test_main(self):\n path = reverse(\"main\")\n request = RequestFactory().get(path)\n response = index(request)\n assert response.status_code == 200",
"def test_api_root_get_routes(\n self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory\n ):\n\n view = APIRootView()\n url = drf_reverse(\"accounts-api:api-root\")\n request = request_factory.get(url)\n request.user = user\n\n view.request = request\n routes = view.get_routes(request)\n\n assert isinstance(routes, dict)\n assert routes[\"users\"] == drf_reverse(\"accounts-api:user-list\", request=request)\n assert routes[\"check-username-availability\"] == drf_reverse(\n \"accounts-api:user-check-username-availability\", request=request\n )",
"def test_home_route_context_foo(self):\n response = self.client.get(\"/\")\n self.assertContains(response, 'Imager Site')",
"def test_default_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"port\"], b'tcp:%d' % (REST_API_PORT,))",
"def test_default(self):\r\n self.assertEqual(self.option.default, False)",
"def test_default_versions_path(self):\n t = self.create_request_object()\n self.assertEqual(\"Mediflex\", t.project_name)\n self.assertEqual(\"Prod\", t.environment_name)\n self.assertEqual(\"studies/Mediflex(Prod)/datasets/regular\", t.url_path())",
"def robot_is_wanting_default(giver, object, receiver, ctxt) :\n if receiver==\"compliant robot\" :\n raise ActionHandled()",
"def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data",
"def test_solo_route_init() -> None:\n # Test SoloRoute with VirtualClientConnection (ClientConnection) in constructor\n destination = SpecificLocation()\n virtual_server = VirtualServerConnection(node=Node())\n virtual_client = VirtualClientConnection(server=virtual_server)\n h_solo = SoloRoute(destination=destination, connection=virtual_client)\n\n assert h_solo.schema.destination is destination\n assert h_solo.connection is virtual_client",
"def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")",
"def test_get_with_default(self):\n self.assertEqual(self.config.get('basic','salutation'),None)\n self.assertEqual(self.config.get('basic','salutation','bonjour'),\n 'bonjour')",
"def test_make_pathways(self):\n basic_test_runner(self, 'pathways')",
"def test_home(self):\n result = self.app.get('/')\n self.assertEqual(result.status_code, 200)",
"def test_read_namespaced_route_status(self):\n pass",
"def test_defaults(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"max-age=0\"})",
"def test_home(self):\n\n response = self.client.get(reverse('home'))\n\n assert response.status_code == 200",
"def test_handle_basic(self):\n response = self.handler.handle(self.request, '/')\n\n self.view.assert_called_with(self.request, route=self.route)\n expected = self.view(self.request, route=self.route)\n self.assertEqual(response, expected)",
"def testRedirect(self):\n self.assertRaises(NotImplementedError, self.handler.redirect, '/')",
"def test_init_no_shortlist(self):\n # Create an empty routing table.\n self.node.routing_table = RoutingTable(self.node.network_id)\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n self.assertEqual(True, lookup.done())\n self.assertRaises(RoutingTableEmpty, lookup.result)",
"def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")",
"def test_get(self):\n obj = self.client.bus.routes.get(self.random_route.id)\n self.assertEqual(type(obj), BusRoute)\n self.assertEqual(obj.id, self.random_route.id)\n self.assertEqual(obj.name, self.random_route.name)",
"def use_routes(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_routes\")",
"def add_routes(self):\n pass",
"def test_index_view(self):\n response = self.client.get(url_for('main.index'))\n self.assertEqual(response.status_code, 200)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def route(self, env):\n return None",
"def configure_routing(config):\n # Static file access. Separate root for each subdirectory, because Pyramid\n # treats these as first-class routables rather than a last-ditch fallback\n config.add_static_view('/css', 'floof:assets/css')\n config.add_static_view('/files', 'floof:assets/files') # dummy file store\n config.add_static_view('/icons', 'floof:assets/icons')\n config.add_static_view('/images', 'floof:assets/images')\n config.add_static_view('/js', 'floof:assets/js')\n # TODO this doesn't actually work\n config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico')\n\n\n r = config.add_route\n\n # Miscellaneous root stuff\n r('root', '/')\n r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator)\n r('reproxy', '/reproxy')\n r('log', '/log')\n\n # Registration and auth\n r('account.login', '/account/login')\n r('account.login_begin', '/account/login_begin')\n r('account.login_finish', '/account/login_finish')\n r('account.register', '/account/register')\n r('account.add_identity', '/account/add_identity')\n r('account.persona.login', '/account/persona/login')\n r('account.logout', '/account/logout')\n\n r('account.profile', '/account/profile')\n\n # Regular user control panel\n r('controls.index', '/account/controls')\n r('controls.auth', '/account/controls/authentication')\n r('controls.persona', '/account/controls/persona')\n r('controls.persona.add', '/account/controls/persona/add')\n r('controls.persona.remove', '/account/controls/persona/remove')\n r('controls.openid', '/account/controls/openid')\n r('controls.openid.add', '/account/controls/openid/add')\n r('controls.openid.add_finish', '/account/controls/openid/add_finish')\n r('controls.openid.remove', '/account/controls/openid/remove')\n r('controls.rels', '/account/controls/relationships')\n r('controls.rels.watch', '/account/controls/relationships/watch')\n r('controls.rels.unwatch', '/account/controls/relationships/unwatch')\n r('controls.info', '/account/controls/user_info')\n\n r('controls.certs', '/account/controls/certificates')\n r('controls.certs.add', '/account/controls/certificates/add')\n r('controls.certs.generate_server',\n '/account/controls/certificates/gen/cert-{name}.p12')\n r('controls.certs.details',\n '/account/controls/certificates/details/{serial:[0-9a-f]+}')\n r('controls.certs.download',\n '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem')\n r('controls.certs.revoke',\n '/account/controls/certificates/revoke/{serial:[0-9a-f]+}')\n\n # User pages\n kw = sqla_route_options('user', 'name', model.User.name)\n r('users.view', '/users/{name}', **kw)\n r('users.art', '/users/{name}/art', **kw)\n r('users.art_by_album', '/users/{name}/art/{album}', **kw)\n r('users.profile', '/users/{name}/profile', **kw)\n r('users.watchstream', '/users/{name}/watchstream', **kw)\n r('albums.user_index', '/users/{name}/albums', **kw)\n\n r('api:users.list', '/users.json')\n\n # Artwork\n kw = sqla_route_options('artwork', 'id', model.Artwork.id)\n kw['pregenerator'] = artwork_pregenerator\n r('art.browse', '/art')\n r('art.upload', '/art/upload')\n r('art.view', r'/art/{id:\\d+}{title:(-.+)?}', **kw)\n r('art.add_tags', r'/art/{id:\\d+}/add_tags', **kw)\n r('art.remove_tags', r'/art/{id:\\d+}/remove_tags', **kw)\n r('art.rate', r'/art/{id:\\d+}/rate', **kw)\n\n # Tags\n # XXX what should the tag name regex be, if anything?\n # XXX should the regex be checked in the 'factory' instead? 
way easier that way...\n kw = sqla_route_options('tag', 'name', model.Tag.name)\n r('tags.list', '/tags')\n r('tags.view', '/tags/{name}', **kw)\n r('tags.artwork', '/tags/{name}/artwork', **kw)\n\n # Albums\n # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has\n user_router = SugarRouter(config, '/users/{user}', model.User.name)\n album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user)\n album_router.add_route('albums.artwork', '')\n\n # Administration\n r('admin.dashboard', '/admin')\n r('admin.log', '/admin/log')\n\n # Debugging\n r('debug.blank', '/debug/blank')\n r('debug.crash', '/debug/crash')\n r('debug.mako-crash', '/debug/mako-crash')\n r('debug.status.303', '/debug/303')\n r('debug.status.400', '/debug/400')\n r('debug.status.403', '/debug/403')\n r('debug.status.404', '/debug/404')\n\n # Comments; made complex because they can attach to different parent URLs.\n # Rather than hack around how Pyramid's routes works, we can just use our\n # own class that does what we want!\n\n # XXX 1: make this work for users as well\n # XXX 2: make the other routes work\n # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes\n parent_route_names = ('art.view', 'user.view')\n mapper = config.get_routes_mapper()\n parent_routes = [mapper.get_route(name) for name in parent_route_names]\n commentables = dict(\n users=model.User.name,\n art=model.Artwork.id,\n )\n\n def comments_factory(request):\n # XXX prefetching on these?\n type = request.matchdict['type']\n identifier = request.matchdict['identifier']\n\n try:\n sqla_column = commentables[type]\n entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one()\n except (NoResultFound, KeyError):\n # 404!\n raise NotFound()\n\n if 'comment_id' not in request.matchdict:\n return contextualize(entity.discussion)\n\n # URLs to specific comments should have those comments as the context\n try:\n return contextualize(\n model.session .query(model.Comment)\n .with_parent(entity.discussion)\n .filter(model.Comment.id == request.matchdict['comment_id'])\n .one())\n except NoResultFound:\n raise NotFound()\n\n\n def comments_pregenerator(request, elements, kw):\n resource = None\n comment = kw.get('comment', None)\n\n if comment:\n kw['comment_id'] = comment.id\n\n if 'resource' not in kw:\n resource = comment.discussion.resource\n\n if not resource:\n resource = kw['resource']\n\n # XXX users...\n entity = resource.member\n kw['type'] = 'art'\n kw['identifier'] = entity.id\n return elements, kw\n\n r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory)\n r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator)",
"def test_handle_basic(self):\n with mock.patch(self.view) as view:\n response = self.handler.handle(self.request, '/')\n\n view.assert_called_with(self.request, route=self.route)\n self.assertEqual(response, view(self.request, route=self.route))",
"def test_default_app_map_search(self):\n pass",
"def test_loader_build_urls_defaults(settings):\n settings.STATICPAGES = [\"index\", \"foo\"]\n settings.STATICPAGES_DEFAULT_TEMPLATEPATH = \"ping\"\n settings.STATICPAGES_DEFAULT_NAME_BASE = \"plop-\"\n settings.STATICPAGES_DEFAULT_URLPATH = \"moo\"\n\n loader = StaticpagesLoader()\n\n urls = loader.build_urls()\n\n index_url = urls[0]\n assert str(index_url.pattern) == \"moo/\"\n assert index_url.name == \"plop-index\"\n assert index_url.callback.view_initkwargs[\"template_name\"] == \"ping/index.html\"\n\n foo_url = urls[1]\n assert str(foo_url.pattern) == \"moo/foo/\"\n assert foo_url.name == \"plop-foo\"\n assert foo_url.callback.view_initkwargs[\"template_name\"] == \"ping/foo.html\"",
"def test_route_schema_init() -> None:\n destination = SpecificLocation()\n rschema = RouteSchema(destination)\n\n assert rschema.destination is not None\n assert rschema.destination._id == destination._id",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def test_root(self):\n response = self.app.test_client().get('/test/')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/index.html')",
"def test_handle(self):\n route = RouteFactory.build(url='/branch/')\n route.handler_class = mock.MagicMock()\n request = mock.Mock()\n\n result = route.handle(request, '/branch/leaf/')\n\n expected = route.handler_class(route).handle(request, '/leaf/')\n self.assertEqual(result, expected)",
"def test_default_app_map_search_0(self):\n pass",
"def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None:\n patch_src_m = f\"{TEST_DATA}/virtual-server-route/route-multiple.yaml\"\n patch_v_s_route_from_yaml(\n kube_apis.custom_objects,\n v_s_route_setup.route_m.name,\n patch_src_m,\n v_s_route_setup.route_m.namespace,\n )\n wait_before_test()",
"def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])",
"def test_mount_routes_with_middleware_url_path_for() -> None:\n assert mounted_routes_with_middleware.url_path_for(\"route\") == \"/http/\"",
"def testGet():\n bottle.response.set_header('content-type', 'text/plain')\n content = \"Web app file is located at %s\" % os.path.dirname(os.path.abspath(__file__))\n siteMap = \"\"\n\n for route in app.routes:\n siteMap = \"%s%s%s %s\" % (siteMap, '\\n' if siteMap else '', route.rule, route.method)\n target = route.config.get('mountpoint', {}).get('target')\n if target:\n for way in target.routes:\n siteMap = \"%s\\n %s %s\" % (siteMap, way.rule, way.method)\n\n content = \"%s\\n%s\" % (content, siteMap)\n return content"
] | [
"0.7380499",
"0.71554756",
"0.706406",
"0.6946817",
"0.6943349",
"0.67906195",
"0.66364366",
"0.6569018",
"0.6359504",
"0.6266595",
"0.619172",
"0.6175089",
"0.61632776",
"0.60943156",
"0.6091761",
"0.60507745",
"0.60375005",
"0.6034048",
"0.59746397",
"0.59736043",
"0.59731203",
"0.5953322",
"0.5937068",
"0.59347844",
"0.59265846",
"0.5909573",
"0.5900354",
"0.5899029",
"0.5868762",
"0.5828785",
"0.5826898",
"0.58179057",
"0.57821745",
"0.5746125",
"0.5724846",
"0.5721114",
"0.57159543",
"0.5689503",
"0.56884456",
"0.5682442",
"0.5674908",
"0.56583655",
"0.5640149",
"0.5610994",
"0.56032085",
"0.56032085",
"0.5594442",
"0.5576072",
"0.5574818",
"0.5558244",
"0.5552924",
"0.5550567",
"0.55475116",
"0.5533237",
"0.5511676",
"0.5507702",
"0.5494679",
"0.5483146",
"0.5468188",
"0.54512036",
"0.54428124",
"0.5425316",
"0.5421905",
"0.5421604",
"0.5417869",
"0.5417689",
"0.541477",
"0.5406376",
"0.5406209",
"0.5405335",
"0.54053235",
"0.54014367",
"0.5397384",
"0.539348",
"0.5386241",
"0.53744817",
"0.53730553",
"0.5371474",
"0.5366039",
"0.5341839",
"0.53323567",
"0.5332102",
"0.5331398",
"0.5328678",
"0.53217393",
"0.53068435",
"0.53068435",
"0.5301415",
"0.5291447",
"0.52897555",
"0.52894205",
"0.5285262",
"0.5271909",
"0.5269568",
"0.52661407",
"0.52560353",
"0.5249318",
"0.52323204",
"0.5231625",
"0.5226011",
"0.52250236"
] | 0.0 | -1 |
Test setting the agent name. | def test_set_invalid_value(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"agent.agent_name",
"true",
"--type=bool",
],
standalone_mode=False,
)
assert result.exit_code == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_set_name_return(self) -> None:\n\n actual = self.helper.set_name(self.test_name)\n\n self.assertIsInstance(actual, EnvironmentVariableHelper)",
"def set_object_name(self, agent, Name):\n\n self.send_ObjectName(agent, agent.agent_id, agent.session_id, {1:[self.LocalID, Name]})",
"def __init__(self, agent_name):\n\n self._agent_name = agent_name",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_change_name_of_the_devicetrue():",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def test_change_name_of_the_devicefalse():",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def name(self, name: str):\n self.inst['targetname'] = name",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def is_java_agent(self):\r\n return self.has_label('java_agent')",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def set_name(self, name=\"\"):\n if isinstance(name, str):\n self.__name = name\n return 0\n print(\"type of nom is not STR\")\n return 1",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def set_name_for_actor(name, actor):\n key = StringKey.MakeKey(\"MeshName\", \"root\")\n i = vtk.vtkInformation()\n i.Set(key, name)\n actor.SetPropertyKeys(i)",
"def botname(self):\n return settings.AIM_USERNAME",
"def test_default_agent_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"agent-port\"], b'tcp:4524')",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def _check_name(self):\n\t\tpass",
"def test_set_library_name(self):\n s1 = System()\n s1.set_library_name(\"Andreson\")\n self.assertEqual(s1.get_library_name(), \"Andreson\")",
"def the_user_changes_the_name_of_the_device(name):\n web_app.change_property_softassert(\"name\",name)",
"def name_option(args, run):\n run.experiment_info[\"name\"] = args\n run.run_logger = run.root_logger.getChild(args)",
"def set_agent_env(self, param, value):\n logging.info(\"setting agent_env param:[%s] = value:[%s]\", param, value)\n self.agent_env[param] = value",
"def test_string(self):\n\n new_jawn = Amenity()\n name = getattr(new_jawn, \"name\")\n self.assertIsInstance(name, str)",
"def test_route_name(self):\n route_name = 'Single Driver Round Trip'\n self.route4me.optimization.route_name(route_name)\n data = self.route4me.optimization.data['parameters']\n self.assertEqual(route_name, data['route_name'])",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_jobset_autoname(self):\n name = os.path.join('indir', 'infile')\n self.assertEqual(self.jobset.name, name)",
"def _aa_host_name(self):\n self.is_option = True\n self.is_statement = False\n self.has_validator = True\n if not (self.value.startswith('\"') and self.value.endswith('\"')):\n self.value = '\"' + self.value + '\"'\n validate_name(self.value.strip('\"'))",
"async def name(self, ctx, *, name: str = None):\n plagueName = await self.config.plagueName()\n if not name:\n message = f\"The current plague's name is `{plagueName}`.\"\n else:\n await self.config.plagueName.set(name)\n message = f\"Set the current plague's name to `{name}`.\"\n await ctx.send(message)",
"def set_name(self, newname=\"\"):\n self.name = newname",
"def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )",
"def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value",
"def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()",
"def test_custom_agent_port(self):\n options = ControlOptions()\n options.parseOptions([b\"--agent-port\", b\"tcp:1234\"])\n self.assertEqual(options[\"agent-port\"], b\"tcp:1234\")",
"def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)",
"def test_detector_name(i07_nexus_object_01: I07Nexus):\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021",
"def agent_set(bus):\n # TODO\n pass",
"def set_name(self, name):\n self.settings[\"name\"] = name",
"def test_name_false(self):\r\n self.name = False",
"def set_name(self, name):\n\t\tself.name_ = name",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def setName(self, newName):\n self.__username = newName",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"async def botname(ctx, *, new_name=None):\n if ctx.message.channel.name.lower() not in bot_channels:\n return\n\n member_roles = ctx.message.author.roles\n member_admin = discord.utils.find(lambda r: r.name.lower() in admin_roles, member_roles)\n if member_admin is not None:\n bot_member = discord.utils.find(lambda m: m.id == amor_manager.user.id, ctx.message.server.members)\n await amor_manager.change_nickname(bot_member, new_name)",
"def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def GetModernizedTestName(self, arg):\n return arg",
"def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_agent():\n\n # default parameters\n print('Testing an agent with default parameters')\n uid = 'test_agent'\n params = {'use_checkpointer': False}\n verify.verify_agent(uid, params)\n verify.log_graph(uid, write_logs=False)\n print('\\n' + '#' * 65 + '\\n')\n\n # random parameters\n for _ in range(9):\n rand_params = utils.get_random_params()\n rand_params['use_checkpointer'] = False\n print(f'Testing an agent with parameters: {rand_params}')\n verify.verify_agent(uid, rand_params)\n verify.log_graph(uid, rand_params, False)\n print('\\n' + '#' * 65 + '\\n')\n\n # cleaning up\n path = os.path.join(configs.LOG_DIR, 'test_agent')\n shutil.rmtree(path)",
"def autoname(self):\n\t\tself.name = self.role_profile",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_story_submitter(self):\n self.assertEqual(self.story.submitter, 'karangoeluw')",
"def setMachineName(self, name):\n if type(name) != str:\n return None\n self.description.setName(name)",
"def setCaptainNames(self):\n self.captainNames = anwp.func.names.getNames('system_names.txt',self.maxCaptainNames+100, self.rand.randint(1,100))\n self.currentCaptainName = 0",
"def test_email_name(self):\n key = api.portal.get_registry_record(\n 'plone.email_from_name'\n )\n self.assertEqual(u'Briefy CMS', key)",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_set_value(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.set_value(\"Hello, World!\")\n\n expected = \"Hello, World!\"\n actual = self.helper.get_value()\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]",
"def setUA(self, useragent):\n\t\tpass",
"def test_name(self):\n self.assertEqual(ApiConfig.name, 'api')",
"def set_name(self,name):\r\n self._name = __name",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n if entry[\"kind\"] == trace_api.SpanKind.SERVER:\n entry[\"name\"] = span_name\n else:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[2:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, default_span_details=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_organization_name(self):\n insurgent = models.Organization(title='Insurgent')\n with self.assertRaises(ValueError):\n insurgent.name = '35453496*%&^$%^'\n with self.assertRaises(ValueError):\n insurgent.name = 'Insurgent'\n insurgent.name = 'insurgent'\n self.assertEqual(insurgent.name, 'insurgent')",
"def set_user_name_override(name: str) -> None:\r\n global _user_name_override\r\n _user_name_override = name",
"def set_name(name=False):\n if not name:\n name = name_generator()\n return name",
"def get_name(self):\n if self.ui.nick_line.text() and self.ui.pass_line.text():\n self.check_verify()\n self.set_name = True\n qApp.exit()",
"def __set_name(self, name):\r\n\t\tself.__name = name\r\n\t\tself._window.chat_panel.place_name = name\r\n\t\tself.encode_message(action=\"NO\", selected_name=name)",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def set_name(self, sNewVmName):\n\t\tcall_sdk_function('PrlVmCfg_SetName', self.handle, sNewVmName)",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_username(self):\n assert_equals(self.client.username, 'testuser')",
"def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name",
"def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')",
"def test_name_shower(self):\n self.assertTrue(self.ec.name_shower(self.ec.names))",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[-1:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, span_details_callback=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_set_appsearch_engine_name(self):\n\n # Get the current engine name and store it\n original_engine_name = Car.get_appsearch_engine_name()\n\n # Set a new app search engine name\n Car.set_appsearch_engine_name('test_cars')\n\n # Test if its set successfully\n engine_name = Car.get_appsearch_engine_name()\n self.assertEqual(engine_name, 'test_cars')\n\n # Reset it back to the original\n Car.set_appsearch_engine_name(original_engine_name)",
"def setName(self, name):\n self.name = str(name)",
"def setName(self, *args):\n return _libsbml.Objective_setName(self, *args)",
"async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)",
"def test_state_name(self):\n state = State('test-state')\n self.assertEqual(state.name, 'test-state')",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def on_setting_myname(self, value):\n raise NotImplementedError()",
"def enter_name(self, name):\n self.name = name",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_local_agent_from_package_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-package/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n\n inputs = {\n 'resource_base': self.resource_base,\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue,\n 'file_server_port': self.fs.port\n }\n\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def set_experiment_name(self, experiment_name):\n self.experiment_name = experiment_name",
"def setName(self, *args):\n return _libsbml.Species_setName(self, *args)",
"def isSetName(self):\n return _libsbml.Objective_isSetName(self)"
] | [
"0.72085243",
"0.7100356",
"0.6366684",
"0.6257389",
"0.6257389",
"0.62132823",
"0.6131713",
"0.60440767",
"0.60348433",
"0.5927437",
"0.5916145",
"0.58946425",
"0.5865132",
"0.5844421",
"0.57391584",
"0.5722152",
"0.57040644",
"0.5700256",
"0.56795913",
"0.5665505",
"0.5655928",
"0.5632392",
"0.5624648",
"0.5613523",
"0.55860275",
"0.55763686",
"0.55761933",
"0.5556421",
"0.5545556",
"0.5524442",
"0.5512594",
"0.5481373",
"0.547442",
"0.546205",
"0.54620385",
"0.5456332",
"0.54486966",
"0.54285616",
"0.5425501",
"0.5424599",
"0.54212815",
"0.5411104",
"0.5397801",
"0.53956974",
"0.5389271",
"0.5380992",
"0.53781265",
"0.5372445",
"0.5369932",
"0.5351236",
"0.5347454",
"0.5343573",
"0.533603",
"0.5331701",
"0.532518",
"0.5313506",
"0.53130037",
"0.5309862",
"0.5297768",
"0.5296321",
"0.5290892",
"0.5289959",
"0.5283988",
"0.52811134",
"0.5264371",
"0.52566385",
"0.52563393",
"0.5253919",
"0.52434075",
"0.52428365",
"0.5239398",
"0.52352524",
"0.5231288",
"0.5229124",
"0.5229076",
"0.5225584",
"0.5223776",
"0.5208234",
"0.52070165",
"0.5202741",
"0.5202312",
"0.51952773",
"0.51944065",
"0.5181081",
"0.51794475",
"0.517559",
"0.51706153",
"0.516947",
"0.5169103",
"0.5162539",
"0.5156178",
"0.51513094",
"0.51496273",
"0.5144778",
"0.5144312",
"0.51377416",
"0.5130215",
"0.5125931",
"0.5124033",
"0.51237625"
] | 0.62835133 | 3 |
Test setting the 'dummy' skill name. | def test_set_skill_name_should_fail(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "skills.dummy.name", "new_dummy_name"],
standalone_mode=False,
)
assert result.exit_code == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def fixture_microbial_sample_name():\n return \"microbial_name_test\"",
"def test_name_false(self):\r\n self.name = False",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def test_change_name_of_the_devicefalse():",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def test_change_name_of_the_devicetrue():",
"def test_with_only_names(self, do_student_launch, student_payload):\n del student_payload[\"email\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_thingname_nostr(self, mock):\n mock.configure_mock(**(self.config_shadowget(ParamValidationError(\n report='UnitTest'))))\n self.assertRaises(\n ParamValidationError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()",
"def test_selection_name(self):\n skill = create_skill()\n skill.speak = mock.Mock()\n skill.get_response = mock.Mock()\n\n skill.get_response.return_value = 'octopus'\n\n options = ['a balloon', 'an octopus', 'a piano']\n response = skill.ask_selection(options, 'which is better')\n self.assertEqual(options[1], response)\n\n # Assert that the spoken sentence contains all options.\n spoken_sentence = skill.speak.call_args[0][0]\n for opt in options:\n self.assertTrue(opt in spoken_sentence)",
"def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)",
"def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_dummy():",
"def test_workon_name(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def test_default_codeword(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)",
"def test_thingname_nokey(self, mock):\n self.assertRaises(\n KeyError,\n lf.lambda_handler, event=self.lambdaevent_nokey, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()",
"def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)",
"def test_skill_created(self):\n\t\tself.skill.save()\n\t\tskill_instance = Skill.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tskill_instance.user,\n\t\t\tself.skill.user,\n\t\t\t'User don\\'t match.'\n\t\t)\n\t\tself.assertEqual(\n\t\t\tskill_instance.tag,\n\t\t\tself.tag,\n\t\t\t'Skill tag\\'s don\\'t match.'\n\t\t)",
"def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)",
"def _check_name(self):\n\t\tpass",
"def test_init(self):\r\n self.assertEqual(self.default_app.Name, 'RdpTaxonAssigner')",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def test_name_must_be_present(self):\n response = self.client.post(url_for('teams'),\n data={\n 'capacity': 10,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 400)",
"def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)",
"def test_skills_updated(self):\n assert self.skill_config.skills == {self.new_skill_id}",
"def test_ask_yesno_german(self):\n skill = create_skill(lang='de-de')\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'ja'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_name(self):\n dtt = self.TDTT(when=self.txt_when)\n expected_name = self.txt_when\n self.assertEquals(expected_name, dtt.name)\n self.assertEquals(expected_name, '{}'.format(dtt))\n expected_logged = '{}({})'.format(dtt.typename(), self.txt_when)\n self.assertEquals(expected_logged, dtt.logged)",
"def test_set_name_return(self) -> None:\n\n actual = self.helper.set_name(self.test_name)\n\n self.assertIsInstance(actual, EnvironmentVariableHelper)",
"def test_skills_updated(self):\n assert self.agent_config.skills == {self.new_skill_id}",
"def test_skills(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['skills'] = [\n {'name': 'bot 1'},\n {'name': 'bot 2'},\n {'name': 'bot 3'},\n {'name': 'bot 4'},\n {'name': 'bot 5'},\n {'name': 'bot 6'},\n ]\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'bot 1')\n self.assertContains(response, 'bot 2')\n self.assertContains(response, 'bot 3')\n self.assertContains(response, 'bot 4')\n self.assertContains(response, 'bot 5')\n self.assertNotContains(response, 'bot 6')\n self.assertNotContains(response, 'Speed up your bot building process by '\n 'starting with one of our Templates from the store.')",
"def test_that_name_saved():\n custom_sum_name = \"custom_sum\"\n\n assert custom_sum.__name__ == custom_sum_name",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_details(_):\n return span_name, {}\n\n def update_expected_span_name(expected):\n for entry in expected:\n entry[\"name\"] = \" \".join(\n [span_name] + entry[\"name\"].split(\" \")[-1:]\n )\n return expected\n\n app = otel_asgi.OpenTelemetryMiddleware(\n simple_asgi, span_details_callback=get_predefined_span_details\n )\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_span_name])",
"def test_initial_arg_name_isnt_set(self):\r\n v = GroovyTestModel.create(text='cross fingers')\r\n\r\n assert v == v.arg_test1()\r\n assert v == v.arg_test2()",
"def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')",
"def test_get_github_name_negative(self):\n self.assertIsNone(app.get_github_name(\"undefined_user12345\")[\"user\"])",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def test_unnamed_parameter(self):\n\n m = Mothur(**self.init_vars)\n m.help('summary.seqs')\n\n return",
"def test_legal_names(self):\n product_list = generate_products()\n for prod in product_list:\n noun = prod.name.split(\" \")[1]\n adjective = prod.name.split(\" \")[0]\n self.assertIn(noun, NOUNS)\n self.assertIn(adjective, ADJECTIVES)",
"def set_name(self, name=\"\"):\n if isinstance(name, str):\n self.__name = name\n return 0\n print(\"type of nom is not STR\")\n return 1",
"def isSetName(self):\n return _libsbml.FluxBound_isSetName(self)",
"def testbed_name(self): \n return \"C-Lab\"",
"def test_name_required(self):\n self.required_field_fail('name', self.test_data['pants'])",
"def test_name_set_no_changes(self):\n field1 = basic.flag(name='field1')\n field2 = basic.flag(name='field2')\n\n self.assertEqual('field1', field1.name)\n self.assertEqual('field2', field2.name)",
"def is_yummy(self):\n return False",
"def test_override_span_name(self):\n span_name = \"Dymaxion\"\n\n def get_predefined_span_name(scope):\n # pylint: disable=unused-argument\n return span_name\n\n app = otel_wsgi.OpenTelemetryMiddleware(\n simple_wsgi, name_callback=get_predefined_span_name\n )\n response = app(self.environ, self.start_response)\n self.validate_response(response, span_name=span_name)",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))",
"def test_sample_one_sample_type(self):\r\n self.assertEqual(self.test_sample.sampleType, 'TUMOUR')",
"def test_name(name):\n expected = 'datachannel' if name is None else name\n c = DataChannel(name=name)\n assert c.name == expected",
"def allow_any_name(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_any_name\")",
"def test_set_bad_name(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4, shape=(4, 4))\n with pytest.raises(TypeError):\n dim.name = 4",
"def testGetName(self):\n\tself.assertEqual(self.emp.getName(),'Lin') # test getName() whether return correct answer\"\n\tself.assertNotEqual(self.emp2.getName(),'Lin')",
"def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)",
"def test_name_default(self):\n field = basic.flag()\n\n self.assertEqual('Flag Field', field.name)",
"def test_name_required_error_validation():\n template_name = pxe.CustomizationTemplate(\n name=None,\n description=generate_random_string(size=16),\n image_type='RHEL-6',\n script_type='Kickstart',\n script_data='Testing the script')\n\n with error.expected('Name is required'):\n template_name.create()",
"def allow_any_name(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_any_name\")",
"def allow_any_name(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_any_name\")",
"def isSetName(self):\n return _libsbml.Objective_isSetName(self)",
"def name_test(item):\n return f\"{item['params']['interface']}:{item['expected']['state']}\"",
"def testName(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"name\")\n\n self.util.stringPropertyTest(self, project, \"name\")",
"def isSetName(self):\n return _libsbml.Input_isSetName(self)",
"def test_title(names):",
"def test_with_unknown_role(self, do_student_launch, student_payload):\n student_payload[\"https://purl.imsglobal.org/spec/lti/claim/roles\"] = [\n \"http://purl.imsglobal.org/vocab/lis/v2/membership#Learner\",\n \"http://purl.imsglobal.org/vocab/lis/v2/uknownrole/unknown#Unknown\",\n ]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_testing():\n Pendulum = pu.Pendulum()\n ans = Pendulum.dummytest()\n assert ans",
"def fixture_microbial_sample_id():\n return \"microbial_sample_test\"",
"def test_state_name(self):\n state = State('test-state')\n self.assertEqual(state.name, 'test-state')",
"def test_2():\n\tname = \"Luke Skywalker\"\n\tassert name.lower() == api_call().json()['name'].lower()",
"def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)",
"def _fallback_fixture_names(self):\n if not self.request_name or not self.response_name:\n warnings.warn(\n \"No name was specified for the recorded fixture. Falling \"\n \"back to default names.\")\n\n if not self.request_name:\n self.request_name = __default_names__[0]\n if not self.response_name:\n self.response_name = __default_names__[1]",
"def isSetName(self):\n return _libsbml.SBase_isSetName(self)",
"def test_create_invalid_name(self):\r\n print(\"Create survey with invalid name\")\r\n s_name = \"\"\r\n c_id = 1\r\n questions = [1, 2]\r\n\r\n prev_noSurveys = len(Survey.query.all())\r\n self.assertEqual(self.system.create_survey(s_name, c_id, questions), 0)\r\n curr_noSurveys = len(Survey.query.all())\r\n self.assertEqual(prev_noSurveys, curr_noSurveys)",
"def test_name(self, data, firstname, secondname):\n layer = Points(data)\n assert layer.name == \"Points\"\n\n layer = Points(data, name=firstname)\n assert layer.name == firstname\n\n layer.name = secondname\n assert layer.name == secondname",
"def isSetName(self):\n return _libsbml.FluxObjective_isSetName(self)",
"def test_dummy_test():\n pass",
"def isSetName(self):\n return _libsbml.Species_isSetName(self)",
"def dummy(self):\n pass",
"def give_names(x): \n if x == 0:\n return 'Lost'\n else:\n return 'Won/Broke Even'",
"def test_create_bad_sample_names(self):\n # set a horrible list of sample names\n self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']\n with self.assertRaises(QiitaDBColumnError):\n SampleTemplate.create(self.metadata, self.new_study)",
"def test_default_missing_honor(self):\r\n self.url_params['honor_code'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'To enroll, you must follow the honor code.',\r\n )",
"def test_interaction_accepts_name():\n dmi = DMI(1)\n assert hasattr(dmi, 'name')",
"def test_initialization_of_teacher_first_name():\n assert opp_teacher.first_name == \"Daniil\"",
"def set_name(name=False):\n if not name:\n name = name_generator()\n return name",
"def test_anonymous(self):\n\n self.client.logout()\n response = self.client.get(reverse(\n 'studio:skills',\n kwargs={\n 'aiid': self.ai['aiid']\n }\n ))\n self.assertEqual(response.status_code, 302)\n self.assertEqual(\n response.url,\n reverse('account_login') + '?next=/bots/edit/%s/skills' % self.ai['aiid']\n )",
"def name():\r\n return _random.choice([male_first(), female_first()])",
"def test_legal_names(self):\n test_list = generate_products()\n names_list = []\n for i in test_list:\n names_list.append(i[0])\n for name in names_list:\n nameparts = name.split()\n self.assertEqual(len(nameparts), 2,\n msg=\"missing noun, space, or adj\")\n the_adj = nameparts[0]\n self.assertIn(the_adj, ADJECTIVES, msg='Bad Adj')\n the_noun = nameparts[1]\n self.assertIn(the_noun, NOUNS, msg='Bad Noun')",
"def test_name(self):\n self.assertTrue(type(x.name) == str)",
"def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']"
] | [
"0.7661072",
"0.6244207",
"0.61193323",
"0.6040593",
"0.59651697",
"0.5919205",
"0.5802785",
"0.5780799",
"0.5780309",
"0.5751823",
"0.57467544",
"0.5730054",
"0.57029426",
"0.5673183",
"0.5667184",
"0.56606334",
"0.5655326",
"0.56511235",
"0.5628001",
"0.5626317",
"0.56261504",
"0.560481",
"0.5593023",
"0.5590323",
"0.5576531",
"0.5537359",
"0.5523387",
"0.5498324",
"0.5490404",
"0.5487239",
"0.54832053",
"0.5481246",
"0.54776084",
"0.54749256",
"0.54706484",
"0.5464069",
"0.5463591",
"0.5461051",
"0.54495513",
"0.544291",
"0.5441591",
"0.54412377",
"0.54404217",
"0.5437704",
"0.5432549",
"0.5422519",
"0.5394386",
"0.5378319",
"0.5363961",
"0.5362074",
"0.53587526",
"0.5352732",
"0.53510517",
"0.5347664",
"0.53444725",
"0.5332104",
"0.53183645",
"0.52972955",
"0.5293751",
"0.52928776",
"0.52847844",
"0.5278075",
"0.52771026",
"0.5271225",
"0.5270451",
"0.52689236",
"0.5260982",
"0.5260317",
"0.52585983",
"0.52585983",
"0.5257051",
"0.5255562",
"0.52441293",
"0.52320784",
"0.5231011",
"0.5211483",
"0.5208815",
"0.52064306",
"0.52055347",
"0.5204454",
"0.5202572",
"0.5195844",
"0.51916426",
"0.5188374",
"0.5187711",
"0.5184913",
"0.5182212",
"0.51806986",
"0.51768696",
"0.51754016",
"0.5175323",
"0.5173657",
"0.51719105",
"0.51710683",
"0.51679856",
"0.5165707",
"0.5156256",
"0.5155781",
"0.51550204",
"0.5148386"
] | 0.8387392 | 0 |
Test setting a nested attribute. | def test_set_nested_attribute(self):
path = "skills.dummy.behaviours.dummy.args.behaviour_arg_1"
new_value = "10" # cause old value is int
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", path, new_value],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", path],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert new_value in result.output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_attribute_setters(self):\n test = self.test\n test.id = 2\n test['name'] = 'bar'\n\n self.assertEqual(test.id, 2)\n self.assertEqual(test['name'], 'bar')",
"def test_set_attr(self):\n self.my_city.name = \"Denver\"\n self.assertEqual(self.my_city.name, \"Denver\")",
"def test_update_attribute_data(self):\n pass",
"def test_set_attribute():\n elem = hr.Element(\"this is some text\", id=\"spam\", style=\"eggs\")\n elem.set_attributes(holy=\"grail\", answer=42)\n\n assert (\n get_opening_line(elem)\n == '<html id=\"spam\" style=\"eggs\" holy=\"grail\" answer=\"42\">'\n )",
"def testSetAttributeAction(self):\n\t action = SetAttributeAction('x', 'y', ('key',), 'z')\n\t self.failUnless(action.field == 'y')\n\t self.failUnless(action.value == 'z')",
"def test_getter_child_attr(self):\n root = netapp_api.NaElement('root')\n root.add_attr('val', 'FAKE_VALUE')\n\n self.assertEqual('FAKE_VALUE',\n root.__getitem__('val'))",
"def setattr_nested(obj, attributes, value):\n pre, _, post = attributes.rpartition(\".\")\n setattr(getattr_nested(obj, pre) if pre else obj, post, value)",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def test_setAttribute():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute();\n x.setAttribute(\"foo\");\n x.setAttribute(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute(\"onfoo\", \"bar\");\n \"\"\").failed()",
"def test_one_att(self):\n self.test_attribute.is_down = mock.Mock(return_value=False)\n self.run_mock_analyzer([self.test_attribute, ])\n self.assert_mock_analyzer(self.test_attribute)",
"def set_nested_attr(__obj: object, __name: str, __value: Any):\n pre, _, post = __name.rpartition('.')\n return setattr(get_nested_attr(__obj, pre) if pre else __obj, post, __value)",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def test_attribute_access(self):\n cd = ConfigDict()\n\n cd['x'] = 1\n self.assertEquals(cd.x, 1)\n\n cd.y = 2\n self.assertEquals(cd['y'], 2)",
"def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)",
"def test_set_attrs(self):\n city2 = City()\n city2.name = \"Hawaii\"\n self.assertEqual(city2.name, \"Hawaii\")\n city2.state_id = \"<3\"\n self.assertEqual(city2.state_id, \"<3\")\n self.assertEqual(City.name, \"\")\n self.assertEqual(City.state_id, \"\")",
"def test_data_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.data = 0",
"def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)",
"def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_attributeWithValue(self):\n xp = XPathQuery(\"/foo[@attrib1='value1']\")\n self.assertEqual(xp.matches(self.e), 1)",
"def test_update_metadata_by_attribute(self):\n pass",
"def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def test_register_existing_attr(self):\n pass",
"def test_get_attribute_data(self):\n pass",
"def test_attr_type(self):\n self.my_city.state_id = \"1c5dd90a-a3df-4516-b1ac-32a8715e5539\"\n self.my_city.name = \"New York\"\n self.assertIsInstance(self.my_city.name, str)\n self.assertIsInstance(self.my_city.state_id, str)",
"def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(len(sub_ch), 2)\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual(c.get_content(), 'v1')\n else:\n self.assertEqual(c.get_content(), 'v2')",
"def set_value(node, attr, attr_data, verbose=False):\n\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)",
"def test_attribute_getters(self):\n test = self.test\n self.assertEqual(test.name, 'Foo')\n self.assertEqual(test['id'], 1)",
"def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(2, len(sub_ch))\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual('v1', c.get_content())\n else:\n self.assertEqual('v2', c.get_content())",
"def test_patch_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.patch = 234\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_set_with_shallow_path():\n shallow_key_path = 'shallow_key_path'\n test_value = 'shallow key path value'\n\n config.set(shallow_key_path, test_value)\n assert config.get(shallow_key_path) == test_value",
"def test_attributeWithValueAny(self):\n xp = XPathQuery(\"/foo/*[@attrib2='value2']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar2])",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def _check_property_on_test_context(\n context: \"HookContext\", attr_str: str, user_facing_name: str, param_on_builder: str\n):\n value = getattr(context, attr_str)\n if value is None:\n raise DagsterInvalidPropertyError(\n f\"Attribute '{user_facing_name}' was not provided when \"\n f\"constructing context. Provide a value for the '{param_on_builder}' parameter on \"\n \"'build_hook_context'. To learn more, check out the testing hooks section of Dagster's \"\n \"concepts docs: https://docs.dagster.io/concepts/ops-jobs-graphs/op-hooks#testing-hooks\"\n )\n else:\n return value",
"def test_api_object_update_property(self, api_object):\n attrs_dict = {'uuid_': 'CREATING'}\n api_object.update_public_attrs(attrs_dict)\n assert api_object.uuid_ != 'CREATING'",
"def _checked_set(self, struct, field, value):\n setattr(struct, field, value)\n self._check_field_length(struct.DESCRIPTOR.fields_by_name[field], value)",
"def set_attribute(obj, path, value):\n names = path.split('.')\n if len(names) > 1:\n set_attribute(getattr(obj, names[0]), '.'.join(names[1:]), value)\n else:\n setattr(obj, names[0], value)",
"def _set_attributes(self):",
"def test_attr(self):\n self.assertTrue(hasattr(self.amenity, \"created_at\"))\n self.assertTrue(hasattr(self.amenity, \"id\"))\n self.assertTrue(hasattr(self.amenity, \"updated_at\"))\n self.assertFalse(hasattr(self.amenity, \"random_attr\"))\n self.assertTrue(hasattr(self.amenity, \"name\"))\n self.assertEqual(self.amenity.__class__.__name__, \"Amenity\")\n self.assertEqual(self.amenity.name, \"\")",
"def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"",
"def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})",
"def test_traversal__path_resource_attribute(path, attribute_name, value):\n from pyramid.traversal import traverse\n root_resource = root_resource_factory()\n t = traverse(root_resource, path)\n context = t['context']\n assert getattr(context, attribute_name) == value",
"def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )",
"def testSetParentage(self):\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )\n\n self.cc.parent = 'bob'\n self.media_ref.parent = 'joe'\n\n self.cd.set_parentage()\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )",
"def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_set_node_properties(self):\n\n pass",
"def test_get_attributes(self):\n pass",
"def has_nested_attr(__obj: object, __name: str) -> bool:\n pre, _, post = __name.rpartition('.')\n if pre:\n if has_nested_attr(__obj, pre):\n return has_nested_attr(get_nested_attr(__obj, pre), post)\n else:\n return False\n else:\n return hasattr(__obj, post)",
"def test_attr_dict(self):\n obj = awstats_reader.AttrDict([('this','that'), ('thus','those')])\n self.assertEqual(obj.thus, 'those')",
"def test_update_or_set_public_attr(self, api_object):\n attrs_dict = dict(status='SERVICE', not_in_init='secret', _private='secret')\n api_object.update_or_set_public_attrs(attrs_dict)\n assert api_object.status == 'SERVICE'\n try:\n assert api_object.not_in_init == 'secret'\n except AttributeError:\n raise AssertionError\n else:\n assert True\n try:\n api_object._private\n except AttributeError:\n assert True\n else:\n raise AssertionError",
"def test_setter_na_element(self):\n root = netapp_api.NaElement('root')\n root['e1'] = netapp_api.NaElement('nested')\n self.assertEqual(len(root.get_children()), 1)\n e1 = root.get_child_by_name('e1')\n self.assertIsInstance(e1, netapp_api.NaElement)\n self.assertIsInstance(e1.get_child_by_name('nested'),\n netapp_api.NaElement)",
"def test_attributes(self):\n self.assertTrue(hasattr(self.city, 'name'))\n self.assertTrue(hasattr(self.city, 'state_id'))",
"def test_setter_na_element(self):\n root = netapp_api.NaElement('root')\n root['e1'] = netapp_api.NaElement('nested')\n self.assertEqual(1, len(root.get_children()))\n e1 = root.get_child_by_name('e1')\n self.assertIsInstance(e1, netapp_api.NaElement)\n self.assertIsInstance(e1.get_child_by_name('nested'),\n netapp_api.NaElement)",
"def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')",
"def test_descriptor_set_get(self):\n obj = TestObject()\n self.assertIsNone(obj.test_setting)\n obj.test_setting = \"foo\"\n self.assertEqual(obj.test_setting, \"foo\")",
"def _setAttributes(self, primaryAttr, attrs):\n return False",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_set_attributes_error(self):\n r = Resources()\n attr_lst = [\"num_wires\", \"num_gates\", \"depth\", \"shots\", \"gate_types\"]\n\n for attr_name in attr_lst:\n with pytest.raises(FrozenInstanceError, match=\"cannot assign to field\"):\n setattr(r, attr_name, 1)",
"def UseAttribute(self) -> bool:",
"def set_attribute(self, node, attribute, value):\n name = '{}.{}'.format(node, attribute)\n try:\n attr_type = mc.getAttr(name, typ=True)\n if 'string' in attr_type:\n mc.setAttr(name, value, typ='string')\n elif 'float3' in attr_type:\n mc.setAttr(\n name, value[0][0], value[0][1], value[0][2], typ='float3'\n )\n else:\n mc.setAttr(name, value)\n except Exception:\n return False\n return True",
"def test_dotwiz_plus_set_attr():\n dd = DotWizPlus()\n dd.a = [{'one': 1, 'two': 2}]\n\n item = dd.a[0]\n assert isinstance(item, DotWizPlus)\n assert item.one == 1\n assert item.two == 2",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)",
"def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)",
"def attr(self, name):\r\n return Assert(getattr(self.obj, name))",
"def _check_nested(self, key, self_val, nested):\n nested_val = getattr(nested, key)\n assert self_val == nested_val, \\\n \"selector['{}']='{}' in '{}' doesn't match header['{}']='{}' in nested file '{}'.\".format(\n key, self_val, self.filename, key, nested_val, nested.filename)",
"def test_descriptor_set_get_dict(self):\n obj = TestObject()\n self.assertIsNone(obj.__dict__.get('test_setting'))\n obj.test_setting = \"foo\"\n self.assertEqual(obj.__dict__.get('test_setting'), \"foo\")",
"def test_existing_attribute(self):\n self.assertEqual(import_from_setting('TEST_SETTING'), 1)",
"def _SetValue(param, field, value):\n attr = None\n attr_name = ''\n for attr_name in field.split('.'):\n if attr:\n param = attr\n\n if not hasattr(param, attr_name):\n raise ValueError(\"Can't find field %s.\" % field)\n attr = getattr(param, attr_name)\n param.SetField(attr_name, value)",
"def test_set_attribute_override():\n elem = hr.Element(\n \"this is some text\",\n style=\"cheese\",\n answer=1,\n clas=\"spam\", # cspell:disable-line\n )\n elem.set_attributes(holy=\"grail\", answer=42, _clas=\"eggs\") # cspell:disable-line\n\n opening_tag = get_opening_line(elem)\n assert 'style=\"cheese\"' in opening_tag\n assert 'answer=\"42\"' in opening_tag\n assert 'class=\"eggs\"' in opening_tag\n assert 'holy=\"grail\"' in opening_tag",
"def test_get_fails_when_setting_non_dict_attribute(self):\n behaviour_arg_1 = \"behaviour_arg_1\"\n path = f\"skills.dummy.behaviours.dummy.args.{behaviour_arg_1}.over_the_string\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, \"new_value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{behaviour_arg_1}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_nested_col(self):\n\n self.assertRaises(TypeError, self.table.where, 'c_nested')",
"def testGetattr(self):\n patch = self.pd.main\n\n obj = patch.Lt_(0.5)\n self.assertEquals('<~', obj.name)\n self.assertEquals(('<~', 0.5), obj.args)\n\n alternate = patch.Obj('<~', 0.5)\n self.assertEquals('<~', alternate.name)\n self.assertEquals(('<~', 0.5), alternate.args)\n\n bang = patch.Bang()\n self.assertTrue(isinstance(bang, pdctl.Bang))",
"def test_set_attribute_with_response_returning_place():\n response_ok = {\n 'candidates': [{\n 'formatted_address': '92340 Bourg-la-Reine, France',\n 'name': 'Bourg-la-Reine',\n 'place_id': 'ChIJBY5REypx5kcRgD6LaMOCCwQ'}],\n 'status': 'OK'}\n place = Place()\n place.response = response_ok\n place.set_attribute()\n assert place.status is True\n assert place.place_id == \"ChIJBY5REypx5kcRgD6LaMOCCwQ\"\n assert place.name == \"Bourg-la-Reine\"\n assert place.address == \"92340 Bourg-la-Reine, France\"",
"def test_attr_access(self):\n c = ConfigDict()\n c.test = 5\n self.assertEqual(5, c.test)\n self.assertEqual(5, c['test'])\n c['test'] = 6\n self.assertEqual(6, c.test)\n self.assertEqual(6, c['test'])\n del c.test\n self.assertTrue('test' not in c)\n self.assertEqual(None, c.test)",
"def test_nested(cls, value, res):\n\tobj = cls(value, DEFAULT_POD)\n\tassert obj == res",
"def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")",
"def set_attr_impl(context, builder, sig, args, attr):\n typ, valty = sig.args\n target, val = args\n\n if attr in typ.struct:\n # It's a struct member\n inst = context.make_helper(builder, typ, value=target)\n data_ptr = inst.data\n data = context.make_data_helper(builder, typ.get_data_type(),\n ref=data_ptr)\n\n # Get old value\n attr_type = typ.struct[attr]\n oldvalue = getattr(data, _mangle_attr(attr))\n\n # Store n\n setattr(data, _mangle_attr(attr), val)\n context.nrt.incref(builder, attr_type, val)\n\n # Delete old value\n context.nrt.decref(builder, attr_type, oldvalue)\n\n elif attr in typ.jit_props:\n # It's a jitted property\n setter = typ.jit_props[attr]['set']\n disp_type = types.Dispatcher(setter)\n sig = disp_type.get_call_type(context.typing_context,\n (typ, valty), {})\n call = context.get_function(disp_type, sig)\n call(builder, (target, val))\n _add_linking_libs(context, call)\n else:\n raise NotImplementedError(\n 'attribute {0!r} not implemented'.format(attr))",
"def test_attributes(self):\n attributes = storage.attributes()[\"Review\"]\n b = Review()\n for k, v in attributes.items():\n self.assertTrue(hasattr(b, k))\n self.assertEqual(type(getattr(b, k, None)), v)",
"def test_assign(mock_empty_os_environ, mock_env_parser, attr, value, expected):\n s = settings_parser.Settings(prefix='this', settings_file_suffix='suffix', parser=None)\n assert s.settings_files == []\n setattr(s, attr, value)\n assert getattr(s, attr) == expected",
"def __setattr__(self, item, value):\n\n # This test allows attributes to be set in the __init__ method\n if \"_AttribDict__initialised\" not in self.__dict__:\n return dict.__setattr__(self, item, value)\n\n # Any normal attributes are handled normally\n elif item in self.__dict__:\n dict.__setattr__(self, item, value)\n\n else:\n self.__setitem__(item, value)",
"def test_set_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute = \"value\"",
"def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))",
"def test_build_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.build = 9001\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_attributenamenotfound(self):\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])",
"def test_init_attributes(self):\n t = self.Test({'id': 1, 'poop': 'abc'})\n\n self.assertEqual(t.id, 1)\n self.assertEqual(t.name, None)\n self.assertRaises(AttributeError, t.__getattribute__, 'poop')",
"def test_container_attribute(self):\n self.add_pool()\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = DaosCommand(self.bin)\n\n expected_for_param = []\n name = self.params.get(\"name\", '/run/attrtests/name_handles/*/')\n expected_for_param.append(name[1])\n value = self.params.get(\"value\", '/run/attrtests/value_handles/*/')\n expected_for_param.append(value[1])\n\n # Convert any test yaml string to bytes\n if isinstance(name[0], str):\n name[0] = name[0].encode(\"utf-8\")\n if isinstance(value[0], str):\n value[0] = value[0].encode(\"utf-8\")\n\n attr_dict = {name[0]: value[0]}\n\n expected_result = 'PASS'\n for result in expected_for_param:\n if result == 'FAIL':\n expected_result = 'FAIL'\n break\n try:\n self.container.container.set_attr(data=attr_dict)\n\n data = self.daos_cmd.container_list_attrs(\n pool=self.pool.uuid,\n cont=self.container.uuid)\n self.verify_list_attr(attr_dict, data['response'])\n\n # Request something that doesn't exist\n if name[0] is not None and b\"Negative\" in name[0]:\n name[0] = b\"rubbish\"\n\n attr_value_dict = self.container.container.get_attr([name[0]])\n\n # Raise an exception if the attr value is empty\n # This is expected to happen on Negative test cases\n if not attr_value_dict[name[0]]:\n raise DaosApiError(\"Attr value is empty. \"\n \"Did you set the value?\")\n self.verify_get_attr(attr_dict, attr_value_dict)\n\n if expected_result in ['FAIL']:\n self.fail(\"Test was expected to fail but it passed.\\n\")\n\n except (DaosApiError, DaosTestError) as excep:\n print(excep)\n print(traceback.format_exc())\n if expected_result == 'PASS':\n self.fail(\"Test was expected to pass but it failed.\\n\")",
"def test_update_metadata_by_attribute1(self):\n pass",
"def test_put_user_property(self):\n pass",
"def test_has_attr(self):\n self.assertTrue(hasattr(City, \"save\"))",
"def set_attribute(self, name, value):\n\n pass",
"def test_set_diameter():\n radius = 10\n c = Circle(radius) \n expected_diameter = 10 \n c.diameter = expected_diameter \n assert c.diameter == expected_diameter\n assert c.radius == expected_diameter / 2",
"def __setattr__ (self, attr, value):\n self.set_value (attr, value)",
"def has_attribute(self, name):\n\n pass",
"async def test_update_with_json_attrs_with_json_attrs_path(hass: HomeAssistant) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n json={\n \"toplevel\": {\n \"master_value\": \"123\",\n \"second_level\": {\n \"some_json_key\": \"some_json_value\",\n \"some_json_key2\": \"some_json_value2\",\n },\n },\n },\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes_path\": \"$.toplevel.second_level\",\n \"json_attributes\": [\"some_json_key\", \"some_json_key2\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"headers\": {\"Accept\": \"text/xml\"},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == \"123\"\n assert state.attributes[\"some_json_key\"] == \"some_json_value\"\n assert state.attributes[\"some_json_key2\"] == \"some_json_value2\"",
"def sk_attr(est, attr):\n from sklearn.utils.validation import check_is_fitted\n from sklearn.exceptions import NotFittedError\n try:\n check_is_fitted(est, attr)\n return True\n except NotFittedError:\n return False",
"def test_has_attr(self):\n\n self.assertTrue(hasattr(City, \"save\"))"
] | [
"0.7471137",
"0.6970188",
"0.656537",
"0.6477467",
"0.63603884",
"0.61522514",
"0.6127132",
"0.6091757",
"0.6082865",
"0.6073286",
"0.60494506",
"0.59865016",
"0.59510404",
"0.59033054",
"0.59033054",
"0.5884618",
"0.5871959",
"0.58645827",
"0.58420926",
"0.5816876",
"0.5816365",
"0.58151364",
"0.57993424",
"0.57891285",
"0.5776685",
"0.5766262",
"0.5751023",
"0.5744841",
"0.5743418",
"0.5729915",
"0.5728987",
"0.5717495",
"0.5708464",
"0.5707245",
"0.57014775",
"0.56512785",
"0.5649086",
"0.56411487",
"0.56267333",
"0.5623912",
"0.5609945",
"0.56003994",
"0.55817485",
"0.5577416",
"0.55644375",
"0.5564416",
"0.555694",
"0.5547089",
"0.5539344",
"0.55362225",
"0.55355066",
"0.55349934",
"0.55285335",
"0.5519334",
"0.5516847",
"0.55155045",
"0.5512193",
"0.5511551",
"0.5508268",
"0.5487549",
"0.54849494",
"0.5483715",
"0.54781497",
"0.5476417",
"0.5473543",
"0.54710597",
"0.5458062",
"0.5453779",
"0.54502165",
"0.5442559",
"0.5441879",
"0.5437078",
"0.54354393",
"0.54313636",
"0.5425293",
"0.5424809",
"0.5419289",
"0.54184115",
"0.54161346",
"0.54121184",
"0.5392068",
"0.5380552",
"0.5377808",
"0.537136",
"0.53596336",
"0.5355482",
"0.5347723",
"0.5344241",
"0.5340888",
"0.53395146",
"0.53384024",
"0.5335995",
"0.53348607",
"0.5333716",
"0.53335744",
"0.5332822",
"0.53283215",
"0.5327713",
"0.5327309",
"0.5325458"
] | 0.7355271 | 1 |
Test setting a nested attribute. | def test_set_nested_attribute_not_allowed(self):
path = "skills.dummy.behaviours.dummy.config.behaviour_arg_1"
new_value = "new_dummy_name"
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", path, new_value],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!"
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output",
"def test_attribute_setters(self):\n test = self.test\n test.id = 2\n test['name'] = 'bar'\n\n self.assertEqual(test.id, 2)\n self.assertEqual(test['name'], 'bar')",
"def test_set_attr(self):\n self.my_city.name = \"Denver\"\n self.assertEqual(self.my_city.name, \"Denver\")",
"def test_update_attribute_data(self):\n pass",
"def test_set_attribute():\n elem = hr.Element(\"this is some text\", id=\"spam\", style=\"eggs\")\n elem.set_attributes(holy=\"grail\", answer=42)\n\n assert (\n get_opening_line(elem)\n == '<html id=\"spam\" style=\"eggs\" holy=\"grail\" answer=\"42\">'\n )",
"def testSetAttributeAction(self):\n\t action = SetAttributeAction('x', 'y', ('key',), 'z')\n\t self.failUnless(action.field == 'y')\n\t self.failUnless(action.value == 'z')",
"def test_getter_child_attr(self):\n root = netapp_api.NaElement('root')\n root.add_attr('val', 'FAKE_VALUE')\n\n self.assertEqual('FAKE_VALUE',\n root.__getitem__('val'))",
"def setattr_nested(obj, attributes, value):\n pre, _, post = attributes.rpartition(\".\")\n setattr(getattr_nested(obj, pre) if pre else obj, post, value)",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def test_setAttribute():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute();\n x.setAttribute(\"foo\");\n x.setAttribute(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute(\"onfoo\", \"bar\");\n \"\"\").failed()",
"def test_one_att(self):\n self.test_attribute.is_down = mock.Mock(return_value=False)\n self.run_mock_analyzer([self.test_attribute, ])\n self.assert_mock_analyzer(self.test_attribute)",
"def set_nested_attr(__obj: object, __name: str, __value: Any):\n pre, _, post = __name.rpartition('.')\n return setattr(get_nested_attr(__obj, pre) if pre else __obj, post, __value)",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def test_attribute_access(self):\n cd = ConfigDict()\n\n cd['x'] = 1\n self.assertEquals(cd.x, 1)\n\n cd.y = 2\n self.assertEquals(cd['y'], 2)",
"def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)",
"def test_set_attrs(self):\n city2 = City()\n city2.name = \"Hawaii\"\n self.assertEqual(city2.name, \"Hawaii\")\n city2.state_id = \"<3\"\n self.assertEqual(city2.state_id, \"<3\")\n self.assertEqual(City.name, \"\")\n self.assertEqual(City.state_id, \"\")",
"def test_data_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.data = 0",
"def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)",
"def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_attributeWithValue(self):\n xp = XPathQuery(\"/foo[@attrib1='value1']\")\n self.assertEqual(xp.matches(self.e), 1)",
"def test_update_metadata_by_attribute(self):\n pass",
"def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def test_register_existing_attr(self):\n pass",
"def test_get_attribute_data(self):\n pass",
"def test_attr_type(self):\n self.my_city.state_id = \"1c5dd90a-a3df-4516-b1ac-32a8715e5539\"\n self.my_city.name = \"New York\"\n self.assertIsInstance(self.my_city.name, str)\n self.assertIsInstance(self.my_city.state_id, str)",
"def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(len(sub_ch), 2)\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual(c.get_content(), 'v1')\n else:\n self.assertEqual(c.get_content(), 'v2')",
"def set_value(node, attr, attr_data, verbose=False):\n\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)",
"def test_attribute_getters(self):\n test = self.test\n self.assertEqual(test.name, 'Foo')\n self.assertEqual(test['id'], 1)",
"def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(2, len(sub_ch))\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual('v1', c.get_content())\n else:\n self.assertEqual('v2', c.get_content())",
"def test_patch_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.patch = 234\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_set_with_shallow_path():\n shallow_key_path = 'shallow_key_path'\n test_value = 'shallow key path value'\n\n config.set(shallow_key_path, test_value)\n assert config.get(shallow_key_path) == test_value",
"def test_attributeWithValueAny(self):\n xp = XPathQuery(\"/foo/*[@attrib2='value2']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar2])",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def _check_property_on_test_context(\n context: \"HookContext\", attr_str: str, user_facing_name: str, param_on_builder: str\n):\n value = getattr(context, attr_str)\n if value is None:\n raise DagsterInvalidPropertyError(\n f\"Attribute '{user_facing_name}' was not provided when \"\n f\"constructing context. Provide a value for the '{param_on_builder}' parameter on \"\n \"'build_hook_context'. To learn more, check out the testing hooks section of Dagster's \"\n \"concepts docs: https://docs.dagster.io/concepts/ops-jobs-graphs/op-hooks#testing-hooks\"\n )\n else:\n return value",
"def test_api_object_update_property(self, api_object):\n attrs_dict = {'uuid_': 'CREATING'}\n api_object.update_public_attrs(attrs_dict)\n assert api_object.uuid_ != 'CREATING'",
"def _checked_set(self, struct, field, value):\n setattr(struct, field, value)\n self._check_field_length(struct.DESCRIPTOR.fields_by_name[field], value)",
"def set_attribute(obj, path, value):\n names = path.split('.')\n if len(names) > 1:\n set_attribute(getattr(obj, names[0]), '.'.join(names[1:]), value)\n else:\n setattr(obj, names[0], value)",
"def _set_attributes(self):",
"def test_attr(self):\n self.assertTrue(hasattr(self.amenity, \"created_at\"))\n self.assertTrue(hasattr(self.amenity, \"id\"))\n self.assertTrue(hasattr(self.amenity, \"updated_at\"))\n self.assertFalse(hasattr(self.amenity, \"random_attr\"))\n self.assertTrue(hasattr(self.amenity, \"name\"))\n self.assertEqual(self.amenity.__class__.__name__, \"Amenity\")\n self.assertEqual(self.amenity.name, \"\")",
"def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"",
"def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})",
"def test_traversal__path_resource_attribute(path, attribute_name, value):\n from pyramid.traversal import traverse\n root_resource = root_resource_factory()\n t = traverse(root_resource, path)\n context = t['context']\n assert getattr(context, attribute_name) == value",
"def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )",
"def testSetParentage(self):\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )\n\n self.cc.parent = 'bob'\n self.media_ref.parent = 'joe'\n\n self.cd.set_parentage()\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cd.media_ref.parent\n )",
"def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_set_node_properties(self):\n\n pass",
"def test_get_attributes(self):\n pass",
"def has_nested_attr(__obj: object, __name: str) -> bool:\n pre, _, post = __name.rpartition('.')\n if pre:\n if has_nested_attr(__obj, pre):\n return has_nested_attr(get_nested_attr(__obj, pre), post)\n else:\n return False\n else:\n return hasattr(__obj, post)",
"def test_attr_dict(self):\n obj = awstats_reader.AttrDict([('this','that'), ('thus','those')])\n self.assertEqual(obj.thus, 'those')",
"def test_update_or_set_public_attr(self, api_object):\n attrs_dict = dict(status='SERVICE', not_in_init='secret', _private='secret')\n api_object.update_or_set_public_attrs(attrs_dict)\n assert api_object.status == 'SERVICE'\n try:\n assert api_object.not_in_init == 'secret'\n except AttributeError:\n raise AssertionError\n else:\n assert True\n try:\n api_object._private\n except AttributeError:\n assert True\n else:\n raise AssertionError",
"def test_setter_na_element(self):\n root = netapp_api.NaElement('root')\n root['e1'] = netapp_api.NaElement('nested')\n self.assertEqual(len(root.get_children()), 1)\n e1 = root.get_child_by_name('e1')\n self.assertIsInstance(e1, netapp_api.NaElement)\n self.assertIsInstance(e1.get_child_by_name('nested'),\n netapp_api.NaElement)",
"def test_attributes(self):\n self.assertTrue(hasattr(self.city, 'name'))\n self.assertTrue(hasattr(self.city, 'state_id'))",
"def test_setter_na_element(self):\n root = netapp_api.NaElement('root')\n root['e1'] = netapp_api.NaElement('nested')\n self.assertEqual(1, len(root.get_children()))\n e1 = root.get_child_by_name('e1')\n self.assertIsInstance(e1, netapp_api.NaElement)\n self.assertIsInstance(e1.get_child_by_name('nested'),\n netapp_api.NaElement)",
"def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')",
"def test_descriptor_set_get(self):\n obj = TestObject()\n self.assertIsNone(obj.test_setting)\n obj.test_setting = \"foo\"\n self.assertEqual(obj.test_setting, \"foo\")",
"def _setAttributes(self, primaryAttr, attrs):\n return False",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_set_attributes_error(self):\n r = Resources()\n attr_lst = [\"num_wires\", \"num_gates\", \"depth\", \"shots\", \"gate_types\"]\n\n for attr_name in attr_lst:\n with pytest.raises(FrozenInstanceError, match=\"cannot assign to field\"):\n setattr(r, attr_name, 1)",
"def UseAttribute(self) -> bool:",
"def set_attribute(self, node, attribute, value):\n name = '{}.{}'.format(node, attribute)\n try:\n attr_type = mc.getAttr(name, typ=True)\n if 'string' in attr_type:\n mc.setAttr(name, value, typ='string')\n elif 'float3' in attr_type:\n mc.setAttr(\n name, value[0][0], value[0][1], value[0][2], typ='float3'\n )\n else:\n mc.setAttr(name, value)\n except Exception:\n return False\n return True",
"def test_dotwiz_plus_set_attr():\n dd = DotWizPlus()\n dd.a = [{'one': 1, 'two': 2}]\n\n item = dd.a[0]\n assert isinstance(item, DotWizPlus)\n assert item.one == 1\n assert item.two == 2",
"def test_wifi_attribute(self):\n self.amenity.name = \"greatWifi\"\n if hasattr(self.amenity, 'name'):\n actual = self.amenity.name\n else:\n actual = ''\n expected = \"greatWifi\"\n self.assertEqual(expected, actual)",
"def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)",
"def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)",
"def attr(self, name):\r\n return Assert(getattr(self.obj, name))",
"def _check_nested(self, key, self_val, nested):\n nested_val = getattr(nested, key)\n assert self_val == nested_val, \\\n \"selector['{}']='{}' in '{}' doesn't match header['{}']='{}' in nested file '{}'.\".format(\n key, self_val, self.filename, key, nested_val, nested.filename)",
"def test_descriptor_set_get_dict(self):\n obj = TestObject()\n self.assertIsNone(obj.__dict__.get('test_setting'))\n obj.test_setting = \"foo\"\n self.assertEqual(obj.__dict__.get('test_setting'), \"foo\")",
"def test_existing_attribute(self):\n self.assertEqual(import_from_setting('TEST_SETTING'), 1)",
"def _SetValue(param, field, value):\n attr = None\n attr_name = ''\n for attr_name in field.split('.'):\n if attr:\n param = attr\n\n if not hasattr(param, attr_name):\n raise ValueError(\"Can't find field %s.\" % field)\n attr = getattr(param, attr_name)\n param.SetField(attr_name, value)",
"def test_set_attribute_override():\n elem = hr.Element(\n \"this is some text\",\n style=\"cheese\",\n answer=1,\n clas=\"spam\", # cspell:disable-line\n )\n elem.set_attributes(holy=\"grail\", answer=42, _clas=\"eggs\") # cspell:disable-line\n\n opening_tag = get_opening_line(elem)\n assert 'style=\"cheese\"' in opening_tag\n assert 'answer=\"42\"' in opening_tag\n assert 'class=\"eggs\"' in opening_tag\n assert 'holy=\"grail\"' in opening_tag",
"def test_get_fails_when_setting_non_dict_attribute(self):\n behaviour_arg_1 = \"behaviour_arg_1\"\n path = f\"skills.dummy.behaviours.dummy.args.{behaviour_arg_1}.over_the_string\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, \"new_value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{behaviour_arg_1}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_nested_col(self):\n\n self.assertRaises(TypeError, self.table.where, 'c_nested')",
"def testGetattr(self):\n patch = self.pd.main\n\n obj = patch.Lt_(0.5)\n self.assertEquals('<~', obj.name)\n self.assertEquals(('<~', 0.5), obj.args)\n\n alternate = patch.Obj('<~', 0.5)\n self.assertEquals('<~', alternate.name)\n self.assertEquals(('<~', 0.5), alternate.args)\n\n bang = patch.Bang()\n self.assertTrue(isinstance(bang, pdctl.Bang))",
"def test_set_attribute_with_response_returning_place():\n response_ok = {\n 'candidates': [{\n 'formatted_address': '92340 Bourg-la-Reine, France',\n 'name': 'Bourg-la-Reine',\n 'place_id': 'ChIJBY5REypx5kcRgD6LaMOCCwQ'}],\n 'status': 'OK'}\n place = Place()\n place.response = response_ok\n place.set_attribute()\n assert place.status is True\n assert place.place_id == \"ChIJBY5REypx5kcRgD6LaMOCCwQ\"\n assert place.name == \"Bourg-la-Reine\"\n assert place.address == \"92340 Bourg-la-Reine, France\"",
"def test_attr_access(self):\n c = ConfigDict()\n c.test = 5\n self.assertEqual(5, c.test)\n self.assertEqual(5, c['test'])\n c['test'] = 6\n self.assertEqual(6, c.test)\n self.assertEqual(6, c['test'])\n del c.test\n self.assertTrue('test' not in c)\n self.assertEqual(None, c.test)",
"def test_nested(cls, value, res):\n\tobj = cls(value, DEFAULT_POD)\n\tassert obj == res",
"def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")",
"def set_attr_impl(context, builder, sig, args, attr):\n typ, valty = sig.args\n target, val = args\n\n if attr in typ.struct:\n # It's a struct member\n inst = context.make_helper(builder, typ, value=target)\n data_ptr = inst.data\n data = context.make_data_helper(builder, typ.get_data_type(),\n ref=data_ptr)\n\n # Get old value\n attr_type = typ.struct[attr]\n oldvalue = getattr(data, _mangle_attr(attr))\n\n # Store n\n setattr(data, _mangle_attr(attr), val)\n context.nrt.incref(builder, attr_type, val)\n\n # Delete old value\n context.nrt.decref(builder, attr_type, oldvalue)\n\n elif attr in typ.jit_props:\n # It's a jitted property\n setter = typ.jit_props[attr]['set']\n disp_type = types.Dispatcher(setter)\n sig = disp_type.get_call_type(context.typing_context,\n (typ, valty), {})\n call = context.get_function(disp_type, sig)\n call(builder, (target, val))\n _add_linking_libs(context, call)\n else:\n raise NotImplementedError(\n 'attribute {0!r} not implemented'.format(attr))",
"def test_attributes(self):\n attributes = storage.attributes()[\"Review\"]\n b = Review()\n for k, v in attributes.items():\n self.assertTrue(hasattr(b, k))\n self.assertEqual(type(getattr(b, k, None)), v)",
"def test_assign(mock_empty_os_environ, mock_env_parser, attr, value, expected):\n s = settings_parser.Settings(prefix='this', settings_file_suffix='suffix', parser=None)\n assert s.settings_files == []\n setattr(s, attr, value)\n assert getattr(s, attr) == expected",
"def __setattr__(self, item, value):\n\n # This test allows attributes to be set in the __init__ method\n if \"_AttribDict__initialised\" not in self.__dict__:\n return dict.__setattr__(self, item, value)\n\n # Any normal attributes are handled normally\n elif item in self.__dict__:\n dict.__setattr__(self, item, value)\n\n else:\n self.__setitem__(item, value)",
"def test_set_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute = \"value\"",
"def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))",
"def test_build_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.build = 9001\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_attributenamenotfound(self):\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])",
"def test_init_attributes(self):\n t = self.Test({'id': 1, 'poop': 'abc'})\n\n self.assertEqual(t.id, 1)\n self.assertEqual(t.name, None)\n self.assertRaises(AttributeError, t.__getattribute__, 'poop')",
"def test_container_attribute(self):\n self.add_pool()\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = DaosCommand(self.bin)\n\n expected_for_param = []\n name = self.params.get(\"name\", '/run/attrtests/name_handles/*/')\n expected_for_param.append(name[1])\n value = self.params.get(\"value\", '/run/attrtests/value_handles/*/')\n expected_for_param.append(value[1])\n\n # Convert any test yaml string to bytes\n if isinstance(name[0], str):\n name[0] = name[0].encode(\"utf-8\")\n if isinstance(value[0], str):\n value[0] = value[0].encode(\"utf-8\")\n\n attr_dict = {name[0]: value[0]}\n\n expected_result = 'PASS'\n for result in expected_for_param:\n if result == 'FAIL':\n expected_result = 'FAIL'\n break\n try:\n self.container.container.set_attr(data=attr_dict)\n\n data = self.daos_cmd.container_list_attrs(\n pool=self.pool.uuid,\n cont=self.container.uuid)\n self.verify_list_attr(attr_dict, data['response'])\n\n # Request something that doesn't exist\n if name[0] is not None and b\"Negative\" in name[0]:\n name[0] = b\"rubbish\"\n\n attr_value_dict = self.container.container.get_attr([name[0]])\n\n # Raise an exception if the attr value is empty\n # This is expected to happen on Negative test cases\n if not attr_value_dict[name[0]]:\n raise DaosApiError(\"Attr value is empty. \"\n \"Did you set the value?\")\n self.verify_get_attr(attr_dict, attr_value_dict)\n\n if expected_result in ['FAIL']:\n self.fail(\"Test was expected to fail but it passed.\\n\")\n\n except (DaosApiError, DaosTestError) as excep:\n print(excep)\n print(traceback.format_exc())\n if expected_result == 'PASS':\n self.fail(\"Test was expected to pass but it failed.\\n\")",
"def test_update_metadata_by_attribute1(self):\n pass",
"def test_put_user_property(self):\n pass",
"def test_has_attr(self):\n self.assertTrue(hasattr(City, \"save\"))",
"def set_attribute(self, name, value):\n\n pass",
"def test_set_diameter():\n radius = 10\n c = Circle(radius) \n expected_diameter = 10 \n c.diameter = expected_diameter \n assert c.diameter == expected_diameter\n assert c.radius == expected_diameter / 2",
"def __setattr__ (self, attr, value):\n self.set_value (attr, value)",
"def has_attribute(self, name):\n\n pass",
"async def test_update_with_json_attrs_with_json_attrs_path(hass: HomeAssistant) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n json={\n \"toplevel\": {\n \"master_value\": \"123\",\n \"second_level\": {\n \"some_json_key\": \"some_json_value\",\n \"some_json_key2\": \"some_json_value2\",\n },\n },\n },\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes_path\": \"$.toplevel.second_level\",\n \"json_attributes\": [\"some_json_key\", \"some_json_key2\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"headers\": {\"Accept\": \"text/xml\"},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == \"123\"\n assert state.attributes[\"some_json_key\"] == \"some_json_value\"\n assert state.attributes[\"some_json_key2\"] == \"some_json_value2\"",
"def sk_attr(est, attr):\n from sklearn.utils.validation import check_is_fitted\n from sklearn.exceptions import NotFittedError\n try:\n check_is_fitted(est, attr)\n return True\n except NotFittedError:\n return False",
"def test_has_attr(self):\n\n self.assertTrue(hasattr(City, \"save\"))"
] | [
"0.7471137",
"0.7355271",
"0.656537",
"0.6477467",
"0.63603884",
"0.61522514",
"0.6127132",
"0.6091757",
"0.6082865",
"0.6073286",
"0.60494506",
"0.59865016",
"0.59510404",
"0.59033054",
"0.59033054",
"0.5884618",
"0.5871959",
"0.58645827",
"0.58420926",
"0.5816876",
"0.5816365",
"0.58151364",
"0.57993424",
"0.57891285",
"0.5776685",
"0.5766262",
"0.5751023",
"0.5744841",
"0.5743418",
"0.5729915",
"0.5728987",
"0.5717495",
"0.5708464",
"0.5707245",
"0.57014775",
"0.56512785",
"0.5649086",
"0.56411487",
"0.56267333",
"0.5623912",
"0.5609945",
"0.56003994",
"0.55817485",
"0.5577416",
"0.55644375",
"0.5564416",
"0.555694",
"0.5547089",
"0.5539344",
"0.55362225",
"0.55355066",
"0.55349934",
"0.55285335",
"0.5519334",
"0.5516847",
"0.55155045",
"0.5512193",
"0.5511551",
"0.5508268",
"0.5487549",
"0.54849494",
"0.5483715",
"0.54781497",
"0.5476417",
"0.5473543",
"0.54710597",
"0.5458062",
"0.5453779",
"0.54502165",
"0.5442559",
"0.5441879",
"0.5437078",
"0.54354393",
"0.54313636",
"0.5425293",
"0.5424809",
"0.5419289",
"0.54184115",
"0.54161346",
"0.54121184",
"0.5392068",
"0.5380552",
"0.5377808",
"0.537136",
"0.53596336",
"0.5355482",
"0.5347723",
"0.5344241",
"0.5340888",
"0.53395146",
"0.53384024",
"0.5335995",
"0.53348607",
"0.5333716",
"0.53335744",
"0.5332822",
"0.53283215",
"0.5327713",
"0.5327309",
"0.5325458"
] | 0.6970188 | 2 |
Test that the 'set' fails because the root is not recognized. | def test_no_recognized_root(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "wrong_root.agent_name", "value"],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "The root of the dotted path must be one of: {}".format(
ALLOWED_PATH_ROOTS
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_root(self):\n self.skipTest(\"\")\n response = self.fetch('/')\n self.assertEqual(response.code, 404)",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_get(self):\n client = kazoo.client.KazooClient()\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123}', None)\n self.assertEqual({'xxx': 123}, zkutils.get(client, '/foo'))\n\n # parsing error\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123', None)\n self.assertEqual(\n '{xxx: 123',\n zkutils.get(client, '/foo', strict=False)\n )\n self.assertRaises(yaml.YAMLError, zkutils.get, client, '/foo')\n\n kazoo.client.KazooClient.get.return_value = (None, None)\n self.assertIsNone(zkutils.get(client, '/foo'))",
"def test_get_nonexistent_test(self):\n response = self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')",
"def test_getter_key_error(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'get_child_by_name', return_value=None)\n self.mock_object(root, 'has_attr', return_value=None)\n\n self.assertRaises(KeyError,\n netapp_api.NaElement.__getitem__,\n root, '123')",
"def test_error_html_using_get(self):\n pass",
"def test_safeGet(self):\n self.assertIs(\n BMConfigParser().safeGet('nonexistent', 'nonexistent'), None)\n self.assertEqual(\n BMConfigParser().safeGet('nonexistent', 'nonexistent', 42), 42)",
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"wrong_root.agent_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def test_get_fantray_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Fantray.get, session, node)",
"def test_get(self):\n pass",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_get_item_not_found(self):\n resp = self.app.get('/items/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_lookup_missing(self):\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n ret = env.lookup('foo')\n self.assertIsNone(ret)",
"def test_get_root_html1(self):\n pass",
"def test_missing_root_setting(self, settings):\n def _error(*args, **kwargs):\n raise AttributeError\n\n error = MagicMock()\n error.side_effect = _error\n\n settings.MADCAP_FLARE_ROOT.__get__ = error\n\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n {'help_key': 'test-flare'})",
"def test_tree_with_one_node_root_exists(one_t):\n assert one_t.root",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_get_not_found(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': INVALID_UUID},\n )\n response = self.request_knox(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def testNonExistentRootPath(self):\n\n file_defs = [\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1, 'mod_inc': 1},\n\n # Empty directories\n {'name': 'empty_dir1', 'path': '', 'size': -1},\n {'name': 'empty_dir2', 'path': 'empty_dir1', 'size': -1},\n {'name': 'empty_dir3', 'path': 'empty_dir1/empty_dir2', 'size': -1},\n ]\n\n # All new files\n self._setup_test_store(file_defs)\n self._sync_drives()\n\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n\n with self.assertRaises(ValueError):\n for res in drive.get_root_file_tree('empty_dir1/empty_dir45'):\n pass",
"def test_pod_invalid_parent(self):\n session = self.login_to_apic()\n parent = Node('1','101','Switch')\n self.assertRaises(TypeError, Pod.get, session, parent)",
"def test_get_document_inexistent(empty_index):\n with pytest.raises(Exception):\n empty_index().get_document(\"123\")",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_get_root_html(self):\n pass",
"def test_gettem_using_get(self):\n pass",
"def test_root_invalid_revision(self, mock_get_entity):\n mock_get_entity.side_effect = AzureMissingResourceHttpError(\"Not Found\", 404)\n\n url = '/?{0}=InvalidRevsion'.format(TestConfig.REVISION_PARAMETER)\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 404)",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_get_root_html3(self):\n pass",
"def test_root(self):\n rv = self.root()\n self.assertEquals(rv.status_code, 200)\n self.assertIn('Welcome to Word Play', rv.get_data(as_text=True))",
"def test_get_root_html2(self):\n pass",
"def test_invalid_path_get(self):\n static_path = self.finder.find('file.ext')\n self.assertIsNone(static_path)",
"def test_get_user_404(self):\n resp = self.app.get('/users/thisuserdoesntexist')\n assert resp.status_code == 404",
"def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')",
"def test_get_nonexistant_data(self):\n response = self.client.get(\"/api/elections/1\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"message\"], \"Could not find election with id 1\")",
"def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status",
"def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))",
"def test_small_tree_has_no_root(small_tree):\n assert small_tree.root.left is None",
"def test_get_powersupply_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Powersupply.get, session, node)",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_b_get_no_items(self):\n storage = FileStorage()\n get = storage.get(User, 123)\n self.assertEqual(None, get)",
"def test_get_empty_ring(self): \n cons_hash = ConsistentHash(2)\n\n threw_value_error = False\n try:\n cons_hash.get_node('192.168.1.1')\n except exceptions.ValueError:\n threw_value_error = True\n self.assertTrue(threw_value_error)",
"def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)",
"def test_05a_get_nonexistant_app(self):\r\n res = self.app.get('/app/nonapp', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status",
"def test_get_specific_pacient_not_found(self):\n url = '/api/v1/pacientes/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)",
"def test_get_non_existent_item(self):\n\n response = self.client.get('/api/v1/category/200',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id 200 does not exist',\n str(response.data))",
"def test_get_secret_invalid_path(self, mget):\n data = json.dumps({\"data\": {}})\n mget.return_value = self._mock_response(content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secret('this/path/does/not/exist', 'null')",
"def test_get_secrets_invalid_path(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets('this/path/does/not/exist')",
"def test_get_order_not_found(self):\n resp = self.app.get('/orders/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_order_not_found(self):\n resp = self.app.get('/orders/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_get_not_found(self):\n url = reverse('notification', kwargs={'way_id': 999, 'notification_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('notification', kwargs={'way_id': 100, 'notification_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_get1(self):\n pass",
"def test_cannot_get_service_from_store_that_does_not_exist(self):\n get_response = self.client.get('/navyget-api/v1/store/5a2bc733791e4bbc9a26f7a5/service/', headers=self.my_header)\n self.assertEqual(get_response.status, \"404 NOT FOUND\")\n self.assertIn(\"That Store does not exist.\", str(get_response.data))",
"def test_cache_get_non_existent_item(self):\n self.assertEqual(self.cache.get('ghost'), None)\n self.assertEqual(self.cache.get('ghost', 'never exists'), 'never exists')",
"def test_get2(self):\n pass",
"def test_resource_collection_get_missing_resource(self):\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n resource = collection.get('missing-uri')\n\n self.assertIsNone(resource)",
"def test_error404():\n response = echo_client(\"GET test/test HTTP/1.1\")\n assert '404' in response",
"def test_get_product_not_found(self):\n resp = self.app.get(\"/products/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_root01(self):\n result = self.init_test_app().get('/')\n self.assertEqual(\n loads(result.data), {\n '_links': [{\n 'rel': 'pollination',\n 'href': '/pollination'\n }, {\n 'rel': 'tester-ui',\n 'href': '/tester'\n }, {\n 'href': \"/estimate-runtime\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"estimate\"\n }, {\n 'href': \"/reveg-curve.png\",\n 'params': {\n 'years': {\n 'type': \"integer\"\n }\n },\n 'rel': \"reveg-curve\"\n }]\n })",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def test_get_a_thing_that_doesnt_exist(self) -> None:\n with self.assertRaises(things.NoSuchThing):\n things.get_a_thing(2)",
"def test_get_inventory_not_found(self):\n resp = self.app.get('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_search_key() -> None:\n # assert that having a wrong key at root level\n # in the json will raise an error\n key = \"toto\"\n d = {\"toto\": {\"a\": \"b\"}, \"c\": \"d\"}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n # Search when the key is in a deeper nested level\n key = \"nested_key\"\n d = {\"en\": {\"level1\": {\"level2\": {\"nested_key\": \"value\"}}}}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n return",
"def test_api_404(self):\n r = requests.get('{server}/api/0.1/sam'.format(\n server=self.get_server_url()),\n headers={'accept': 'application/json'})\n self.assertEquals(404, r.status_code)\n self.assertIn('error', r.json())",
"def test_get_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.get(\"key 1\")",
"def test_get_specific_office_not_found(self):\n url = '/api/v1/consultorios/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def check_for_root(self):\n if self.root is None:\n raise ValueError(\"root is NoneType\")",
"def test_nosuch_detail(self):\n\t\tresponse = self.client.get(\"/post/2/\")\n\t\tself.assertEqual(response.status_code, 404)\n\t\t# We got an error before trying to use a template,\n\t\t# so no template was accessed\n\t\t###print(f\"@@@ {response}\")\n\t\t###print(f\"@@@ @@@ template name: {response.template_name}\")\n\t\t###self.assertTemplateUsed(response, None)\n\t\tself.assertFalse(hasattr(response, \"template_name\"))",
"def test_get_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.get(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)",
"def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))",
"def test_graph_retrieve_bad(self):\n fuseki = GraphStore()\n with self.assertRaises(ConnectionError):\n fuseki._graph_retrieve(\"default\")",
"async def test_missing(cli):\n response = await cli.get(f'/result/nope')\n assert response.status == 404",
"def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")",
"def test_get_leader(self):\n self._mock_api(200, 'foo.example.com')\n self.assertEquals(self.client.election.get('/mysql'), 'foo.example.com')\n self._mock_api(200,'')\n self.assertRaises(etcd.EtcdException, self.client.election.get, '/mysql')",
"def test_non_existent_question(self):\n\n res = self.app.get('/api/v1/questions/'+str(56))\n self.assertEqual(res.status_code, 404)",
"def test_get_for_not_found_team(self):\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/Team_other/users',\n headers=self.login_headers(user),\n status=404\n )",
"def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)",
"def test_bst_single_node():\n assert BST(1).root is None",
"def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)",
"def testGet(self):\n response = self.runGet(self.root)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(len(data), 1)",
"def test_404(self):\n for path in ('/foo', '/abs', '/abs/'):\n response = self.client.get(path)\n self.assertEqual(response.status_code,\n status.HTTP_404_NOT_FOUND,\n f'should get 404 for {path}')\n self.assertIn('text/html', response.content_type)\n\n response = self.client.get('/abs/1307.0001v999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known paper ID with '\n 'nonexistent version')\n response = self.client.get('/abs/alg-geom/07059999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for valid old paper ID '\n 'with nonexistent paper number affix')\n response = self.client.get('/abs/astro-ph/0110242')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known deleted paper')\n response = self.client.get('/abs/foo-bar/11223344')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for bad paper ID')",
"def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)",
"def test_get(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_get(self):\n self.assertEqual(200, self.resp.status_code)",
"def test_tree_intersection_error():\n with pytest.raises(AttributeError):\n assert tree_intersection(1, 2)",
"def test_05c_get_nonexistant_app_tutorial(self):\r\n res = self.app.get('/app/noapp/tutorial', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status",
"def test_get404(self):\n with self.assertRaises(Exception) as context:\n self.api.get(\"card_fake\", limit=2)\n self.assertTrue(\"card not found\" in context.exception.__str__())",
"def test_get(self):\n self.assertEqual(200, self.response.status_code)",
"def test_get(self):\n self.assertEqual(200, self.response.status_code)",
"def test_get_unexisting_book(self):\n\n response1 = self.client.get(\n '/api/v1/books/NJCF4057', content_type='application/json', headers=self.get_admin_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n 'Book Not Found')\n assert response1.status_code == 404",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_removing_root(item):\n item.root = None\n assert not item.has_root",
"def test_request_root(self):\n response = requests.get(self.url + '/')\n\n self.assertEqual(response.status_code, 200)\n\n json = response.json()\n self.assertIsInstance(json, dict)\n self.assertEqual(len(json.keys()), 2)\n self.assertIn('users', json.keys())\n self.assertIn('groups', json.keys())\n\n users = json.get('users')\n groups = json.get('groups')\n self.assertIsInstance(users, list)\n self.assertIsInstance(groups, list)\n self.assertEqual(len(users), 2)\n self.assertEqual(len(groups), 3)\n self.assertIn('John', users)\n self.assertIn('Jane', users)\n self.assertIn('Human', groups)\n self.assertIn('Male', groups)\n self.assertIn('Female', groups)"
] | [
"0.81131697",
"0.81131697",
"0.6998153",
"0.68379533",
"0.680399",
"0.6676301",
"0.6673244",
"0.66658753",
"0.66128844",
"0.65304714",
"0.6508784",
"0.6396497",
"0.6373404",
"0.63408643",
"0.6340362",
"0.6320993",
"0.6299329",
"0.62931",
"0.62797034",
"0.6279614",
"0.62703735",
"0.6222162",
"0.6213923",
"0.61607766",
"0.61532617",
"0.6142906",
"0.6136606",
"0.61338687",
"0.6133071",
"0.61307055",
"0.6128813",
"0.6117946",
"0.61122656",
"0.6097754",
"0.6091606",
"0.60863405",
"0.60595405",
"0.60402685",
"0.602265",
"0.6007301",
"0.5993404",
"0.59857017",
"0.5976707",
"0.5963646",
"0.59613895",
"0.5959834",
"0.5956122",
"0.5951712",
"0.5951607",
"0.59396356",
"0.5939268",
"0.59362453",
"0.593469",
"0.5931543",
"0.5931543",
"0.59212303",
"0.59212303",
"0.592027",
"0.59175694",
"0.5914404",
"0.5905598",
"0.590496",
"0.5890854",
"0.5871459",
"0.5868394",
"0.5866175",
"0.5861127",
"0.5860907",
"0.5855379",
"0.58484334",
"0.58479524",
"0.58430797",
"0.584179",
"0.58411825",
"0.5841019",
"0.58355755",
"0.58321416",
"0.5831363",
"0.58231956",
"0.5822531",
"0.5817458",
"0.58161587",
"0.58062446",
"0.5803435",
"0.580305",
"0.57967216",
"0.57940114",
"0.57934356",
"0.57829833",
"0.57818097",
"0.57818097",
"0.57811785",
"0.5765995",
"0.57651424",
"0.5764405",
"0.5764405",
"0.5762511",
"0.57536715",
"0.5750757",
"0.57480407"
] | 0.618814 | 23 |
Test that the 'get' fails because the path is too short but the root is correct. | def test_too_short_path_but_root_correct(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "agent", "data"],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "The path is too short. Please specify a path up to an attribute name."
)
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "skills.dummy", "value"],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "The path is too short. Please specify a path up to an attribute name."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_invalid_path_get(self):\n static_path = self.finder.find('file.ext')\n self.assertIsNone(static_path)",
"def verify_root_path(self) -> None:\n path = \"/\"\n with self.assertRaises(AccessDeniedException):\n verify_file_path(path)",
"def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write",
"def test_get_secrets_invalid_path(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets('this/path/does/not/exist')",
"def test_get_secret_invalid_path(self, mget):\n data = json.dumps({\"data\": {}})\n mget.return_value = self._mock_response(content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secret('this/path/does/not/exist', 'null')",
"def test_root(self):\n self.skipTest(\"\")\n response = self.fetch('/')\n self.assertEqual(response.code, 404)",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_safeGet(self):\n self.assertIs(\n BMConfigParser().safeGet('nonexistent', 'nonexistent'), None)\n self.assertEqual(\n BMConfigParser().safeGet('nonexistent', 'nonexistent', 42), 42)",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)",
"def test_get_absolute_path():\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"../foo\"), \"/bar/foo\")\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"/foo\"), \"/foo\")",
"def validate_short_path(short_path):",
"def verify_restricted_path(self) -> None:\n path = \"/usr\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def testNonExistentRootPath(self):\n\n file_defs = [\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1, 'mod_inc': 1},\n\n # Empty directories\n {'name': 'empty_dir1', 'path': '', 'size': -1},\n {'name': 'empty_dir2', 'path': 'empty_dir1', 'size': -1},\n {'name': 'empty_dir3', 'path': 'empty_dir1/empty_dir2', 'size': -1},\n ]\n\n # All new files\n self._setup_test_store(file_defs)\n self._sync_drives()\n\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n\n with self.assertRaises(ValueError):\n for res in drive.get_root_file_tree('empty_dir1/empty_dir45'):\n pass",
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_set_get_incorrect_path(self):\n with pytest.raises(\n ClickException, match=\"Attribute `.*` for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.INCORRECT_PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n\n with pytest.raises(\n ClickException,\n match=\"Attribute `behaviours.dummy.args.behaviour_arg_100500` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n self.INCORRECT_PATH,\n str(self.NEW_VALUE),\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')",
"def test_client_id_path() -> None:\n assert indieauth._parse_client_id(\"http://ex.com\").path == \"/\"\n assert indieauth._parse_client_id(\"http://ex.com/hello\").path == \"/hello\"\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello/.world\").path == \"/hello/.world\"\n )\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello./.world\").path\n == \"/hello./.world\"\n )\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/.\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/./yo\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/../yo\")",
"def test_invalid_path(self):\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, 'foo')",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"wrong_root.agent_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)",
"def test_error_html_using_get(self):\n pass",
"def test_append_slash_slashless_unknown(self):\n request = self.rf.get(\"/unknown\")\n response = CommonMiddleware(get_response_404)(request)\n self.assertEqual(response.status_code, 404)",
"def test_nonexistent_path(tmpdir_factory):\n folder = Path(tmpdir_factory.mktemp('git'))\n path = folder.joinpath('nonexistent')\n\n with pytest.raises(ValueError):\n gitb.pull(path)",
"def test_get_file_with_remote_and_short_SHA1_error(self):\n with self.assertRaises(ShortSHA1Error):\n self.remote_tool.get_file('README', 'd7e96b3')",
"def test_invalid_pathname(self):\n self.assertFalse(Util.is_pathname_valid(''))",
"def test_get_path_not_exist(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n expected = None\n actual = Hash(self.file).get()\n self.assertEqual(expected, actual)",
"def test_404(self):\n for path in ('/foo', '/abs', '/abs/'):\n response = self.client.get(path)\n self.assertEqual(response.status_code,\n status.HTTP_404_NOT_FOUND,\n f'should get 404 for {path}')\n self.assertIn('text/html', response.content_type)\n\n response = self.client.get('/abs/1307.0001v999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known paper ID with '\n 'nonexistent version')\n response = self.client.get('/abs/alg-geom/07059999')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for valid old paper ID '\n 'with nonexistent paper number affix')\n response = self.client.get('/abs/astro-ph/0110242')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for known deleted paper')\n response = self.client.get('/abs/foo-bar/11223344')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND,\n f'should get 404 for bad paper ID')",
"def test_get(self):\n client = kazoo.client.KazooClient()\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123}', None)\n self.assertEqual({'xxx': 123}, zkutils.get(client, '/foo'))\n\n # parsing error\n kazoo.client.KazooClient.get.return_value = ('{xxx: 123', None)\n self.assertEqual(\n '{xxx: 123',\n zkutils.get(client, '/foo', strict=False)\n )\n self.assertRaises(yaml.YAMLError, zkutils.get, client, '/foo')\n\n kazoo.client.KazooClient.get.return_value = (None, None)\n self.assertIsNone(zkutils.get(client, '/foo'))",
"def test_parse_url_path() -> None:\n assert indieauth._parse_url(\"http://ex.com\").path == \"/\"",
"def test_github_path_purepath():\n p = github_api.GithubPath('/tensorflow/datasets/tree/master/')\n sub_p = p / 'some_folder'\n assert isinstance(sub_p, github_api.GithubPath)\n assert str(p) == '/tensorflow/datasets/tree/master'\n assert p == github_api.GithubPath.from_repo('tensorflow/datasets')",
"def test_00(self):\n result = resolve_path({'_id': '1'}, '')\n expected = '/index.html'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, '/')\n expected = '/index.html'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'foo.png')\n expected = '/foo.png'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'bar/foo.js')\n expected = '/bar/foo.js'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'main.js')\n expected = '/main.js'\n self.assertEqual(result, expected)",
"def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)",
"def test_get_nonexistent_test(self):\n response = self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')",
"def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)",
"def test_url_path(self):\n response = self.client.get('/planner/recipes/1/')\n self.assertEqual(response.status_code, 200)",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)",
"def test_not_a_valid_fuzz_path(self):\n self.assertFalse(cifuzz.check_fuzzer_build('not/a/valid/path'))",
"def test_path(self):\n self.assertEqual(self.ftp_case.path, '/rfc/rfc1808.txt')\n self.assertEqual(self.ldap_case.path, '/c=GB')\n self.assertEqual(self.news_case.path, \n 'comp.infosystems.www.servers.unix')\n self.assertEqual(self.telnet_case.path, '/')\n self.assertEqual(self.urn_case.path, \n 'oasis:names:specification:docbook:dtd:xml:4.1.2')",
"def local_assert_empty(path):\n try:\n local = get_local(path)\n except ValueError:\n return\n raise ValueError(\"Something exists at %s\" % local.path)",
"def test_root() -> Path:\n return TEST_ROOT",
"def test_missing_shortlink(self):\n rv = self.app.get('/TheStakeOut')\n assert 'No url found' in rv.data",
"def test_docs_paths():\n assert os.path.exists('test/examples/docs/paths-root-api.md')\n assert os.path.exists('test/examples/docs/paths-subpath1.md')\n assert os.path.exists('test/examples/docs/paths-subpath1.md')",
"def path_home_mock():\n raise AttributeError()",
"def test_get_failure(self, mock_exists):\n static_path = self.finder.find('can/put/anything/here.js')\n self.assertEqual(static_path, ())",
"def verify_non_existing_path(self) -> None:\n path = \"/some/non/existing/path\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)",
"def test_verify_path_5(self):\n result = basic.verify_path(str(self.test_directory1))\n self.assertTrue(result)",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"wrong_root.agent_name\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def test_get_path_returns_none_for_bad_key(\n self, audio_store_and_expected_files, key):\n audio_store = audio_store_and_expected_files[0]\n assert audio_store.get_path(key) is None",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")",
"def test_nonexistent_path(tmpdir):\n with pytest.raises(IOError):\n checksum(tmpdir.join(\"does-not-exist.txt\").strpath)",
"def test_predicates_on_unsanitized_paths(self):\n self.mfs.add_entries({'/just/another/pythonista': ''})\n\n self.assertTrue(os.path.isdir('///just'))\n self.assertTrue(os.path.isdir('///just/////another'))\n self.assertTrue(os.path.exists('///just////another////////pythonista'))\n self.assertTrue(os.path.isfile('///just////another////////pythonista'))",
"def test_verify_path2_17(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def test_environment_path_subdir_leadingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"/keys\"\n )",
"def test_append_slash_have_slash(self):\n request = self.rf.get(\"/slash/\")\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)",
"def path_validate(path):\n # functionality to be added later\n return path",
"def test_error404():\n response = echo_client(\"GET test/test HTTP/1.1\")\n assert '404' in response",
"def test_sha1_from_path(self):\n self.assertEqual(TEST_SHA1, _get_sha1_from_path(TEST_SHA1))\n self.assertEqual(TEST_SHA1, _get_sha1_from_path('/' + TEST_SHA1))\n self.assertEqual(TEST_SHA1, _get_sha1_from_path('/test/' + TEST_SHA1))",
"def test_collisions_file_path(self):\n self.assertRaises(ValueError, collisions_clean, \"not_a_file_path\")",
"def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False",
"def assert_path(self, root: Node, path: str) -> None:\n\n if not self.__assert_path(root, path):\n raise Exception('Path \\'{}\\' not found in root node:\\n{}'.format(path, root))",
"def test_find_path_bi():\n assert True",
"def test_get_base_url():\n eq_(get_base_url(\"http://foo.com/bar/baz\"), \"http://foo.com\")\n eq_(get_base_url(\"https://foo.com:443/foo/bar\"), \"https://foo.com:443\")",
"def testInvalidPostPath(self):\n for path in ('framework', 'endpoint', 'invalid'):\n status, _ = self._http_post(path, \"some-data\")\n self.assertEqual(status, 404)",
"def test_computed_url(self):\n t = TwoHundredRequest()\n self.assertEqual(\"twohundred\", t.url_path())",
"def test_get_single_different(single_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n single_bucket.get(\"key 2\")",
"def test_BenchmarkSuite_invalid_path_access(benchmark_suite: typing.Callable):\n bs = benchmark_suite()\n with test.Raises(TypeError):\n _ = bs.path",
"def test_verify_path_3(self):\n result = basic.verify_path(str(self.test_directory1), \"dir\")\n self.assertTrue(result)",
"def test_get_strict_no_sdb_in_uri():\n\n msg = 'SDB uri must start with \"sdb://\"'\n with pytest.raises(SaltInvocationError, match=msg) as cm:\n sdb.get(\"://salt/foo\", strict=True)",
"def test_local_path(nexus_base, path):\n assert nexus_base.local_path == path",
"def test_append_slash_disabled(self):\n request = self.rf.get(\"/slash\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)",
"def test_base_path(original_base_path, args):\n if args.skip_redirects:\n return original_base_path\n\n # WARNING: some redirects are hardcoded to production URLs.\n # Both staging and production will rate limit us.\n response = session.head(args.root_url + original_base_path, allow_redirects=True)\n\n if 200 <= response.status_code < 300:\n return response.url.replace('https://www.gov.uk', '').replace(args.root_url, '')\n elif response.status_code == 429:\n response.raise_for_status()\n else:\n if response.status_code not in (410,):\n sys.stderr.write(\"Unexpected response {} for {}\\n\".format(response.status_code, original_base_path))\n return None",
"def test_AlgorithmsIdHandler_GET_MalformedRequest(self):\n searchedId='xyz' + ' ' + '1'\n response = self.testapp.get('/algorithms/' + searchedId, expect_errors=True)\n self.assertEqual(400, response.status_int, msg='Wrong answer code')\n self.assertEqual('application/json', response.content_type)\n self.assertIn('Malformed Data', response.normal_body.decode(encoding='UTF-8'))",
"def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)",
"def test_missing_root_setting(self, settings):\n def _error(*args, **kwargs):\n raise AttributeError\n\n error = MagicMock()\n error.side_effect = _error\n\n settings.MADCAP_FLARE_ROOT.__get__ = error\n\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n {'help_key': 'test-flare'})",
"def test_verify_path2_10(self):\n result, msg = basic.verify_path2(self.file, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)",
"def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status",
"def test_getter_key_error(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'get_child_by_name', return_value=None)\n self.mock_object(root, 'has_attr', return_value=None)\n\n self.assertRaises(KeyError,\n netapp_api.NaElement.__getitem__,\n root, '123')",
"def test_resource_path(self):\n\n # Without arguments\n resources_root_path = os.path.abspath(os.path.join(\n MY_DIRECTORY, '..', '..', 'resources'\n ))\n self.assertEqual(resources_root_path, paths.resource())",
"def test_empty(self):\n self.assertFalse(os.path.exists('/'))",
"def test_url_path(self):\n response = self.client.get('/planner/recipes/')\n self.assertEqual(response.status_code, 200)",
"def test_gettem_using_get(self):\n pass",
"def test_get_storage_invalid_suffix(self):\r\n self.assertRaises(KeyError, self.profile.get_storage, ('testing.json,'))",
"def test_get(self):\n url, port = self.server.address\n\n #couple of basic GETs\n r = self.client.get(\"http://{0}:{1}/\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/200\".format(url, port))\n self.assertEqual(200, r.status_code)\n r = self.client.get(\"http://{0}:{1}/400\".format(url, port))\n self.assertEqual(400, r.status_code)\n\n # GETs with params\n r = self.client.get(\"http://{0}:{1}/get_with_params\".format(url, port),\n params=self.params)\n self.assertEqual(200, r.status_code)\n self.assertEqual(str(self.params), r.text)\n\n # GETs with ...?",
"def test_get_query_subdir(): # ***Incomplete test\n ##########################\n # Arrange.\n outdir = \"outdir\"\n\n ##########################\n # Act.\n #x = get_query_subdir(outdir)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_get(self):\n pass",
"def test_environment_path_subdir_trailingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"keys/\"\n )",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_invalid_path(self, tmp_path):\n other_path = tmp_path / \"other\"\n other_path.mkdir()\n pattern = (\n \"Could not find any of configuration files '.kedro.yml, pyproject.toml'\"\n )\n with pytest.raises(KedroContextError, match=re.escape(pattern)):\n load_context(str(other_path))"
] | [
"0.68525183",
"0.682792",
"0.682792",
"0.67941284",
"0.6463298",
"0.6370032",
"0.6338329",
"0.6308827",
"0.63069284",
"0.6288383",
"0.62703323",
"0.62336504",
"0.6228994",
"0.61508656",
"0.6129853",
"0.61171794",
"0.6103337",
"0.60726905",
"0.6042274",
"0.6037921",
"0.60007167",
"0.6000453",
"0.59572804",
"0.5911264",
"0.59086555",
"0.590164",
"0.588647",
"0.5873285",
"0.58717763",
"0.5869244",
"0.58692086",
"0.5860821",
"0.58440065",
"0.58380485",
"0.58361006",
"0.5820062",
"0.58171546",
"0.5812012",
"0.5805157",
"0.58029485",
"0.57927805",
"0.57916",
"0.578124",
"0.57806754",
"0.5768879",
"0.57679653",
"0.57309455",
"0.5729767",
"0.5711159",
"0.5708755",
"0.5705874",
"0.5685506",
"0.56801516",
"0.56792223",
"0.5679011",
"0.56734717",
"0.5664543",
"0.566085",
"0.5657873",
"0.56574094",
"0.56531256",
"0.56464946",
"0.5638136",
"0.5637684",
"0.56349057",
"0.5626285",
"0.56233895",
"0.5617767",
"0.56108266",
"0.5610455",
"0.56075186",
"0.5606751",
"0.5606602",
"0.55979943",
"0.5596366",
"0.55957186",
"0.558929",
"0.5587888",
"0.55857164",
"0.55856174",
"0.5578683",
"0.55784106",
"0.55768",
"0.5575611",
"0.55694",
"0.55690277",
"0.5567621",
"0.5564866",
"0.5563558",
"0.5556996",
"0.55308586",
"0.5529717",
"0.5523177",
"0.55183214",
"0.5511762",
"0.55049115",
"0.54925424",
"0.54882544",
"0.54807276",
"0.5474418"
] | 0.6692549 | 4 |
Test that the 'get' fails because the resource does not exist. | def test_resource_not_existing(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"connections.non_existing_connection.name",
"value",
],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "Resource connections/non_existing_connection does not exist."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_request_invalid_resource(self):\n response = requests.get(self.url + '/invalid')\n\n self.assertEqual(response.status_code, 404)",
"def test_get_nonexistent_test(self):\n response = self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')",
"def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404",
"def test_request_users_user_invalid_resource(self):\n response = requests.get(self.url + '/users/John/invalid')\n\n self.assertEqual(response.status_code, 404)",
"def test_resource_collection_get_missing_resource(self):\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n resource = collection.get('missing-uri')\n\n self.assertIsNone(resource)",
"async def test_missing(cli):\n response = await cli.get(f'/result/nope')\n assert response.status == 404",
"def test_read_non_existent(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.get(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 999},\n ),\n )\n\n content = {'detail': 'Not found.'}\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_non_existent_issue_fails(self):\n response = self.client.get(self.non_existent_url)\n response_json = response.get_json()\n error_details = response_json[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], ISSUE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], ISSUE_NOT_FOUND_SUB_CODE)",
"def test_get_item_not_found(self):\n resp = self.app.get('/items/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_task_not_found(self):\n task_id = \"foo\"\n\n rv = TEST_CLIENT.get(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = {\n \"message\": \"The specified task does not exist\",\n \"code\": \"TaskNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)",
"def test_get_not_found(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': INVALID_UUID},\n )\n response = self.request_knox(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_fail(self):\n response = self.second_client.get(self.url)\n self.assertEquals(response.status_code, 400)",
"def test_get_not_found(self):\n url = reverse('route', kwargs={'way_id': 999, 'route_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('route', kwargs={'way_id': 100, 'route_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def test_get_template_not_found(self):\n template_id = \"foo\"\n\n rv = TEST_CLIENT.get(f\"/templates/{template_id}\")\n result = rv.json()\n\n expected = {\n \"message\": \"The specified template does not exist\",\n \"code\": \"TemplateNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)",
"def test_get404(self):\n with self.assertRaises(Exception) as context:\n self.api.get(\"card_fake\", limit=2)\n self.assertTrue(\"card not found\" in context.exception.__str__())",
"def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)",
"def test_get_unexisting_book(self):\n\n response1 = self.client.get(\n '/api/v1/books/NJCF4057', content_type='application/json', headers=self.get_admin_token())\n result = json.loads(response1.data.decode())\n self.assertEqual(result['message'],\n 'Book Not Found')\n assert response1.status_code == 404",
"def testNotFound(self):\n response = requests.get(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.place == storage.get(Place, self.place.id))\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_non_existent_item(self):\n\n response = self.client.get('/api/v1/category/200',\n headers=self.attendant_headers)\n self.assertEqual(response.status_code, 404)\n self.assertIn('category with id 200 does not exist',\n str(response.data))",
"def test_list_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_cannot_get_service_from_store_that_does_not_exist(self):\n get_response = self.client.get('/navyget-api/v1/store/5a2bc733791e4bbc9a26f7a5/service/', headers=self.my_header)\n self.assertEqual(get_response.status, \"404 NOT FOUND\")\n self.assertIn(\"That Store does not exist.\", str(get_response.data))",
"def testNotFound(self):\n response = requests.get(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_doesnotexist_exception(self):\r\n with self.assertRaises(TestModel.DoesNotExist):\r\n TestModel.objects.get(test_id=100)",
"def test_get_car_invalid_id():\n response = client.get(\"/11111\")\n assert response.status_code == STATUS_NOT_FOUND",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')",
"def test_get(self):\n self.assertEqual(403, self.response.status_code)",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def test_get_book_with_id_does_not_exist(self):\n\t\tlogin_data = self.register_and_login_in_user()\n\t\ttoken = login_data['auth_token']\n\n\t\t# get book id\n\t\tbook = self.client.get(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json'\n\t\t)\n\n\t\tbook_res = json.loads(book.data.decode())\n\t\tself.assertTrue(book_res['message'] == 'book not found')\n\t\tself.assertEqual(book.status_code, 404)",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_invalid_resource_list_404(self):\n url = reverse(\"resources:resources\", (\"invalid\",))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def test_get_not_found(self):\n url = reverse('notification', kwargs={'way_id': 999, 'notification_id': 100})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)\n\n url = reverse('notification', kwargs={'way_id': 100, 'notification_id': 999})\n response = self.client.get(url)\n self.assertEqual(response.status_code, 400)",
"def testNotFound(self):\n response = requests.post(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_user_404(self):\n resp = self.app.get('/users/thisuserdoesntexist')\n assert resp.status_code == 404",
"def test_non_existent_question(self):\n\n res = self.app.get('/api/v1/questions/'+str(56))\n self.assertEqual(res.status_code, 404)",
"def test_get_non_existent_book_by_id(self):\n response = self.client().get('/api/v1/products/0')\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"That book does not exist\")\n self.assertEqual(response.status_code, 404)",
"def test_request_returns_404(client):\n assert client.get(\"/url_que_nao_existe\").status_code == 404",
"def test_get_object_not_found(self, employee_model):\n employee_model.DoesNotExist = Employee.DoesNotExist\n employee_model.objects.get.side_effect = employee_model.DoesNotExist\n\n with self.assertRaises(Http404):\n self.view.get_object(1)",
"def test_invalid_resource_endpoint_returns_error(self):\n self.add_tasks()\n response = self.app.get('api/v1/tasks/209', follow_redirects=True)\n self.assertEquals(response.status_code, 404)\n self.assertEquals(response.mimetype, 'application/json')\n self.assertIn(b'Element does not exist', response.data)",
"def test_get_a_thing_that_doesnt_exist(self) -> None:\n with self.assertRaises(things.NoSuchThing):\n things.get_a_thing(2)",
"def test_trucks_api_error(self):\n resp = self.app.get('/asfdasdf')\n self.assertEqual(resp.status_code, 404)\n assert \"NOT FOUND\" in resp.status",
"def test_handle_non_existed_assignment_properly(user_client):\n response = user_client.get(\"/api/v1/assignments/999/\")\n assert response.data == {\"detail\": \"Not found.\"}",
"def test_not_found(self):\n\n url = '/%s/job-types/missing-job/1.0.0/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)",
"def test_get_specific_pacient_not_found(self):\n url = '/api/v1/pacientes/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def test_get_inventory_not_found(self):\n resp = self.app.get('/inventories/0')\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_api_404(self):\n r = requests.get('{server}/api/0.1/sam'.format(\n server=self.get_server_url()),\n headers={'accept': 'application/json'})\n self.assertEquals(404, r.status_code)\n self.assertIn('error', r.json())",
"def test_get_single_bad_item(test_client):\n\n response = test_client.get(BAD_ITEM_URL)\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def testNotFound(self):\n data = {'text': 'toto'}\n response = requests.put(url=self.invalid_url, json=data)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertIn('error', json_data)\n self.assertEqual(json_data['error'], 'Not found')",
"def test_get_movie_404(self):\n res = self.client().get('/api/movies/9000')\n self.assertEqual(res.status_code, 404)",
"def test_GET_fetcher_fail():\n bad_url = GET_ECHO_ENDPOINT.replace('.com', '.comx')\n\n with pytest.raises(Exception): #TODO: specific exception?\n resp = wf_utils.fetch_GET_request(bad_url)\n\n #TODO: bad status code tests?",
"def test_not_found(self):\n\n url = '/%s/job-types/missing-job/1.0.0/revisions/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)\n\n # correct job type, bad version\n url = '/%s/job-types/my-job/9.9.9/revisions/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)",
"def test_beneficiaries_retrieve_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve')\n response = self.client.get(url)\n self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)",
"def test_get_nonexistant_data(self):\n response = self.client.get(\"/api/elections/1\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data[\"message\"], \"Could not find election with id 1\")",
"def test_validate_get_single_resource(client):\n response = client.get('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def test_get_meals_with_invalid_url(test_client):\n response = test_client.get(\"/api/v2/menu/\")\n assert response.status_code == 404",
"def test_nonexisting_event(self):\n response = self.client.get(\"/events/1\")\n self.assertEqual(response.status_code, 404)",
"def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')",
"def test_itemidnotvalid_return4042(self):\r\n config = self.__load_config()\r\n url = f\"http://{config['api']['host']}:{config['api']['port']}/gs/api/v1/asdfg\"\r\n r = requests.get(url)\r\n\r\n self.assertEqual(r.status_code, 404)",
"def test_cache_get_non_existent_item(self):\n self.assertEqual(self.cache.get('ghost'), None)\n self.assertEqual(self.cache.get('ghost', 'never exists'), 'never exists')",
"def test_get_image_exists_not(self):\n with self.assertRaises(errors.NotFound):\n self.docker.images.get(\"image_does_not_exists\")",
"def test_failed_get_resource(self, mock_spotify_api_class, invalid_request):\n with patch('bpm.spotify.requests.get') as mock_requests:\n mock_requests.return_value = invalid_request\n result = mock_spotify_api_class.get_resource(\"\")\n assert result == {}",
"def test_fetch_url_not_ok():\n with patch(\"cheddar.index.remote.get\") as mocked:\n mocked.return_value = MagicMock()\n mocked.return_value.status_code = codes.bad_request\n with assert_raises(NotFoundError):\n fetch_url(\"http://example.com\", TIMEOUT, getLogger())",
"def test_retrieve_not_found(self):\n\n # get a valid digest\n content = \"\"\"\\xe1\\xbc\\x84\\xce\\xbd\\xce\\xb4\\xcf\\x81\\xce\\xb1\n \\xce\\xbc\\xce\\xbf\\xce\\xb9\n \\xe1\\xbc\\x94\\xce\\xbd\\xce\\xbd\\xce\\xb5\\xcf\\x80\\xce\\xb5\"\"\"\n namespace = 'default'\n collection = generate_collection(namespace, [content])\n preupload_status = self.call_api(\n 'preupload', self.message_to_dict(collection), 200)\n message = preupload_status.json.get(u'items', [{}])[0]\n\n # get the digest\n request = preupload_status_to_request(message, content)\n embedded = validate(\n request.upload_ticket, handlers_endpoints_v1.UPLOAD_MESSAGES[0])\n\n # don't upload data; try to retrieve\n retrieve_request = handlers_endpoints_v1.RetrieveRequest(\n digest=embedded['d'], namespace=handlers_endpoints_v1.Namespace())\n with self.call_should_fail('404'):\n self.call_api('retrieve', self.message_to_dict(retrieve_request), 200)",
"def test_get_single_movie_incorrect_id(client):\n resp = client.get(f\"/api/movies/{30}/\")\n assert resp.status_code == 404",
"def test_nosuch_detail(self):\n\t\tresponse = self.client.get(\"/post/2/\")\n\t\tself.assertEqual(response.status_code, 404)\n\t\t# We got an error before trying to use a template,\n\t\t# so no template was accessed\n\t\t###print(f\"@@@ {response}\")\n\t\t###print(f\"@@@ @@@ template name: {response.template_name}\")\n\t\t###self.assertTemplateUsed(response, None)\n\t\tself.assertFalse(hasattr(response, \"template_name\"))",
"def test_patch_a_resource_that_does_not_exist():\n pass",
"def test_get_specific_office_not_found(self):\n url = '/api/v1/consultorios/AAA/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)",
"def test_not_found(self):\n with self.assertRaises(UserNotFoundException):\n self._storage.get_by_username(\"test\")",
"async def test_api_get_non_existing_state(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(\"/api/states/does_not_exist\")\n assert resp.status == HTTPStatus.NOT_FOUND",
"def test_returns_404_if_user_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/queries/non_existent/touched/\")\n self.assertEqual(response.status_code, 404)",
"def test_details_not_found(self):\n\n url = '/%s/job-types/missing-job/1.0.0/revisions/9/' % self.api\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)",
"def test_get_for_not_found_team(self):\n user = User.create(name='foo', email='[email protected]')\n user.put()\n response = self.testapp.get(\n '/api/teams/Team_other/users',\n headers=self.login_headers(user),\n status=404\n )",
"def test_feeds_do_not_exist(self):\n rv = self.client.get('/user/who.xml')\n eq_(rv.status_code, 404)\n\n rv = self.client.get('/project/fake.xml')\n eq_(rv.status_code, 404)\n\n rv = self.client.get('/team/not-real.xml')\n eq_(rv.status_code, 404)",
"def test_get_doesnotexist_exception(self):\r\n with self.assertRaises(self.table.DoesNotExist):\r\n self.table.objects.get(test_id=100)",
"def test_invalid_route_is_status_404(self):\n response = self.client.get(\"/bad\")\n self.assertTrue(response.status_code == 404)",
"def testNotFound(self):\n response = requests.delete(url=self.invalid_url)\n headers = response.headers\n json_data = response.json()\n\n self.assertEqual(response.status_code, 404, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertTrue(self.place == storage.get(Place, self.place.id))\n self.assertIn('error', json_data.keys())\n self.assertEqual(json_data['error'], 'Not found')",
"def test_entity_doesnt_exist(self):\n key = ndb.Key(models.InstanceTemplateRevision, 'fake-key')\n urls = snapshots.fetch(key)\n self.failIf(urls)",
"def test_resource_not_existing(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"connections.non_existing_connection.name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Resource connections/non_existing_connection does not exist.\"\n )",
"def test_404_not_found(app, client):\n\n response = client.get(\"/notexistpage\")\n assert response.status_code == 404\n assert \"404 Not Found\" in str(response.data)",
"def assertHttpNotFound(self, resp):\r\n return self.assertEqual(resp.status_code, 404)",
"def test_neg_exists_with_non_existent_data(self, key, ex, ex_code):\n try:\n key, meta = self.as_connection.exists(key)\n assert meta is None\n \"\"\"\n We are making the api backward compatible. In case of RecordNotFound an\n exception will not be raised. Instead Ok response is returned withe the\n meta as None. This might change with further releases.\n \"\"\"\n except ex as exception:\n assert exception.code == ex_code",
"def _does_not_exist():\n response_payload = dict(\n message=\"Recipe does not exist!\"\n )\n response_payload = jsonify(response_payload)\n return make_response(response_payload, 404)",
"def test_get_actor_404(self):\n res = self.client().get('/api/actors/9000')\n self.assertEqual(res.status_code, 404)",
"def test_fake_get_url(self):\n resp = self.app.get('/api/v1/g?url=somefalseurl')\n self.assertEqual(resp.status_code, 500)",
"def test_retrieve_invalid_course(self):\n path = reverse('commerce_api:v1:courses:retrieve_update', args=['a/b/c'])\n response = self.client.get(path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 404",
"def test_get_product_not_found(self):\n resp = self.app.get(\"/products/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)",
"def test_404_page_not_found():\n with app.test_client() as c:\n response = c.get('/this_page_not_exists')\n assert response.status_code == 404",
"def test_nonexistent_user(self):\n self.client.login(username=self.global_staff.username, password=self.password)\n resp = self.client.get(self.get_url('IDoNotExist'))\n assert resp.status_code == status.HTTP_404_NOT_FOUND",
"def test_404(client, route):\n response = client.get(route)\n assert b'Page not found' in response.data\n assert \"404\" in response.status",
"def test_05d_get_nonexistant_app_results_json(self):\r\n res = self.app.get('/app/noapp/24/results.json', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status",
"def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)",
"def test_get_item_details_invalid_id(self, mock_requests_get_404):\n with pytest.raises(exceptions.NoSuchItemException):\n resources.get_item_details(1)",
"def test_GET3(self):\n r = requests.get(self.address)\n self.assertEqual(r.status_code, 400)",
"def test_get_single_user_is_missing(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get('/users/999')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])",
"def test_get(self):\n self.assertEqual(\n self.attempts[0],\n self.resource.get(self.attempts[0][_ATTEMPT.attempt_id]))",
"async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)",
"def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")"
] | [
"0.88033277",
"0.8728782",
"0.8429543",
"0.79759574",
"0.7970379",
"0.7949762",
"0.7726153",
"0.76256686",
"0.7595202",
"0.7590344",
"0.7586807",
"0.7548084",
"0.75071794",
"0.75046843",
"0.7499026",
"0.74956346",
"0.7443056",
"0.7441133",
"0.7414929",
"0.74075717",
"0.7398209",
"0.7398018",
"0.73817956",
"0.7368238",
"0.7368202",
"0.7352406",
"0.73462236",
"0.7345498",
"0.7344628",
"0.73426586",
"0.73311406",
"0.7324583",
"0.7317627",
"0.7312478",
"0.73096305",
"0.7305385",
"0.72995853",
"0.7291707",
"0.72724885",
"0.72713745",
"0.7269546",
"0.7250612",
"0.72033656",
"0.7190982",
"0.71902466",
"0.7188802",
"0.717824",
"0.7167796",
"0.7153777",
"0.7138438",
"0.71378297",
"0.7136229",
"0.7133956",
"0.7133234",
"0.71310365",
"0.7128842",
"0.7127299",
"0.71195906",
"0.71036184",
"0.7102452",
"0.70836526",
"0.7077397",
"0.70515853",
"0.7049563",
"0.70455223",
"0.7044658",
"0.7037198",
"0.70338386",
"0.7029452",
"0.70256853",
"0.7025556",
"0.70216525",
"0.7019422",
"0.70193183",
"0.7018935",
"0.701761",
"0.7016538",
"0.7011433",
"0.70089364",
"0.7006824",
"0.7003226",
"0.6996306",
"0.6994263",
"0.6985008",
"0.6984684",
"0.69780684",
"0.6970884",
"0.69677263",
"0.6958122",
"0.69489324",
"0.69365156",
"0.69358724",
"0.6930312",
"0.6915211",
"0.69026",
"0.6890643",
"0.68791986",
"0.68664974",
"0.6866238",
"0.6865779",
"0.6860837"
] | 0.0 | -1 |
Test that the 'set' fails because the attribute is not found. | def test_attribute_not_found(self):
with pytest.raises(
ClickException,
match="Attribute `non_existing_attribute` is not allowed to be updated!",
):
self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"skills.dummy.non_existing_attribute",
"value",
],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute = \"value\"",
"def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_register_nonexisting_attr(self):\n pass",
"def test_get_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute",
"def test_set_attributes_error(self):\n r = Resources()\n attr_lst = [\"num_wires\", \"num_gates\", \"depth\", \"shots\", \"gate_types\"]\n\n for attr_name in attr_lst:\n with pytest.raises(FrozenInstanceError, match=\"cannot assign to field\"):\n setattr(r, attr_name, 1)",
"def test_bad_get_property(self):\n s = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n with pytest.raises(AttributeError):\n s.bad_get",
"def test_raise_if_no_attr(self):\n self.assertRaises(AttributeError, self.Model.set_primary_key, 'asdf')",
"def test_Alpha_setter_invalid(self):\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', -5)\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', 2)",
"def test_attribute_not_found(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_missing_attribute(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"attribute\": \"missing\",\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n self.opp.states.set(\"sensor.test_state\", \"State\", {\"attr\": \"2\"})\n self.opp.block_till_done()\n self.opp.states.set(\"sensor.test_state\", \"State\", {\"attr\": \"1\"})\n self.opp.block_till_done()\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"off\"",
"def test_attributenamenotfound(self):\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething'])\n self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething'])",
"def test_handle_removals_add_if_named_in_attribute(self):\n self.assertFalse(False)",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_snmpset_unwritable_field():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, oid='SNMPv2-MIB::sysDescr.0',\n community='public', value_type='s',\n value='Test Description', port=SNMP_SRV_PORT)\n assert 'No Such Instance' in str(excinfo.value)",
"def test_bad_property_setting(self):\n s = State(substance=\"water\")\n with pytest.raises(AttributeError):\n # Should be lowercase p\n s.TP = Q_(400.0, \"K\"), Q_(101325.0, \"Pa\")",
"def test_register_existing_attr(self):\n pass",
"def test_that_field_required_validations_are_triggered_on_incorrect_attribute_setting(\n self,\n ):\n person = Person(first_name=\"Johnny\", last_name=\"John\")\n\n with pytest.raises(ValidationError) as error:\n person.first_name = \"\" # Simulate an error by force-resetting an attribute\n\n assert error.value.messages == {\"first_name\": [\"is required\"]}",
"def testSetWithBadString(self):\n def setSat():\n self.node.sat = 'banana'\n\n self.assertRaises(\n TypeError,\n setSat\n )",
"def test_update_attribute_method9(self):\n with self.assertRaises(TypeError):\n r1 = Rectangle(10, 10, 10, 10)\n r1.update(\"put\", \"new\")",
"def test_set_get_incorrect_path(self):\n with pytest.raises(\n ClickException, match=\"Attribute `.*` for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.INCORRECT_PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n\n with pytest.raises(\n ClickException,\n match=\"Attribute `behaviours.dummy.args.behaviour_arg_100500` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n self.INCORRECT_PATH,\n str(self.NEW_VALUE),\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_setAttribute():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute();\n x.setAttribute(\"foo\");\n x.setAttribute(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute(\"onfoo\", \"bar\");\n \"\"\").failed()",
"def test_update_attribute_method8(self):\n with self.assertRaises(ValueError):\n r1 = Rectangle(10, 10, 10, 10)\n r1.update(2, -3)",
"def testSetAttributeAction(self):\n\t action = SetAttributeAction('x', 'y', ('key',), 'z')\n\t self.failUnless(action.field == 'y')\n\t self.failUnless(action.value == 'z')",
"def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_MetadataMap_setter_invalid_input(self):\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n \"foo\")\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n [])\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n {})\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n None)\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n self.overview_dm)",
"def testStrictAssignment(self):\n class SimpleMessage(messages.Message):\n field = messages.IntegerField(1)\n\n simple_message = SimpleMessage()\n self.assertRaises(AttributeError,\n setattr,\n simple_message,\n 'does_not_exist',\n 10)",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='Dam')\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='')\n self.assertRaises(AbilityError, \n AmuletAbility, 'Control NPC', attr='ST')",
"def __setattr__(self, attr: str, _value: t.Any) -> t.NoReturn:\n raise AttributeError(attr)",
"def test_no_metaclass_set(self):\n obj = BadTestObject()\n with self.assertRaises(TypeError):\n obj.test_setting = \"foo\"",
"def test_with_nonexisting_attr(create_file_with_text):\n test_class = KeyValueStorage(create_file_with_text)\n with pytest.raises(ValueError, match=\"No such key\"):\n test_class[\"wrong_attribute\"]",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AttributeAbility, 'Invalid')\n self.assertRaises(AbilityError, AttributeAbility, '', 3)",
"def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)",
"def __setattr__(self, key, value):\n resp = f'Attribute {key} can not be '\n if key in self.__dict__:\n resp += 'changed'\n else:\n resp += 'added'\n raise AttributeError(resp)",
"def testClassNotMutable(self):\n self.assertRaises(AttributeError,\n setattr,\n Color,\n 'something_new',\n 10)",
"def testInstancesMutable(self):\n self.assertRaises(TypeError,\n setattr,\n Color.RED,\n 'something_new',\n 10)",
"def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"",
"def test_no_password_getter(self):\n self.user.password = '123456'\n with self.assertRaises(AttributeError):\n self.user.password",
"def test_name_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.name = 'bar'\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_missing_attribute(self):\n with self.assertRaises(ImproperlyConfigured):\n import_from_setting('TEST_SETTING')",
"def test_string_attribute_errors(self):\n self.inventory = Inventory()\n with self.assertRaises(AttributeError):\n self.inventory.add_coconut('south asian')",
"def testExceptionRaisedBySetattr(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g = 6\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ttry:\n\t\t\tx.g = 6\n\t\t\tself.fail()\n\t\texcept Exception, e:\n\t\t\tpass",
"def test_set_attribute_with_response_returning_no_place():\n response_nok = {\n 'candidates': [],\n 'status': 'ZERO_RESULTS'}\n place = Place()\n place.response = response_nok\n place.set_attribute()\n assert place.status is not True\n assert place.place_id == \"\"\n assert place.name == \"\"\n assert place.address == \"\"",
"def test_set_value_invalid(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = 'invalid'\r\n initial_value = self.config.values[name]\r\n\r\n self.assertRaises(InvalidOptionValueError, self.config.set_value, name, option, value)\r\n self.assertEqual(self.config.values[name], initial_value)",
"def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")",
"def test_members_are_set_when_args_are_invalid(self):\n\n self.assertRaises(ValueError, Vec3, \"abc\", 6, \"q\")",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_setter_invalid_value(self):\n root = netapp_api.NaElement('root')\n try:\n root['k'] = netapp_api.NaServer('localhost')\n except Exception as e:\n if not isinstance(e, TypeError):\n self.fail(_('Error not a TypeError.'))",
"def test_setter_invalid_value(self):\n root = netapp_api.NaElement('root')\n try:\n root['k'] = netapp_api.NaServer('localhost')\n except Exception as e:\n if not isinstance(e, TypeError):\n self.fail(_('Error not a TypeError.'))",
"def test_patch_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.patch = 234\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_existing_attribute(self):\n self.assertEqual(import_from_setting('TEST_SETTING'), 1)",
"def test_set_fails_when_setting_non_primitive_type(self):\n with pytest.raises(\n ClickException, match=\"Attribute `behaviours` is not allowed to be updated!\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.behaviours\", \"value\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_value_not_str(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.assertRaises(TypeError, lambda: self.helper.set_value([\"Hello\", \"World!\"]))",
"def test_get_set_raises(simple_param):\n for kwargs in ({'set_cmd': None}, {'get_cmd': None}):\n with pytest.raises(KeyError) as e:\n DelegateParameter('test_delegate_parameter', simple_param, **kwargs)\n assert str(e.value).startswith('\\'It is not allowed to set')",
"def test_set_attributes_missing_key(test_common_dao):\n _session = test_common_dao.RAMSTK_SESSION(\n bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKSiteInfo).first()\n\n ATTRIBUTES.pop('product_key')\n\n _error_code, _msg = DUT.set_attributes(ATTRIBUTES)\n\n assert _error_code == 40\n assert _msg == (\"RAMSTK ERROR: Missing attribute 'product_key' in attribute \"\n \"dictionary passed to RAMSTKSiteInfo.set_attributes().\")\n\n ATTRIBUTES['product_key'] = '0000'",
"def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)",
"def test_get_fails_when_setting_non_dict_attribute(self):\n behaviour_arg_1 = \"behaviour_arg_1\"\n path = f\"skills.dummy.behaviours.dummy.args.{behaviour_arg_1}.over_the_string\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, \"new_value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{behaviour_arg_1}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_remove_a_single_attribute(self):\n pass",
"def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)",
"def test_attribute_setters(self):\n test = self.test\n test.id = 2\n test['name'] = 'bar'\n\n self.assertEqual(test.id, 2)\n self.assertEqual(test['name'], 'bar')",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_classproperty_without_fget(self):\n p = classproperty()\n with self.assertRaises(AttributeError):\n p.__get__('x')",
"def test_no_metaclass_get(self):\n obj = BadTestObject()\n with self.assertRaises(TypeError):\n x = obj.test_setting",
"def test_media_attribute_is_fine_after_being_set():\n b = MediaBag()\n b.media = None\n assert b.media is None",
"def testSetPowerWithBadString(self):\n def setPower():\n self.node.power = 'banana'\n\n self.assertRaises(\n TypeError,\n setPower\n )",
"def test_set_readonly_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.kMDItemDateAdded = datetime.datetime.now()",
"async def test_set_only_target_temp_bad_attr(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"temperature\") == 119\n with pytest.raises(vol.Invalid):\n await common.async_set_temperature(opp, None, ENTITY_WATER_HEATER)\n assert state.attributes.get(\"temperature\") == 119",
"def testSetOffsetWithBadString(self):\n def setOffset():\n self.node.offset = 'banana'\n\n self.assertRaises(\n TypeError,\n setOffset\n )",
"def test_init_attributes(self):\n t = self.Test({'id': 1, 'poop': 'abc'})\n\n self.assertEqual(t.id, 1)\n self.assertEqual(t.name, None)\n self.assertRaises(AttributeError, t.__getattribute__, 'poop')",
"def _setAttr(self, attrName, value):\n\n if (value not in (None, \"\")):\n setattr(self, attrName, value)",
"def validateAttribute(self, attributeName):\n if (not attributeName in self._attributes):\n raise pcssErrors.PcssGlobalException(\"Error: attempted to set attribute %s which is not a valid pfa attribute\" % attributeName)",
"def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)",
"def test_fieldname_exc(self):\n ds = self.f.create_dataset('foo', (100,), 'f')\n self.assertRaises(ValueError, ds.__getitem__, (0, 'a'))",
"def test_no_metaclass_get(self):\n obj = BadTestObject()\n with self.assertRaises(TypeError):\n del obj.test_setting",
"def test_invalid_assignment():\n with pytest.raises(TypeError):\n PropertyAndConditions(property=LinkByUID('id', 'a15'))\n with pytest.raises(TypeError):\n PropertyAndConditions(property=Property(\"property\"),\n conditions=[Condition(\"condition\"), LinkByUID('scope', 'id')])",
"def test_set_molecule_error(self):\n mol = Molecule.from_smiles(\"CCO\")\n atom = Atom(6, 0, False)\n atom.molecule = mol\n with pytest.raises(AssertionError, match=\"already has an associated molecule\"):\n atom.molecule = mol",
"def test_page_getattr_should_not_exist(test_page):\n test_page.navigate()\n\n with pytest.raises(AttributeError):\n assert test_page.foobar()",
"def testDirectorySetBadType(self):\n def setDirectory():\n self.mr.directory = 12345\n\n self.assertRaises(\n TypeError,\n setDirectory\n )",
"def test_get_property_missing(self):\r\n try:\r\n value = self.config.option2\r\n assert value\r\n except Exception as e:\r\n self.assertIsInstance(e, OptionValueNotSetError)\r\n self.assertNotIn('option2', self.config.values)",
"def test_update_metadata_by_attribute(self):\n pass",
"def test_del_attribute_is_assigned_properly(self):\r\n class DelModel(Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n key = columns.Integer(primary_key=True)\r\n data = columns.Integer(required=False)\r\n\r\n model = DelModel(key=4, data=5)\r\n del model.data\r\n with self.assertRaises(AttributeError):\r\n del model.key",
"def test_class_attribute_identity(self):\n self.assertIsNot(self.aldous, self.__class__.aldous)",
"def test_setLastTwiceFails(self):\n m = MessageSet(1, None)\n m.last = 2\n with self.assertRaises(ValueError):\n m.last = 3",
"def testFilenameSetBadType(self):\n def setFilename():\n self.mr.filename = 12345\n\n self.assertRaises(\n TypeError,\n setFilename\n )",
"def test_unknown_names_raise_exception(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n with self.assertRaises(TypeError):\r\n tm.update(jon='beard')",
"def test_one_att(self):\n self.test_attribute.is_down = mock.Mock(return_value=False)\n self.run_mock_analyzer([self.test_attribute, ])\n self.assert_mock_analyzer(self.test_attribute)",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def test_version_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.version = '3.4'\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_other_user_kvs_set_failure(self):\r\n with self.assertRaises(AssertionError):\r\n self.kvs.set(self.other_key_factory(self.existing_field_name), \"new_value\")",
"def test_data_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.data = 0",
"def test_query_for_unknown_attribute_raise(test_store):\n query: Generator[Person, None, None] = test_store.get_by(not_the_droids=9000)\n\n with pytest.raises(AttributeError):\n next(query)",
"def test_get_tag_fail(self):\n self.assertRaises(AttributeError, get_tag, None, \"h1\")\n self.assertRaises(\n AttributeError, get_tag, \"<h1>This is not a XML tag object</h1>\", \"h1\"\n )",
"def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )",
"def test_should_raise_error_for_duplicate_names(self):\r\n self.edge_spec['label'] = 'updated_at'\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement(self.property_spec)\r\n self.spec_parser.parse_statement(self.edge_spec)",
"def test_set_attr(self):\n self.my_city.name = \"Denver\"\n self.assertEqual(self.my_city.name, \"Denver\")",
"def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})"
] | [
"0.78650194",
"0.7669831",
"0.7593095",
"0.7491456",
"0.7206654",
"0.70923287",
"0.70798254",
"0.70552427",
"0.6949441",
"0.69461375",
"0.6840308",
"0.6822182",
"0.6748613",
"0.6726124",
"0.6721214",
"0.6721214",
"0.6691278",
"0.66883737",
"0.6682688",
"0.66346335",
"0.6634397",
"0.6632628",
"0.66079485",
"0.6588782",
"0.6571088",
"0.65704936",
"0.656591",
"0.65612155",
"0.6559968",
"0.6547403",
"0.65283203",
"0.6518682",
"0.6510226",
"0.6482266",
"0.6481634",
"0.6466367",
"0.64524585",
"0.6427973",
"0.64240384",
"0.6416524",
"0.64076",
"0.6406858",
"0.6383166",
"0.6380598",
"0.6376526",
"0.6366153",
"0.6360683",
"0.63559806",
"0.63471663",
"0.6345246",
"0.63450944",
"0.63450944",
"0.632803",
"0.6323622",
"0.6321944",
"0.6311353",
"0.62991875",
"0.62970763",
"0.6295671",
"0.6292501",
"0.62713677",
"0.62490565",
"0.62398297",
"0.62269866",
"0.6226216",
"0.62011683",
"0.6200171",
"0.61997396",
"0.6178872",
"0.6172782",
"0.6172284",
"0.61644405",
"0.6163501",
"0.6157975",
"0.61442685",
"0.6142601",
"0.61329806",
"0.61303943",
"0.61275136",
"0.6121304",
"0.6106408",
"0.6097007",
"0.60948336",
"0.60935944",
"0.6090988",
"0.6073719",
"0.6072354",
"0.6069608",
"0.6068801",
"0.6060352",
"0.6049191",
"0.6036052",
"0.6034057",
"0.6021171",
"0.6021078",
"0.6019254",
"0.6013579",
"0.6011185",
"0.60040706",
"0.6003507"
] | 0.75652254 | 3 |
Test that setting the 'dummy' skill behaviours fails because not a primitive type. | def test_set_fails_when_setting_non_primitive_type(self):
with pytest.raises(
ClickException, match="Attribute `behaviours` is not allowed to be updated!"
):
self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "skills.dummy.behaviours", "value"],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dummy(self):\n pass",
"def test_handler_no_type_hints(self):\n with self.assertRaises(ValueError):\n\n @intent_handler\n def decorated_test(context, param):\n return None",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def testTypeSingle(self):\n prop = make_prop(kind=bool)\n with self.assertRaises(TypeError):\n prop.interpret(1, {})\n\n self.assertEqual(True, prop.interpret(True, {}))",
"def test_handler_no_type_hints_param(self):\n with self.assertRaises(ValueError):\n\n @intent_handler\n def decorated_test(param):\n return None",
"def testTheType(self, theTestType):\n \n pass",
"def testPowerBadType(self):\n def setPower():\n self.cc.power = 'ban'\n\n self.assertRaises(\n TypeError,\n setPower\n )",
"def test_dummy():",
"def _dummy(*args, **kwargs):\n pass",
"def test_patch_none():",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def test_no_source():\n assert get_type_hints(int) == {}",
"def test_handle(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.act()",
"def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")",
"def test_no_bleedthrough():\n\n @type_checked\n def _run_test(*args, ok:int, then:float, well:bool, **kwargs:str):\n assert args == (\"12\", 4, None, 19.9)\n assert ok == 90\n assert then == 17.2\n assert well is True\n assert kwargs == {\"one\": \"111\", \"two\": \"22.2\"}\n\n _run_test(\"12\", 4, None, 19.9, ok=\"90\", then=\"17.2\", well=\"True\", one=111,\n two=22.2)",
"def test_badyvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, True, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_sample_one_sample_type(self):\r\n self.assertEqual(self.test_sample.sampleType, 'TUMOUR')",
"def test_is_not_missed():\n game = Game()\n game.word = 'word'\n assert game.is_missed('w') is False",
"def test_return_types():\n my_method = SGMethod(\"Test\")\n \n my_method.return_type = \"SoundEffect\"\n assert my_method.return_type == \"SoundEffect\"",
"def test(self) -> Any:\n pass",
"def testNoSpecialties(self):\n self.failUnlessEqual(self.person.getSpecialties(), [])",
"def allow(self, test):\n raise NotImplementedError()",
"def test_default_sound_system(self):\n\n self.assertFalse(self.mc.machine_config['sound_system']['enabled'])\n self.assertIsNone(self.mc.sound_system)",
"def test_dummy_test():\n pass",
"def setUp(self):\n self.false_int = \"A\"",
"def test_bad_property_setting(self):\n s = State(substance=\"water\")\n with pytest.raises(AttributeError):\n # Should be lowercase p\n s.TP = Q_(400.0, \"K\"), Q_(101325.0, \"Pa\")",
"def _dummy(*args, **kwargs):\n err_str = \"\"\n raise NotImplementedError()",
"def test_work_without_activity(human):\n with pytest.raises(AttributeError):\n human.work()",
"def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_theft_and_stealing(self):",
"def dummy(*args, **kwargs):\r\n pass",
"def constrained_lens_object_test():\n return # TODO",
"def test_examiningNonThing(self):\n t = objects.Thing(name=u\"magic stone\", store=self.store)\n t.powerUp(MagicStone(thing=t, store=self.store))\n t.moveTo(self.location)\n\n self.assertCommandOutput(\n \"look at rune\",\n [\"It's too dark to see.\"],\n [])\n self.lighting.candelas = 100\n self.assertCommandOutput(\n \"look at rune\",\n [\"A totally mystical rune.\"],\n [])",
"def test_light_no_data(self):\n light = Light({})\n\n assert light.warning is None\n assert light.off is None",
"def dummy_method_silent(self):\n\n pass",
"def before_any(self) -> None:",
"def test_handler_fail_silent(self):\n\n @intent_handler\n def date_test(date: datetime.date):\n return date\n\n r = create_request(\"TEST_CONTEXT\", date=[\"not a date\"])\n result = date_test(r)\n self.assertIsInstance(result, EntityValueException)\n\n @intent_handler\n def int_test(integer: int):\n return integer\n\n r = create_request(\"TEST_CONTEXT\", integer=[\"not a number\"])\n result = int_test(r)\n self.assertIsInstance(result, EntityValueException)",
"def test_is_missed():\n game = Game()\n game.word = 'word'\n assert game.is_missed('x') is True",
"def test_datatype():\n\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float32\n\n pf.set_datatype(torch.float64)\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float64\n pf.set_datatype(torch.float32)\n\n with pytest.raises(TypeError):\n pf.set_datatype(\"lala\")",
"def notEnabledDummy(self, ev):\n pass",
"def testSlopeBadType(self):\n def setSlope():\n self.cc.slope = 'ban'\n\n self.assertRaises(\n TypeError,\n setSlope\n )",
"def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()",
"def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])",
"def test_sample_type(self):\r\n \r\n self.assertEqual(self.test_sample.sampleType, 'TUMOUR')",
"def test_RandomFunction_setter_invalid_input(self):\r\n self.assertRaises(TypeError, setattr, self.cs_overview,\r\n 'RandomFunction', 42)\r\n self.assertRaises(TypeError, setattr, self.cs_overview,\r\n 'RandomFunction', 42.0)\r\n self.assertRaises(TypeError, setattr, self.cs_overview,\r\n 'RandomFunction', \"j\")\r\n self.assertRaises(TypeError, setattr, self.cs_overview,\r\n 'RandomFunction', None)\r\n self.assertRaises(TypeError, setattr, self.cs_overview,\r\n 'RandomFunction', [])\r\n self.assertRaises(TypeError, setattr, self.cs_overview,\r\n 'RandomFunction', ())\r\n self.assertRaises(TypeError, setattr, self.cs_overview,\r\n 'RandomFunction', {})",
"def is_yummy(self):\n return False",
"def test_miscellaneous_stateful(self) -> None:\n\n my_unit = Dummy()\n\n # assert that the grad scaler is stored in the app_state\n self.assertEqual(my_unit.app_state()[\"grad_scaler_e\"], my_unit.grad_scaler_e)\n\n # delete the attribute\n # pyre-fixme[8]: Attribute has type `GradScaler`; used as `None`.\n my_unit.grad_scaler_e = None\n\n # the attribute should be removed from tracked_misc_statefuls\n self.assertFalse(\"grad_scaler_e\" in my_unit.tracked_misc_statefuls())",
"def test_types(question):\n instance = question[\"instance\"]\n for name, data in instance.get(\"variables\", {}).items():\n assert \"optional\" not in data or isinstance(data[\"optional\"], bool)\n if data.get(\"type\") == \"boolean\":\n assert \"value\" not in data or isinstance(data[\"value\"], bool)\n elif data.get(\"type\") in [\"integer\", \"long\"]:\n assert \"value\" not in data or isinstance(data[\"value\"], int)",
"def test_dummy_test(self):\n self.assertTrue(True)",
"def dummy_fn(self):\n\t\tpass",
"def test_domain_and_target_type(self):\n t = OneHotEncode(3)\n assert t.domain_type == \"integer\"\n assert t.target_type == \"real\"",
"def test__includes_custom_type_positive(self, *mocks):\n content_type = \"Union[str]\"\n result = self.protocol_generator._includes_custom_type(content_type)\n self.assertTrue(result)\n\n content_type = \"Optional[str]\"\n result = self.protocol_generator._includes_custom_type(content_type)\n self.assertTrue(result)",
"def test_no_metaclass_set(self):\n obj = BadTestObject()\n with self.assertRaises(TypeError):\n obj.test_setting = \"foo\"",
"def test1SetBuiltinTypes( self ):\n\n from AthExHelloWorld.AthExHelloWorldConf import HelloAlg\n\n HelloWorld = HelloAlg( 'HelloWorld' )\n\n HelloWorld.MyInt = 42\n HelloWorld.MyBool = True\n HelloWorld.MyDouble = 3.14159\n HelloWorld.MyStringVec = [ \"Welcome\", \"to\", \"Athena\", \"Framework\", \"Tutorial\" ]\n HelloWorld.MyStringVec += [ \"!\" ]\n HelloWorld.MyDict = { 'Bonjour' : 'Guten Tag',\n 'Good Morning' : 'Bonjour' , 'one' : 'uno' }\n HelloWorld.MyDict[ \"Goeiedag\" ] = \"Ni Hao\"\n HelloWorld.MyTable = [ ( 1 , 1 ) , ( 2 , 4 ) , ( 3 , 9 ) ]\n HelloWorld.MyTable += [ ( 4, 16 ) ]\n HelloWorld.MyMatrix = [ [ 1, 2, 3 ],\n [ 4, 5, 6 ] ]\n HelloWorld.MyMatrix += [ [ 7, 8, 9 ] ]\n\n HelloWorld.setup()\n\n self.assertEqual( HelloWorld.MyInt, 42 )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyInt', '42' ) )\n\n self.assertEqual( HelloWorld.MyBool, True )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyBool', 'True' ) )\n\n self.assertEqual( round( HelloWorld.MyDouble - 3.14159, 8 ), 0. )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyDouble', '3.14159' ) )\n\n # the following may be too sensitive to non-consequential changes in formatting\n self.assertEqual( HelloWorld.MyStringVec,\n [ \"Welcome\", \"to\", \"Athena\", \"Framework\", \"Tutorial\", \"!\" ] )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyStringVec',\n \"['Welcome', 'to', 'Athena', 'Framework', 'Tutorial', '!']\" ) )\n\n self.assertEqual( HelloWorld.MyDict,\n {'Bonjour': 'Guten Tag', 'one': 'uno', 'Goeiedag': 'Ni Hao', 'Good Morning': 'Bonjour'} )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyDict',\n {'Bonjour': 'Guten Tag', 'one': 'uno', 'Goeiedag': 'Ni Hao', 'Good Morning': 'Bonjour'} ) )\n self.assertEqual( HelloWorld.MyTable, [(1, 1), (2, 4), (3, 9), (4, 16)] )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyTable',\n \"[(1, 1), (2, 4), (3, 9), (4, 16)]\" ) )\n\n self.assertEqual( HelloWorld.MyMatrix, [[1, 2, 3], [4, 5, 6], [7, 8, 9]] )\n self.assertTrue( JobOptionsSvc.verify( 'HelloWorld', 'MyMatrix',\n \"[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\" ) )",
"def test_special_zero(self):\n x = py_function(0)\n if x != 100:\n self.fail(\"Zero is special, py_function(0) did not return 100\")",
"def testPsychDifficulties(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"difficulties\")\n\n self.util.intPropertyTest(self, attr, \"difficulties\")",
"def test_nothing(self):",
"def test_inputs_are_needed():\n with pytest.raises(TypeError):\n song_decoder()",
"def test_snmpset_non_existant_type():\n with pytest.raises(SNMPWriteError) as excinfo:\n snmpset(ipaddress=SNMP_SRV_ADDR, community='public',\n oid='SNMPv2-MIB::sysName.0', value_type='z',\n value='Test Description', port=SNMP_SRV_PORT)\n assert str(excinfo.value) == 'The type value you specified does not ' \\\n 'match one of the accepted type codes.\\n' \\\n 'Valid type codes are one of ' \\\n '(i|u|t|a|o|s|x|d|b)'",
"def test_03_visit_special(self):",
"def testworsethan(self):\n a = AttributeAbility(['ST',], 3)\n self.assertFalse(a.worsethan(a))\n self.assertFalse(a.worsethan(AttributeAbility(['ST',], 3)))\n self.assertFalse(a.worsethan(AttributeAbility(['ST',], 2)))\n self.assertTrue(a.worsethan(AttributeAbility(['ST',], 4)))",
"def test_default_axis_type(i07_nexus: I07Nexus, description):\n assert i07_nexus.default_axis_type == description",
"def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_stealable(self):\r\n prod = Product(name='Test Product',\r\n weight=100, price=1,\r\n flammability=0.5)\r\n self.assertEqual(prod.stealability(), \"Not so stealable...\")",
"def test_hood_no_data(self):\n hood = Hood({})\n\n assert hood.warning is None\n assert hood.closed is None",
"def test_sample_one_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')",
"def test_not_supported():\n assert get_accessory(None, State('demo.demo', 'on'), 2, config=None) \\\n is None",
"def test_verbose_non_bool_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(verbose=1)",
"def test_default_product_flammability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.flammability, .5)",
"def test(self, x={}, **kwargs):\n return 0",
"def testInitFail():\n with pytest.raises(NotImplementedError):\n naiveConf.NaiveConf([1,2,3])\n with pytest.raises(NotImplementedError):\n naiveConf.NaiveConf(0)",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def test_default_product_flammability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.flammability, 0.5)",
"def test_default_product_flammability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.flammability, 0.5)",
"def test_bad_probabilities(self):\n categories = {\"asdfa\": 0.05, 2: 0.2, 3: 0.3, 4: 0.4}\n with pytest.raises(ValueError):\n Categorical(\"yolo\", categories, shape=2)",
"def test_cast_string_failure(self) -> None:\n self.flag.flag_type = None\n self.assertRaises(ValueError, self.flag.cast_string, '42')",
"def test_matcher_on_instance(self):\n\n skill = _TestSkill(None, None)\n self.assertTrue(hasattr(skill.hello_skill, \"matchers\"))",
"def testTypeFancy(self):\n prop = make_prop(kind=config.List(int))\n for value in (1, 'hi', [3, 'test']):\n with self.assertRaises(TypeError):\n prop.interpret(value, {})\n\n self.assertEqual([2, 3], prop.interpret([2, 3], {}))",
"def test_int_to_listed():\n\n @type_checked\n def _run_test(thing:[int]=None):\n assert thing == [15]\n\n _run_test(\"15.0\")",
"def test_term_chars_default(self, instrument):\n assert instrument.term_chars is None",
"def test_indicate(self):\n self.objective.Indicate()",
"def test_indicate(self):\n self.objective.Indicate()",
"def testProtocolSetBadType(self):\n def setProtocol():\n self.mr.protocol = 12345\n\n self.assertRaises(\n TypeError,\n setProtocol\n )",
"def test_bool(self):\n mute_map = MutableMap()\n\n assert not mute_map\n\n mute_map['str_val'] = 'test'\n\n assert mute_map",
"def test_dummy():\n assert 1 == 1",
"def test(self):\n raise NotImplementedError",
"def test_roll_or_hold(self):\n INPUT.side_effect = ['R', 'H', 'h', 'z', '12345', 'r']\n pig = game.pig.Pig('PlayerA', 'PlayerB')\n self.assertEqual(pig.roll_or_hold(), 'roll')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'hold')\n self.assertEqual(pig.roll_or_hold(), 'roll')",
"def bad(self):\n raise NotImplementedError",
"def bad(self):\n raise NotImplementedError",
"def test_no_abstract_syntax_match(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n msg = (\n r\"No presentation context for 'Verification SOP Class' has been \"\n r\"accepted by the peer \"\n r\"for the SCU role\"\n )\n with pytest.raises(ValueError, match=msg):\n assoc.send_n_set(None, Verification, None)\n\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()",
"def test_no_attributes(self):\n self.run_mock_analyzer([])\n eq_(self.obj.analyze_attribute.called, False)",
"def test_X_normalization_not_raw(self):\n\n # Assign a real value to X while X_normalization is 'none'\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n print(\"FOO\", self.validator.warnings)\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear \"\n \"to have raw counts (integers)\"\n ],\n )",
"def test_vote_nohint(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'answer': '24.0', 'hint': '25', 'pk_list': '[]'}\r\n dict_out = mock_module.tally_vote(json_in)\r\n self.assertTrue(dict_out == {'error': 'Failure in voting!'})",
"def testDefault(self):\n for val in (1, {}, 'test', None):\n prop = make_prop(default=val)\n self.assertEqual(val, prop.interpret(recipe_api.PROPERTY_SENTINEL, {}))",
"def test_model_initialization():\n MyModel(\"model\", SkillContext())",
"def nulltest():",
"def test_tracked_modules(self) -> None:\n\n my_unit = Dummy()\n\n # assert that the attributes are stored in tracked_modules\n self.assertEqual(my_unit.tracked_modules()[\"module_a\"], my_unit.module_a)\n self.assertEqual(my_unit.tracked_modules()[\"loss_fn_b\"], my_unit.loss_fn_b)\n\n # delete the attributes\n # pyre-fixme[8]: Attribute has type `Linear`; used as `None`.\n my_unit.module_a = None\n # pyre-fixme[8]: Attribute has type `CrossEntropyLoss`; used as `None`.\n my_unit.loss_fn_b = None\n\n # the attributes should be removed from tracked_modules\n self.assertFalse(\"module_a\" in my_unit.tracked_modules())\n self.assertFalse(\"loss_fn_b\" in my_unit.tracked_modules())",
"def test_get_fails_when_setting_non_dict_attribute(self):\n behaviour_arg_1 = \"behaviour_arg_1\"\n path = f\"skills.dummy.behaviours.dummy.args.{behaviour_arg_1}.over_the_string\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, \"new_value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{behaviour_arg_1}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_empty_dict_failure():\n\n @type_checked\n def _run_test(thing:{}): pass\n\n with pytest.raises(TypeError):\n _run_test(1)",
"def testDoNotEncodeStrangeObjects(self):\n class BogusObject(object):\n\n def check_initialized(self):\n pass\n\n self.assertRaises(TypeError,\n protojson.encode_message,\n BogusObject())"
] | [
"0.611732",
"0.5922523",
"0.5780935",
"0.577883",
"0.57609355",
"0.57075256",
"0.56813645",
"0.56792086",
"0.55930203",
"0.5585353",
"0.5578745",
"0.55738044",
"0.55481416",
"0.5526285",
"0.5522617",
"0.55186546",
"0.55089563",
"0.54697376",
"0.54681265",
"0.5462291",
"0.5453334",
"0.54506284",
"0.544105",
"0.5422537",
"0.54190695",
"0.53859216",
"0.5379288",
"0.5372681",
"0.53722334",
"0.5362338",
"0.5357833",
"0.53554976",
"0.53447574",
"0.53438264",
"0.5343594",
"0.5340054",
"0.5339612",
"0.5328386",
"0.5326592",
"0.5321898",
"0.532109",
"0.53209203",
"0.53164834",
"0.5313499",
"0.53128",
"0.53081197",
"0.53056395",
"0.5303399",
"0.52987975",
"0.52986175",
"0.529481",
"0.52869654",
"0.52728355",
"0.526832",
"0.5237663",
"0.52310675",
"0.52204293",
"0.52180433",
"0.52034426",
"0.52014756",
"0.5192381",
"0.5186479",
"0.5181215",
"0.5179231",
"0.51789546",
"0.5178702",
"0.51785165",
"0.5176621",
"0.5175985",
"0.51752603",
"0.5174671",
"0.51718086",
"0.51689696",
"0.51689696",
"0.51625574",
"0.5160124",
"0.515467",
"0.514884",
"0.5139475",
"0.5131156",
"0.51260704",
"0.51260704",
"0.5121858",
"0.51169205",
"0.51148576",
"0.5114338",
"0.5112845",
"0.5110512",
"0.5110512",
"0.51097596",
"0.5107081",
"0.5104925",
"0.5104193",
"0.5093535",
"0.5092468",
"0.5092131",
"0.50917655",
"0.509053",
"0.5089225",
"0.50882506"
] | 0.7457731 | 0 |
Test that setting a nested object in 'dummy' skill fails because path is not valid. | def test_get_fails_when_setting_nested_object(self):
with pytest.raises(
ClickException,
match=r"Attribute `non_existing_attribute.dummy` is not allowed to be updated!",
):
self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"skills.dummy.non_existing_attribute.dummy",
"new_value",
],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output",
"def test_set_with_shallow_path():\n shallow_key_path = 'shallow_key_path'\n test_value = 'shallow key path value'\n\n config.set(shallow_key_path, test_value)\n assert config.get(shallow_key_path) == test_value",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))",
"def test_invoke_invalid_object(mock_boto3_client, mock_boto3_resource):\n from odl_datalake_ingestion import lambda_handler\n mock_context = MockContext()\n mock_event[\"Records\"][0][\"s3\"][\"object\"][\"key\"] = \"this/path/doesnt/exist.ext\"\n lambda_handler(mock_event, mock_context)",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent\", \"data\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))",
"def test_add_path(self):\n path = 'C:\\\\test\\\\'\n info = self.api.add_path(path, tags=['asd'])\n self.assertEqual(info['value'], path)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def testInitialize(self):\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n data_stream=u'test', location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=u'/test', parent=None)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(inode=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec, bogus=u'BOGUS')",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_set_without_path_sets_the_root(self):\n mock_config = {'foo': 'bar'}\n root_config = Config()\n root_config.set(value=mock_config)\n self.assertDictEqual(root_config.get(), mock_config)",
"def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def test_json_error(self):\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_Path",
"def test_nested(cls, value, res):\n\tobj = cls(value, DEFAULT_POD)\n\tassert obj == res",
"def test_set_item(self):\n content = json.dumps({\n \"nb\": \"test-nb\",\n \"en\": \"test-en\",\n })\n structure = MultiLingualTextStructure(content, use_default_for_empty=True)\n\n self.assertEqual(structure[\"nb\"], \"test-nb\")\n self.assertEqual(structure[\"en\"], \"test-en\")\n structure[\"nb\"] = \"changed-nb\"\n self.assertEqual(structure[\"nb\"], \"changed-nb\")\n self.assertEqual(structure[\"en\"], \"test-en\")",
"def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None",
"def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')",
"def test_set_item_from_outside(self):\n\n expected = {\n self.file_to_test: {\n \"hello.world\": {\n \"included_at_epoch\": 190.0,\n \"included_at_iso\": \"1970-01-01T01:03:10\",\n \"last_retested_at_epoch\": 190.0,\n \"last_retested_at_iso\": \"1970-01-01T01:03:10\",\n \"status\": PyFunceble.STATUS.official.invalid,\n },\n \"world.hello\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.down,\n },\n },\n }\n\n self.inactive_db.database = {\n self.file_to_test: {\n \"world.hello\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.down,\n },\n },\n }\n\n self.inactive_db[\"hello.world\"] = {\n \"included_at_epoch\": 190.0,\n \"included_at_iso\": \"1970-01-01T01:03:10\",\n \"last_retested_at_epoch\": 190.0,\n \"last_retested_at_iso\": \"1970-01-01T01:03:10\",\n \"status\": PyFunceble.STATUS.official.invalid,\n }\n\n self.assertEqual(expected, self.inactive_db.database)",
"async def test_update_with_json_attrs_with_json_attrs_path(hass: HomeAssistant) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n json={\n \"toplevel\": {\n \"master_value\": \"123\",\n \"second_level\": {\n \"some_json_key\": \"some_json_value\",\n \"some_json_key2\": \"some_json_value2\",\n },\n },\n },\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes_path\": \"$.toplevel.second_level\",\n \"json_attributes\": [\"some_json_key\", \"some_json_key2\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"headers\": {\"Accept\": \"text/xml\"},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == \"123\"\n assert state.attributes[\"some_json_key\"] == \"some_json_value\"\n assert state.attributes[\"some_json_key2\"] == \"some_json_value2\"",
"def test_circular_nested(self):\n obj = {}\n obj[\"list\"] = [{\"obj\": obj}]\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)",
"def testDirectorySetBadType(self):\n def setDirectory():\n self.mr.directory = 12345\n\n self.assertRaises(\n TypeError,\n setDirectory\n )",
"def testInitialize(self):\n path_spec = apm_path_spec.APMPathSpec(parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n location='/apm2', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n entry_index=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n entry_index=1, location='/apm2', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n with self.assertRaises(ValueError):\n apm_path_spec.APMPathSpec(parent=None)\n\n with self.assertRaises(ValueError):\n apm_path_spec.APMPathSpec(\n parent=self._path_spec, bogus='BOGUS')",
"def test_settings_items(mock_empty_os_environ):\n climate = core.Climate()\n climate.update({\"a\": {\"b\": {\"c\": [1, 2, 3]}}, \"d\": [{\"e\": \"f\"}, {\"g\": \"h\"}]})\n assert climate.settings[\"a\"] == {\"b\": {\"c\": [1, 2, 3]}}\n assert climate.settings.a == {\"b\": {\"c\": [1, 2, 3]}}\n assert climate.settings.a.b.c[0] == 1\n\n # test assignment\n for value in [{\"new\": \"data\"}, \"blaaa\", [3, 4, 5]]:\n with pytest.raises(TypeError):\n climate.settings.a.b.c = value\n climate.update({\"a\": {\"b\": {\"c\": value}}})\n assert climate.settings.a.b.c == value\n\n for value in [{\"new\": \"data\"}, \"blaaa\", 100]:\n with pytest.raises(TypeError):\n climate.settings.a.b.c[0] = value\n climate.update({\"a\": {\"b\": {\"c\": [value]}}})\n assert climate.settings.a.b.c[0] == value\n\n # test deletion\n with pytest.raises(TypeError):\n del climate.settings.a.b[\"c\"]\n climate.update({\"a\": {\"b\": {\"c\": core.REMOVED}}})\n assert climate.settings.a.b == {}\n climate.update()\n assert climate.settings.a.b == {}\n\n # test attribute deletion\n with pytest.raises(TypeError):\n del climate.settings.d[0].e\n climate.update({\"d\": [{\"e\": core.REMOVED}]})\n assert climate.settings.d == [{}, {\"g\": \"h\"}]\n climate.update()\n assert climate.settings.d == [{}, {\"g\": \"h\"}]\n\n # test sequence item deletion\n climate.update({\"d\": [core.REMOVED]})\n assert climate.settings.d == [{\"g\": \"h\"}]\n climate.update()\n assert climate.settings.d == [{\"g\": \"h\"}]\n\n # test second deletion at index to make sure that it is applied after the previous deletion\n climate.update({\"d\": [core.REMOVED]})\n assert climate.settings.d == []\n climate.update()\n assert climate.settings.d == []",
"def test_set_path_4(self, verify_path2_mock):\n test_file = Path(\"/dir1/dir2/../file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(\"/dir1/file.txt\")\n self.assertEqual(output, exp)",
"def test_removing_root(item):\n item.root = None\n assert not item.has_root",
"def test_invalid_session_populate_children(self):\n pod1 = Pod('1')\n node = Node('1', '2', 'Spine1', 'spine', pod1)\n self.assertRaises(TypeError, node.populate_children)",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def test_addPath_obviousCycle(self):\n g = Garden()\n self.assertRaises(CycleError, g.addPath, 'foo', 'v1', [\n ('foo', 'v1'),\n ])",
"def test_issue_114(asserter):\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\n \"type\": \"array\",\n \"items\": {\n \"b\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n value = {\"a\": []}\n expected = value\n asserter(schema, value, expected)",
"def test_get_fails_when_setting_non_dict_attribute(self):\n behaviour_arg_1 = \"behaviour_arg_1\"\n path = f\"skills.dummy.behaviours.dummy.args.{behaviour_arg_1}.over_the_string\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, \"new_value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n s = f\"Attribute '{behaviour_arg_1}' is not a dictionary.\"\n assert result.exception.message == s",
"def test_data_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.data = 0",
"def test_set_path_1(self):\n self.file.touch()\n # Since using tempfile, there is an added quirk.\n # the tempfile path may be a symlink, so passing it through set path\n # will resolve the symlink, changing the path, and breaking the test.\n self.file = self.file.resolve()\n output = basic.set_path(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertIsInstance(output, Path)\n with self.subTest():\n self.assertEqual(str(self.file), str(output))",
"def test_set_artifacts_path__deny_change_to_empty():\n path_before = copy.copy(ContractHandler.artifacts_path)\n assert path_before is not None\n assert ContractHandler._contracts\n\n ContractHandler.set_artifacts_path(None) # it should deny this\n\n assert ContractHandler.artifacts_path == path_before\n assert ContractHandler._contracts # cache should *not* have reset",
"def test_pod_invalid_parent(self):\n session = self.login_to_apic()\n parent = Node('1','101','Switch')\n self.assertRaises(TypeError, Pod.get, session, parent)",
"def test_set_parent_when_provided():\n\n # GIVEN a valid parent\n father: str = Pedigree.FATHER\n\n # WHEN running \"set_parent_if_missing\"\n validated_father: str = set_parent_if_missing(father)\n\n # THEN the returned string should not have been altered\n assert validated_father == father",
"def test_name(self):\r\n name, path, args, kwargs = self.field.deconstruct()\r\n self.assertIsNone(name)\r\n self.field.set_attributes_from_name(\"segments\")\r\n name, path, args, kwargs = self.field.deconstruct()\r\n self.assertEqual(name, \"segments\")",
"def test_reassignment_dict_to_string(self):\n self.fs[\"dir\"] = {\"x\": {\"y\": \"z\"}}\n self.fs[\"dir\"] = \"a new thing.\"\n self.assertEquals(self.fs[\"dir\"], \"a new thing.\")",
"def test_circular_dict(self):\n obj = {}\n obj[\"obj\"] = obj\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)",
"def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def test_bad_structures(bad_structures, mapper):\n for index, structure in enumerate(bad_structures):\n # This is for helping devs finding any errors that may occur\n print(f\"Trying structure number {index} from 'test_bad_structures.json'\")\n with pytest.raises(ValidationError):\n StructureResource(**mapper(MAPPER).map_back(structure))",
"def test_invalid_path(self, tmp_path):\n other_path = tmp_path / \"other\"\n other_path.mkdir()\n pattern = (\n \"Could not find any of configuration files '.kedro.yml, pyproject.toml'\"\n )\n with pytest.raises(KedroContextError, match=re.escape(pattern)):\n load_context(str(other_path))",
"def test_spector_init_error_no_survey(obj_dirobj):\n\tobj = obj_dirobj\n\n\twith pytest.raises(Exception) as e:\n\t\ts = spector.Spector(obj=obj)",
"def test_value_case_matters(self):\n a = {\n \"a\": [\n {\n \"b\": \"val\",\n },\n ]\n }\n b = copy.deepcopy(a)\n b[\"a\"][0][\"b\"] = \"VAL\"\n\n with pytest.raises(exceptions.KeyMismatchError):\n check_keys_match_recursive(a, b, [])",
"def test_dotwiz_plus_set_item():\n dd = DotWizPlus()\n dd['a'] = [{'one': 1, 'two': 2}]\n\n item = dd.a[0]\n assert isinstance(item, DotWizPlus)\n assert item.one == 1\n assert item.two == 2",
"def test_set_artifacts_path__allow_change():\n path_before = copy.copy(ContractHandler.artifacts_path)\n assert path_before is not None\n assert ContractHandler._contracts\n\n ContractHandler.set_artifacts_path(\"new path\")\n\n assert ContractHandler.artifacts_path == \"new path\"\n assert not ContractHandler._contracts # cache should have reset",
"def test_trailing_slash(setup_teardown_file):\n f = setup_teardown_file[3]\n\n f[\"dataset\"] = 42\n assert \"dataset/\" in f",
"def constrained_lens_object_test():\n return # TODO",
"def test_deep_set_create(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.set_dict_key_value(mdict, \"K:L:M\", \"Q\")\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\"}},\n \"K\": {\"L\": {\"M\": \"Q\"}},\n },\n res,\n )",
"def test_references(self):\n a = DummyObject()\n d = {'a.a.a':1, 'a.b.a':3, 'b':a}\n # Check dict single level keys don't lose reference\n self.assertEqual( dottedDict(d).data['b'], d['b'] )\n self.assertEqual( dottedDict(d).data, dottedDict(dottedDict(d)).data )",
"def test_dummy():\n dummyblock = DummyBlockNode(\n name=\"None\",\n parameters=(),\n ancestor=None,\n dirty=False,\n filepath=\"/some/random/path\"\n )\n dummydirective = DummyDirectiveNode(\n name=\"Name\",\n ancestor=None,\n filepath=\"/another/path\"\n )\n dummycomment = DummyCommentNode(\n comment=\"Comment\",\n ancestor=dummyblock,\n filepath=\"/some/file\"\n )",
"def testStudyPath(self):\n study_path = dicom_path.FromString(tdpu.STUDY_PATH_STR)\n self._AssertStoreAttributes(study_path)\n self.assertEqual(study_path.study_uid, tdpu.STUDY_UID)\n self.assertIsNone(study_path.series_uid)\n self.assertIsNone(study_path.instance_uid)\n self.assertEqual(study_path.type, dicom_path.Type.STUDY)\n self.assertEqual(study_path.dicomweb_path_str, tdpu.DICOMWEB_PATH_STR)\n self.assertEqual(str(study_path), tdpu.STUDY_PATH_STR)\n self.assertEqual(str(study_path.GetStorePath()), tdpu.STORE_PATH_STR)\n self.assertEqual(str(study_path.GetStudyPath()), tdpu.STUDY_PATH_STR)",
"def test_reassignment_string_to_dict(self):\n self.fs[\"dir\"] = \"an old thing.\"\n self.fs[\"dir\"] = {\"x\": {\"y\": \"z\"}}\n self.assertEquals(self.fs[\"dir\"][\"x\"][\"y\"], \"z\")",
"def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )",
"def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)",
"def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()",
"def test_nested_dict(self):\n nested = self.TEI.nested_dict(exclude=[\"tei:note\"])\n self.assertEqual(nested[\"1\"][\"pr\"][\"1\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Check that dictionary path is well done\")\n self.assertEqual(nested[\"1\"][\"12\"][\"1\"], \"Itur ad Herculeas gelidi qua Tiburis arces \",\n \"Check that dictionary path works on more than one passage\")\n self.assertEqual(nested[\"2\"][\"pr\"][\"1\"], \"'Quid nobis' inquis 'cum epistula? parum enim tibi \",\n \"Check that different fist level works as well\")\n self.assertEqual(nested[\"1\"][\"3\"][\"8\"], \"Ibis ab excusso missus in astra sago. \",\n \"Check that notes are removed \")\n self.assertEqual(\n [list(nested.keys()), list(nested[\"1\"].keys())[:3], list(nested[\"2\"][\"pr\"].keys())[:3]],\n [[\"1\", \"2\"], [\"pr\", \"1\", \"2\"], [\"sa\", \"1\", \"2\"]],\n \"Ensure that text keeps its order\")",
"def test_save_json_with_invalid_step(temp_dir):\n data = json.dumps({\"k\": \"v\", \"list\": [1, 2, 3]})\n\n with pytest.raises(ValueError):\n save_json(temp_dir, data, step={\"invalid\": \"dict\"})",
"def test_first_level_subdir(self):\n self.mfs.add_entries({'/foo': 'bar'})\n self.assertTrue(os.path.exists('/'))\n self.assertTrue(os.path.isdir('/'))\n self.assertTrue(os.path.exists('/foo'))",
"def test_descriptor_with_nopath(self):\r\n\r\n self._get_descriptor_with_invalid_link(NoPathToItem)",
"def testSlopeFromDict(self):\n def setSlope():\n self.node.slope = {'r': 1.3782, 'g': 278.32, 'b': 2}\n\n self.assertRaises(\n TypeError,\n setSlope\n )",
"def test_set_path_5(self, verify_path2_mock):\n home = Path(\"~\")\n home = home.expanduser()\n test_file = Path(\"~/dir1/dir2/../file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(home, \"dir1/file.txt\")\n self.assertEqual(output, exp)",
"def test_load_path(parser):\n doc = parser.load(pathlib.Path('jsonexamples') / 'small' / 'demo.json')\n doc.at_pointer('/Image/Width')",
"def test_spector_init_error_survey_spec(obj_dirobj):\n\tobj = obj_dirobj\n\n\twith pytest.raises(Exception) as e: \n\t\ts = spector.Spector(obj=obj, survey_spec='sdss')",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def _assign_to_null(obj, path, value, force=True):\n if isinstance(obj, NullType):\n d = _get(obj, \"__dict__\")\n o = d[\"_obj\"]\n p = d[\"__key__\"]\n s = [p]+path\n return _assign_to_null(o, s, value)\n\n path0 = path[0]\n\n if len(path) == 1:\n if force:\n obj[path0] = value\n else:\n _setdefault(obj, path0, value)\n return\n\n old_value = obj.get(path0)\n if old_value == None:\n if value == None:\n return\n else:\n obj[path0] = old_value = {}\n\n _assign_to_null(old_value, path[1:], value)",
"def test_set_get_incorrect_path(self):\n with pytest.raises(\n ClickException, match=\"Attribute `.*` for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.INCORRECT_PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n\n with pytest.raises(\n ClickException,\n match=\"Attribute `behaviours.dummy.args.behaviour_arg_100500` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n self.INCORRECT_PATH,\n str(self.NEW_VALUE),\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_issue588(self):\n c = ConfigDict()\n c.load_dict({'a': {'b': 'c'}}, make_namespaces=True)\n self.assertEqual('c', c['a.b'])\n self.assertEqual('c', c['a']['b'])\n self.assertEqual({'b': 'c'}, c['a'])",
"def _create_path(root, dict_type, path):\n for sub_path in path:\n if not isinstance(root.get(sub_path, None), dict):\n root[sub_path] = dict_type()\n\n root = root[sub_path]\n\n return root",
"def test_call(self):\r\n p = TreeBuilder({})\r\n self.assertRaises(NotImplementedError, p, '/path/to/seqs')",
"def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"",
"def test_predicates_on_unsanitized_paths(self):\n self.mfs.add_entries({'/just/another/pythonista': ''})\n\n self.assertTrue(os.path.isdir('///just'))\n self.assertTrue(os.path.isdir('///just/////another'))\n self.assertTrue(os.path.exists('///just////another////////pythonista'))\n self.assertTrue(os.path.isfile('///just////another////////pythonista'))",
"def test_basic_prop(chikin):\n assert str(chikin) == '[document]' == chikin.name\n assert chikin.depth == 0\n assert len(chikin.branches) == 2\n assert isinstance(chikin.section, TreeOfContents)",
"def test_transform_object(self):\n # Test object with nested \"international\" fields\n obj1 = {\n \"international\": {\n \"display_name\": {\n \"af\": \"Dokumentbestuurstelsel\",\n \"fr\": \"type de logiciel\",\n \"ro\": \"colecție organizată a documentelor\",\n }\n }\n }\n transform_object(obj1, \"international\")\n self.assertDictEqual(\n {\n \"international\": {\n \"display_name\": {\n \"keys\": [\"af\", \"fr\", \"ro\"],\n \"values\": [\n \"Dokumentbestuurstelsel\",\n \"type de logiciel\",\n \"colecție organizată \" \"a documentelor\",\n ],\n }\n }\n },\n obj1,\n )\n\n # Test object with nested \"international\" none\n obj2 = {\"international\": {\"display_name\": None}}\n transform_object(obj2, \"international\")\n self.assertDictEqual({\"international\": {\"display_name\": None}}, obj2)\n\n # Test object with nested \"abstract_inverted_index\" fields\n obj3 = {\n \"abstract_inverted_index\": {\n \"Malignant\": [0],\n \"hyperthermia\": [1],\n \"susceptibility\": [2],\n \"(MHS)\": [3],\n \"is\": [4, 6],\n \"primarily\": [5],\n }\n }\n transform_object(obj3, \"abstract_inverted_index\")\n self.assertDictEqual(\n {\n \"abstract_inverted_index\": {\n \"keys\": [\"Malignant\", \"hyperthermia\", \"susceptibility\", \"(MHS)\", \"is\", \"primarily\"],\n \"values\": [\"0\", \"1\", \"2\", \"3\", \"4, 6\", \"5\"],\n }\n },\n obj3,\n )\n\n # Test object with nested \"abstract_inverted_index\" none\n obj4 = {\"abstract_inverted_index\": None}\n transform_object(obj4, \"abstract_inverted_index\")\n self.assertDictEqual({\"abstract_inverted_index\": None}, obj4)",
"def test_set_artifacts_path__deny_change_to_same():\n path_before = copy.copy(ContractHandler.artifacts_path)\n assert path_before is not None\n assert ContractHandler._contracts\n\n ContractHandler.set_artifacts_path(path_before)\n\n assert ContractHandler.artifacts_path == path_before\n assert ContractHandler._contracts # cache should *not* have reset",
"def test_path(tmp_path: Path) -> None:\n path = tmp_path / \"repository\"\n repository = Repository.init(path)\n assert path == repository.path",
"def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value",
"def test_change_parent_location(self):\n pass",
"def test_path_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_path = \"foobar\"\n self.assertRaises(InvalidInputError, self.command.run)",
"def test_Struct(self):\n\n my_dict = {'param1': 'value1', 'param2': {'param21': 'value21'}}\n object1 = Struct(my_dict)\n\n self.assertEqual(my_dict[\"param1\"], object1.param1)",
"def test_paths_properties():\n template_script = get_template_script(output_dir='output1')\n template_script['options']['setup_dir'] = 'setup1'\n exp_builder = ExperimentBuilder(template_script)\n\n # The database path is configured correctly.\n assert exp_builder._db.setup_dir == os.path.join('output1', 'setup1')\n\n # Updating paths also updates the database main directory.\n exp_builder.output_dir = 'output2'\n exp_builder.setup_dir = 'setup2'\n assert exp_builder._db.setup_dir == os.path.join('output2', 'setup2')",
"def test_set_path_3(self, verify_path2_mock):\n home = Path(\"~\")\n home = home.expanduser()\n test_file = Path(\"~/path/to/file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(home, \"path/to/file.txt\")\n self.assertEqual(output, exp)",
"def test_nested_settings_files(tmpdir):\n subfolder = tmpdir.mkdir(\"sub\")\n p = subfolder.join(\"settings.json\")\n nested_1_p = subfolder.join(\"nested_1.json\")\n nested_2_p = subfolder.join(\"nested_2.json\")\n\n nested_2_p.write(json.dumps({\"foo\": 1, \"bar\": 2}))\n nested_1_p.write(json.dumps({\"level_2_from_file\": str(nested_2_p)}))\n p.write(\n json.dumps(\n {\n \"level_1_from_file\": str(\n nested_1_p\n ), # nested_1_p references nested_2_p internally.\n \"spam\": \"parrot\",\n \"list\": [\n \"random\",\n {\n \"this_from_file\": str(\n nested_2_p\n ) # dictionaries in lists should be expanded as well.\n },\n ],\n }\n )\n )\n\n climate = core.Climate(prefix=\"TEST_STUFF\", settings_files=[str(p)])\n assert dict(climate.settings) == {\n \"spam\": \"parrot\",\n \"level_1\": {\"level_2\": {\"foo\": 1, \"bar\": 2}},\n \"list\": [\"random\", {\"this\": {\"foo\": 1, \"bar\": 2}}],\n }",
"def test_create_unexpected_problem(self):\n pass",
"def test_parent_does_not_exist(self):\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')",
"def test_embedded_json(self):\n json_data = '{\"a\": {\"b\" : true } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a.b\" : true}'))",
"def test_compound_with_location(self):\n self.assertEqual(self.compound_with_loc.location, 'extracellular')",
"def test_update_study_missing(self):\n study_spec = sample_study_spec()\n with self.assertRaises(ValueError):\n self.storage.update_study(study_spec)",
"def test_Tree():",
"def test_do_not_load_in_child_dir(self, tmp_path):\n nested_directory = tmp_path / os.path.join(\"a\", \"b\", \"c\")\n os.makedirs(nested_directory, exist_ok=True)\n\n # Create a FF in a nested directory\n ForceField(\"openff-1.0.0.offxml\").to_file(\n os.path.join(nested_directory, \"force-field.offxml\")\n )\n\n # Check that the file does not exist in the current working directory.\n assert not os.path.isfile(\"force-field.offxml\")\n\n with pytest.raises(\n OSError, match=\"Source 'force-field.offxml' could not be read.\"\n ):\n ForceField(\"force-field.offxml\")",
"def testPathAttributes(self):\n ddict = {\n (\"\", \"NX_class\"): 'NXcollection',\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n # This should not warn\n with LoggingValidator(dictdump_logger, warning=0):\n dictdump.dicttoh5(ddict, h5file, h5path=\"foo/bar\")",
"def set_attribute(obj, path, value):\n names = path.split('.')\n if len(names) > 1:\n set_attribute(getattr(obj, names[0]), '.'.join(names[1:]), value)\n else:\n setattr(obj, names[0], value)",
"def testValuesWithPaths(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag1 = createTag(user, namespace, u'tag1')\n tag2 = createTag(user, namespace, u'tag2')\n self.store.add(TagValue(user.id, tag1.id, objectID, None))\n self.store.add(TagValue(user.id, tag2.id, objectID, 42))\n collection = TagValueCollection(paths=[u'name/tag1'])\n (tag, tagValue) = collection.values().one()\n self.assertEqual(objectID, tagValue.objectID)\n self.assertEqual(u'name/tag1', tag.path)\n self.assertEqual(None, tagValue.value)"
] | [
"0.65963215",
"0.6569621",
"0.64509624",
"0.6348378",
"0.61588395",
"0.5966255",
"0.5864911",
"0.5818894",
"0.57906985",
"0.57551765",
"0.57298034",
"0.5715899",
"0.570213",
"0.5672622",
"0.5666781",
"0.566096",
"0.56185013",
"0.56091845",
"0.55812943",
"0.55795157",
"0.5563023",
"0.5555255",
"0.55440736",
"0.5526201",
"0.54905695",
"0.54895663",
"0.5485383",
"0.5475001",
"0.54728854",
"0.5465715",
"0.5455976",
"0.5453225",
"0.54491067",
"0.54368305",
"0.5429059",
"0.5401973",
"0.53963494",
"0.53944135",
"0.53537613",
"0.53334504",
"0.5326704",
"0.5325989",
"0.53201693",
"0.53150344",
"0.5313558",
"0.5307076",
"0.5307076",
"0.5305899",
"0.53052175",
"0.5304626",
"0.5294137",
"0.5285086",
"0.52797586",
"0.52738845",
"0.5272915",
"0.52674204",
"0.5265879",
"0.5260147",
"0.5258571",
"0.525309",
"0.5252899",
"0.5252337",
"0.5250224",
"0.5247625",
"0.5243639",
"0.5222771",
"0.51983213",
"0.5194621",
"0.5179943",
"0.51791465",
"0.5176488",
"0.51752394",
"0.51637757",
"0.51628804",
"0.51628625",
"0.51602495",
"0.5158121",
"0.5155826",
"0.5154702",
"0.51411355",
"0.5128821",
"0.5126815",
"0.5120804",
"0.5120715",
"0.5120254",
"0.51201314",
"0.5118948",
"0.51183563",
"0.51075536",
"0.5104009",
"0.5103648",
"0.5095508",
"0.50947",
"0.50935245",
"0.5092935",
"0.5091942",
"0.5089818",
"0.5089453",
"0.50886285",
"0.5088027"
] | 0.6992566 | 0 |
Test that the set fails because the path points to a non-dict object. | def test_get_fails_when_setting_non_dict_attribute(self):
behaviour_arg_1 = "behaviour_arg_1"
path = f"skills.dummy.behaviours.dummy.args.{behaviour_arg_1}.over_the_string"
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", path, "new_value"],
standalone_mode=False,
)
assert result.exit_code == 1
s = f"Attribute '{behaviour_arg_1}' is not a dictionary."
assert result.exception.message == s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_non_hashable1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar\n d = {}\n self.assertRaises(TypeError, hash, xp)\n self.assertRaises(TypeError, d.setdefault, xp, 'key')",
"def testNotExistingPath(self):\n with h5py.File(self.h5_fname, 'a') as f:\n f['data'] = 1\n\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='ignore')\n self.assertFalse(ddict)\n\n with LoggingValidator(dictdump_logger, error=1):\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='log')\n self.assertFalse(ddict)\n\n with self.assertRaises(KeyError):\n h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='raise')",
"def test_no_path():\n test = [{'key': 'val'}, []]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'path list empty' in str(t_result.failure())",
"def test_process_dict_false(self):\n\n self.assertNotIn('userB@domain', self.temp_set)",
"def testDictDoesNotContain(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" not in d1:\n print d1[\"nonsense\"] # Dead code\n else:\n print d1[\"x\"]\n\n d2 = {}\n if \"x\" not in d2:\n pass\n else:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" not in d3:\n print d3[\"y\"]\n else:\n print d3[\"x\"]\n \"\"\")",
"def testContainOnlyImmutables(self):\n aset = set()\n \n aset.add(1)\n aset.add(\"cheka\")\n \n # non-hashable object (that is mutable) objects cannot be contained in set\n self.assertRaises(TypeError, lambda : aset.add([]) )",
"def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')",
"def test_json_error(self):\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_Path",
"def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')",
"def testPathAttributes(self):\n ddict = {\n (\"\", \"NX_class\"): 'NXcollection',\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n # This should not warn\n with LoggingValidator(dictdump_logger, warning=0):\n dictdump.dicttoh5(ddict, h5file, h5path=\"foo/bar\")",
"def test_get_filled_attributes_from_file_non_existent_file(tmp_path):\n assert get_filled_attributes_from_file(tmp_path / \"does not exist\") == frozenset([])",
"def test_add_patterns_raises_error_pattern_not_iter_of_dict(ruler: SpaczzRuler) -> None:\n with pytest.raises(TypeError):\n ruler.add_patterns({\"label\": \"GPE\", \"pattern\": \"Montana\"}) # type: ignore",
"def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_badyvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {1, 2, 3}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_dumps_set(self):\n try:\n _build_test_dirs()\n dicti = {\n 'set': set([1, 2, 4, 4, 2]),\n 'array': [1, 2, 3],\n 'string': 'trololo',\n 'int': 1,\n 'float': 4.32,\n 'true': True,\n 'false': False,\n 'null': None\n }\n with open(_TEST_FILE, 'w+') as fileobj:\n morejson.dump(dicti, fileobj)\n with open(_TEST_FILE, 'r') as fileobj:\n self.assertEqual(dicti, morejson.load(fileobj))\n finally:\n _dismantle_test_dirs()",
"def test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None",
"def test_validate_bad_data(self, value):\n opt = scheme.DictOption('test-opt', scheme.Scheme())\n with pytest.raises(errors.SchemeValidationError):\n opt.validate('foo', value)",
"def testFromStringTypeError(self):\n for path_type in dicom_path.Type:\n if path_type != dicom_path.Type.STORE:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.STORE_PATH_STR, path_type)\n if path_type != dicom_path.Type.STUDY:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.STUDY_PATH_STR, path_type)\n if path_type != dicom_path.Type.SERIES:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.SERIES_PATH_STR, path_type)\n if path_type != dicom_path.Type.INSTANCE:\n self.assertRaises(ValueError, dicom_path.FromString,\n tdpu.INSTANCE_PATH_STR, path_type)",
"def test_badyvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, {\"foo\": 1}, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_OBJT_pass(self):\n for O in self.mod.objts.itervalues():\n self.assertTrue(O.isset)",
"def testDirectorySetBadType(self):\n def setDirectory():\n self.mr.directory = 12345\n\n self.assertRaises(\n TypeError,\n setDirectory\n )",
"def test_unique_item_properties_failed(self):\n check_value = [{\"a\": 1, \"b\": 3}, {\"a\": 1, \"b\": 2}]\n\n with pytest.raises(AssertionError):\n unique_item_properties(check_value, \"a\")",
"def test_value_case_matters(self):\n a = {\n \"a\": [\n {\n \"b\": \"val\",\n },\n ]\n }\n b = copy.deepcopy(a)\n b[\"a\"][0][\"b\"] = \"VAL\"\n\n with pytest.raises(exceptions.KeyMismatchError):\n check_keys_match_recursive(a, b, [])",
"def test_badxvaluewithsets(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {1, 2, 3}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_badxvaluewithdicts(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, {\"foo\": 1}, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_invalid_assignment():\n with pytest.raises(TypeError):\n PropertyAndConditions(property=LinkByUID('id', 'a15'))\n with pytest.raises(TypeError):\n PropertyAndConditions(property=Property(\"property\"),\n conditions=[Condition(\"condition\"), LinkByUID('scope', 'id')])",
"def test_unicode_names(self):\n self.assertArrayEqual(self.dset['a'], self.data['a'])\n self.dset['a'] = 42\n data = self.data.copy()\n data['a'] = 42\n self.assertArrayEqual(self.dset['a'], data['a'])",
"def test_issue_74():\n patient = Patient(active=True, address=[])\n assert \"address\" not in patient.dict()\n assert patient.dict(exclude_none=False)[\"address\"] == []",
"def verify_paths(self) -> None:\n bad_keys = []\n for key, path in self.paths.items():\n # we only check specified paths, and drop them otherwise\n if path is not None:\n if isinstance(path, str):\n path = Path(path)\n if not path.exists():\n warn(\n f\"A path for {key} dataset was specified but unresolvable, please check {path.absolute()} exists and contains *.lmdb files.\"\n )\n bad_keys.append(key)\n else:\n bad_keys.append(key)\n for key in bad_keys:\n del self.paths[key]",
"def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False",
"def test_set_use_collection_not_bool(self) -> None:\n\n given = [\"Hello\", \"World!\"]\n\n self.assertRaises(TypeError, lambda: self.checker.set_use_collection(given))",
"def test_set_missing_keys_2(self):\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"cluster\"])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)",
"def test_isc_rr_type_set_failing(self):\n test_string = [\n 'oops',\n ]\n result = rr_type_set.runTests(test_string, failureTests=True)\n self.assertTrue(result[0])",
"def _validate_key(sample, path):\n mapping_tmp = sample\n for key in path:\n try:\n mapping_tmp = mapping_tmp[key]\n except KeyError:\n return False\n except TypeError:\n return False\n return True",
"def test_dict_with_invalid_version(self):\n\n invalid_version_info = (-1, -1, -1)\n d = LexicalDictionary(invalid_version_info)\n\n with self.assertRaises(FileNotFoundError):\n lp = Lexpp(external_dict=d)",
"def test_get(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1,2,'three'],\n '4': {1:'one', 2:'two'}}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key])\n\n values = [storage.get(key) for key in keys_to_set.keys()]\n true_values = [keys_to_set[key] for key in keys_to_set.keys()]\n self.assertEqual(true_values, values)\n self.assertRaises(StorageKeyError,storage.get, '0')",
"def test_unique(self):\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n with self.assertRaises(KeyError):\n env.add(graph)",
"def test_add_keys_multiple_times(self):\n path = _path.Path.from_str(\"RootOper.Foo(*)\")\n with self.assertRaisesRegex(\n ValueError, \"Path element already has key information\"):\n path(4)",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def testUidMissingError(self):\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', None,\n '4.5.6')\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', 'stuid',\n None, '7.8.9')",
"def test_write_noncompound(self):\n data2 = self.data.copy()\n data2['b'] = 1.0\n self.dset['b'] = 1.0\n self.assertTrue(np.all(self.dset[...] == data2))",
"def test_MetadataMap_setter_invalid_input(self):\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n \"foo\")\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n [])\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n {})\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n None)\r\n self.assertRaises(TypeError, setattr, self.cs_overview, 'MetadataMap',\r\n self.overview_dm)",
"def test_get_type_for_key_path_invalid_key_path(test_schema):\n assert get_type_for_key_path(test_schema, \"foo.bar\") == None",
"def test_invalid_tensor_op_object_graph_pairing(self, data, description):\n with self.assertRaises((KeyError, AssertionError, TypeError), msg=description):\n data()",
"def test_save_json_with_invalid_step(temp_dir):\n data = json.dumps({\"k\": \"v\", \"list\": [1, 2, 3]})\n\n with pytest.raises(ValueError):\n save_json(temp_dir, data, step={\"invalid\": \"dict\"})",
"def test_permlookupdict_in(self):\n pldict = PermLookupDict(MockUser(), \"mockapp\")\n with self.assertRaises(TypeError):\n self.EQLimiterObject() in pldict",
"def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def test_process_dict_true(self):\n\n self.assertIn('userA@domain', self.temp_set)",
"def test_get_invalid_line(self):\n ars = self.ar[2009][11]['general']\n self.assertRaises(KeyError, ars.__getitem__, 'invalid_section')",
"def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)",
"def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"",
"def test_set_missing_keys_1(self):\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"host_genus\"])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"\")",
"def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)",
"def testSetWithBadString(self):\n def setSat():\n self.node.sat = 'banana'\n\n self.assertRaises(\n TypeError,\n setSat\n )",
"def test_nonexisting_string_not_contained(tricky_trie):\n assert not tricky_trie.contains('no')",
"def testSetWithListFails(self):\n def setSat():\n self.node.sat = [-1.1]\n\n self.assertRaises(\n TypeError,\n setSat\n )",
"def test_RestrictingNodeTransformer__visit_NotIn_Dict():\n assert restricted_eval('2 not in {1: 1, 2: 2, 3: 3}') is False",
"def test_set_with_shallow_path():\n shallow_key_path = 'shallow_key_path'\n test_value = 'shallow key path value'\n\n config.set(shallow_key_path, test_value)\n assert config.get(shallow_key_path) == test_value",
"def test___getitem___invalid_index(self):\n with pytest.raises(TypeError):\n self.Person.objects()[\"a\"]",
"def test_setter_invalid_value(self):\n root = netapp_api.NaElement('root')\n try:\n root['k'] = netapp_api.NaServer('localhost')\n except Exception as e:\n if not isinstance(e, TypeError):\n self.fail(_('Error not a TypeError.'))",
"def test_setter_invalid_value(self):\n root = netapp_api.NaElement('root')\n try:\n root['k'] = netapp_api.NaServer('localhost')\n except Exception as e:\n if not isinstance(e, TypeError):\n self.fail(_('Error not a TypeError.'))",
"def test_traversal_invalid_string(traversal_test_trie):\n with pytest.raises(KeyError):\n gen = traversal_test_trie.traversal('invalid')\n next(gen)",
"def test_has_location_with_invalid_states():\n for state in (None, 1, \"hello\", object):\n assert not location.has_location(state)",
"def test_key_case_matters(self):\n a = {\n \"a\": [\n {\n \"b\": \"val\",\n },\n ]\n }\n b = copy.deepcopy(a)\n b[\"a\"][0] = {\"B\": \"val\"}\n\n with pytest.raises(exceptions.KeyMismatchError):\n check_keys_match_recursive(a, b, [])",
"def test_15_dict_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, {})\n self.assertEqual(\n \"height must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle({\"a\": 1, \"b\": 2, \"c\": 3}, 2)\n self.assertEqual(\n \"width must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, {\"a\": 1})\n self.assertEqual(\n \"x must be an integer\",\n str(x.exception))\n with self.assertRaises(TypeError) as x:\n r = Rectangle(10, 2, 0, {\"hi\": None})\n self.assertEqual(\n \"y must be an integer\",\n str(x.exception))",
"def test_bad_setitem(self):\n space = Space()\n\n # The name of an integer must be a of `str` type.\n # Integers are reversed for indexing the OrderedDict.\n with pytest.raises(TypeError) as exc:\n space[5] = Integer(\"yolo\", \"uniform\", -3, 6)\n assert \"string\" in str(exc.value)\n\n # Only object of type `Dimension` are allowed in `Space`.\n with pytest.raises(TypeError) as exc:\n space[\"ispis\"] = \"nope\"\n assert \"Dimension\" in str(exc.value)\n\n # Cannot register something with the same name.\n space.register(Integer(\"yolo\", \"uniform\", -3, 6))\n with pytest.raises(ValueError) as exc:\n space.register(Real(\"yolo\", \"uniform\", 0, 6))\n assert \"another name\" in str(exc.value)",
"def test_get_key_not_defined_yet(self):\n storage = SessionStorage()\n\n self.assertNotIn('key1', storage)\n s1 = storage['key1']\n self.assertIn('key1', storage)\n\n self.assertNotIn('key2', storage)\n s2 = storage['key2']\n self.assertIn('key2', storage)\n\n self.assertIsNot(s1, s2)",
"def test_dumps_frozenset(self):\n try:\n _build_test_dirs()\n dicti = {\n 'set': frozenset([1, 2, 4, 4, 2]),\n 'array': [1, 2, 3],\n 'string': 'trololo',\n 'int': 1,\n 'float': 4.32,\n 'true': True,\n 'false': False,\n 'null': None\n }\n with open(_TEST_FILE, 'w+') as fileobj:\n morejson.dump(dicti, fileobj)\n with open(_TEST_FILE, 'r') as fileobj:\n self.assertEqual(dicti, morejson.load(fileobj))\n finally:\n _dismantle_test_dirs()",
"def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')",
"def test_collisions_file_path(self):\n self.assertRaises(ValueError, collisions_clean, \"not_a_file_path\")",
"def test_set_attributes_error(self):\n r = Resources()\n attr_lst = [\"num_wires\", \"num_gates\", \"depth\", \"shots\", \"gate_types\"]\n\n for attr_name in attr_lst:\n with pytest.raises(FrozenInstanceError, match=\"cannot assign to field\"):\n setattr(r, attr_name, 1)",
"def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)",
"def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)",
"def test_set_value_not_str(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.assertRaises(TypeError, lambda: self.helper.set_value([\"Hello\", \"World!\"]))",
"def testSlopeFromDict(self):\n def setSlope():\n self.node.slope = {'r': 1.3782, 'g': 278.32, 'b': 2}\n\n self.assertRaises(\n TypeError,\n setSlope\n )",
"def test_isadict(self):\n # It is a dict-subclass, so this kind of pointless, but it doen't hurt.\n d, m = dict(a=5), ConfigDict(a=5)\n d['key'], m['key'] = 'value', 'value'\n d['k2'], m['k2'] = 'v1', 'v1'\n d['k2'], m['k2'] = 'v2', 'v2'\n self.assertEqual(d.keys(), m.keys())\n self.assertEqual(list(d.values()), list(m.values()))\n self.assertEqual(d.get('key'), m.get('key'))\n self.assertEqual(d.get('cay'), m.get('cay'))\n self.assertEqual(list(iter(d)), list(iter(m)))\n self.assertEqual([k for k in d], [k for k in m])\n self.assertEqual(len(d), len(m))\n self.assertEqual('key' in d, 'key' in m)\n self.assertEqual('cay' in d, 'cay' in m)\n self.assertRaises(KeyError, lambda: m['cay'])",
"def test_to_json_file_non_dict(self):\n\n output_file = \"this_file_is_a_ghost\"\n File(output_file).delete()\n\n self.assertRaises(TypeError, lambda: Dict(1).to_json_file(output_file))\n self.assertRaises(TypeError, lambda: Dict(\"100\").to_json_file(output_file))\n self.assertRaises(\n TypeError, lambda: Dict(\"{'hello': 'world'}\").to_json_file(output_file)\n )\n\n File(output_file).delete()",
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_object_has_no_copy_uploaded_marker(self):\n self.assertTrue('copy_uploaded' not in self.eightythreeb.data['markers']) # should not be present",
"def test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )",
"def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")",
"def test_circular_dict(self):\n obj = {}\n obj[\"obj\"] = obj\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)",
"def test_save_npy_with_invalid_step(temp_dir):\n data = np.array([[1, 2, 3], [4, 5, 6]])\n\n with pytest.raises(ValueError):\n save_npy(temp_dir, data, step={\"invalid\": \"dict\"})",
"def test_bad_property_setting(self):\n s = State(substance=\"water\")\n with pytest.raises(AttributeError):\n # Should be lowercase p\n s.TP = Q_(400.0, \"K\"), Q_(101325.0, \"Pa\")",
"def test_not_h5py_group(self):\n with self.assertRaises(TypeError):\n self.map_digis(None)",
"def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def testDictContains(self):\n self.Check(\"\"\"\n d1 = {\"x\": 42}\n if \"x\" in d1:\n print d1[\"x\"]\n else:\n print d1[\"nonsense\"] # Dead code\n\n d2 = {}\n if \"x\" in d2:\n print d2[\"nonsense\"] # Dead code\n\n d3 = {__any_object__: __any_object__}\n if \"x\" in d3:\n print d3[\"x\"]\n else:\n print d3[\"y\"]\n \"\"\")",
"def test_errs(self):\n b1 = BaseModel()\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_path\n\n with self.assertRaises(TypeError):\n models.storage.new()\n models.storage.new(self, b1)\n models.save(b1)\n models.reload(b1)\n models.all(b1)",
"def test_if_it_accepts_dictionary(self):\n with self.assertRaises(TypeError):\n prime_numbers({})",
"def test_example():\n with pytest.raises(\n AssertionError,\n match=expected_error_match,\n ):\n actual = {\n \"test1\": 1,\n \"test2\": \"foo\",\n \"bar\": {\"cheese\": \"parrot\", \"rabbit\": [\"black\", \"knight\"], \"other\": \"oops\"},\n }\n assert actual == Alike(\n {\n \"something\": A.is_missing,\n \"test2\": \"foo\",\n \"test1\": A < 2,\n \"bar\": {\n \"cheese\": A.is_present,\n \"rabbit\": [\"black\", \"wrong\"],\n \"other\": A.is_missing,\n },\n }\n )",
"def test_exclusive(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertEquals({1, 2, 3}, s.data[1])\n self.assertEquals({4, 5, 6}, s.data[4])",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']",
"def test_importing_invalid_data_for_collections(self):\n self.prepare()\n\n def _check(file_name, table_name, expected_results):\n # import the CSV file with COPY FROM\n logger.debug('Importing from csv file: {}'.format(file_name))\n out, err, _ = self.run_cqlsh(cmds=\"COPY ks.{} FROM '{}'\".format(table_name, file_name))\n logger.debug(out)\n\n assert 'ParseError - Failed to parse' in err\n\n results = rows_to_list(self.session.execute(\"SELECT * FROM {}\".format(table_name)))\n logger.debug(results)\n assert expected_results == results\n\n def _test_invalid_data_for_sets():\n logger.debug('Testing invalid data for sets')\n self.session.execute(\"\"\"\n CREATE TABLE testinvaliddataforsets (\n key text,\n value frozen<set<text>>,\n PRIMARY KEY (key)\n )\"\"\")\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.write('key1,\"{\\'test1\\', \\'test2\\'}\"\\n')\n f.write('key2,\"{\\'test1\\', \\'test2\\']\"\\n')\n f.write('key3,not_a_set\\n')\n f.write('key4,\"not_a_set\"\\n')\n f.write(\"key5,'not_a_set'\\n\")\n\n expected_results = [['key1', SortedSet(['test1', 'test2'])]]\n _check(tempfile.name, 'testinvaliddataforsets', expected_results)\n\n def _test_invalid_data_for_lists():\n logger.debug('Testing invalid data for lists')\n self.session.execute(\"\"\"\n CREATE TABLE testinvaliddataforlists (\n key text,\n value list<text>,\n PRIMARY KEY (key)\n )\"\"\")\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.write('key1,\"[\\'test1\\', \\'test2\\']\"\\n')\n f.write('key2,\"[\\'test1\\', \\'test2\\'}\"\\n')\n f.write('key3,not_a_list\\n')\n f.write('key4,\"not_a_list\"\\n')\n f.write(\"key5,'not_a_list'\\n\")\n\n expected_results = [['key1', list(['test1', 'test2'])]]\n _check(tempfile.name, 'testinvaliddataforlists', expected_results)\n\n def _test_invalid_data_for_maps():\n logger.debug('Testing invalid data for maps')\n self.session.execute(\"\"\"\n CREATE TABLE testinvaliddataformaps (\n key text,\n value map<text, text>,\n PRIMARY KEY (key)\n )\"\"\")\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.write('key1,\"{\\'key1\\': \\'test1\\', \\'key2\\': \\'test2\\'}\"\\n')\n f.write('key2,\"{\\'key1\\': \\'test1\\', \\'key2\\': \\'test2\\']\"\\n')\n f.write('key3,not_a_map\\n')\n f.write('key4,\"not_a_map\"\\n')\n f.write(\"key5,'not_a_map'\\n\")\n\n expected_results = [['key1', dict([('key1', 'test1'), ('key2', 'test2')])]]\n _check(tempfile.name, 'testinvaliddataformaps', expected_results)\n\n _test_invalid_data_for_sets()\n _test_invalid_data_for_lists()\n _test_invalid_data_for_maps()",
"def test_reassignment_dict_to_string(self):\n self.fs[\"dir\"] = {\"x\": {\"y\": \"z\"}}\n self.fs[\"dir\"] = \"a new thing.\"\n self.assertEquals(self.fs[\"dir\"], \"a new thing.\")",
"def test_to_yaml_file_non_dict(self):\n\n output_file = \"this_file_is_a_ghost\"\n File(output_file).delete()\n\n self.assertRaises(TypeError, lambda: Dict(1).to_yaml_file(output_file))\n self.assertRaises(TypeError, lambda: Dict(\"100\").to_yaml_file(output_file))\n self.assertRaises(\n TypeError, lambda: Dict(\"{'hello': 'world'}\").to_yaml_file(output_file)\n )\n\n File(output_file).delete()",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}"
] | [
"0.6538133",
"0.6399158",
"0.6360518",
"0.6323283",
"0.6202455",
"0.614464",
"0.612193",
"0.6104999",
"0.60922164",
"0.5989702",
"0.5986565",
"0.5978894",
"0.59288955",
"0.59278184",
"0.59278184",
"0.59171736",
"0.5916863",
"0.5883562",
"0.58797073",
"0.58732975",
"0.5871565",
"0.5871162",
"0.58651626",
"0.5840054",
"0.5827002",
"0.5802676",
"0.5797453",
"0.57895994",
"0.5782119",
"0.5771139",
"0.5769376",
"0.5763321",
"0.5760221",
"0.5758533",
"0.5747707",
"0.57441145",
"0.5742364",
"0.57399994",
"0.57395875",
"0.5735682",
"0.57316977",
"0.5729762",
"0.57279295",
"0.5714478",
"0.5714055",
"0.5702165",
"0.56991476",
"0.56964654",
"0.5675573",
"0.56689453",
"0.5617334",
"0.5603287",
"0.5592901",
"0.5590026",
"0.5589361",
"0.5586829",
"0.5572585",
"0.5560294",
"0.5550696",
"0.5545988",
"0.5544398",
"0.55408454",
"0.55338913",
"0.55338913",
"0.5533368",
"0.55216324",
"0.5521537",
"0.5519883",
"0.5517073",
"0.5512198",
"0.5511036",
"0.55089575",
"0.5504079",
"0.55029416",
"0.55022615",
"0.5490768",
"0.5488066",
"0.54858744",
"0.5483504",
"0.54833645",
"0.5483039",
"0.54810464",
"0.5479244",
"0.54755974",
"0.5473327",
"0.54699355",
"0.54677755",
"0.5467623",
"0.54644084",
"0.54537106",
"0.54469144",
"0.5445684",
"0.5443083",
"0.54413414",
"0.54366404",
"0.54366404",
"0.5427128",
"0.54270864",
"0.5415875",
"0.5411128"
] | 0.54350406 | 96 |
Set the test up. | def setup(self):
self.cwd = os.getcwd()
self.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = self.t / dir_path
src_dir = self.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
shutil.copytree(Path(CUR_PATH, "data", "dummy_aea"), Path(self.t, "dummy_aea"))
os.chdir(Path(self.t, "dummy_aea"))
self.runner = CliRunner() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n logging.debug('setting up')",
"def setUp(self):\n logging.debug('setting up')",
"def setUp(self):\n\n self._set_up()",
"def setUp(self):\n MainTests.setUp(self)",
"def setUp(self):\n \n pass",
"def setUp(self):\n\n # setup init variables\n self.init_vars = {\n 'suppress_logfile': True,\n 'verbosity': 0,\n 'mothur_seed': 54321,\n }\n\n # setup directories for testing\n test_dir = os.path.join(os.getcwd(), 'tests')\n self.test_output_dir = os.path.join(test_dir, 'test_output')\n if not os.path.isdir(self.test_output_dir):\n os.makedirs(self.test_output_dir)\n self.test_input_dir = os.path.join(test_dir, 'test_data')\n\n return",
"def setUp(self):\n print(\"New test by Nikolay Melnik\")",
"def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')",
"def setUp(self):\n test_env_setup()",
"def setUp(self):\n\n pass",
"def setUp(self):\n\n pass",
"def setUp(self) :\n pass",
"def setUp(self):\n self.setup_beets()",
"def setUp(self):\n\n return",
"def setUp(self) -> None:\n pass",
"def setUp(self) -> None:\n pass",
"def setUp(self):\n pass #because we dont have anything to setup.",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def _set_up():\n repl._setUp = self.setUp",
"def setUp(self):\n setUp()",
"def setUp(self):\n print('Calling \\'setUp\\'')",
"def setUp(self):\n\n BaseTest.setUp(self)",
"def setUp(self):\n self",
"def setUp(self):\n self",
"def setUp(self):\r\n pass",
"def setup(self):\n # Have to wait for a server connection before we\n # can run the test\n self.wait_for_server_connections(10)",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\n \n \n pass",
"def setUp(self):\r\n pass # nothing used by all\r",
"def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()",
"def setUp(self):\r\n pass # nothing required by all\r",
"def setUp(self):\n # Used to initialize objects that should be re-initialized or\n # re-created for each individual test\n self.t = Task()\n\n self.t.config(\"alias.from\", \"to\")",
"def setUp(self):\n print(\"\\nIn setUp()...\")",
"def setUp(self):\n\t\tself.testCases = [\n\t\t\t{\n\t\t\t\t'show': \"House\",\n\t\t\t\t'episode': 11,\n\t\t\t\t'season': 3,\n\t\t\t\t'title': \"Words and Deeds\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Lost\",\n\t\t\t\t'episode': 21,\n\t\t\t\t'season': 2,\n\t\t\t\t'title': \"?\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t'show': \"Heroes\",\n\t\t\t\t'episode': 15,\n\t\t\t\t'season': 1,\n\t\t\t\t'title': \"Run!\"\n\t\t\t}\n\t\t]",
"def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass",
"def setUp(self):\n super(BasicTestCase, self).setUp()",
"def setUp(self):\n raise NotImplementedError",
"def setUp(self):\n self.db_fd, mainPyUnit.app.config['DATABASE'] = tempfile.mkstemp()\n mainPyUnit.app.config['TESTING'] = True\n self.app = mainPyUnit.app.test_client()\n #mainPyUnit.init_db()",
"def setUp(self):\n\n # Setup for all test cases.\n controllers = com.discover_controllers_on_network()\n self.controller, _, connected = com.connect_robot_with_ipaddr(controllers, '127.0.0.1')\n if not connected:\n print 'Couldn\\'t connect to controller. Test will not be run.'\n sys.exit()\n is_logged_in, _ = user_auth.logon_robot_controller_default(self.controller)\n if not is_logged_in:\n print 'Couldn\\'t log in. Test will not be run.'\n sys.exit()\n\n # Additional setup for some test cases.\n test_desc = self.shortDescription()\n if test_desc == 'Tests edit_and_write_rapid_data_property with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()\n elif test_desc == 'Tests edit_and_write_rapid_data with correct input data.':\n is_master, _, self.mastership = user_mastership.get_master_access_to_controller_rapid(self.controller)\n if not is_master:\n print 'Couldn\\'t get mastership. Test will not run.'\n sys.exit()",
"def setup( self ):",
"def setUp(self) -> None:\n self.engine = EvalHPOA()",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setup(self) -> None:\n pass",
"def setUp(self):\n self.example = Example()",
"def setUpTestCase(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setUp(self) -> None:\n\n self.checker = CheckerBase()",
"def setup(self) -> None:",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"def setUp(self):\n self.hass = get_test_home_assistant()",
"def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }",
"def setUp(self):\n self.t = Task()",
"def setUp(self):\n self.t = Task()",
"def setUp(self):\n super().setUp()\n self.runner = CliRunner()",
"def setUp(self):\r\n super(EETestCase, self).setUp()"
] | [
"0.82482773",
"0.82482773",
"0.81176686",
"0.800283",
"0.7907327",
"0.78918254",
"0.7887326",
"0.7848355",
"0.7842833",
"0.7832785",
"0.7832785",
"0.781454",
"0.78136706",
"0.7806924",
"0.78026885",
"0.78026885",
"0.77940094",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7776961",
"0.7766595",
"0.77608186",
"0.77478987",
"0.7743035",
"0.76929235",
"0.76929235",
"0.768341",
"0.7623276",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.7608938",
"0.75897497",
"0.75282216",
"0.7513549",
"0.7501416",
"0.7496145",
"0.7493589",
"0.7474445",
"0.7467448",
"0.7464891",
"0.7457519",
"0.7449974",
"0.7449959",
"0.74333304",
"0.7428299",
"0.7428299",
"0.7428299",
"0.7425823",
"0.74212027",
"0.74118286",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7410674",
"0.7376384",
"0.7364325",
"0.7359819",
"0.7359819",
"0.7359506",
"0.73563415",
"0.73563415",
"0.73493826",
"0.73490524"
] | 0.0 | -1 |
Tear down the test. | def teardown(self):
os.chdir(self.cwd)
try:
shutil.rmtree(self.t)
except (OSError, IOError):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stopTestRun(self):",
"def tearDown(self):\n logging.debug('tearing down')",
"def tearDown(self):\n logging.debug('tearing down')",
"def stopTest(self, test):",
"def tearDown(self):\n self.teardown_beets()",
"def test_run_ended(self):",
"def tearDown(self):\n self.testbed.deactivate()",
"def tearDown(self):\n self.testbed.deactivate()",
"def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')",
"def tearDown(self):\n\t\tprint(\"end test\")\n\t\tpass",
"def Waive(self):\n self.waived = True\n\n for test in self.subtests:\n test.Waive()\n\n if self.GetState().status == TestState.FAILED:\n self.UpdateState(status=TestState.FAILED_AND_WAIVED)",
"def _tearDown(self):\r\n\r\n if core.FW_conf['connection'].isLeader() and core.FW_conf['settings'].TestRun.BLTEnabledInFollower:\r\n executeInFollower(\"core.FW_conf['blt_ue'].stopCurrentMeasuring()\")\r\n\r\n # stop current measurement if battery is available\r\n if core.FW_conf['connection'].battery is not None and core.FW_conf['connection'].battery.isEnabled():\r\n core.FW_conf['connection'].battery.stopCurrentMeasuring()\r\n\r\n # skip tearDown if systemExit exception has occurred or\r\n # we are stopping execution or teardown skipping is wanted\r\n if not self._raiseSystemExit and not core.FW_conf['should_stop']:\r\n debug.out(\"MarbleTestCase tearDown\")\r\n\r\n self.logApply(core.FW_conf['connection']._tearDown, self)\r\n\r\n for remote in core.FW_conf['remote_connection']:\r\n self.logApply(remote._tearDown, self)",
"def test_terminate_run(self):\n pass",
"def tearDown(self):\n self.brow.quit()",
"def _postTearDown(self):\r\n if not core.FW_conf['should_stop'] and \\\r\n (not core.FW_conf['connection'].isFollower() or core.FW_conf['connection'].isFullBlackBox()):\r\n if core.FW_conf['connection'].currentTcId:\r\n if not (core.FW_conf['connection'].isFullBlackBox() or core.FW_conf['connection'].isFollower()):\r\n core.FW_conf['connection']._getCrashDumps()\r\n elif not core.FW_conf['connection'].isFollower():\r\n core.FW_conf['connection']._getCrashDumpsInBlackBox()\r\n\r\n if not core.FW_conf['connection'].isLeader() and \\\r\n not core.FW_conf['connection'].isFollower():\r\n # disconnect TA server(s) and scripting service(s)\r\n for phone in core.FW_conf['connection']:\r\n phone._tab._disconnectServices()\r\n\r\n # get and remove x-files from remote phone(s)\r\n for remote in core.FW_conf['remote_connection']:\r\n resp = self.logApply(remote._fileDumper.extractDumpFiles)\r\n if resp == False:\r\n remote.warn('Getting X-files (in tearDown) failed: %s.' % resp)\r\n\r\n resp = self.logApply(remote._fileDumper.removeDumpFiles)\r\n if resp == False:\r\n remote.warn('Removing X-files(in tearDown) failed: %s.' % resp)",
"def tearDown(self):\n self.m.shutdown()",
"def tearDown(self):\n self.stop_worker()",
"def test_downgrade_control(self, ping_fixture_all_errs_disconnect):\n\n engine = ping_fixture_all_errs_disconnect\n\n conn = engine.connect()\n conn.close()",
"def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()",
"def stop_fixture(self):\n pass",
"def tearDown(self):\n self.loop.close()",
"def tearDown(self):\n self.loop.close()",
"def tearDown(self):\n self.loop.close()",
"def tearDown(self):\n self.loop.close()",
"def tearDown(self):\n zope.component.testing.tearDown()",
"def tearDown(self):\n pass",
"def tearDown(self) :\n pass",
"def tearDown(self) :\n pass",
"def tearDown(self) :\n pass",
"def tearDown(self):\n self.hass.stop()",
"def tearDown(self):\n self.hass.stop()",
"def tearDown(self):\n self.hass.stop()",
"def tearDown(self):\n self.hass.stop()",
"def stopTest(self, test):\n self.complete_output()",
"def tearDown(self):\r\n pass",
"def tearDown(self):\r\n pass",
"def tearDown(self):\r\n pass",
"def tearDown(self):\r\n pass",
"def tearDown(self):\r\n pass",
"def tearDown(self):\n if hasattr(self.module, '__path__'):\n names = ['teardownPackage', 'teardown_package']\n else:\n names = ['teardownModule', 'teardown_module']\n names += ['tearDown', 'teardown'] \n try_run(self.module, names)",
"def tearDown(self) -> None:\n pass",
"def tearDown(self) -> None:\n pass",
"def tearDown(self) -> None:\n pass",
"def tearDown(self):\n self.teardown_local_site()\n self.teardown_remote_site()\n time.sleep(2)",
"def tearDown(self):\n\t\tpass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass",
"def tearDown(self):\n pass"
] | [
"0.7069817",
"0.69193774",
"0.69193774",
"0.6745571",
"0.672614",
"0.66953135",
"0.66407",
"0.66407",
"0.6625474",
"0.65923214",
"0.6536559",
"0.6518057",
"0.64930147",
"0.64839315",
"0.6469215",
"0.6450778",
"0.6431649",
"0.6419006",
"0.6385628",
"0.6330049",
"0.6329882",
"0.6329882",
"0.6329882",
"0.6329882",
"0.6329622",
"0.63243157",
"0.6313318",
"0.6313318",
"0.6313318",
"0.6309835",
"0.6309835",
"0.6309835",
"0.6309835",
"0.630035",
"0.62988883",
"0.62988883",
"0.62988883",
"0.62988883",
"0.62988883",
"0.62965274",
"0.62929314",
"0.62929314",
"0.62929314",
"0.62870854",
"0.62810844",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404",
"0.6280404"
] | 0.0 | -1 |
Fail when an incorrect attribute is tried to be updated. | def test_set_get_incorrect_path(self):
with pytest.raises(
ClickException, match="Attribute `.*` for .* config does not exist"
):
self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", self.INCORRECT_PATH],
standalone_mode=False,
catch_exceptions=False,
)
with pytest.raises(
ClickException,
match="Attribute `behaviours.dummy.args.behaviour_arg_100500` is not allowed to be updated!",
):
self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
self.INCORRECT_PATH,
str(self.NEW_VALUE),
],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_attribute_method8(self):\n with self.assertRaises(ValueError):\n r1 = Rectangle(10, 10, 10, 10)\n r1.update(2, -3)",
"def test_update_attribute_data(self):\n pass",
"def test_update_attribute_method9(self):\n with self.assertRaises(TypeError):\n r1 = Rectangle(10, 10, 10, 10)\n r1.update(\"put\", \"new\")",
"def test_attribute_not_found(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `non_existing_attribute` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute\",\n \"value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def do_update(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n args = arg.split(\" \")\n if len(args) < 3:\n print(\"** attribute name missing **\")\n return\n if len(args) < 4:\n print(\"** value missing **\")\n return\n setattr(obj, args[2], args[3])\n obj.save()",
"def test_invalid_update_kwarg(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)",
"def test_invalid_update_kwarg(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)",
"def test_update_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.update()\n except KeyError:\n self.assertRaises(KeyError)",
"def test_invalid_update_kwarg(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n with self.assertRaises(ValidationError):\n m0.update(numbers=20)",
"def test_invalid_update_kwarg(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n with self.assertRaises(ValidationError):\r\n m0.update(numbers=20)",
"def test_set_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute = \"value\"",
"async def test_update_missing_field(self):\n await self.collection.create({'id': 'foo', 'value': 'bar'})\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.update('foo', {})\n self.assertEqual(\n 'Error: \"value\": Required', str(cm.exception))",
"def test_update_cart_invalid_attributes(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.update_cart(user_id, cart_id, {'InvalidAttribute': 'Cart2'})\n self.assertEqual('Cart1', self.cart_item_manager.get_cart(user_id, cart_id)['CartName'])",
"def test_deep_update_illegal_update(self):\n # Update with an illegal type\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\" \"\".format(type({}), type(update_with)),\n ):\n dictupdate.update_dict_key_value({}, \"foo\", update_with)\n # Again, but now using OrderedDicts\n for update_with in [42, None, [42], \"bar\"]:\n with self.assertRaisesRegex(\n SaltInvocationError,\n r\"Cannot update {} with a {}.\"\n \"\".format(type(OrderedDict()), type(update_with)),\n ):\n dictupdate.update_dict_key_value(\n {}, \"foo\", update_with, ordered_dict=True\n )",
"def test_put_db_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n with mock.patch('user_profile.models.UserProfile.update') as update:\n update.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)",
"def test_update_args_bad(self):\n r = Rectangle(5, 2)\n d = r.__dict__.copy()\n\n r.update(10)\n d[\"id\"] = 10\n self.assertEqual(r.__dict__, d)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, -5)\n s = \"width must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, -17)\n s = \"height must be > 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, -20)\n s = \"x must be >= 0\"\n self.assertEqual(str(e.exception), s)\n\n with self.assertRaises(ValueError) as e:\n r.update(10, 5, 17, 20, -25)\n s = \"y must be >= 0\"\n self.assertEqual(str(e.exception), s)",
"def test_update_metadata_by_attribute(self):\n pass",
"def test_update_on_unique_field_raises(test_store):\n\n with pytest.raises(NotImplementedError):\n test_store.update(fields={\"name\": \"Andy\"})",
"def test_set_attributes_error(self):\n r = Resources()\n attr_lst = [\"num_wires\", \"num_gates\", \"depth\", \"shots\", \"gate_types\"]\n\n for attr_name in attr_lst:\n with pytest.raises(FrozenInstanceError, match=\"cannot assign to field\"):\n setattr(r, attr_name, 1)",
"def test_do_cell_update_ignores_unknown_fields(self, mock_update):\n client = mock.Mock()\n inventory = mock.Mock()\n inventory.cells = cells.CellManager(mock.ANY,\n mock.ANY,\n 'http://127.0.0.1/')\n client.inventory = mock.Mock(name='inventory')\n client.inventory.return_value = inventory\n invalid_input = Namespace(region=1,\n id=1,\n name='mock_cell',\n invalid=True)\n cells_shell.do_cell_update(client, invalid_input)\n vars(invalid_input).pop('region')\n vars(invalid_input).pop('invalid')\n mock_update.assert_called_once_with(**vars(invalid_input))",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def test_update_metadata_by_attribute1(self):\n pass",
"def test_update_item_incorrect_value_type(test_client, item_with_bad_value):\n\n response = test_client.put(GOOD_ITEM_URL,\n data=json.dumps(item_with_bad_value),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 400\n assert data['error'] == app.BAD_REQUEST",
"def test_submit_bad_data_when_updating_membership(self):\n self.login_as(\"bob\")\n\n # let's try to change bob's membership to ben\n # user is a read-only field so it is simply ignored:\n payload = {\"user\": {\"id\": self.USERS[\"ben\"][\"id\"]}}\n with self.assertNumQueries(6):\n response = self.client.put(self.url, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"user\"][\"id\"], self.USER_ID)\n\n # now, let's try to move bob's membership to another community\n # community is a read-only field so it is also ignored:\n payload = {\"community\": self.COMMUNITIES[\"group2\"][\"id\"]}\n with self.assertNumQueries(6):\n response = self.client.put(self.url, payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"community\"], self.GROUP_ID)\n\n # now, let's try to submit bad value:\n payload = {\"is_admin\": \"Of course!\"}\n with self.assertNumQueries(4):\n response = self.client.put(self.url, payload)\n self.assert_validation_failed(response, data={\n \"is_admin\": [\"Must be a valid boolean.\"]\n })\n self.assertTrue(Membership.objects.get(\n community_id=self.GROUP_ID, user_id=self.USER_ID).is_admin)",
"def test_that_field_required_validations_are_triggered_on_incorrect_attribute_setting(\n self,\n ):\n person = Person(first_name=\"Johnny\", last_name=\"John\")\n\n with pytest.raises(ValidationError) as error:\n person.first_name = \"\" # Simulate an error by force-resetting an attribute\n\n assert error.value.messages == {\"first_name\": [\"is required\"]}",
"def test_primary_key_update_failure(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)",
"def test_update_node_state_smartfail(self):\n pass",
"def test_primary_key_update_failure(self):\n with self.assertRaises(ValidationError):\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)",
"async def test_set_target_humidity_bad_attr(opp):\n state = opp.states.get(ENTITY_CLIMATE)\n assert 67 == state.attributes.get(ATTR_HUMIDITY)\n\n with pytest.raises(vol.Invalid):\n await common.async_set_humidity(opp, None, ENTITY_CLIMATE)\n await opp.async_block_till_done()\n\n state = opp.states.get(ENTITY_CLIMATE)\n assert 67 == state.attributes.get(ATTR_HUMIDITY)",
"def test_property_invalid(self):\n\n self.assertRaises(DataObjectError,\n setattr(self, \"foobar\", \"some value\")\n )",
"def _setUpdateExpected(self, value):\n self.__isUpdateExpected = value",
"def test_primary_key_update_failure(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n with self.assertRaises(ValidationError):\n m0.update(partition=uuid4())",
"def test_update_rule(self):\n pass",
"def test_update_update_has_a_value(self):\n self.Person.drop_collection()\n\n author = self.Person.objects.create(name=\"Test User\")\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update({})\n\n with pytest.raises(OperationError):\n self.Person.objects(pk=author.pk).update_one({})",
"def test_primary_key_update_failure(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n with self.assertRaises(ValidationError):\r\n m0.update(partition=uuid4())",
"async def test_set_only_target_temp_bad_attr(opp):\n state = opp.states.get(ENTITY_CLIMATE)\n assert 21 == state.attributes.get(ATTR_TEMPERATURE)\n\n with pytest.raises(vol.Invalid):\n await common.async_set_temperature(opp, None, ENTITY_CLIMATE)\n\n await opp.async_block_till_done()\n assert 21 == state.attributes.get(ATTR_TEMPERATURE)",
"def test_update_case(self):\n pass",
"def _check_datum(self, attr, datum):\n if getattr(self, attr) != datum:\n raise Exception('%s not consistent for line %i - %i n_par = %i'\n % (attr, self.node_from_code, self.node_to_code, self.parallel_num))",
"async def test_set_only_target_temp_bad_attr(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"temperature\") == 119\n with pytest.raises(vol.Invalid):\n await common.async_set_temperature(opp, None, ENTITY_WATER_HEATER)\n assert state.attributes.get(\"temperature\") == 119",
"def test_patch_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.patch = 234\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_Alpha_setter_invalid(self):\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', -5)\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', 2)",
"def test_update_non_existent(cards_db):\n i = 123 # any number will do, db is empty\n with pytest.raises(InvalidCardId):\n cards_db.update_card(i, Card(summary=\"bar\", owner=\"not me\"))",
"async def test_set_operation_bad_attr_and_state(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"\n with pytest.raises(vol.Invalid):\n await common.async_set_operation_mode(opp, None, ENTITY_WATER_HEATER)\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"operation_mode\") == \"eco\"\n assert state.state == \"eco\"",
"def test_update_customer_fails(self):\n customer = Customer.objects.create(**customer_data)\n\n self.assertTrue(isinstance(customer, Customer))\n\n with self.assertRaises(IntegrityError):\n customer.email = None\n customer.save()",
"def test_updating(self):\n self.p.update(dict(mirrored=\"mirrored\"))\n self.assertEqual(self.p.mirrored, \"mirrored\")\n self.assertEqual(self.p.sampler.__name__, \"mirrored_sampling\")\n\n with self.assertRaises(ValueError):\n self.p.update(dict(nonexist=10))\n\n self.p.update(dict(active=True))\n self.assertEqual(self.p.active, True)\n self.assertEqual(self.p.mirrored, \"mirrored\")\n self.assertEqual(self.p.sampler.__name__, \"mirrored_sampling\")\n\n old_mueff = self.p.mueff\n self.p.update(dict(weights_option=\"equal\"), reset_default_modules=True)\n self.assertEqual(self.p.active, False)\n self.assertEqual(self.p.mirrored, None)\n self.assertEqual(self.p.weights_option, \"equal\")\n self.assertNotEqual(self.p.mueff, old_mueff)",
"def _validate_update_data(self, data):\n return",
"def test_updatebadxvalue(self):\n Square.reset_objects()\n r1 = Square(1, 2, 3, 4)\n r1.update(1, 2, \"foo\")\n self.assertEqual(r1.x, 2)",
"def test_update_unexpected_error(self, data_update, requests_mock, capsys):\n requests_mock.put(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.update(data_url, data=data_update)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out",
"def test_bayes_updates_bad_data(self):\r\n self.assertRaises(ValueError, bayes_updates, self.bad)",
"def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)",
"def test_put_validation_fail(self):\n test_data = {\n 'first_name': 'new_first_name',\n 'last_name': 'new_last_name'\n }\n\n with mock.patch('user_profile.views.profile_validator') as profile_validator:\n profile_validator.return_value = False\n response = self.client.put(self.url, json.dumps(test_data), content_type='application/json')\n self.assertEquals(response.status_code, 400)",
"def test_update_to_non_json():\n starting_db = create_db(STARTING_DB_INPUT)\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n \"this isn't json :(\"\n )",
"def test_update_failure_http_error(self, acme_id, new_name):\n\n api_url = self.get_acme_account_url(acme_id)\n\n # Setup the mocked response\n responses.add(responses.PUT, api_url, status=400)\n\n acme = ACMEAccount(client=self.client)\n\n self.assertRaises(HTTPError, acme.update, acme_id, new_name)",
"async def test_update_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n await self.collection.update('x', {})",
"def test_api_object_update_property(self, api_object):\n attrs_dict = {'uuid_': 'CREATING'}\n api_object.update_public_attrs(attrs_dict)\n assert api_object.uuid_ != 'CREATING'",
"async def test_set_aux_heat_bad_attr(opp):\n state = opp.states.get(ENTITY_CLIMATE)\n assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF\n\n with pytest.raises(vol.Invalid):\n await common.async_set_aux_heat(opp, None, ENTITY_CLIMATE)\n await opp.async_block_till_done()\n\n assert state.attributes.get(ATTR_AUX_HEAT) == STATE_OFF",
"def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)",
"def test_unknown_names_raise_exception(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n with self.assertRaises(TypeError):\r\n tm.update(jon='beard')",
"def test_update_one(self):\n pass",
"def test_update_no_customer(self):\n set_up_db()\n with self.assertRaises(ValueError):\n update_customer_credit(2, 5.50)",
"def test_update_condition_not_defined(self):\n original_alt_info = getattr(self.form, 'alt_field_info', None)\n expected_label = 'alt_test_no_method'\n label_for_used_attrs = 'alt_test_feature'\n test_method = getattr(self.form, 'condition_' + expected_label, None)\n alt_info = getattr(self, 'alt_field_info', None)\n expected = alt_info.get(label_for_used_attrs, None)\n self.form.alt_field_info = alt_info\n self.form.test_condition_response = True\n actual = self.form.get_alt_field_info()\n\n self.assertIsNotNone(alt_info)\n self.assertIsNone(test_method)\n self.assertIsNotNone(expected)\n self.assertIn(expected_label, alt_info)\n self.assertEqual(expected, actual)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_info\n if original_alt_info is None:\n del self.form.alt_field_info",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_reusableitem_changerequest_bad_data(self):\n\n self.client.force_authenticate(user=self.user_1)\n\n # name is empty string\n response = self.client.patch(get_reusable_item_1_url(self), {'name': '', 'link': 'hello'}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # name is None\n response = self.client.patch(get_reusable_item_1_url(self), {'name': None, 'link': 'hello'}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # no values\n response = self.client.patch(get_reusable_item_1_url(self), {}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # no new values\n response = self.client.patch(get_reusable_item_1_url(self), {'name': self.reusableitem_1.name}, format='json')\n\n updated_object = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_update_with_invalid_data(self):\n saved_article = self.create_article()\n url = saved_article[0]\n token = saved_article[2]\n response = self.test_client.put(url, self.article_invalid_data2, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_update_issue_with_invalid_request_data_fails(self):\n response = self.client.patch(\n self.url,\n headers={\"Authorization\": self.test_user_token},\n json={\"issue_description\": TEST_ISSUE_DESCRIPTION, \"issue_name\": \"\"},\n )\n response_json = response.get_json()\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response_json[\"Error\"], \"Unable to update mapping issue category\"\n )\n self.assertEqual(response_json[\"SubCode\"], \"InvalidData\")",
"async def test_set_target_temp_range_bad_attr(opp):\n state = opp.states.get(ENTITY_ECOBEE)\n assert state.attributes.get(ATTR_TEMPERATURE) is None\n assert 21.0 == state.attributes.get(ATTR_TARGET_TEMP_LOW)\n assert 24.0 == state.attributes.get(ATTR_TARGET_TEMP_HIGH)\n\n with pytest.raises(vol.Invalid):\n await common.async_set_temperature(\n opp,\n temperature=None,\n entity_id=ENTITY_ECOBEE,\n target_temp_low=None,\n target_temp_high=None,\n )\n await opp.async_block_till_done()\n\n state = opp.states.get(ENTITY_ECOBEE)\n assert state.attributes.get(ATTR_TEMPERATURE) is None\n assert 21.0 == state.attributes.get(ATTR_TARGET_TEMP_LOW)\n assert 24.0 == state.attributes.get(ATTR_TARGET_TEMP_HIGH)",
"def test_no_uid_causes_error():\n empty = create_db()\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n empty,\n \"some_uid\",\n INP\n )",
"def validateAttribute(self, attributeName):\n if (not attributeName in self._attributes):\n raise pcssErrors.PcssGlobalException(\"Error: attempted to set attribute %s which is not a valid pfa attribute\" % attributeName)",
"def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)",
"def test_no_update_on_data_element(self):\n no_update = self.admitgen.data.attrib['noupdate']\n self.assertEqual(no_update, '1', 'Incorrect noupdate flag')",
"def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AttributeAbility, 'Invalid')\n self.assertRaises(AbilityError, AttributeAbility, '', 3)",
"def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def update_attribute(self, attribute_name, attribute_value, strict=True):\n\n if (attribute_name in self._default_attrs) or (not strict):\n print('Setting attribute \"{}\" to \"{}\"'.format(attribute_name, attribute_value))\n self._attrs[attribute_name] = attribute_value\n else:\n raise(Exception('{} is not a valid attribute.'.format(attribute_name)))",
"def validate_attribute(self, attr):\n self.validate(attr)",
"def test_not_enough_change(self):\n item, change, _ = give_item_and_change('apple', '.2')\n self.assertIsNone(item)\n self.assertEqual(change, 0.2)",
"def test_update_car_bad_data():\n car_data = {\n \"id\": 1,\n \"make\": \"BMWNotValid\", # must be in existing makes\n \"model\": \"longname\" * 10, # must not be longer than 50\n \"year\": 2051, # must be between 1900 and 2050\n \"vin\": \"JH4CU2F60AC794232\",\n }\n response = client.put(\"/1\", data=car_data)\n # These responses are automatically generated by apistar.\n status_bad_request = 400\n assert response.status_code == status_bad_request\n error_messages = response.json()\n assert \"Must be one of\" in error_messages[\"make\"]\n assert \"Must have no more than 50 characters.\" in error_messages[\"model\"]\n assert \"Must be less than or equal to 2050.\" in error_messages[\"year\"]",
"def test_update_customer_invalid_payload(self):\n update_customer_url = reverse(\"customer_detail\", kwargs={\"pk\": 1})\n\n payload = {\"first_name\": \"Dennis\", \"last_name\": \"\", \"is_active\": True}\n\n response = self.client.put(update_customer_url, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_bad_property_setting(self):\n s = State(substance=\"water\")\n with pytest.raises(AttributeError):\n # Should be lowercase p\n s.TP = Q_(400.0, \"K\"), Q_(101325.0, \"Pa\")",
"def test_update_plan_error(self):\n self.subscription.plan.downgrade.side_effect = PlanDowngradeError()\n self.subscription.plan.upgrade.side_effect = PlanUpgradeError()\n with self.assertRaises(SubscriptionPlanNotValid):\n self.subscription.update_plan('new_plan')",
"def test_api_object_failed_property(self, api_object):\n api_object.status = 'FAILED'\n assert api_object.failed\n assert not api_object.creating",
"def test_handle_removals_add_if_named_in_attribute(self):\n self.assertFalse(False)",
"def test_updateallbadvalues(self):\n Square.reset_objects()\n r1 = Square(1, 2, 3, 4)\n r1.update([1], (9, ), True, \"foo\")\n self.assertEqual(r1.size, 1)\n self.assertEqual(r1.x, 2)\n self.assertEqual(r1.y, 3)\n self.assertEqual(r1.id, 4)",
"def test_bad_get_property(self):\n s = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n with pytest.raises(AttributeError):\n s.bad_get",
"def test_bad_email_is_rejected(self):\n self.updated_data['email'] = ''\n self.update_user()\n\n # email should be left as original email address\n self.assertEqual(self.user.email, self.updated_data['email'])\n # And we should get a HTTP 400 with error code.\n self.assertEqual(self.response.status_code, 400)\n # Eventually this needs to return a custom error instead of blank\n # For now the blank error will do\n self.assertEqual(\n self.response.data['email'],\n [ErrorDetail(string='This field may not be blank.', code='blank')]\n )",
"def test_invalid_update_request_with_taken_username(self):\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(self.author.get_key()))\n response: Response = self.client.patch(BASE_URL + '/update/', data={\n 'username': self.temporary_author.username\n })\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, msg=data)\n self.assertEqual(data, {'detail': f\"User '{self.temporary_author.username}' already exists.\"})",
"def test_update_when_unavailable(self):\n self.api.update = Mock(\n \"google_wifi.GoogleWifiAPI.update\", side_effect=self.update_side_effect()\n )\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n sensor.update()\n assert sensor.state is None",
"async def test_set_away_mode_bad_attr(opp):\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"away_mode\") == \"off\"\n with pytest.raises(vol.Invalid):\n await common.async_set_away_mode(opp, None, ENTITY_WATER_HEATER)\n assert state.attributes.get(\"away_mode\") == \"off\"",
"def test_update_community_name_to_an_existing_one_fails(self):\n self.login_as(\"bob\")\n\n bad_payload = self.update_payload.copy()\n bad_payload[\"name\"] = \"group2\"\n\n with self.assertNumQueries(5):\n response = self.client.put(self.url, bad_payload)\n self.assert_validation_failed(response, data={\n \"name\": [\"community with this name already exists.\"]\n })\n self.assertEqual(Community.objects.filter(name=\"group2\").count(), 1)",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_email_is_optional(self):\n self.updated_data['email'] = ''\n self.update_user()\n self.assertEqual(self.user.email, self.updated_data['email'])",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_update_privileges_fails(self):\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update_record(self):\n pass",
"def test_update_http_error(self, data_update, requests_mock, capsys):\n requests_mock.put(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.update(data_url, data=data_update)\n assert 'HTTP error: 300' in capsys.readouterr().out"
] | [
"0.75063664",
"0.7077898",
"0.7065358",
"0.68179995",
"0.67873245",
"0.6754584",
"0.673527",
"0.6715411",
"0.6590745",
"0.658792",
"0.6587453",
"0.655349",
"0.6550303",
"0.65469915",
"0.6511253",
"0.64953804",
"0.6487863",
"0.64297426",
"0.6420575",
"0.6412844",
"0.63404197",
"0.6334349",
"0.6283369",
"0.62432766",
"0.62155575",
"0.621139",
"0.620498",
"0.61900353",
"0.61697483",
"0.61678696",
"0.616205",
"0.6160222",
"0.61553335",
"0.61532456",
"0.6152275",
"0.61509943",
"0.6145643",
"0.6143997",
"0.61373514",
"0.6136116",
"0.613193",
"0.6111549",
"0.6098331",
"0.60898983",
"0.6089341",
"0.60427326",
"0.60179126",
"0.60157895",
"0.60096776",
"0.5996731",
"0.5982332",
"0.59820634",
"0.5957432",
"0.5953479",
"0.59531474",
"0.59356505",
"0.59328365",
"0.5931766",
"0.5923433",
"0.5898712",
"0.5898023",
"0.58961564",
"0.58856547",
"0.58847237",
"0.5856714",
"0.5854037",
"0.5850741",
"0.5850393",
"0.58459085",
"0.58411",
"0.5840496",
"0.583648",
"0.5835186",
"0.5834945",
"0.5826239",
"0.58246213",
"0.5817522",
"0.58131385",
"0.58044845",
"0.5802304",
"0.57903093",
"0.578555",
"0.57851404",
"0.5785016",
"0.5781629",
"0.5775988",
"0.57629275",
"0.57618916",
"0.57565606",
"0.5754639",
"0.5754535",
"0.57544416",
"0.5752877",
"0.57507944",
"0.57490414",
"0.57478255",
"0.57478255",
"0.57478255",
"0.5742208",
"0.57369816"
] | 0.5876722 | 64 |
Load agent config for current dir. | def load_agent_config(self) -> AgentConfig:
agent_loader = ConfigLoader.from_configuration_type(PackageType.AGENT)
with open(DEFAULT_AEA_CONFIG_FILE, "r") as fp:
agent_config = agent_loader.load(fp)
return agent_config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_config(self) -> AgentConfig:\n with cd(self._get_cwd()):\n agent_loader = self.loader()\n path = Path(DEFAULT_AEA_CONFIG_FILE)\n with path.open(mode=\"r\", encoding=\"utf-8\") as fp:\n agent_config = agent_loader.load(fp)\n return agent_config",
"def load_config(self) -> AgentConfig:\n agent_loader = self.loader()\n path = Path(DEFAULT_AEA_CONFIG_FILE)\n with path.open(mode=\"r\", encoding=\"utf-8\") as fp:\n agent_config = agent_loader.load(fp)\n return agent_config",
"def generate(ctx: Context):\n try_to_load_agent_config(ctx)",
"def try_to_load_agent_config(\n ctx: Context, is_exit_on_except: bool = True, agent_src_path: str = None\n) -> None:\n if agent_src_path is None:\n agent_src_path = ctx.cwd\n\n try:\n path = Path(os.path.join(agent_src_path, DEFAULT_AEA_CONFIG_FILE))\n with path.open(mode=\"r\", encoding=\"utf-8\") as fp:\n ctx.agent_config = ctx.agent_loader.load(fp)\n logging.config.dictConfig(ctx.agent_config.logging_config)\n except FileNotFoundError:\n if is_exit_on_except:\n raise click.ClickException(\n \"Agent configuration file '{}' not found in the current directory.\".format(\n DEFAULT_AEA_CONFIG_FILE\n )\n )\n except jsonschema.exceptions.ValidationError:\n if is_exit_on_except:\n raise click.ClickException(\n \"Agent configuration file '{}' is invalid. Please check the documentation.\".format(\n DEFAULT_AEA_CONFIG_FILE\n )\n )\n except AEAEnforceError as e:\n raise click.ClickException(str(e)) # pragma: nocover",
"def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(join(get_current_path(), 'ib.config'))\n\treturn cfg",
"def load_config(self):\n pass",
"def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(os.path.join(get_current_directory(), 'citi.config'))\n\treturn cfg",
"def loader() -> ConfigLoader:\n return ConfigLoader.from_configuration_type(PackageType.AGENT)",
"def loader() -> ConfigLoader:\n return ConfigLoader.from_configuration_type(PackageType.AGENT)",
"def load ( self ):\n files = config.get_or_fail ( 'REPO.config_files' )\n for f in files:\n self.load_file ( f )",
"def readConfig(self):\n filename = self.cfg.get(\"CFG_FILE\")\n\n logging.info(\"Reading configuration file **{}**\".format(filename))\n try:\n # track previous MAP_FILE setting - map only needs to be redrawn if it's changed\n oldmap = self.cfg.get(\"MAP_FILE\") # may be None\n execfile(filename, self.cfg)\n\n # display setings\n out = '\\n'\n for a, b in self.cfg.iteritems():\n # exclude unprintables\n if a is not \"__builtins__\" and a is not \"MAPREF\":\n out = \"{} \\t {}: {}\\n\".format(out, a, b)\n # logging.info\n logging.info(\"Options read from configuration file: {}\".format(out))\n\n self.processMap()\n self.initAgent()\n logging.info(\"Starting agent pre-processing...\")\n self.processPrefs()\n self.setStart(self.cfg.get(\"START\"))\n self.setGoal(self.cfg.get(\"GOAL\"))\n\n if self.gui is not None: # reset has been called from the gui\n self.gui.setLmap(self.lmap)\n if not oldmap == self.cfg.get(\"MAP_FILE\"):\n self.gui.vmap.drawMap(self.lmap)\n self.hdlReset() # includes resetVars\n else:\n self.resetVars() # no attempt to update GUI\n\n\n except p4.BadMapException:\n self.updateStatus(\"Unable to load map: \" + self.cfg.get(\"MAP_FILE\"))\n except p4.BadAgentException:\n self.updateStatus(\"Unable to load agent: \" + self.cfg.get(\"AGENT_FILE\"))\n except:\n # unexpected error\n logging.error(\"Trace-back: \\n {}\".format(traceback.format_exc()))\n self.updateStatus(\"Problem reading config file!\")",
"def load_config(args):\n config = dict()\n #loc = [conf, os.curdir+\"config.json\", ]\n locations = [ os.curdir, os.path.expanduser(\"~\"), \"/etc/failhadoop\",\n os.environ.get(\"FAILHADOOP_ROOT\") ]\n# if args.conf:\n# locations.append(args.conf)\n\n for loc in locations:\n try:\n with open(os.path.join(loc,\"config.json\")) as source:\n conf = json.load(source)\n config.update(conf)\n except IOError:\n pass\n except:\n print(\"Cannot load config from any of the locations {0}\".format(locations))\n try:\n with open(args.conf) as source:\n conf = json.load(source)\n config.update(conf)\n except IOError:\n print(\"Cannot load config from any of the locations {0}\".format(locations))\n\n # Override config elements from command line\n for a in vars(args):\n config[a] = getattr(args,a)\n\n return config",
"def __init__(self, config_parser, **kwargs):\n BaseAgent.__init__(self, config_parser)\n\n self.SERVICE_ID = config_parser.get('agent', 'SERVICE_ID')\n self.GENERIC_DIR = config_parser.get('agent', 'CONPAAS_HOME')\n self.VAR_CACHE = config_parser.get('agent', 'VAR_CACHE')\n self.CODE_DIR = join(self.VAR_CACHE, 'bin')\n self.VOLUME_DIR = '/media'\n self.env = {}\n self.processes = {}",
"def test_agent_config_updated(self):\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n with Path(self._get_cwd(), DEFAULT_AEA_CONFIG_FILE).open() as fp:\n agent_config = loader.load(fp)\n assert DefaultMessage.protocol_id in agent_config.protocols\n assert ERROR_SKILL_PUBLIC_ID in agent_config.skills",
"def load_config(self, cfg: ConfigParser):\n log.debug('loading configuration file')\n section = cfg['DEFAULT']\n # use host-specific configuration, if any\n if self.hostname in cfg:\n section = cfg[self.hostname]\n self.path = os.path.abspath(os.path.expanduser(section['repo_dir']))\n self.gpg_key_id = section['gpg_key_id']\n self.ignored_files = section['ignored_files'].split(',')\n self.ignored_files.append('.gitkeep')",
"def load_agents(self, agents):\n self.agents = agents",
"def LoadCurrentConfig(options, from_dir=None):\n if not from_dir:\n from_dir = os.curdir\n path = os.path.realpath(from_dir)\n while not os.path.exists(os.path.join(path, options.config_filename)):\n next = os.path.split(path)\n if not next[1]:\n return None\n path = next[0]\n client = GClient(path, options)\n client._LoadConfig()\n return client",
"def load_config():\n config = ConfigParser()\n config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))\n return config",
"def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)",
"def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))",
"def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)",
"def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n self.update_attributes_from_config(conf)",
"def load_config(self) -> Dict[str, Any]:\n # Load all configs\n config: Dict[str, Any] = self.load_from_files(self.args.get(\"config\", []))\n\n return config",
"def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)",
"def config_load(env, config_file=None, config_dir=None):\n config = {}\n conf_files = list_files(config_dir)\n if config_file:\n conf_files.insert(0, config_file)\n for f in conf_files:\n config.update(config_merge(env, f))\n config['env'] = env\n return config",
"def load_config():\n\t\ttry:\n\t\t\tconf = ConfigParser()\n\n\t\t\tconfig_path = get_config_path()\n\t\t\tconf.read(config_path)\n\n\t\t\t# save references to conf, and config_path in class variables\n\t\t\tConfig.config_path = config_path\n\t\t\tConfig.conf = conf\n\n\t\t\tConfig.source_dir = conf.get('paths', 'source_dir')\n\t\t\tConfig.lyrics_dir = conf.get('paths', 'lyrics_dir')\n\n\t\t\tConfig.save_to_file = conf.getboolean('actions', 'save_to_file')\n\t\t\tConfig.save_to_tag = conf.getboolean('actions', 'save_to_tag')\n\n\t\t\tConfig.overwrite = conf.getboolean('actions', 'overwrite')\n\n\t\t\t# Load all the sources\n\t\t\tConfig.lyric_wikia = conf.getboolean('sources', 'lyric_wikia')\n\t\t\tConfig.musix_match = conf.getboolean('sources', 'musix_match')\n\t\t\tConfig.lyricsmode = conf.getboolean('sources', 'lyricsmode')\n\t\t\tConfig.az_lyrics = conf.getboolean('sources', 'az_lyrics')\n\n\t\t\t# Loading this with user config, we need to call the load_config only once at start.\n\t\t\tConfig.lyric_files_in_dir = glob2.glob(os.path.join(Config.lyrics_dir, '**/*.txt'))\n\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to load config.')\n\t\t\tprint(e)",
"def load_experiment(self):\n load_dir = select_dir(os.getcwd())\n if load_dir is not None:\n if os.path.isfile(os.path.join(load_dir, 'conf', 'config')):\n self.load_main(load_dir)\n else:\n msg_window('missing conf/config file, not experiment directory')\n return\n\n if self.t is None:\n self.t = Tabs(self)\n self.vbox.addWidget(self.t)\n self.t.clear_configs()\n self.t.load_conf(load_dir)\n\n self.set_experiment(True)\n else:\n msg_window('please select valid conf directory')",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def __read_config(self):\n with open(self.config_file, 'r') as data_file:\n dict = json.load(data_file)\n self.ibooks_doc_root = dict[\"ibooks_doc_root\"]\n self.library_folder = dict[\"library_folder\"]\n self.annotation_folder = dict[\"annotation_folder\"]\n self.tmp_dir = dict[\"tmp_dir\"]",
"def load_config(self):\n DEFAULT_CONFIG = {\n \"test_times\": 30,\n \"perfherder_protocol\": \"http\",\n \"perfherder_host\": \"local.treeherder.mozilla.org\",\n \"perfherder_client_id\": \"\",\n \"perfherder_secret\": \"\",\n \"perfherder_repo\": \"mozilla-central\",\n \"dashboard_host\": \"\",\n \"dashboard_ssh\": \"\"\n }\n logger_hasal.info('Loading config file from {} ...'.format(self._config_path))\n if os.path.isfile(self._config_path):\n with open(self._config_path, 'r') as f:\n ret_obj = json.load(f)\n test_times = ret_obj.get('test_times', '')\n perf_protocol = ret_obj.get('perfherder_protocol', '')\n perf_host = ret_obj.get('perfherder_host', '')\n perf_repo = ret_obj.get('perfherder_repo', '')\n if test_times:\n logger_hasal.info('Test Times: {}'.format(test_times))\n if perf_protocol:\n logger_hasal.info('Perfherder Protocol: {}'.format(perf_protocol))\n if perf_host:\n logger_hasal.info('Perfherder Host: {}'.format(perf_host))\n if perf_repo:\n logger_hasal.info('Perfherder Repo: {}'.format(perf_repo))\n return ret_obj\n else:\n with open(self._config_path, 'w') as f:\n f.write(json.dumps(DEFAULT_CONFIG))\n logger_hasal.info('No config.json file {}. Generate default config.'.format(self._config_path))\n return DEFAULT_CONFIG",
"def init_agents(self, config):\n agents = []\n os.chdir(self.load_directory) # Changes current working directory to load_directory\n file_list = glob.glob(\"*.h5\") # Returns all files in directory with ending .h5\n file_list = sorted(file_list, key=return_episode_num)\n for file in file_list:\n filename = \"../models/\" + file # The path is used in actor which is placed in agent\n actor = Actor(config.learning_rate, config.epsilon, config.decay_rate, config.board_size, config.nn_dims, config.activation, config.optimizer, config.loss_function, filename)\n actor.load(filename)\n agents.append(actor)\n return agents",
"def load_config():\n path = os.environ.get('WORKER_CONFIG')\n if not path:\n path = _get_default_config_path()\n\n mod_name, file_ext = os.path.splitext(os.path.split(path)[-1])\n config = imp.load_source(mod_name, path)\n return config",
"def cli_load_config(self, args) -> str:\n path = args.config_path\n if not os.path.isfile(path):\n return error(\"Path {} DNE\".format(path))\n\n try:\n self.config = config.from_file(path)\n return ok(\"Configuration loaded from {}\".format(path))\n except FileNotFoundError as err:\n return error(\"Could not load file: {}\".format(err))\n except json.JSONDecodeError as json_err:\n return error(\"Could not parse json file {}\".format(json_err))",
"def load_config():\n here = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(here, 'config.json')\n with open(config_path, encoding='utf-8') as f:\n return json.load(f)",
"def load_config():\n config_file = os.path.dirname(os.path.abspath(__file__)) + '/../config.json'\n with open(config_file, 'r') as f:\n config = json.load(f)\n\n return config",
"def loadconfig():\n CONFIG['static_folder'] = str(Path(Path(APP.root_path).parent, 'static'))\n\n for cfile in Path(APP.instance_path).iterdir():\n if cfile.name[-5:] == '.json' and cfile.name != 'config.json':\n name = cfile.name[:-5]\n LOG.debug(\"Loading \" + name)\n with cfile.open() as json_data_file:\n CONFIG[name] = json.load(json_data_file)",
"def config():\n if \"conf_file\" not in __opts__:\n return {}\n if os.path.isdir(__opts__[\"conf_file\"]):\n if salt.utils.platform.is_proxy():\n gfn = os.path.join(\n __opts__[\"conf_file\"], \"proxy.d\", __opts__[\"id\"], \"grains\"\n )\n else:\n gfn = os.path.join(__opts__[\"conf_file\"], \"grains\")\n else:\n if salt.utils.platform.is_proxy():\n gfn = os.path.join(\n os.path.dirname(__opts__[\"conf_file\"]),\n \"proxy.d\",\n __opts__[\"id\"],\n \"grains\",\n )\n else:\n gfn = os.path.join(os.path.dirname(__opts__[\"conf_file\"]), \"grains\")\n if os.path.isfile(gfn):\n log.debug(\"Loading static grains from %s\", gfn)\n with salt.utils.files.fopen(gfn, \"rb\") as fp_:\n try:\n return salt.utils.data.decode(salt.utils.yaml.safe_load(fp_))\n except Exception: # pylint: disable=broad-except\n log.warning(\"Bad syntax in grains file! Skipping.\")\n return {}\n return {}",
"def load_config():\n config_file = os.path.join(\n Path(os.path.dirname(os.path.realpath(__file__))).parent,\n \"config.ini\"\n )\n if not os.path.exists(config_file):\n raise FileNotFoundError(config_file)\n app_config = configparser.ConfigParser()\n app_config.read(config_file)\n return app_config['uberoo']",
"def read_settings(self):\n config = ConfigParser.SafeConfigParser()\n config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')\n\n # Cache related\n cache_path = config.get('linode', 'cache_path')\n self.cache_path_cache = cache_path + \"/ansible-linode.cache\"\n self.cache_path_index = cache_path + \"/ansible-linode.index\"\n self.cache_max_age = config.getint('linode', 'cache_max_age')",
"def load_config(self) -> Dict[str, Any]:\n config: Dict[str, Any] = {}\n # Now expecting a list of config filenames here, not a string\n for path in self.args.config:\n logger.info('Using config: %s ...', path)\n # Merge config options, overwrting old values\n config = deep_merge_dicts(self._load_config_file(path),config)\n\n if 'internals' not in config:\n config['internals'] = {}\n\n logger.info('Validating configuration ...')\n self._validate_config(config)\n\n # Set strategy if not specified in config and or if it's non default\n if self.args.strategy != constant.DEFAULT_STRATEGY or not config.get('strategy'):\n config.update({'strategy': self.args.strategy})\n if self.args.strategy_path:\n config.update({'strategy_path': self.args.strategy_path})\n\n # Load Common configuration\n config = self._load_common_config(config)\n\n # Load Backtesting\n config = self._load_backtesting_config(config)\n\n # Load Edge\n config = self._load_edge_config(config)\n\n # Load Hyperopt\n config = self._load_hyperopt_config(config)\n\n # Set runmode\n if not self.runmode:\n self.runmode = RunMode.DRY_RUN if config.get('dry_run', True) else RunMode.LIVE\n\n config.update({'runmode': self.runmode})\n return config",
"def load_config():\n global config\n\n with open(\"config.json\") as f:\n json_config = f.read()\n f.close()\n config = json.loads(json_config)",
"def load_conf(self):\n self._read_uconf()",
"def load_config(self):\n try:\n path = os.environ.get('HOME') + '/.tinyserver'\n if os.path.exists(path):\n with open(path) as f:\n JSON = json.load(f)\n self.rooms = jsonpickle.decode(JSON)\n print('Rooms config loaded...')\n \n except Exception as e:\n print(\"Error while loading server config {0}\".format(e))",
"def load_config(self, context: ResourceCommandContext, config_file_location: str) -> None:\n enqueue_keep_alive(context)\n self.handler.load_config(context, config_file_location)",
"def load_config(self):\n with open(self.TEMPERATURE_CONFIG_FILE_PATH, 'r') as file:\n self.config = json.load(file)",
"def onLoadConfig(self, inifile):\n cp = ConfigParser(self.defaults)\n cp.readfp(inifile)\n depth = self.getDepth(cp)\n self.baseurl = urljoin(self.inipath, depth)\n # create child loaders for any other l10n.ini files to be included\n try:\n for title, path in cp.items('includes'):\n # skip default items\n if title in self.defaults:\n continue\n # add child config parser\n self.addChild(title, path, cp)\n except NoSectionError:\n pass\n # try to load the \"dirs\" defined in the \"compare\" section\n try:\n self.dirs.extend(cp.get('compare', 'dirs').split())\n except (NoOptionError, NoSectionError):\n pass\n # try getting a top level compare dir, as used for fennec\n try:\n self.tld = cp.get('compare', 'tld')\n # remove tld from comparison dirs\n if self.tld in self.dirs:\n self.dirs.remove(self.tld)\n except (NoOptionError, NoSectionError):\n self.tld = None\n # try to set \"all_path\" and \"all_url\"\n try:\n self.all_path = cp.get('general', 'all')\n self.all_url = urljoin(self.baseurl, self.all_path)\n except (NoOptionError, NoSectionError):\n self.all_path = None\n self.all_url = None\n return cp",
"def load_args(self):\n\n # retrieve module path\n dir_path = os.path.dirname(os.path.abspath(__file__))\n dir_path = os.path.split(dir_path)[0]\n # get all the default yaml configs with glob\n dir_path = os.path.join(dir_path, 'configs', '*.yml')\n\n # -- From default yapt configuration\n self._defaults_path = {}\n self._defaults_yapt = OmegaConf.create(dict())\n for file in glob.glob(dir_path):\n # split filename from path to create key and val\n key = os.path.splitext(os.path.split(file)[1])[0]\n self._defaults_path[key] = file\n # parse default args\n self._defaults_yapt = OmegaConf.merge(\n self._defaults_yapt, OmegaConf.load(file))\n\n # -- From command line\n self._cli_args = OmegaConf.from_cli()\n if self._cli_args.config is not None:\n self.default_config = self._cli_args.config\n del self._cli_args['config']\n self.console_log.warning(\"override default config with: %s\", self.default_config)\n\n # -- From experiment default config file\n self._default_config_args = OmegaConf.create(dict())\n if self.default_config is not None:\n self._default_config_args = OmegaConf.load(self.default_config)\n\n # -- Merge default args\n self._args = OmegaConf.merge(\n self._defaults_yapt,\n self._default_config_args)\n\n # -- Resolve interpolations to be sure all nodes are explicit\n # self._args = OmegaConf.to_container(self._args, resolve=True)\n # self._args = OmegaConf.create(self._args)\n\n # -- make args structured: it fails if accessing a missing key\n OmegaConf.set_struct(self._args, True)",
"def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))",
"def dump_config(self, agent_config: AgentConfig) -> None:\n agent_loader = self.loader()\n path = Path(DEFAULT_AEA_CONFIG_FILE)\n\n with path.open(mode=\"w\", encoding=\"utf-8\") as fp:\n agent_loader.dump(agent_config, fp)",
"def _loadConfig(self):\n self._packRoot = getattr(sys, \"_MEIPASS\", path.abspath(path.dirname(__file__)))\n rootDir = path.abspath(path.join(self._packRoot, path.pardir))\n logger.debug(\"MOTools root dir is: %s\" % rootDir)\n\n metConf = path.join(rootDir, \"met_config\", \"met_config.json\")\n mainConf = path.join(rootDir, \"main_config.json\")\n userConf = path.join(rootDir, \"user_config.json\")\n\n self._confData = {\n \"MET\": {\"path\": metConf, \"config\": {}, \"loaded\": False},\n \"MAIN\": {\"path\": mainConf, \"config\": {}, \"loaded\": False},\n \"USER\": {\"path\": userConf, \"config\": {}, \"loaded\": False},\n }\n\n for confGroup in self._confData:\n confFile = self._confData[confGroup][\"path\"]\n logger.debug(\"Loading %s config file\" % confGroup)\n if path.isfile(confFile):\n jsonData = {}\n try:\n with open(confFile, mode=\"r\") as inFile:\n jsonData = json.loads(inFile.read())\n if \"config\" in jsonData:\n self._confData[confGroup][\"config\"] = jsonData[\"config\"]\n self._confData[confGroup][\"loaded\"] = True\n except Exception as e:\n logger.error(\"Failed to parse config JSON data.\")\n logger.error(str(e))\n return False\n else:\n logger.debug(\"No file: %s\" % confFile)\n\n # if not self._confData[\"MAIN\"][\"loaded\"]:\n # logger.error(\"Failed to load minimum configuration file main_config.json.\")\n # raise RuntimeError\n\n return",
"def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']",
"def load_config(self):\n if self.get_vrps():\n self.process_vrps()",
"def _loadConfigFiles(self):\n for conf in self._configFiles():\n self.configManager.load(conf)",
"def antenny_config_load(self, name: str = None):\n return self.antenny_config.load(name)",
"def load_config(self):\n try:\n self.config = yaml.load(open(roslib.packages.get_pkg_dir('human_activities') + '/config/config.ini', 'r'))\n print \"config loaded:\", self.config.keys()\n\n return True\n except:\n print \"no config file found in /human_activities/config/config.ini\"\n return False",
"def init(self, args, **kwargs):\n # Retrieve configuration file and directory or set defaults.\n conf_file = os.path.expanduser(\n args._get('conf_file', kwargs.pop('conf_file', DEFAULT_CONF_FILE)))\n conf_dir = os.path.expanduser(\n args._get('conf_dir', kwargs.pop('conf_dir', DEFAULT_CONF_DIR)))\n commands = [value for (arg, value) in sorted(args) if arg.startswith('command')]\n\n # Load main configuration file.\n if os.path.exists(conf_file):\n self.load_cmd_file(conf_file)\n\n # Load intermediary configuration files.\n if os.path.isdir(conf_dir):\n self.load_dir(conf_dir, clg.config, commands)",
"def init_configs(self):\n\n # get current location\n self.script_dir = os.path.dirname(__file__)\n\n # load configuration file\n with open(os.path.join(self.script_dir, \"config.json\")) as f:\n self.configs = json.load(f)\n \n # load some configs as attributes\n self.resource_folder = os.path.join(self.script_dir, self.configs[\"resource_path\"], self.resource_type, self.language)\n self.pre_processed_folder = os.path.join(self.resource_folder, self.configs[\"pre_processed_path\"])\n self.results_folder = os.path.join(self.resource_folder, self.configs[\"results_path\"])\n self.chunk_size = self.configs[\"resources\"][self.resource_type][\"chunk_size\"]",
"def agent_configuration(self) -> pulumi.Output['outputs.AgentConfigurationResponse']:\n return pulumi.get(self, \"agent_configuration\")",
"def load_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getOpenFileName(self,\n \"Open Config\",\n CONFIG_DIR,\n \"Config Files (*.cfg)\")\n else:\n file_path = path\n self._load_state(file_path)\n #self.write_text(\"Loaded config @ {}\".format(file_path))",
"def configure_agent_to_load_jacoco_agent(self):\n zones = parse('zones[*]').find(self.config)\n for zone in zones:\n hosts = parse('pods[*].clusters[*].hosts[*]').find(zone)\n for host in hosts:\n hostname = host.value['url'].split('/')[-1]\n connection = {'hostname': hostname, 'username': host.value['username'],\n 'password': host.value['password']}\n cmd = r\"sed -i -e 's|/bin/java -Xms|/bin/java -javaagent:/tmp/jacoco-agent.jar=destfile=/tmp/jacoco-it.exec -Xms|' /usr/lib/systemd/system/cosmic-agent.service\"\n self.zone = zone.value['name']\n src_file = self.workspace + \"/target/jacoco-agent.jar\"\n self._scp_put(srcfile=src_file, destfile=\"/tmp\", **connection)\n self._ssh(cmd=cmd, **connection)\n self._ssh(cmd=\"systemctl daemon-reload\", **connection)\n print(\"==> Agent configured\")",
"def _load_config(self, sshconfig=\"~/.ssh/config\"):\n rpath = os.path.realpath(os.path.expanduser(sshconfig))\n try:\n os.stat(rpath)\n except OSError:\n return\n\n try:\n with codecs.open(rpath, \"rb\", \"utf-8\") as f:\n clines = f.readlines()\n except:\n print(\"!! Failed to parse %s\" % (rpath))\n return\n\n self._config.parse(clines)\n print(\"** Loaded ssh config %s\" % (rpath))",
"def load_config(args, path=\".\"):\n with open(path + \"/config/\" + args.config, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n args.__dict__[key] = value",
"def _load_config(self, sshconfig=\"~/.ssh/config\"):\n rpath = os.path.realpath(os.path.expanduser(sshconfig))\n try:\n os.stat(rpath)\n except OSError:\n return\n\n try:\n with codecs.open(rpath, \"rb\", \"utf-8\") as f:\n clines = f.readlines()\n except:\n print(\"!! Failed to parse %s\" % (rpath))\n return\n\n self._config.parse(clines)\n if self.verbose: print(\"** Loaded ssh config %s\" % (rpath))",
"def load(self):\n try:\n _config_file = open(self.config, 'r+')\n data = json.loads(_config_file.read())\n except (ValueError, IOError):\n data = {}\n\n self.update(data)",
"def _load_config():\n fname = _get_config_fname()\n if fname is None or not op.isfile(fname):\n return dict()\n with open(fname, 'r') as fid:\n config = json.load(fid)\n return config",
"def load_config(self, filename, basedir = '.'):\n #find absolute path for config file\n (f, filepath) = self.find_file(filename, [basedir, __VALIDATA_ETC__])\n if filepath in self.include:\n raise ConfigError('Recursively include config file \"%s\"!' % filepath)\n self.include.add(filepath)\n cfg = yaml.load(f)\n f.close()\n\n #decide base directory for current config file\n basedir = dirname(filepath)\n\n #get log file path\n if '__logfile' in cfg:\n logfile = cfg['__logfile']\n if logfile[0] != '/':\n logfile = basedir + '/' + logfile\n self.logfile = logfile\n\n #check if there's any external reference\n for key in cfg:\n #ignore keywords\n if key[:2] == '__':\n continue\n elif key[:1] == '_':\n if isinstance(cfg[key], list):\n continue\n #load external reference\n refname = cfg[key]\n (f, filepath) = self.find_file(refname, [basedir, __VALIDATA_ETC__])\n cfg[key] = [x.rstrip('\\r\\n').decode('utf8') for x in f if x.rstrip('\\r\\n') != '']\n f.close()\n print 'Reference file \"%s\" loaded.' % refname\n\n #load include file(s)\n if '__include' in cfg:\n include = cfg['__include']\n del cfg['__include']\n if not isinstance(include, list):\n include = [include]\n tmp = {}\n for i in include:\n tmp.update(self.load_config(i, basedir))\n tmp.update(cfg)\n cfg = tmp\n\n print 'Config file \"%s\" loaded.' % filename\n return cfg",
"def load_config(args):\n cfg=get_cfg()\n # load from yaml\n if args.cfg_file is not None:\n cfg.merge_from_file(args.cfg_file)\n\n #load from cmd\n # print(args.opts)\n # if args.opts is not None:\n # cfg.merge_from_file(args.opts)\n\n # make a checkpoint dir\n # cu.make_checkpoint_dir(cfg.OUTPUT_DIR,cfg)\n return cfg",
"def load_config(logdir):\n with open(os.path.join(logdir, 'config.yml'), 'r') as f:\n return Namespace(**yaml.load(f))",
"def _load_config(self, args: argparse.Namespace):\n #\n # Load a config, filename may or may-not be provided...\n #\n try:\n self._config = TortugaScriptConfig.load(args.config)\n\n except ConfigException as ex:\n print(str(ex))\n sys.exit(0)\n\n #\n # Override the config with any provided argument values\n #\n if args.url:\n self._config.url = args.url\n if args.username:\n self._config.username = args.username\n if args.password:\n self._config.password = args.password\n if args.token:\n self._config.token = args.token\n self._config.verify = args.verify",
"def load_config():\n global config\n with open('config.yml', 'r') as file:\n config = yaml.load(file)",
"def read_config_file(self, chaindir, instances):\n self.instances = instances\n if not os.path.exists(chaindir):\n raise Exception(\"Chain configuration directory is non-existent or not readable\")\n self.config_reader = ConfigParser.ConfigParser()\n self.read_dir(chaindir)\n self.create_chain_instances()\n self.wire_chains()\n self.start_chains()",
"def preload_all_configs(self):\n for _, _, filenames in os.walk(self.configDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n configID = filename[0:-3]\n self.load_config(configID)",
"def __load_config(runtime_env):\n config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"config.ini\")\n if not os.path.exists(config_file):\n raise FileNotFoundError(config_file)\n _app_config = configparser.ConfigParser()\n _app_config.read(config_file)\n\n # Evaluate\n _app_config = _app_config[runtime_env]\n return _app_config",
"def get_default_config(self):\n config = super(PuppetAgentCollector, self).get_default_config()\n config.update({\n 'yaml_path': '/var/lib/puppet/state/last_run_summary.yaml',\n 'path': 'puppetagent',\n })\n return config",
"def get_config():\n global _config_read\n config = _DEFAULT_CONFIG\n if _config_read:\n return config\n\n def _update(source, delta):\n if isinstance(source, dict):\n assert isinstance(delta, dict)\n for k in source.keys():\n if k in delta:\n source[k] = _update(source[k], delta[k])\n for k in delta.keys():\n # Catch name errors in config file.\n assert k in source\n else:\n source = delta\n return source\n\n config = _DEFAULT_CONFIG\n for curr_dir in os.curdir, os.path.expanduser('~'):\n path = os.path.join(curr_dir, '.torchrayrc')\n if os.path.exists(path):\n with open(path, 'r') as file:\n config_ = json.load(file)\n _update(config, config_)\n break\n\n _config_read = True\n return config",
"def load(file):\n _config.load(file)",
"def load_conf(self):\n\n self.load_file(self.ini_file)\n self.files = []\n conf_file = open(self.ini_file, \"r\")\n for l in conf_file:\n self.files.append(l.strip())\n conf_file.close()",
"def config():\n global base_dir, log_path\n\n # Set paths\n base_dir = os.path.dirname(os.path.realpath(__file__))\n cfg.path = base_dir + '/config.json'\n log_path = base_dir + '/log.log'\n\n # Start logging\n logging.basicConfig(filename=log_path, format='%(asctime)-16s | %(levelname)-5s | %(message)s', level=logging.DEBUG)\n sys.excepthook = _excepthook\n\n # Load configuration\n cfg.load()\n logging.info('Loaded configuration')\n\n # Print configuration and check if is complete\n cfg.print()\n if not cfg.check:\n logging.info('Exiting...')\n sys.exit(1)",
"def _loadconfig(self):\n\n # Get the Topology, from the topology layout file\n topo = {}\n with open(self._autoconfig_filename, \"r\") as stream:\n try:\n topo = yaml.load(stream)\n if \"metadata\" in topo:\n self._metadata = topo[\"metadata\"]\n except yaml.YAMLError as exc:\n raise RuntimeError(\n \"Couldn't read the Auto config file {}.\".format(\n self._autoconfig_filename, exc\n )\n )\n\n systemfile = self._rootdir + self._metadata[\"system_config_file\"]\n if self._clean is False and os.path.isfile(systemfile):\n with open(systemfile, \"r\") as sysstream:\n try:\n systopo = yaml.load(sysstream)\n if \"nodes\" in systopo:\n self._nodes = systopo[\"nodes\"]\n except yaml.YAMLError as sysexc:\n raise RuntimeError(\n \"Couldn't read the System config file {}.\".format(\n systemfile, sysexc\n )\n )\n else:\n # Get the nodes from Auto Config\n if \"nodes\" in topo:\n self._nodes = topo[\"nodes\"]\n\n # Set the root directory in all the nodes\n for i in self._nodes.items():\n node = i[1]\n node[\"rootdir\"] = self._rootdir",
"def read_config() -> dict:\n if not os.path.exists(Vars.default_config_path):\n raise Exception(\"You should run many_pynb.run at first.\")\n\n with open(Vars.default_config_path, \"r\") as f:\n config = json.load(f)\n return(config)",
"def load_config():\n config = configparser.ConfigParser()\n config.read('config.ini')\n return config",
"def load_model_config(model_dir):\n config_path = _get_config_path(model_dir)\n with open(config_path, \"r\") as config_file:\n return json.load(config_file)",
"def agent_init(self):\n pass",
"def loadconfig(self, configfile=None):\n realconfig = configfile\n if configfile is None:\n realconfig = Infopage.DEFAULT_CFGFILE\n try:\n fh = io.open(realconfig, 'r')\n config = json.load(fh)\n self.config.update(config)\n except IOError as e:\n if configfile is None:\n # ignore if the default config does not exist\n pass\n else:\n raise e",
"def get_config(self, cwd):\n path = Path(cwd) if cwd else Path.cwd()\n if path.is_file():\n conf_file = path\n cwd = path.parent\n else: # Do a reverse search on parent folders\n for _ in range(0, 4):\n if not os.path.isdir(path):\n raise ValueError(f'Path \"{path}\" is not a folder')\n if (path / self.CONF_FILE).is_file():\n break\n path = path.parent\n else:\n raise ValueError(f\"Not found any {self.CONF_FILE} under {path}\")\n conf_file = path / self.CONF_FILE\n cwd = path\n try:\n with open(conf_file) as open_file:\n terrafile = yaml.safe_load(open_file)\n if not terrafile:\n raise ValueError('{} is empty'.format(path))\n except IOError as error:\n sys.stderr.write('Error loading Terrafile: {}\\n'.format(error.strerror))\n sys.exit(1)\n except ValueError as error:\n sys.stderr.write('Error loading Terrafile: {}\\n'.format(error))\n sys.exit(1)\n config = terrafile.pop('setup', dict())\n config['cwd'] = cwd\n self._config = config\n self._modules = terrafile",
"def get_config_dir(agent_version):\n if int(agent_version) == 5:\n return \"/etc/dd-agent\"\n else:\n return \"/etc/datadog-agent\"",
"def load(self):\n config_dict = {}\n with open(\n os.path.join(\n os.path.dirname(\n os.path.abspath(\n inspect.stack()[0][1]\n )\n ),\n \"config.txt\"), 'r') as config_file:\n for line in config_file:\n if not line.startswith('#'):\n line = line.strip().split('=', 1)\n if len(line) == 2:\n config_dict[line[0]] = line[1]\n return config_dict",
"def setup(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n # copy the 'packages' directory in the parent of the agent folder.\n shutil.copytree(Path(CUR_PATH, \"..\", \"packages\"), Path(cls.t, \"packages\"))\n\n os.chdir(cls.t)\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"init\", \"--author\", AUTHOR],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"create\", \"--local\", cls.agent_name],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n # add connection first time",
"def load(self):\n with open(self.conf_fname, \"r\") as fd:\n config = json.load(fd)\n \n return config",
"def parse(self):\n try:\n with open(self.path, 'r') as ymlfile:\n self.__cfg = yaml.load(ymlfile)\n except IOError:\n self.log(\"File {0} not found -- aborting\".format(self.path))\n raise ConfigFileException",
"def config_parse_file():\n global ANGELCO_EMAIL, ANGELCO_PASSWORD\n\n print(\"Parsing the config file...\")\n config = configparser.ConfigParser()\n with open('dwh.cfg') as configfile:\n config.read_file(configfile)\n\n ANGELCO_EMAIL = config.get('ANGELCO', 'EMAIL')\n ANGELCO_PASSWORD = config.get('ANGELCO', 'PASSWORD')",
"def load_data_from_config(self):\n\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n self.labels = []\n self.to_add_labels = []\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n print(f\"config_dict {config_dict}\")\n if (config_dict is not None) and config_dict.get(\"dir_name\"):\n self.load_data_from_dir(dir_name=config_dict[\"dir_name\"], method='clear')",
"def load_config(config_path):\n global config\n with open(config_path) as config_file:\n config = munchify(yaml.safe_load(config_file))",
"def _file_loader(self) -> dict:\n cfg = None\n try:\n with open(self._path) as file:\n cfg = json.loads(file.read())\n except FileNotFoundError as e:\n print(e)\n exit(1)\n return cfg",
"def load_config_file(self):\n\n conf_file = config.DEFAULT_CONFIGURATION_FILE\n\n if self.options and getattr(self.options, \"conf_file\"):\n conf_file = self.options.conf_file\n if (\n not os.path.exists(conf_file) and\n not os.path.exists(\"%s.d\" % conf_file)\n ):\n raise Exception(\n (\n \"The specified configuration file \"\n \"does not exist. File=(%s)\"\n ) % self.options.conf_file\n )\n\n self.from_file(conf_file)",
"def _load_config_log(self):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n if not os.path.isfile(config_path):\n return {}\n with open(config_path, 'r') as f:\n data = yaml.load(f)\n return data",
"def load_subdir(self, dirpath):\n conf = Dict()\n for filename in sorted(os.listdir(dirpath)):\n filepath = os.path.join(dirpath, filename)\n if os.path.isfile(filepath):\n conf[os.path.splitext(filename)[0]] = self.load_file(filepath)\n else:\n conf[filename] = self.load_subdir(filepath)\n return conf",
"def load_config():\n proj_dir = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(proj_dir, \"config.yml\")\n conf = yaml.safe_load(open(config_path))\n return conf",
"def _read_config(path):\n with open(path) as f:\n data = f.read()\n data = os.path.expandvars(data)\n data = yaml.safe_load(data)\n return data"
] | [
"0.77170765",
"0.7356012",
"0.64128786",
"0.6412161",
"0.6184576",
"0.609155",
"0.6082478",
"0.6059316",
"0.6059316",
"0.60558116",
"0.60297316",
"0.59903866",
"0.5989058",
"0.5963897",
"0.5926974",
"0.59229696",
"0.58908015",
"0.5871731",
"0.5866416",
"0.5855968",
"0.5802609",
"0.57756025",
"0.5743846",
"0.5743741",
"0.56587595",
"0.56516534",
"0.56462175",
"0.5643965",
"0.5643965",
"0.56340116",
"0.56142104",
"0.5612271",
"0.56118524",
"0.5604455",
"0.5602361",
"0.5575093",
"0.5573085",
"0.5565924",
"0.5565766",
"0.5553231",
"0.5534105",
"0.5525178",
"0.5508443",
"0.5491986",
"0.54594713",
"0.5457588",
"0.5448045",
"0.54445994",
"0.54423934",
"0.5440876",
"0.5426893",
"0.5423455",
"0.5423356",
"0.5422229",
"0.5415495",
"0.5406597",
"0.5406496",
"0.5395675",
"0.53933805",
"0.539244",
"0.5388126",
"0.53828037",
"0.53763014",
"0.53748953",
"0.5368301",
"0.5367428",
"0.5366444",
"0.5362939",
"0.5362441",
"0.53590244",
"0.5342036",
"0.5333819",
"0.5333206",
"0.53331393",
"0.5330316",
"0.53276736",
"0.53250664",
"0.532427",
"0.53195524",
"0.53037596",
"0.5297642",
"0.5295727",
"0.52940625",
"0.5289773",
"0.52858365",
"0.52798843",
"0.52783465",
"0.5277451",
"0.5273826",
"0.5265775",
"0.5248948",
"0.5243817",
"0.52396774",
"0.5225047",
"0.5223161",
"0.521153",
"0.5211145",
"0.51999587",
"0.5191248",
"0.5186434"
] | 0.7280268 | 2 |
Get component variable value. | def get_component_config_value(self) -> dict:
package_type, package_name, *path = self.PATH.split(".")
file_path = Path(f"{package_type}") / package_name / f"{package_type[:-1]}.yaml"
with open(file_path, "r") as fp:
data = yaml_load(fp)
value = data
for i in path:
value = value[i]
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_variable(self, request, context):\n response = GetVariableResponse()\n value = self._delegator.get_variable(request.component, request.variable)\n response.value = encode(value)\n return response",
"def get_variable(self, svc, var):\n action = \"variableget\"\n path = \"data_request?id=%s&DeviceNum=%d&serviceId=%s&Variable=%s\" \\\n % (action, self.id, svc, var)\n return self.vera.get(path)",
"def get_variable_value(self, name):\n return self._design.GetVariableValue(name)",
"def get_value(self):\n return self._value",
"def _get_value(self):\n \n return self._value",
"def _get_value(self):\n return self.__value",
"def get_variable(self, name):\n return self._properties[name]",
"def _get_value(self, value, context):\n try:\n var_value = template.Variable(value).resolve(context)\n except template.VariableDoesNotExist:\n try:\n var_value = self.var_value.var\n except AttributeError:\n var_value = self.var_value\n return var_value",
"def getval(self):\r\n return self.value",
"def inputValue(self):\n return self.variable",
"def get(self):\n # We use here the fact, that when used in a widget, the value will be\n # retrieved directly instead through .get(). Thus the widget will always \"see\" the str representation.\n value = self._tk.globalgetvar(self._name)\n try:\n value = self.convert(value)\n except Exception as e:\n value = Invalid\n if self._validated_hook:\n self._validated_hook(value is not Invalid)\n return value",
"def getVariable(self):\n return _libsbml.Rule_getVariable(self)",
"def get_value(self):\n pass",
"def get_val(self):\n return self.value",
"def get_value(self):\n return self.value",
"def get_value(self):\n return self.value",
"def get_value(self):\n return self.value",
"def getValue(self):\n return _libsbml.Parameter_getValue(self)",
"def get_value(self):\n return self._val",
"def get_value(self):\n return self._value",
"def get_value(self):\n return self._value",
"def get_value(self):\n return self._value",
"def get_value(self):\n return self._value",
"def get_value(self):\n return self._value",
"def get(self):\n return self._value",
"def getValue(self):\n return self.value",
"def getvalue(self):\n ...",
"def getvalue(self):\n ...",
"def getVariable(self):\n return _libsbml.EventAssignment_getVariable(self)",
"def get_var(self, tag):\n if not tag in self.env:\n print(\"ERROR: value {} is not defined yet\".format(tag))\n elif callable(self.env[tag]):\n print(\"ERROR: tried to access callable {} was a value\".format(tag))\n else:\n return self.env[tag]",
"def read_value(self):\n return self.load_attr(\"value\")",
"def getValue(self):\n return self.value",
"def variable(self):\n return _coconut_tail_call(Var, self.name)",
"def value(self) -> pulumi.Input['AssetModelVariableValueArgs']:\n return pulumi.get(self, \"value\")",
"def get_variable(self, name):\n if self._scalamagic:\n intp = self.scala_interpreter\n intp.interpret(name)\n return intp.last_result()",
"def get(self, var):\n return getattr(self, 'var_%s' % (var,))",
"def GetValue(self):\n return self._value",
"def GetValue(self):\n return self._value",
"def _value(self):\n return self.device.value(*self._id[1:])",
"def variable(self, name):\n\n status, stdout, stderr = self.__xcall__(['--variable=%s' % name])\n\n if status != 0:\n raise RuntimeError(\"error querying --variable=%s for package `%s': %s\" % (name, self.name, stderr))\n\n return stdout.strip()",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def getValue(self):\n \n return self._value",
"def get_value(self):",
"def get(self, name, **valuefilter):\n if not valuefilter:\n valuefilter = self.valuefilter\n varobj = Variable(name, **valuefilter)\n value = varobj.get(gid=self.gid)\n return value",
"def getCvar(self, key):\n print \"get cvar %s\" % key\n return self.cvars.get(key)",
"def value(self):\n return self.get_data(\"value\")",
"def getVar(self, id):\n if id in self.variables:\n return self.variables[id]",
"def getVariable(self, varName):\n return self[varName]",
"def variable(self, val):",
"def get_variable_value(variable):\n def pipeline_from_info(variableinfo):\n controller = variableinfo._controller\n version = controller.vistrail.get_version_number(\n 'dat-var-%s' % variable.name)\n return controller.vistrail.getPipeline(version), version\n\n def pipeline_from_generator(variable_gen):\n # Get the original OutputPort module\n orig_controller = variable_gen._generator.controller\n base_pipeline = orig_controller.vistrail.getPipeline('dat-vars')\n if len(base_pipeline.module_list) != 1:\n raise ValueError(\"dat-vars version is invalid\")\n output_port = base_pipeline.module_list[0]\n\n controller = VistrailController(Vistrail())\n # OutputPort\n operations = [('add', output_port)]\n # Rest of the pipeline\n operations += variable_gen._generator.operations\n # Connection\n connection = controller.create_connection(\n variable_gen._output_module,\n variable_gen._outputport_name,\n output_port,\n 'InternalPipe')\n operations.append(('add', connection))\n # Materialize this\n action = create_action(operations)\n controller.add_new_action(action)\n version = controller.perform_action(action)\n controller.change_selected_version(version)\n assert version == controller.current_version == 1\n return controller.current_pipeline, 1\n\n # Obtain 'pipeline' and 'version' from 'variable'\n if isinstance(variable, Variable.VariableInformation):\n # Pipeline already exists\n pipeline, version = pipeline_from_info(variable)\n elif isinstance(variable, Variable):\n if variable._materialized is not None:\n # Pipeline already exists\n pipeline, version = pipeline_from_info(variable._materialized)\n else:\n # Pipeline doesn't exist\n # We need to make one from the operations\n pipeline, version = pipeline_from_generator(variable)\n else:\n raise TypeError\n\n # Setup the interpreter for execution\n interpreter = get_default_interpreter()\n interpreter.clean_non_cacheable_modules()\n interpreter.parent_execs = [None]\n res = interpreter.setup_pipeline(pipeline)\n if len(res[5]) > 0:\n raise ValueError(\"Variable pipeline has errors:\\n%s\" %\n '\\n'.join(me.msg for me in res[5].itervalues()))\n tmp_id_to_module_map = res[0]\n\n # Execute\n res = interpreter.execute_pipeline(\n pipeline,\n res[0], # tmp_id_to_module_map\n res[1], # persistent_to_tmp_id_map\n current_version=version,\n reason=\"getting variable value\")\n if len(res[2]) > 0:\n raise ValueError(\"Error while executing variable pipeline:\\n%s\" %\n '\\n'.join('%s: %s' % (me.module.__class__.__name__,\n me.msg)\n for me in res[2].itervalues()))\n if len(res[4]) > 0:\n # extract messages and previous ModuleSuspended exceptions\n raise ValueError(\"Module got suspended while executing variable \"\n \"pipeline:\\n%s\" %\n '\\n'.join(msg for msg in res[4].itervalues()))\n\n # Get the result\n outputport_desc = get_module_registry().get_descriptor_by_name(\n 'org.vistrails.vistrails.basic', 'OutputPort')\n for module in pipeline.module_list:\n if module.module_descriptor is outputport_desc:\n if get_function(module, 'name') == 'value':\n module_obj = tmp_id_to_module_map[module.id]\n result = module_obj.get_output('ExternalPipe')\n break\n else:\n result = None\n\n interpreter.finalize_pipeline(pipeline, *res[:-1])\n interpreter.parent_execs = [None]\n return result",
"def get_value(self):\n return self.sensor.get_value()",
"def get_val(self):\n return",
"def get_value(self):\n raise NotImplementedError",
"def component(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"component\")",
"def d_var(self):\r\n return 'dval'",
"def value(self):\n\n\t\treturn self.__value",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def load(self):\n return self._value",
"def get_data(self, variable):\n return self.data.get(variable)",
"def GetValue(self):\n return self._value",
"def GetValue(self):\n return self._value",
"def GetValue(self):\n return self._value",
"def variable(self) -> Variable:\n ...",
"def Get(self, section, var):\n return self.cp.get(section, var)",
"def get_val(self, **kwargs):\n return self._value",
"def value(self):\n return self._val",
"def value(self):\n return self.__value",
"def value(self):\n return self.__value",
"def getvalue(self):\n return self.out.getvalue()",
"def value(self):\n return self._force.params[self.typepair][self.name] * (\n self._cpp_obj.alpha)",
"def xvar ( self ) :\n return self.__xvar",
"def get(name):\r\n return componentManager.components[name]",
"def get_component_parameter(self, key: str, default_value=None) -> Any:\n value = self._node[\"app_data\"].get(\"component_parameters\", {}).get(key, default_value)\n return None if value == \"None\" else value",
"def get_airflow_variable(key: str) -> str:\n return models.Variable.get(key)",
"def get_assigned_value(self, var) :\n return self.assigned_values.get(var, None)",
"def get(self):\n if not self.__name in g_platform_variables:\n raise RuntimeError(\"unknown platform variable '%s'\" % (self.__name))\n current_var = g_platform_variables[self.__name]\n combinations = get_platform_combinations()\n for ii in combinations:\n if ii in current_var:\n return current_var[ii]\n raise RuntimeError(\"current platform %s not supported for variable '%s'\" % (str(combinations), self.__name))",
"def value(self):\n return self._value_",
"def var(self) -> float:\n return self._data.var()",
"def get(self, var):\n s = self.eval('{0}'.format(var))\n return self.strip_answer(s)",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value",
"def value(self):\n return self._value"
] | [
"0.71437204",
"0.70724845",
"0.70511836",
"0.69538087",
"0.69123816",
"0.690659",
"0.69018465",
"0.68676025",
"0.6828851",
"0.6796717",
"0.67753345",
"0.6770774",
"0.6696874",
"0.6677195",
"0.6672275",
"0.6672275",
"0.6672275",
"0.66238886",
"0.6623324",
"0.66175497",
"0.66175497",
"0.66175497",
"0.66090745",
"0.66090745",
"0.6603225",
"0.6563141",
"0.65623754",
"0.65623754",
"0.65594226",
"0.6550387",
"0.6547964",
"0.65301645",
"0.6526398",
"0.6520841",
"0.65184873",
"0.65111405",
"0.6510208",
"0.6510208",
"0.649539",
"0.6470976",
"0.64597434",
"0.64597434",
"0.6459089",
"0.64253193",
"0.64229727",
"0.6412607",
"0.641224",
"0.641012",
"0.6408213",
"0.6374",
"0.63715804",
"0.63622713",
"0.6358074",
"0.6349151",
"0.63427496",
"0.6341943",
"0.632819",
"0.6291965",
"0.6291965",
"0.6291965",
"0.6287036",
"0.6283745",
"0.62835395",
"0.62835395",
"0.62835395",
"0.62770045",
"0.62721896",
"0.6266124",
"0.6262474",
"0.62579083",
"0.62579083",
"0.6257294",
"0.6256964",
"0.6240143",
"0.62382287",
"0.62145096",
"0.6204875",
"0.61863226",
"0.61823267",
"0.61734504",
"0.61702406",
"0.61632067",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722",
"0.6151722"
] | 0.0 | -1 |
Test component value updated in agent config not in component config. | def test_set_get_correct_path(self):
agent_config = self.load_agent_config()
assert not agent_config.component_configurations
config_value = self.get_component_config_value()
assert config_value == self.INITIAL_VALUE
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", self.PATH],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert str(self.INITIAL_VALUE) in result.output
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", self.PATH, str(self.NEW_VALUE)],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
config_value = self.get_component_config_value()
assert config_value == self.INITIAL_VALUE
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", self.PATH],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert str(self.NEW_VALUE) in result.output
agent_config = self.load_agent_config()
assert agent_config.component_configurations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_custom_configuration_updated(self):\n component_protocol_id = ComponentId(\n ComponentType.PROTOCOL, self.new_protocol_id\n )\n component_contract_id = ComponentId(\n ComponentType.CONTRACT, self.new_contract_id\n )\n component_connection_id = ComponentId(\n ComponentType.CONNECTION, self.new_connection_id\n )\n component_skill_id = ComponentId(ComponentType.SKILL, self.new_skill_id)\n\n assert (\n self.agent_config.component_configurations[component_protocol_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_contract_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_connection_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_skill_id]\n == self.expected_custom_component_configuration\n )",
"def test_component_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n # create component_owner option\n self.env.config.set('ticket-field-config','component_owner','test')\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_component_list(), self.new['component'])",
"def test_component_configuration_removed_from_agent_config(self):\n with cd(self._get_cwd()):\n self.run_cli_command(\n \"add\", \"--local\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)\n )\n self.run_cli_command(\"add\", \"--local\", \"connection\", \"fetchai/http_server\")\n\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.soef.config.api_key\",\n \"some_api_key\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.http_server.config.port\",\n \"9000\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n config = self.load_config()\n assert config.component_configurations\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n in config.component_configurations\n )\n\n self.run_cli_command(\"remove\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID))\n\n config = self.load_config()\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n not in config.component_configurations\n )\n assert config.component_configurations",
"def test_set_existing_property():\n\n value = 'new'\n\n contents = (\"[Info]\\n\"\n \"sdk = old\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"",
"def test_agent_config_updated(self):\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n with Path(self._get_cwd(), DEFAULT_AEA_CONFIG_FILE).open() as fp:\n agent_config = loader.load(fp)\n assert DefaultMessage.protocol_id in agent_config.protocols\n assert ERROR_SKILL_PUBLIC_ID in agent_config.skills",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()",
"def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0",
"def test_update_node_driveconfig(self):\n pass",
"def test_set_new_property():\n\n value = '1'\n contents = (\"[info]\\n\"\n \"real = not_real\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()",
"def test_update_deployment_state(self):\n pass",
"async def test_full_config(hass, mock_client):\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED",
"def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output",
"def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')",
"async def test_full_config(hass: HomeAssistant, mock_client) -> None:\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"requires_auth\": False,\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()",
"def test_update_state(self):\n pass",
"def test_config_changed_non_leader(\n self,\n ) -> NoReturn:\n self.harness.set_leader(is_leader=False)\n self.harness.charm.on.config_changed.emit()\n\n # Assertions\n self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)",
"def test_update_reg_ex_config(self):\n pass",
"async def test_value_updated(\n hass: HomeAssistant, vision_security_zl7432, integration, client\n) -> None:\n node = vision_security_zl7432\n # Add states to the value we are updating to ensure the translation happens\n node.values[\"7-37-1-currentValue\"].metadata.data[\"states\"] = {\"1\": \"on\", \"0\": \"off\"}\n events = async_capture_events(hass, \"zwave_js_value_updated\")\n\n event = Event(\n type=\"value updated\",\n data={\n \"source\": \"node\",\n \"event\": \"value updated\",\n \"nodeId\": 7,\n \"args\": {\n \"commandClassName\": \"Switch Binary\",\n \"commandClass\": 37,\n \"endpoint\": 1,\n \"property\": \"currentValue\",\n \"newValue\": 1,\n \"prevValue\": 0,\n \"propertyName\": \"currentValue\",\n },\n },\n )\n\n node.receive_event(event)\n # wait for the event\n await hass.async_block_till_done()\n assert len(events) == 1\n assert events[0].data[\"home_id\"] == client.driver.controller.home_id\n assert events[0].data[\"node_id\"] == 7\n assert events[0].data[\"entity_id\"] == \"switch.in_wall_dual_relay_switch\"\n assert events[0].data[\"command_class\"] == CommandClass.SWITCH_BINARY\n assert events[0].data[\"command_class_name\"] == \"Switch Binary\"\n assert events[0].data[\"endpoint\"] == 1\n assert events[0].data[\"property_name\"] == \"currentValue\"\n assert events[0].data[\"property\"] == \"currentValue\"\n assert events[0].data[\"value\"] == \"on\"\n assert events[0].data[\"value_raw\"] == 1\n\n # Try a value updated event on a value we aren't watching to make sure\n # no event fires\n event = Event(\n type=\"value updated\",\n data={\n \"source\": \"node\",\n \"event\": \"value updated\",\n \"nodeId\": 7,\n \"args\": {\n \"commandClassName\": \"Basic\",\n \"commandClass\": 32,\n \"endpoint\": 1,\n \"property\": \"currentValue\",\n \"newValue\": 1,\n \"prevValue\": 0,\n \"propertyName\": \"currentValue\",\n },\n },\n )\n\n node.receive_event(event)\n # wait for the event\n await hass.async_block_till_done()\n # We should only still have captured one event\n assert len(events) == 1",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_get_current_component_status_DISABLED(self):\n self._ucr({\n 'repository/online/component/a': 'no',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_DISABLED, self.u.get_current_component_status('a'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()",
"def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"",
"def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)",
"def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())",
"def test_component_update_available_NEW(self):\n MockPopen.mock_stdout = 'Inst b (new from)'\n self.assertTrue(self.u.component_update_available())",
"def test_set_new_section_property():\n\n value = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def test_update_wait():\n wait = '10 seconds'\n config_info = read_config()\n config_info['wait'] = wait\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['wait'] == wait",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_config(self):\n\n # We start in uninitialized state.\n # In this state there is no driver process.\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n \n # Ping the agent.\n retval = self._ia_client.ping_agent()\n log.info(retval)\n\n # Initialize the agent.\n # The agent is spawned with a driver config, but you can pass one in\n # optinally with the initialize command. This validates the driver\n # config, launches a driver process and connects to it via messaging.\n # If successful, we switch to the inactive state.\n cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.INACTIVE)\n\n # Ping the driver proc.\n retval = self._ia_client.ping_resource()\n log.info(retval)\n\n decoder = IonObjectDeserializer(obj_registry=get_obj_registry())\n\n # Grab the alarms defined in the config.\n retval = decoder.deserialize(self._ia_client.get_agent(['alarms'])['alarms'])\n\n \"\"\"\n {'status': None, 'stream_name': 'parsed', 'name': 'test_sim_warning',\n 'upper_bound': 5.0, 'expr': 'x<5.0', 'upper_rel_op': '<',\n 'lower_rel_op': None, 'type_': 'IntervalAlarmDef', 'value_id': 'temp',\n 'lower_bound': None, 'message': 'Temperature is above test range of 5.0.',\n 'current_val': None, 'type': 1}\n \"\"\"\n self.assertEqual(retval[0].type_, 'IntervalAlarmDef')\n self.assertEqual(retval[0].upper_bound, 5.0)\n self.assertEqual(retval[0].expr, 'x<5.0')\n \n # Reset the agent. This causes the driver messaging to be stopped,\n # the driver process to end and switches us back to uninitialized.\n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)",
"def test_update_asset_state(self):\n pass",
"def test_get_component_ON(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/foo': 'bar',\n })\n c = self.u.get_component('a')\n self.assertEqual({'name': 'a', 'activated': True, 'foo': 'bar'}, c)",
"def test_update_age():\n age = '2 minutes'\n config_info = read_config()\n config_info['age'] = age\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['age'] == age",
"def test_update_node_state_servicelight(self):\n pass",
"def test_update_state1(self):\n pass",
"def test_get_property():\n\n sdk = '23'\n contents = (\"[Info]\\n\"\n \"sdk = %s\" % sdk)\n\n testutils.deploy_config_raw(contents)\n\n assert prop.get_prop('info', 'sdk') == sdk\n\n testutils.undeploy()\n\n return 0",
"def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())",
"def test_update_system(self):\n pass",
"def setup_class(cls):\n cls.expected_custom_component_configuration = dict(foo=\"bar\")\n\n cls.agent_config = AgentConfig(\n agent_name=\"agent_name\",\n author=\"author\",\n version=\"0.1.0\",\n default_routing={str(cls.old_protocol_id): str(cls.old_connection_id)},\n default_connection=str(cls.old_connection_id),\n )\n\n cls.agent_config.protocols = {cls.old_protocol_id}\n cls.agent_config.contracts = {cls.old_contract_id}\n cls.agent_config.connections = {cls.old_connection_id}\n cls.agent_config.skills = {cls.old_skill_id}\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.PROTOCOL, cls.old_protocol_id)\n ] = cls.expected_custom_component_configuration\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.CONTRACT, cls.old_contract_id)\n ] = cls.expected_custom_component_configuration\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.CONNECTION, cls.old_connection_id)\n ] = cls.expected_custom_component_configuration\n cls.agent_config.component_configurations[\n ComponentId(ComponentType.SKILL, cls.old_skill_id)\n ] = cls.expected_custom_component_configuration\n\n replace_component_ids(cls.agent_config, cls.replacements)",
"def test_config_overwrite(self):\n inc = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", False, True)\n ini = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", False, True)\n\n self.assertEquals(inc, ini)",
"def test_update_software_component_for_system_module(self):\n pass",
"def test_component_without_owner_is_trac_error(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n\n # we purposely forget to add component_owner to config\n # and run the plugin expecting a TracError\n admin_command = TicketFieldConfigCommand(self.env)\n self.assertRaises(TracError,admin_command.set_fields_from_config)",
"async def test_minimal_config(hass, mock_client):\n config = {prometheus.DOMAIN: {}}\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED",
"def test_get_current_component_status_OK(self):\n self._ucr({\n 'repository/online/component/a': 'no',\n 'repository/online/component/b': 'yes',\n 'repository/online/component/c': 'yes',\n 'repository/online/component/d': 'yes',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n print >> tmp, 'deb http://host:port/prefix/0.0/maintained/component/ c/arch/'\n print >> tmp, 'deb http://host:port/prefix/0.0/unmaintained/component/ d/arch/'\n tmp.flush()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_AVAILABLE, self.u.get_current_component_status('c'))\n self.assertEqual(UU.COMPONENT_AVAILABLE, self.u.get_current_component_status('d'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()",
"def test_update_scenario(self):\n pass",
"def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)",
"def test_set_testing(self):\n old_value = Config.testing\n Config.set_testing(True)\n\n self.assertNotEqual(old_value, Config.testing)",
"def test_protocols_updated(self):\n assert self.agent_config.protocols == {self.new_protocol_id}",
"async def test_manual_configuration_update_configuration(hass):\n device = await setup_axis_integration(hass)\n\n result = await hass.config_entries.flow.async_init(\n AXIS_DOMAIN, context={\"source\": \"user\"}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n\n mock_device = Mock()\n mock_device.vapix.params.system_serialnumber = MAC\n\n with patch(\n \"homeassistant.components.axis.config_flow.get_device\",\n return_value=mock_device,\n ):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input={\n CONF_HOST: \"2.3.4.5\",\n CONF_USERNAME: \"user\",\n CONF_PASSWORD: \"pass\",\n CONF_PORT: 80,\n },\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"\n assert device.host == \"2.3.4.5\"",
"def test_get_component_OFF(self):\n self._ucr({\n 'repository/online/component/b': 'no',\n 'repository/online/component/b/foo': 'bar',\n })\n c = self.u.get_component('b')\n self.assertEqual({'name': 'b', 'activated': False, 'foo': 'bar'}, c)",
"def test_update_state2(self):\n pass",
"async def test_update_device_config(hass, hass_client):\n with patch.object(config, \"SECTIONS\", [\"automation\"]):\n await async_setup_component(hass, \"config\", {})\n\n client = await hass_client()\n\n orig_data = [{\"id\": \"sun\"}, {\"id\": \"moon\"}]\n\n def mock_read(path):\n \"\"\"Mock reading data.\"\"\"\n return orig_data\n\n written = []\n\n def mock_write(path, data):\n \"\"\"Mock writing data.\"\"\"\n written.append(data)\n\n with patch(\"homeassistant.components.config._read\", mock_read), patch(\n \"homeassistant.components.config._write\", mock_write\n ), patch(\"homeassistant.config.async_hass_config_yaml\", return_value={}):\n resp = await client.post(\n \"/api/config/automation/config/moon\",\n data=json.dumps({\"trigger\": [], \"action\": [], \"condition\": []}),\n )\n\n assert resp.status == 200\n result = await resp.json()\n assert result == {\"result\": \"ok\"}\n\n assert list(orig_data[1]) == [\"id\", \"trigger\", \"condition\", \"action\"]\n assert orig_data[1] == {\"id\": \"moon\", \"trigger\": [], \"condition\": [], \"action\": []}\n assert written[0] == orig_data",
"def test_sim_control_device_attribute_change(self):\n desired_attribute_name = \"temperature\"\n input_value = 100.0\n self.sim_control_device.attribute_name = self.attr_name_enum_labels.index(\n desired_attribute_name\n )\n self.sim_control_device.pause_active = True\n setattr(self.sim_control_device, \"last_val\", input_value)\n self.assertEqual(self.sim_device.temperature, input_value)",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_get_current_component_status_MISSING(self):\n self._ucr({\n 'repository/online/component/b': 'yes',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_NOT_FOUND, self.u.get_current_component_status('b'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()",
"async def test_update_with_json_attrs_no_data(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n headers={\"content-type\": CONTENT_TYPE_JSON},\n content=\"\",\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.key }}\",\n \"json_attributes\": [\"key\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"headers\": {\"Accept\": \"text/xml\"},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n\n state = hass.states.get(\"sensor.foo\")\n assert state.state == STATE_UNKNOWN\n assert state.attributes == {\"unit_of_measurement\": \"MB\", \"friendly_name\": \"foo\"}\n assert \"Empty reply\" in caplog.text",
"def test_update_state4(self):\n pass",
"def test_update_software_configuration_for_system_module(self):\n pass",
"def test_custom_configuration_updated_correctly(self):\n result = self.run_cli_command(\n \"--skip-consistency-check\",\n \"config\",\n \"get\",\n \"vendor.fetchai.skills.error.is_abstract\",\n cwd=self._get_cwd(),\n )\n assert result.stdout == \"True\\n\"",
"def test_configure_to_reconfigure_param(self):\n\n class ToConfigure(object):\n \"\"\"Class to configure.\"\"\"\n\n def __init__(self):\n super(ToConfigure, self).__init__()\n self.test = None\n\n target = ToConfigure()\n\n param = 'test'\n\n conf = configuration(category('TEST', Parameter(param, value=True)))\n\n self.configurable.configure(conf=conf, targets=[target])\n self.assertTrue(target.test)",
"def test_version_sensor(self):\n config = {\"sensor\": {\"platform\": \"version\"}}\n\n assert setup_component(self.opp, \"sensor\", config)",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_value_can_be_changed(self):\n value = 30\n self.progressbar.setValue(value)\n self.assertEqual(self.progressbar.getValue(), value)\n\n # TODO: should we check for variable type to avoid app crashes ?\n # NOTE: weirdly enough, the sliders don't crash like this; this may\n # be a bug in libui.\n # with self.assertRaises(ValueError):\n # self.progressbar.setValue('hello')",
"def test_update_config_node(self):\n config_node = self._create_config_node()\n config_node_uuid = config_node['config-node']['uuid']\n updated_name = data_utils.rand_name('new_config_node')\n with self.override_role():\n self.config_client.update_config_node(\n config_node_uuid, display_name=updated_name)",
"def test_update_hyperflex_ucsm_config_policy(self):\n pass",
"def _update_params(self):\n log.debug(\"Updating parameter dict\")\n old_config = self._param_dict.get_config()\n self._get_config()\n new_config = self._param_dict.get_config() \n if (new_config != old_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)",
"async def test_manual_update(hass: HomeAssistant) -> None:\n await async_setup_component(hass, \"homeassistant\", {})\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK, json={\"data\": \"first\"}\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"name\": \"mysensor\",\n \"value_template\": \"{{ value_json.data }}\",\n \"platform\": DOMAIN,\n \"resource_template\": \"{% set url = 'http://localhost' %}{{ url }}\",\n \"method\": \"GET\",\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n assert hass.states.get(\"sensor.mysensor\").state == \"first\"\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK, json={\"data\": \"second\"}\n )\n await hass.services.async_call(\n \"homeassistant\",\n \"update_entity\",\n {ATTR_ENTITY_ID: [\"sensor.mysensor\"]},\n blocking=True,\n )\n assert hass.states.get(\"sensor.mysensor\").state == \"second\"",
"def test_set_property_casing():\n\n sdk = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('INFO', 'sdk', sdk)\n assert prop.get_prop('info', 'sdk') == sdk\n assert prop.get_prop('Info', 'sdk') == sdk\n assert prop.get_prop('INFO', 'sdk') == sdk\n\n testutils.undeploy()\n\n return 0",
"def test_configuration_changes(self):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n for i in range(5):\n s.run_simulation(dry_run=True)\n nconfig = s.to_dict()\n del nconfig['topology']\n assert config == nconfig",
"def test_update_state3(self):\n pass",
"async def test_changing_source_attribute(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n )\n entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n now = time.monotonic()\n info = BLUECHARM_BEACON_SERVICE_INFO_2\n device = generate_ble_device(\n address=info.address,\n name=info.name,\n details={},\n )\n advertisement_data = generate_advertisement_data(\n local_name=info.name,\n manufacturer_data=info.manufacturer_data,\n service_data=info.service_data,\n service_uuids=info.service_uuids,\n rssi=info.rssi,\n )\n\n inject_advertisement_with_time_and_source_connectable(\n hass,\n device,\n advertisement_data,\n now,\n \"local\",\n True,\n )\n await hass.async_block_till_done()\n\n attributes = hass.states.get(\n \"sensor.bluecharm_177999_8105_estimated_distance\"\n ).attributes\n assert attributes[ATTR_SOURCE] == \"local\"\n\n inject_advertisement_with_time_and_source_connectable(\n hass,\n device,\n advertisement_data,\n now,\n \"proxy\",\n True,\n )\n await hass.async_block_till_done()\n with patch_all_discovered_devices([BLUECHARM_BEACON_SERVICE_INFO_2]):\n async_fire_time_changed(\n hass,\n dt_util.utcnow() + timedelta(seconds=UPDATE_INTERVAL.total_seconds() * 2),\n )\n await hass.async_block_till_done()\n\n attributes = hass.states.get(\n \"sensor.bluecharm_177999_8105_estimated_distance\"\n ).attributes\n assert attributes[ATTR_SOURCE] == \"proxy\"",
"def test_update_connector(self):\n pass",
"def test_set_value(self):\n self.server_widget.value = 50\n assert self.client_widget.value == self.server_widget.value",
"def test_update_global_system_config(self):\n new_config = self._create_global_system_config()\n update_name = data_utils.rand_name('test')\n with self.override_role():\n self.config_client.update_global_system_config(\n new_config['uuid'],\n display_name=update_name)",
"def on_config_changed(self, event):\n unit = self.model.unit",
"async def test_update_with_xml_convert_bad_xml(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n headers={\"content-type\": \"text/xml\"},\n content=\"\",\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes\": [\"key\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == STATE_UNKNOWN\n assert \"REST xml result could not be parsed\" in caplog.text\n assert \"Empty reply\" in caplog.text",
"def test_update_goal_metric(self):\n pass",
"async def test_update_with_failed_get(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n headers={\"content-type\": \"text/xml\"},\n content=\"\",\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes\": [\"key\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == STATE_UNKNOWN\n assert \"REST xml result could not be parsed\" in caplog.text\n assert \"Empty reply\" in caplog.text",
"def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]",
"def test_get_value_success(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = self.config.values[name]\r\n\r\n self.assertEqual(self.config.get_value(name, option), value)",
"def test_store_property_after_reconnecting_to_the_device():",
"async def test_update(airsensor, hass, config):\n\n feature_mock, entity_id = airsensor\n\n def initial_update():\n feature_mock.pm1 = 49\n feature_mock.pm2_5 = 222\n feature_mock.pm10 = 333\n\n feature_mock.async_update = AsyncMock(side_effect=initial_update)\n await async_setup_entity(hass, config, entity_id)\n\n state = hass.states.get(entity_id)\n\n assert state.attributes[ATTR_PM_0_1] == 49\n assert state.attributes[ATTR_PM_2_5] == 222\n assert state.attributes[ATTR_PM_10] == 333\n\n assert state.state == \"222\"",
"def test_get_current_component_status_UNKNOWN(self):\n self._ucr({\n 'repository/online/component/d': 'yes',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n tmp.close()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_UNKNOWN, self.u.get_current_component_status('d'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG",
"def test_update_deployment(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_setup_with_config(self):\n setup_component(self.hass, \"sensor\", VALID_CONFIG_MINIMAL)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_summary\")\n assert state is not None",
"def modifyNotValuableComponents(self):\n # Nothing to do\n pass",
"def test_patch_namespaced_deployment_config_status(self):\n pass",
"def test_setup_with_alerts_config(self):\n setup_component(self.hass, \"sensor\", VALID_CONFIG_ALERTS)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_alerts\")\n assert state.state == \"0\"",
"async def test_state_update(hass: HomeAssistant) -> None:\n await init_integration(hass)\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3200.0\"\n\n future = utcnow() + timedelta(minutes=60)\n\n current_condition = load_json_object_fixture(\n \"accuweather/current_conditions_data.json\"\n )\n current_condition[\"Ceiling\"][\"Metric\"][\"Value\"] = 3300\n\n with patch(\n \"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions\",\n return_value=current_condition,\n ), patch(\n \"homeassistant.components.accuweather.AccuWeather.requests_remaining\",\n new_callable=PropertyMock,\n return_value=10,\n ):\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3300\"",
"def _setUpdateExpected(self, value):\n self.__isUpdateExpected = value",
"def test_update_agent(self):\n original_status = self.agent['admin_state_up']\n agent_status = {'admin_state_up': original_status}\n\n with self.override_role():\n self.agents_client.update_agent(agent_id=self.agent['id'],\n agent=agent_status)",
"def test_configure_non_interactive_missing_field_value(ExampleComponentClass):\n\n with pytest.raises(\n ValueError,\n match=r\"^No configuration value found for annotated field 'FAKE_NAME.a' of type 'int'.\",\n ):\n configure(ExampleComponentClass(), {\"b\": \"bar\"}, name=\"FAKE_NAME\")",
"def test_update_node_state_smartfail(self):\n pass",
"def test_update_software_components_for_system_module(self):\n pass",
"def test_edit_configuration(self):\n configuration = copy.deepcopy(self.configuration)\n configuration['settings'] = {'DB_HOST': 'other_scale_db'}\n configuration['mounts'] = {\n 'dted': {\n 'type': 'host',\n 'host_path': '/some/new/path'\n }\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n json_data = {\n 'configuration': configuration,\n 'auto_update': False\n }\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})",
"def test_active_configs(self):\n # one config and one active config\n if self.mod.knobs.n_configs != 1:\n self.mod.knobs.n_configs = 1\n self.assertTrue(len(self.map.active_configs) == 1)\n self.assertTrue(self.map.active_configs[0] == 'config01')\n\n # three configs and one active config\n self.mod.knobs.n_configs = 3\n self.mod.knobs.active_config = 'config02'\n self.assertTrue(len(self.map.active_configs) == 1)\n self.assertTrue(self.map.active_configs[0] == 'config02')",
"def test__get_component_server_local(self):\n MockConfigRegistry._EXTRA = {\n 'local/repository': 'yes',\n 'repository/online/server': 'a.example.net',\n 'repository/online/port': '4711',\n 'repository/online/component/a': 'yes',\n }\n self.u.ucr_reinit()\n s = self.u._get_component_server('a')\n self.assertEqual('a.example.net', s.mock_server)\n self.assertEqual('4711', s.mock_port)"
] | [
"0.6883115",
"0.6502125",
"0.64594984",
"0.6202278",
"0.6129778",
"0.61204875",
"0.6075345",
"0.6033572",
"0.5983752",
"0.5950724",
"0.5938686",
"0.59221786",
"0.5911883",
"0.59022325",
"0.5892399",
"0.58266765",
"0.57814544",
"0.57743114",
"0.5774052",
"0.5764389",
"0.5747986",
"0.5745692",
"0.5728632",
"0.57181954",
"0.5712753",
"0.56938446",
"0.56841576",
"0.5665145",
"0.5653854",
"0.5637229",
"0.56143653",
"0.5614005",
"0.5598069",
"0.55754644",
"0.5563991",
"0.55620575",
"0.55567455",
"0.5553928",
"0.5546621",
"0.5537109",
"0.5536026",
"0.55319995",
"0.55229473",
"0.5517409",
"0.5507999",
"0.55023813",
"0.550011",
"0.5477752",
"0.54749626",
"0.54612297",
"0.5459266",
"0.5456274",
"0.5444959",
"0.5433608",
"0.5426005",
"0.5422082",
"0.5418983",
"0.5417214",
"0.54111665",
"0.54077876",
"0.54049546",
"0.539898",
"0.5388572",
"0.53848654",
"0.53846914",
"0.5384295",
"0.5383658",
"0.53805304",
"0.537095",
"0.5367402",
"0.53653055",
"0.5364856",
"0.53624064",
"0.53623915",
"0.53623873",
"0.5360909",
"0.5351364",
"0.53480715",
"0.53439254",
"0.53405786",
"0.53339314",
"0.53337765",
"0.53303736",
"0.53297263",
"0.53240955",
"0.53240955",
"0.53240955",
"0.531739",
"0.5315056",
"0.5314158",
"0.53059024",
"0.52998",
"0.52953887",
"0.5294011",
"0.5292673",
"0.52893174",
"0.5285542",
"0.5275782",
"0.5268538",
"0.5267152"
] | 0.6550141 | 1 |
Test agent config manager get_overridables. | def test_AgentConfigManager_get_overridables():
path = Path(CUR_PATH, "data", "dummy_aea")
agent_config = AEABuilder.try_to_load_agent_configuration_file(path)
config_manager = AgentConfigManager(agent_config, path)
agent_overridables, component_overridables = config_manager.get_overridables()
assert "description" in agent_overridables
assert "is_abstract" in list(component_overridables.values())[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ini_get_all():\n raise NotImplementedError()",
"def antenny_list_configs(self):\n return self.antenny_config.list_configs()",
"def getConfigAll(self):\n return self.configAll(False)",
"def getConfigs(self, host):\n raise \"not implemented\"",
"def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"",
"def test_list_config_nodes(self):\n with self.override_role():\n self.config_client.list_config_nodes()",
"def target_interfaces(self):",
"def target_interfaces(self):",
"def test_find_agent_ips(self):\n\n with patch(\n \"salt.cloud.clouds.proxmox.query\",\n return_value={\n \"result\": [\n {\n \"name\": \"eth0\",\n \"ip-addresses\": [\n {\"ip-address\": \"1.2.3.4\", \"ip-address-type\": \"ipv4\"},\n {\"ip-address\": \"2001::1:2\", \"ip-address-type\": \"ipv6\"},\n ],\n },\n {\n \"name\": \"eth1\",\n \"ip-addresses\": [\n {\"ip-address\": \"2.3.4.5\", \"ip-address-type\": \"ipv4\"},\n ],\n },\n {\n \"name\": \"dummy\",\n },\n ]\n },\n ) as mock_query:\n vm_ = {\n \"technology\": \"qemu\",\n \"host\": \"myhost\",\n \"driver\": \"proxmox\",\n \"ignore_cidr\": \"1.0.0.0/8\",\n }\n\n # CASE 1: Test ipv4 and ignore_cidr\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2.3.4.5\"\n\n # CASE 2: Test ipv6\n\n vm_[\"protocol\"] = \"ipv6\"\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2001::1:2\"",
"def net_list_on_dhcp_agent(mgr_or_client, *args, **kwargs):\n return net_list(mgr_or_client, *args, **kwargs)",
"def test_list_global_system_configs(self):\n with self.override_role():\n self.config_client.list_global_system_configs()",
"def test_getorgs(self):\n pass",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def test_dont_merge_if_multiple_client(self):\r\n raise SkipTest(\"Not implemented\")",
"def test_agent_config_updated(self):\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n with Path(self._get_cwd(), DEFAULT_AEA_CONFIG_FILE).open() as fp:\n agent_config = loader.load(fp)\n assert DefaultMessage.protocol_id in agent_config.protocols\n assert ERROR_SKILL_PUBLIC_ID in agent_config.skills",
"def test_client_addresses_list(self):\n pass",
"def get_agent_network_interfaces(self):\n iface_list = [iface.serialize()['name'] for iface in\n hardware.dispatch_to_managers('list_network_interfaces')]\n iface_list = [name for name in iface_list if 'lo' not in name]\n\n if len(iface_list) == 0:\n raise errors.LookupAgentInterfaceError('Agent could not find a '\n 'valid network interface.')\n else:\n return iface_list",
"def test_get_hyperflex_config_result_list(self):\n pass",
"def create_mock_api_discovery(aioclient_mock, bridges):\n aioclient_mock.get(\n URL_NUPNP,\n json=[{\"internalipaddress\": host, \"id\": id} for (host, id) in bridges],\n )\n for host, bridge_id in bridges:\n aioclient_mock.get(\n f\"http://{host}/api/config\",\n json={\"bridgeid\": bridge_id},\n )\n # mock v2 support if v2 found in id\n aioclient_mock.get(\n f\"https://{host}/clip/v2/resources\",\n status=403 if \"v2\" in bridge_id else 404,\n )",
"def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]",
"def _get_oslo_configs():\n # NOTE(flaper87): Oslo config should be\n # optional. Instead of doing try / except\n # at the top of this file, lets import cfg\n # here and assume that the caller of this\n # function already took care of this dependency.\n from oslo.config import cfg\n\n return [\n cfg.StrOpt('cache_url', default='memory://',\n help='URL to connect to the cache back end.')\n ]",
"def test_list_build_config_for_all_namespaces(self):\n pass",
"def available_auto_connection():\n path = os.path.dirname(verticapy.__file__) + \"/connections.verticapy\"\n confparser = ConfigParser()\n confparser.optionxform = str\n try:\n confparser.read(path)\n confparser.remove_section(\"VERTICAPY_AUTO_CONNECTION\")\n except:\n pass\n all_connections = confparser.sections()\n return all_connections",
"def overrides(self) -> ConfigNodePropertyArray:\n return self._overrides",
"def _config_table(self):\n return self.targets",
"def test_server_override_general(self):\n # Sanity check our override values do not overlap\n self.assertNotEqual(CONFIG_DATA[\"ConcurrentWorkers\"],\n CONFIG_DATA[\"OverrideConcurrentWorkers\"])\n self.assertNotEqual(CONFIG_DATA[\"SaveTimeoutMinutes\"],\n CONFIG_DATA[\"OverrideSaveTimeoutMinutes\"])\n self.assertNotEqual(CONFIG_DATA[\"RetainImageMinutes\"],\n CONFIG_DATA[\"OverrideRetainImageMinutes\"])\n self.assertNotEqual(CONFIG_DATA[\"Region\"],\n CONFIG_DATA[\"OverrideRegion\"])\n config_data = imageroller.main.read_config(\n self._cmd_args,\n imageroller.test.get_config_parser(\n self._server_valid_override))\n # Verify default disabled server is not included\n self.assertNotIn(\n CONFIG_DATA[\"OverrideNotExistFQDN\"],\n [server_data.name for server_data in\n config_data.server_data])\n # Sanity check we have every server's config we expect to have\n self.assertSetEqual(\n set([server_data.name for server_data in\n config_data.server_data]),\n {CONFIG_DATA[\"OverrideWorkersFQDN\"],\n CONFIG_DATA[\"OverrideSaveTimeoutFQDN\"],\n CONFIG_DATA[\"OverrideRetainImageFQDN\"],\n CONFIG_DATA[\"OverrideRegionFQDN\"]},\n )\n # Smoke test they are all enabled\n self.assertTrue(all([server_data.enabled\n for server_data in\n config_data.server_data]))",
"def server_agent_list(ctx, output_format, columns):\n data = ctx.obj.get_agents()\n\n for agent in data['agent']:\n agent_info = ctx.obj.get_agent_by_agent_id(agent['id'])\n agent['ip'] = agent_info['ip']\n agent['pool'] = agent_info['pool']['name']\n agent['build_type'] = ctx.obj.get_agent_build_type(agent['id'])\n agent['build_text'] = ctx.obj.get_agent_build_text(agent['id'])\n\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['agent'])\n elif output_format == 'json':\n output_json_data(data)",
"def testGetHostConfigs_all(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs()\n self.assertEqual(6, len(hosts))",
"def get_opentsdb_config():\n if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\"))):\n config_parser = ConfigParser.SafeConfigParser()\n config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\")))\n try:\n opentsdb_url = config_parser.get('opentsdb', 'opentsdb_server_url')\n opentsdb_token = config_parser.get('opentsdb', 'token')\n opentsdb_metrics = config_parser.get('opentsdb', 'metrics')\n except ConfigParser.NoOptionError:\n logger.error(\n \"Agent not correctly configured. Check config file.\")\n sys.exit(1)\n\n if len(opentsdb_url) == 0:\n logger.warning(\n \"Agent not correctly configured(OPENTSDB_URL). Check config file. Using \\\"127.0.0.1:4242\\\" as default.\")\n opentsdb_url = \"http://127.0.0.1:4242\"\n if len(opentsdb_metrics) != 0:\n opentsdb_metrics = opentsdb_metrics.split(\",\")\n else:\n opentsdb_metrics = []\n\n opentsdb_config = {\n \"OPENTSDB_URL\": opentsdb_url,\n \"OPENTSDB_METRICS\": opentsdb_metrics,\n \"OPENTSDB_TOKEN\": opentsdb_token\n }\n else:\n logger.warning(\"No config file found. Using defaults.\")\n opentsdb_config = {\n \"OPENTSDB_URL\": \"http://127.0.0.1:4242\",\n \"OPENTSDB_METRICS\": \"\",\n \"OPENTSDB_TOKEN\": \"\"\n }\n\n return opentsdb_config",
"def configs(self):\n raise NotImplementedError()",
"def test_confiure_read_merge(self):\n class TestBase(pyperry.Base):\n def _config(cls):\n cls.configure('read', poop='smells')\n\n class Test(TestBase):\n def _config(cls):\n cls.configure('read', foo='bar')\n\n self.assertEqual(Test.adapter_config['read']['foo'], 'bar')\n self.assertEqual(Test.adapter_config['read']['poop'], 'smells')\n\n class Test2(Test):\n def _config(cls):\n cls.configure('read', { 'poop': 'stanks' })\n\n self.assertEqual(Test2.adapter_config['read']['poop'], 'stanks')\n self.assertEqual(Test.adapter_config['read']['poop'], 'smells')",
"def gather_configs(self):\n configs = []\n for what in self.order:\n for key in self.plugins[what]:\n mgr = self.plugins[what][key]\n c = mgr.config(what='get')\n if c is not None:\n c.update({\n 'description': mgr.description\n })\n # print(\"Gathering configuration from \", c)\n configs.append(c)\n return configs",
"def test_list_deployment_config_for_all_namespaces(self):\n pass",
"def test_all_servers_connection():\n task_data = dict(const.TEST_TASK)\n task_data[\"client_list\"] = list()\n agents = models.Agent.objects.all()\n for agent in agents:\n task_data[\"client_list\"].append({\"id\": agent.id, \"ip_address\": agent.ip_address})\n message_queue.push_task(task_data)\n logger.info(\"create tasks to test all agents' connection status\")",
"def test_get_connectors_list(self):\n pass",
"def test_intercommunalitys_get(self):\n pass",
"def remotes():",
"def getAliases(self):",
"def slave_ips(self) -> 'List[str]':\n raise NotImplementedError",
"def get_interface_data(devices, production_only=True, max_conns=MAX_CONNS):\n skip_disabled = not opts.include_disabled # Inverse of include is skip :D\n ninfo = NetACLInfo(devices=devices, production_only=production_only,\n max_conns=max_conns,\n skip_disabled=skip_disabled)\n ninfo.run()\n if DEBUG:\n print 'NetACLInfo done!'\n\n return ninfo.config",
"def atlas_organizations():\n pass",
"def get_hosts(self, target, listener_type):",
"def openconfig_override_base_linter(ctx, stmt):\n if stmt.i_module is not None and \\\n stmt.i_module.arg in [\"iana-if-type\", \"ietf-interfaces\"]:\n return\n\n lint.v_chk_recommended_substmt(ctx, stmt)\n lint.v_chk_required_substmt(ctx, stmt)",
"def all_hosts(self):\n ...",
"def get_agent_config(agent_id, which, obs_space, act_space, config):\r\n agent_config_pretrained = (POLICIES[config['_trainer']], obs_space, act_space, {\r\n 'model': {\r\n \"custom_model\": \"GymCompetePretrainedModel\",\r\n \"custom_model_config\": {\r\n \"agent_id\": agent_id - 1,\r\n \"env_name\": config['_env']['env_name'],\r\n \"model_config\": {},\r\n \"name\": \"model_%s\" % (agent_id - 1),\r\n \"load_weights\": True,\r\n },\r\n },\r\n\r\n \"framework\": config['framework'],\r\n })\r\n\r\n agent_config_from_scratch_sb = (POLICIES[config['_trainer']], obs_space, act_space, {\r\n 'model': {\r\n \"custom_model\": \"GymCompetePretrainedModel\",\r\n \"custom_model_config\": {\r\n \"agent_id\": agent_id - 1,\r\n \"env_name\": config['_env']['env_name'],\r\n \"model_config\": {},\r\n \"name\": \"model_%s\" % (agent_id - 1),\r\n \"load_weights\": 'normalization_only',\r\n },\r\n },\r\n\r\n \"framework\": config['framework'],\r\n })\r\n\r\n agent_config_from_scratch = (POLICIES[config['_trainer']], obs_space, act_space, {\r\n \"model\": {\r\n **config['_model_params']\r\n },\r\n \"framework\": config['framework'],\r\n \"observation_filter\": \"MeanStdFilter\",\r\n })\r\n\r\n configs = {\"pretrained\": agent_config_pretrained,\r\n \"from_scratch\": agent_config_from_scratch,\r\n \"from_scratch_sb\": agent_config_from_scratch_sb}\r\n\r\n return configs[which]",
"def test_list_host_subnet(self):\n pass",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))",
"def test_exclude_include_overlapping_for_configitem_with_overridden_mc_select_envs(capsys):\n errorline = [None]\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA():\n with McSelectOverrideItem() as it:\n errorline[0] = next_line_num()\n it.mc_select_envs(exclude=[dev1], include=[dev1, pp])\n\n assert \"There was 1 error when defining item\" in str(exinfo.value)\n _sout, serr = capsys.readouterr()\n print(serr)\n\n ce(errorline[0], serr, exp_dev1_ambiguous)\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA():\n with McSelectOverrideItem2() as it:\n errorline[0] = next_line_num()\n it.mc_select_envs(exclude=[dev1], include=[dev1, pp])\n\n assert \"There was 1 error when defining item\" in str(exinfo.value)\n _sout, serr = capsys.readouterr()\n print(serr)\n\n ce(errorline[0], serr, exp_dev1_ambiguous)",
"def _get_api_server_authorized_ip_ranges(self, enable_validation: bool = False) -> List[str]:\n # read the original value passed by the command\n api_server_authorized_ip_ranges = self.raw_param.get(\n \"api_server_authorized_ip_ranges\"\n )\n # In create mode, try to read the property value corresponding to the parameter from the `mc` object.\n if self.decorator_mode == DecoratorMode.CREATE:\n read_from_mc = False\n if (\n self.mc and\n self.mc.api_server_access_profile and\n self.mc.api_server_access_profile.authorized_ip_ranges is not None\n ):\n api_server_authorized_ip_ranges = (\n self.mc.api_server_access_profile.authorized_ip_ranges\n )\n read_from_mc = True\n\n # normalize\n if not read_from_mc:\n api_server_authorized_ip_ranges = [\n x.strip()\n for x in (\n api_server_authorized_ip_ranges.split(\",\")\n if api_server_authorized_ip_ranges\n else []\n )\n ]\n elif self.decorator_mode == DecoratorMode.UPDATE:\n # normalize, keep None as None\n if api_server_authorized_ip_ranges is not None:\n api_server_authorized_ip_ranges = [\n x.strip()\n for x in (\n api_server_authorized_ip_ranges.split(\",\")\n if api_server_authorized_ip_ranges\n else []\n )\n ]\n\n # validation\n if enable_validation:\n if self.decorator_mode == DecoratorMode.CREATE:\n if api_server_authorized_ip_ranges:\n if (\n safe_lower(self._get_load_balancer_sku(enable_validation=False)) ==\n CONST_LOAD_BALANCER_SKU_BASIC\n ):\n raise InvalidArgumentValueError(\n \"--api-server-authorized-ip-ranges can only be used with standard load balancer\"\n )\n if self._get_enable_private_cluster(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n elif self.decorator_mode == DecoratorMode.UPDATE:\n if api_server_authorized_ip_ranges:\n if check_is_private_cluster(self.mc):\n raise MutuallyExclusiveArgumentError(\n \"--api-server-authorized-ip-ranges is not supported for private cluster\"\n )\n return api_server_authorized_ip_ranges",
"def interfaces(self):",
"def interfaces(self):",
"def test_config_load2():\n print test_config_load2.__name__\n test_config_file = BytesIO()\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-rs1')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-rs2')\n append_to_file_mongo_setting(test_config_file, 'mongo-oplog-rs3')\n test_config_file.seek(0)\n # config file processing\n config = configparser.ConfigParser()\n config.read_file(test_config_file)\n pp = PrettyPrinter()\n all_settings = load_mongo_replicas_from_setting(config, \n 'mongo-oplog')\n pp.pprint(all_settings)\n print all_settings.keys()\n assert(sorted(all_settings.keys()) == sorted(['mongo-oplog']))\n assert(3 == len(all_settings['mongo-oplog']))\n assert('mongo-oplog-rs1' == all_settings['mongo-oplog'][0].host)\n assert('mongo-oplog-rs2' == all_settings['mongo-oplog'][1].host)\n assert('mongo-oplog-rs3' == all_settings['mongo-oplog'][2].host)\n mongo_settings_from_config(config, 'mongo-oplog-rs1')",
"def GetOverlappingItems(self):\r\n\r\n area_bbox = self.area.GetBoundingBox()\r\n\r\n if hasattr(self.board, 'GetModules'):\r\n modules = self.board.GetModules()\r\n else:\r\n modules = self.board.GetFootprints()\r\n\r\n tracks = self.board.GetTracks()\r\n\r\n self.overlappings = []\r\n\r\n for zone in self.board.Zones():\r\n if zone.GetZoneName() != self.area.GetZoneName():\r\n if zone.GetBoundingBox().Intersects(area_bbox):\r\n self.overlappings.append(zone)\r\n\r\n for item in tracks:\r\n if (type(item) is pcbnew.PCB_VIA) and (item.GetBoundingBox().Intersects(area_bbox)):\r\n self.overlappings.append(item)\r\n if type(item) is pcbnew.PCB_TRACK:\r\n self.overlappings.append(item)\r\n\r\n for item in modules:\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n for pad in item.Pads():\r\n self.overlappings.append(pad)\r\n for zone in item.Zones():\r\n self.overlappings.append(zone)\r\n\r\n # TODO: change algorithm to 'If one of the candidate area's edges overlaps with target area declare candidate as overlapping'\r\n for i in range(0, self.board.GetAreaCount()):\r\n item = self.board.GetArea(i)\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n if item.GetNetname() != self.net:\r\n self.overlappings.append(item)",
"def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_agents_responder_spaces(self):\n pass",
"def _get_lsp_config_exclude_interfaces(self):\n return self.__lsp_config_exclude_interfaces",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))",
"def get_interfaces(self):\n raise NotImplementedError",
"def arc_clients(self):\n return self.__get_option('arc_client_tools')",
"def test_ipam_ip_addresses_list(self):\n pass",
"def test_all_addresses_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info_1'}),\n '10.0.0.2': Mock(**{'serial.return_value': 'address_info_2'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertItemsEqual(['address_info_1', 'address_info_2'],\n rpc.get_all_addresses_info())",
"def test_retrieve_l_organizations(self):\n pass",
"def loadconfigtable(ui, extname, configtable):\n for section, items in sorted(configtable.items()):\n knownitems = ui._knownconfig.setdefault(section, itemregister())\n knownkeys = set(knownitems)\n newkeys = set(items)\n for key in sorted(knownkeys & newkeys):\n msg = b\"extension '%s' overwrite config item '%s.%s'\"\n msg %= (extname, section, key)\n ui.develwarn(msg, config=b'warn-config')\n\n knownitems.update(items)",
"def get_hosts_retry(self, target, listener_type):",
"def get_config_from_pymoo(self, pymoo_config: List) -> SubnetConfig:\n return self._m_handler.get_config_from_pymoo(pymoo_config)",
"def test_get_objects_without_properties(self):\n test_spec = self.spec.get(\"test_get_objects\")\n expected_type = test_spec.get(\"_type\")\n expected_host_ips = test_spec.get(\"host_ips\")\n \n object_content = self.session.invoke_api(vim_util, \n 'get_objects', \n self.vim, \n 'HostSystem', \n 100)\n host_ip_list = []\n if object_content:\n self.assertIsNotNone(object_content.objects)\n for one_object in object_content.objects:\n self.assertEqual(one_object.obj._type, expected_type)\n if hasattr(one_object, 'propSet'):\n dynamic_properties = one_object.propSet\n for prop in dynamic_properties:\n host_ip_list.append(prop.val)\n for each_ip in expected_host_ips:\n self.assertTrue(each_ip in host_ip_list)",
"def testGetConfig():\n configs = GetConfig()\n # print(configs.host_ip)\n # print(configs.proxy_local)\n \n # print(configs.proxy_online)\n # print(configs.user_img_url)\n # print(configs.user_login_url)\n print(configs.user_start_id)\n\n # assert isinstance(configs.proxy_getter_functions, list)\n # print(configs.proxy_getter_functions)",
"def _get_MindtPy_OA_config():\n CONFIG = ConfigBlock('MindtPy-OA')\n\n _add_common_configs(CONFIG)\n _add_oa_configs(CONFIG)\n _add_roa_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG",
"def testGetHostConfigs_all(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs()\n self.assertEqual(5, len(hosts))",
"def getTargetRobots(self):\n # self.log(\"find targets\")\n robots = self.get_visible_robots()\n enemyRobots = []\n if len(robots) > 0:\n for bot in robots:\n # self.log(\"target bot team \" + str(bot['team']))\n # self.log(\"my team \" + str(self.me['team']))\n if bot['team'] != self.me['team']:\n self.log(\"adding bot to enemy list\")\n enemyRobots.append(bot)\n return enemyRobots",
"def test_upgrade_shared_dependencies(self):\n result = self.run_cli_command(\"-s\", \"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n\n agent_config: AgentConfig = cast(\n AgentConfig,\n load_item_config(PackageType.AGENT.value, Path(self.current_agent_context)),\n )\n assert OefSearchMessage.protocol_id in agent_config.protocols\n assert SOEF_PUBLIC_ID in agent_config.connections\n assert OEF_PUBLIC_ID in agent_config.connections",
"def set_config(self, existing_l3_interfaces_facts):\n config = self._module.params.get(\"config\")\n want = []\n if config:\n for w in config:\n w.update({\"name\": normalize_interface(w[\"name\"])})\n want.append(remove_empties(w))\n have = deepcopy(existing_l3_interfaces_facts)\n self.init_check_existing(have)\n resp = self.set_state(want, have)\n return to_list(resp)",
"def source_interfaces(self):",
"def source_interfaces(self):",
"def _to_add_and_update(self):\n primitive_list=[]\n group_list=[]\n location_list = []\n #primitives\n for item in self.candidate_config:\n item_groupdef=group(item['name'], item['type'], item['ips'])\n if item['name'] in names(self.live_config):\n live_item=[x for x in self.live_config if x['name']==item['name']][0]\n added_ips=[x for x in item['ips'] if x not in live_item['ips']]\n primitive_list += ip_primitives(item['name'], added_ips)\n if item['type'] != live_item['type']:\n if item['type'] == 'ldirectord':\n primitive_list += ld_primitives(item['name'], item['loadbalancers'])\n if set(item_groupdef[0].split()) != set(group2(live_item)[0].split()):\n group_list += item_groupdef\n if item['loadbalancers'] != live_item['loadbalancers']:\n location_list += lborder(item['name'], item['loadbalancers'])\n else:\n primitive_list += primitives(item['name'], item['type'], item['ips'])\n group_list += item_groupdef\n location_list += lborder(item['name'], item['loadbalancers'])\n return '\\n'.join(primitive_list + group_list + location_list)",
"def get_all_switch(self, conf):\n\t\tpass",
"def get_rllib_full_config(self):\n return merged_dict(self.get_default_config(), self.get_config())",
"def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_agents_responder(self):\n pass",
"def get_addrs(self) -> List[Multiaddr]:",
"def get_addrs(self):\n # TODO check if server is listening\n return self.multiaddrs",
"def _load_raw_configs(override_path, defaults, overrides):\n if override_path:\n path = override_path\n else:\n search_dirs = [\".\", _locate_config_dir()]\n path = _locate_config_file(search_dirs)\n\n with open(path) as f:\n parsed_configs = yaml.safe_load(f)\n\n out = []\n try:\n items = parsed_configs[\"items\"]\n for config_dict in items:\n # Legacy fix for renamed key. TODO: Remove this after a while.\n if \"copy_type\" in config_dict:\n config_dict[\"install_method\"] = config_dict[\"copy_type\"]\n del config_dict[\"copy_type\"]\n\n # Name this config (since we may override the local_path).\n config_dict[\"name\"] = config_dict[\"local_path\"]\n\n nones = {key: None for key in Config._fields}\n combined = strif.dict_merge(nones, defaults, config_dict, overrides)\n log.debug(\"raw, combined config: %r\", combined)\n\n try:\n out.append(combined)\n except TypeError as e:\n raise ConfigError(\"error in config value: %s: %s\" % (e, config_dict))\n except ValueError as e:\n raise ConfigError(\"error reading config file: %s\" % e)\n\n return out",
"def manager_agents(self):\n return self.get(\"manager_agents\")",
"def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides",
"def get_configured_interfaces():\n with manager.connect(host=HOST, port=PORT, username=USER, password=PASS,\n hostkey_verify=False, device_params={'name': 'default'},\n allow_agent=False, look_for_keys=False) as m:\n\n with open(FILE) as f:\n return(m.get_config('running', f.read()))",
"def _config_interfaces(self):\n self.interfaces['loopback'] = \"127.0.0.1\"\n self.interfaces['internal'] = \"127.0.0.1\"\n self.interfaces['external'] = \"0.0.0.0\"\n self.interfaces[\"any\"] = \"0.0.0.0\"\n self.interfaces[\"localhost\"] = \"127.0.0.1\"",
"def test_fetch_indicators_command_google_ip_ranges(mocker):\n from JSONFeedApiModule import fetch_indicators_command\n client = Client(\n url='',\n headers={},\n feed_name_to_config={\n 'CIDR': {\n 'url': 'https://www.test.com/ipranges/goog.json',\n 'extractor': 'prefixes[]', 'indicator': 'ipv4Prefix', 'indicator_type': 'CIDR'\n }\n }\n )\n\n mocker.patch.object(\n client, 'build_iterator', return_value=(\n [{'ipv4Prefix': '1.1.1.1'}, {'ipv4Prefix': '1.2.3.4'}, {'ipv6Prefix': '1111:1111::/28'}], True\n ),\n )\n\n indicators, _ = fetch_indicators_command(client, indicator_type=None, feedTags=[], auto_detect=None, limit=100)\n for indicator in indicators:\n assert indicator.get('value')",
"def test_get_networks(self):\n pass",
"def testLoadConfigs(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('postsubmit'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball-power'))",
"def testGetConfigIngressPorts(self):\n self.oxc.getconfig_ingress_ports(file_name = 'getconfig_ingress_ports.xml', ingress_ports = oxcDict['valid_ingress_ports'])",
"def test_aws_service_api_interfaces_get(self):\n pass",
"def test_get(self):\n\n # Grab the server's addresses...\n addrs = self.server.addresses\n\n # Make sure the public and private lists are present\n dtutil.assert_true('public' in addrs)\n dtutil.assert_true('private' in addrs)\n\n # Are IP addresses actually returned?",
"def get_reachable_servers(self) -> List[Server]:\n pass",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')\n mac3 = '00:11:22:33:33:36'\n ip3 = '3.4.3.7'\n self.add_endpoint(mac3, ip3, 'intersite-testsuite', 'app2', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))\n self.assertTrue(self.verify_remote_site_has_entry(mac3, ip3, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))",
"def ibns_intf(task):\n # init lists of interfaces\n access_interfaces = []\n uplink_interfaces = []\n # iterate over all interfaces\n for intf in task.host[\"intfs\"]:\n\n # uplink interfaces\n if intf[\"interface\"] in task.host[\"uplinks\"]:\n uplink_interfaces.append(intf)\n\n # other non-excluded access ports\n elif intf[\"interface\"] not in task.host[\"excluded_intf\"]:\n if intf[\"access_vlan\"] in task.host[\"vlans\"]:\n access_interfaces.append(intf)\n\n # assign uplink interface list to task.host\n task.host[\"uplink_interfaces\"] = uplink_interfaces\n # render uplink interface configs\n uplink_intf_cfg = task.run(\n task=text.template_file,\n template=\"IBNS_uplink_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n # assign access interface list to task.host\n task.host[\"access_interfaces\"] = access_interfaces\n # render access interface configs\n access_intf_cfg = task.run(\n task=text.template_file,\n template=f\"IBNS{task.host['ibns_ver']}_access_intf.j2\",\n path=\"templates/\",\n **task.host,\n )\n\n # init list of L3 vlan interfaces\n l3_vlan_int = [\"Vlan777\"]\n # list of vlan interfaces that will not relay\n no_relay_ints = [\"1\", \"666\", \"667\"]\n # iterate over active L3 interfaces\n for intf in task.host[\"ip_int_br\"]:\n # accept only those that are active vlan interfaces\n if intf[\"intf\"].startswith(\"Vlan\") == True and intf[\"status\"] == \"up\":\n # strip vlan id from interface name\n vlan_id = intf[\"intf\"].strip(\"Vlan\")\n # compare with list of no relay ints\n if vlan_id not in no_relay_ints:\n # add to list of interfaces for ISE DHPC relay\n l3_vlan_int.append(intf[\"intf\"])\n\n # save L3 vlan interfaces to task.host\n task.host[\"l3_vlan_int\"] = l3_vlan_int\n\n if \"emea\" in task.host['region']:\n L3VLAN_template = \"IBNS_EMEA_L3VLAN_intf.j2\"\n else:\n L3VLAN_template = \"IBNS_L3VLAN_intf.j2\"\n\n # render L3 vlan interface configs\n l3_vlan_int_cfg = task.run(\n task=text.template_file,\n template=L3VLAN_template,\n path=\"templates/\",\n **task.host,\n )\n\n # return configuration\n return uplink_intf_cfg.result + access_intf_cfg.result + l3_vlan_int_cfg.result",
"def test_get_hyperflex_proxy_setting_policy_list(self):\n pass",
"def data_setup_appliances():\n appliance_list = []\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance1\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance2\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance3\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance4\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance5\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance1\", gpio_pin_id=13))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance2\", gpio_pin_id=15))\n return appliance_list",
"def test_nics_to_access_configs(neo4j_session):\n _ensure_local_neo4j_has_test_instance_data(neo4j_session)\n ac_query = \"\"\"\n MATCH (nic:GCPNetworkInterface)-[r:RESOURCE]->(ac:GCPNicAccessConfig)\n return nic.nic_id, ac.access_config_id, ac.public_ip\n \"\"\"\n nodes = neo4j_session.run(ac_query)\n\n nic_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0'\n ac_id1 = f\"{nic_id1}/accessconfigs/ONE_TO_ONE_NAT\"\n nic_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0'\n ac_id2 = f\"{nic_id2}/accessconfigs/ONE_TO_ONE_NAT\"\n\n actual_nodes = {(n['nic.nic_id'], n['ac.access_config_id'], n['ac.public_ip']) for n in nodes}\n expected_nodes = {\n (nic_id1, ac_id1, '1.3.4.5'),\n (nic_id2, ac_id2, '1.2.3.4'),\n }\n assert actual_nodes == expected_nodes",
"def Warmup(): # pylint: disable=unused-variable\n configs = luci_config.ListAllConfigs(\n datastore_client, _cache_timestamp=time.time() + 10)\n for _, revision, subscription in configs:\n luci_config.GetMatcher(revision, subscription)\n return jsonify({})",
"async def test_bridge_homekit_already_configured(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"aabbccddeeff\")])\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_HOMEKIT},\n data=zeroconf.ZeroconfServiceInfo(\n host=\"0.0.0.0\",\n addresses=[\"0.0.0.0\"],\n hostname=\"mock_hostname\",\n name=\"mock_name\",\n port=None,\n properties={zeroconf.ATTR_PROPERTIES_ID: \"aa:bb:cc:dd:ee:ff\"},\n type=\"mock_type\",\n ),\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\""
] | [
"0.5522886",
"0.533993",
"0.5265402",
"0.52284235",
"0.5223923",
"0.5155312",
"0.5122521",
"0.5122521",
"0.5117879",
"0.50871843",
"0.5086171",
"0.505273",
"0.5049074",
"0.5049074",
"0.5049074",
"0.49887353",
"0.49772704",
"0.497659",
"0.49645105",
"0.494614",
"0.49298245",
"0.49221227",
"0.49180844",
"0.4916",
"0.49030092",
"0.48923257",
"0.48863012",
"0.48777267",
"0.4864638",
"0.4863723",
"0.48334146",
"0.48104116",
"0.4807281",
"0.48007062",
"0.47932464",
"0.47866902",
"0.47854325",
"0.47813696",
"0.4781081",
"0.4780314",
"0.4774783",
"0.47620797",
"0.4761935",
"0.47583443",
"0.4752801",
"0.47407934",
"0.47404408",
"0.47398698",
"0.4737649",
"0.47368184",
"0.4736363",
"0.47061723",
"0.47061723",
"0.4704079",
"0.4703492",
"0.46993968",
"0.4695572",
"0.46953157",
"0.46907845",
"0.4689877",
"0.46894544",
"0.46884668",
"0.46870396",
"0.46838337",
"0.46736696",
"0.46718588",
"0.467032",
"0.46638608",
"0.4659017",
"0.46487856",
"0.4648121",
"0.4645139",
"0.46416354",
"0.46379474",
"0.46379474",
"0.46324775",
"0.46275002",
"0.4615209",
"0.46106437",
"0.4606721",
"0.45861498",
"0.4584487",
"0.4582844",
"0.45714283",
"0.45710352",
"0.45684138",
"0.45673406",
"0.45673004",
"0.45577028",
"0.45571467",
"0.45546556",
"0.4544307",
"0.45432425",
"0.45412898",
"0.45378298",
"0.4534213",
"0.4531595",
"0.45305854",
"0.45260286",
"0.45204896"
] | 0.8485979 | 0 |
Iteratively update the prior to the posterior distribution using input data. | def iterate(self, data):
    # Append the new observations to the stored data.
    self.data = np.append(self.data, data)
    # Sequential Bayes update: scale the current distribution by the
    # likelihood of each observation, renormalize, and record the result.
    for d in data:
        update = self.current * self.likelihood(d)
        self.current = self._normalize(update)
        self.posterior = np.concatenate((self.posterior, [self.current]))
    print(f"{len(data)} iterations completed!")
    return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities",
"def posterior_sample(self):\n pass",
"def sample_from_prior(self, *args, **kwargs):\n pass",
"def posterior_distr(self, y, **args):\n raise NotImplementedError",
"def computePosterior(self):\n # in their log form, posterior = prior + beta * datalikelihood\n # make a copy of prior at first\n self.posterior.copy(self.prior)\n # add the data likelihood\n altar.blas.daxpy(self.beta, self.data, self.posterior)\n # all done\n return self",
"def posterior(self, samples):\n unique_samps = set(samples)\n denominator = 0\n posteriors = []\n n_samps = len(samples)\n for concept in self.concepts:\n num = 0\n if unique_samps.issubset(set(concept.extension)):\n num = concept.prior*concept.likelihood(n_samps)\n denominator += num\n posteriors.append(num)\n return np.divide(posteriors, denominator)",
"def posterior_distribution(x, t, M, noise_precision, prior_mu, prior_precision):\n A = np.array([x ** i for i in range(M)]).reshape((1, M)) # (M, 1)\n\n new_precision = prior_precision + noise_precision * np.dot(np.transpose(A), A)\n new_mu = np.dot(np.linalg.inv(new_precision), noise_precision * t * np.transpose(A) + np.dot(prior_precision, prior_mu))\n\n return new_mu, new_precision",
"def post_predictive_distribution(self, samples):\n post_pred_dist = []\n posteriors = self.posterior(samples)\n for point in range(1, self.max_val+1):\n post_pred = 0\n for concept, posterior in list(zip(self.concepts, posteriors)):\n if point in concept.extension:\n post_pred += posterior\n post_pred_dist.append(post_pred)\n return post_pred_dist",
"def iterate_pagerank(corpus, damping_factor):\n distribution = dict()\n corpus_length = len(corpus)\n for u in corpus: #On first iteration, each page is equally likely.\n distribution[u] = 1.0 / corpus_length\n\n difference = 1.0\n max_difference = 0.0\n while ( difference > 0.001 ):\n old_distribution = distribution.copy()\n for u in corpus: #Page we are currently looking at\n prob = (1.0 - damping_factor) / corpus_length\n for x in corpus:\n if u == x:\n continue\n if u in corpus[x]:\n links = list(corpus[x])\n prob += damping_factor * (distribution[x] / len(links))\n distribution[u] = prob\n difference = abs(distribution[u] - old_distribution[u])\n if difference > max_difference: max_difference = difference\n return distribution",
"def _preprocess(self, data):\n\n # pipeline: first call the previous statistics:\n if self.previous_statistics is not None:\n data = self.previous_statistics.statistics(data)\n # the first of the statistics need to take list as input, in order to match the API. Then actually the\n # transformations work on np.arrays. In fact the first statistic transforms the list to array. Therefore, the\n # following code needs to be called only if the self statistic is the first, i.e. it does not have a\n # previous_statistic element.\n else:\n data = self._check_and_transform_input(data)\n\n return data",
"def sample_from_prior(self):\n raise NotImplementedError",
"def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)",
"def posterior(mu, x, sigma):\n post = like(x, sigma, mu) * prior(mu)\n evidencia = np.trapz(post, mu)\n return post/evidencia",
"def set_prior(self,field):\n self.observation_thresholds = [i/self.observations for i in range(0,self.observations)]\n self.observation_samples = 1\n # TODO: For use after integrating image processing with MCESP for Game-Delayed Reinforcements\n # self.norm = field.max()",
"def iterate_pagerank(corpus, damping_factor):\n # List all pages in corpus\n pages = list(corpus.keys())\n # {p: i}\n links = dict()\n\n # Fix corpus\n for p in corpus.keys():\n # If no links, then it has one link for every page in corpus\n if corpus[p] == set():\n corpus[p] = set(pages)\n \n for page in pages:\n links[page] = []\n for p in corpus.keys():\n if page in corpus[p]:\n links[page].append(p)\n #print(corpus)\n #print(links)\n\n probabilities = dict()\n updated_probabilities = dict()\n\n # Initial PR = 1/N\n for p in corpus.keys():\n probabilities[p] = 1 / len(corpus.keys())\n updated_probabilities[p] = float(0)\n\n # PR differences\n d = {k: abs(probabilities[k] - updated_probabilities[k]) for k in probabilities if k in updated_probabilities}\n\n # Recalculate\n i = 0\n p_corpus = (1 - damping_factor) / len(corpus)\n while max(d.values()) > 0.001:\n for p in corpus.keys():\n p_link = 0\n # Links\n for lp in links[p]:\n if (i % 2) == 0:\n p_link += (probabilities[lp] / len(corpus[lp]))\n else:\n p_link += (updated_probabilities[lp] / len(corpus[lp]))\n pr = p_corpus + (damping_factor * p_link)\n\n # Update probabilities or updated_probabilities dictionary\n if (i % 2) == 0:\n updated_probabilities[p] = pr\n else:\n probabilities[p] = pr\n \n # Increase count\n i += 1\n\n # Update differences dictionary\n d = {k: abs(probabilities[k] - updated_probabilities[k]) for k in probabilities if k in updated_probabilities}\n #print(\"P\", \"\\033[93m {}\\033[00m\" .format(probabilities))\n #print(\"UP\", \"\\033[96m {}\\033[00m\" .format(updated_probabilities))\n #print(\"D\", \"\\033[91m {}\\033[00m\" .format(d))\n\n # When PR's do not change by > 0.001\n return probabilities",
"def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)",
"def _iter_distributions(self) -> Iterator[\"BaseDistribution\"]:\n raise NotImplementedError()",
"def prob_given(self, posterior, prior):\n\t # print \"posterior, prior\", posterior, prior\n\t return self.prob(merge(prior, posterior)) / self.prob(prior) if self.prob(prior) else 0",
"def compute_posterior(prior, likelihood, y):\n\n # -------------------------------------------------------------------------\n # ERROR CHECKS -- DO NOT MODIFY\n #\n\n # check that prior probabilities sum to 1\n if np.abs(1 - np.sum(prior)) > 1e-06:\n exit('In compute_posterior: The prior probabilities need to sum to 1')\n\n # check that likelihood is specified as a 2D array\n if len(likelihood.shape) != 2:\n exit('In compute_posterior: The likelihood needs to be specified as ' +\n 'a 2D array')\n\n K, M = likelihood.shape\n\n # make sure likelihood and prior agree on number of hidden states\n if len(prior) != M:\n exit('In compute_posterior: Mismatch in number of hidden states ' +\n 'according to the prior and the likelihood.')\n\n # make sure the conditional distribution given each hidden state value sums\n # to 1\n for m in range(M):\n if np.abs(1 - np.sum(likelihood[:, m])) > 1e-06:\n exit('In compute_posterior: P(Y | X = %d) does not sum to 1' % m)\n\n #\n # END OF ERROR CHECKS\n # -------------------------------------------------------------------------\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (b)\n #\n # Place your code to compute the log of the posterior here: store it in a\n # NumPy array called `log_answer`. If you exponentiate really small\n # numbers, the result is likely to underflow (i.e., it will be so small\n # that the computer will just make it 0 rather than storing the right\n # value). You need to go to log-domain. Hint: this next line is a good\n # first step.\n log_prior = np.log(prior)\n# print(log_prior)\n# print(likelihood)\n# print(y)\n unnormal = log_prior + np.log(likelihood[y,:]).sum(axis=0)\n# print(unnormal)\n log_answer = unnormal - scipy.misc.logsumexp(unnormal)\n# print(log_answer)\n\n #\n # END OF YOUR CODE FOR PART (b)\n # -------------------------------------------------------------------------\n\n # do not exponentiate before this step\n posterior = np.exp(log_answer)\n return posterior",
"def bd_process_model_probability(data,\n fitness_prior=flat_fitness_prior,\n N_w_prior=flat_N_w_prior,\n mutation_object=True):\n\n if mutation_object is True:\n trajectories = data.data\n else:\n trajectories = data\n\n ind_likelihood = []\n for traj in trajectories:\n int_s = []\n for s in fitness_prior[0, :]:\n int_N_w = []\n for N_w in N_w_prior[0, :]:\n int_N_w.append(\n bd_process_conditional_likelihood_s_N(traj,\n s=s, N_w=N_w)\n )\n int_s.append(np.trapz(x=N_w_prior[0, :],\n y=int_N_w*N_w_prior[1, :]))\n\n marginalised_likelihood = np.trapz(x=fitness_prior[0, :],\n y=int_s*fitness_prior[1, :])\n ind_likelihood.append(marginalised_likelihood)\n \n mutation_prob = np.product(ind_likelihood)\n\n if mutation_object is True:\n # return updated model_comparison object \n data.bd_prob = mutation_prob\n return data\n else:\n # return marginalised likelihood.\n return mutation_prob",
"def prep(self):\n \n # create a dict with prior probabilities\n self.row_priors = [0.0]*len(self.rows)\n self.feature_priors = dict()\n \n # denominator is given by reference priors\n denominator = sum(self.column_priors)\n # null_feature_prior is used when feature is not observed at all\n # this is set up to scale with features, i.e. arbitrarily adding\n # child features into an ontology should not skew sums over repr.\n null_feature_prior = 1/max(denominator, float(len(self.rows)))\n \n for rowname, rowindex in self.rows.items(): \n numerator = 0\n for colname, colindex in self.columns.items(): \n colprior = self.column_priors[colindex]\n numerator += self.data[colindex][rowindex]*colprior\n if numerator == 0:\n numerator = null_feature_prior \n self.row_priors[rowindex] = float(numerator)/denominator\n self.feature_priors[rowname] = self.row_priors[rowindex]\n\n return self",
"def calc_posterior(likelihood, prior, norm_list):\n Pa = 0\n \n for t in norm_list:\n x = t[0] * t[1]\n Pa+=x\n\n return (likelihood*prior)/Pa",
"def process_custom_prior(prior) -> Tuple[Distribution, int, bool]:\n\n check_prior_methods(prior)\n\n check_prior_batch_behavior(prior)\n\n prior, is_prior_numpy = maybe_wrap_prior_to_pytorch(prior)\n\n parameter_dim = prior.sample().numel()\n\n return prior, parameter_dim, is_prior_numpy",
"def test_3_prior(self):\n print(\"test 3: prior probabilities\")\n\n for i, x in enumerate(self.X):\n print(i+1, prior_probability(\n x, self.means, self.dispersions, self.cluster_probabilities\n ), sep=' : ')",
"def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)",
"def sample_gil_from_data(self, X_d, loop_iters=5):\n data_samples = []\n prior_samples = []\n X_c = 0.0 * X_d\n X_m = 0.0 * X_d\n for i in range(loop_iters):\n # record the data samples for this iteration\n data_samples.append(1.0 * X_d)\n # sample from their inferred posteriors\n X_p = self.IN.sample_posterior(X_d, X_c, X_m)\n # record the sampled points (in the \"prior space\")\n prior_samples.append(1.0 * X_p)\n # get next data samples by transforming the prior-space points\n X_d = self.GN.transform_prior(X_p)\n result = {\"data samples\": data_samples, \"prior samples\": prior_samples}\n return result",
"def before_each(self, dataset: pydicom.dataset.Dataset) -> None:",
"def prior_sample(self):\n pass",
"def calc_prob_prior(iterations, lam):\n return list(map(lambda x: math.exp(-lam * x), range(iterations)))",
"def p_prior(self):\n sampler = self.__sampler\n nwalkers = self.nwalkers\n pRanges = self.pRanges\n if sampler == \"EnsembleSampler\":\n p = [posRange(pRanges) for i in range(nwalkers)]\n elif sampler == \"PTSampler\":\n ntemps = self.ntemps\n p = np.zeros((ntemps, nwalkers, self.ndim))\n for loop_t in range(ntemps):\n for loop_w in range(nwalkers):\n p[loop_t, loop_w, :] = posRange(pRanges)\n return p",
"def process_simulation(self):\n for i in range(self._n):\n probability = self._alpha / float(self._alpha + i - 1)\n tmp = np.random.uniform(size=(1,))\n if tmp < probability:\n self._results.append(np.random.normal(1))\n else:\n self._results.append(np.random.choice(self._results[:i-1], 1)[0])",
"def _update_cached_prediction_probabilities(self, i_iter, X):\n\n if i_iter == 0:\n self.y_pred_proba_latest = np.zeros((self._n_samples, self.n_classes_), \n dtype=np.float64)\n else:\n y_pred_proba_latest = self.y_pred_proba_latest\n y_pred_proba_new = self.estimators_[-1].predict_proba(X)\n self.y_pred_proba_latest = (y_pred_proba_latest * i_iter + y_pred_proba_new) / (i_iter+1)\n return",
"def prob_given(graph, posterior, prior):\n return graph.prob(merge(prior, posterior)) / graph.prob(prior)",
"def iterate_pagerank(corpus, damping_factor, final_dict = None):\n N = len(corpus)\n\n if final_dict is not None:\n \n convergence = {}\n\n for actual_page in corpus:\n\n number_of_links_of_page = len(corpus[actual_page])\n\n if number_of_links_of_page == 0:\n number_of_links_of_page = 1 / N\n\n old_value = final_dict[actual_page]\n\n final_dict[actual_page] = (1 - damping_factor) / N\n final_dict[actual_page] += damping_factor * final_dict[actual_page] / number_of_links_of_page\n\n if old_value - final_dict[actual_page] > -0.001 and old_value - final_dict[actual_page] < 0.001:\n convergence[actual_page] = True\n else :\n convergence[actual_page] = False\n \n if all(value == True for value in convergence.values()):\n return final_dict\n else:\n final_dict = iterate_pagerank(corpus, damping_factor, final_dict)\n\n else:\n final_dict = {}\n\n for actual_page in corpus:\n final_dict[actual_page] = 1/N\n \n final_dict = iterate_pagerank(corpus, damping_factor, final_dict)\n \n return final_dict",
"def update(self, caliStep, likelihood):\n posterior = np.zeros(self.numSamples)\n if caliStep == 0:\n posterior = likelihood / self.proposal\n else:\n posterior = self.posterior[:, caliStep - 1] * likelihood\n\n # regularize likelihood\n posterior /= np.sum(posterior)\n return posterior",
"def test_on_posterior(test_data, test_label, posterior_samples):\n print(\"Testing on posterior samples...\")\n num_posterior_samples = posterior_samples.shape[0]\n avg_pred_test = np.zeros((num_posterior_samples, ))\n avg_pred_log_lld = np.zeros((num_posterior_samples, ))\n \n for k in range(num_posterior_samples):\n # Use the posterior samples\n w_sampled = posterior_samples[k]\n \n # Get the hessian\n #pred, dot_product = get_output(w_sampled, train_data)\n #hessian = get_hessian (phi= train_data, pred= pred[:, np.newaxis], t= train_label[:, np.newaxis], dot_product= dot_product)\n \n pred_test, _ = get_output (w_sampled, test_data)\n acc = get_accuracy(pred_test, test_label) \n pred_likelihood = get_prediction_likelihood_without_complications(test_data, test_label, w_sampled) #get_prediction_likelihood(test_data, test_label, w_sampled, hessian)\n avg_pred_test[k] = acc\n avg_pred_log_lld [k] = np.log(pred_likelihood)\n \n if (k+1)%100 == 0 or k== num_posterior_samples-1:\n print(\"{:5d} Posterior Weight samples Test_data Pred_acc= {:.2f}, Pred_log_likelihood= {:.2f}\".format(k+1, np.mean(avg_pred_test[:k]), np.mean(avg_pred_log_lld[:k])))",
"def prior_distribution(self):\n out = self.model.forward(self.inducing_points)\n return MultivariateNormal(out.mean, out.lazy_covariance_matrix.evaluate_kernel())",
"def iterate_pagerank(corpus, damping_factor):\n # Set initial values to choosing a page randomly\n corpus_length = len(corpus)\n prev_iterated_page_rank = defaultdict(lambda: 1/corpus_length)\n max_abs_difference = inf\n while max_abs_difference > 0.001:\n max_iter_diff = -inf\n next_iterated_page_rank = defaultdict(lambda: (1 - damping_factor) / corpus_length)\n for prev_page in corpus:\n if not corpus[prev_page]:\n print(\"hi\")\n for next_page in corpus:\n next_iterated_page_rank[next_page] += prev_iterated_page_rank[prev_page] * 1/len(corpus)\n else:\n print(\"hi2\")\n for next_page in corpus[prev_page]:\n next_iterated_page_rank[next_page] += damping_factor * prev_iterated_page_rank[prev_page]/len(corpus[prev_page])\n\n for prev_prob, next_prob in zip(prev_iterated_page_rank.values(), next_iterated_page_rank.values()):\n max_iter_diff = max(max_iter_diff, abs(next_prob-prev_prob))\n max_abs_difference = min(max_abs_difference, max_iter_diff)\n\n prev_iterated_page_rank = next_iterated_page_rank.copy()\n assert abs(sum(prev_iterated_page_rank.values())-1) < 10**-2\n assert abs(sum(next_iterated_page_rank.values()) - 1) < 10**-2\n return prev_iterated_page_rank",
"def prior_of_priors(self, tt):\n for i in xrange(self.n_params): \n try: \n p_theta *= self.param_obj.prior()[i].pdf(tt[i]) \n\n except UnboundLocalError: \n p_theta = self.param_obj.prior()[i].pdf(tt[i]) \n\n return p_theta",
"def get_initial_marking_distribution(informed_prior):\n\n support = np.arange(0, MAX_MARK_VALUE+1)\n loc = support.min()\n max = support.max()\n scale = max-loc\n\n\n if not informed_prior:\n probs = ss.uniform(loc=loc, scale=scale).pdf(support)\n else: \n probs = ss.norm.pdf(support, scale=0.7, loc=support.mean()) \n \n probs /= probs.sum()\n x1 = np.random.choice(support, size=1000, p=probs)\n x2 = np.random.choice(support, size=1000, p=probs)\n x = x1-x2\n h,_ = np.histogram(x, bins=len(compute_support()))\n h = h / x.shape[0]\n plt.clf()\n plt.hist(x, bins=len(compute_support()))\n plt.savefig(FM().results_folder/'histogram')\n return ProbabilityDistribution(compute_support(), probability=h)",
"def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n newrank = dict()\n\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n repeat = True\n\n while repeat:\n\n for page in pagerank:\n\n summation = 0\n\n links = get_links(corpus, page)\n\n if not links:\n for p in corpus:\n summation += pagerank[p] / len(corpus)\n\n for link in links:\n summation += pagerank[link] / len(corpus[link])\n\n newrank[page] = (1 - damping_factor) / len(corpus) + damping_factor * summation\n\n repeat = False\n\n for page in pagerank:\n if abs(newrank[page] - pagerank[page]) > 0.001:\n repeat = True\n\n pagerank[page] = newrank[page]\n\n return pagerank",
"def perturb(data):\n repeat = np.zeros(np.shape(data))\n\n count = 0\n for d in data:\n val = d[0]\n err = d[1]\n if err != 0.0:\n #print(val,err)\n val2 = np.random.normal(val, err)\n else:\n val2 = val\n repeat[count][0] = val2\n repeat[count][1] = err\n count = count + 1\n\n return repeat",
"def preprocess(\n self, data: List[Dict[str, Any]]\n ) -> Generator[Dict[str, Any], None, None]:\n raise NotImplementedError",
"def _precompute_probabilities(self):\n\n d_graph = self.d_graph\n first_travel_done = set()\n\n nodes_generator = self.graph.nodes() if self.quiet \\\n else tqdm(self.graph.nodes(), desc='Computing transition probabilities')\n\n for source in nodes_generator:\n\n # Init probabilities dict for first travel\n if self.PROBABILITIES_KEY not in d_graph[source]:\n d_graph[source][self.PROBABILITIES_KEY] = dict()\n\n for current_node in self.graph.neighbors(source):\n\n # Init probabilities dict\n if self.PROBABILITIES_KEY not in d_graph[current_node]:\n d_graph[current_node][self.PROBABILITIES_KEY] = dict()\n\n unnormalized_weights = list()\n first_travel_weights = list()\n d_neighbors = list()\n\n # Calculate unnormalized weights\n for destination in self.graph.neighbors(current_node):\n\n p = self.sampling_strategy[current_node].get(self.P_KEY,\n self.p) if current_node in self.sampling_strategy else self.p\n q = self.sampling_strategy[current_node].get(self.Q_KEY,\n self.q) if current_node in self.sampling_strategy else self.q\n\n if destination == source: # Backwards probability\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / p\n elif destination in self.graph[source]: # If the neighbor is connected to the source\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1)\n else:\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / q\n\n # Assign the unnormalized sampling strategy weight, normalize during random walk\n unnormalized_weights.append(ss_weight)\n if current_node not in first_travel_done:\n first_travel_weights.append(self.graph[current_node][destination].get(self.weight_key, 1))\n d_neighbors.append(destination)\n\n # Normalize\n unnormalized_weights = np.array(unnormalized_weights)\n d_graph[current_node][self.PROBABILITIES_KEY][\n source] = unnormalized_weights / unnormalized_weights.sum()\n\n if current_node not in first_travel_done:\n unnormalized_weights = np.array(first_travel_weights)\n d_graph[current_node][self.FIRST_TRAVEL_KEY] = unnormalized_weights / unnormalized_weights.sum()\n first_travel_done.add(current_node)\n\n # Save neighbors\n d_graph[current_node][self.NEIGHBORS_KEY] = d_neighbors",
"def bias_prior(self):",
"def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)",
"def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n\n #Modifying the corpus, to account the fact that,\n #\"A page that has no links at all should be interpreted as having one link for every page in the corpus\"\n modif_corpus = copy.deepcopy(corpus)\n for pg in modif_corpus.keys():\n if len(modif_corpus[pg]) == 0:\n modif_corpus[pg] = list(corpus.keys())\n\n #Assigning each page a rank of 1 / N, where N is the total number of pages in the corpus\n for pg in modif_corpus.keys():\n pagerank[pg] = 1/len(modif_corpus.keys())\n\n convergence_check = False\n while not convergence_check:\n old_pagerank = copy.deepcopy(pagerank)\n\n for page in pagerank.keys():\n sigma = 0\n for pg in pagerank.keys():\n if page in modif_corpus[pg]: #Finding all the pages that link to 'page'\n sigma += pagerank[pg]/len(modif_corpus[pg])\n \n pagerank[page] = (1-damping_factor)/len(modif_corpus.keys()) + damping_factor*sigma\n\n #Making sure the new values differ more than 0.001\n convergence_check = True\n for pg in modif_corpus.keys():\n if abs(pagerank[pg] - old_pagerank[pg]) > 0.001:\n convergence_check = False\n break\n\n return pagerank",
"def posteriors(likelihoods, priors):\r\n # Check that there is a prior for each likelihood\r\n if len(likelihoods) != len(priors):\r\n raise ValueError(\"Lists not equal lengths.\")\r\n # Posterior probability is defined as prior * likelihood\r\n return [l * p for l, p in zip(likelihoods, priors)]",
"def train_loop_pre(self, current_step):\r\n pass",
"def index_data(self, data, class_):\r\n # !!! Write code to compute and store the parameters in self.conditional_prob[class_].\r\n for feat_index in range(len(data[0])):\r\n \r\n values = [i[feat_index] for i in data]\r\n \r\n deviation = np.std(values)\r\n mean = np.mean(values)\r\n\r\n self.conditional_prob[class_][feat_index] = [mean, deviation]\r\n\r\n # !!! Write code to compute prior.\r\n # Seems like it's been done for us?\r\n self.class_prob[class_] = float(len(data))/self.data_size",
"def policyIteration(P,R,gamma,theta,initial_policy,max_iter=1000000):\n policy_stable = False\n policy = np.copy(initial_policy)\n num_iter = 0\n \n while (not policy_stable) and num_iter < max_iter:\n num_iter += 1\n print('Policy Iteration: ', num_iter)\n # policy evaluation\n v = policyEval(policy,P,R,gamma,theta)\n # policy improvement\n policy, policy_stable = policyImprv(P,R,gamma,policy,v)\n return policy, v",
"def prior_sampler(self, nsamples, seed=0, test_lprob=False, lks=None, verbose=True, debug=False, **args):\n\n import tqdm\n from grgrlib import map2arr, serializer\n from .stats import get_prior\n\n store_reduce_sys = np.copy(self.fdict['reduce_sys'])\n\n l_max, k_max = lks or (None, None)\n\n # if not store_reduce_sys:\n # self.get_sys(reduce_sys=True, verbose=verbose > 1, **args)\n\n if test_lprob and not hasattr(self, 'ndim'):\n self.prep_estim(load_R=True, verbose=verbose > 2)\n\n frozen_prior = get_prior(self.prior, verbose=verbose)[0]\n self.debug |= debug\n\n if hasattr(self, 'pool'):\n from .estimation import create_pool\n create_pool(self)\n\n set_par = serializer(self.set_par)\n get_par = serializer(self.get_par)\n lprob = serializer(self.lprob) if test_lprob else None\n\n def runner(locseed):\n\n np.random.seed(seed+locseed)\n done = False\n no = 0\n\n while not done:\n\n no += 1\n\n with np.warnings.catch_warnings(record=False):\n try:\n np.warnings.filterwarnings('error')\n rst = np.random.randint(2**31) # win explodes with 2**32\n pdraw = [pl.rvs(random_state=rst+sn)\n for sn, pl in enumerate(frozen_prior)]\n\n if test_lprob:\n draw_prob = lprob(pdraw, linear=None,\n verbose=verbose > 1)\n done = not np.isinf(draw_prob)\n else:\n set_par(pdraw)\n done = True\n\n except Exception as e:\n if verbose > 1:\n print(str(e)+'(%s) ' % no)\n\n return pdraw, no\n\n if verbose > 1:\n print('[prior_sample:]'.ljust(15, ' ') + ' Sampling from the pior...')\n\n wrapper = tqdm.tqdm if verbose < 2 else (lambda x, **kwarg: x)\n pmap_sim = wrapper(self.mapper(runner, range(nsamples)), total=nsamples)\n\n draws, nos = map2arr(pmap_sim)\n\n # if not store_reduce_sys:\n # self.get_sys(reduce_sys=False, verbose=verbose > 1, **args)\n\n if verbose:\n smess = ''\n if test_lprob:\n smess = 'of zero likelihood, '\n print('[prior_sample:]'.ljust(\n 15, ' ') + ' Sampling done. %2.2f%% of the prior is either %sindetermined or explosive.' % (100*(sum(nos)-nsamples)/sum(nos), smess))\n\n return draws",
"def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]",
"def train_mdn_proposal_prior(save=True):\n\n n_iterations = n_bootstrap_iter\n n_data = 500\n\n # read data\n pilot_means, pilot_stds = helper.load(datadir + 'pilot_run_results.pkl')\n obs_stats = helper.load(datadir + 'obs_stats.pkl')\n obs_stats -= pilot_means\n obs_stats /= pilot_stds\n\n # create an mdn\n net = mdn.MDN_SVI(n_inputs=9, n_hiddens=[50], act_fun='tanh', n_outputs=4, n_components=1)\n regularizer = lf.regularizerSvi(net.mps, net.sps, 0.01)\n prior_proposal = None\n\n for iter in xrange(n_iterations):\n\n # generate new data\n params = []\n stats = []\n dist = []\n i = 0\n\n while i < n_data:\n\n prop_params = sim_prior_params() if iter == 0 else np.exp(prior_proposal.gen())[0]\n if np.any(np.log(prop_params) < log_prior_min) or np.any(np.log(prop_params) > log_prior_max):\n continue\n try:\n lv = mjp.LotkaVolterra(init, prop_params)\n states = lv.sim_time(dt, duration, max_n_steps=max_n_steps)\n except mjp.SimTooLongException:\n continue\n\n sum_stats = calc_summary_stats(states)\n sum_stats -= pilot_means\n sum_stats /= pilot_stds\n\n params.append(prop_params)\n stats.append(sum_stats)\n dist.append(calc_dist(sum_stats, obs_stats))\n i += 1\n\n print 'simulation {0}, distance = {1}'.format(i, dist[-1])\n\n params = np.array(params)\n stats = np.array(stats)\n dist = np.array(dist)\n\n # plot distance histogram\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(dist, bins=int(np.sqrt(n_data)))\n ax.set_title('iteration = {0}'.format(iter + 1))\n ax.set_xlim([0.0, 12.0])\n plt.show(block=False)\n\n # train an mdn to give the posterior\n minibatch = 100\n maxiter = int(2000 * n_data / minibatch)\n monitor_every = 100\n trainer = Trainer.Trainer(\n model=net,\n trn_data=[stats, np.log(params)],\n trn_loss=net.mlprob + regularizer / n_data,\n trn_target=net.y\n )\n trainer.train(\n maxiter=maxiter,\n minibatch=minibatch,\n show_progress=True,\n monitor_every=monitor_every\n )\n\n # calculate the approximate posterior\n mdn_mog = net.get_mog(obs_stats)\n approx_posterior = mdn_mog if iter == 0 else mdn_mog / prior_proposal\n prior_proposal = approx_posterior.project_to_gaussian()\n\n # save the net and the approximate posterior\n if save:\n helper.save((net, approx_posterior, prior_proposal, dist), netsdir + 'mdn_svi_proposal_prior_{0}.pkl'.format(iter))",
"def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n\r\n mu = np.zeros(2)\r\n Cov = np.array([[beta, 0], [0, beta]])\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu, Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Prior Distribution of α\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.show()\r\n \r\n return",
"def iterate_pagerank(corpus, damping_factor):\n pagerank = dict()\n new_pagerank = dict()\n repeat = True\n\n # Assigning each page a rank of 1 / N, where N is the total number of pages in the corpus.\n for page in corpus:\n pagerank[page] = 1 / len(corpus)\n\n # Repeatedly calculate new rank values based on all of the current rank values\n while repeat:\n for page in corpus:\n\n # Probability that we followed a link from a page i to current page.\n followed = 0.0\n for linked_page in linked_pages(corpus, page):\n followed += pagerank[linked_page] / number_of_links(corpus, linked_page)\n\n new_pagerank[page] = (1 - damping_factor) / len(corpus) + damping_factor * followed\n\n repeat = False\n\n # Repeat the process if new PageRank value changes by more than 0.001\n for page in pagerank:\n if not isclose(pagerank[page], new_pagerank[page], abs_tol=0.001):\n repeat = True\n\n # Assigning new values to the previous ones\n pagerank[page] = new_pagerank[page]\n\n # Sorting pagerank by keys\n pagerank = dict(sorted(pagerank.items()))\n\n return pagerank",
"def sample_posterior(self):\n if(self.Bayesian):\n for i in range(self.num_layers):\n getattr(self, 'LSTMCell%i'%(i+1)).sample_posterior()",
"def __predict_projection__(self, data: List[np.ndarray]) -> List[np.ndarray]:\n\n copied_data = np.copy(data)\n\n predicted_data = list()\n\n for curr_copied_sample in tqdm(copied_data,\n disable=not self.verbose,\n postfix=f'Predicting...'):\n curr_prediction = self.__forward_projection__(initial_data=curr_copied_sample)\n\n predicted_data.append(curr_prediction)\n\n return predicted_data",
"def betabinom_artifact_model_probability(data,\n prior_p=betabinom_p_prior[0],\n prior_beta=betabinom_beta_prior[0],\n mutation_object=True):\n\n if mutation_object is True:\n trajectories = data.data\n else:\n trajectories = data\n\n # initialise list of samples for p integration\n int_p = []\n # integral over betabinom p parameter\n for p_sample in prior_p[0, :]:\n # initialise list of samples for beta integration\n int_beta = []\n # integrate over beta parameter\n for beta_sample in prior_beta[0, :]:\n # For each combination of p and beta parameters compute\n # lieklihood of observing a given time-series conditional\n # on initial time-point.\n alpha_sample = beta_sample*p_sample/(1-p_sample)\n # compute likelihood for each individual omiting first time point\n ind_likelihoods = []\n for ind_traj in trajectories:\n likelihood = betabinom.pmf(k=ind_traj[1:].AO,\n n=ind_traj[1:].DP,\n a=alpha_sample,\n b=beta_sample)\n ind_likelihoods.append(np.product(likelihood))\n # for each beta append the total likelihood (product individuals).\n int_beta.append(np.product(ind_likelihoods))\n # For each p, compute the likelihood marginalised over beta\n int_p.append(\n np.trapz(x=prior_beta[0, :],\n y=int_beta*prior_beta[1, :]))\n \n # marginalise likelihood over p\n mutation_prob = np.trapz(x=prior_p[0, :],\n y=int_p*prior_p[1, :])\n\n if mutation_object is True:\n # return updated model_comparison object \n data.betabinom_artifact_prob = mutation_prob\n return data\n else:\n # return marginalised likelihood.\n return mutation_prob",
"def propose(self):\n\n p = type(self)(self.n, alpha=self.alpha)\n\n return p, p.compute_prior() - self.compute_prior()",
"def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return",
"def lnprior(self):\n \n return",
"def process_prior(prior: Callable) -> Tuple[Distribution, int, bool]:\n\n if isinstance(prior, Distribution):\n return process_pytorch_prior(prior)\n\n # If prior is given as scipy.stats object, wrap to PyTorch.\n elif isinstance(prior, (rv_frozen, multi_rv_frozen)):\n event_shape = torch.Size([prior.rvs().size])\n # batch_shape is passed as default\n prior = ScipyPytorchWrapper(\n prior, batch_shape=torch.Size([]), event_shape=event_shape\n )\n return process_pytorch_prior(prior)\n\n # Otherwise it is a custom prior - check for .sample and .log_prob methods.\n else:\n return process_custom_prior(prior)",
"def sample_params_from_posterior(self, y_NT, prev_params):\n posterior_shape = self._shape + y_NT.sum(axis=1)\n posterior_scale = 1.0/((1.0/self._scale) + self.T)\n self._lambda_T[:] = rn.gamma(posterior_shape, posterior_scale)\n return self._lambda_T.copy()",
"def cal_limit(prior_a, posterior_a,prior_b, posterior_b):\n limiter = 0\n a=0\n b=0\n for l in prior_a:\n limiter += math.pow((prior_a[l]-posterior_a[l]),2)\n a+=posterior_a[l]\n for l in prior_b:\n limiter += math.pow((prior_b[l]-posterior_b[l]),2)\n a+=posterior_b[l] \n # a=0 do not meet the condition, need to continue iteration\n if a==0:\n b=1\n print(\"Warning: line.py: sum posterior flow = 0\")\n else:\n b=math.sqrt(limiter)/a\n return b",
"def randomize(self):\n #first take care of all parameters (from N(0,1))\n x = self._get_params_transformed()\n x = np.random.randn(x.size)\n self._set_params_transformed(x)\n #now draw from prior where possible\n x = self._get_params()\n [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]\n self._set_params(x)\n self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)",
"def _get_posterior_params(self, t, **kwargs):\n raise NotImplementedError",
"def update_params(x, prior, posterior):\r\n mu0, kappa0, alpha0, beta0 = prior\r\n mu_t, kappa_t, alpha_t, beta_t = posterior\r\n return np.r_[mu0, (kappa_t*mu_t + x)/(kappa_t + 1)], \\\r\n np.r_[kappa0, kappa_t + 1], \\\r\n np.r_[alpha0, alpha_t + 0.5], \\\r\n np.r_[beta0, beta_t + 0.5*kappa_t*(x - mu_t)**2/(kappa_t + 1)]",
"def postProb(self, alpha, beta):\n gamma = None\n\n # -------------------------------------------->\n\n # Your Code goes here\n gamma = np.zeros(alpha.shape)\n for i in range(len(gamma)):\n s = 0\n for j in range(len(gamma[i])):\n gamma[i,j] = alpha[i,j]*beta[i,j]\n s += gamma[i,j]\n for j in range(len(gamma[i])):\n gamma[i,j] = gamma[i,j]/s\n # <---------------------------------------------\n\n return gamma",
"def iterate(self):\n for i in range(self.generations):\n sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print(\n [item.to_string() for item in sorted_polulation[:8]],\n [round(item.fitness_function(item),2) for item in sorted_polulation]\n )\n\n # print([item.to_string() for item in self.data])\n\n self.step()\n print(\"result\")\n sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print([str(item) for item in sorted_polulation])",
"def setPrior(self,xPrior,priorWeight):\n assert self.regularizationLambda == 0\n if not isinstance(xPrior,np.ndarray):\n xPrior = np.array(xPrior)\n self.count = 1\n self.sumWeight = priorWeight\n self.scale = 1\n self.AtA = np.eye(self.n)*priorWeight\n self.AtAinv = np.eye(self.n)/priorWeight\n self.Atb = xPrior*priorWeight\n self.btb = np.dot(xPrior,xPrior)*priorWeight\n self.degenerate = False\n self.x = xPrior",
"def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])",
"def process(self, data_itr):\n for data in data_itr:\n self.update(data)\n while True:\n try:\n out = self.next()\n yield out\n except StopIteration:\n break",
"def prior_vars(self):\n priors = []\n for i in self.active_ssms(0):\n ssm = self.ssms[i]\n prior = ssm.prior_vars()\n\n if self.ssm_starts[i] < 0:\n P = np.diag(prior)\n P2 = P.copy()\n for k in range(-self.ssm_starts[i]):\n ssm.transition_covariance(P2, k+1, P)\n ssm.transition_noise_diag(k+1, prior)\n np.fill_diagonal(P, np.diag(P) + prior)\n P2 = P\n\n # since the interface only supports independent\n # priors, return a diagonal approximation of the true\n # prior\n prior = np.diag(P)\n priors.append(prior)\n return np.concatenate(priors)",
"def posterior(store):\n return logl(store) + prior(store)",
"def iterates(sigma, w, n_iters=30):\n iters = [w]\n for i in range(n_iters):\n Tv = T(sigma, iters[i]) # i is previous.\n iters.append(Tv)\n iters.pop(0) # Leave only instances of LinInterp\n return iters",
"def posterior(self, val, **kwargs) -> float:\n\n data = self.data\n\n # override val with parameters specified via kwargs\n val = copy.deepcopy(val)\n for key, value in kwargs.items():\n setattr(val, key, value)\n\n # extract parameters\n gain = val.gain\n states = val.states\n pi = val.transitions\n pi_conc = val.transitions_conc\n mu_flor = val.mu_flor\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back = val.mu_back\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n load_weight = val.load_weight\n num_rois = val.num_rois\n num_load = val.num_load\n num_data = val.num_data\n num_states = val.num_states\n\n # calculate shape parameters\n idx = mu_flor_mean > 0\n mu_flor_scale = np.zeros(mu_flor_mean.shape)\n mu_flor_scale[idx] = mu_flor_mean[idx] / mu_flor_shape[idx]\n mu_back_scale = mu_back_mean / mu_back_shape\n # calculate effective pi for collapsed state space when weight on load is taken into account\n pi_eff = pi.copy()\n pi_eff[-1, :] *= load_weight\n pi_eff[-1, -1] = 1 - load_weight\n\n # probability from likelihood\n brightness = np.zeros(shape=data.shape)\n for r in range(num_rois):\n brightness[r, :] = mu_flor @ states_to_pops(states[r, :, :], num_states) + mu_back[r]\n lhood = np.sum(stats.gamma.logpdf(data, a=brightness, scale=gain))\n\n # probability from phototrajectory\n kinetic = 0\n for i in range(num_states):\n if pi_eff[-1, i] > 0:\n kinetic += np.sum(states[:, :, 0] == i) * np.log(pi_eff[-1, i])\n for j in range(num_states):\n if pi_eff[i, j] > 0:\n kinetic += np.sum((states[:, :, :-1] == i) * (states[:, :, 1:] == j)) * np.log(pi_eff[i, j])\n\n # probability from prior\n prior = (\n # prior on fluorophore brightness (ignore dark states)\n np.sum(stats.gamma.logpdf(mu_flor[idx], a=mu_flor_shape[idx], scale=mu_flor_scale[idx]))\n # prior on background brightness\n + np.sum(stats.gamma.logpdf(mu_back, a=mu_back_shape, scale=mu_back_scale))\n # prior on transitions\n + np.sum(Dirichlet.logpdf(pi, pi_conc))\n )\n\n prob = lhood + kinetic + prior\n\n return prob",
"def on_iterate(self, data: Any = None):\n raise NotImplementedError",
"def __init__(self, data, simulator, \n prior_dict = {}, \n N = 1000, \n eps0 = 0.01, \n T = 20, \n Nthreads = 10):\n\n self.data = data\n self.N = N \n self.eps0 = eps0 \n self.T = T \n self.Nthreads = Nthreads \n\n # simulator function has to be a function of theta_star \n self.simz = simulator\n\n self.prior_param(param_dict = prior_dict) # first run prior parameters",
"def randomize(self):\r\n # first take care of all parameters (from N(0,1))\r\n x = self._get_params_transformed()\r\n x = np.random.randn(x.size)\r\n self._set_params_transformed(x)\r\n # now draw from prior where possible\r\n x = self._get_params()\r\n if self.priors is not None:\r\n [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]\r\n self._set_params(x)\r\n self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)\r",
"def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]",
"def posterior_sample_parameter(self, parameter):\n pass",
"def log_prior(self, params):",
"def process_pytorch_prior(prior: Distribution,) -> Tuple[Distribution, int, bool]:\n\n # reject scalar priors\n if prior.sample().ndim == 0:\n raise ValueError(\n \"Detected scalar prior. Please make sure to pass a PyTorch prior with \"\n \"batch_shape=torch.Size([1]), or event_shape=torch.Size([1])\"\n )\n\n assert prior.batch_shape in (\n torch.Size([1]),\n torch.Size([]),\n ), f\"\"\"The prior must have batch shape torch.Size([]) or torch.Size([1]), but has\n {prior.batch_shape}.\n \"\"\"\n\n check_prior_batch_behavior(prior)\n\n check_for_batch_reinterpretation_extra_d_uniform(prior)\n\n parameter_dim = prior.sample().numel()\n\n return prior, parameter_dim, False",
"def priorProbabilities():\r\n\ttotal = 0.0\r\n\tpos = 0.0\r\n\tneg = 0.0\r\n\r\n\t# Count the amount of positives and negatives in the training data\r\n\tfor item in trainingData:\r\n\t\ttotal += 1\r\n\t\tif item[1] == '0':\r\n\t\t\tpos +=1\r\n\t\tif item[1] == '1':\r\n\t\t\tneg +=1\r\n\t\t\t\r\n\t\t\t\r\n\t# Return the positive and negative probabilities \r\n\tposProb = float(pos / total * 100)\r\n\tnegProb = float(neg / total * 100)\r\n\r\n\t\r\n\t\r\n\treturn posProb, negProb",
"def sample_hybrid_zprior_xmetro(network, niter, nprior, nmetro, prior_std=1.0, noise=0.02, z0=None, x0=None,\n mapper=None, verbose=0):\n from deep_boltzmann.sampling import MetropolisGauss\n Z = []\n X = []\n E = []\n J = []\n\n # initial configuration\n if z0 is not None and x0 is not None:\n raise ValueError('Cannot set both x0 and z0.')\n if x0 is not None:\n z0 = network.transform_xz(x0)\n\n for i in range(niter):\n if verbose > 0 and (i+1) % verbose == 0:\n print((i+1), '/', niter)\n # Gaussian prior MCMC\n prior_mc = GaussianPriorMCMC(network, z0=z0, std_z=prior_std, batchsize=nprior, xmapper=mapper)\n z, x, e, j = prior_mc.run(nprior)\n if mapper is not None:\n x = mapper.map(x)\n X.append(x)\n Z.append(z)\n E.append(e)\n J.append(j)\n z0 = prior_mc.z.copy()\n\n # Run Metropolis MCMC in x\n x0 = prior_mc.x\n lmg = MetropolisGauss(network.energy_model, x0, noise=noise, mapper=mapper)\n lmg.run(nmetro)\n X.append(lmg.trajs[0])\n E.append(lmg.etrajs[0])\n\n # transform to z\n ztraj = network.transform_xz(lmg.trajs[0])\n Z.append(ztraj)\n z0 = ztraj[-1]\n\n Z = np.vstack(Z)\n X = np.vstack(X)\n E = np.concatenate(E)\n J = np.concatenate(J)\n\n return Z, X, E, J",
"def extract_fake_probs(self, generator, generator_kwargs):\n latent_num = self.latent_num\n batch_size = self.batch_size\n if self.random_latents:\n g1 = torch.Generator(device=self.device)\n g1.manual_seed(self.seed)\n else:\n latent_codes = np.load(self.latent_file)[self.replica_indices]\n latent_codes = torch.from_numpy(latent_codes).to(torch.float32)\n if self.random_labels:\n g2 = torch.Generator(device=self.device)\n g2.manual_seed(self.seed)\n else:\n labels = np.load(self.label_file)[self.replica_indices]\n labels = torch.from_numpy(labels).to(torch.float32)\n\n G = generator\n G_kwargs = generator_kwargs\n G_mode = G.training # save model training mode.\n G.eval()\n\n self.logger.info(f'Extracting inception predictions from fake data '\n f'{self.log_tail}.',\n is_verbose=True)\n self.logger.init_pbar()\n pbar_task = self.logger.add_pbar_task('Fake', total=latent_num)\n all_probs = []\n for start in range(0, self.replica_latent_num, batch_size):\n end = min(start + batch_size, self.replica_latent_num)\n with torch.no_grad():\n if self.random_latents:\n batch_codes = torch.randn((end - start, *self.latent_dim),\n generator=g1, device=self.device)\n else:\n batch_codes = latent_codes[start:end].cuda().detach()\n if self.random_labels:\n if self.label_dim == 0:\n batch_labels = torch.zeros((end - start, 0),\n device=self.device)\n else:\n rnd_labels = torch.randint(\n low=0, high=self.label_dim, size=(end - start,),\n generator=g2, device=self.device)\n batch_labels = F.one_hot(\n rnd_labels, num_classes=self.label_dim)\n else:\n batch_labels = labels[start:end].cuda().detach()\n batch_images = G(batch_codes, batch_labels, **G_kwargs)['image']\n batch_probs = self.inception_model(batch_images,\n output_predictions=True,\n remove_logits_bias=True)\n gathered_probs = self.gather_batch_results(batch_probs)\n self.append_batch_results(gathered_probs, all_probs)\n self.logger.update_pbar(pbar_task, (end - start) * self.world_size)\n self.logger.close_pbar()\n all_probs = self.gather_all_results(all_probs)[:latent_num]\n\n if self.is_chief:\n assert all_probs.shape == (latent_num, PROBS_DIM)\n else:\n assert len(all_probs) == 0\n all_probs = None\n\n if G_mode:\n G.train() # restore model training mode.\n\n self.sync()\n return all_probs",
"def init_to_prior(site, skip_param=False):\n return init_to_median(site, num_samples=1, skip_param=skip_param)",
"def conditionalize(prior, conditional, observed):\n\n # construct joint probability table (Step 1 of Master Method)\n joint = PGM2(prior, conditional)\n #print(joint.get_cell(('POX', 'NOSPOTS')))\n\n # update joint probability table after observing value of N1 (Steps 2 and 3 of Master Method)\n joint.update(observed, 1)\n\n # marginalize to get probability distribution for N0 (Step 4 of Master Method)\n posterior = joint.marginalize(0)\n\n return posterior",
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n\r\n # mean of posterior distribution is the MAP estimate of the weights a\r\n # tau^2(from notes) is beta\r\n\r\n extra_col = np.ones((x.shape[0], 1))\r\n x = np.append(extra_col, x, axis = 1)\r\n\r\n alpha_map = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))@(np.transpose(x)@z)\r\n mu = alpha_map\r\n\r\n Cov = np.linalg.inv((np.transpose(x)@x + (sigma2/beta)*np.eye(2)))*sigma2\r\n\r\n num_x = 100\r\n num_y = 100\r\n\r\n xvalues = np.linspace(-1, 1, num = num_x)\r\n yvalues = np.linspace(-1, 1, num = num_y)\r\n X_grid, Y_grid = np.meshgrid(xvalues, yvalues)\r\n\r\n samples = np.column_stack((X_grid.flatten(), Y_grid.flatten()))\r\n\r\n density = util.density_Gaussian(mu.squeeze(), Cov, samples)\r\n density_grid = np.reshape(density, (num_x, num_y))\r\n\r\n plt.figure(1)\r\n plt.title(\"Posterior Distribution of α Given 5 Data Points\")\r\n plt.xlabel('$α_0$')\r\n plt.ylabel('$α_1$')\r\n plt.scatter(-0.1, -0.5, c='r')\r\n plt.contour(X_grid, Y_grid, density_grid, cmap=plt.cm.winter)\r\n plt.show()\r\n\r\n return (mu,Cov)",
"def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))",
"def before_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for a_filter in self.filters:\r\n a_filter.before_each(dataset)",
"def sample_posterior(self):\n \n# print (\"SAMPLING FROM LINEAR SIMILARITY VB\")\n if (self.posterior_mean == False):\n self.weight = Vil.sample_posterior(self.mu_weight, Vil.softplus(self.rho_weight))\n self.bias = Vil.sample_posterior(self.mu_bias, Vil.softplus(self.rho_bias))\n# print (self.bias)\n else:\n self.weight.data = self.mu_weight.data\n self.bias.data = self.mu_bias.data",
"def priorities_generator():\n priorities = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]\n yield from itertools.chain(priorities, itertools.repeat(0.1))",
"def prior(mu):\n p = np.ones(len(mu))/(mu.max()-mu.min())\n return p",
"def _pso_do_iter(self):\n for particle in self.particles:\n particle.update(self.best)\n if is_better(self.best[0], particle.value,\n self.find_max):\n self.best = (particle.value, deepcopy(particle.position))\n\n try:\n self.snapshots.append(\n tuple([deepcopy(self.particles), deepcopy(self.best)])\n )\n except AttributeError:\n pass",
"def posteriorDistribution(x,z,beta,sigma2):\r\n ### TODO: Write your code here\r\n mu = 0\r\n Cov = 0\r\n\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n\r\n X = []\r\n for i in x:\r\n j = [1,i[0]]\r\n X.append(j)\r\n X = np.array(X)\r\n\r\n common = np.matmul( X.T , X) + np.identity(2) * sigma2/beta\r\n common = np.linalg.inv(common)\r\n Cov = common * sigma2\r\n mu = np.matmul(common , np.matmul (X.T , z) )\r\n mu = mu.flatten()\r\n print(\"X.shape: \" , X.shape)\r\n print(\"z.shape: \",z.shape)\r\n print(\"Cov.shape\" , Cov.shape)\r\n print(\"mu.shape: \",mu.shape)\r\n density = util.density_Gaussian(mu , Cov , x_s).reshape(150 , 150 ).T\r\n print(\"density.shape\",density.shape)\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n\r\n \r\n\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )))\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.xlim = (-1,1)\r\n plt.ylim = (-1,1)\r\n plt.title('p(a|x1,z1....xn,zn) for '+ str(len(x)) +' samples')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x')\r\n\r\n return (mu,Cov)",
"def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()",
"def iter_prioritized(claims: Iterable[ScorableClaim]) -> Generator[PrioritizationClaim, None, None]:\n _measurement_name_base = f'{__name__}.{iter_prioritized.__name__}'\n\n _before_next_expectation = time.time()\n\n for claim in claims:\n _measurement_tags = {'entity_type': claim.entity_type, 'ad_account_id': claim.ad_account_id}\n\n Measure.timing(f'{_measurement_name_base}.next_expected', tags=_measurement_tags, sample_rate=0.01)(\n (time.time() - _before_next_expectation) * 1000\n )\n\n try:\n score = ScoreCalculator.assign_score(claim)\n with Measure.timer(f'{_measurement_name_base}.yield_result', tags=_measurement_tags):\n yield PrioritizationClaim(\n claim.entity_id,\n claim.entity_type,\n claim.report_type,\n claim.job_signature,\n score,\n ad_account_id=claim.ad_account_id,\n timezone=claim.timezone,\n range_start=claim.range_start,\n )\n except ScoringException as e:\n ErrorInspector.inspect(e, claim.ad_account_id, {'job_id': claim.job_id})\n\n _before_next_expectation = time.time()",
"def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n modstr = '%s__' % self.modality\n items = sorted([(k.replace('clf__'+modstr, ''), v) for k, v in p.items() if modstr in k])\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params"
] | [
"0.6235733",
"0.6220724",
"0.59709436",
"0.5916188",
"0.5812615",
"0.5801119",
"0.57054955",
"0.5677082",
"0.56668025",
"0.5654472",
"0.5648874",
"0.56254435",
"0.55976546",
"0.5591801",
"0.5586015",
"0.55826384",
"0.5580829",
"0.55756706",
"0.5572443",
"0.55515474",
"0.55488694",
"0.55463785",
"0.5525445",
"0.55224013",
"0.55179685",
"0.5514963",
"0.55098355",
"0.54941994",
"0.54751915",
"0.54649514",
"0.5464119",
"0.54401225",
"0.54291",
"0.5418424",
"0.541665",
"0.54155695",
"0.5404413",
"0.5402946",
"0.5392489",
"0.5377827",
"0.53370124",
"0.5334987",
"0.5332446",
"0.53257746",
"0.53248334",
"0.5298107",
"0.52868545",
"0.5279597",
"0.52762455",
"0.52629167",
"0.52470577",
"0.52444166",
"0.5237417",
"0.52335334",
"0.5193181",
"0.5192976",
"0.518461",
"0.5160675",
"0.515815",
"0.51569724",
"0.51417357",
"0.5141452",
"0.51367486",
"0.5136345",
"0.51338285",
"0.5131851",
"0.5131651",
"0.5131422",
"0.5128436",
"0.51265156",
"0.51261616",
"0.51180565",
"0.5116888",
"0.51074487",
"0.5105099",
"0.51018226",
"0.5092828",
"0.508672",
"0.5084894",
"0.50835776",
"0.5083226",
"0.50821275",
"0.50806725",
"0.5076438",
"0.5074881",
"0.5063714",
"0.5061999",
"0.5057715",
"0.5056976",
"0.5054465",
"0.5054356",
"0.50475824",
"0.5046379",
"0.5045426",
"0.50441855",
"0.5041838",
"0.5036294",
"0.5028351",
"0.5022872",
"0.5020482"
] | 0.69863427 | 0 |
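A note on the record above: `iterate` depends on a `likelihood` method and a `_normalize` helper that are presumably defined elsewhere in the class and are not shown here. A minimal self-contained sketch of the same grid-based sequential Bayes update, using a hypothetical Bernoulli likelihood over a grid of coin biases (the names `theta`, `likelihood`, `normalize`, and the example data stream below are all illustrative, not part of the original class):

import numpy as np

# Illustrative parameter grid: candidate coin biases.
theta = np.linspace(0.01, 0.99, 99)
# Uniform prior over the grid.
current = np.full_like(theta, 1.0 / theta.size)

def likelihood(d):
    # Bernoulli likelihood of a single observation d (1 = heads, 0 = tails)
    # evaluated at every grid point.
    return theta if d == 1 else 1.0 - theta

def normalize(p):
    # Rescale so the distribution sums to one.
    return p / p.sum()

posterior = current[np.newaxis, :]  # row 0 is the prior
for d in [1, 0, 1, 1]:  # example data stream
    current = normalize(current * likelihood(d))
    posterior = np.concatenate((posterior, [current]))

print(f"{posterior.shape[0] - 1} iterations completed!")

Each row of `posterior` is the updated distribution after one more observation, which is exactly the shape that the `posterior` branch of the next record's `cumulative_distribution` consumes via `np.cumsum(..., axis=1)`.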
Plots the cumulative distribution function of the input probability distribution. | def cumulative_distribution(self, dist='current'):
    # Map each stored distribution to its cumulative sums; the returned
    # array holds the CDF values (one row per iteration for 'posterior').
    dictDist = {'current': np.cumsum(self.current),
                'prior': np.cumsum(self.prior),
                'posterior': np.cumsum(self.posterior, axis=1)}
    cdf = dictDist[dist]
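    # Hedged usage sketch (assumes matplotlib, which this class does not import):
    #   import matplotlib.pyplot as plt
    #   plt.step(np.arange(len(cdf)), cdf, where='post')  # 1-D case
    #   plt.show()
    # For dist='posterior' the result is 2-D: plot one row per update step.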
    return cdf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_cdf(self, **options):\n plt.plot(self.xs, self.ps, **options)",
"def cumulative_plot(self, with_powerlaw=False, **kwargs):\n x,y = split(self.cumulative_distribution())\n if \"label\" not in kwargs:\n kwargs[\"label\"] = \"$P(k%s)$\" % self.texindex\n p = pylab.loglog(x,y, **kwargs)\n pylab.xlabel(\"$k%s$\" % self.texindex, fontsize=self.labelfontsize)\n pylab.ylabel(\"$P(k%s)$\" % self.texindex, fontsize=self.labelfontsize)\n pylab.title(\"Cumulative %s distribution\" % self.degree_type)\n if with_powerlaw:\n kwargs.pop(\"marker\", None)\n kwargs.pop(\"label\", None)\n if self.gamma is None:\n self.exponent()\n powerlaw.plot(exponent=-self.gamma + 1,\n xmax=self.max_deg, xmin=self.k_min,\n num=2,\n **kwargs\n )\n return p",
"def make_cumulative_distr_plot(data):\n x = data.index\n y = data[\"cumprop\"]\n plot = go.Bar(x=x, y=y, showlegend=False)\n\n return plot",
"def plot_cumulative_distribution(data, fig_title, ax_labels=None, resolution=.01, filename=None):\r\n if not isinstance(data, ndarray):\r\n return TypeError('Expected data as numpy array.')\r\n\r\n cumulative_function = estimate_cumulative(data, num_bins=int(1 / resolution * 2))\r\n cumulative_function = vectorize(cumulative_function)\r\n reference = linspace(0, 1, 50)\r\n cumulative = cumulative_function(reference)\r\n\r\n fig, ax = subplots()\r\n ax.plot(reference, cumulative, '.-')\r\n\r\n ax.set_title(fig_title)\r\n\r\n set_labels(ax, fig_title, ax_labels)\r\n tight_layout()\r\n\r\n if filename is not None:\r\n savefig(filename)\r\n\r\n return ax",
"def cumulative_distribution(self, X):\n raise NotImplementedError",
"def plot_cdf(self, data, ax=None, survival=False, **kwargs):\n bins, CDF = self.cdf(data, survival=survival, **kwargs)\n if not ax:\n fig, ax = plt.subplots()\n ax.loglog(bins, CDF, **kwargs)\n else:\n fig = ax.get_figure()\n ax.plot(bins, CDF, **kwargs)\n return fig, ax",
"def cdf(self, alpha): #Plot empirical cfd with confidence interval\n x = self.x\n n = len(x)\n y = np.arange(1, n+1)/n\n \n #Computing confidence interval with the Dvoretzky–Kiefer–Wolfowitz method based on the empirical points\n F1 = []\n F2 = []\n for i in range(0, n):\n e = (((mt.log(2/alpha))/(2*n))**0.5) \n F1.append(y[i] - e)\n F2.append(y[i] + e) \n plt.plot(sorted(x), y, label='Empirical CDF')\n plt.plot(sorted(x), F1, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Dvoretzky–Kiefer–Wolfowitz Confidence Bands')\n plt.plot(sorted(x), F2, linestyle='--', color='red', alpha = 0.8, lw = 0.9)\n plt.ylabel('Cumulative Distribution Function')\n plt.xlabel('Observed Data')\n plt.legend()\n plt.show()\n \n return(y)",
"def distribution_plot(data):\r\n ready_data = sorted((data))\r\n fit = stats.norm.pdf(ready_data, np.mean(ready_data), np.std(ready_data))\r\n plt.plot(ready_data, fit, '-o')\r\n plt.ylabel(\"Prob\")\r\n plt.xlabel(\"Prices\")\r\n plt.title(\"Distribution of prices (Under 50 days) Demand Function\")\r\n plt.show()",
"def cumulative_probability_distribution(self):\n return list(accumulate(self.probability_distribution()))",
"def plt_cumulative_hist(v, bins=10):\n values, base = np.histogram(v, bins=bins)\n cumulative = np.cumsum(values)\n plt.plot(base[:-1], cumulative, c='blue')",
"def convolute_plot(lam, mu, sigma, nEntries, randomState=None):\n np.random.seed(randomState) # to have the same starting point\n \n xb = np.arange(-30,500000, 5000)\n xp = np.arange(-30,30,0.2)\n \n # Plot the exponential curve\n plt.figure()\n plt.subplot(3,1,1)\n xf = stats.expon(0.,1./lam).rvs(nEntries)\n plt.hist(xf,xb, normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n # Plot the gaussian distribution\n plt.subplot(3,1,2) \n xg = stats.norm(mu, sigma).rvs(nEntries)\n plt.hist(xg,xp, normed=True)\n plt.plot(xp,stats.norm(mu,sigma).pdf(xp))\n \n # Plot the convolution of the two distributions\n plt.subplot(3,1,3)\n plt.hist(xf+xg,xb,normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n data_set = xf+xg\n return data_set",
"def _plot_ecdf(data, label='Value', alpha=1):\n data = np.array(data)\n data = np.sort(data)\n t = len(data)\n prob = np.arange(t) / t\n plt.plot(data, prob, label=label, alpha=alpha)",
"def plot_data_pca(data_dict):\n f = plt.figure()\n ndata, ntime, nhidden = data_dict['hiddens'].shape\n\n print('Number of data examples: ', ndata)\n print('Number of timesteps: ', ntime)\n print('Number of data dimensions: ', nhidden)\n pca = PCA(n_components=100)\n pca.fit(onp.reshape(data_dict['hiddens'], [ndata * ntime, nhidden]))\n\n plt.plot(onp.arange(1, 16), onp.cumsum(pca.explained_variance_ratio_)[0:15],\n '-o');\n plt.plot([1, 15], [0.95, 0.95])\n plt.xlabel('PC #')\n plt.ylabel('Cumulative Variance')\n plt.xlim([1, 15])\n plt.ylim([0.3, 1]);\n return f",
"def Lambda_CC_plot(data_CC,show_error=True,discrete=False):\n sns.set_context('paper')\n fig,ax = plt.subplots()\n plot_Lambda_CC(data_CC,ax,show_error=show_error,discrete=discrete)\n formatting(r'Fraction of cooperators, $n/N$',r'$\\Lambda^{CC}_n$',large=False)\n plt.savefig('Lambda_CC.pdf')",
"def plot_cum_clustering_dist(net, label, outpath, turbo):\n net.removeSelfLoops()\n local_cc = networkit.centrality.LocalClusteringCoefficient(net, turbo)\n local_cc.run()\n unique_cc, unique_cc_cnt = np.unique(local_cc.scores(), return_counts=True)\n unique_cc_cumcnt = np.cumsum(unique_cc_cnt)/sum(unique_cc_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n plt.axis([0, 1, 0, 1])\n ax.plot(unique_cc, unique_cc_cumcnt, 'b-')\n # ax.set_title('Cumulative distribution of clustering coefficient of nodes')\n ax.set_xlabel('local clustering coefficient c')\n ax.set_ylabel('p(x <= c)')\n plt.savefig(outpath + label + \"-cc-distribution.eps\")",
"def plot_distribution(self, variable, **kwargs):\n return self.visualizer.plot_distribution(variable, **kwargs)",
"def plot_distribution(d, start=0.01, stop=10.0, resolution=0.1):\n import pylab\n X = numpy.arange(start, stop, resolution)\n Y = [math.exp(d.log_pdf(x)) for x in X]\n pylab.plot(X, Y)",
"def test_cumulative_distribution_fit_call_pd(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()",
"def cdf(data, args):\n return Plot._dist(data, args)",
"def plotDistributions(self, normalize = True, cumulative = False, testMode = False, **kwargs):\n\n if (not testMode): # For testing: do not run plots if testMode\n figsize = kwargs.get('figsize',(10, 5))\n bins = kwargs.get('bins',100)\n\n f, ax = plt.subplots(figsize=figsize)\n ax.hist(self.collectedCounts, bins)\n return ax, self.collectedCounts\n else:\n return None, self.collectedCounts",
"def plot_cdf(self, distType='posterior', plotType='line', figSize=(5,4)):\n \n # Calculate cdf to plot\n distToPlot = self.cumulative_distribution(dist=distType)\n \n # Create figure\n fig = plt.figure(figsize=figSize)\n \n # Create colormap\n colors = cm.rainbow(np.linspace(0, 1, len(distToPlot)))\n\n # Determine plot type\n if plotType=='line':\n plt.plot(self.hypotheses, distToPlot.T)\n elif plotType=='bar':\n for row, co in zip(distToPlot, colors):\n plt.bar(self.hypotheses, row, width=0.25,\n align='center', alpha=0.5, color=co)\n elif plotType=='point':\n for row, co in zip(distToPlot, colors):\n plt.scatter(self.hypotheses, row,\n alpha=1.0, color=co)\n else:\n sys.exit('Plot type not recognized.')\n\n plt.legend(np.arange(np.shape(distToPlot)[0]),\n loc='center left',\n bbox_to_anchor=(1,0.5),\n title='Iteration')\n plt.xlabel('Hypotheses', fontsize=14)\n plt.ylabel('Probability', fontsize=14)\n plt.ticklabel_format(useOffset=False)\n \n # If less than 10 hypotheses, treat xticks as categorical\n if len(self.hypotheses) < 20:\n plt.xticks(self.hypotheses)\n \n return None",
"def plot_eigenvalue_cumsum(eigenvalues):\n normalized = eigenvalues / np.sum(abs(eigenvalues))\n cumsum = np.cumsum(normalized)\n plt.figure(0)\n plt.xlabel('Eigenvalue')\n plt.ylabel('Cumulative sum')\n plt.title('Eigenvalue cumulative sum')\n plt.plot(np.arange(len(cumsum)), cumsum)",
"def plot_distribution(self,show=True):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\t\n\t\tplt.figure(\"Probability distribution of Random Walk, theoretical\")\n\t\tplt.scatter(k_vals,prob_vals,s=4)\n\t\tplt.xlim((-self.n-1,self.n+1))\n\t\t\n\t\tplt.xlabel(\"x\\u2099 - Position after n jumps\")\n\t\tplt.ylabel(\"Probability\")\n\t\tplt.suptitle(\"Random Walk: p={}, n={}, \\u0394x={}\".format(self.p,self.n,self.delta_x))\n\t\tif show == True:\n\t\t\tplt.show()",
"def plot_ccum_centrality_dist(centrality_filename, label, outpath, centrality_name):\n unique_val, unique_cc_prob = networkit_util.get_cc_centrality_distr(centrality_filename)\n centrality_style = {'eigenvector-centrality': 'c*', 'pagerank': 'g*', 'hub': 'r*', 'authority': 'm*', 'betweeness': 'b*'}\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.loglog(unique_val, unique_cc_prob, centrality_style[centrality_name], label=label + '-' + centrality_name)\n ax.set_xlabel('v')\n ax.set_ylabel('P(x>=v)')\n plt.savefig(outpath + label + '-' + centrality_name + '-distribution.eps')\n return ax",
"def gen_plot_cumulative_gain(df, currency, filename):\n # remove existing plot\n if glob.glob('images/cumulative*.png'):\n for f in glob.glob(\"images/cumulative*.png\"):\n os.remove(f)\n\n # find year\n tax_year = df.iat[0, 2][:4]\n\n # summing transactions by date\n cum = df.groupby(by=[\"Date Sold\"]).sum().sort_index()[['Gain&Loss']]\n\n # fill in empty dates\n cum.index = pd.DatetimeIndex(cum.index)\n all_dates = pd.date_range(start=f\"{tax_year}-01-01\",\n end=f\"{tax_year}-12-31\")\n\n # calculate cumulative sum for all dates\n cum = cum.reindex(all_dates).fillna(0.0).rename_axis('Date Sold').cumsum()\n\n # generate cumulative plot\n cum_plot = sns.lineplot(data=cum, x=\"Date Sold\", y=\"Gain&Loss\")\n cum_plot.set_title(f\"Cumulative Gain and Loss in {tax_year} in {currency}\")\n cum_plot.set_xlabel('')\n cum_plot.get_figure().savefig(f\"images/{filename}\")\n plt.close()",
"def plot(self):\n cs = plt.contour(self.X, self.Y, self.fitness_function)\n plt.clabel(cs, inline=1, fontsize=6)\n plt.imshow(self.fitness_function, extent=self.limits, origin=\"lower\", alpha=0.3)",
"def gc_plot(data, **kwargs):\n hist = gc_percent_hist(data)\n sns.lineplot(x=range(0, 101), y=hist, **kwargs)",
"def cumulative_distribution(self):\n\n cum_dd = []\n sum_p = 0\n for k, p in reversed(self.dd):\n sum_p += p\n cum_dd.append((k, sum_p))\n return list(reversed(cum_dd))",
"def plot_gain(df, outcome_col='y', treatment_col='w', treatment_effect_col='tau',\n steps=100, normalize=False, random_seed=42, figsize=(8, 8)):\n\n cumgain = get_cumgain(df, outcome_col, treatment_col, treatment_effect_col, steps, normalize, random_seed)\n\n cumgain.plot(figsize=figsize)\n plt.xlabel('Fraction of Population')\n plt.ylabel('Cumulative Gain')",
"def plot_cdf(Y, Y_Label='Y'):\r\n\r\n # Options for the graphic\r\n pltfont = {'fontname': 'Bitstream Vera Sans', 'fontsize': 15} # font\r\n lc = 'k' # line colour\r\n\r\n ###########################################################################\r\n # Check inputs\r\n ###########################################################################\r\n if not isinstance(Y, np.ndarray):\r\n raise ValueError('\"Y\" must be a numpy.array.')\r\n if Y.dtype.kind != 'f' and Y.dtype.kind != 'i' and Y.dtype.kind != 'u':\r\n raise ValueError('\"Y\" must contain floats or integers.')\r\n Ny = Y.shape\r\n if len(Ny) > 1:\r\n if Ny[1] != 1:\r\n raise ValueError('\"Y\" be of shape (N,1) or (N, ).')\r\n N = Ny[0]\r\n Y = Y.flatten() # shape (N, )\r\n\r\n if np.isnan(Y).any():\r\n warn('some data in \"Y\" are nan')\r\n if np.isinf(Y).any():\r\n warn('some data in \"Y\" are inf')\r\n\r\n if not isinstance(Y_Label, str):\r\n raise ValueError('\"Y_Label\" must be a string.')\r\n\r\n ###########################################################################\r\n # Create plot\r\n ###########################################################################\r\n\r\n Y = Y[~np.isnan(Y)] # remove NaNs\r\n ymin = np.min(Y)\r\n ymax = np.max(Y)\r\n\r\n #plt.figure()\r\n\r\n # Option 1: use the function empiricalcdf of SAFE\r\n Nmin = 5000\r\n if N > Nmin:\r\n Yi = np.sort(Y)\r\n F = empiricalcdf(Y, Yi)\r\n plt.plot(Yi, F, '.', color=lc)\r\n else:\r\n Yi = np.linspace(ymin, ymax, Nmin)\r\n F = empiricalcdf(Y, Yi)\r\n plt.plot(Yi, F, color=lc)\r\n\r\n # Option 2: use the ECDF function of the python package 'statsmodels'\r\n #ecdf = ECDF(Y)\r\n #plt.plot(ecdf.x,ecdf.y, color=lc)\r\n\r\n # Customise plot\r\n plt.xticks(**pltfont); plt.yticks(**pltfont)\r\n plt.xlabel(Y_Label, **pltfont)\r\n plt.ylabel('CDF', **pltfont)\r\n plt.box(on=True)\r\n\r\n # Limit for horizontal axisym = min(y);\r\n if ymin == ymax: # (i.e., if all data have the same value)\r\n ymin = ymin - ymin/10\r\n ymax = ymax + ymax/10\r\n plt.xlim((ymin, ymax))\r\n plt.ylim((0, 1))",
"def CumulativeDistribution(data, nbins, range=None, normed=True, centerbins=False):\n\n # 1) COMPUTE THE DISTRIBUTION OF THE DATA\n ydata, xdata = np.histogram(data, nbins, range, normed)\n\n # 1.1) Compute the cumulative sum of the probability\n ydata = ydata.cumsum()\n\n # 2) RETURN THE RESULTS\n if centerbins:\n dif = 0.5 * (xdata[-1] - xdata[0]) / nbins\n xdata += dif\n\n if normed:\n norm = 1.0 / ydata[-1]\n ydata *= norm\n\n return xdata[:-1], ydata\n\n else:\n return xdata[:-1], ydata",
"def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()",
"def pixel_ts_distribution(self):\n fig,ax = plt.subplots(figsize=(8,6))\n bins = np.linspace(0,25,501)\n tsvec=self.tsmap.vec\n ax.hist(tsvec, bins, log=True, histtype='step', lw=2, cumulative=-1, label='data');\n # make array corresponding to the hist\n h = np.histogram(tsvec, bins, )[0]\n x = bins[:-1]\n yh = sum(h)-h.cumsum() \n f = lambda x: np.exp(-x/2)\n ye=6e5*f(x)\n ax.plot(x, ye, '-g', lw=2, label='exp(-TS/2)')\n ax.fill_between(x,yh,ye,where=x>5, facecolor='red', alpha=0.6)\n plt.setp(ax, xscale='linear', xlabel='TS', ylim=(1,None), ylabel='# greater than TS')\n ax.legend()\n ax.set_title('Cumulative distribution of single-pixel TS values for {}'.format(self.skymodel),\n fontsize=14)\n ax.grid(True, alpha=0.5) \n fig.set_facecolor('white')\n return fig",
"def plot_distribution(img_path):\n img = Image.open(img_path)\n img_width, img_height = img.size\n img = prepare_image(img = img)\n model = vgg19(pretrained=True).cuda().eval() \n predict = model.forward(img)\n predict = predict.detach().cpu().numpy().reshape(-1)\n \n label = pd.read_csv('./label.csv', sep = ';', index_col=0)\n label['predict'] = predict\n label.sort_values(by = 'predict', inplace = True)\n trace = go.Bar(x = [str(i) + '_' + j for i, j in enumerate(label.label)], y = label.predict)\n l = go.Layout(\n title = 'Class distribution',\n xaxis = dict(\n title = 'Class'\n ),\n yaxis = dict(\n title = 'Score'\n )\n )\n fig = go.Figure(data = [trace], layout = l)\n iplot(fig)",
"def priorDistribution(beta):\r\n ### TODO: Write your code here\r\n m_v = np.zeros(2)\r\n print(\"m_v shape: \" ,m_v.shape)\r\n c_v = np.array( [ [ beta , 0 ] , [ 0 , beta ] ] )\r\n print(\"c_v shape: \",c_v.shape)\r\n x_s = []\r\n for i in np.linspace(-1 , 1 , 150):\r\n for j in np.linspace(-1 , 1 , 150):\r\n x_s.append([i,j])\r\n x_s = np.array(x_s)\r\n print(\"x_s shape: \",x_s.shape)\r\n density = util.density_Gaussian(m_v , c_v , x_s)\r\n #print(density)\r\n print(\"length density \",len(density))\r\n X,Y = np.meshgrid( np.linspace(-1,1,150) , np.linspace(-1,1,150) )\r\n plt.contour( X , Y , np.reshape(density , (150, 150 )) )\r\n plt.plot(-0.1 , -0.5 , marker = 'o' , MarkerSize = 10 , label = 'point a')\r\n plt.xlabel('a0 ')\r\n plt.ylabel(' a1 ')\r\n plt.legend()\r\n plt.title('p(a)')\r\n plt.show() \r\n print('-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x')\r\n return",
"def plot_cdf(self, max_t=200, show=True, savename=False):\n\n plt.figure()\n time = self.dt*np.arange(1, int(max_t / self.dt)).astype(float)\n\n plt.plot(time, self._analytical_passage_time_cdf(time), lw=2, label='analytical')\n plt.plot(time, self._empirical_cdf(time), lw=2, label='empirical')\n\n # formatting\n plt.legend(fontsize=14)\n plt.xlabel('Time', fontsize=14)\n plt.ylabel('Cumulative Density', fontsize=14)\n plt.tick_params(labelsize=14)\n plt.tight_layout()\n\n if savename is not None:\n plt.savefig('%s_cdf.pdf' % savename)\n\n if show:\n plt.show()",
"def plot_histogram(beta=3):\n m = 10 ** beta\n\n # generate m normal random variables of 100 points each\n X = np.random.randn(100, m)\n\n # take the maximum along the rows\n Z = np.max(X, axis=1)\n\n # plot the pdf with a gaussian kernel density estimate\n plt.subplot(121)\n sns.distplot(Z, kde=True)\n plt.title(r'Histogram of Z for $\\beta$ = {}'.format(beta))\n\n # plot the cdf and find t in relation with Q3)\n plt.subplot(122)\n plt.hist(Z, bins=25, normed=True, cumulative=True)\n plt.title(r'P[Z $\\leq$ t]$\\geq$0.9 for t$\\geq$%0.4f' % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the inverse cdf' % (norm.ppf(0.9 ** (1/m))))\n print('P[Z <= t] >= 0.9 for t >= %0.4f using the Chernoff bounding method'\n % (np.sqrt(2*(np.log(m) + np.log(10)))))\n\n # save the plot to file & show the plot\n plt.savefig('histogram_beta_{}.png'.format(beta))\n\n plt.show()",
"def plot_sample_distribution(samples):\n plt.hist(samples, 50)\n plt.xlabel('Value of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample distribution')\n plt.show()",
"def plot_pdf(pdf,**kwargs):\n pl.hist(pdf.bins,bins=pdf.bins,weights=pdf.counts,**kwargs)\n return pdf.time",
"def _plot_ecdf(self, numerator_name, denominator_name):\n x = self.ecdf[numerator_name][denominator_name]['x']\n y = self.ecdf[numerator_name][denominator_name]['y']\n\n lower_bound = x[y.index(min(y,\n key=lambda x:\n abs(x-self.confidence_level)))]\n median = x[y.index(min(y, key=lambda x:abs(x-0.5)))]\n upper_bound = x[y.index(min(y,\n key=lambda x:\n abs(x-(1-self.confidence_level))))]\n\n sns.lineplot(x=x, y=y)\n ci = 1 - self.confidence_level\n title = ('Median Lift was {0:.2%}, with a '\n '{1:.0%} CI of [{2:.2%}, {3:.2%}]'.format(median,\n ci,\n lower_bound,\n upper_bound))\n title = self._format_title(title)\n plt.title(title)\n plt.xlabel('Lift')\n plt.ylabel('Cumulative Probability')\n plt.axvline(x=lower_bound, linestyle='dotted', color='black')\n plt.axvline(x=median, linestyle='dotted', color='black')\n plt.axvline(x=upper_bound, linestyle='dotted', color='black')\n sns.despine(left=True)\n locs, labels = plt.xticks()\n labels = self._format_axis_as_percent(locs, labels)\n plt.xticks(locs, labels=labels)",
"def plot_crossing_probability(ax, Percolation) -> np.ndarray:\n\n print(f\"Computing crossing probabilities for {Percolation.grid_type} \"\n \"percolation\")\n cross_proba = np.zeros_like(p_values)\n for i in progressbar.progressbar(range(nsim)):\n perco = Percolation(w, h)\n p_cross = perco.find_p_cross()\n cross_proba += np.where(p_values < p_cross, 0, 1)\n\n cross_proba /= nsim\n ax.plot(p_values, cross_proba, '-',\n label=f'{Percolation.grid_type} percolation')",
"def plot_ecdf(datasets, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n _plot_ecdf(data, labels[idx], alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Cumulative Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/ecdf_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def plotDistributions(self, normalize = True, cumulative = False, testMode = False, **kwargs):\n\n countDistributions = {}\n\n if (not testMode): # For testing: do not run plots if testMode\n figsize = kwargs.get('figsize',(10, 5))\n bar_plt = kwargs.get('bar', False)\n f, ax = plt.subplots(figsize=figsize)\n\n # iterate through each sample\n for label, data in self.collectedCounts.items():\n\n if data == set(): # Do not plot any empty sets of data\n continue\n\n sData = sorted(data)\n values = list(map(lambda p: p[0], sData))\n counts = list(map(lambda p: p[1], sData))\n\n if normalize:\n # replace distribution counts with normalized values\n sumCounts = float(sum(counts))\n counts = [i/sumCounts for i in counts]\n\n if cumulative:\n # calculate cumulative sum of counts\n counts = np.cumsum(counts)\n\n # re-write manipulated data\n countDistributions[label]=list(zip(values, counts))\n\n if (not testMode): # For testing: do not run plots if testMode\n if (bar_plt):\n ax.bar(values, counts, 1, label = label)\n else:\n ax.plot(values, counts, label = label)\n\n # return plots once all samples have been added\n if (not testMode): # For testing: do not run plots if testMode\n ax.legend(loc=2, shadow = True, bbox_to_anchor=(1.05, 1))\n return ax, countDistributions\n else:\n return None, countDistributions",
"def plot_pagerank(net, label, outpath):\n _, pagerank_values = networkit_util.get_pagerank(net, label, outpath)\n unique_value, unique_cnt = np.unique(pagerank_values, return_counts=True)\n unique_cumcnt = np.cumsum(unique_cnt) / sum(unique_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(unique_value, unique_cumcnt, 'r.')\n # ax.set_title('Cumulative distribution of pagerank of nodes')\n ax.set_xlabel('pagerank value v')\n ax.set_ylabel('p(x <= v)')\n plt.savefig(outpath + label + \"-pagerank-distribution.eps\")",
"def plot(self):\n\t\tself.plotOfCos1().plot()",
"def plotCumulativeMovingAverage(self, x: list, title: str = \"Cumulative moving average\") -> None:\n C = self.cumulativeMovingAverage(x)\n plt.plot(C)\n plt.xlabel('index')\n plt.ylabel('cumulative moving average')\n plt.title(title)\n plt.show()",
"def plotprice(self):\n plt.figure()\n plt.hist( self.pricetree[-1,:] )\n plt.title(\"price Distribution\") \n plt.show()",
"def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]",
"def cdfFunction(f, x, N):\r\n return ssstats.binom.cdf(x, N, f)",
"def PlotPriorDist(pmf):\n thinkplot.Clf()\n thinkplot.PrePlot(num=1)\n\n cdf1 = thinkbayes2.Cdf(pmf, label='prior')\n\n thinkplot.Cdf(cdf1)\n thinkplot.Save(root='sat_prior',\n xlabel='p_correct', \n ylabel='CDF',\n formats=['pdf', 'eps'])",
"def test_cumulative_distribution_fit_df_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()",
"def continuous_plot(iterations, grn):\n width, height = size = (600,600)\n screen = pygame.display.set_mode(size)\n # order the colors for the TF andP proteins\n colors = []\n conc_list = []\n extra_up, extra_down = False, False\n\n for gene in grn.genes:\n \n if gene.gene_type == \"TF\":\n colors.append((0, 0, 255))\n elif gene.gene_type == \"P\":\n colors.append((0, 255, 0))\n elif gene.gene_type == \"EXTRA\":\n colors.append((255,0,0))\n prev_extra = 600-(gene.concentration * 600)\n\n conc_list.append(600-(gene.concentration * 600))\n\n # add variables for user input\n\n for i in range(iterations):\n #check for keypress\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if pygame.key.get_pressed()[pygame.K_UP]:\n extra_up = True\n if pygame.key.get_pressed()[pygame.K_DOWN]:\n extra_down = True\n elif event.type == pygame.KEYUP:\n extra_up, extra_down = False, False\n if extra_up: \n grn.change_extra(0.01)\n if extra_down: \n grn.change_extra(-0.01)\n #run grn and get protein concentration\n results = grn.regulate_matrix(2, False)\n scaled = [int(600-(x * 600)) for x in results]\n old_conc = conc_list\n conc_list = scaled\n \n for idx, conc in enumerate(conc_list):\n pygame.draw.line(screen, colors[idx], \n (width-3, old_conc[idx]), \n (width-2, conc))\n\n # if draw_extra:\n # pygame.draw.line(screen, colors[-1], \n # (width-3, 600-prev_extra-1), \n # (width-2, 600-extra))\n\n pygame.display.flip()\n #screen.blit(screen, (-1, 0))\n screen.scroll(-1,0)\n pygame.time.wait(5)",
"def pdf(data, args):\n return Plot._dist(data, args)",
"def income_distribution_plot(income_data,year):\n income_year = income_data.loc[year]\n plt.figure(figsize=(10,8))\n income_year.hist(bins=100,alpha=0.3,color='k')\n plt.title('Income Distribution of Year %s' % year)\n plt.xlabel('Income per person')\n plt.ylabel('Frequency')\n plt.savefig('Income distribution of year %s' % year)",
"def display(f, x_min, x_max, delta=0.001):\n x = list(drange(x_min, x_max,delta))\n y = [f(i) for i in x]\n plt.title(f.__name__)\n plt.grid(True)\n plt.xlabel('X')\n plt.ylabel('Y= '+f.__name__ + '(X)')\n plt.plot(x,y, 'r')\n plt.show()",
"def plot_cumreturn(x):\n return go.Scatter(x=df_plot['Date'], y=df_plot[x], mode='lines', name=x)",
"def display(self, bin_size):\n xs = np.linspace(self.sample_min, self.sample_max, 2000)\n ys = np.zeros_like(xs)\n for (l, s), w in zip(self.gauss_params, self.dist_weights):\n ys += ss.norm.pdf(xs, loc=l, scale=s) * w\n plt.plot(xs, ys, color=\"blue\")\n plt.hist(self.samples, density=True, bins=bin_size, color=\"palegreen\")\n plt.xlabel(\"duration\")\n plt.ylabel(\"density\")\n _, _, ymin, ymax = plt.axis()\n if self.lower_bound > 0:\n plt.vlines([self.lower_bound], ymin, ymax, color=\"crimson\")\n if self.upper_bound < float(\"inf\"):\n plt.vlines([self.upper_bound], ymin, ymax, color=\"crimson\")\n plt.show()",
"def plot_pc(*args, x='date', y='pc_new_positives', days=0):\r\n # unpack args (stored in tuple) to list form\r\n a = []\r\n a.append(x)\r\n a.append(y)\r\n for arg in args:\r\n a.append(arg)\r\n # format dates\r\n if (days > 0) & (days < 32):\r\n locator = mdates.DayLocator()\r\n formatter = mdates.DateFormatter('%d')\r\n else:\r\n locator = mdates.MonthLocator()\r\n formatter = mdates.DateFormatter('%m-%d')\r\n # set up axes for subplots\r\n fig, ax = plt.subplots(figsize=(8, 8))\r\n # format dates on x-axis\r\n ax.xaxis.set_major_locator(locator)\r\n ax.xaxis.set_major_formatter(formatter)\r\n for state in args:\r\n # subset df for our plots. dropna() ensures same date range for all plots\r\n plot_data = df[df['State'] == state]\r\n # subset for number of days\r\n if days > 0:\r\n plot_data = plot_data[:days+1]\r\n # make plot\r\n ax.plot(plot_data[x], plot_data[y][plot_data['State'] == state], linestyle='-', label=state)\r\n # set titles\r\n ax.set_title(\"{} per 100,000 people\".format(y))\r\n #show legend\r\n ax.legend()",
"def plot_cdf(x, copy=True, fractional=True, **kwargs):\n N = float(len(x))\n if copy:\n x = x.copy()\n x.sort()\n if fractional:\n t = []\n for x, chunk in groupby(enumerate(x, 1), itemgetter(1)):\n xranks, _ = zip(*list(chunk))\n t.append((float(x), xranks[0] + np.ptp(xranks) / 2.0))\n t = np.asarray(t)\n else:\n t = np.c_[np.asfarray(x), np.arange(N) + 1]\n if 'ax' not in kwargs:\n ax = plt.gca()\n else:\n ax = kwargs.pop('ax')\n ax.loglog(t[:, 0], (N - t[:, 1]) / N, 'ow', **kwargs)\n return ax",
"def plot_cdf(self, param, plot_type, Nsplit=50, **kwargs):\n title = self.family.capitalize() + \" Copula CDF\" \n\n bounds = [0+1e-2, 1-1e-2]\n U_grid, V_grid = np.meshgrid(\n np.linspace(bounds[0], bounds[1], Nsplit),\n np.linspace(bounds[0], bounds[1], Nsplit))\n \n Z = np.array(\n [self.get_cdf(uu, vv, param) for uu, vv in zip(np.ravel(U_grid), np.ravel(V_grid)) ] )\n \n Z = Z.reshape(U_grid.shape)\n\n if plot_type == \"3d\":\n plot_bivariate_3d(U_grid,V_grid,Z, [0,1], title, **kwargs)\n elif plot_type == \"contour\":\n plot_bivariate_contour(U_grid,V_grid,Z, [0,1], title, **kwargs)\n else:\n print(\"only \\\"contour\\\" or \\\"3d\\\" arguments supported for type\")\n raise ValueError",
"def distribution_horizontale(args):\n number_files = [2,5,10,20];\n nbreFileNotDisplay = 0;\n comment = \"\";\n num_bins = args[\"num_bins\"];\n rep = args[\"path_save\"]+args[\"correction\"]+\\\n \"/data_p_\"+str(args[\"p_value\"])+\"/distribution/\";\n w = 4; h = 1; # width = largueur, height = longueur\n fig = plt.figure( figsize=(w,h) ); \n cpt_ax1 = 0;\n for num in number_files:\n print(\"num = \", num)\n num = int(num)\n cpt_ax1 += 1;#cpt = num; # cpt += 1\n \n # ax1\n ax1 = fig.add_subplot(2,len(number_files),cpt_ax1);\n df = pd.read_csv(rep+args[\"fichier_prefix\"] +str(num)+args[\"ext\"], \\\n names=[\"cpt\",\"moy_dc\",\"moy_dh\", \"nbre_aretes_matE\", \"correl_dh_dl\"], \\\n sep=';')\n N_graphs = df[\"moy_dc\"].count()\n \n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dc\"])\n num_bins = df[\"moy_dc\"].max()+1\n bins = range(0,int(num_bins)); bins = range(0, 100)\n print(\"---> bins = \", bins, \" min = \",df[\"moy_dc\"].min(), \\\n \" max = \",df[\"moy_dc\"].max())\n \n max_count_dl, max_count_dh = count_max_df(df)\n \n sns.distplot(df[\"moy_dc\"], ax = ax1, bins = bins, kde = False)\n ax1.set(xlabel= \"moy_distance_correction\", ylabel= \"nombre_graphe\", \\\n title = \"distance de correction pour \\n \"+ str(num)+\\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma)+ \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean))\n ax1.plot([num+1,num+1], (0,max_count_dl), 'r--' )\n ax1.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax1.get_yticks()])\n \n # ax2\n cpt_ax2 = cpt_ax1 +len(number_files); #cpt = num+len(number_files); # cpt +=1 ;\n ax2 = fig.add_subplot(2,len(number_files),cpt_ax2);\n N_graphs = df[\"moy_dh\"].count()\n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dh\"])\n \n num_bins = df[\"moy_dh\"].max()+1\n bins = range(0 ,int(num_bins)); bins = range(0, 100)\n\n sns.distplot(df[\"moy_dh\"], ax = ax2, bins = bins, kde = False, color = 'red')\n ax2.set(xlabel= \"moy_distance_hamming\", ylabel= \"nombre_graphe\", \\\n title = \"distance de Hamming pour \\n \"+ str(num)+ \\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma) + \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean()))\n# ax2.set_xticklabels(bins, rotation=90)\n ax2.plot([num+1,num+1], (0,max_count_dh), 'r--' )\n ax2.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax2.get_yticks()])\n \n for ax in [ax1,ax2]:\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(8)\n \n# plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.grid(True)\n comment += \"_horizontale\";\n plt.savefig(args[\"path_save\"]+args[\"correction\"]+\"/courbes/\"+\\\n \"distributionHorizontale_k_0_\"+str(number_files[len(number_files)-1])+\\\n \"_\"+comment+\".jpeg\", \\\n dpi= 190)\n pass",
"def plot(self, n=1000, hist=True, kde=False):\n sns.set(rc={\"xtick.bottom\": True, \"ytick.left\": True})\n sims = [self.estimate() for i in range(n)]\n fig, ax = plt.subplots(figsize=(10, 8))\n\n if hist:\n kwargs = {'cumulative': False, 'edgecolor': \"k\", 'linewidth': 1}\n plot = sns.distplot(sims, bins=math.floor(max(sims)), hist=True,\n kde=kde,norm_hist=False, hist_kws=kwargs,\n ax=ax)\n plt.title('Histogram - days to project completion '\n '- n = {}'.format(n))\n plt.axvline(x=np.median(sims), color='red', label='50%')\n plt.text(np.median(sims)-0.5, -2, '50%', color='red')\n plt.show()\n\n else:\n kwargs = {'cumulative': True, 'edgecolor': \"k\", 'linewidth': 1}\n plot = sns.distplot(sims, bins=math.floor(max(sims)),\n hist=True, kde=False, norm_hist=True,\n hist_kws=kwargs)\n plt.title('Cumulative histogram - days project to completion '\n '- n = {}'.format(n))\n plt.show()\n\n return plot",
"def plot_dist_evolution(arr, nbins=20, fracs=np.array([0.1, 0.2, 0.3, 0.4]), last=0.5):\n fracs = fracs\n\n last = last\n last_subset = arr[int(last * arr.shape[0]) :]\n\n for ff in fracs:\n\n subset = arr[: int(ff * arr.shape[0])]\n\n pl.hist(\n subset, nbins, histtype=\"step\", density=True, label=\"f=0.0--{}\".format(ff)\n )\n\n pl.hist(\n last_subset,\n nbins,\n histtype=\"step\",\n density=True,\n label=\"f={}--1.0\".format(last),\n )\n\n pl.legend(loc=\"best\", ncol=3)\n\n pl.show()\n\n return None",
"def plot():\n pass",
"def plot_cumulative_gain(y_true, y_probas, title='Cumulative Gains Curve',\n ax=None, figsize=None, title_fontsize=\"large\",\n text_fontsize=\"medium\"):\n y_true = np.array(y_true)\n y_probas = np.array(y_probas)\n\n classes = np.unique(y_true)\n if len(classes) != 2:\n raise ValueError('Cannot calculate Cumulative Gains for data with '\n '{} category/ies'.format(len(classes)))\n\n # Compute Cumulative Gain Curves\n percentages, gains1 = cumulative_gain_curve(y_true, y_probas[:, 0],\n classes[0])\n percentages, gains2 = cumulative_gain_curve(y_true, y_probas[:, 1],\n classes[1])\n percentages, gains3 = cumulative_gain_curve(y_true, y_true,\n classes[0])\n percentages, gains4 = cumulative_gain_curve(y_true, y_true,\n classes[1])\n\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n\n ax.set_title(title, fontsize=title_fontsize)\n\n ax.plot(percentages, gains1, lw=3, label='Class {} (pred)'.format(classes[0]))\n ax.plot(percentages, gains2, lw=3, label='Class {} (pred)'.format(classes[1]))\n #ax.plot(percentages, gains3, lw=3, label='Class {} (true)'.format(classes[0]))\n ax.plot(percentages, gains4, lw=3, label='Class {} (true)'.format(classes[1]))\n\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.1])\n\n ax.plot([0, 1], [0, 1], 'k--', lw=2, label='Baseline')\n\n ax.set_xlabel('Percentage of sample', fontsize=text_fontsize)\n ax.set_ylabel('Gain', fontsize=text_fontsize)\n ax.tick_params(labelsize=text_fontsize)\n ax.grid('on')\n ax.legend(loc='lower right', fontsize=text_fontsize)\n plt.show()\n return ax",
"def plot_stability_function(self,bounds=[-20,1]):\n import matplotlib.pyplot as plt\n p,q=self.stability_function()\n xx=np.arange(bounds[0], bounds[1], 0.01)\n yy=p(xx)/q(xx)\n fig, = plt.plot(xx,yy)\n plt.draw()",
"def run_plot(args):\n # print(\"running chronqc_plot\")\n chronqc_plot.main(args)",
"def plot_distributions(x, variable_name):\n n_cols = x.shape[1]\n\n plot_rows = n_cols // 2\n plot_rows += n_cols % 2\n plot_cols = 2\n\n position = range(1, n_cols + 1)\n fig = plt.figure()\n\n for col_index in range(n_cols):\n col_values = x[:, col_index]\n ax = fig.add_subplot(plot_rows, plot_cols, position[col_index])\n ax.hist(col_values)\n ax.set_title(\"Distribution of variable {}{}\".format(variable_name, col_index + 1))\n ax.set_ylabel(\"Frequency\")\n ax.set_xlabel(\"Value\")\n\n plt.tight_layout()\n plt.savefig(\"plots/{}Dist.png\".format(variable_name))\n plt.show()",
"def plot_cc(graph):\n\tclustering_coeffs = []\n\tfor node in graph.nodes():\n\t\tclustering_coeffs.append(nx.clustering(graph, node))\n\t\n\tplt.axvline(x=np.mean(clustering_coeffs), color='r', linestyle='-')\n\tplt.hist(clustering_coeffs, bins=100)",
"def plot_hist_for_pop(exp,data_crop,IV):\n if not os.path.exists('plots'):\n os.makedirs('plots')\n DVdata=data_crop.loc[:,IV] \n if exp == 'POP1':\n colors = [\"orangered\", \"silver\"]\n else:\n colors = [\"steelblue\", \"silver\"]\n \n plt.figure()\n customPalette = sns.set_palette(sns.color_palette(colors))\n plt.rc('font', size=15) \n\n \n sns.distplot(DVdata,kde=False)\n sns.despine(top=True,right=True)\n ax = plt.gca()\n if IV == 'Loss.streak.outcome...zero':\n ax.set_xlim(right=75)\n plt.tight_layout()\n plt.savefig(fname='plots/hist_'+IV + '.eps',format='eps',transparent=True)",
"def visualize(self):\n self.dataFrame.hist()\n plt.show()",
"def prime_dist(func, n):\n x = func(n)\n y = list(range(len(x)))\n for i in range(len(x)):\n y[i] = (i+1)*np.log(x[i])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_ylabel(r'k $\\cdot$ log(p$_k$)')\n ax.set_xlabel(r'p$_k$')\n plt.suptitle('Distribution of primes', fontsize=15)\n ax.plot(x, y, lw=2, color='#FE4365')\n plt.show()",
"def interactions_plot():\n data = load_data('ints_CC'),load_data('ints_CD')\n fig,ax = plt.subplots()\n plot_mean_std(data_CC,ax,'C-C interactions')\n plot_mean_std(data_CD,ax,'C-D interactions')\n plt.xlabel('cluster size, n')\n plt.legend(loc='best')\n plt.savefig('interactions.pdf')",
"def _plot_marginal_pdfs( res, nbins=101, **kwargs):\n\tfrom matplotlib import pyplot as pl\n\timport numpy as np\n\n\tnparam = len(res.vparam_names)\n\t# nrow = np.sqrt( nparam )\n\t# ncol = nparam / nrow + 1\n\tnrow, ncol = 1, nparam\n\n\tpdfdict = _get_marginal_pdfs( res, nbins )\n\n\tfig = plt.gcf()\n\tfor parname in res.vparam_names :\n\t\tiax = res.vparam_names.index( parname )+1\n\t\tax = fig.add_subplot( nrow, ncol, iax )\n\n\t\tparval, pdf, mean, std = pdfdict[parname]\n\t\tax.plot( parval, pdf, **kwargs )\n\t\tif np.abs(std)>=0.1:\n\t\t\tax.text( 0.95, 0.95, '%s %.1f +- %.1f'%( parname, np.round(mean,1), np.round(std,1)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.01:\n\t\t\tax.text( 0.95, 0.95, '%s %.2f +- %.2f'%( parname, np.round(mean,2), np.round(std,2)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telif np.abs(std)>=0.001:\n\t\t\tax.text( 0.95, 0.95, '%s %.3f +- %.3f'%( parname, np.round(mean,3), np.round(std,3)),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\t\telse :\n\t\t\tax.text( 0.95, 0.95, '%s %.3e +- %.3e'%( parname, mean, std),\n\t\t\t\t\t ha='right',va='top',transform=ax.transAxes )\n\n\tplt.draw()",
"def plot_win_cum_dist(perc: pd.Series, cum_perc_wins: pd.Series, title: str, \n ylims: Tuple[int, int]=(-0.05, 1.05)) -> Tuple[plt.Figure, plt.Axes]:\n fig, ax = plt.subplots()\n ax.plot(perc, cum_perc_wins)\n ax.set_ylim(ylims)\n ax.set_title(title)\n fig.tight_layout()\n return fig, ax",
"def cdf(self, value=None):\n if value is None:\n value = self.value\n return self.rv.cdf(\n value, *self._pymc_dists_to_value(self.args), **self.kwds\n )",
"def curve_plot(self):\n if self.session.active['mode'] == 'database':\n self.curvePlot.set_scroll_interval()\n self.curvePlot.update_depth()\n self.curvePlot.show()",
"def cdf(data_r, data_f, xlabel: str = 'Values', ylabel: str = 'Cumulative Sum', ax=None):\n x1 = np.sort(data_r)\n x2 = np.sort(data_f)\n y = np.arange(1, len(data_r) + 1) / len(data_r)\n\n ax = ax if ax else plt.subplots()[1]\n\n axis_font = {'size': '14'}\n ax.set_xlabel(xlabel, **axis_font)\n ax.set_ylabel(ylabel, **axis_font)\n\n ax.grid()\n ax.plot(x1, y, marker='o', linestyle='none', label='Real', ms=8)\n ax.plot(x2, y, marker='o', linestyle='none', label='Synthetic', alpha=0.5)\n ax.tick_params(axis='both', which='major', labelsize=8)\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=3)\n\n # If labels are strings, rotate them vertical\n if isinstance(data_r, pd.Series) and data_r.dtypes == 'object':\n ax.set_xticklabels(data_r.value_counts().sort_index().index, rotation='vertical')\n\n if ax is None:\n plt.show()",
"def plot_costs(j_history):\n plt.figure(figsize=(14, 8))\n plt.plot(range(len(j_history)), j_history)\n plt.grid(True)\n plt.title('J (Cost)')\n plt.xlabel('Iteration')\n plt.ylabel('Cost function')\n plt.xlim([0, 1.05 * ITERATIONS])\n plt.ylim([4, 7])\n plt.show()\n plt.close()",
"def produce_cgchart(ytrue, ypred):\n\n yprobas = np.append((1-ypred).reshape(-1,1), ypred.reshape(-1,1), axis=1)\n # 0's and 1's\n print(yprobas.shape)\n areas = plot_cumulative_gain(ytrue, yprobas)",
"def cdf(self,x):\n return self.categoricalDist.cdf(x)",
"def plot_p(self, show = False):\n try:\n difference = self.binom_null\n except:\n self.simulate_significance()\n difference = self.binom_null\n\n observed_difference = self.p_treatment - self.p_control\n\n mu, sigma = stats.norm.fit(difference)\n crit_density = stats.norm.pdf(observed_difference, mu, sigma)\n\n x = np.linspace(min(difference), max(difference), self.n_control + self.n_treatment)\n y = stats.norm.pdf(x, mu, sigma)\n\n line_curve = dict(color = 'blue', width = 2)\n\n data = [\n go.Scatter(\n x = x,\n y = y,\n mode = 'lines',\n showlegend = False,\n line = line_curve\n ),\n go.Scatter(\n x = x[x > observed_difference],\n y = y[np.where(x > observed_difference)],\n fill = 'tozeroy',\n showlegend = False,\n line = line_curve\n )\n ]\n\n layout = dict(\n plot_bgcolor = 'white',\n width = 800,\n height = 600,\n title = 'Significance',\n xaxis = dict(\n title = 'Difference in Probabilities',\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black'\n ),\n yaxis = dict(\n title = 'Density',\n showgrid = False,\n zeroline = False,\n showline = True,\n linecolor = 'black'\n )\n )\n\n fig = go.Figure(data = data, layout = layout)\n\n fig.add_vline(x = observed_difference,\n line_width = 2,\n line_dash = 'dash',\n line_color = 'black',\n annotation_text = 'P Value {:.4f}'.format(self.p_value),\n annotation_position = 'top right')\n\n if show:\n # Intended to be used in notebooks.\n # .py app files that use this module will handle saving and opening from desktop\n fig.show();\n\n return fig",
"def plot_ecdf(self, variant_one, variant_two):\n if variant_one == variant_two:\n raise ValueError('variant_one and variant_two cannot be the same')\n if variant_one not in self.posteriors.keys() or \\\n variant_two not in self.posteriors.keys():\n raise ValueError(('Variants must only be a value in column '\n '{}'.format(self.bucket_col_name)))\n\n if variant_one in self.ecdf.keys() and \\\n variant_two in self.ecdf[variant_one].keys():\n self._plot_ecdf(numerator_name=variant_one,\n denominator_name=variant_two)\n plt.ylabel('Cumulative Lift: {0} vs {1}'\n .format(variant_two, variant_one))\n else:\n self._plot_ecdf(numerator_name=variant_two,\n denominator_name=variant_one)\n plt.ylabel('Cumulative Lift: {0} vs {1}'\n .format(variant_one, variant_two))",
"def plot_loss(self):\n #x = [k for k in range(self.rep)]\n loss = self.min_list[:,0]//100 #For clarity\n #plt.plot(x,self.min_list[:,0])\n plt.hist(loss,density=True)\n plt.xlabel(self.list_name + '_loss//100')\n plt.ylabel('Frequency')\n #plt.xticks(range(8),[0,250,500,750,1000,1250,1500,1750])\n plt.title('Distribution of '+self.list_name+'_loss ('+str(self.rep)+' iterations)')\n plt.savefig('img/stats/'+self.list_name+'_lossFrequency_'+self.model_name+'.png')\n plt.show()",
"def plot_density(sampler, threshold, sigma, width, n_random_samples = 10000):\n recX, labels = sampler.sample(n_random_samples)\n rec_t0 = recX[:,0]\n rec_amplitude = recX[:,1]\n generator.generate_pdf(threshold, sigma, width)\n fig = plt.figure(figsize = (12, 12))\n # pdf and random samples go to bottom right, margins on appropriate sides\n ax1 = plt.subplot2grid((12,12),(4,0), colspan = 9, rowspan = 8)\n pdf_map = ax1.contourf(generator.t0s, generator.amplitudes, generator.pdf, 10, cmap = 'Blues')\n ax1.scatter(rec_t0, rec_amplitude, s = 0.03, c = 'y')\n ax1.set_title('Probability density and random samples'.format(n_random_samples))\n ax1.set_xlabel('t0 [ns]')\n ax1.set_ylabel('amplitude [S/N]')\n ax1c = plt.subplot2grid((12,12), (1,9), rowspan = 3, colspan = 2)\n plt.colorbar(pdf_map, cax = ax1c, format = ticker.FuncFormatter(_fmt))\n ax2 = plt.subplot2grid((12,12),(1,0), colspan = 9, rowspan = 3, sharex = ax1)\n ax2.plot(generator.t0s[:,-1], generator.pdfu)\n ax2.hist(rec_t0, bins = generator.t0s[:,0], normed = True, alpha = 0.5)\n ax2.set_title('t0 margin distribution')\n ax2.set_ylabel('P(1 over)')\n plt.setp(ax2.get_xticklabels(), visible = False)\n ax3 = plt.subplot2grid((12,12),(4,9), rowspan = 8, colspan = 3, sharey = ax1)\n ax3.plot(generator.pdfv, generator.amplitudes[-1,:])\n ax3.hist(rec_amplitude, bins = generator.amplitudes[0,:], normed = True, orientation = 'horizontal', alpha = 0.5)\n ax3.set_title('Amplitude margin distribution')\n ax3.set_xlabel('P(1 over)')\n plt.setp(ax3.get_yticklabels(), visible = False)\n ax4 = plt.subplot2grid((12,12),(0,0), colspan = 9)\n ax4.text(0.5, 1.0, 'Exact P(one over) distribution and {0} random samples \\nthreshold : {1}, sigma : {2}, width : {3}'.format(n_random_samples, threshold, sigma, width), horizontalalignment = 'center', verticalalignment = 'top', fontsize = 18)\n ax4.set_axis_off()\n plt.tight_layout()\n plt.savefig('{0}/rng_test_thr{1}_sig{2}_w{3}.png'.format(plotdir, threshold, sigma, width))",
"def graph_coherence(coherence_values):\n limit=50; start=5; step=5;\n x = range(start, limit, step)\n plt.plot(x, coherence_values)\n plt.xlabel(\"Num Topics\")\n plt.ylabel(\"Coherence score\")\n plt.legend((\"coherence_values\"), loc='best')\n plt.show()\n\n # Print the coherence scores \n for m, cv in zip(x, coherence_values):\n print(\"Num Topics =\", m, \" has Coherence Value of\", round(cv, 4))",
"def VisualizeDistribution(dataset, distribution, title, filename):\n # create the output directory if it doesn't exist\n if not os.path.exists('distributions'):\n os.mkdir('distributions')\n if not os.path.exists('distributions/{}'.format(dataset)):\n os.mkdir('distributions/{}'.format(dataset))\n\n # determine the appropriate units for this distribution\n max_duration = max(distribution)\n if max_duration > 10**8:\n units = 'seconds'\n for iv in range(len(distribution)):\n distribution[iv] = distribution[iv] / 10**9\n else:\n units = 'microseconds'\n\n # plot the figure\n plt.figure(figsize=(6, 4))\n\n # write the labels for this set of functions\n plt.title(title, pad=20, fontsize=14)\n plt.ylabel('Time ({})'.format(units), fontsize=12)\n plt.xlabel('No. Appearances: {}'.format(len(distribution)), fontsize=12)\n\n # plot the distribution\n plt.boxplot(distribution)\n\n plt.tight_layout()\n\n output_filename = 'distributions/{}/{}'.format(dataset, filename)\n plt.savefig(output_filename)\n\n # clear and close this figure\n plt.clf()\n plt.close()",
"def cdf(self, value):\n return self._normal.cdf(value)",
"def contingency(self, scale, distrib=True, dataname=''):\n print 'Generating the plot ...'\n\n cont = np.zeros((scale, scale))\n minLat, maxLat, minLon, maxLon = self.city[1]\n normLat = scale / (maxLat - minLat)\n normLon = scale / (maxLon - minLon)\n\n # syn = (index, rel index, class)\n for i in range(self.dataset.shape[0]):\n posy = int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n # print posx,posy,data[i][0],data[i][1], normLat, normLon\n try:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n except IndexError:\n print self.dataset[i][0], self.dataset[i][1]\n if distrib:\n cont = cont / np.max(cont)\n\n fig = plt.figure()\n\n ax = fig.add_subplot(111)\n plt.title('Density ')\n\n plt.imshow(cont, interpolation='bicubic', cmap=cm.gist_yarg)\n vmax = np.max(cont)\n # vmin=np.min(cont)\n\n if distrib:\n plt.colorbar(ticks=np.round(np.linspace(0, 1, 10), 2),\n orientation='vertical')\n nfile = self.application + '-' + dataname\n\n fig.savefig(homepath + 'Results/' + self.city[2] + '-' + nfile + '.pdf', orientation='landscape', format='pdf')\n\n #plt.show()",
"def plot(self, n_confs):\n \n import pandas as pd\n import numpy as np\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n import csv\n \n n_iter = len(self.plot_data)\n \n data = np.ndarray((n_iter, n_confs+1))\n data[:,0] = [i[0] for i in self.plot_data]\n data[:,1:] = [i[1].detach().cpu().numpy() for i in self.plot_data]\n\n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n df.to_csv(f\"{self.plot_name}.tab\", sep=\"\\t\", quoting=csv.QUOTE_NONE) \n\n d = data[:,1:].reshape(-1)\n d = d[~np.isnan(d)]\n mine = d.min() - 0.01\n for i in range(n_confs): \n data[:,i+1] -= mine\n \n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n \n colors = (0,0,0)\n area = 10\n \n # Plot\n fig = plt.figure(figsize=(15, 15))\n ax = fig.add_subplot(1,1,1)\n for i in range(n_confs):\n ax.plot('iter', f'c{i+1}', data=df)\n ax.set_yscale('log')\n\n plt.xlabel('iter')\n plt.ylabel('loss')\n plt.savefig(f'{self.plot_name}.png')",
"def cdf(array, figsize, color, label, xlabel, ylabel, title, textsize, xsize, ysize, loc):\r\n fig, ax = plt.subplots(figsize=figsize)\r\n x = np.sort(array)\r\n y = np.array(range(len(array)))/float(len(array))*100 \r\n ax.plot(x, y, color = color, label = label) # plot the CDF\r\n ax.set_title(title, weight = 'bold', size = textsize)\r\n ax.set_xlabel(xlabel, weight = 'bold', size = textsize)\r\n ax.set_ylabel(ylabel, weight = 'bold', size = textsize)\r\n plt.xticks(fontsize = xsize)\r\n plt.yticks(fontsize = ysize)\r\n plt.legend(loc = loc)",
"def make_distplot(data, output_f, title, xlabel, prefix):\n\n plt.figure(figsize=(8, 8), dpi=1200)\n displot = sns.distplot(data, hist=False, rug=True, color=\"b\")\n out_name = prefix + \"_\" + title + \"_\" + output_f.split(\".\")[0] + \".pdf\"\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel('Density')\n displot.figure.savefig(out_name)\n plt.close()",
"def plot_img_and_hist(image, axes, bins=256):\n# image = img_as_float(image)\n ax_img, ax_hist = axes\n ax_cdf = ax_hist.twinx()\n\n # Display image\n ax_img.imshow(image, cmap=plt.cm.gray);\n ax_img.set_axis_off()\n\n # Display histogram\n ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')\n ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n ax_hist.set_xlabel('Pixel intensity')\n ax_hist.set_xlim(0, 1)\n ax_hist.set_yticks([])\n\n # Display cumulative distribution\n img_cdf, bins = exposure.cumulative_distribution(image, bins)\n ax_cdf.plot(bins, img_cdf, 'r')\n ax_cdf.set_yticks([])\n\n return ax_img, ax_hist, ax_cdf",
"def cum_density_func(xs,norm=True,rank=False,data_range='data',pdf=None):\n if pdf is None:\n pdf = prob_density_func(xs,False,data_range)\n pdfk = sorted(pdf.keys())\n pdfv = map(pdf.get,pdfk)\n if not rank:\n cdfv = np.cumsum(pdfv)\n if norm:\n cdfv = cdfv/np.sum(pdfv)\n else:\n cdfv = np.arange(1,len(pdfk)+1)\n if norm:\n cdfv = cdfv/float((len(pdfk)+1))\n return dict(zip(pdfk,cdfv))",
"def comp_cum_distribution(xs,norm=True,rank=False,data_range='data',pdf=None):\n cdf = cum_density_func(xs,norm,rank,data_range,pdf)\n max_v = np.max(cdf.values())\n return dict([(k,max_v - cdf[k]) for k in cdf.keys()])",
"def compute_cdf(ordered_weights):\n return numpy.cumsum(ordered_weights) - 0.5 * ordered_weights",
"def plot_ccum_degree_dist(net, label, outpath, degree_type='all'):\n unique_deg, unique_cnt = networkit_util.get_cc_deg_dist(net, degree_type)\n title = {'all': '', 'in': 'In', 'out': 'Out'}\n outfile_name = {'all': 'cc', 'in': 'cc-in', 'out': 'cc-out'}\n marker_color = {'all': 'b', 'in': 'g', 'out': 'r'}\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.loglog(unique_deg, unique_cnt, color=marker_color[degree_type], marker='*', label=label)\n # ax.set_title('Complementary Cumulative ' + title[degree_type] + '-Degree distribution')\n ax.set_xlabel('k')\n ax.set_ylabel('P(x>=k)')\n # ax.legend(loc='best')\n plt.savefig(outpath + label + '-' + outfile_name[degree_type] + '-degree-distribution.eps')\n return ax",
"def plot_EC_pI_dist(EC_pi_data, filename, title, cutoff_pi):\n fig, ax = plt.subplots()\n\n modifications = define_seq_modifications()\n\n # unmodifed\n mod_dict = modifications['0']\n data = EC_pi_data[EC_pi_data['modification'] == 0]\n n, bins, patches = ax.hist(data['pi'],\n facecolor=mod_dict['colour'],\n alpha=0.5,\n histtype='stepfilled',\n bins=np.arange(0, 14 + 0.2, 0.5),\n label=mod_dict['name'])\n\n # modification 1 - succinylation\n mod_dict = modifications['1']\n data = EC_pi_data[EC_pi_data['modification'] == 1]\n n, bins, patches = ax.hist(data['pi'],\n facecolor=mod_dict['colour'],\n alpha=0.5,\n histtype='stepfilled',\n bins=np.arange(0, 14 + 0.2, 0.5),\n label=mod_dict['name'])\n\n # Set number of ticks for x-axis\n ax.tick_params(axis='both', which='major', labelsize=16)\n ax.set_xlabel('calculated pI', fontsize=16)\n ax.set_ylabel('count', fontsize=16)\n ax.set_xlim(0, 14)\n # plot pI cut-off\n ax.axvline(x=cutoff_pi, c='k', lw='2', linestyle='--')\n # legend\n ax.legend(fontsize=16)\n # title\n ax.set_title(title, fontsize=16)\n\n fig.tight_layout()\n fig.savefig(filename,\n dpi=720, bbox_inches='tight')",
"def plot_curve(epochs, hist, list_of_metrics):\n # list_of_metrics should be one of the names shown in:\n # https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#define_the_model_and_metrics\n\n plt.figure()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Value\")\n\n for m in list_of_metrics:\n x = hist[m]\n plt.plot(epochs[1:], x[1:], label=m)\n\n plt.legend()\n plt.show()",
"def cdf(self, points):\n if self._y_cdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_cdf(y)\n\n # select which x quantile curve to use.\n x_curve = (y_out - self.y_min) * self.y_res / (self.y_max - self.y_min)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_cdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )"
] | [
"0.6518769",
"0.65124094",
"0.6493381",
"0.64194536",
"0.60567564",
"0.6054903",
"0.6053019",
"0.6044755",
"0.6002551",
"0.59260064",
"0.5922537",
"0.5888744",
"0.58834714",
"0.5870317",
"0.58622444",
"0.58605176",
"0.5819715",
"0.58051467",
"0.5775122",
"0.5729377",
"0.57234955",
"0.57230604",
"0.56823504",
"0.56775326",
"0.56686646",
"0.5642131",
"0.56263655",
"0.55730706",
"0.5559796",
"0.5547592",
"0.55270845",
"0.55226463",
"0.55149907",
"0.54834265",
"0.54807353",
"0.54497373",
"0.544641",
"0.5444354",
"0.54146343",
"0.540888",
"0.5402398",
"0.53975236",
"0.539631",
"0.5393623",
"0.53809047",
"0.5364643",
"0.5360564",
"0.5358984",
"0.5340151",
"0.5337406",
"0.5324713",
"0.5323717",
"0.5311079",
"0.5289502",
"0.52787745",
"0.5278332",
"0.5271676",
"0.5267907",
"0.52662385",
"0.52588576",
"0.5257191",
"0.5247259",
"0.52403444",
"0.52402097",
"0.52286696",
"0.5224984",
"0.5201629",
"0.51978815",
"0.5196323",
"0.5190801",
"0.518932",
"0.5188935",
"0.5188529",
"0.518624",
"0.5184962",
"0.5183344",
"0.5181544",
"0.5179927",
"0.51695925",
"0.5162368",
"0.51618594",
"0.51610124",
"0.5159969",
"0.51599354",
"0.5157982",
"0.515593",
"0.5154469",
"0.51502985",
"0.51467276",
"0.5146505",
"0.5145566",
"0.51345915",
"0.51299924",
"0.51165247",
"0.5115633",
"0.5111064",
"0.50960016",
"0.50923425",
"0.5077241",
"0.5072211"
] | 0.54887307 | 33 |
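For reference, a minimal runnable sketch that exercises the cumulative_distribution document above together with the credible_interval document in the next entry. The Bayes class name and its hypotheses/prior/current/posterior attributes are assumptions made only to keep the snippet self-contained:

import numpy as np

class Bayes:
    """Hypothetical container for the attributes both documents expect."""
    def __init__(self, hypotheses, prior, posterior):
        self.hypotheses = np.asarray(hypotheses)  # grid of hypothesis values
        self.prior = np.asarray(prior)            # 1-D prior pmf
        self.current = np.asarray(prior)          # 1-D current pmf
        self.posterior = np.asarray(posterior)    # 2-D pmf, one row per iteration

    def cumulative_distribution(self, dist='current'):
        dictDist = {'current': np.cumsum(self.current),
                    'prior': np.cumsum(self.prior),
                    'posterior': np.cumsum(self.posterior, axis=1)}
        return dictDist[dist]

    def credible_interval(self, distType='current', interval=(0.025, 0.975)):
        distCred = self.cumulative_distribution(dist=distType)
        if distType in ('current', 'prior'):
            lo = self.hypotheses[np.where((distCred - interval[0]) > 0)[0].min()]
            hi = self.hypotheses[np.where((distCred - interval[1]) > 0)[0].min()]
            return [(lo, hi)]
        return [(self.hypotheses[np.where((row - interval[0]) > 0)[0].min()],
                 self.hypotheses[np.where((row - interval[1]) > 0)[0].min()])
                for row in distCred]

hyp = np.arange(1, 6)                        # hypotheses 1..5
pmf = np.array([0.05, 0.2, 0.5, 0.2, 0.05])
b = Bayes(hyp, pmf, np.vstack([pmf, pmf]))   # pretend two identical iterations
print(b.cumulative_distribution('prior'))    # [0.05 0.25 0.75 0.95 1.  ]
print(b.credible_interval('prior'))          # default 95% interval spans hypotheses 1 through 5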
Calculates a credible interval for any probability distribution, given an input interval for the CDF. | def credible_interval(self, distType='current', interval=(0.025, 0.975)):
# Calculate cdf to use for credible interval
distCred = self.cumulative_distribution(dist=distType)
# Prior and Current credible intervals
if distType in ('current', 'prior'):
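    # Take the first hypothesis whose cdf value exceeds each quantile bound.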
minCred = self.hypotheses[np.where((distCred-interval[0])>0)[0].min()]
maxCred = self.hypotheses[np.where((distCred-interval[1])>0)[0].min()]
ci = [(minCred, maxCred)]
# Posterior: all iterations credible intervals
else:
ci = []
    for row in distCred:
        minCred = self.hypotheses[np.where((row-interval[0])>0)[0].min()]
        maxCred = self.hypotheses[np.where((row-interval[1])>0)[0].min()]
ci.append((minCred, maxCred))
return ci | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_credible_interval(vals, weights, confidence: float = 0.95):\n if confidence <= 0.0 or confidence >= 1.0:\n raise ValueError(\n f\"Confidence {confidence} must be in the interval (0.0, 1.0).\"\n )\n alpha_lb = 0.5 * (1.0 - confidence)\n alpha_ub = confidence + alpha_lb\n lb = compute_quantile(vals, weights, alpha_lb)\n ub = compute_quantile(vals, weights, alpha_ub)\n return lb, ub",
"def credible_interval(self, parameter, interval=[0.05, 0.95]):\n\n if parameter not in self.parameters:\n raise ValueError(f\"Parameter '{parameter}' is not available\")\n\n intervals = {}\n for key, value in self.results.items():\n if isinstance(value, Grid):\n intervals[key] = Plot._credible_interval_grid(\n value, parameter, interval\n )\n else:\n credint = value.posterior[parameter].quantile(interval).to_list()\n intervals[key] = credint[0] if len(interval) == 1 else credint\n\n return list(intervals.values())[0] if len(self.results) == 1 else intervals",
"def range_probability_cdf(mean, devi, range_low, range_high):\r\n # 1 / (2 * pi * deviation**2) = x\r\n # e ** -((range_num - mean)**2 / 2*deviation**2 = y\r\n # area = y/x\r\n\r\n large = norm.cdf(range_high, mean, devi)\r\n print(\"scipy large area = \", large)\r\n small = norm.cdf(range_low, mean, devi)\r\n print(\"scipy small area = \", small)\r\n range_area = large - small\r\n message = f\"The area in range {range_low} - {range_high} is {range_area}\"\r\n return range_area",
"def get_confidence_interval(self,a,b):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\tworking_indices = [i for i,v in enumerate(k_vals) if (v >= a and v<= b)]\n\t\tworking_prob_vals = [prob_vals[i] for i in working_indices]\n\t\treturn sum(working_prob_vals)",
"def test_conf_interval_ecdf_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"ecdf\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"ecdf\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n pred_df[ERR_STD_COL] = round(pred_df[ERR_STD_COL], 2)\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.32, 289.38, 291.3, 291.34), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.63, -5.56, -4.13, -4.08), (\n \"quantiles are incorrect\")\n expected_stds = [0.29, 0.42, 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 0.58, 0.58,\n 0.58, 0.42]\n assert list(pred_df[ERR_STD_COL].values) == expected_stds",
"def get_interval_from_confidence_file(self, interval_dict):\n for arc in self.arc_info.keys():\n weight = self.arc_info[arc][\"weight\"]\n if weight == 0:\n interval = [0, 0]\n else:\n interval = interval_dict[weight]\n ub = interval[1]\n lb = interval[0]\n self.arc_info[arc][\"upper_bound\"] = ub\n self.arc_info[arc][\"lower_bound\"] = lb",
"def mycdf(mean, devi, range_low, range_high):\r\n\r\n devi_square = float(devi**2)\r\n low_e_num = math.exp(-((float(range_low) - float(mean))**2 / (2*devi_square)))\r\n denom = float( math.sqrt(2 * math.pi * devi_square) )\r\n high_e_num = math.exp(-((float(range_high) - float(mean))**2 / (2*devi_square)))\r\n low_area = float(low_e_num / denom)\r\n high_area = float(high_e_num / denom)\r\n if range_low > mean:\r\n low_area = 1 - low_area\r\n if range_high > mean:\r\n high_area = 1 - high_area\r\n print(\"my high_area = \", high_area)\r\n print(\"my low_area = \", low_area)\r\n under_curve = high_area - low_area\r\n message = f\"The area under the curve for range {range_low} - {range_high} = {under_curve}\"\r\n return under_curve",
"def _compute_register_bounds(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.cdf(bits, probability)\n return probs / probs[-1]",
"def cdf(self, alpha): #Plot empirical cfd with confidence interval\n x = self.x\n n = len(x)\n y = np.arange(1, n+1)/n\n \n #Computing confidence interval with the Dvoretzky–Kiefer–Wolfowitz method based on the empirical points\n F1 = []\n F2 = []\n for i in range(0, n):\n e = (((mt.log(2/alpha))/(2*n))**0.5) \n F1.append(y[i] - e)\n F2.append(y[i] + e) \n plt.plot(sorted(x), y, label='Empirical CDF')\n plt.plot(sorted(x), F1, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Dvoretzky–Kiefer–Wolfowitz Confidence Bands')\n plt.plot(sorted(x), F2, linestyle='--', color='red', alpha = 0.8, lw = 0.9)\n plt.ylabel('Cumulative Distribution Function')\n plt.xlabel('Observed Data')\n plt.legend()\n plt.show()\n \n return(y)",
"def compute_interval_limits(bias, acceleration, n_boots, ci=95):\n from scipy.stats import norm\n from numpy import isnan, nan\n\n alpha = _compute_alpha_from_ci(ci)\n\n alpha_low = alpha / 2\n alpha_high = 1 - (alpha / 2)\n\n z_low = norm.ppf(alpha_low)\n z_high = norm.ppf(alpha_high)\n\n kws = {'bias': bias, 'acceleration': acceleration}\n low = _compute_quantile(z_low, **kws)\n high = _compute_quantile(z_high, **kws)\n\n if isnan(low) or isnan(high):\n return low, high\n\n else:\n low = int(norm.cdf(low) * n_boots)\n high = int(norm.cdf(high) * n_boots)\n return low, high",
"def _credible_interval_grid(grid, parameter, interval):\n\n from pesummary.utils.array import Array\n\n margpost = grid.marginalize_posterior(not_parameters=parameter)\n intervals = Array.percentile(\n grid.sample_points[parameter],\n weights=margpost,\n percentile=[100 * val for val in interval],\n )\n\n return intervals if len(interval) > 1 else intervals[0]",
"def cchalf(dataframe, function, bins):\n dist = dataframe.set_index(['H', 'K', 'L'])['D'].drop_duplicates()\n dmin = dist.min()\n dmax = dist.max()\n binedges = np.linspace(dmin**-2, dmax**-2, bins+1)**-0.5\n binedges = list(zip(binedges[:-1], binedges[1:]))\n a,b = split(dataframe)\n xval_a, xval_b = function(a), function(b)\n#TODO: Fix this awful hack\n key = [i for i in xval_a if i!='D'][0]\n xval_a, xval_b = xval_a.join(dist),xval_b.join(dist)\n idx = xval_a.index.intersection(xval_b.index)\n xval_a,xval_b = xval_a.loc[idx],xval_b.loc[idx]\n cchalf = []\n for dmin,dmax in binedges:\n idx = (xval_a['D'] > dmin) & (xval_a['D'] < dmax)\n a = np.array(xval_a[idx][key]).flatten()\n b = np.array(xval_b[idx][key]).flatten()\n cchalf.append(np.corrcoef(a,b)[0, 1])\n return cchalf, binedges",
"def calcBRange(c,n=10):\n \n bMin = -abs(c)/2.0 \n bMax = abs(c)/2.0 \n return np.linspace(bMin,bMax,n)",
"def cdfFunction(f, x, N):\r\n return ssstats.binom.cdf(x, N, f)",
"def CI(x, alpha=0.05):\n x = np.asarray(x)\n xs = x.size\n s = np.argsort(x)\n c = int((alpha / 2) * xs)\n return Interval(x[s[c-1]], x[s[xs-c]])",
"def _confidence_interval_function(xq, cinfo):\n a = cinfo.a.copy()\n a[cinfo.indx] = xq\n\n yfit, _ = cinfo.fit_function(a, pderflg=False)\n if yfit.dtype in ['complex64','complex128']:\n yfit = np.concatenate([yfit.real,yfit.imag])\n wchisqr1 = np.sum(cinfo.ww*(yfit-cinfo.dat)**2)/cinfo.nfree\n \n goal = abs(wchisqr1-cinfo.wchi*cinfo.factor)\n \n return goal",
"def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n coh_var[i, j] = tsu.jackknifed_coh_variance(\r\n self.spectra[i],\r\n self.spectra[j],\r\n self.eigs,\r\n adaptive=self._adaptive\r\n )\r\n\r\n idx = triu_indices(self.input.data.shape[0], 1)\r\n coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()\r\n\r\n coh_mat_xform = tsu.normalize_coherence(self.coherence,\r\n 2 * self.df - 2)\r\n\r\n lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n\r\n # convert this measure with the normalizing function\r\n tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)\r\n tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)\r\n\r\n return ub - lb",
"def ci_diff_prop(p1, p2, n1, n2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n prop_diff = p1 - p2\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n margin_of_error = z_star * (np.sqrt((p1 * (1 - p1) / n1) + (p2 * (1 - p2) / n2)))\n # calculate the lower and upper bound\n lcb = prop_diff - margin_of_error\n ucb = prop_diff + margin_of_error\n print(\n \"{}% Confidence Interval for difference in two Population proportions: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )",
"def rate_density(self, value):\n\n # TODO: analyse for certain that log units cancel out\n # with the change in occr\n\n if value.ndim == 2:\n value = value.T\n\n R_i = np.digitize(value[0], self._R_boundaries) - 1\n P_i = np.digitize(value[1], self._P_boundaries) - 1\n\n # Remove the ones out of bounds (oob_mask = out of bounds mask)\n oob_mask = np.zeros_like(R_i, dtype=bool)\n oob_mask = oob_mask | ((R_i < 0) | (R_i >= np.shape(self.occr)[0]))\n oob_mask = oob_mask | ((P_i < 0) | (P_i >= len(self._P_boundaries)-1))\n\n R_i = R_i[~oob_mask]\n P_i = P_i[~oob_mask]\n\n return self.occr[R_i] * self._cpf_grid[R_i, P_i]",
"def _ci(arr, ci=0.95, method=\"bootstrap\", n_bootstraps=2000, random_state=None):\n if method == \"bootstrap\":\n return bootstrap_confidence_interval(\n arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state\n )\n else:\n from .parametric import _parametric_ci\n\n return _parametric_ci(arr, ci=ci)",
"def rvsWithinCDFbounds(self,lowerBound,upperBound):\n randResult = self._distribution.inverseCdf(float(random(1))*(upperBound-lowerBound)+lowerBound)\n return randResult",
"def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf",
"def get_confidence_interval(\n num_people,\n num_iter=1000000,\n percentile=2.576,\n num_days=365,\n):\n mean = 0.0\n variance = 0.0 # not exactly\n for i in range(1, num_iter + 1):\n x = [randint(1, num_days) for person in range(num_people)]\n x.sort()\n is_consecutive = any(p + 1 == q for (p, q) in zip(x[:-1], x[1:], strict=True))\n is_a_loop = x[0] + num_days - 1 == x[-1]\n is_positive = int(is_consecutive or is_a_loop)\n delta = is_positive - mean\n mean += delta / float(i)\n variance += delta * (is_positive - mean)\n sd = sqrt(variance / float(num_iter - 1))\n lower_bound = mean - percentile * sd / sqrt(num_iter)\n upper_bound = mean + percentile * sd / sqrt(num_iter)\n print(\n \"Number of people: {}\\tLower bound: {:2.5%}\\tUpper bound: {:2.5%}\".format(\n num_people,\n lower_bound,\n upper_bound,\n ),\n )\n return lower_bound, upper_bound",
"def chebint(self, a, b, c, n):\n sum = 0.0\n fac = 1.0\n con = 0.25 * (b - a) # factor that normalizes the interval\n cint = numpy.zeros(n)\n for j in range(1, n - 2):\n cint[j] = con * (c[j - 1] - c[j + 1]) / j\n sum = sum + fac * cint[j]\n fac = - fac\n cint[n - 1] = con * c[n - 2] / (n - 1)\n sum = sum + fac * cint[n - 1]\n cint[0] = 2.0 * sum # set constant of integration.\n return (cint)",
"def test_conf_interval_normal_method_with_bounds(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n # with enforced lower limit (``min_admissible_value``)\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=290.0,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.0, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (290.0, 290.0, 290.0, 290.0), (\n \"quantiles are incorrect\")",
"def ci_prop(p, n, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # standard error\n std_error = np.sqrt(p * (1 - p) / n)\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n # calculate lower and upper confidence bounds\n lcb = np.round(p - margin_of_error, 2)\n ucb = np.round(p + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Proportion: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )",
"def cdf(self,x):\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == sortedMapping[-1][0]:\n return 1.0\n if x in self.values:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x == ( float(element[0]) if self.isFloat else element[0] ):\n return cumulative\n else:\n if self.isFloat:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x >= element[0]:\n return cumulative\n # if we reach this point we must error out\n self.raiseAnError(IOError,'Categorical distribution cannot calculate cdf for ' + str(x))",
"def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue",
"def gpdfInt(t1,t2,c,tau):\n return st.gamma.cdf(t2,c,loc=0,scale=tau)-st.gamma.cdf(t1,c,loc=0,scale=tau)",
"def CumulativeDistribution(data, nbins, range=None, normed=True, centerbins=False):\n\n # 1) COMPUTE THE DISTRIBUTION OF THE DATA\n ydata, xdata = np.histogram(data, nbins, range, normed)\n\n # 1.1) Compute the cumulative sum of the probability\n ydata = ydata.cumsum()\n\n # 2) RETURN THE RESULTS\n if centerbins:\n dif = 0.5 * (xdata[-1] - xdata[0]) / nbins\n xdata += dif\n\n if normed:\n norm = 1.0 / ydata[-1]\n ydata *= norm\n\n return xdata[:-1], ydata\n\n else:\n return xdata[:-1], ydata",
"def cagr_for_days(start_value: float, end_value: float, days: int):\n if start_value == end_value or start_value == 0:\n return 0.00\n\n years = float(days) / 365\n growth_rate: float = (((end_value / start_value) ** (1 / years)) - 1) * 100\n\n return round(growth_rate, 2)",
"def test_calculate_crow_bounds_cum_failure_rate_type2(self):\n\n _bounds = calculate_crow_bounds(22, 620.0, 0.4239, 0.6142, 0.9, 3, 2)\n self.assertAlmostEqual(_bounds[0], 0.02402216)\n self.assertAlmostEqual(_bounds[1], 0.04877491)",
"def get_cdf(dist):\n cdf = []\n total = 0\n for i in range(len(dist)):\n total += dist[i]\n cdf.append(total)\n return cdf",
"def cohensd2problarger(d):\n\n return stats.norm.cdf(d / np.sqrt(2))",
"def create_proportions_interval(confidence, n_samples, data_point, method=\"AC\"):\n if data_point > 1 or data_point < 0:\n raise Exception(\"create_proportions_interval cannot be used for value outside of range [0,1].\")\n\n if method.lower() == \"clt\" or method.lower == \"wald\":\n clt_margin = st.norm.ppf(1 - (1 - confidence) / 2) * math.sqrt(data_point * (1 - data_point) / n_samples)\n clt = float(max(data_point - clt_margin, 0)), float(min(data_point + clt_margin, 1))\n return Interval(*clt)\n elif \"3\" in method or \"three\" in method:\n rule_of_three_margin = 3/n_samples\n rule_of_three = Interval(float(max(data_point - rule_of_three_margin, 0)), float(min(data_point + rule_of_three_margin, 1)))\n return rule_of_three\n elif method.lower() == \"ac\" or \"agresti\" in method.lower():\n AC = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"agresti_coull\")\n return Interval(*AC)\n elif method.lower() == \"wilson\":\n wilson = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"wilson\")\n return Interval(*wilson)\n elif \"clop\" in method.lower() or \"pear\" in method.lower():\n clopper_pearson = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"beta\")\n return Interval(*clopper_pearson)\n elif \"jef\" in method.lower():\n jeffreys = proportion_confint(round(data_point*n_samples), n_samples, alpha=1 - confidence, method=\"jeffreys\")\n return Interval(*jeffreys)\n elif \"hsb\" in method.lower():\n return create_interval_hsb(confidence, n_samples, data_point)\n else:\n raise Exception(\"Method mot found.\")",
"def the_function(interval):\n if math.ceil(interval.upper) % 2:\n return interval * type(interval).closed(\n fractions.Fraction(3, 2),\n fractions.Fraction(3, 2)\n )\n else:\n return interval * type(interval).closed(\n fractions.Fraction(1, 2),\n fractions.Fraction(1, 2)\n )",
"def cdf(self, points):\n if self._y_cdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_cdf(y)\n\n # select which x quantile curve to use.\n x_curve = (y_out - self.y_min) * self.y_res / (self.y_max - self.y_min)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_cdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )",
"def bin_cdf(n, p, x):\n\n # p C (bin_dist) ** 0 ) *(1-bin_dist)** p\n\n # n = (p)=20\n # x = x = 1 = r\n # nCr = n! / r!(n-r)\n\n \n\n\n\n\n\n\n\n\n def bin_dist(n, p, x):\n \"\"\"\n Given n number of trials, p the probability of success,\n what is the probability of having x successes?\n\n Your function should raise a ValueError if x is higher\n than n.\n\n If you need to compute combinations, you can import the\n function \"comb\" from the package \"scipy.special\"\n\n :param n: number of trials (int)\n :param p: probability of success\n :param x: number of successes (int)\n :return: probability of having x successes\n :rtype: float\n :raise ValueError: if x > n\n \"\"\"\n def factorial(x):\n if x >= 0:\n \n factorial = 1\n\n for i in range(1, x + 1):\n factorial = float(factorial * i)\n # print(f' The factorial of {x} is {factorial}') \n return factorial\n\n else:\n raise ValueError(\"Sorry x cannot be a negative number\")\n\n def combination(n, r):\n \"\"\"\n Given n total number of items,\n what is the number of possible ways\n to choose r items from it?\n\n :param n: total number of items (integer)\n :param r: number of items to arrange (int)\n :return: number of combinations\n :rtype: integer\n \"\"\"\n\n \n\n \n numerator = factorial(n)\n denominator = factorial(r)\n subtracted_answer = factorial(n-r)\n \n\n answer = numerator/(denominator * subtracted_answer)\n print(answer)\n return answer \n\n # from scipy.special import comb\n if x > n:\n raise ValueError(\"Error, x must be less than n\")\n else:\n\n\n prob_success = float((combination(n, x)) * ((p**x)*((1-p)**(n-x))))\n\n print(prob_success)\n return prob_success \n \n # an= 1-bin_dist(n,p,x)\n # print(f'word{an}')\n # n= 12\n # p=0.25\n # # x=0??\n # ((n!)/ (x!*(n-x)!)) * (p**x) * (1-p)**(n-x)\n sum_prob = []\n for i in range(x+1):\n print(i)\n prob = bin_dist(n,p,x=i)\n sum_prob.append(prob)\n print(sum_prob)\n total =sum(sum_prob)\n print(total)",
"def cdf2pval(cdf: nptyp.ArrayLike, tail='both') -> nptyp.ArrayLike:\n if tail == 'both':\n return 2 * np.minimum(cdf, 1 - cdf)\n elif tail == 'left':\n return cdf\n elif tail == 'right':\n return 1 - cdf\n else:\n raise ValueError(f\"tail={tail} not recognized\")",
"def get_dec_i_range(data_decs):\n data_dec_is = map_list(lambda x: (x - 1800)//10, data_decs)\n lower_range_i = data_dec_is[0]\n upper_range_i = data_dec_is[-1] + 1\n return (lower_range_i, upper_range_i)",
"def compute_cdf(ordered_weights):\n return numpy.cumsum(ordered_weights) - 0.5 * ordered_weights",
"def cdf(self,x):\n return self.categoricalDist.cdf(x)",
"def get_conf_interval_from_sample(n, mean, sigma, alpha = 0.95) :\n df = n-1\n scale = sigma / np.sqrt(n)\n return stats.t.interval(alpha=alpha, df=df, loc=mean, scale=scale)",
"def test_calculate_crow_bounds_cum_failure_rate_type1(self):\n\n _bounds = calculate_crow_bounds(22, 620.0, 0.4239, 0.6142, 0.9, 3, 1)\n self.assertAlmostEqual(_bounds[0], 0.02402216)\n self.assertAlmostEqual(_bounds[1], 0.05255707)",
"def klucb(x, d, div, upperbound, lowerbound=-float('inf'), precision=1e-6):\n low = max(x, lowerbound)\n up = upperbound\n while up-low > precision:\n m = (low+up)/2\n if div(x, m) > d:\n up = m\n else:\n low = m\n return (low+up)/2",
"def test_pnorm_cdf():\n mu = np.array([[1.], [2.]])\n sigma = np.array([[2., 1.], [1., 3.]])\n\n lowerbound = np.pi/4\n upperbound = np.pi/2\n cdf = pnorm.cdf(lowerbound, upperbound, mu, sigma)\n\n cdf_ans = np.array([0.5066762601816892])\n assert np.allclose(cdf, cdf_ans)",
"def NPD_gos(df,bvals,c=1):\n j_dist = get_coop_coop_neighbour_dist(df) \n degree_dist = get_degree_distribution(df)\n f_jk = get_f_jkAB(j_dist,degree_dist)\n return pd.concat([gradient_of_selection(f_jk,NPD_benefit,b,c) for b in bvals],keys=bvals,names='b')",
"def test2():\r\n area = range_probability_cdf(12, 1.3, 9.4, 14.6)\r\n area2 = mycdf(12, 1.3, 9.4, 14.6)\r\n print(\"scipy result:\", area)\r\n print(\"my result:\", area2)",
"def cdf(self,x):\n if self.base == 'natural':\n cdfValue = (math.log(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n else:\n cdfValue = (math.log10(x)-self.lowerBound)/(self.upperBound-self.lowerBound)\n return cdfValue",
"def test3():\r\n scipy_area = range_probability_cdf(10, 1.5, 8.5, 11.5)\r\n my_area = mycdf(10, 1.5, 8.5, 11.5)\r\n print(\"scipy result:\", scipy_area)\r\n print(\"my result:\", my_area)",
"def _uniform_order_statistic_cdf(i, n, x):\n return betainc(i, n-i+1, x)",
"def icdf(self, points):\n if self._y_icdf is not None:\n x = points[:, 0]\n y = points[:, 1]\n\n # map the y coordinate first.\n y_out = self._y_icdf(y)\n\n # select which x quantile curve to use.\n x_curve = y_out * (self.y_res - 1)\n x_curve = np.floor(x_curve).astype(\"int\")\n\n # map the x coordinate.\n x_range = np.arange(x.shape[0])\n x_out = np.zeros_like(x)\n for i in range(self.y_res):\n mask = x_curve == i\n x_out[x_range[mask]] = self._x_icdfs[i](x[mask])\n\n x_out = tf.cast(x_out, dtype=points.dtype)\n y_out = tf.cast(y_out, dtype=points.dtype)\n return np.column_stack((x_out, y_out))\n else:\n raise RuntimeError(\n \"CumulativeDensityFunction: Must call compute() with the correct \"\n \"direction before evaluation.\"\n )",
"def IntToCoverage(intensity, caliConst):\n return (np.sqrt((intensity - caliConst[0])))/caliConst[1];",
"def ecdf(data):\n x = np.sort(data)\n cdf = np.linspace(0, 1, len(x))\n return cdf, x",
"def __calculate_cdf(self):\n \n for (f, dist) in self.__dists.iteritems():\n for dict in self.__queries:\n dict['rss_lower_cdf']=(dict[qs.QRY_LRSS]/self.__db_size)\n dict['rss_upper_cdf']=(dict[qs.QRY_URSS]/self.__db_size)",
"def circdiff(a, b, period=360, cmin=-180, cmax=180):\r\n diff = a-b\r\n diff = diff%period\r\n if diff > cmax:\r\n diff -= period\r\n elif diff <cmin:\r\n diff += period\r\n return diff",
"def comp_cum_distribution(xs,norm=True,rank=False,data_range='data',pdf=None):\n cdf = cum_density_func(xs,norm,rank,data_range,pdf)\n max_v = np.max(cdf.values())\n return dict([(k,max_v - cdf[k]) for k in cdf.keys()])",
"def get_confidence_interval(self, scores, ci_method='bca', ci_size=0.95, replications=100000, seed_value=None):\n def score(x):\n return np.array([x.mean()])\n data = np.array([float(score) for score in scores])\n if min(data) == max(data):\n return tuple([min(data), max(data)])\n bs = IIDBootstrap(data)\n if seed_value is not None:\n bs.seed(seed_value)\n ci = bs.conf_int(score, replications, method=ci_method, size=ci_size, tail='two')\n return tuple([ci[0][0], ci[1][0]])",
"def p_donate_ci(self, a=5, alpha =1, beta=1):\n ones = self.counts[1:]\n zeros = self.counts[0]\n dist = beta_dist(ones + alpha, zeros + beta, 10000)\n lower_bound = np.percentile(dist, a / 2.0)\n upper_bound = np.percentile(dist, 100 - a / 2.0)\n mean = np.mean(dist)\n return (lower_bound, self.p_donate, upper_bound)",
"def Df_2b(c):\n xc, yc = c\n df2b_dc = empty((len(c), x.size)) \n Ri = calc_R(xc, yc)\n df2b_dc[0] = (xc - x)/Ri # dR/dxc\n df2b_dc[1] = (yc - y)/Ri # dR/dyc\n df2b_dc = df2b_dc - df2b_dc.mean(axis=1)[:, newaxis]\n return df2b_dc",
"def confidence_intervals(self, level = 95):\n margin = (100 - level) / 2 # interval is middle level% of vals, so this is margin to either side of it\n try:\n len(self.binom_control)\n len(self.binom_treatment)\n\n except:\n self.binom_distribution()\n\n control = self.binom_control\n treatment = self.binom_treatment\n\n control_upper = np.percentile(a = control, q = level + margin)\n control_lower = np.percentile(a = control, q = margin)\n self.interval_control = {'lower': control_lower, 'upper':control_upper, 'level':level}\n\n treatment_upper = np.percentile(a = treatment, q = level + margin)\n treatment_lower = np.percentile(a = treatment, q = margin)\n self.interval_treatment = {'lower': treatment_lower, 'upper':treatment_upper, 'level':level}\n\n return self.interval_control, self.interval_treatment",
"def dcsrbf(r):\n return 3*(num.power(num.maximum(0, 1-r), 3) - num.power(num.maximum(0, 1-r),2)*(3*r+1))",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n return cdfValue",
"def CDFconvertToDistr(self,pts):\n return self._convertCdfPointsToDistr(self._convertStdPointsToCdf(pts))",
"def interq_range(sig):\n #ny.percentile(sig, 75) - ny.percentile(sig, 25)\n return np.percentile(sig, 75) - np.percentile(sig, 25)",
"def cdf(weights):\r\n\treturn np.cumsum(weights) / sum(weights)",
"def calculate_cof_int(rslt, init_dict, data_frame, mte, quantiles):\n # Import parameters and inverse hessian matrix\n hess_inv = rslt[\"AUX\"][\"hess_inv\"] / data_frame.shape[0]\n params = rslt[\"AUX\"][\"x_internal\"]\n numx = len(init_dict[\"TREATED\"][\"order\"]) + len(init_dict[\"UNTREATED\"][\"order\"])\n\n # Distribute parameters\n dist_cov = hess_inv[-4:, -4:]\n param_cov = hess_inv[:numx, :numx]\n dist_gradients = np.array([params[-4], params[-3], params[-2], params[-1]])\n\n # Process data\n covariates = init_dict[\"TREATED\"][\"order\"]\n x = np.mean(data_frame[covariates]).tolist()\n x_neg = [-i for i in x]\n x += x_neg\n x = np.array(x)\n\n # Create auxiliary parameters\n part1 = np.dot(x, np.dot(param_cov, x))\n part2 = np.dot(dist_gradients, np.dot(dist_cov, dist_gradients))\n # Prepare two lists for storing the values\n mte_up = []\n mte_d = []\n\n # Combine all auxiliary parameters and calculate the confidence intervals\n for counter, i in enumerate(quantiles):\n value = part2 * (norm.ppf(i)) ** 2\n aux = np.sqrt(part1 + value)\n mte_up += [mte[counter] + norm.ppf(0.95) * aux]\n mte_d += [mte[counter] - norm.ppf(0.95) * aux]\n\n return mte_up, mte_d",
"def cppi(risky_r, safe_r=None, m=3, start=initial, floor=0.8, riskfree_rate=risk_free_rate, drawdown=None):\n # set up the CPPI parameters\n dates = risky_r.index\n n_steps = len(dates)\n account_value = start\n floor_value = start*floor\n peak = account_value\n if isinstance(risky_r, pd.Series): \n risky_r = pd.DataFrame(risky_r, columns=[\"R\"])\n\n if safe_r is None:\n safe_r = pd.DataFrame().reindex_like(risky_r)\n safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number\n # set up some DataFrames for saving intermediate values\n account_history = pd.DataFrame().reindex_like(risky_r)\n risky_w_history = pd.DataFrame().reindex_like(risky_r)\n cushion_history = pd.DataFrame().reindex_like(risky_r)\n floorval_history = pd.DataFrame().reindex_like(risky_r)\n peak_history = pd.DataFrame().reindex_like(risky_r)\n\n for step in range(n_steps):\n if drawdown is not None:\n peak = np.maximum(peak, account_value)\n floor_value = peak*(1-drawdown)\n cushion = (account_value - floor_value)/account_value\n risky_w = m*cushion\n risky_w = np.minimum(risky_w, 1)\n risky_w = np.maximum(risky_w, 0)\n safe_w = 1-risky_w\n risky_alloc = account_value*risky_w\n safe_alloc = account_value*safe_w\n # recompute the new account value at the end of this step\n account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])\n # save the histories for analysis and plotting\n cushion_history.iloc[step] = cushion\n risky_w_history.iloc[step] = risky_w\n account_history.iloc[step] = account_value\n floorval_history.iloc[step] = floor_value\n peak_history.iloc[step] = peak\n risky_wealth = start*(1+risky_r).cumprod()\n backtest_result = {\n \"Wealth\": account_history,\n \"Risky Wealth\": risky_wealth, \n \"Risk Budget\": cushion_history,\n \"Risky Allocation\": risky_w_history,\n \"m\": m,\n \"start\": start,\n \"floor\": floor,\n \"risky_r\":risky_r,\n \"safe_r\": safe_r,\n \"drawdown\": drawdown,\n \"peak\": peak_history,\n \"floor\": floorval_history\n }\n return backtest_result",
"def uniform_cdf(x):\n if x <0: return 0 #uniform random is never less than 0\n elif x < 1: return x #e.g. P(x <= 0.4) = 0.4\n else: return 1 #uniform random is always less than 1",
"def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j",
"def probability_from_internal(internal_values, constr):\n return internal_values / internal_values.sum()",
"def Df_2b(c):\r\n xc, yc = c\r\n df2b_dc = empty((size(c), x.size))\r\n\r\n Ri = calc_R(xc, yc)\r\n df2b_dc[0] = (xc - x)/Ri # dR/dxc\r\n df2b_dc[1] = (yc - y)/Ri # dR/dyc\r\n df2b_dc = df2b_dc - df2b_dc.mean(axis=1)[:, newaxis]\r\n\r\n return df2b_dc",
"def __calc_concentration(self, diam, data, dmin, dmax):\n\n dp = np.log10(diam*1e-9)\n conc = data # smoothed\n dmin = np.max((np.log10(dmin),dp[0]))\n dmax = np.min((np.log10(dmax),dp[-1]))\n dpi = np.arange(dmin,dmax,0.001)\n conci = np.sum(interp1d(dp,conc,kind='nearest')(dpi)*0.001,axis=1)\n return conci",
"def rectangular_integral(f, xrange, intervals):\n int_out = 0\n delta_x = (max(xrange)-min(xrange))/intervals\n new_xrange = np.linspace(min(xrange), max(xrange), intervals)\n for x in new_xrange:\n int_out += f(x)\n return delta_x*int_out",
"def cdilate(f, g, b=None, n=1):\n\n if b is None: b = secross()\n y = intersec(f,g)\n for i in xrange(n):\n aux = y\n y = intersec(dilate(y,b),g)\n if isequal(y,aux): break\n return y",
"def _convertDistrPointsToCdf(self,pts):\n try:\n return self.cdf(pts.real)\n except TypeError:\n return list(self.cdf(x) for x in pts)",
"def cci(self) -> float:\n return self._cci",
"def compute_two_way_critic_val(dataframe, f0, f1, loc=0.95):\n ### Factor A has two levels so () Dof_sst = DFN = a -1 = 1\n a = len(set(dataframe[f0]))\n dfn1 = a -1\n ### Factor B Dof_sst = DFN = a - 1\n b = len(set(dataframe[f1]))\n dfn2 = b -1\n ## Factor AxB (interaction)\n dfn3 = (a-1) * (b-1)\n ## Denominator\n n = len(set(dataframe[f0])) #????? number of subject in each group\n dfd = a*b*(n-1)\n\n f_cv_a = stats.f.ppf(loc, dfn1, dfd)\n f_cv_b = stats.f.ppf(loc, dfn2, dfd) ## A and B factor critical value is different when they have \n # different level of factors\n f_cv_ab = stats.f.ppf(loc, dfn3, dfd)\n print(f'Critical value for {f0}:', f_cv_a)\n print(f'Critical value for {f1}:', f_cv_b)\n print('Critical value for interaction:', f_cv_ab)\n\n F_critical = [f_cv_b, f_cv_a, f_cv_ab]\n return F_critical",
"def invcdf(p, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n p = _validate_p(p)\n\n x0, x1 = _find_bracket(lambda x: cdf(x, a, b), p, 0, 1)\n if x0 == x1:\n return x0\n\n return _fun.betaincinv(a, b, p, method=('bisect', [x0, x1]))",
"def colfct(self, x):\n for i in xrange(self.anz_seg):\n # find interval which contains x\n if self.xmin[i]<=x<=self.xmax[i]:\n # normalize to [0, 1]\n x = (x-self.xmin[i])/(self.xmax[i]-self.xmin[i])\n return self.colmap[i].colfct(x)\n print \"no interval found for x=%e - should not happen\" % x\n return 0.0",
"def contComp(periods, pr=10, pv=100):\n r = pr / 100 \n return fmtFV(pv * exp(r * periods))",
"def lagrange_coefficients_ASTME2022(interval=10, interval_type='inner'):\n\n global _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE\n if _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE is None:\n _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE = CaseInsensitiveMapping()\n\n name_lica = ', '.join((str(interval), interval_type))\n if name_lica in _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE:\n return _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE[name_lica]\n\n r_n = np.linspace(1 / interval, 1 - (1 / interval), interval - 1)\n d = 3\n if interval_type.lower() == 'inner':\n r_n += 1\n d = 4\n\n lica = _LAGRANGE_INTERPOLATING_COEFFICIENTS_CACHE[name_lica] = (\n as_float_array([lagrange_coefficients(r, d) for r in r_n]))\n\n return lica",
"def population2cdf(population: np.ndarray) -> np.ndarray:\n\n population = np.sort(population)\n return np.searchsorted(population, population, side=\"right\") / len(population)",
"def a_c(self, ci, tl, ared):\n\t return self.v_cmax(tl, ared)*(ci - self.gamma(tl))/(ci + self.k_c(tl)*(1. + (self.OI*1000.)/self.k_o(tl)))",
"def cbisector(f, B, n):\n\n y = intersec(f,0)\n for i in xrange(n):\n nb = sesum(B,i)\n nbp = sesum(B,i+1)\n f1 = erode(f,nbp)\n f2 = cdilate(f1,f,B,n)\n f3 = subm(erode(f,nb),f2)\n y = union(y,f3)\n return y",
"def _convertCdfPointsToDistr(self,pts):\n try:\n return self.ppf(pts.real)\n except TypeError:\n return list(self.ppf(x) for x in pts)",
"def ecdf(a):\n xs = np.sort(np.array(a))\n ys = np.arange(1, len(xs) + 1) / float(len(xs))\n\n return xs, ys",
"def check_cdfIntegrity(self, step):\n # Selecting bins automatically:\n x_max = self.onpower_train.max().values[0]\n x_min = 0\n step = 1\n x_onpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = 0\n x_min = self.offpower_train.min().values[0]\n step = 1\n x_offpower = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n x_max = self.duration_train.max().values[0]\n x_min = 0\n step = 1\n x_duration = np.arange(x_min, x_max, step).reshape(-1, 1)\n\n # Evaluating score for:\n # Onpower\n y_onpower = self.__pdf2(self.onpower, x_onpower)\n print(\"Onpower cdf: \" + str(y_onpower.sum()))\n\n # Offpower\n y_offpower = self.__pdf2(self.offpower, x_offpower)\n print(\"Offpower cdf: \" + str(y_offpower.sum()))\n\n # duration\n y_duration = self.__pdf2(self.duration, x_duration)\n print(\"Duration cdf: \" + str(y_duration.sum()))\n\n # Plots:\n # fig1 = plt.figure()\n # ax1 = fig1.add_subplot(311)\n # ax2 = fig1.add_subplot(312)\n # ax3 = fig1.add_subplot(313)\n\n # ax1.plot(x_onpower, y_onpower)\n # ax1.set_title(\"PDF CDF: Onpower\")\n # ax1.set_ylabel(\"density\")\n # ax1.set_xlabel(\"Watts\")\n\n # ax2.plot(x_offpower, y_offpower)\n # ax2.set_title(\" PDF CDF: Offpower\")\n # ax2.set_ylabel(\"denisty\")\n # ax2.set_xlabel(\"Watts\")\n\n # ax3.plot(x_duration, y_duration)\n # ax3.set_title(\"PDF CDF: Duration\")\n # ax3.set_ylabel(\"density\")\n # ax3.set_xlabel(\"Seconds\")",
"def ct(df=1, loc=0, scale=1, type=\"equal\", conf=0.95):\n # ==========================================================================\n # Account for the different types of cutoff quantiles\n alpha = 1 - conf\n if (type == \"less\"):\n p_lower = alpha\n p_upper = 1.0\n elif (type == \"more\"):\n p_lower = 0.0\n p_upper = conf\n elif (type == \"equal\"):\n p_lower= alpha/2\n p_upper = 1 - (alpha/2)\n\n # calculate the cutoff points\n cutoff_lower = qt(p_lower, df=df, lowertail=True, loc=loc, scale=scale)\n cutoff_upper = qt(p_upper, df=df, lowertail=True, loc=loc, scale=scale)\n return [cutoff_lower, cutoff_upper]",
"def calculate_ci(data, ci_level=0.99):\n\n # remove NaNs\n ys = data.dropna().values\n\n # calculate CI\n n = len(ys)\n std_err = sem(ys)\n h = std_err * t.ppf((1 + ci_level) / 2, n - 1)\n\n return h",
"def get_ccdf(degseq):\n uniques, counts = np.unique(degseq, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degseq.size)\n return uniques[::-1], (1. - cumprob)[::-1]",
"def make_cdf(self, bin_edges, enval, enindex, czindex, czval, dist_params):\n dim = \"coszen\" if czval is not None else \"energy\"\n\n weighted_physical_int = []\n binwise_cdfs = []\n for this_dist_dict in dist_params:\n dist_kwargs = {}\n for dist_prop, prop_vals in this_dist_dict['kwargs'].items():\n dist_kwargs[dist_prop] = prop_vals[enindex, czindex]\n frac = this_dist_dict['fraction'][enindex,czindex]\n\n # now add error to true parameter value\n dist_kwargs['loc'] += czval if czval is not None else enval\n rv = this_dist_dict['dist'](**dist_kwargs)\n cdfs = frac*rv.cdf(bin_edges)\n\n if self.only_physics_domain_sum:\n cdf_low, cdf_high = get_trunc_cdf(rv=rv, dim=dim)\n int_weighted_physical = frac*(cdf_high-cdf_low)\n weighted_physical_int.append(int_weighted_physical)\n\n if self.only_physics_domain_distwise:\n cdfs = truncate_and_renormalise_dist(\n rv=rv, frac=frac, bin_edges=bin_edges, dim=dim\n )\n\n binwise_cdfs.append(cdfs[1:] - cdfs[:-1])\n\n binwise_cdf_summed = np.sum(binwise_cdfs, axis=0)\n\n if self.only_physics_domain_sum:\n binwise_cdf_summed = \\\n truncate_and_renormalise_superposition(\n weighted_integrals_physical_domain=weighted_physical_int,\n binwise_cdf_summed=binwise_cdf_summed\n )\n\n return binwise_cdf_summed",
"def cdf(self,x):\n if self.method == 'spline':\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n cdfValue = self._distribution.cdf(coordinate)\n else:\n self.raiseAnError(NotImplementedError,'cdf not yet implemented for ' + self.method + ' method')\n return cdfValue",
"def continuous_to_discrete(cont_data, bounds, values=[], labels=[]):\n\n \n N = len(bounds)-1\n disc_data = cont_data.copy()\n assign_values = []\n\n for ii in range(N):\n if len(values)==N:\n assign_val = values[ii]\n else:\n assign_val = (bounds[ii]+bounds[ii+1])/2.0\n \n assign_values = assign_values+[assign_val]\n disc_data[(disc_data>=bounds[ii]) & (disc_data<bounds[ii+1])] = assign_val\n\n # Set colorbar ticks and labels\n CB_tickvalues = np.array(list(zip(bounds,assign_values))).flatten()\n \n if len(labels)==N:\n CB_ticklabels = np.array(list(zip(['']*N,labels))).flatten()\n else:\n #CB_ticklabels = np.array(['']*(N*2)).flatten()\n assign_values_str = [str(a) for a in assign_values]\n CB_ticklabels = np.array(list(zip(['']*N,assign_values_str))).flatten()\n\n # CB0.set_ticks(CB_tickvalues)\n # CB0.set_ticklabels(CB_ticklabels)\n \n return disc_data, CB_tickvalues, CB_ticklabels",
"def bcRange(self):\n\t\treturn fabs(self.Upper - self.Lower)",
"def getInterval(self) -> float:\n\t\treturn self[self._bcni]",
"def test_conf_interval_normal_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.14, -4.88, -3.24, -2.98), (\n \"quantiles are incorrect\")",
"def cdf(self, x):\n\n pi = 3.1415926536\n mean = self.mean\n stddev = self.stddev\n\n x1 = (x - mean) / (stddev * (2 ** 0.5))\n\n erf1 = (2/pi**0.5)\n erf2 = (x1-((x1**3)/3)+((x1**5)/10)-((x1**7)/42)+((x1**9)/216))\n erf = erf1 * erf2\n cdf = (1/2)*(1+erf)\n\n return cdf",
"def calculate_cci(hunterlab):\n return 1000 * (hunterlab[1]) / (hunterlab[0] * hunterlab[2])"
] | [
"0.6312664",
"0.60228646",
"0.5892485",
"0.58310425",
"0.56982505",
"0.5682813",
"0.5677748",
"0.5657644",
"0.5619041",
"0.5616922",
"0.55988735",
"0.5576793",
"0.5562751",
"0.55557513",
"0.5543005",
"0.5532123",
"0.55102867",
"0.55094415",
"0.55092555",
"0.5476659",
"0.54524666",
"0.5439542",
"0.542626",
"0.5417793",
"0.5405346",
"0.5402678",
"0.5390032",
"0.5385955",
"0.53728783",
"0.5359358",
"0.53471154",
"0.53465784",
"0.53386503",
"0.5320026",
"0.5317475",
"0.53023154",
"0.5302116",
"0.5288451",
"0.52834",
"0.5281701",
"0.5260467",
"0.5238819",
"0.52144426",
"0.520248",
"0.5176481",
"0.5173251",
"0.5173249",
"0.5166435",
"0.5158202",
"0.5155566",
"0.5146861",
"0.51403177",
"0.51395875",
"0.51391953",
"0.5127144",
"0.51207507",
"0.5118926",
"0.5114121",
"0.5109452",
"0.5101614",
"0.5092468",
"0.5091695",
"0.5086956",
"0.5086956",
"0.50862217",
"0.50858027",
"0.5083214",
"0.5081659",
"0.50775176",
"0.507561",
"0.50623035",
"0.50601417",
"0.5051338",
"0.5049836",
"0.50313914",
"0.50281954",
"0.5028156",
"0.5020162",
"0.50201106",
"0.50200707",
"0.5017049",
"0.50108105",
"0.50089383",
"0.50028986",
"0.49997088",
"0.4999457",
"0.49919426",
"0.49778545",
"0.49745142",
"0.4970863",
"0.49629146",
"0.49618518",
"0.49560323",
"0.4955698",
"0.4954098",
"0.49529088",
"0.49525982",
"0.49459603",
"0.4942586",
"0.4935952"
] | 0.7396832 | 0 |
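As a side note on the positive document above: the interval extraction is just a threshold scan over a sorted discrete CDF, which NumPy can do directly with searchsorted. A minimal standalone sketch — the helper name credible_interval_from_cdf and the toy posterior are illustrative assumptions, not part of the dataset:

import numpy as np

def credible_interval_from_cdf(hypotheses, cdf, interval=(0.025, 0.975)):
    # Hypothetical helper: pick the first hypothesis whose cumulative
    # probability exceeds each quantile bound, matching
    # np.where((cdf - q) > 0)[0].min() in the document above.
    lo = hypotheses[np.searchsorted(cdf, interval[0], side='right')]
    hi = hypotheses[np.searchsorted(cdf, interval[1], side='right')]
    return lo, hi

# Toy discrete posterior over 100 hypotheses, peaked near 0.6.
hyps = np.linspace(0.0, 1.0, 100)
pmf = np.exp(-0.5 * ((hyps - 0.6) / 0.1) ** 2)
pmf /= pmf.sum()
print(credible_interval_from_cdf(hyps, np.cumsum(pmf)))  # roughly (0.4, 0.8)

searchsorted(..., side='right') returns the first index i with cdf[i] > q, so it reproduces the np.where thresholding in O(log n) per bound instead of a full scan.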
Plots cumulative distribution for various inputs. | def plot_cdf(self, distType='posterior', plotType='line', figSize=(5,4)):
# Calculate cdf to plot
distToPlot = self.cumulative_distribution(dist=distType)
# Create figure
fig = plt.figure(figsize=figSize)
# Create colormap
colors = cm.rainbow(np.linspace(0, 1, len(distToPlot)))
# Determine plot type
if plotType=='line':
plt.plot(self.hypotheses, distToPlot.T)
elif plotType=='bar':
for row, co in zip(distToPlot, colors):
plt.bar(self.hypotheses, row, width=0.25,
align='center', alpha=0.5, color=co)
elif plotType=='point':
for row, co in zip(distToPlot, colors):
plt.scatter(self.hypotheses, row,
alpha=1.0, color=co)
else:
sys.exit('Plot type not recognized.')
plt.legend(np.arange(np.shape(distToPlot)[0]),
loc='center left',
bbox_to_anchor=(1,0.5),
title='Iteration')
plt.xlabel('Hypotheses', fontsize=14)
plt.ylabel('Cumulative probability', fontsize=14)
plt.ticklabel_format(useOffset=False)
# If fewer than 20 hypotheses, treat xticks as categorical
if len(self.hypotheses) < 20:
plt.xticks(self.hypotheses)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_cumulative_distr_plot(data):\n x = data.index\n y = data[\"cumprop\"]\n plot = go.Bar(x=x, y=y, showlegend=False)\n\n return plot",
"def plot_cdf(self, **options):\n plt.plot(self.xs, self.ps, **options)",
"def plot_cumulative_distribution(data, fig_title, ax_labels=None, resolution=.01, filename=None):\r\n if not isinstance(data, ndarray):\r\n return TypeError('Expected data as numpy array.')\r\n\r\n cumulative_function = estimate_cumulative(data, num_bins=int(1 / resolution * 2))\r\n cumulative_function = vectorize(cumulative_function)\r\n reference = linspace(0, 1, 50)\r\n cumulative = cumulative_function(reference)\r\n\r\n fig, ax = subplots()\r\n ax.plot(reference, cumulative, '.-')\r\n\r\n ax.set_title(fig_title)\r\n\r\n set_labels(ax, fig_title, ax_labels)\r\n tight_layout()\r\n\r\n if filename is not None:\r\n savefig(filename)\r\n\r\n return ax",
"def plotDistributions(self, normalize = True, cumulative = False, testMode = False, **kwargs):\n\n if (not testMode): # For testing: do not run plots if testMode\n figsize = kwargs.get('figsize',(10, 5))\n bins = kwargs.get('bins',100)\n\n f, ax = plt.subplots(figsize=figsize)\n ax.hist(self.collectedCounts, bins)\n return ax, self.collectedCounts\n else:\n return None, self.collectedCounts",
"def cumulative_plot(self, with_powerlaw=False, **kwargs):\n x,y = split(self.cumulative_distribution())\n if \"label\" not in kwargs:\n kwargs[\"label\"] = \"$P(k%s)$\" % self.texindex\n p = pylab.loglog(x,y, **kwargs)\n pylab.xlabel(\"$k%s$\" % self.texindex, fontsize=self.labelfontsize)\n pylab.ylabel(\"$P(k%s)$\" % self.texindex, fontsize=self.labelfontsize)\n pylab.title(\"Cumulative %s distribution\" % self.degree_type)\n if with_powerlaw:\n kwargs.pop(\"marker\", None)\n kwargs.pop(\"label\", None)\n if self.gamma is None:\n self.exponent()\n powerlaw.plot(exponent=-self.gamma + 1,\n xmax=self.max_deg, xmin=self.k_min,\n num=2,\n **kwargs\n )\n return p",
"def plot_cum_clustering_dist(net, label, outpath, turbo):\n net.removeSelfLoops()\n local_cc = networkit.centrality.LocalClusteringCoefficient(net, turbo)\n local_cc.run()\n unique_cc, unique_cc_cnt = np.unique(local_cc.scores(), return_counts=True)\n unique_cc_cumcnt = np.cumsum(unique_cc_cnt)/sum(unique_cc_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n plt.axis([0, 1, 0, 1])\n ax.plot(unique_cc, unique_cc_cumcnt, 'b-')\n # ax.set_title('Cumulative distribution of clustering coefficient of nodes')\n ax.set_xlabel('local clustering coefficient c')\n ax.set_ylabel('p(x <= c)')\n plt.savefig(outpath + label + \"-cc-distribution.eps\")",
"def plot_eigenvalue_cumsum(eigenvalues):\n normalized = eigenvalues / np.sum(abs(eigenvalues))\n cumsum = np.cumsum(normalized)\n plt.figure(0)\n plt.xlabel('Eigenvalue')\n plt.ylabel('Cumulative sum')\n plt.title('Eigenvalue cumulative sum')\n plt.plot(np.arange(len(cumsum)), cumsum)",
"def gen_plot_cumulative_gain(df, currency, filename):\n # remove existing plot\n if glob.glob('images/cumulative*.png'):\n for f in glob.glob(\"images/cumulative*.png\"):\n os.remove(f)\n\n # find year\n tax_year = df.iat[0, 2][:4]\n\n # summing transactions by date\n cum = df.groupby(by=[\"Date Sold\"]).sum().sort_index()[['Gain&Loss']]\n\n # fill in empty dates\n cum.index = pd.DatetimeIndex(cum.index)\n all_dates = pd.date_range(start=f\"{tax_year}-01-01\",\n end=f\"{tax_year}-12-31\")\n\n # calculate cumulative sum for all dates\n cum = cum.reindex(all_dates).fillna(0.0).rename_axis('Date Sold').cumsum()\n\n # generate cumulative plot\n cum_plot = sns.lineplot(data=cum, x=\"Date Sold\", y=\"Gain&Loss\")\n cum_plot.set_title(f\"Cumulative Gain and Loss in {tax_year} in {currency}\")\n cum_plot.set_xlabel('')\n cum_plot.get_figure().savefig(f\"images/{filename}\")\n plt.close()",
"def plot_cdf(self, data, ax=None, survival=False, **kwargs):\n bins, CDF = self.cdf(data, survival=survival, **kwargs)\n if not ax:\n fig, ax = plt.subplots()\n ax.loglog(bins, CDF, **kwargs)\n else:\n fig = ax.get_figure()\n ax.plot(bins, CDF, **kwargs)\n return fig, ax",
"def cumulative_distribution(self, X):\n raise NotImplementedError",
"def plotDistributions(self, normalize = True, cumulative = False, testMode = False, **kwargs):\n\n countDistributions = {}\n\n if (not testMode): # For testing: do not run plots if testMode\n figsize = kwargs.get('figsize',(10, 5))\n bar_plt = kwargs.get('bar', False)\n f, ax = plt.subplots(figsize=figsize)\n\n # iterate through each sample\n for label, data in self.collectedCounts.items():\n\n if data == set(): # Do not plot any empty sets of data\n continue\n\n sData = sorted(data)\n values = list(map(lambda p: p[0], sData))\n counts = list(map(lambda p: p[1], sData))\n\n if normalize:\n # replace distribution counts with normalized values\n sumCounts = float(sum(counts))\n counts = [i/sumCounts for i in counts]\n\n if cumulative:\n # calculate cumulative sum of counts\n counts = np.cumsum(counts)\n\n # re-write manipulated data\n countDistributions[label]=list(zip(values, counts))\n\n if (not testMode): # For testing: do not run plots if testMode\n if (bar_plt):\n ax.bar(values, counts, 1, label = label)\n else:\n ax.plot(values, counts, label = label)\n\n # return plots once all samples have been added\n if (not testMode): # For testing: do not run plots if testMode\n ax.legend(loc=2, shadow = True, bbox_to_anchor=(1.05, 1))\n return ax, countDistributions\n else:\n return None, countDistributions",
"def plt_cumulative_hist(v, bins=10):\n values, base = np.histogram(v, bins=bins)\n cumulative = np.cumsum(values)\n plt.plot(base[:-1], cumulative, c='blue')",
"def convolute_plot(lam, mu, sigma, nEntries, randomState=None):\n np.random.seed(randomState) # to have the same starting point\n \n xb = np.arange(-30,500000, 5000)\n xp = np.arange(-30,30,0.2)\n \n # Plot the exponential curve\n plt.figure()\n plt.subplot(3,1,1)\n xf = stats.expon(0.,1./lam).rvs(nEntries)\n plt.hist(xf,xb, normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n # Plot the gaussian distribution\n plt.subplot(3,1,2) \n xg = stats.norm(mu, sigma).rvs(nEntries)\n plt.hist(xg,xp, normed=True)\n plt.plot(xp,stats.norm(mu,sigma).pdf(xp))\n \n # Plot the convolution of the two distributions\n plt.subplot(3,1,3)\n plt.hist(xf+xg,xb,normed=True)\n plt.plot(xb, stats.expon(0,1./lam).pdf(xb))\n \n data_set = xf+xg\n return data_set",
"def ecdf_plot(ecdf_q1, ecdf_q2, ecdf_q3, ecdf_q4, performance_measure, ecdf_parameter):\n from plotly.offline import iplot\n import plotly.graph_objs as go\n\n performance_measure = performance_measure.replace('_', ' ').capitalize()\n ecdf_parameter = ecdf_parameter.replace('_', ' ').capitalize()\n\n ecdf_1 = go.Scatter(x=ecdf_q1.x,\n y=ecdf_q1.y,\n name='0 to 25',\n mode='lines+markers',\n marker=dict(size='7', color='#0C3383'))\n ecdf_2 = go.Scatter(x=ecdf_q2.x,\n y=ecdf_q2.y,\n name='25 to 50',\n mode='lines+markers',\n marker=dict(size='7', color='#57A18F'))\n ecdf_3 = go.Scatter(x=ecdf_q3.x,\n y=ecdf_q3.y,\n name='50 to 75',\n mode='lines+markers',\n marker=dict(size='7', color='#F2A638'))\n ecdf_4 = go.Scatter(x=ecdf_q4.x,\n y=ecdf_q4.y,\n name='75 to 100 (best wells)',\n mode='lines+markers',\n marker=dict(size='7', color='#D91E1E'))\n\n data = [ecdf_1, ecdf_2, ecdf_3, ecdf_4]\n\n layout = go.Layout(height=650,\n width=650,\n title='ECDF ' + ecdf_parameter,\n titlefont=dict(size=18),\n\n xaxis=dict(title=ecdf_parameter,\n titlefont=dict(size=16),\n type=None,\n zeroline=False,\n showgrid=True,\n showline=False,\n autorange=True),\n\n yaxis=dict(title='Cumulative Probability',\n titlefont=dict(size=16),\n showgrid=True,\n showline=False,\n zeroline=False,\n tickvals=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],\n range=[-0.03, 1.03]),\n\n legend=dict(x=0.65, y=0.1, font=dict(size=14)),\n margin={'l': 50, 'r': 10, 'b': 50, 't': 85})\n\n layout.update(dict(annotations=[go.Annotation(text='Quantiles: ' + performance_measure,\n x=np.max(ecdf_q4.x),\n y=0.3,\n showarrow=False,\n bgcolor='#FFFFFF',\n font=dict(size=16))]))\n\n plot = go.Figure(data=data, layout=layout)\n\n iplot(plot, show_link=False)",
"def gc_plot(data, **kwargs):\n hist = gc_percent_hist(data)\n sns.lineplot(x=range(0, 101), y=hist, **kwargs)",
"def plotCumulativeMovingAverage(self, x: list, title: str = \"Cumulative moving average\") -> None:\n C = self.cumulativeMovingAverage(x)\n plt.plot(C)\n plt.xlabel('index')\n plt.ylabel('cumulative moving average')\n plt.title(title)\n plt.show()",
"def cdf(data, args):\n return Plot._dist(data, args)",
"def plot(self, n=1000, hist=True, kde=False):\n sns.set(rc={\"xtick.bottom\": True, \"ytick.left\": True})\n sims = [self.estimate() for i in range(n)]\n fig, ax = plt.subplots(figsize=(10, 8))\n\n if hist:\n kwargs = {'cumulative': False, 'edgecolor': \"k\", 'linewidth': 1}\n plot = sns.distplot(sims, bins=math.floor(max(sims)), hist=True,\n kde=kde,norm_hist=False, hist_kws=kwargs,\n ax=ax)\n plt.title('Histogram - days to project completion '\n '- n = {}'.format(n))\n plt.axvline(x=np.median(sims), color='red', label='50%')\n plt.text(np.median(sims)-0.5, -2, '50%', color='red')\n plt.show()\n\n else:\n kwargs = {'cumulative': True, 'edgecolor': \"k\", 'linewidth': 1}\n plot = sns.distplot(sims, bins=math.floor(max(sims)),\n hist=True, kde=False, norm_hist=True,\n hist_kws=kwargs)\n plt.title('Cumulative histogram - days project to completion '\n '- n = {}'.format(n))\n plt.show()\n\n return plot",
"def run_plot(args):\n # print(\"running chronqc_plot\")\n chronqc_plot.main(args)",
"def plot_win_cum_dist(perc: pd.Series, cum_perc_wins: pd.Series, title: str, \n ylims: Tuple[int, int]=(-0.05, 1.05)) -> Tuple[plt.Figure, plt.Axes]:\n fig, ax = plt.subplots()\n ax.plot(perc, cum_perc_wins)\n ax.set_ylim(ylims)\n ax.set_title(title)\n fig.tight_layout()\n return fig, ax",
"def plot_cumreturn(x):\n return go.Scatter(x=df_plot['Date'], y=df_plot[x], mode='lines', name=x)",
"def plot_ccum_centrality_dist(centrality_filename, label, outpath, centrality_name):\n unique_val, unique_cc_prob = networkit_util.get_cc_centrality_distr(centrality_filename)\n centrality_style = {'eigenvector-centrality': 'c*', 'pagerank': 'g*', 'hub': 'r*', 'authority': 'm*', 'betweeness': 'b*'}\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.loglog(unique_val, unique_cc_prob, centrality_style[centrality_name], label=label + '-' + centrality_name)\n ax.set_xlabel('v')\n ax.set_ylabel('P(x>=v)')\n plt.savefig(outpath + label + '-' + centrality_name + '-distribution.eps')\n return ax",
"def interactions_plot():\n data = load_data('ints_CC'),load_data('ints_CD')\n fig,ax = plt.subplots()\n plot_mean_std(data_CC,ax,'C-C interactions')\n plot_mean_std(data_CD,ax,'C-D interactions')\n plt.xlabel('cluster size, n')\n plt.legend(loc='best')\n plt.savefig('interactions.pdf')",
"def _plot_ecdf(data, label='Value', alpha=1):\n data = np.array(data)\n data = np.sort(data)\n t = len(data)\n prob = np.arange(t) / t\n plt.plot(data, prob, label=label, alpha=alpha)",
"def visualize(self):\n self.dataFrame.hist()\n plt.show()",
"def distribution_horizontale(args):\n number_files = [2,5,10,20];\n nbreFileNotDisplay = 0;\n comment = \"\";\n num_bins = args[\"num_bins\"];\n rep = args[\"path_save\"]+args[\"correction\"]+\\\n \"/data_p_\"+str(args[\"p_value\"])+\"/distribution/\";\n w = 4; h = 1; # width = largueur, height = longueur\n fig = plt.figure( figsize=(w,h) ); \n cpt_ax1 = 0;\n for num in number_files:\n print(\"num = \", num)\n num = int(num)\n cpt_ax1 += 1;#cpt = num; # cpt += 1\n \n # ax1\n ax1 = fig.add_subplot(2,len(number_files),cpt_ax1);\n df = pd.read_csv(rep+args[\"fichier_prefix\"] +str(num)+args[\"ext\"], \\\n names=[\"cpt\",\"moy_dc\",\"moy_dh\", \"nbre_aretes_matE\", \"correl_dh_dl\"], \\\n sep=';')\n N_graphs = df[\"moy_dc\"].count()\n \n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dc\"])\n num_bins = df[\"moy_dc\"].max()+1\n bins = range(0,int(num_bins)); bins = range(0, 100)\n print(\"---> bins = \", bins, \" min = \",df[\"moy_dc\"].min(), \\\n \" max = \",df[\"moy_dc\"].max())\n \n max_count_dl, max_count_dh = count_max_df(df)\n \n sns.distplot(df[\"moy_dc\"], ax = ax1, bins = bins, kde = False)\n ax1.set(xlabel= \"moy_distance_correction\", ylabel= \"nombre_graphe\", \\\n title = \"distance de correction pour \\n \"+ str(num)+\\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma)+ \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean))\n ax1.plot([num+1,num+1], (0,max_count_dl), 'r--' )\n ax1.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax1.get_yticks()])\n \n # ax2\n cpt_ax2 = cpt_ax1 +len(number_files); #cpt = num+len(number_files); # cpt +=1 ;\n ax2 = fig.add_subplot(2,len(number_files),cpt_ax2);\n N_graphs = df[\"moy_dh\"].count()\n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dh\"])\n \n num_bins = df[\"moy_dh\"].max()+1\n bins = range(0 ,int(num_bins)); bins = range(0, 100)\n\n sns.distplot(df[\"moy_dh\"], ax = ax2, bins = bins, kde = False, color = 'red')\n ax2.set(xlabel= \"moy_distance_hamming\", ylabel= \"nombre_graphe\", \\\n title = \"distance de Hamming pour \\n \"+ str(num)+ \\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma) + \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean()))\n# ax2.set_xticklabels(bins, rotation=90)\n ax2.plot([num+1,num+1], (0,max_count_dh), 'r--' )\n ax2.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax2.get_yticks()])\n \n for ax in [ax1,ax2]:\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(8)\n \n# plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.grid(True)\n comment += \"_horizontale\";\n plt.savefig(args[\"path_save\"]+args[\"correction\"]+\"/courbes/\"+\\\n \"distributionHorizontale_k_0_\"+str(number_files[len(number_files)-1])+\\\n \"_\"+comment+\".jpeg\", \\\n dpi= 190)\n pass",
"def plot(self):\n\t\tself.plotOfCos1().plot()",
"def plot_cumreward_normalized(reward_cache_qlearning, reward_cache_SARSA):\n cum_rewards_q = []\n rewards_mean = np.array(reward_cache_qlearning).mean()\n rewards_std = np.array(reward_cache_qlearning).std()\n count = 0 # used to determine the batches\n cur_reward = 0 # accumulate reward for the batch\n for cache in reward_cache_qlearning:\n count = count + 1\n cur_reward += cache\n if(count == 10):\n # normalize the sample\n normalized_reward = (cur_reward - rewards_mean)/rewards_std\n cum_rewards_q.append(normalized_reward)\n cur_reward = 0\n count = 0\n \n cum_rewards_SARSA = []\n rewards_mean = np.array(reward_cache_SARSA).mean()\n rewards_std = np.array(reward_cache_SARSA).std()\n count = 0 # used to determine the batches\n cur_reward = 0 # accumulate reward for the batch\n for cache in reward_cache_SARSA:\n count = count + 1\n cur_reward += cache\n if(count == 10):\n # normalize the sample\n normalized_reward = (cur_reward - rewards_mean)/rewards_std\n cum_rewards_SARSA.append(normalized_reward)\n cur_reward = 0\n count = 0 \n # prepare the graph \n plt.plot(cum_rewards_q, label = \"q_learning\")\n plt.plot(cum_rewards_SARSA, label = \"SARSA\")\n plt.ylabel('Cumulative Rewards')\n plt.xlabel('Batches of Episodes (sample size 10) ')\n plt.title(\"Q-Learning/SARSA Convergence of Cumulative Reward\")\n plt.legend(loc='lower right', ncol=2, mode=\"expand\", borderaxespad=0.)\n plt.show()\n plt.savefig('cumulative_reward.png')",
"def plot(self, n_confs):\n \n import pandas as pd\n import numpy as np\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n import csv\n \n n_iter = len(self.plot_data)\n \n data = np.ndarray((n_iter, n_confs+1))\n data[:,0] = [i[0] for i in self.plot_data]\n data[:,1:] = [i[1].detach().cpu().numpy() for i in self.plot_data]\n\n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n df.to_csv(f\"{self.plot_name}.tab\", sep=\"\\t\", quoting=csv.QUOTE_NONE) \n\n d = data[:,1:].reshape(-1)\n d = d[~np.isnan(d)]\n mine = d.min() - 0.01\n for i in range(n_confs): \n data[:,i+1] -= mine\n \n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n \n colors = (0,0,0)\n area = 10\n \n # Plot\n fig = plt.figure(figsize=(15, 15))\n ax = fig.add_subplot(1,1,1)\n for i in range(n_confs):\n ax.plot('iter', f'c{i+1}', data=df)\n ax.set_yscale('log')\n\n plt.xlabel('iter')\n plt.ylabel('loss')\n plt.savefig(f'{self.plot_name}.png')",
"def continuous_plot(iterations, grn):\n width, height = size = (600,600)\n screen = pygame.display.set_mode(size)\n # order the colors for the TF andP proteins\n colors = []\n conc_list = []\n extra_up, extra_down = False, False\n\n for gene in grn.genes:\n \n if gene.gene_type == \"TF\":\n colors.append((0, 0, 255))\n elif gene.gene_type == \"P\":\n colors.append((0, 255, 0))\n elif gene.gene_type == \"EXTRA\":\n colors.append((255,0,0))\n prev_extra = 600-(gene.concentration * 600)\n\n conc_list.append(600-(gene.concentration * 600))\n\n # add variables for user input\n\n for i in range(iterations):\n #check for keypress\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if pygame.key.get_pressed()[pygame.K_UP]:\n extra_up = True\n if pygame.key.get_pressed()[pygame.K_DOWN]:\n extra_down = True\n elif event.type == pygame.KEYUP:\n extra_up, extra_down = False, False\n if extra_up: \n grn.change_extra(0.01)\n if extra_down: \n grn.change_extra(-0.01)\n #run grn and get protein concentration\n results = grn.regulate_matrix(2, False)\n scaled = [int(600-(x * 600)) for x in results]\n old_conc = conc_list\n conc_list = scaled\n \n for idx, conc in enumerate(conc_list):\n pygame.draw.line(screen, colors[idx], \n (width-3, old_conc[idx]), \n (width-2, conc))\n\n # if draw_extra:\n # pygame.draw.line(screen, colors[-1], \n # (width-3, 600-prev_extra-1), \n # (width-2, 600-extra))\n\n pygame.display.flip()\n #screen.blit(screen, (-1, 0))\n screen.scroll(-1,0)\n pygame.time.wait(5)",
"def PlotHistCumul_AllMovies(exp_type=\"MDCK_WT_Pure\", generation=1, show=False):\n\n # Extract, analyse & plot merged data first (zorder = high -> to ensure it will be on top):\n directory = \"/Volumes/lowegrp/Data/Kristina/{}/\".format(exp_type)\n merged_file = directory + \"cellIDdetails_merged.txt\"\n generation_list = PlotHistGenerationCCT(txt_file=merged_file).CreateGenerationList(print_stats=False)\n gen = generation_list[generation-1]\n merged_mean = round(np.mean(gen), 2)\n merged_std = round(np.std(gen), 2)\n\n # Plot cumulative hist of merged data:\n n_bins = 50\n plt.figure(figsize=(8.5, 6))\n plt.hist(gen, bins=n_bins, density=True, histtype='step', cumulative=True, linewidth=3.0, color='black',\n label='MERGED DATA\\ncellIDs = {}'.format(len(gen)), zorder=20)\n plt.axvline(merged_mean, alpha=0.5, color='grey', zorder=1, linestyle='dashed', linewidth=2.0)\n plt.axvspan(merged_mean - merged_std, merged_mean + merged_std, alpha=0.3, color='grey', zorder=1,\n label='MERGED DATA\\nMean ± St.Dev\\n{} ± {}'.format(merged_mean, merged_std))\n\n # Now add individual movies to the plot:\n _, txt_file_list = GetMovieFilesPaths(exp_type=exp_type)\n\n for file in sorted(txt_file_list):\n filtered_file = file.replace(\"raw\", \"filtered\")\n file = file.split(\"/\")\n file_generation_list = PlotHistGenerationCCT(txt_file=filtered_file).CreateGenerationList(print_stats=False)\n if len(file_generation_list) >= generation: # to make sure that index is not out of range in the list\n file_gen = file_generation_list[generation-1]\n plt.hist(file_gen, bins=n_bins, density=True, histtype='step', cumulative=True, linewidth=1.5,\n label='{}-{}\\ncellIDs = {}'.format(file[-4], file[-3], len(file_gen)))\n\n # Tidy up the figure:\n plt.title(\"Cumulative step histograms:\\nGeneration #{} - Merged data vs. {} '{}' movies\"\n .format(generation, len(txt_file_list), exp_type))\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=5)\n plt.ylabel('Likelihood of occurrence')\n plt.xlabel('Cell Cycle Duration [hours]')\n plt.xticks(list(range(0, n_bins + 2, 2)))\n plt.xlim(0 - n_bins / 20, n_bins + n_bins / 20) # 5% ± the min & max point\n plt.ylim(-0.1, 1.1)\n\n # Save, show & close:\n plt.savefig(directory + 'Hist_Cumulative_CCT_Gen-{}_All-Movies.jpeg'.format(generation), bbox_inches=\"tight\")\n if show is True:\n plt.show()\n plt.close()",
"def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()",
"def pixel_ts_distribution(self):\n fig,ax = plt.subplots(figsize=(8,6))\n bins = np.linspace(0,25,501)\n tsvec=self.tsmap.vec\n ax.hist(tsvec, bins, log=True, histtype='step', lw=2, cumulative=-1, label='data');\n # make array corresponding to the hist\n h = np.histogram(tsvec, bins, )[0]\n x = bins[:-1]\n yh = sum(h)-h.cumsum() \n f = lambda x: np.exp(-x/2)\n ye=6e5*f(x)\n ax.plot(x, ye, '-g', lw=2, label='exp(-TS/2)')\n ax.fill_between(x,yh,ye,where=x>5, facecolor='red', alpha=0.6)\n plt.setp(ax, xscale='linear', xlabel='TS', ylim=(1,None), ylabel='# greater than TS')\n ax.legend()\n ax.set_title('Cumulative distribution of single-pixel TS values for {}'.format(self.skymodel),\n fontsize=14)\n ax.grid(True, alpha=0.5) \n fig.set_facecolor('white')\n return fig",
"def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()",
"def plot_cdf(self, max_t=200, show=True, savename=False):\n\n plt.figure()\n time = self.dt*np.arange(1, int(max_t / self.dt)).astype(float)\n\n plt.plot(time, self._analytical_passage_time_cdf(time), lw=2, label='analytical')\n plt.plot(time, self._empirical_cdf(time), lw=2, label='empirical')\n\n # formatting\n plt.legend(fontsize=14)\n plt.xlabel('Time', fontsize=14)\n plt.ylabel('Cumulative Density', fontsize=14)\n plt.tick_params(labelsize=14)\n plt.tight_layout()\n\n if savename is not None:\n plt.savefig('%s_cdf.pdf' % savename)\n\n if show:\n plt.show()",
"def plot_ecdf(self, variant_one, variant_two):\n if variant_one == variant_two:\n raise ValueError('variant_one and variant_two cannot be the same')\n if variant_one not in self.posteriors.keys() or \\\n variant_two not in self.posteriors.keys():\n raise ValueError(('Variants must only be a value in column '\n '{}'.format(self.bucket_col_name)))\n\n if variant_one in self.ecdf.keys() and \\\n variant_two in self.ecdf[variant_one].keys():\n self._plot_ecdf(numerator_name=variant_one,\n denominator_name=variant_two)\n plt.ylabel('Cumulative Lift: {0} vs {1}'\n .format(variant_two, variant_one))\n else:\n self._plot_ecdf(numerator_name=variant_two,\n denominator_name=variant_one)\n plt.ylabel('Cumulative Lift: {0} vs {1}'\n .format(variant_one, variant_two))",
"def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)",
"def cumulative_distribution(self):\n\n cum_dd = []\n sum_p = 0\n for k, p in reversed(self.dd):\n sum_p += p\n cum_dd.append((k, sum_p))\n return list(reversed(cum_dd))",
"def plot_ecdf(datasets, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n _plot_ecdf(data, labels[idx], alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Cumulative Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/ecdf_\"+\"_\".join(labels)+\".png\")\n plt.show()",
"def plotCadHists(self):\n n_lcs = len(self.lcs)\n if n_lcs > 0:\n x = int(np.sqrt(n_lcs))\n y = n_lcs / x + int(n_lcs % x > 0)\n plotnum = 1\n for lc in self.lcs:\n plt.subplot(x, y, plotnum)\n plt.hist(lc.cads, 50, range=(0, np.std(lc.cads) * 2.0))\n plt.xlabel('Time to next obs.')\n plt.ylabel('# Occurrences')\n plotnum += 1\n plt.show()\n return",
"def plot_gain(df, outcome_col='y', treatment_col='w', treatment_effect_col='tau',\n steps=100, normalize=False, random_seed=42, figsize=(8, 8)):\n\n cumgain = get_cumgain(df, outcome_col, treatment_col, treatment_effect_col, steps, normalize, random_seed)\n\n cumgain.plot(figsize=figsize)\n plt.xlabel('Fraction of Population')\n plt.ylabel('Cumulative Gain')",
"def main(x_axis, y_axis, filtered, unfiltered, name, histogram, total, true_max):\n axes = [x_axis, y_axis, 'description']\n uf_dict, f_dict, min_x, max_x, min_y, max_y = data_from_sc_file(axes, filtered, unfiltered, true_max)\n gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total)",
"def Lambda_CC_plot(data_CC,show_error=True,discrete=False):\n sns.set_context('paper')\n fig,ax = plt.subplots()\n plot_Lambda_CC(data_CC,ax,show_error=show_error,discrete=discrete)\n formatting(r'Fraction of cooperators, $n/N$',r'$\\Lambda^{CC}_n$',large=False)\n plt.savefig('Lambda_CC.pdf')",
"def distribution_plot(data):\r\n ready_data = sorted((data))\r\n fit = stats.norm.pdf(ready_data, np.mean(ready_data), np.std(ready_data))\r\n plt.plot(ready_data, fit, '-o')\r\n plt.ylabel(\"Prob\")\r\n plt.xlabel(\"Prices\")\r\n plt.title(\"Distribution of prices (Under 50 days) Demand Function\")\r\n plt.show()",
"def plot_pc(*args, x='date', y='pc_new_positives', days=0):\r\n # unpack args (stored in tuple) to list form\r\n a = []\r\n a.append(x)\r\n a.append(y)\r\n for arg in args:\r\n a.append(arg)\r\n # format dates\r\n if (days > 0) & (days < 32):\r\n locator = mdates.DayLocator()\r\n formatter = mdates.DateFormatter('%d')\r\n else:\r\n locator = mdates.MonthLocator()\r\n formatter = mdates.DateFormatter('%m-%d')\r\n # set up axes for subplots\r\n fig, ax = plt.subplots(figsize=(8, 8))\r\n # format dates on x-axis\r\n ax.xaxis.set_major_locator(locator)\r\n ax.xaxis.set_major_formatter(formatter)\r\n for state in args:\r\n # subset df for our plots. dropna() ensures same date range for all plots\r\n plot_data = df[df['State'] == state]\r\n # subset for number of days\r\n if days > 0:\r\n plot_data = plot_data[:days+1]\r\n # make plot\r\n ax.plot(plot_data[x], plot_data[y][plot_data['State'] == state], linestyle='-', label=state)\r\n # set titles\r\n ax.set_title(\"{} per 100,000 people\".format(y))\r\n #show legend\r\n ax.legend()",
"def plot_cdf(Y, Y_Label='Y'):\r\n\r\n # Options for the graphic\r\n pltfont = {'fontname': 'Bitstream Vera Sans', 'fontsize': 15} # font\r\n lc = 'k' # line colour\r\n\r\n ###########################################################################\r\n # Check inputs\r\n ###########################################################################\r\n if not isinstance(Y, np.ndarray):\r\n raise ValueError('\"Y\" must be a numpy.array.')\r\n if Y.dtype.kind != 'f' and Y.dtype.kind != 'i' and Y.dtype.kind != 'u':\r\n raise ValueError('\"Y\" must contain floats or integers.')\r\n Ny = Y.shape\r\n if len(Ny) > 1:\r\n if Ny[1] != 1:\r\n raise ValueError('\"Y\" be of shape (N,1) or (N, ).')\r\n N = Ny[0]\r\n Y = Y.flatten() # shape (N, )\r\n\r\n if np.isnan(Y).any():\r\n warn('some data in \"Y\" are nan')\r\n if np.isinf(Y).any():\r\n warn('some data in \"Y\" are inf')\r\n\r\n if not isinstance(Y_Label, str):\r\n raise ValueError('\"Y_Label\" must be a string.')\r\n\r\n ###########################################################################\r\n # Create plot\r\n ###########################################################################\r\n\r\n Y = Y[~np.isnan(Y)] # remove NaNs\r\n ymin = np.min(Y)\r\n ymax = np.max(Y)\r\n\r\n #plt.figure()\r\n\r\n # Option 1: use the function empiricalcdf of SAFE\r\n Nmin = 5000\r\n if N > Nmin:\r\n Yi = np.sort(Y)\r\n F = empiricalcdf(Y, Yi)\r\n plt.plot(Yi, F, '.', color=lc)\r\n else:\r\n Yi = np.linspace(ymin, ymax, Nmin)\r\n F = empiricalcdf(Y, Yi)\r\n plt.plot(Yi, F, color=lc)\r\n\r\n # Option 2: use the ECDF function of the python package 'statsmodels'\r\n #ecdf = ECDF(Y)\r\n #plt.plot(ecdf.x,ecdf.y, color=lc)\r\n\r\n # Customise plot\r\n plt.xticks(**pltfont); plt.yticks(**pltfont)\r\n plt.xlabel(Y_Label, **pltfont)\r\n plt.ylabel('CDF', **pltfont)\r\n plt.box(on=True)\r\n\r\n # Limit for horizontal axisym = min(y);\r\n if ymin == ymax: # (i.e., if all data have the same value)\r\n ymin = ymin - ymin/10\r\n ymax = ymax + ymax/10\r\n plt.xlim((ymin, ymax))\r\n plt.ylim((0, 1))",
"def plot_cumulative_gain(y_true, y_probas, title='Cumulative Gains Curve',\n ax=None, figsize=None, title_fontsize=\"large\",\n text_fontsize=\"medium\"):\n y_true = np.array(y_true)\n y_probas = np.array(y_probas)\n\n classes = np.unique(y_true)\n if len(classes) != 2:\n raise ValueError('Cannot calculate Cumulative Gains for data with '\n '{} category/ies'.format(len(classes)))\n\n # Compute Cumulative Gain Curves\n percentages, gains1 = cumulative_gain_curve(y_true, y_probas[:, 0],\n classes[0])\n percentages, gains2 = cumulative_gain_curve(y_true, y_probas[:, 1],\n classes[1])\n percentages, gains3 = cumulative_gain_curve(y_true, y_true,\n classes[0])\n percentages, gains4 = cumulative_gain_curve(y_true, y_true,\n classes[1])\n\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n\n ax.set_title(title, fontsize=title_fontsize)\n\n ax.plot(percentages, gains1, lw=3, label='Class {} (pred)'.format(classes[0]))\n ax.plot(percentages, gains2, lw=3, label='Class {} (pred)'.format(classes[1]))\n #ax.plot(percentages, gains3, lw=3, label='Class {} (true)'.format(classes[0]))\n ax.plot(percentages, gains4, lw=3, label='Class {} (true)'.format(classes[1]))\n\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.1])\n\n ax.plot([0, 1], [0, 1], 'k--', lw=2, label='Baseline')\n\n ax.set_xlabel('Percentage of sample', fontsize=text_fontsize)\n ax.set_ylabel('Gain', fontsize=text_fontsize)\n ax.tick_params(labelsize=text_fontsize)\n ax.grid('on')\n ax.legend(loc='lower right', fontsize=text_fontsize)\n plt.show()\n return ax",
"def cumulative_days_plot(self, cm_plot_style, newfig=True, skip_yticks=False):\n if newfig:\n plt.figure(figsize=(3, 3), dpi=300)\n\n nRs, nCMs, nDs = self.ActiveCMs.shape\n\n ax = plt.gca()\n mask = np.reshape((self.NewDeaths.mask == False), (nRs, 1, nDs))\n days_active = np.sum(np.sum(self.ActiveCMs * np.repeat(mask, nCMs, axis=1), axis=0), axis=1)\n plt.barh(-np.arange(nCMs), days_active, color=[0.7522446028276593, 0.5089037847613617, 0.6733963201089419])\n\n plt.yticks(\n -np.arange(len(self.CMs)),\n [f\"{f} \" if not skip_yticks else \" \" for f in self.CMs]\n )\n\n x_min, x_max = plt.xlim()\n x_r = x_max - x_min\n for i, (ticklabel, tickloc) in enumerate(zip(ax.get_yticklabels(), ax.get_yticks())):\n ticklabel.set_color(cm_plot_style[i][1])\n plt.text(-0.09 * x_r, tickloc, cm_plot_style[i][0], horizontalalignment='center',\n verticalalignment='center',\n fontproperties=fp2, fontsize=7, color=cm_plot_style[i][1])\n\n plt.xticks([0, 500, 1000, 1500, 2000, 2500, 3000], fontsize=6)\n # ax.tick_params(axis=\"both\", which=\"major\", labelsize=10)\n plt.title(\"Total Days Active\", fontsize=8)\n plt.xlabel(\"Days\", fontsize=8)\n plt.ylim([-len(self.CMs) + 0.5, 0.5])",
"def plot_data_pca(data_dict):\n f = plt.figure()\n ndata, ntime, nhidden = data_dict['hiddens'].shape\n\n print('Number of data examples: ', ndata)\n print('Number of timesteps: ', ntime)\n print('Number of data dimensions: ', nhidden)\n pca = PCA(n_components=100)\n pca.fit(onp.reshape(data_dict['hiddens'], [ndata * ntime, nhidden]))\n\n plt.plot(onp.arange(1, 16), onp.cumsum(pca.explained_variance_ratio_)[0:15],\n '-o');\n plt.plot([1, 15], [0.95, 0.95])\n plt.xlabel('PC #')\n plt.ylabel('Cumulative Variance')\n plt.xlim([1, 15])\n plt.ylim([0.3, 1]);\n return f",
"def cumulative_quantiles_plot(\n samples, plot_width=960, plot_height=480, show_samples=True\n):\n plot = bp.figure(plot_width=960, plot_height=480)\n\n names = samples.columns.levels[0]\n _colors = {\n name: color for name, color in zip(names, colors.colors(len(names)))\n }\n\n def draw(group):\n name = group.columns[0][0]\n color = _colors[name]\n group.columns = group.columns.droplevel(0)\n group = group.dropna()\n quantile_source = bm.ColumnDataSource(\n pd.DataFrame(\n data={\"lower\": group[\"25%\"], \"upper\": group[\"75%\"]},\n index=group.index,\n ).dropna().reset_index()\n )\n extreme_source = bm.ColumnDataSource(\n pd.DataFrame(\n data={\"lower\": group[\"min\"], \"upper\": group[\"max\"]},\n index=group.index,\n ).dropna().reset_index()\n )\n plot.line(group.index, group[\"50%\"], line_color=color, legend=name)\n plot.add_layout(\n bm.Band(\n base=\"time\",\n lower=\"lower\",\n upper=\"upper\",\n source=quantile_source,\n fill_color=color,\n fill_alpha=0.2,\n )\n )\n plot.add_layout(\n bm.Band(\n base=\"time\",\n lower=\"lower\",\n upper=\"upper\",\n source=extreme_source,\n fill_color=color,\n fill_alpha=0.025,\n )\n )\n plot.line(\n \"time\", \"lower\", line_color=color, alpha=0.5, source=extreme_source\n )\n plot.line(\n \"time\", \"upper\", line_color=color, alpha=0.5, source=extreme_source\n )\n\n cumulative_quantiles(samples).groupby(axis=1, level=0).apply(draw)\n\n if show_samples:\n\n def scatter(group):\n name = group.columns[0][0]\n color = _colors[name]\n group = isolate(group)\n t = timings(group).set_index(group.iloc[:, 1])\n t.index.name = \"time\"\n t.columns = [\"value\"]\n source = bm.ColumnDataSource(t.reset_index())\n plot.circle(\n x=\"time\",\n y=\"value\",\n source=source,\n color=color,\n size=1,\n alpha=0.5,\n )\n\n samples.groupby(axis=1, level=0).apply(scatter)\n\n bi.show(plot)",
"def _plot_ecdf(self, numerator_name, denominator_name):\n x = self.ecdf[numerator_name][denominator_name]['x']\n y = self.ecdf[numerator_name][denominator_name]['y']\n\n lower_bound = x[y.index(min(y,\n key=lambda x:\n abs(x-self.confidence_level)))]\n median = x[y.index(min(y, key=lambda x:abs(x-0.5)))]\n upper_bound = x[y.index(min(y,\n key=lambda x:\n abs(x-(1-self.confidence_level))))]\n\n sns.lineplot(x=x, y=y)\n ci = 1 - self.confidence_level\n title = ('Median Lift was {0:.2%}, with a '\n '{1:.0%} CI of [{2:.2%}, {3:.2%}]'.format(median,\n ci,\n lower_bound,\n upper_bound))\n title = self._format_title(title)\n plt.title(title)\n plt.xlabel('Lift')\n plt.ylabel('Cumulative Probability')\n plt.axvline(x=lower_bound, linestyle='dotted', color='black')\n plt.axvline(x=median, linestyle='dotted', color='black')\n plt.axvline(x=upper_bound, linestyle='dotted', color='black')\n sns.despine(left=True)\n locs, labels = plt.xticks()\n labels = self._format_axis_as_percent(locs, labels)\n plt.xticks(locs, labels=labels)",
"def plot(self, *args, **kwargs):\n pass",
"def distribution_horizontale_new(args):\n number_files = [2,5,10,20];\n rep = args[\"path_save\"]+args[\"correction\"]+\\\n \"/data_p_\"+str(args[\"p_value\"])+\"/distribution/\";\n fig = plt.figure(); default_size = fig.get_size_inches(); \n f, ax_arrs = plt.subplots(2, 4, figsize=(default_size[0]*2.2, \\\n default_size[1]*1.5), \\\n );\n cpt1 = 0; cpt2 = 1; tab_bins = [20, 40, 80,100, 100, 100]\n for ind, k in enumerate(number_files) :\n df = pd.read_csv(rep+args[\"fichier_prefix\"] +str(k)+args[\"ext\"], \\\n names=[\"cpt\",\"moy_dc\",\"moy_dh\", \"aretes_matE\", \"correl_dh_dl\"], \\\n sep=';')\n N_graphs = df[\"moy_dc\"].count();\n \n aretes = find_aretes(args, df)\n bins = range(0, (ind+1)*args[\"num_bins\"]);\n bins = range(0, tab_bins[ind]);\n max_count_dl, max_count_dh = count_max_df(df)\n \n # plot ax1\n (mu, sigma) = norm.fit(df[\"moy_dc\"]); # best fit of data\n sns.distplot(df[\"moy_dc\"], ax = ax_arrs[cpt1, ind], bins = bins, kde = False)\n ax_arrs[cpt1, ind].set(xlabel= \"moy_distance_correction\", \\\n ylabel= \"nombre_graphe\", \\\n title = \"distance de correction pour \\n \"+ str(k)+ \\\n \" cases modifiees\")\n ax_arrs[cpt1, ind].plot([k+1,k+1], (0,max_count_dl), 'r--' )\n ax_arrs[cpt1, ind].set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax_arrs[cpt1, ind].get_yticks()])\n \n #plot ax2\n (mu, sigma) = norm.fit(df[\"moy_dh\"]); # best fit of data\n sns.distplot(df[\"moy_dh\"], ax = ax_arrs[cpt2, ind], bins = bins, kde = False)\n ax_arrs[cpt2, ind].set(xlabel= \"moy_distance_correction\", \\\n ylabel= \"nombre_graphe\", \\\n title = \"distance Hamming pour \\n \"+ str(k)+\\\n \" cases modifiees\")\n ax_arrs[cpt2, ind].plot([k+1,k+1], (0,max_count_dl), 'r--' )\n ax_arrs[cpt2, ind].set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax_arrs[cpt2, ind].get_yticks()])\n \n# fig = ax_arrs[0,0].figure ;\n# fig.text(0.5,0.04, \"Some very long and even longer xlabel\", ha=\"center\", va=\"center\")\n# fig.text(0.05,0.5, \"Some quite extensive ylabel\", ha=\"center\", va=\"center\", rotation=90)\n\n \n plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.grid(True)\n plt.savefig(args[\"path_save\"]+args[\"correction\"]+\"/courbes/\"+\\\n \"distributionMoyDCDHp05k1251020.jpeg\",\\\n dpi= 250) #190 ",
"def cdf(self, alpha): #Plot empirical cfd with confidence interval\n x = self.x\n n = len(x)\n y = np.arange(1, n+1)/n\n \n #Computing confidence interval with the Dvoretzky–Kiefer–Wolfowitz method based on the empirical points\n F1 = []\n F2 = []\n for i in range(0, n):\n e = (((mt.log(2/alpha))/(2*n))**0.5) \n F1.append(y[i] - e)\n F2.append(y[i] + e) \n plt.plot(sorted(x), y, label='Empirical CDF')\n plt.plot(sorted(x), F1, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Dvoretzky–Kiefer–Wolfowitz Confidence Bands')\n plt.plot(sorted(x), F2, linestyle='--', color='red', alpha = 0.8, lw = 0.9)\n plt.ylabel('Cumulative Distribution Function')\n plt.xlabel('Observed Data')\n plt.legend()\n plt.show()\n \n return(y)",
"def cdf(data_r, data_f, xlabel: str = 'Values', ylabel: str = 'Cumulative Sum', ax=None):\n x1 = np.sort(data_r)\n x2 = np.sort(data_f)\n y = np.arange(1, len(data_r) + 1) / len(data_r)\n\n ax = ax if ax else plt.subplots()[1]\n\n axis_font = {'size': '14'}\n ax.set_xlabel(xlabel, **axis_font)\n ax.set_ylabel(ylabel, **axis_font)\n\n ax.grid()\n ax.plot(x1, y, marker='o', linestyle='none', label='Real', ms=8)\n ax.plot(x2, y, marker='o', linestyle='none', label='Synthetic', alpha=0.5)\n ax.tick_params(axis='both', which='major', labelsize=8)\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=3)\n\n # If labels are strings, rotate them vertical\n if isinstance(data_r, pd.Series) and data_r.dtypes == 'object':\n ax.set_xticklabels(data_r.value_counts().sort_index().index, rotation='vertical')\n\n if ax is None:\n plt.show()",
"def plot_ccum_degree_dist(net, label, outpath, degree_type='all'):\n unique_deg, unique_cnt = networkit_util.get_cc_deg_dist(net, degree_type)\n title = {'all': '', 'in': 'In', 'out': 'Out'}\n outfile_name = {'all': 'cc', 'in': 'cc-in', 'out': 'cc-out'}\n marker_color = {'all': 'b', 'in': 'g', 'out': 'r'}\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.loglog(unique_deg, unique_cnt, color=marker_color[degree_type], marker='*', label=label)\n # ax.set_title('Complementary Cumulative ' + title[degree_type] + '-Degree distribution')\n ax.set_xlabel('k')\n ax.set_ylabel('P(x>=k)')\n # ax.legend(loc='best')\n plt.savefig(outpath + label + '-' + outfile_name[degree_type] + '-degree-distribution.eps')\n return ax",
"def plotCoulombEnergy(self, phys, forces, step): \r\n self.plotQuantity(step, phys.app.energies.getTable(0), 'coulombenergy')",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def plot_curve(epochs, hist, list_of_metrics):\n # list_of_metrics should be one of the names shown in:\n # https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#define_the_model_and_metrics\n\n plt.figure()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Value\")\n\n for m in list_of_metrics:\n x = hist[m]\n plt.plot(epochs[1:], x[1:], label=m)\n\n plt.legend()\n plt.show()",
"def plot_curve(epochs, hist, list_of_metrics): \n # list_of_metrics should be one of the names shown in:\n # https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#define_the_model_and_metrics \n\n plt.figure()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Value\")\n\n for m in list_of_metrics:\n x = hist[m]\n plt.plot(epochs[1:], x[1:], label=m)\n\n plt.legend()",
"def plot_img_and_hist(image, axes, bins=256):\n# image = img_as_float(image)\n ax_img, ax_hist = axes\n ax_cdf = ax_hist.twinx()\n\n # Display image\n ax_img.imshow(image, cmap=plt.cm.gray);\n ax_img.set_axis_off()\n\n # Display histogram\n ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')\n ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n ax_hist.set_xlabel('Pixel intensity')\n ax_hist.set_xlim(0, 1)\n ax_hist.set_yticks([])\n\n # Display cumulative distribution\n img_cdf, bins = exposure.cumulative_distribution(image, bins)\n ax_cdf.plot(bins, img_cdf, 'r')\n ax_cdf.set_yticks([])\n\n return ax_img, ax_hist, ax_cdf",
"def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')",
"def plot(self):\n pass",
"def plot_cf(self, **options):\n n = len(self.hs)\n xs = np.arange(-n//2, n//2)\n hs = np.roll(self.hs, len(self.hs) // 2)\n plt.plot(xs, hs.real, label='real', **options)\n plt.plot(xs, hs.imag, label='imag', **options)\n plt.legend()",
"def plot_distribution(d, start=0.01, stop=10.0, resolution=0.1):\n import pylab\n X = numpy.arange(start, stop, resolution)\n Y = [math.exp(d.log_pdf(x)) for x in X]\n pylab.plot(X, Y)",
"def plot_costs(j_history):\n plt.figure(figsize=(14, 8))\n plt.plot(range(len(j_history)), j_history)\n plt.grid(True)\n plt.title('J (Cost)')\n plt.xlabel('Iteration')\n plt.ylabel('Cost function')\n plt.xlim([0, 1.05 * ITERATIONS])\n plt.ylim([4, 7])\n plt.show()\n plt.close()",
"def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])",
"def PlotContributions( ax=None, dev=False, measure='DM', redshift=0.1, cumulative=False, N_inter=False, **scenario ):\n if ax is None:\n fig, ax = plt.subplots()\n for region in regions:\n models = scenario.get( region )\n if models:\n for model in models:\n P = GetLikelihood( region=region, model=model, measure=measure, redshift=redshift, N_inter=N_inter, dev=dev )\n PlotLikelihood( *P, measure=measure, label=region+': '+Label(model) , linestyle=linestyle_region[region], ax=ax, cumulative=cumulative )\n ax.legend()\n ax.set_title( \"redshift = %.1f\" % redshift )",
"def target_cov_plot(context):",
"def summary_plot(self, cm_plot_style):\n plt.figure(figsize=(10, 3), dpi=300)\n plt.subplot(1, 2, 1)\n self.conditional_activation_plot(cm_plot_style, False)\n plt.subplot(1, 2, 2)\n self.cumulative_days_plot(cm_plot_style, False)\n plt.tight_layout()\n plt.savefig(\"FigureCA.pdf\", bbox_inches='tight')\n # sns.despine()",
"def curve_plot(self):\n if self.session.active['mode'] == 'database':\n self.curvePlot.set_scroll_interval()\n self.curvePlot.update_depth()\n self.curvePlot.show()",
"def plot_dist_evolution(arr, nbins=20, fracs=np.array([0.1, 0.2, 0.3, 0.4]), last=0.5):\n fracs = fracs\n\n last = last\n last_subset = arr[int(last * arr.shape[0]) :]\n\n for ff in fracs:\n\n subset = arr[: int(ff * arr.shape[0])]\n\n pl.hist(\n subset, nbins, histtype=\"step\", density=True, label=\"f=0.0--{}\".format(ff)\n )\n\n pl.hist(\n last_subset,\n nbins,\n histtype=\"step\",\n density=True,\n label=\"f={}--1.0\".format(last),\n )\n\n pl.legend(loc=\"best\", ncol=3)\n\n pl.show()\n\n return None",
"def CumulativeDistribution(data, nbins, range=None, normed=True, centerbins=False):\n\n # 1) COMPUTE THE DISTRIBUTION OF THE DATA\n ydata, xdata = np.histogram(data, nbins, range, normed)\n\n # 1.1) Compute the cumulative sum of the probability\n ydata = ydata.cumsum()\n\n # 2) RETURN THE RESULTS\n if centerbins:\n dif = 0.5 * (xdata[-1] - xdata[0]) / nbins\n xdata += dif\n\n if normed:\n norm = 1.0 / ydata[-1]\n ydata *= norm\n\n return xdata[:-1], ydata\n\n else:\n return xdata[:-1], ydata",
"def visualize_data(dqn_rewards, ddqn_rewards):\n \n fig, ax = plt.subplots()\n x_values = list(range(1, dqn_rewards.size + 1))\n ax.plot(x_values, dqn_rewards, label='dqn rewards')\n ax.plot(x_values, ddqn_rewards, label='ddqn rewards')\n plt.xlabel('episodes')\n plt.title('Cumulative Reward per Game')\n plt.legend()\n plt.show()",
"def cum_sum(self):\n\n # create cdo command and runit\n cdo_command = \"cdo -timcumsum\"\n run_this(cdo_command, self, output=\"ensemble\")",
"def plot_kde():\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()",
"def create_plot_binary(dist=100.0, num_sys=100, bins=25):\n\n global binary_set\n\n if binary_set is None or len(binary_set) != num_sys:\n generate_binary_set(num_sys=num_sys, dist=dist)\n\n\n fig, ax1 = plt.subplots(1,1, figsize=(6,4))\n\n # Plot limits\n xmin, xmax = 0.0, 5000.0\n ymin, ymax = 0.0, 3.0\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(ymin, ymax)\n\n # Plot labels\n ax1.set_xlabel(\"Projected separation (AU)\")\n ax1.set_ylabel(\"Proper motion difference (km/s)\")\n\n # Plot distribution\n contourf_kwargs = {'bins':bins}\n corner.hist2d(binary_set['proj_sep']*c.Rsun_to_cm/c.AU_to_cm, binary_set['pm'], nbins=bins,\n range=([xmin,xmax],[ymin,ymax]), **contourf_kwargs)\n\n # Add angular separation at dist axis\n ax2 = ax1.twiny()\n xticks = np.linspace(xmin,xmax,6)\n angles = (xticks * c.AU_to_cm)/(dist * c.pc_to_cm) * (180.0 * 3600.0 / np.pi)\n ax2.set_xticks(angles)\n ax2.set_xlabel('Angular separation at distance of ' + str(dist) + ' pc (arcsec)')\n\n # Add proper motion at dist axis\n ax3 = ax1.twinx()\n yticks = np.linspace(ymin, ymax, 7)\n def pm_at_dist(pm, dist=100.0):\n return (pm * 1.0e5)/(dist * c.pc_to_cm) * (1.0e3 * 180.0 * 3600.0 / np.pi) * c.day_to_sec*365.25\n\n ax3.set_ylim(0.0, pm_at_dist(ax1.get_ylim()[1], dist=dist))\n ax3.set_ylabel('Proper motion at distance of ' + str(dist) + ' pc (mas/yr)')\n\n plt.tight_layout()\n plt.show()",
"def plot(self, ax=None, ylabel=\"CDF(x)\", xlabel=\"y\", upper_quantile=.25, lower_quantile=.75, force_recomputation=False, show=False, outputname=None, color=\"C2\", plot_cCDF=False):\n \n \"\"\"If data set is empty, return without plotting\"\"\"\n if self.samples_x == []:\n return\n \n \"\"\"Create figure if none was provided\"\"\"\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n \"\"\"Compute plots if not already done or if recomputation was requested\"\"\"\n if (self.series_y is None) or force_recomputation:\n self.make_figure(upper_quantile, lower_quantile)\n \n \"\"\"Switch to cCDF if requested\"\"\"\n if plot_cCDF:\n self.reverse_CDF()\n \n \"\"\"Plot\"\"\"\n ax.fill_between(self.quantile_series_x, self.quantile_series_y_lower, self.quantile_series_y_upper, facecolor=color, alpha=0.25)\n ax.plot(self.median_x, self.series_y, color=color)\n ax.plot(self.mean_x, self.series_y, dashes=[3, 3], color=color)\n \n \"\"\"Set plot attributes\"\"\"\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n \n \"\"\"Save if filename provided\"\"\"\n if outputname is not None:\n plt.savefig(outputname + \".pdf\")\n plt.savefig(outputname + \".png\", density=300)\n \n \"\"\"Show if requested\"\"\"\n if show:\n plt.show()",
"def plotacc(accff, freqreq=None):\n dataobj = CVCfiles(accff)\n if freqreq is None:\n freqreq = 0.0\n sb, _nqzone = modeparms.freq2sb(freqreq)\n for fileidx in range(0, dataobj.getnrfiles()):\n filecvc = dataobj[fileidx]\n while sb < 512:\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)\n absdatplt = ax1.pcolormesh(numpy.abs(filecvc[sb]))\n ax1.set_title('Abs value')\n ax1.set_ylabel('RCU [#]')\n fig.colorbar(absdatplt, ax=ax1)\n angdatplt = ax2.pcolormesh(numpy.angle(filecvc[sb]),\n cmap=plt.get_cmap('hsv'))\n ax2.set_title('Phase value')\n ax2.set_xlabel('RCU [#]')\n ax2.set_ylabel('RCU [#]')\n fig.colorbar(angdatplt, ax=ax2)\n plt.suptitle('Station element covariance. Time: {}UT, SB: {}'\\\n .format(dataobj.samptimeset[fileidx][sb], sb))\n plt.show()\n sb += 1",
"def produce_cgchart(ytrue, ypred):\n\n yprobas = np.append((1-ypred).reshape(-1,1), ypred.reshape(-1,1), axis=1)\n # 0's and 1's\n print(yprobas.shape)\n areas = plot_cumulative_gain(ytrue, yprobas)",
"def plot_cc(graph):\n\tclustering_coeffs = []\n\tfor node in graph.nodes():\n\t\tclustering_coeffs.append(nx.clustering(graph, node))\n\t\n\tplt.axvline(x=np.mean(clustering_coeffs), color='r', linestyle='-')\n\tplt.hist(clustering_coeffs, bins=100)",
"def cumulative_distribution(self, dist='current'):\n \n dictDist = {'current': np.cumsum(self.current),\n 'prior': np.cumsum(self.prior),\n 'posterior': np.cumsum(self.posterior, axis=1)\n }\n \n cdf = dictDist[dist]\n \n return cdf",
"def trades_chart(self, ax, cm):\n self.trades.cumsum().reindex(self.tc.dates, method='ffill').plot(\n ax=ax,\n figsize=(12, 6),\n title='Cumulative investment per instrument',\n cmap=cm\n )\n ax.set_xlim(self.tc.dates[0], self.tc.dates[-1])\n ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))",
"def plot_cumulative_monthly_schedule(self):\n\n monthly_schedule = self.schedule_monthly\n \n monthly_schedule['Cumulative Payment'] = monthly_schedule['Payment'].cumsum()\n monthly_schedule['Cumulative Principal'] = monthly_schedule['Principal'].cumsum()\n monthly_schedule['Cumulative Interest'] = monthly_schedule['Interest'].cumsum()\n \n plt.figure()\n plt.plot(monthly_schedule[\"Month Date\"], monthly_schedule[\"End Balance\"], color='r', label='Remaining Balance', **_general_plot_properties)\n plt.plot(monthly_schedule[\"Month Date\"], monthly_schedule[\"Cumulative Payment\"], color='k', label='Cumulative Payments', **_general_plot_properties)\n plt.plot(monthly_schedule[\"Month Date\"], monthly_schedule[\"Cumulative Principal\"], color='b', label='Cumulative Principal Paid', **_general_plot_properties)\n plt.plot(monthly_schedule[\"Month Date\"], monthly_schedule[\"Cumulative Interest\"], color='g', label='Cumulative Interest Paid', **_general_plot_properties)\n \n plt.legend()\n plt.xlabel('Payment Month')\n plt.xticks(rotation=45)\n plt.ylabel('Amount')\n\n plt.title('Cumulative Monthly Payment Schedule')",
"def plot_distribution(self, variable, **kwargs):\n return self.visualizer.plot_distribution(variable, **kwargs)",
"def plot_cdf(self, param, plot_type, Nsplit=50, **kwargs):\n title = self.family.capitalize() + \" Copula CDF\" \n\n bounds = [0+1e-2, 1-1e-2]\n U_grid, V_grid = np.meshgrid(\n np.linspace(bounds[0], bounds[1], Nsplit),\n np.linspace(bounds[0], bounds[1], Nsplit))\n \n Z = np.array(\n [self.get_cdf(uu, vv, param) for uu, vv in zip(np.ravel(U_grid), np.ravel(V_grid)) ] )\n \n Z = Z.reshape(U_grid.shape)\n\n if plot_type == \"3d\":\n plot_bivariate_3d(U_grid,V_grid,Z, [0,1], title, **kwargs)\n elif plot_type == \"contour\":\n plot_bivariate_contour(U_grid,V_grid,Z, [0,1], title, **kwargs)\n else:\n print(\"only \\\"contour\\\" or \\\"3d\\\" arguments supported for type\")\n raise ValueError",
"def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n with PdfPages(name) as pdf:\n total_xuf = []\n total_yuf = []\n total_xf = []\n total_yf = []\n for entry in uf_dict:\n print 'Making plot for ' + entry\n xuf, yuf = zip(*uf_dict[entry])\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n try:\n xf, yf = zip(*f_dict[entry])\n ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n except ValueError:\n xf = []\n yf = []\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n if total:\n total_xuf.extend(xuf)\n total_yuf.extend(yuf)\n total_xf.extend(xf)\n total_yf.extend(yf)\n\n if histogram:\n bins = np.linspace(min_y, max_y, num=10)\n plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n try:\n plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n except ValueError:\n pass\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()",
"def test_cumulative_distribution_fit_call_pd(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()",
"def do_plot(self):\n years = sorted(set(self.prediction_df_without_covid19['Year']))\n predict_without_covid_country = self.prediction_df_without_covid19[\n self.prediction_df_without_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n predict_with_covid_country = self.prediction_df_with_covid19[\n self.prediction_df_with_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n # ------------------------------------------------------------------------------------------------------\n pa = \\\n predict_without_covid_country.loc[predict_without_covid_country['Year'] == 1990][\n 'Total_CO2_Emissions'].values[\n 0]\n x = []\n for i in range(len(years)):\n x.append(pa * 0.6)\n # ------------------------------------------------------------------------------------------------------\n fig = Figure()\n ax = fig.subplots()\n ax.grid(True, alpha=0.3)\n # plot_title = 'Total CO2 Emissions predicted from 2019-2030 for ' + self.country\n plot_title = 'Total ' + '$CO_2$' + ' Emissions predicted from 2019-2030 for ' + self.country\n label_country_without_covid = 'Total CO2 emissions without covid'\n label_country_with_covid = 'Total CO2 emissions with Covid-19'\n # ------------------------------------------------------------------------------------------------------\n params = {'mathtext.default': 'regular'}\n rcParams.update(params)\n rcParams['font.size'] = 7\n rcParams['lines.markersize'] = 4\n rcParams['figure.figsize'] = [7, 4]\n rcParams['figure.dpi'] = 150\n rcParams['font.family'] = 'Verdana'\n rcParams[\"font.weight\"] = \"normal\"\n font = {'family': 'Verdana',\n 'color': 'xkcd:darkgreen',\n 'weight': 'normal',\n 'size': 9,\n }\n colors = rcParams['axes.prop_cycle'].by_key()['color']\n l1, = ax.plot(years, predict_without_covid_country['Total_CO2_Emissions'], color='xkcd:dark blue green',\n marker='o',\n label=label_country_without_covid)\n l2, = ax.plot(years, predict_with_covid_country['Total_CO2_Emissions'], color='xkcd:neon pink', marker='.',\n label=label_country_with_covid)\n l3, = ax.plot(years, x, color='xkcd:orchid', marker='1')\n print('without covid: ', predict_without_covid_country['Total_CO2_Emissions'].values)\n print('with covid: ', predict_with_covid_country['Total_CO2_Emissions'].values)\n ax.set_xlabel('Years', fontdict=font)\n ax.set_ylabel('Emissions (Gg)', fontdict=font)\n ax.set_title(plot_title, fontsize=12, fontweight='normal')\n ax.patch.set_facecolor('xkcd:green')\n ax.set_facecolor('xkcd:pale green')\n fig.legend((l1, l2, l3), ('Prediction without Covid19', 'Prediction with Covid19', 'Paris Agreement'),\n bbox_to_anchor=(0.907, 0.89))\n fig.savefig(OUTPUT_GRAPH_PATH)",
"def make_graph(cmatrix, timesteps, Numpoints, dt):\n\n # Create a figure with size 15, 5\n fig, ax = plt.subplots(1,1, figsize=(15, 5))\n \n # Set the figure title, and the axes labels.\n the_title = fig.text(0.25, 0.95, 'Concentrations Results from t = %.3fs to %.3fs' % (0, dt*timesteps))\n ax.set_ylabel('Concentration')\n ax.set_xlabel('Grid Point')\n\n # We use color to differentiate lines at different times. Set up the color map\n cmap = plt.get_cmap('spectral')\n cNorm = colors.Normalize(vmin=0, vmax=1.*timesteps)\n cNorm_inseconds = colors.Normalize(vmin=0, vmax=1.*timesteps*dt)\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)\n\n # Only try to plot 20 lines, so choose an interval if more than that (i.e. plot\n # every interval lines\n interval = np.int(np.ceil(timesteps/20))\n\n # Do the main plot\n for time in range(0, timesteps, interval):\n colorVal = scalarMap.to_rgba(time)\n ax.plot(cmatrix[time, :], color=colorVal)\n\n # Add the custom colorbar\n ax2 = fig.add_axes([0.95, 0.05, 0.05, 0.9])\n cb1 = colorbar.ColorbarBase(ax2, cmap=cmap, norm=cNorm_inseconds)\n cb1.set_label('Time (s)')\n return",
"def plot_pagerank(net, label, outpath):\n _, pagerank_values = networkit_util.get_pagerank(net, label, outpath)\n unique_value, unique_cnt = np.unique(pagerank_values, return_counts=True)\n unique_cumcnt = np.cumsum(unique_cnt) / sum(unique_cnt)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(unique_value, unique_cumcnt, 'r.')\n # ax.set_title('Cumulative distribution of pagerank of nodes')\n ax.set_xlabel('pagerank value v')\n ax.set_ylabel('p(x <= v)')\n plt.savefig(outpath + label + \"-pagerank-distribution.eps\")",
"def plot_hist(self):\n \n plt.figure();\n self.dist_frame.plot(kind='hist',legend=False,orientation='horizontal')",
"def plot_distribution(img_path):\n img = Image.open(img_path)\n img_width, img_height = img.size\n img = prepare_image(img = img)\n model = vgg19(pretrained=True).cuda().eval() \n predict = model.forward(img)\n predict = predict.detach().cpu().numpy().reshape(-1)\n \n label = pd.read_csv('./label.csv', sep = ';', index_col=0)\n label['predict'] = predict\n label.sort_values(by = 'predict', inplace = True)\n trace = go.Bar(x = [str(i) + '_' + j for i, j in enumerate(label.label)], y = label.predict)\n l = go.Layout(\n title = 'Class distribution',\n xaxis = dict(\n title = 'Class'\n ),\n yaxis = dict(\n title = 'Score'\n )\n )\n fig = go.Figure(data = [trace], layout = l)\n iplot(fig)",
"def generate_plots(self, input_data, input_labels=None):\n pass",
"def plot():\n pass",
"def display_distributions_over_positions(self, distributions):\n dists = []\n for dist in distributions:\n if dist is not None:\n if not isinstance(dist, util.Counter):\n raise Exception(\"Wrong type of distribution\")\n dists.append(dist)\n else:\n dists.append(util.Counter())\n\n if ((self.display is not None and\n 'update_distributions' in dir(self.display))):\n self.display.update_distributions(dists)\n else:\n self._distributions = dists # These can be read by pacclient.py",
"def income_distribution_plot(income_data,year):\n income_year = income_data.loc[year]\n plt.figure(figsize=(10,8))\n income_year.hist(bins=100,alpha=0.3,color='k')\n plt.title('Income Distribution of Year %s' % year)\n plt.xlabel('Income per person')\n plt.ylabel('Frequency')\n plt.savefig('Income distribution of year %s' % year)",
"def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()",
"def cumulative_profile(metdat, catinfo, category=None):\n \n if category is None:\n print('not sure what to plot...')\n pass\n \n # extract vertical locations of data from variable names\n colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category]) \n \n plotdat = metdat[colnames].mean()\n\n fig, ax = plt.subplots(figsize=(3.5,5))\n ax.plot(plotdat, vertlocs)\n\n ax.set_ylabel('Probe Height [m]')\n ax.set_xlabel(catinfo['labels'][category])\n fig.tight_layout()\n \n return fig, ax",
"def plotprice(self):\n plt.figure()\n plt.hist( self.pricetree[-1,:] )\n plt.title(\"price Distribution\") \n plt.show()"
] | [
"0.67289984",
"0.64278775",
"0.64250666",
"0.6319937",
"0.6233318",
"0.6181403",
"0.6145437",
"0.6055146",
"0.6021088",
"0.59941614",
"0.5906233",
"0.58567",
"0.5850428",
"0.577539",
"0.5723072",
"0.57081443",
"0.5707813",
"0.5703675",
"0.57034284",
"0.56929755",
"0.5645915",
"0.56344527",
"0.5629131",
"0.5621118",
"0.560728",
"0.5606446",
"0.5603098",
"0.5589513",
"0.5574564",
"0.5571292",
"0.555655",
"0.55486006",
"0.55427563",
"0.55258596",
"0.5516472",
"0.55098146",
"0.55052716",
"0.5485258",
"0.5483412",
"0.5464387",
"0.5463148",
"0.54580975",
"0.5455611",
"0.5454817",
"0.5454475",
"0.5448318",
"0.54458195",
"0.54355997",
"0.54217786",
"0.5403623",
"0.5400977",
"0.53977",
"0.5396131",
"0.53949046",
"0.5392685",
"0.5380895",
"0.5375678",
"0.53739357",
"0.53703064",
"0.53683877",
"0.5366036",
"0.5362409",
"0.53482664",
"0.53368217",
"0.5335092",
"0.53299975",
"0.5319072",
"0.5318524",
"0.5316011",
"0.5311773",
"0.5307817",
"0.5300301",
"0.52980924",
"0.5296539",
"0.5286256",
"0.526716",
"0.52612454",
"0.5260494",
"0.525656",
"0.52535886",
"0.5251901",
"0.52481616",
"0.52470195",
"0.52438813",
"0.52416766",
"0.5228752",
"0.522099",
"0.52196807",
"0.52092296",
"0.52057165",
"0.5199665",
"0.5192351",
"0.51904416",
"0.51726353",
"0.51704884",
"0.51704484",
"0.51676387",
"0.51609457",
"0.51504844",
"0.51496375"
] | 0.5508781 | 36 |
Plots all posterior iterations using matplotlib. | def plot_posteriors(self, plotType='line',
plotEvery=1, figSize=(5,4)):
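        """Plot all stored posterior iterations; plotType is 'line', 'bar', or 'point'."""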
# Create figure
fig = plt.figure(figsize=figSize)
# Create colormap
colors = cm.rainbow(np.linspace(0, 1, len(self.posterior)))
# Determine plot type
if plotType=='line':
plt.plot(self.hypotheses,
self.posterior[0::plotEvery,:].T)
elif plotType=='bar':
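            # Overlay one translucent bar series per posterior iteration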
for row, co in zip(self.posterior, colors):
plt.bar(self.hypotheses, row, width=0.25,
align='center', alpha=0.5, color=co)
elif plotType=='point':
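            # Scatter one point series per posterior iteration, colored by iteration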
for row, co in zip(self.posterior, colors):
plt.scatter(self.hypotheses, row,
alpha=1.0, color=co)
else:
sys.exit('Plot type not recognized.')
        # Legend labels: the iteration index of each plotted posterior
plt.legend(np.arange(np.shape(self.posterior[0::plotEvery,:].T)[1])*plotEvery,
loc='center left',
bbox_to_anchor=(1,0.5),
title='Iteration')
plt.xlabel('Hypotheses', fontsize=14)
plt.ylabel('Probability', fontsize=14)
plt.ticklabel_format(useOffset=False)
        # If fewer than 20 hypotheses, treat xticks as categorical
if len(self.hypotheses) < 20:
plt.xticks(self.hypotheses)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_iterations(iters):\n axes = [plt.plot(lin.X, lin.Y, color='k', alpha=.15) for lin in iters]\n return axes",
"def plotPosteriors(posteriors):\n for i,p in enumerate(posteriors):\n plt.hist(p,bins=20,histtype='stepfilled',alpha=0.5,\n density=True,label='Bin {0}'.format(i))\n plt.legend()\n plt.ylabel(\"Probability\")\n plt.xlabel(\"Posterior\")\n\n return",
"def plot_posteriors(chain, P, nplot='all'):\n\n if 'posterior_plots' in opt:\n temp = opt['posterior_plots'].split(',')\n pairs = []\n for pair in temp:\n pairs.append(pair.split())\n nplot = len(pairs)\n elif nplot == 'all':\n nplot = chain.shape[-1]\n\n\n nrows, ncols = get_nrows_ncols(nplot)\n fig,axes = get_fig_axes(nrows, ncols, nplot)\n\n for i,ax in enumerate(axes):\n if 'posterior_plots' in opt:\n i0 = P['names'].index(pairs[i][0])\n i1 = P['names'].index(pairs[i][1])\n else:\n i0 = i\n i1 = i + 1\n if i1 == npar:\n i1 = 0\n\n #ax.plot(chain[:,0,i], chain[:,0,j], '.r', ms=4, label='p$_{initial}$')\n dhist(chain[:, i0], chain[:, i1],\n xbins=P['bins'][i0], ybins=P['bins'][i1],\n fmt='.', ms=1.5, c='0.5', chist='b', ax=ax, loc='left, bottom')\n\n #if contours:\n # i1sig = get_levels(np.array([chain[:,i], chain[:,j]]).T,)\n # #cont = ax.contour(*par, colors='k',linewidths=0.5)\n # for ind in P.ijoint_sig:\n # x,y = chain[:,i][ind], chain[:,j][ind]\n # delaunay = Delaunay(np.array([x, y]).T)\n # for i0,i1 in delaunay.convex_hull:\n # ax.plot([x[i0], x[i1]], [y[i0], y[i1]], 'k', lw=0.5)\n x,y = chain[:,i0][P['ijoint_sig'][1]], chain[:,i1][P['ijoint_sig'][1]]\n ax.plot(x,y,'g.', ms=3, mew=0)\n x,y = chain[:,i0][P['ijoint_sig'][0]], chain[:,i1][P['ijoint_sig'][0]]\n ax.plot(x,y,'r.', ms=3, mew=0)\n\n ax.plot(P['ml'][i0], P['ml'][i1], 'xk', ms=12, mew=4)\n ax.plot(P['ml'][i0], P['ml'][i1], 'xr', ms=10, mew=2)\n\n c = 'crimson'\n ax.axvline(P['p1sig'][i0][0], ymax=0.2, color=c, lw=0.5)\n ax.axvline(P['p1sig'][i0][1], ymax=0.2, color=c, lw=0.5)\n ax.axhline(P['p1sig'][i1][0], xmax=0.2, color=c, lw=0.5)\n ax.axhline(P['p1sig'][i1][1], xmax=0.2, color=c, lw=0.5)\n ax.axvline(P['median'][i0], ymax=0.2, color=c, lw=1.5)\n ax.axhline(P['median'][i1], xmax=0.2, color=c, lw=1.5)\n\n puttext(0.95, 0.05, P['names'][i0], ax, fontsize=16, ha='right')\n puttext(0.05, 0.95, P['names'][i1], ax, fontsize=16, va='top')\n x0, x1 = np.percentile(chain[:,i0], [5, 95])\n dx = x1 - x0\n ax.set_xlim(x0 - dx, x1 + dx)\n y0, y1 = np.percentile(chain[:,i1], [5, 95])\n dy = y1 - y0\n ax.set_ylim(y0 - dy, y1 + dy)\n\n return fig, axes",
"def fig_2():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig2.png'\n posteriors_plot(model, features, filters, figname, idx=-3)",
"def _plot_posteriors(self, variants=[]):\n if variants == []:\n variants = list(self.posteriors.keys())\n for variant in variants:\n sns.kdeplot(self.posteriors[variant].get_posterior_sample(),\n shade=True,\n color=self.posteriors[variant].get_color())\n plt.legend(labels=variants, loc='upper right')\n if self.prior_function == 'beta':\n plt.xlabel('Conversion Rate')\n elif (self.prior_function == 'log-normal'\n or self.prior_function == 'normal'):\n plt.xlabel(self.metric)\n sns.despine(left=True)\n plt.yticks([], [])\n title = 'Distribution(s) for {0} for {1}'.format(\n self._stringify_variants(variants),\n self.metric)\n title = self._format_title(title)\n plt.title(title)\n if self.prior_function == 'beta':\n locs, labels = plt.xticks()\n labels = self._format_axis_as_percent(locs, labels)\n plt.xticks(locs, labels=labels)",
"def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()",
"def plot_learning(self):\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel(\"Fitness\")\n plt.xlabel(\"Iteration\")\n plt.show()",
"def plot_posteriors_burn(chain, P, npar='all'):\n nwalkers, nsamples, npartot = chain.shape\n c = chain.reshape(-1, npartot)\n\n if npar == 'all':\n npar = npartot\n\n nrows, ncols = get_nrows_ncols(npar)\n fig, axes = get_fig_axes(nrows, ncols, npar)\n\n for i,ax in enumerate(axes):\n j = i+1\n if j == npar:\n j = 0\n\n ax.plot(c[:, i], c[:, j], '.', ms=1, color='0.5')\n\n # plot initial walker positions\n ax.plot(chain[:,0,i], chain[:,0,j], '.r', ms=4, label='p$_{initial}$')\n\n # and final positions\n ax.plot(chain[:,-1,i], chain[:,-1,j], '.y', ms=4, label='p$_{final}$')\n\n ax.plot(P['ml'][i], P['ml'][j], 'xk', ms=12, mew=4)\n ax.plot(P['ml'][i], P['ml'][j], 'xr', ms=10, mew=2) \n\n puttext(0.95, 0.05, P['names'][i], ax, fontsize=16, ha='right')\n puttext(0.05, 0.95, P['names'][j], ax, fontsize=16, va='top')\n x0, x1 = chain[:, 0, i].min(), chain[:, 0, i].max()\n dx = x1 - x0\n ax.set_xlim(x0 - 0.1*dx, x1 + 0.1*dx)\n y0, y1 = chain[:, 0, j].min(), chain[:, 0, j].max()\n dy = y1 - y0\n ax.set_ylim(y0 - 0.1*dy, y1 + 0.1*dy)\n\n axes[0].legend()\n return fig, axes",
"def plot_results(infer_images, inference_predicted_class, inference_predictions, class_names=['plants', 'water']):\n plt.style.use(['dark_background', 'bmh'])\n rc('figure', figsize=(8, 8), max_open_warning=False)\n rc('axes', facecolor='none')\n plt.figure(figsize=(15, 15))\n\n for i, (infer_img, _) in enumerate(infer_images.take(10)):\n ax = plt.subplot(5, 2, i + 1)\n plt.imshow(infer_img.numpy()/255)\n\n # Find the predicted class from predictions\n m = \"Predicted: {}, {:.2f}%\".format(\n class_names[inference_predicted_class[i]], inference_predictions[i]*100)\n plt.title(m)\n plt.axis(\"off\")\n plt.show()",
"def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()",
"def plot_all(show=True):\n fig, axes = plt.subplots(max_iterations, 1, figsize=(6, 12))\n for t in range(max_iterations):\n with open('results/%s/df_%d.pkl' % (id, t), 'rb') as f:\n df = pickle.load(f)\n with open('results/%s/w_%d.pkl' % (id, t), 'rb') as f:\n w = pickle.load(f)\n axes[t].hist2d(x=df['vision'], y=df['metab'], weights=w, density=True,\n bins=((xticks, yticks)), cmap='magma')\n axes[t].set_ylabel('max metabolism')\n axes[t].set_xticks(vision_domain)\n axes[t].set_yticks((2, 3, 4))\n axes[3].set_xlabel('max vision')\n fig.tight_layout()\n if show:\n plt.show()\n else:\n plt.savefig('results/%s/abc_results.pdf' % id)",
"def plot(self):\n\t\tself.plotOfSpect().plot()",
"def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()",
"def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()",
"def fdplot(self, imx):\n fig = plt.figure()\n maxval = np.max(imx)\n ims = list(map(lambda im: [plt.imshow(np.fabs(im),norm=colors.Normalize(0.0,maxval))], imx))\n animation = anim.ArtistAnimation(fig,ims,interval=50)\n plt.show()",
"def plot(self):\n pass",
"def plot_iterations():\n\n file = open('data/parallelSDC_iterations_precond_MPI.pkl', 'rb')\n results = pickle.load(file)\n\n # find the lists/header required for plotting\n qd_type_list = []\n setup_list = []\n for key in results.keys():\n if isinstance(key, ID):\n if key.qd_type not in qd_type_list:\n qd_type_list.append(key.qd_type)\n elif isinstance(key, str):\n setup_list.append(key)\n print('Found these type of preconditioners:', qd_type_list)\n print('Found these setups:', setup_list)\n\n assert len(qd_type_list) == 5, 'ERROR did not find four preconditioners, got %s' % qd_type_list\n assert len(setup_list) == 4, 'ERROR: did not find four setup, got %s' % setup_list\n\n qd_type_list = ['IEpar', 'Qpar', 'MIN', 'MIN3', 'MIN_GT']\n marker_list = ['s', 'o', '^', 'v', 'x']\n color_list = ['r', 'g', 'b', 'c', 'm']\n\n plt_helper.setup_mpl()\n\n # loop over setups and Q-delta types: one figure per setup, all Qds in one plot\n for setup in setup_list:\n plt_helper.newfig(textwidth=238.96, scale=0.89)\n\n for qd_type, marker, color in zip(qd_type_list, marker_list, color_list):\n niter = np.zeros(len(results[setup][1]))\n for key in results.keys():\n if isinstance(key, ID):\n if key.setup == setup and key.qd_type == qd_type:\n xvalue = results[setup][1].index(key.param)\n niter[xvalue] = results[key]\n ls = '-'\n lw = 1\n plt_helper.plt.semilogx(\n results[setup][1],\n niter,\n label=qd_type,\n lw=lw,\n linestyle=ls,\n color=color,\n marker=marker,\n markeredgecolor='k',\n )\n\n if setup == 'heat':\n xlabel = r'$\\nu$'\n elif setup == 'advection':\n xlabel = r'$c$'\n elif setup == 'fisher':\n xlabel = r'$\\lambda_0$'\n elif setup == 'vanderpol':\n xlabel = r'$\\mu$'\n else:\n print('Setup not implemented..', setup)\n exit()\n\n plt_helper.plt.ylim([0, 60])\n plt_helper.plt.legend(loc=2, ncol=1)\n plt_helper.plt.ylabel('number of iterations')\n plt_helper.plt.xlabel(xlabel)\n plt_helper.plt.grid()\n\n # save plot as PDF and PGF\n fname = 'data/parallelSDC_preconditioner_MPI_' + setup\n plt_helper.savefig(fname)\n\n assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'\n # assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'\n assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'",
"def initial_plots(runs):\n for run in runs.keys():\n meta = runs[run]\n plot_pdfs(meta)\n plot_priorsamps(meta)\n plot_ivals(meta)\n# if meta.truNz is not None:\n# plot_true(meta)\n timesaver(meta,'iplot',meta.key)",
"def map_plot(self, iter_no):\n \n m = self._m\n n = self._n\n plt.figure()\n label=np.zeros(m*n)\n self._trained = True\n mapped = self.map_vects(datanorm)\n mapped=tuple(map(tuple, mapped))\n c=Counter(mapped)\n \n c= sorted(c.items(), key=itemgetter(1))\n a=[m*n]\n for i in range(0,len(c)):\n x=(((c[i])[0])[0])\n y=(((c[i])[0])[1])\n z=((c[i])[1])\n plt.plot(x, y, 'ro', markersize= z/(2*m*n)) \n plt.savefig('exoplanet{}.png'.format(iter_no))\n p=plt.imread('exoplanet{}.png'.format(iter_no))\n imgs.append(p)\n plt.show()\n plt.close()\n print(c)\n self._trained = False",
"def plot_posterior(x_train, y_train, x_test, kernel, params): \n\n\tparams = combine_params(params)\n\tfor p in params:\n\t\ty, mu, sigma_sq = sample_GP_posterior(x_train, y_train, x_test, \n\t\t\t\t\t\t\t\t\t\t\t zero_mean, kernel, p)\n\t\tmake_plot(x_test, y)\n\t\tplt.plot(x_train, y_train, 'ko')\n\t\tplt.gca().fill_between(x_test.flat, mu-2*sigma_sq, \n\t\t\t\t\t\t\t mu+2*sigma_sq, color=\"#dddddd\")\n\t\tplt.savefig('gp_plot_' + str(plt.gcf().number) + str('.png'),\n\t\t\t\t\tbbox_inches='tight')",
"def plot(self, iteration):\n plt.cla()\n self.plot_function()\n x = self._population[:,0]\n y = self._population[:,1]\n z = self._last_evaluation\n max_x, max_y = self._global_optimal_position\n max_z = self._global_optimal_value\n self._axes.scatter3D(x, y, z, c=\"r\")\n self._axes.scatter(max_x, max_y, max_z, marker=\"*\", s=500, c=\"b\")\n\n self._axes.set_xlabel(f\"x (best={max_x})\")\n self._axes.set_ylabel(f\"y (best={max_y})\")\n self._axes.set_zlabel(f\"z (best={max_z})\")\n self._axes.text(self._lower_bound, self._lower_bound, 1.5*max_z, f\"#Iteration {iteration}\")\n\n plt.gcf().canvas.mpl_connect(\"key_press_event\", self.key_press_handler)\n if self._wait_for_key:\n while not plt.waitforbuttonpress():\n pass\n else:\n plt.pause(0.5)",
"def plot(self):\n\t\tself.plotOfXray().plot()",
"def plot_priors(params):\n prior_dicts = {'ic' : params['ic_prior'], 'ii' : params['ii_prior']}\n pidxs = (pidx for pidx in onp.arange(1,12))\n f = plt.figure(figsize=(12,8))\n for k in prior_dicts:\n for j in prior_dicts[k]:\n plt.subplot(2,3,next(pidxs));\n data = prior_dicts[k][j]\n if \"log\" in j:\n data = onp.exp(data)\n j_title = j.strip('log')\n else:\n j_title = j\n plt.stem(data)\n plt.title(k + ' ' + j_title)\n return f",
"def plot(self):\n\t\tself.plotOfSpect()",
"def PlotIterations(metadata, data):\n\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style lines')\n gp.clear()\n gp.xlabel('iterations')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n styles = {}\n line_style = 1\n\n for dataset in data:\n dataset.RescaleTo(metadata.iterations)\n x = numpy.arange(len(dataset.data), dtype='int_')\n if not dataset.name in styles:\n styles[dataset.name] = line_style\n line_style += 1\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='lines ls %d' % styles[dataset.name])\n else: # no need to repeat a title that exists already.\n d = Gnuplot.Data(x, dataset.data,\n with_='lines ls %d' % styles[dataset.name])\n\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')",
"def montage(W):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(2, 5)\n for i in range(2):\n for j in range(5):\n im = W[i * 5 + j, :].reshape(32, 32, 3, order='F')\n sim = (im - np.min(im[:])) / (np.max(im[:]) - np.min(im[:]))\n sim = sim.transpose(1, 0, 2)\n ax[i][j].imshow(sim, interpolation='nearest')\n ax[i][j].set_title(\"y=\" + str(5 * i + j))\n ax[i][j].axis('off')\n #plt.savefig(\"plots/ \"+fname +\".png\")\n plt.show()",
"def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, 
axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig",
"def showPriors(self, figNum=1):\n\n if not self.plotPrior: ### Do nothing if we don't want to plot\n return\n\n if len(self.plotFineX) < 1:\n return\n\n fig=plt.figure(figNum)\n fig.clf()\n fig.subplots_adjust(wspace=0.3, hspace=0.4)\n\n ### Set up the number of rows from the number of columns and\n ### the number of parameter-sets we've evaluated\n\n nSets = len(self.plotFineX)\n self.plotNrows = int(np.ceil(nSets/float(self.plotNcols)) )\n\n # OK now we loop through and plot\n for iPlot in range(nSets):\n thisAx = fig.add_subplot(self.plotNcols, self.plotNrows, iPlot+1)\n\n thisX = self.plotFineX[iPlot]\n thisY = self.plotFineY[iPlot]\n thisL = self.plotLabels[iPlot]\n\n ### Strip off the units to create the Y-axis\n sLabelY = thisL.split('(')[0].strip()\n\n dum = thisAx.plot(thisX, thisY, 'b-', lw=2)\n thisAx.set_xlabel(thisL, fontsize=10)\n thisAx.set_ylabel(r'Prior(%s)' % (sLabelY), fontsize=10)\n thisAx.tick_params(axis='both', labelsize=10)\n\n ### give a little more room in the axis range. Clumsy hack\n ### for now...\n yLo = np.min(thisY)\n yHi = np.max(thisY)\n yOff = (yHi-yLo)*0.1\n thisAx.set_ylim(yLo - yOff, yHi+yOff)\n\n if self.namePrior.find('ixed') > 0:\n if self.mixedNames[iPlot].find('Log') > -1:\n xLo = self.hyper[0][iPlot] * 0.1\n thisAx.set_xlim(xLo, np.max(thisX))\n thisAx.set_xscale('log')\n\n ### show the grid\n dum = thisAx.grid(which='both')\n \n # save the figure\n if len(self.figPrior) > 3:\n fig.savefig(self.figPrior)",
"def plot_all(self) -> None:\n self.__plot_si_cf_plane()\n self.__plot_convex_hull()\n self.__plot_fixed_radius()\n self.__plot_delaunay()",
"def plot_priorsamps(meta):\n priorsamps = np.array(meta.priordist.sample_ps(len(meta.colors))[0])\n f = plt.figure(figsize=(5,10))\n sps_log = f.add_subplot(2,1,1)\n sps_lin = f.add_subplot(2,1,2)\n sps_log.set_title(meta.name)\n f.subplots_adjust(hspace=0, wspace=0)\n sps_log.set_ylabel(r'$\\ln[p(z|\\vec{\\theta})]$')\n sps_lin.set_xlabel(r'$z$')\n sps_lin.set_ylabel(r'$p(\\vec{\\theta})$')\n sps_log.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n sps_lin.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n plotstep(sps_log,meta.binends,meta.logintPz,l=r'Log Interim Prior $\\ln[p(z|\\vec{\\theta}^{0})$]')\n plotstep(sps_lin,meta.binends,meta.intPz,l=r'Interim Prior $p(z|\\vec{\\theta}^{0})$')\n for c in lrange(meta.colors):\n plotstep(sps_log,meta.binends,priorsamps[c]-np.log(meta.ngals),c=meta.colors[c])\n plotstep(sps_lin,meta.binends,np.exp(priorsamps[c]-np.log(meta.ngals)),c=meta.colors[c])\n sps_log.legend(loc='upper right',fontsize='x-small')\n sps_lin.legend(loc='upper right',fontsize='x-small')\n f.savefig(os.path.join(meta.topdir, 'priorsamps.pdf'),bbox_inches='tight', pad_inches = 0)\n return",
"def plot(self):\n\t\tself.plotOfTF().plot()",
"def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)",
"def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()",
"def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################",
"def plot(self):\n fig, ax = plt.subplots()\n ax.set_title(\"Covid-19 Progression Simulation\")\n ax.set_xlabel(\"X Position\")\n ax.set_ylabel(\"Y Position\")\n\n x_values = np.array([])\n y_values = np.array([])\n color_values = np.array([])\n\n for p in self.persons:\n x_values = np.append(x_values, p.position[0])\n y_values = np.append(y_values, p.position[1])\n color_values = np.append(color_values, self.color(p.state))\n\n colors = [\"green\", \"red\", \"blue\", \"black\"]\n\n scatter = ax.scatter(x_values, y_values,\n c=color_values, vmin=0, vmax=100)\n\n ax.legend(handles=self.legend_elements, loc='upper right')\n\n self.anim = manim.FuncAnimation(\n fig, self.animate, interval=self.update_interval, fargs=(self, ax, scatter))\n\n plt.tight_layout()\n plt.show()",
"def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]",
"def posteriors_plot(model, features, filters, figname, fgal=0.5, N=60000,\n idx=-1, seed=123):\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n\n # only data within window\n dlt = 0.05\n ind = (X[:, 0] > 21 - dlt) & (X[:, 0] < 21 + dlt)\n X = X[ind]\n Xcov = Xcov[ind]\n\n # find stars and galaxies\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # pick one of each (ind = 510, 365 on my machine)\n np.random.seed(seed)\n i = np.random.randint(X[sind].shape[0])\n star = X[sind][i]\n starcov = Xcov[sind][i]\n i = np.random.randint(X[sind].shape[0])\n gal = X[gind][i]\n galcov = Xcov[gind][i]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n Ns = 10000\n Nd = X.shape[1]\n a, m, v = model.posterior(star.reshape(1, Nd), starcov.reshape(1, Nd, Nd))\n star_post = model.sample(a[0], m[0], v[0], size=Ns)\n a, m, v = model.posterior(gal.reshape(1, Nd), galcov.reshape(1, Nd, Nd))\n gal_post = model.sample(a[0], m[0], v[0], size=Ns)\n\n # fig parms\n fs = 5\n ms1, mew1 = 8, 2\n ms2, mew2 = 12, 3\n nb = 5\n lw = 2\n lsize = 20\n fac = 1.2\n idx = [[0, -1], [2, 3], [3, 4]]\n bins = [50, 50, 50]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n # figure\n f = pl.figure(figsize=(3 * fs, 2 * fs))\n X = [star_post, gal_post]\n pl.subplots_adjust(wspace=0.3)\n for i in range(len(X)):\n if i == 0:\n X = star\n Xcov = starcov\n P = star_post\n else:\n X = gal\n Xcov = galcov\n P = gal_post\n for j in range(len(idx)):\n ax = pl.subplot(2, 3, 3 * i + j + 1)\n post = np.vstack((P[:,idx[j][0]], P[:,idx[j][1]]))\n mu = np.mean(post, axis=1)\n cov = np.cov(post)\n pl.plot(mu[0], mu[1], 'ks', ms=ms1, mew=mew1)\n pl.plot(X[idx[j][0]], X[idx[j][1]], 'k+', ms=ms2, mew=mew2)\n error_ellipse(mu, cov, ax=ax, lw=2)\n error_ellipse(X[idx[j]], Xcov[idx[j]][:,idx[j]], ax=ax, lw=2, \n ls='dashed')\n d = np.sqrt(np.diag(Xcov))\n mn = X[idx[j][0]] - fac * d[idx[j][0]]\n mx = X[idx[j][0]] + fac * d[idx[j][0]]\n pl.xlim((mn, mx))\n mn = X[idx[j][1]] - fac * d[idx[j][1]]\n mx = X[idx[j][1]] + fac * d[idx[j][1]]\n pl.ylim((mn, mx))\n pl.xlabel(xlab[j], fontsize=lsize)\n pl.ylabel(ylab[j], fontsize=lsize)\n pl.locator_params(nbins=nb)\n f.savefig(figname, bbox_inches='tight')",
"def xx_plot(epoch, model, features, filters, figname, fgal=0.5):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = model.sample(a[i], m[i], v[i], size=1)\n\n lo = [0.01, 0.02, 0.06]\n hi = [0.99, 0.96, 0.98]\n idx = [0, 1, 4]\n bins = [100, 100, 300]\n label = ['psfmag $r$', 'modelmag $u-g$', 'modelmag $i-z$']\n N = len(idx)\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(N * fs, 2 * fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(N):\n x = X[:, idx[i]]\n y = Xcoadd[:, idx[i]]\n p = posts[:, idx[i]]\n ind = (y > -999) & (Xcoaddcov[:, idx[i]][:, idx[i]] < 10.)\n x = x[ind]\n y = y[ind]\n p = p[ind]\n ax = pl.subplot(2, N, i + 1)\n v = np.sort(x)\n mn, mx = v[np.int(lo[i] * x.shape[0])], v[np.int(hi[i] * x.shape[0])]\n hist2d(x, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('Single Epoch ' + label[i], fontsize=lsize)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n ax = pl.subplot(2, N, i + 4)\n hist2d(p, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('XD Posterior ' + label[i], fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')",
"def plot(path, subjects):\n transformToXYZmm = np.array([[-3.125, 0, 0, 81.250], [0, 3.125, 0, -115.625], [0, 0, 6, -54.000], [0, 0, 0, 1.000]])\n data = data_load.load_data(path, subjects)\n dimx = int(data[0][\"meta\"][\"dimx\"][0])\n dimy = int(data[0][\"meta\"][\"dimy\"][0])\n dimz = int(data[0][\"meta\"][\"dimz\"][0])\n coordToCol = data[0][\"meta\"][\"coordToCol\"][0][0]\n images = {}\n max_val = 0\n voxels = np.load(\"data/general_selected_500_1.npy\")\n directory = os.listdir(\"data/input/\")\n bar = pyprind.ProgBar(len(directory), title='Info extraction and Image Building')\n bar2 = pyprind.ProgBar(len(images.keys()), title='Saving Pictures')\n for file in directory:\n file_name = \"data/input/{}\".format(file)\n fh = open(file_name)\n activation_values = np.asarray(list(map(lambda x: float(x), filter(lambda x: x != '', fh.read().split(\",\")))))\n fh.close()\n plot_matrix = np.zeros((dimx, dimy, dimz))\n for x in range(dimx):\n for y in range(dimy):\n for z in range(dimz):\n indice = coordToCol[x][y][z]\n if indice != 0:\n if indice in list(voxels):\n voxel_indice = list(voxels).index(indice)\n value = activation_values[voxel_indice]\n if abs(value) > max_val:\n max_val = abs(value)\n plot_matrix[x][y][z] = value\n image = nib.Nifti1Image(plot_matrix, transformToXYZmm)\n images[file_name] = image\n bar.update(force_flush=True)\n print(bar)\n for image in images:\n plotting.plot_glass_brain(images[image], display_mode='ortho', vmax=max_val, plot_abs=False, threshold=None, colorbar=True, output_file=\"{}-wom1.png\".format(image))\n bar2.update(force_flush=True)\n print(bar2)",
"def make_posterior_plots(self, combined=False):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n\n if combined:\n outdir = os.path.join(self.outdir, 'CombinedPosteriors')\n maintitle = self.make_main_title(end='Posteriors')\n else:\n outdir = os.path.join(self.outdir, 'IndividualPosteriors')\n maintitle = self.make_main_title(end='Posterior')\n mkdir(outdir)\n\n for injkey in self.values.keys():\n for fhkey in self.values[injkey].keys():\n # Set up multi-plot if needed\n if combined:\n num_rows = self.get_num_rows(\n data=self.values[injkey][fhkey],\n omit_metric=False\n )\n plt.figure(figsize=(20, 5*num_rows+2))\n subplotnum = 1\n else:\n subplotnum = None\n # Loop through the systematics\n for systkey in self.values[injkey][fhkey].keys():\n fittitle = self.make_fit_title(\n fhkey=fhkey,\n trials=self.num_trials\n )\n systunits = self.values[injkey][fhkey][systkey]['units']\n if systkey == 'metric_val':\n xlabel = self.tex_axis_label(\n self.values[injkey][fhkey][systkey]['type']\n )\n else:\n xlabel = self.tex_axis_label(systkey)\n if not systunits == 'dimensionless':\n xlabel += r' (%s)'%self.tex_axis_label(systunits)\n # Specify the subplot, if necessary\n if combined:\n plt.subplot(num_rows, 4, subplotnum)\n self.make_1d_hist_plot(\n data=np.array(\n self.values[injkey][fhkey][systkey]['vals']\n ),\n xlabel=xlabel,\n title=maintitle+r'\\\\'+fittitle,\n ylabel='Number of Trials',\n subplotnum=subplotnum\n )\n # Add the details i.e. injected/fiducial lines and priors\n plt.ylim(0, 1.35*plt.ylim()[1])\n if not systkey == 'metric_val':\n self.add_inj_fid_lines(\n injkey=injkey,\n systkey=systkey,\n fhkey=fhkey\n )\n self.add_prior_region(\n injkey=injkey,\n systkey=systkey,\n fhkey=fhkey\n )\n plt.legend(\n loc='upper left',\n fontsize=12,\n framealpha=1.0\n )\n plt.subplots_adjust(\n left=0.10,\n right=0.90,\n top=0.85,\n bottom=0.11\n )\n # Advance the subplot number, if necessary\n if combined:\n subplotnum += 1\n # Else, save/close this plot\n else:\n self.save_plot(\n fhkey=fhkey,\n outdir=outdir,\n end='%s_posterior'%systkey\n )\n plt.close()\n # Save the whole canvas, if necessary\n if combined:\n plt.suptitle(maintitle+r'\\\\'+fittitle, fontsize=36)\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n self.save_plot(\n fhkey=fhkey,\n outdir=outdir,\n end='posteriors'\n )\n plt.close()",
"def plot_i(im, Prior, nit, chi2_1, chi2_2, ipynb=False):\n\n plt.ion()\n plt.pause(0.00001)\n plt.clf()\n\n plt.imshow(im.reshape(Prior.ydim,Prior.xdim), cmap=plt.get_cmap('afmhot'), interpolation='gaussian')\n xticks = ticks(Prior.xdim, Prior.psize/RADPERAS/1e-6)\n yticks = ticks(Prior.ydim, Prior.psize/RADPERAS/1e-6)\n plt.xticks(xticks[0], xticks[1])\n plt.yticks(yticks[0], yticks[1])\n plt.xlabel('Relative RA ($\\mu$as)')\n plt.ylabel('Relative Dec ($\\mu$as)')\n plt.title(\"step: %i $\\chi^2_1$: %f $\\chi^2_2$: %f\" % (nit, chi2_1, chi2_2), fontsize=20)\n #plt.draw()\n\n if ipynb:\n display.clear_output()\n display.display(plt.gcf())",
"def plot_posterior_model_probabilities(self, plot_opts=dict()):\n \n fig, axes = plt.subplots(nrows=self.K, ncols=1, \n sharex=True, sharey=True, \n figsize=(6, 6*self.K))\n \n cmodel_color = plot_opts.get('cmodel_color', 'black')\n dmodel_color = plot_opts.get('dmodel_color', '#cc7d21')\n for i, kernel_name in enumerate(self.kernel_dict.keys()):\n summary = self.results[kernel_name].summary(b=self.b) \n pmc, pmd = summary['pmp']['pmc'], summary['pmp']['pmd']\n \n axes[i].pie([pmc, pmd], \n colors=[cmodel_color, dmodel_color], \n labels=[np.round(pmc, 2), np.round(pmd, 2)], \n explode=[0.05, 0.05]) \n axes[i].set_title('{:s} kernel'.format(kernel_name))\n axes[-1].legend(loc='best')\n for ax in axes:\n ax.set_aspect(1.0)\n \n return fig, axes",
"def plot(self, epochs, title=\"Learning Rate Schedule\"):\n lrs = [self(i) for i in epochs]\n\n # plot the learning rate schedule\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(epochs, lrs)\n plt.title(title)\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Learning Rate\")\n plt.close()",
"def plot(self):\n\t\tself.plotOfLoopVoltage()",
"def parallel_plot(data, rg):\n my_colors = list(islice(cycle([\"b\", \"r\", \"g\", \"y\", \"k\"]), None, len(data)))\n plt.figure(figsize=(18, 8)).gca().axes.set_ylim(rg)\n parallel_coordinates(data, \"prediction\", color=my_colors, marker=\"o\")",
"def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)",
"def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")",
"def plot(self):\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n from sys import stderr\n print(\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\", file=stderr)\n raise\n\n x_min = np.min(self.knot_vector)\n x_max = np.max(self.knot_vector)\n\n x = np.linspace(x_min, x_max, num=1000)\n\n ns = np.array([self(i) for i in x]).T\n\n for n in ns:\n plt.plot(x, n)\n\n return plt.show()",
"def show():\n\tplt.show()",
"def plot_ivals(meta):\n f = plt.figure(figsize=(5, 5))#plt.subplots(1, nsurvs, figsize=(5*nsurvs,5))\n sps = f.add_subplot(1,1,1)\n f.subplots_adjust(hspace=0, wspace=0)\n sps.set_ylabel(r'$\\ln N(z)$')\n sps.set_xlabel(r'$z$')\n sps.set_xlim(meta.binlos[0]-meta.bindif,meta.binhis[-1]+meta.bindif)\n plotstep(sps,meta.binends,meta.mean)\n for i in lrange(meta.ivals):\n plotstep(sps,meta.binends,meta.ivals[i],a=1./meta.factor,c=meta.colors[i%len(meta.colors)])\n f.savefig(os.path.join(meta.topdir,'initializations.pdf'),bbox_inches='tight', pad_inches = 0)#,dpi=100)\n return",
"def plot(self, corner = True):\n pos = self.posterior_samples\n if self.verbose>=3 and self.NS.prior_sampling is False:\n pri = self.prior_samples\n mc = self.mcmc_samples\n elif self.verbose>=3 or self.NS.prior_sampling is True:\n pri = self.prior_samples\n mc = None\n else:\n pri = None\n mc = None\n from . import plot\n if self.NS.prior_sampling is False:\n for n in pos.dtype.names:\n plot.plot_hist(pos[n].ravel(), name = n,\n prior_samples = self.prior_samples[n].ravel() if pri is not None else None,\n mcmc_samples = self.mcmc_samples[n].ravel() if mc is not None else None,\n filename = os.path.join(self.output,'posterior_{0}.pdf'.format(n)))\n for n in self.nested_samples.dtype.names:\n plot.plot_chain(self.nested_samples[n],name=n,filename=os.path.join(self.output,'nschain_{0}.pdf'.format(n)))\n if self.NS.prior_sampling is False:\n import numpy as np\n plotting_posteriors = np.squeeze(pos.view((pos.dtype[0], len(pos.dtype.names))))\n if pri is not None:\n plotting_priors = np.squeeze(pri.view((pri.dtype[0], len(pri.dtype.names))))\n else:\n plotting_priors = None\n\n if mc is not None:\n plotting_mcmc = np.squeeze(mc.view((mc.dtype[0], len(mc.dtype.names))))\n else:\n plotting_mcmc = None\n\n if corner:\n plot.plot_corner(plotting_posteriors,\n ps=plotting_priors,\n ms=plotting_mcmc,\n labels=pos.dtype.names,\n filename=os.path.join(self.output,'corner.pdf'))\n plot.plot_indices(self.NS.insertion_indices, filename=os.path.join(self.output, 'insertion_indices.pdf'))",
"def plot_decompose(self):\n try:\n assert self._arr_seasonal is not None\n except AssertionError:\n self.ts_decompose()\n\n fig, axes = plt.subplots(5, 1, figsize=(20, 9), sharex=True)\n axes[0].plot(self._res_decomp.observed)\n axes[0].set_ylabel(\"Original\")\n #\n axes[1].plot(self._arr_trend)\n axes[1].set_ylabel(\"Trend\")\n #\n axes[2].plot(self._arr_seasonal)\n axes[2].set_ylabel(\"Seasonal\")\n #\n axes[3].plot(self._arr_baseline)\n axes[3].set_ylabel(\"Baseline\")\n #\n axes[4].plot(self.residuals)\n axes[4].set_ylabel(\"Residuals\")\n #\n if self.upper_whisker_res is not None:\n axes[4].axhline(y=self.upper_whisker_res,\n xmin=0,\n xmax=1, color='m',\n label='upper_whisker',\n linestyle='--', linewidth=1.5)\n axes[4].axhline(y=-self.upper_whisker_res,\n xmin=0,\n xmax=1, color='m',\n label='upper_whisker',\n linestyle='--', linewidth=1.5)\n\n plt.gcf().autofmt_xdate()\n plt.grid(True)\n plt.show()",
"def show():\n setup()\n plt.show()",
"def plot_iter(V, Pi, params):\n n_rows = params['n_rows']\n n_cols = params['n_cols'] \n occ_grid = params['occ_grid']\n R = params['R']\n\n goal = params['goal']\n sink = params['sink']\n\n actions = ['left','right','up','down']\n\n fig1 = plt.figure(1, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if occ_grid[row, col] == 1:\n plt.text(col, n_rows - 1 - row, '0.0', color='k', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n else:\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(V[row, col]), \n color='b', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n\n fig2 = plt.figure(2, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if not Pi[row, col] == -1:\n plt.text(col, n_rows - 1 - row, actions[Pi[row, col]], \n color='k', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n fig1.canvas.draw()\n fig1.canvas.flush_events()\n fig2.canvas.draw()\n fig2.canvas.flush_events()",
"def show(self):\n plt.show()",
"def plot_graph(self) -> None:",
"def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()",
"def plot(self, solution: Matrix) -> None:\n plots.plot_matrices(\"Total Variation Denoising\", self.M, solution)",
"def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)",
"def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]",
"def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()",
"def plot_summary(self, **kwargs):\n ncols = 2\n nparams = len(self.distribution_parameter_names)\n d, r = divmod(nparams, 2)\n if r > 0:\n nrows = d + 1\n else:\n nrows = d\n\n gs = gridspec.GridSpec(nrows, ncols)\n fig = plt.figure(num=1, figsize=(16, 3*nrows))\n ax = []\n for n, parameter in enumerate(self):\n r, c = divmod(n, 2)\n ax.append(fig.add_subplot(gs[r, c]))\n\n if c == 0 and (n <= nparams-1):\n self._plot_posterior_pdf(parameter, ax[-1],\n y_label='Posterior pdf',\n x_label=parameter)\n elif n <= nparams-1:\n self._plot_posterior_pdf(parameter, ax[-1],\n y_label=None,\n x_label=parameter)\n\n fig.tight_layout()\n\n return fig, ax",
"def plot_pmf(self, **options):\n xs, ps = zip(*sorted(self.items()))\n plt.plot(xs, ps, **options)",
"def momentum_kde2_paperplot(fields):\n plt.figure(figsize=(2.65, 2.5))\n ax = plt.axes([0.18, 0.17, 0.8, 0.8])\n colorList = [med_color, high_color]\n lw = 1.5\n i = 0\n meankx_2 = []\n meankx_3 = []\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color, label='Equilibrium')\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color)\n ax.axhline(0, color='black', linestyle='--', linewidth=0.5)\n # ax.axvline(0, color='gray', linewidth=0.8, alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + \"E_{:.1e}.npy\".format(ee))\n\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n meankx_2.append(utilities.mean_kx(chi_2_i, electron_df))\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n meankx_3.append(utilities.mean_kx(chi_3_i, electron_df))\n\n ax.plot(k_ax, kdist_2, '--', linewidth=lw, color=colorList[i], label='Cold '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n ax.plot(k_ax, kdist_3, '-', linewidth=lw,color=colorList[i], label='Warm '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n i = i + 1\n # ax.plot(k_ax, kdist_f0_3, '--', linewidth=lw, color='black', label=r'$f_0$')\n # ax.plot(meankx_2,np.mean(abs(kdist_2))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n # ax.plot(meankx_3,np.mean(abs(kdist_3))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.locator_params(axis='y', nbins=6)\n ax.locator_params(axis='x', nbins=6)\n # ax.tick_params(direction='in')\n ax.set_xlim(-0.085, 0.081)\n\n plt.xlabel(r'$\\rm k_x \\, \\, (\\AA^{-1})$')\n plt.ylabel(r'Deviational occupation $\\rm \\Delta f_{\\mathbf{k}}$')\n # plt.grid(lw=0.8, linestyle='dotted')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n # plt.ylim([-1,1])\n plt.legend(frameon=False,prop={'size':different_small_size})\n plt.savefig(pp.figureLoc+'momentum_KDE2.png', dpi=600)",
"def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1",
"def plot_all(self, cmap='Greys', size=(10,10)):\n\n fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2,\n ncols=2,\n sharex=True,\n sharey=True)\n\n ax0.imshow(self.I, cmap=cmap)\n ax0.set_title(f'Original {self.I.shape}',\n fontsize=15)\n ax1.imshow(self.W, cmap=cmap)\n ax1.set_title(f'W Loadings {self.W.shape}',\n fontsize=15)\n ax2.imshow(self.H, cmap=cmap)\n ax2.set_title(f'H Loadings {self.H.shape}',\n fontsize=15)\n ax3.imshow(self.E, cmap=cmap)\n ax3.set_title(f'W * H with n={self._n_components} {self.E.shape}',\n fontsize=15)\n\n fig.set_figheight(size[0])\n fig.set_figwidth(size[1])\n fig.tight_layout()\n plt.show()",
"def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();",
"def draw(self):\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n plt.show()",
"def rho_plot2(self, pred=None):\n axRect = [0.1446, 0.2150, 0.7604, 0.7100]\n # plt.figure(22, figsize = (8.5, 11), dpi=300)\n fig, ax = plt.subplots()\n if pred is not None:\n self.rho_sub_plot(ax, axRect, pred=pred)\n else:\n self.rho_sub_plot(ax, axRect)",
"def plot(self, x, y, b, path=None):\n label = [\"atypical\", \"indeterminate\", \"negative\", \"typical\"]\n _, pred = self.cam_model.predict(x)\n for i in range(len(x)):\n image = x[i] if x.shape[-1] == 3 else np.squeeze(x[i], -1)\n\n fig, axs = plt.subplots(2, 2)\n for j in range(4):\n ax_x = [0, 1, 0, 1]\n ax_y = [0, 0, 1, 1]\n ax = axs[ax_x[j], ax_y[j]]\n p = np.argmax(pred[i])\n a = np.argmax(y[i])\n c = '(pa)' if j == p and p == a else '(p)' if j == p else '(a)' if j == a else ''\n ax.title.set_text(f\"{label[j]} {c}\")\n # hide axis ticks\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n ax.tick_params(axis='both', which='both', length=0)\n # plot original image with boxes\n ax.imshow(image, cmap=\"gray\", aspect=\"equal\")\n for box in b[i]:\n ax.add_patch(Rectangle((box[\"x\"], box[\"y\"]), box[\"width\"], box[\"height\"], linewidth=1, edgecolor=\"r\", facecolor=\"None\", alpha=0.6))\n # plot CAM\n camap = self.generate(x[i], label=j, zoom=True)\n camap = ax.imshow(camap, cmap=\"coolwarm\", aspect=\"equal\", alpha=0.6)\n #cax = fig.add_axes([ax2.get_position().x1+0.01, ax2.get_position().y0,0.02, ax2.get_position().height])\n #plt.colorbar(camap, cax=cax, orientation=\"vertical\")\n if path != None: plt.savefig(path + f\"_{i}.png\", dpi=300, format=\"png\")\n plt.show()",
"def show():\n plt.show()",
"def show():\n plt.show()",
"def show():\n plt.show()",
"def plot(self) -> None:\n if self.__fig is None:\n self.__fig = plt.figure()\n\n xv = []\n yv = []\n for x in np.arange(self.state_min(), self.state_max(), self.state_step()):\n xv.append(x)\n yv.append(self.reward(x))\n ax = self.__fig.gca()\n ax.set_xlabel('X (State)')\n ax.set_ylabel('Y (Reward)')\n ax.set_title('Reward Function')\n ax.plot(xv, yv)\n plt.pause(self.__plot_pause)\n plt.show(block=False)\n return",
"def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))",
"def plot_digits():\n digits = load_digits()\n for i in range(25):\n plt.subplot(5, 5, i + 1)\n plt.imshow(digits.images[i], cmap='binary')\n plt.axis('off')\n\n plt.show()",
"def show_plots():\n plt.show()",
"def plot_interpolation(self):\r\n self.plot_all_logcalls(True)\r\n print_log('info', 'Interpolation was finished.')",
"def make_plots(self):\n n_rounds = self.run.n_rounds\n\n log.info('Making %d frames', n_rounds)\n args = [self._get_for_parallel(i) for i in range(n_rounds)]\n self.lbv.map(_plot_helper, args)",
"def plot_params(p, hist=False, xlab=None, ylab=None):\n #print(np.ndim(p))\n\n if np.ndim(p) == 2:\n N = int((len(p[0,:])-1)/2)\n print('plot histogram of returned samples')\n f1, (ax1, ax2) = plt.subplots(2,1)\n if hist is True:\n name = 'hist'\n \n ax1.hist(p[:,:N].flatten(), bins=50, color='r', histtype='step')\n ax2.hist(p[:,N:-1], bins=50, color='k',histtype='step')\n ax2.hist(p[:,-1], bins=50, color='b',histtype='step')\n #ax1.set_xlabel(xlab[0]) \n ax1.set_ylabel(ylab[0])\n\n ax2.set_xlabel(xlab[1])\n ax2.set_ylabel(ylab[0])\n f1.suptitle('Sample distribution for parameters')\n else:\n name = 'chain'\n \n ax1.plot(p[:,:N], '-r')\n ax2.plot(p[:,N:-1], '-k')\n ax2.plot(p[:,-1], '-b')\n #ax1.set_xlabel(xlab[0]) \n ax1.set_ylabel(ylab[0])\n\n ax2.set_xlabel(xlab[0])\n ax2.set_ylabel(ylab[1])\n f1.suptitle('Sampling chains for parameters')\n # \n ax2.legend([r'$Q$',r'$U$'])\n #f1.savefig('Figures/Sampling/MH_samples_{}.png'.format(name))\n\n else:\n print('Plot maximum likelihood parameters')\n f, ((a1,a2,a3),(a4,a5,a6)) = plt.subplots(2,3, figsize=(9, 5))\n N = int((len(p)-1)/2)\n sub_plot(a1, p[:N], c='r', lab=r'$R_{{P/p}}$ [MJy/sr]')\n sub_plot(a2, p[N:-1], c='k', lab=r'$Q_{{bkgr}}$ [MJy/sr]')\n sub_plot(a3, p[-1], c='b', lab=r'$U_{{bkgr}}$ [MJy/sr]')\n\n sub_plot(a4, p[:N], c='r', hist=True, lab=r'$Q_{{bkgr}}$ [MJy/sr]')\n sub_plot(a5, p[N:-1], c='k', hist=True, lab=r'$Q_{{bkgr}}$ [MJy/sr]')\n sub_plot(a6, p[-1], c='b', hist=True, lab=r'$Q_{{bkgr}}$ [MJy/sr]')\n a1.set_title(r'$R_{{P/p}}$ [MJy/sr]')\n a2.set_title(r'$Q_{{bkgr}}$ [MJy/sr]')\n a3.set_title(r'$U_{{bkgr}}$ [MJy/sr]')\n f.suptitle('Maximum likelihood parameters')\n #f.savefig('Figures/Sampling/maxL_params.png')\n # ",
"def plot_results(epochs: int = 20, segments: int = 5, plot: bool = True):\n \"\"\"\n plt.figure(0)\n plot_approximation(\"product\", modelSetProd, 1, epochs, gpus=0)\n \"\"\"\n\n data = [\n {\n \"title\": \"Piecewise Discontinuous Function Approximation\",\n \"layer\": \"discontinuous\",\n \"model_set\": modelSetD,\n },\n {\n \"title\": \"Piecewise Continuous Function Approximation\",\n \"layer\": \"continuous\",\n \"model_set\": modelSetC,\n },\n {\n \"title\": \"Polynomial function approximation\",\n \"layer\": \"polynomial\",\n \"model_set\": modelSetP,\n },\n {\n \"title\": \"Fourier function approximation\",\n \"layer\": \"fourier\",\n \"model_set\": modelSetF,\n },\n ]\n\n for index, element in enumerate(data):\n if plot is True:\n plt.figure(index)\n plot_approximation(\n element[\"layer\"],\n element[\"model_set\"],\n 5,\n epochs,\n accelerator=\"cpu\",\n periodicity=2,\n )\n\n if plot is True:\n plt.title(\"Piecewise Discontinuous Function Approximation\")\n\n if plot is True:\n plt.show()",
"def plot(self):\n\t\tself.plotOfIP().plot()",
"def visualize(**images):\n n_images = len(images)\n plt.figure(figsize=(20,8))\n for idx, (name, image) in enumerate(images.items()):\n plt.subplot(1, n_images, idx + 1)\n plt.xticks([]); \n plt.yticks([])\n # get title from the parameter names\n plt.title(name.replace('_',' ').title(), fontsize=20)\n plt.imshow(image)\n plt.savefig('sample_gt_pred_2_max.jpeg')\n plt.show()",
"def plot_data(self):\n # plot every log image\n for log_img in self.log_img_map.itervalues():\n log_img.plot()",
"def animate(frames):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n for i in range(len(env_list)):\n ax.imshow(env_list[i],cmap='binary')\n plt.pause(0.05)",
"def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))",
"def plot_r(self):\n for k, v, o in self.data:\n self.plot_r1(k, v, o)",
"def get_convergence_plot(self):\n fig, ax = plt.subplots()\n first_episode = self.get_convergence_episode()\n\n values = self.stats['return_stats']['episode_totals']\n ax.plot(np.arange(len(values)), values, color='steelblue', lw=2, alpha=.9,\n label='Return')\n ax.axvline(first_episode, color='seagreen', lw=2, label='Converged')\n ax.set_xlim(left=0, right=first_episode * 2)\n\n ax.set_title('Normalized regret = {:.3f}'.format(\n self.get_normalized_regret()))\n ax.legend()\n ax.set_ylabel('Return')\n ax.set_xlabel('Episode')\n return fig",
"def PlotPosteriors(self, other):\n thinkplot.Clf()\n thinkplot.PrePlot(num=2)\n\n cdf1 = thinkbayes2.Cdf(self, label='posterior %d' % self.score)\n cdf2 = thinkbayes2.Cdf(other, label='posterior %d' % other.score)\n\n thinkplot.Cdfs([cdf1, cdf2])\n thinkplot.Save(xlabel='efficacy', \n ylabel='CDF', \n axis=[0, 4.6, 0.0, 1.0],\n root='sat_posteriors_eff',\n formats=['pdf', 'eps'])",
"def plot(self):\n\n fig, ax = plt.subplots()\n\n for run in self.runs:\n # Load datasets\n data_measure = run.get_dataset(\"stats-collect_link_congestion-raw-*.csv\")\n data_sp = run.get_dataset(\"stats-collect_link_congestion-sp-*.csv\")\n\n # Extract link congestion information\n data_measure = data_measure['msgs']\n data_sp = data_sp['msgs']\n\n # Compute ECDF and plot it\n ecdf_measure = sm.distributions.ECDF(data_measure)\n ecdf_sp = sm.distributions.ECDF(data_sp)\n\n variable_label = \"\"\n size = run.orig.settings.get('size', None)\n if size is not None:\n variable_label = \" (n=%d)\" % size\n\n ax.plot(ecdf_measure.x, ecdf_measure.y, drawstyle='steps', linewidth=2,\n label=\"U-Sphere%s\" % variable_label)\n ax.plot(ecdf_sp.x, ecdf_sp.y, drawstyle='steps', linewidth=2,\n label=u\"Klasični usmerjevalni protokol%s\" % variable_label)\n\n ax.set_xlabel('Obremenjenost povezave')\n ax.set_ylabel('Kumulativna verjetnost')\n ax.grid()\n ax.axis((28, None, 0.99, 1.0005))\n self.convert_axes_to_bw(ax)\n\n legend = ax.legend(loc='lower right')\n if self.settings.GRAPH_TRANSPARENCY:\n legend.get_frame().set_alpha(0.8)\n fig.savefig(self.get_figure_filename())",
"def plot_observer(population, num_generations, num_evaluations, args):\r\n import pylab\r\n import numpy\r\n \r\n stats = inspyred.ec.analysis.fitness_statistics(population)\r\n best_fitness = stats['best']\r\n worst_fitness = stats['worst']\r\n median_fitness = stats['median']\r\n average_fitness = stats['mean']\r\n colors = ['black', 'blue', 'green', 'red']\r\n labels = ['average', 'median', 'best', 'worst']\r\n data = []\r\n if num_generations == 0:\r\n pylab.ion()\r\n data = [[num_evaluations], [average_fitness], [median_fitness], [best_fitness], [worst_fitness]]\r\n lines = []\r\n for i in range(4):\r\n line, = pylab.plot(data[0], data[i+1], color=colors[i], label=labels[i])\r\n lines.append(line)\r\n # Add the legend when the first data is added.\r\n pylab.legend(loc='lower right')\r\n args['plot_data'] = data\r\n args['plot_lines'] = lines\r\n pylab.xlabel('Evaluations')\r\n pylab.ylabel('Fitness')\r\n else:\r\n data = args['plot_data']\r\n data[0].append(num_evaluations)\r\n data[1].append(average_fitness)\r\n data[2].append(median_fitness)\r\n data[3].append(best_fitness)\r\n data[4].append(worst_fitness)\r\n lines = args['plot_lines']\r\n for i, line in enumerate(lines):\r\n line.set_xdata(numpy.array(data[0]))\r\n line.set_ydata(numpy.array(data[i+1]))\r\n args['plot_data'] = data\r\n args['plot_lines'] = lines\r\n ymin = min([min(d) for d in data[1:]])\r\n ymax = max([max(d) for d in data[1:]])\r\n yrange = ymax - ymin\r\n pylab.xlim((0, num_evaluations))\r\n pylab.ylim((ymin - 0.1*yrange, ymax + 0.1*yrange))\r\n pylab.draw()",
"def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes",
"def ion():\n plt.ion()",
"def plot_10_by_10_images(images):\n\n\tn = images.shape[0]\n\n\tq = n // 10\n\tr = n%10\n\tprint n,q,r\n\n\tfig = plt.figure()\n\tplt.ion()\n\n\tfor x in range(q):\n\t\tprint x\n\t\tif not x%10:\n\t\t\tplt.clf()\n\t\tfor y in range(10):\n\t\t\tax = fig.add_subplot(10, 10, 10*y+x%10+1)\n\t\t\tax.matshow(images[10*y+x%10], cmap = mpl.cm.binary)\n\t\t\tplt.xticks(np.array([]))\n\t\t\tplt.yticks(np.array([]))\n\t\tplt.show()\n\t\t_=raw_input(\"Press enter to show next 10\")",
"def convergence():\n fig, axes = plt.subplots(nrows=2, figsize=figsize(aspect=1.2))\n\n # label names\n label1 = str(league.lambda1)\n label2_list = [str(lambda2) for lambda2 in league.lambda2_list]\n\n # point spread and point total subplots\n subplots = [\n (False, [-0.5, 0.5], league.spreads, 'probability spread > 0.5'),\n (True, [200.5], league.totals, 'probability total > 200.5'),\n ]\n\n for ax, (commutes, lines, values, ylabel) in zip(axes, subplots):\n\n # train margin-dependent Elo model\n melo = Melo(lines=lines, commutes=commutes, k=1e-4)\n melo.fit(league.times, league.labels1, league.labels2, values)\n\n line = lines[-1]\n\n for label2 in label2_list:\n\n # evaluation times and labels\n times = np.arange(league.times.size)[::1000]\n labels1 = times.size * [label1]\n labels2 = times.size * [label2]\n\n # observed win probability\n prob = melo.probability(times, labels1, labels2, lines=line)\n ax.plot(times, prob)\n\n # true (analytic) win probability\n if ax.is_first_row():\n prob = skellam.sf(line, int(label1), int(label2))\n ax.axhline(prob, color='k')\n else:\n prob = poisson.sf(line, int(label1) + int(label2))\n ax.axhline(prob, color='k')\n\n # axes labels\n if ax.is_last_row():\n ax.set_xlabel('Iterations')\n ax.set_ylabel(ylabel)\n\n set_tight(w_pad=.5)",
"def dplot(self):\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n from sys import stderr\n print(\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\", file=stderr)\n raise\n\n x_min = np.min(self.knot_vector)\n x_max = np.max(self.knot_vector)\n\n x = np.linspace(x_min, x_max, num=1000)\n\n ns = np.array([self.d(i) for i in x]).T\n\n for n in ns:\n plt.plot(x, n)\n\n return plt.show()",
"def run_plots(self):\n # load the files\n self.pre_dark_file = os.path.join(self.input_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.input_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.input_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.input_dir, 'step_rate.fits')\n self.ramp_file = glob.glob(os.path.join(self.input_dir, '*.fits'))[0]\n\n # plots\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])",
"def show_mdn_posterior_with_bootstrapping():\n\n fig = plt.figure()\n all_dist = np.array([])\n\n for iter in xrange(n_bootstrap_iter):\n\n # load approximate posterior\n _, approx_posterior, _, dist = helper.load(netsdir + 'mdn_svi_proposal_prior_{0}.pkl'.format(iter))\n\n # print means and variances\n m, S = approx_posterior.calc_mean_and_cov()\n print 'mixing coefficients = {0}'.format(approx_posterior.a)\n for i in xrange(4):\n print 'log theta {0}: true = {1:.2} \\t estimate = {2:.2} +/- {3:.2}'.format(i+1, np.log(true_params[i]), m[i], 2.0 * np.sqrt(S[i, i]))\n print ''\n\n # plot marginals\n helper.plot_pdf_marginals(pdf=approx_posterior, lims=[log_prior_min, log_prior_max], gt=np.log(true_params))\n\n # plot distance histograms\n ax = fig.add_subplot(2, n_bootstrap_iter/2, iter+1)\n ax.hist(dist, bins=int(np.sqrt(dist.size)))\n ax.set_title('iteration = {0}'.format(iter+1))\n ax.set_xlim([0.0, 12.0])\n all_dist = np.append(all_dist, dist)\n\n # plot distance trace\n _, ax = plt.subplots(1, 1)\n ax.plot(all_dist, '.')\n ax.set_xlabel('summary statistics samples')\n ax.set_ylabel('distance')\n\n plt.show(block=False)",
"def plotAllRobots(self):\n plt.figure(figsize=(10, 10))\n ax = plt.gca()\n\n for robotID in self.robotDict.keys():\n isCollided = self.isCollided(robotID)\n self.plotRobotCore(ax=ax, robotID=robotID, isCollided=isCollided)\n\n rr = 340.\n plt.xlim(np.array([-1., 1.]) * rr)\n plt.ylim(np.array([-1., 1.]) * rr)\n return"
] | [
"0.7567773",
"0.71228373",
"0.6709755",
"0.65574926",
"0.65548545",
"0.6547027",
"0.65101844",
"0.6499985",
"0.6397951",
"0.6385926",
"0.6382974",
"0.63662165",
"0.63592637",
"0.6354584",
"0.6353605",
"0.6352886",
"0.6328921",
"0.63259894",
"0.63183016",
"0.63164365",
"0.6308684",
"0.6297143",
"0.62920254",
"0.6285449",
"0.6239635",
"0.6238157",
"0.62332976",
"0.6194732",
"0.619017",
"0.61753905",
"0.6174312",
"0.6154336",
"0.61490077",
"0.61412513",
"0.61310333",
"0.61236894",
"0.61027706",
"0.6100378",
"0.6091526",
"0.6082133",
"0.6079335",
"0.6076859",
"0.6076836",
"0.6073893",
"0.6067127",
"0.60612404",
"0.60534614",
"0.60529745",
"0.60443777",
"0.60441774",
"0.6036942",
"0.60353404",
"0.6034706",
"0.6034031",
"0.60186356",
"0.60174775",
"0.6015458",
"0.5996868",
"0.5993673",
"0.5984294",
"0.59813464",
"0.59731656",
"0.5972984",
"0.5969808",
"0.59660196",
"0.596117",
"0.5959676",
"0.5953881",
"0.5943219",
"0.59426",
"0.5940646",
"0.5936129",
"0.5936129",
"0.5936129",
"0.59319997",
"0.59305704",
"0.5929677",
"0.5924825",
"0.591687",
"0.5915823",
"0.59048617",
"0.5902575",
"0.58983976",
"0.5897952",
"0.589555",
"0.589273",
"0.58844644",
"0.5884427",
"0.58711845",
"0.5868657",
"0.586711",
"0.58663344",
"0.58565515",
"0.58407605",
"0.58325243",
"0.5832144",
"0.5831484",
"0.5830783",
"0.5820934",
"0.5812723"
] | 0.65124786 | 6 |
Normalize the product of likelihood and prior. | def _normalize(self, inp):
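    # Divide by the total so the result sums to 1 (the Bayesian normalizing constant)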
return inp/inp.sum() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize(X, mu, sigma):\n return (X - mu) / sigma",
"def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n",
"def normalize(init_probs):\n total_prob = sum(init_probs)\n if total_prob > 0. + InferenceUtils._eps:\n for idx in range(len(init_probs)):\n init_probs[idx] = init_probs[idx] / total_prob\n\n # TODO: check refactor > do we have to return distrib with new instance?\n return init_probs",
"def normalize(x):\r\n return x/norm(x)",
"def normalize(self):\n total = 0.0\n for i in range(0,self.npoints):\n total+=self.y[i]*self._dx\n for i in range(0,self.npoints):\n self.y[i]/=total\n return",
"def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight",
"def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)",
"def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total",
"def model_normalize_(self, ref_point: 'ModelParameters', order=2):\n for parameter in self.parameters:\n parameter *= (ref_point.model_norm(order) / self.model_norm())",
"def normalize(self):\n self._data /= self.norm()",
"def normalize(self, factor):",
"def normalize(self):\n return (1. / abs(self)) * self",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X",
"def normalize_l2(x):\n return x / (npla.norm(x))",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)",
"def normalize_probability(p_unnormalized):\n p_normalized=p_unnormalized/p_unnormalized.sum(axis=0)\n return p_normalized",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)",
"def normalize(self, external=None) -> np.array:\n return self.y / np.max(self.y) if external is None else self.y / external",
"def normalize_to_prob(inp):\n return (inp + 1)/2",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)",
"def filter_normalize_(self, ref_point: 'ModelParameters', order=2):\n for l in range(len(self.parameters)):\n # normalize one-dimensional bias vectors\n if len(self.parameters[l].size()) == 1:\n self.parameters[l] *= (ref_point.parameters[l].norm(order) / self.parameters[l].norm(order))\n # normalize two-dimensional weight vectors\n for f in range(len(self.parameters[l])):\n self.parameters[l][f] *= ref_point.filter_norm((l, f), order) / (self.filter_norm((l, f), order))",
"def normalize(self):\n return Vector(self.args + []) / self.magnitude()",
"def stdProbabilityNorm(self):\n B = factorial(self.alpha-1)*factorial(self.beta-1)/factorial(self.alpha+self.beta-1)\n norm = 1.0/(2**(self.alpha+self.beta-1)*B)\n return norm",
"def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)",
"def normalized(self):\n return self / self.norm()",
"def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)",
"def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))",
"def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1",
"def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum",
"def prior(mu):\n p = np.ones(len(mu))/(mu.max()-mu.min())\n return p",
"def normalize(self, X):\n return X - X.mean()",
"def normalize(values):\n return (values - np.mean(values)) / np.std(values)",
"def normalize(self, lam):\n return (lam.T / np.sum(lam, axis=1)).T",
"def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)",
"def prior_distribution(self):\n out = self.model.forward(self.inducing_points)\n return MultivariateNormal(out.mean, out.lazy_covariance_matrix.evaluate_kernel())",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)",
"def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))",
"def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))",
"def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value",
"def normalize(X):\n # z-score\n mean = np.mean(X, axis=(0, 1, 2, 3))\n std = np.std(X, axis=(0, 1, 2, 3))\n # avoid dividing zero by adding a very small number\n X = (X - mean) / (std + 1e-7)\n\n return X",
"def normalize(self):\n if self.normed:\n return\n self._normalize()",
"def _compute_input_normalization(*amps):\n if len(amps) < 2:\n raise ValueError('At least 2 amplitudes must be provided.')\n n_bosons = len(amps)\n left_range = range(n_bosons)\n right_ranges = list(itertools.permutations(left_range))\n total = 0.\n for right_range in right_ranges:\n i_prod = 1.\n for idx1, idx2 in zip(left_range, right_range):\n # if `idx1` and `idx2` are equal the contribution is given\n # by the inner product of an amplitude with itself. Given\n # that we are assuming the amplitudes to be normalized,\n # the result is always 1 and we can just skip it\n if idx1 == idx2:\n pass\n # otherwise we update the partial product computing the\n # inner product of the two relevant amplitudes (states)\n i_prod *= np.vdot(amps[idx1], amps[idx2])\n total += i_prod\n return np.sqrt(total)",
"def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()",
"def _normalize(self):\n\n n = len(self.e2[0])\n E = []\n\n for e2 in self.e2:\n if len(e2) != n:\n print 'WARNING: non consistent length in error statistics!!!'\n E.append(np.nansum(np.sqrt(e2))) # temporal aggregation\n\n E = np.asarray(E)\n EM = E.mean() # take square root, as e2 is still the squared error!\n self.e_norm = (E - EM) / EM # see Glecker et al, eq.2",
"def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S",
"def normalize(self):\n self.vector /= np.linalg.norm(self.vector)",
"def normaliza(self):\n return self * (1 / self.norma())",
"def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)",
"def _normalize(X: np.ndarray) -> np.ndarray:\n # return X * np.sqrt(1 / np.sum(X ** 2, axis=1))[:, None]\n return X * np.sqrt(X.shape[1] / np.sum(X ** 2, axis=1))[:, None]",
"def normalize(inp):\n\n out = inp / np.linalg.norm(inp, axis=1, keepdims=True)\n\n return out",
"def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))",
"def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs",
"def _normalize(weights, axis, log=True):\n if log:\n normalizer = tf.reduce_logsumexp(weights, axis=axis, keepdims=True)\n return weights - normalizer\n normalizer = tf.reduce_sum(weights, axis=axis)\n return weights / normalizer",
"def normalize(P, E):\n total = P.sum(axis = 0)\n P = np.transpose(P)\n for i in range(len(P)):\n P[i] = P[i]/total[i]\n E[i] = E[i] * total[i]\n P = np.transpose(P)\n return P, E",
"def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))",
"def normalize(self, arr):\r\n\r\n\t\t#Set the cap for arr at self.value_max and self.value_max\r\n\t\t#this prevents outliers of breaking the previously predicted p_func\r\n\t\tarr_capped = arr * (arr <= self.value_max) + self.value_max * (arr > self.value_max)\t#cap to value_max\r\n\t\tarr_capped = arr_capped * (arr_capped >= self.value_min) + self.value_min * (arr_capped < self.value_min)\t#cap to value_min\r\n\r\n\t\t#Normalize array\r\n\t\tnorm_factor = self.get_norm_factor(arr_capped)\r\n\t\tnormalized = arr * norm_factor\r\n\r\n\t\treturn(normalized)",
"def normalize_initial(self):\n self._i /= self._i.sum()",
"def normalize(self):\n det = self._mat[0][0]*self._mat[1][1] - self._mat[0][1]*self._mat[1][0]\n for i in range(2):\n for j in range(2):\n self._mat[i][j] = (self._mat[i][j])/(np.sqrt(det))",
"def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x",
"def normalize(X, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(X, order, axis))\n l2[l2 == 0] = 1\n return X / np.expand_dims(l2, axis)",
"def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01",
"def normalize(self) -> NoReturn:\n self._ionic_fractions = self._ionic_fractions / np.sum(self._ionic_fractions)",
"def normalisation_l2(x):\n res = np.zeros(x.shape)\n print(x.shape)\n for i in range(x.shape[0]):\n res[i] = x[i]/(np.linalg.norm(x[i],2)+1e-5)\n std = res.std()\n mean = res.mean()\n print(\"normalisation done\")\n return(mean,std,res)",
"def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)",
"def normalize_log_likelihoods(X):\n h, w = np.shape(X)\n return X - np.tile(logsumexp(X, axis=0), (h, 1))\n # return X - np.matlib.repmat(logsumexp(X, axis=0), h, 1)",
"def normalize(X, norm=..., *, axis=..., copy=..., return_norm=...):\n ...",
"def normalize_transform():\n\n # Default for PyTorch's pre-trained models\n return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])",
"def normalization_P(P_prime):\n return (P_prime.T / np.sum(P_prime, axis=1)).T",
"def testNormalize(self):\n v1 = Vector.ones(4)\n n = v1.norm()\n assert n == 2\n assert v1.normalize() == [ 0.5, 0.5, 0.5, 0.5 ]",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())",
"def normalize(x):\n MEAN_VALUES = np.array([104, 117, 123])\n means = theano.shared(MEAN_VALUES.astype(\"float32\"))\n return x[:, ::-1, :, :] - means[np.newaxis, :, np.newaxis, np.newaxis]",
"def normalization_constants(X):\n# U = (1/X.shape[0]) * np.sum(X.T)\n# X1 = X - U\n# var2 = 1/X.shape[0] * np.sum(X ** 2)\n U = X.mean(axis=0)\n des = X.std(axis=0)\n return U, des",
"def normalize_weight(self, Z):\n self.weight /= Z",
"def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2",
"def stdProbabilityNorm(self):\n return 0.5",
"def normalize_emission(self):\n self._e /= self._e.sum(0)",
"def normalize_test_3(self):\n\n res = self.XY_factor_n.normalize([self.X, self.Y])\n assert(res.rand_vars == [self.X, self.Y] and\n res.values == [1/6, 1/6, 2/6, 2/6])\n\n res = self.XY_factor_n.normalize()\n assert(res.rand_vars == [self.X, self.Y] and\n res.values == [1/6, 1/6, 2/6, 2/6])",
"def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s",
"def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))",
"def _assure_normalized(self):\n for iwann in range(self.nwann):\n norm = np.trace(\n self.wannR[:, :, iwann].conj().T @ self.wannR[:, :, iwann])\n #print(f\"Norm {iwann}: {norm}\")",
"def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW",
"def test_normalize(self):\n\n a1 = vectors.Vector(4, 0, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(1, 0, 0))\n\n a1 = vectors.Vector(0, 4, 0)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 1, 0))\n\n a1 = vectors.Vector(0, 0, 4)\n self.assertEqual(a1.normalize(),\n vectors.Vector(0, 0, 1))",
"def normalize(self, x, axis=-1):\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x",
"def set_proba(self):\n self.__c_elem().log_normalise()",
"def _compute_normalization(self, normalize=True):\n self._normalization_constant = 1.0 / self._normalization_correction\n\n if normalize:\n # compute normalization constant so that\n # N*C*sum(data) = 1:\n if self._img_norm is None:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._normalization_constant /= self._img_norm\n self._normalization_status = 0\n\n else:\n self._normalization_constant = 1.0\n self._normalization_status = 1\n warnings.warn(\"Overflow encountered while computing \"\n \"normalization constant. Normalization \"\n \"constant will be set to 1.\", NonNormalizable)\n\n else:\n self._normalization_status = 2",
"def normalize(attributions):\n # keepdims for division broadcasting\n total = np.abs(attributions).sum(axis=1, keepdims=True)\n\n return np.abs(attributions) / total",
"def normalize(self):\n\t\tnorm = self.norm()\n\t\tif norm == 0:\n\t\t\traise ValueError(\"Can't normalize zero vector\")\n\t\treturn self / norm",
"def convertHermiteToNormal(self,x):\n return self.sigma*x+self.untruncatedMean()",
"def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')",
"def norm_inplace(q):\n q /= amplitude(q)",
"def normalize(self, context=None):\r\n self._real.normalize(context)\r\n self._imag.normalize(context)",
"def normalize( self, insitu = False, dimension = 1 ) :\n\n norm = self.coefficients[0]\n coefficients = [ coefficient / norm for coefficient in self ]\n if( insitu ) :\n self.setData( coefficients )\n return( self )\n return( self.returnAsClass( self, coefficients ) )",
"def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x",
"def normalized(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2==0] = 1\n return a / np.expand_dims(l2, axis)",
"def normalizeData(pre_signal):\n\n if sp.any(sp.isnan(pre_signal)):\n print('there are NaNs in the data matrix, making them zero')\n\n pre_signal[sp.isnan(pre_signal)] = 0\n mean_vector = sp.mean(pre_signal, axis=0, keepdims=True)\n normed_signal = pre_signal - mean_vector\n norm_vector = sp.linalg.norm(normed_signal, axis=0, keepdims=True)\n norm_vector[norm_vector == 0] = 1e-116\n normed_signal = normed_signal / norm_vector\n\n return normed_signal, mean_vector, norm_vector",
"def normalize_particles(self):\n tot_weight = sum([particle.w for particle in self.particle_cloud]) or 1\n for particle in self.particle_cloud:\n particle.w = particle.w / tot_weight;"
] | [
"0.68239534",
"0.67643946",
"0.6680338",
"0.66775995",
"0.66477513",
"0.66317827",
"0.6610939",
"0.65681386",
"0.6563714",
"0.65164256",
"0.6468991",
"0.6431344",
"0.6422754",
"0.6422754",
"0.6386382",
"0.63844436",
"0.63793725",
"0.6371887",
"0.63595843",
"0.63073415",
"0.63022774",
"0.63011914",
"0.6296997",
"0.6289034",
"0.6277023",
"0.62377846",
"0.62096643",
"0.61980057",
"0.6197396",
"0.6193082",
"0.6189423",
"0.61868703",
"0.6183906",
"0.6178662",
"0.6176103",
"0.6175828",
"0.61519986",
"0.6149002",
"0.61473775",
"0.6144332",
"0.6143355",
"0.61401623",
"0.61390966",
"0.6138131",
"0.6135754",
"0.6129788",
"0.6128725",
"0.6126476",
"0.6121801",
"0.61175257",
"0.61147547",
"0.61141753",
"0.6086941",
"0.6085653",
"0.60843635",
"0.60674745",
"0.6066502",
"0.6064785",
"0.605993",
"0.60592854",
"0.60545564",
"0.6049683",
"0.6048716",
"0.60482454",
"0.6044466",
"0.60444605",
"0.60438406",
"0.6039498",
"0.60276943",
"0.60234886",
"0.6011926",
"0.6005535",
"0.6005535",
"0.60054594",
"0.6004879",
"0.600243",
"0.60014087",
"0.6001264",
"0.6000985",
"0.5999311",
"0.5999208",
"0.599636",
"0.599076",
"0.59896827",
"0.5988544",
"0.59783083",
"0.59774905",
"0.59747934",
"0.5973904",
"0.5970306",
"0.5959449",
"0.59556204",
"0.59552926",
"0.59537476",
"0.59528255",
"0.59475416",
"0.59457093",
"0.59412163",
"0.5932419",
"0.5926949"
] | 0.680125 | 1 |
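The `_normalize` document above is the entire Bayesian normalization step: the unnormalized posterior (likelihood times prior) is divided by its sum so it becomes a proper pmf. A minimal sketch of how it is used, assuming a hypothetical NumPy-backed suite where the prior and the elementwise likelihood are both arrays over the hypotheses (the names and values below are illustrative, not from the dataset):

import numpy as np

def posterior(prior, likelihood):
    # Elementwise product gives the unnormalized posterior
    unnorm = prior * likelihood
    # _normalize's job: divide by the sum so the posterior sums to 1
    return unnorm / unnorm.sum()

prior = np.full(101, 1.0 / 101.0)      # uniform prior over 101 hypotheses
likelihood = np.arange(101) / 100.0    # P(data | hypothesis), illustrative only
post = posterior(prior, likelihood)    # a proper pmf: post.sum() == 1.0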
Likelihood function for Bernoulli process. Assumes that hypotheses are success rates expressed as integer percentages from 0 to 100. | def likelihood(self, inData):
    # Hypotheses are percentages, so divide by 100 to get probabilities
    if inData == 1:
        lh = self.hypotheses / 100.0
    else:
        lh = (100 - self.hypotheses) / 100.0
    return lh | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_prob_mle(X: np.ndarray, n: int) -> float:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n Binomial._check_input_data(X=X)\n Binomial._check_support(X=X, n=n)\n\n prob = X.mean() / n\n return prob",
"def bernoulli(p):\n bern = rn.binomial(1,p)\n return bern",
"def likelihood(self, data, hypo):\n tagged, n, k = data\n if hypo < tagged + n - k:\n return 0\n\n p = tagged / hypo\n like = thinkbayes.eval_binomial_pmf(k, n, p)\n return like",
"def calculateBernoulli(x, mean, stdev):\n\t\t\tif x:\n\t\t\t\tprob = mean\n\t\t\telse:\n\t\t\t\tprob = 1-mean\n\t\t\treturn prob",
"def forward(self, xs, like_params, nan_mask=None):\n\t\tassert len(like_params) == 1, f\"BernoulliLikelihood only takes\" \\\n\t\t\t\t+ f\" a single parameter. Found {len(like_params)}.\"\n\t\t# Unwrap the single parameter tuple.\n\t\tlike_params = like_params[0] # [b,s,m,m_dim]\n\t\tassert len(like_params.shape) == 4, f\"len({like_params.shape}) != 4\"\n\t\txs = xs.unsqueeze(1) # [b,1,m,m_dim]\n\t\tdist = Bernoulli(logits=like_params)\n\t\tlog_probs = dist.log_prob(xs).sum(dim=3) # [b,s,m]\n\t\tif nan_mask is not None:\n\t\t\ttemp_mask = (~nan_mask).float().unsqueeze(1).expand(log_probs.shape)\n\t\t\tassert temp_mask.shape == log_probs.shape, \\\n\t\t\t\t\tf\"{temp_mask.shape} != {log_probs.shape}\"\n\t\t\tlog_probs = log_probs * temp_mask # [b,s,m]\n\t\treturn log_probs",
"def bernoulli_num(n):\n return mp.bernoulli(n)",
"def BernoulliExponentialLoss(lamb) :\n def bexl(x, p) :\n N = K.int_shape(p)[1]\n recon = N*metrics.binary_crossentropy(x, p)\n dkl = K.sum((-1./lamb) + K.log(lamb) - 1, axis=-1)\n return recon+dkl\n return bexl",
"def prob(x):\n\treturn 1. * bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)",
"def compute_prob_mle(X: np.ndarray) -> float:\n\n Bernoulli._check_input_data(X=X)\n Bernoulli._check_support(X=X)\n\n prob = X.mean()\n return prob",
"def log_prob(self):",
"def lnprobability(self):\n return",
"def naivebayesPXY_mle(x,y):\n pos_denom = x[y==1].sum()\n neg_denom = x[y==-1].sum()\n posprob = x[y==1].sum(axis = 0)/pos_denom\n negprob = x[y==-1].sum(axis = 0)/neg_denom\n return posprob, negprob",
"def prob_logit(x):\n try:\n if len(x.shape) != 1:\n raise ValueError(\"unexpected shape of input vector\\nexpected:\" + str(1) + \", actual: \" + str(len(x.shape)))\n except ValueError as e:\n print(e)\n print()\n raise\n\n x = 1.0 * np.exp(-x)\n\n probability = np.concatenate(\n (\n (x / (1.0 + x)).reshape(x.shape[0], 1),\n (1.0 / (1.0 + x)).reshape(x.shape[0], 1)\n ),\n axis=1\n )\n\n return probability",
"def get_log_likelihood(response_probability, response):\n pass",
"def multinomial_likelihood(m_true, alpha, alpha0, m_probs):\n\n ll = tf.reduce_sum(input_tensor=m_true * (tf.math.log(alpha0) - tf.math.log(alpha)), axis=1, keepdims=True)\n ll = tf.reduce_mean(input_tensor=ll)\n return ll",
"def lnprob(theta, observables):\n prior = lnprior(theta)\n if not np.isfinite(prior):\n return -inf\n return prior + lnlike(theta, observables)",
"def predictionBinaryClassifier(x, beta):\n x = np.insert(x, 0, 1, axis = 1)\n probability = logisticFunction(np.dot(beta, x.T))\n func = np.vectorize(lambda x: 1 if x >=0.5 else 0)\n probability = func(probability)\n return probability",
"def bernoulli(p):\r\n if np.random.random() < p:\r\n return 0\r\n else:\r\n return 1",
"def Bernoulli(p, succ=1, fail=0, symbol=None):\n\n return BernoulliPSpace(p, succ, fail, symbol).value",
"def Likelihood(self, data, hypo):\n p_correct = hypo\n score = data\n\n k = self.exam.Reverse(score)\n n = self.exam.max_score\n like = thinkbayes2.EvalBinomialPmf(k, n, p_correct)\n return like",
"def likelihood(self):\n \n raise NotImplementedError()",
"def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood",
"def log_likelihood_bernoulli(mu, target):\n # init\n batch_size = mu.size(0)\n mu = mu.view(batch_size, -1)\n target = target.view(batch_size, -1)\n\n # log_likelihood_bernoulli\n log_bernoulli = torch.sum(target * torch.log(mu) + (1. - target) * torch.log(1. - mu), dim=1)\n return log_bernoulli",
"def test_Bernoulli_NB_estimators():",
"def prob_distr(self, x):\n return 1.0/x",
"def compute_prob_mle(X: np.ndarray) -> float:\n\n Geometric._check_input_data(X=X)\n Geometric._check_support(X=X)\n\n prob = 1 / X.mean()\n return prob",
"def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])",
"def log_multinomial_coefficient(n, x):\n return gammaln(n + 1) - gammaln(x + 1).sum()",
"def bernoulli(n):\n\n x, res, s, c = Rat(0), Rat(0), Rat(0), Rat(-1)\n for k in range(1, n+2):\n c *= 1 - Rat(n + 2)/k\n s += x**n\n x += 1\n res += c*s/k\n return res",
"def bernoulli(gp_link=None):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Probit()\r\n #else:\r\n # assert isinstance(gp_link,noise_models.gp_transformations.GPTransformation), 'gp_link function is not valid.'\r\n\r\n if isinstance(gp_link,noise_models.gp_transformations.Probit):\r\n analytical_mean = True\r\n analytical_variance = False\r\n\r\n elif isinstance(gp_link,noise_models.gp_transformations.Heaviside):\r\n analytical_mean = True\r\n analytical_variance = True\r\n\r\n else:\r\n analytical_mean = False\r\n analytical_variance = False\r\n\r\n return noise_models.bernoulli_noise.Bernoulli(gp_link,analytical_mean,analytical_variance)",
"def get_likelihood(self, observation, position, direction):\n if self.real_robot and observation == 0.0:\n return 1.0\n\n closest = self.world_model.get_closest_wall(position, direction)\n if closest == None:\n # probability of a false positive is 0\n if observation == 0.0:\n return 1.0\n else:\n return 0.0\n elif closest != None and observation == 0.0:\n # probability of missing an obstacle is 0\n return 0.0\n return norm(0, self.model_noise_rate).pdf(abs(position - closest) - observation)",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def BernoulliGaussianLoss(mu_kl, log_var_kl) :\n def bgl(x, p) :\n N = K.int_shape(p)[1]\n recon = N*metrics.binary_crossentropy(x, p)\n dkl = -0.5 * K.sum(-K.exp(log_var_kl) - K.square(mu_kl) + 1. + log_var_kl, axis=-1)\n return dkl + recon\n return bgl",
"def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))",
"def bernoulli_logpmf(X, p):\n return -T.nnet.binary_crossentropy(p, X).sum(axis=-1)",
"def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)",
"def Likelihood(self, data, hypo):\n # TODO: fill this in\n like = 1\n return like",
"def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)",
"def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # Add one for bias to the first columns\n probs = np.zeros(X.shape[0])\n ### YOUR CODE HERE\n z = X.dot(self.w)\n probs = log_reg.logistic(z)\n ### END CODE\n assert probs.shape == (X.shape[0],)\n return probs",
"def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)",
"def isBernoulli(self):\n return self._bernoulli",
"def naivebayes(x,y,xtest,naivebayesPXY):\n pos, neg = naivebayesPY(x, y)\n posprob, negprob = naivebayesPXY(x, y)\n numerator = np.dot(xtest, np.log(posprob)) + np.log(pos)\n denominator = np.dot(xtest, np.log(negprob)) + np.log(neg)\n logratio = numerator - denominator\n return logratio",
"def get_log_likelihood(response_probability, observed_response):\n \n return np.log(response_probability[observed_response])",
"def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)",
"def _sigmoidLikelihood(self, x, label):\n logit = sigmoid(np.dot(x, self.weights))\n \n if label == 0:\n return (1-logit)\n elif label == 1:\n return logit",
"def log_probability(self, X):\n\n\t\treturn self.__log_probability(X)",
"def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))",
"def fit_and_predict_BernoulliNB(X_train, Y_train, X_test):\n\n # Import the package\n from sklearn.naive_bayes import BernoulliNB \n\n ### YOUR SOLUTION STARTS HERE### \n #referenced to sklearn documentation \n # fit the model... \n clf = BernoulliNB(binarize=0.0).fit(X_train, Y_train) #fit naive bayes to X and Y train data\n # make predictions\n predicted_bernNB = clf.predict(X_test)\n return predicted_bernNB\n ### END SOLUTION ### ",
"def lnprob(theta, dtarray, dmagarray, sigmaarray):\n lp = lnprior(theta)\n\n if not np.isfinite(lp):\n #if (lp==-(10**32)):\n return -np.inf\n #return -(10**32)\n return lp +lnlike(theta, dtarray, dmagarray, sigmaarray)",
"def likelihood(x, n, P):\n if not isinstance(n, int) or (n <= 0):\n raise ValueError('n must be a positive integer')\n if not isinstance(x, int) or (x < 0):\n raise ValueError(\n 'x must be an integer that is greater than or equal to 0')\n if x > n:\n raise ValueError('x cannot be greater than n')\n if not isinstance(P, np.ndarray) or len(P.shape) != 1:\n raise TypeError('P must be a 1D numpy.ndarray')\n if not np.all((P >= 0) & (P <= 1)):\n raise ValueError('All values in P must be in the range [0, 1]')\n nume = np.math.factorial(n)\n deno = (np.math.factorial(x) * (np.math.factorial(n - x)))\n fact = nume / deno\n P_likelihood = fact * (np.power(P, x)) * (np.power((1 - P), (n - x)))\n return P_likelihood",
"def probit(x):\n from tensorflow_probability import distributions\n return distributions.Normal(0, 1).cdf(x)",
"def logistic_function(self, data, b0, b1):\n return np.array([1/(1+exp(-1*b0+(-1*b1*x))) for x in data])",
"def test_bernoulli(self):\n with Model() as model:\n Bernoulli('x', 0.5)\n steps = assign_step_methods(model, [])\n assert isinstance(steps, BinaryGibbsMetropolis)",
"def bernoulliSample(x):\r\n g = tf.get_default_graph()\r\n\r\n with ops.name_scope(\"BernoulliSample\") as name:\r\n with g.gradient_override_map({\"Ceil\": \"Identity\", \"Sub\": \"BernoulliSample_ST\"}):\r\n\r\n if args[\"deterministic_train\"]:\r\n train_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.ones(tf.shape(x)) * 0.5)\r\n else:\r\n train_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.random_uniform(tf.shape(x)))\r\n\r\n if args[\"deterministic_eval\"]:\r\n eval_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.ones(tf.shape(x)) * 0.5)\r\n else:\r\n eval_fn = lambda: tf.minimum(tf.ones(tf.shape(x)), tf.random_uniform(tf.shape(x)))\r\n\r\n mus = tf.cond(is_training, train_fn, eval_fn)\r\n\r\n return tf.ceil(x - mus, name=name)",
"def log_probability(self, samples):\n pass",
"def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)",
"def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])",
"def mean(self, like_params):\n\t\tassert len(like_params) == 1, f\"BernoulliLikelihood only takes\" \\\n\t\t\t\t+ f\" a single parameter. Found {len(like_params)}.\"\n\t\treturn (torch.sigmoid(like_params[0]),)",
"def binary_log_likelihood(y, log_y_hat):\n return tf.reduce_sum(y*(-softplus(-log_y_hat)) +\n (1 - y)*(-log_y_hat-softplus(-log_y_hat)),\n 1)",
"def sample(self, like_params):\n\t\tassert len(like_params) == 1, f\"BernoulliLikelihood only takes\" \\\n\t\t\t\t+ f\" a single parameter. Found {len(like_params)}.\"\n\t\t# Unwrap the single parameter tuple.\n\t\tlike_params = like_params[0] # [b,s,m,m_dim]\n\t\tdist = Bernoulli(logits=like_params)\n\t\tsamples = dist.sample()\n\t\treturn (samples,)",
"def lnprob(theta, model, priors, x, y, yerr):\n lp = lnprior(theta, priors)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, model, x, y, yerr)",
"def compute_prob_mle(X: np.ndarray, k: int) -> np.ndarray:\n\n assert k > 2, \"for k = 2 use Bernoulli distribution.\"\n Categorical._check_input_data(X=X)\n Categorical._check_support(X=X, k=k)\n\n prob = np.zeros(k)\n for x in X:\n prob[x] += 1\n prob /= prob.sum()\n\n return prob",
"def Bernstein(i, n, t):\n return special.binom(n, i) * t ** i * (1 - t) ** (n - i)",
"def log_likelihood(self, data, reward_model, bias_params):",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)",
"def regularized_multinomial_likelihood(m_true, alpha, alpha0, m_probs, global_step, annealing_step=1000, max_lambda=1.0):\n\n ll = multinomial_likelihood(m_true, alpha, alpha0, m_probs)\n kl = kullback_leibler_dirichlet(m_true, alpha)\n lamb = tf.cast(tf.minimum(max_lambda, global_step / annealing_step), dtype=tf.float32)\n loss = ll + lamb * kl\n return loss",
"def joint_proba(self, X):\n return self.weights * self._bernoulli(X)",
"def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans",
"def calculate_likelihoods_bernoulli(data, labels, vocab):\r\n classes = set(labels)\r\n likelihoods = {}\r\n # Calculate likelihood for each class\r\n for cls in classes:\r\n documentsInClass = [set(map(lambda y: y[0], data[x])) for x in range(len(data)) if labels[x] == cls]\r\n numDocsInClass = len(documentsInClass)\r\n results = {}\r\n for word in vocab:\r\n numDocsWithWordInClass = len(filter(lambda x: word in x, documentsInClass))\r\n # Binary variable-- either present or not present\r\n results[word] = laplace_smooth(numDocsWithWordInClass, numDocsInClass, 2)\r\n # Special laplace smoothing for words not found in training data\r\n results[None] = laplace_smooth(0, numDocsInClass, 2)\r\n likelihoods[cls] = results\r\n return likelihoods",
"def lnprob(self, theta):\n # TODO: add prior function to Prior class\n n = self.t.size\n c = - .5 * n * np.log(2 * np.pi) - .5 * np.log(self.dy).sum()\n return c - .5 * self.chi(theta)",
"def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # add bias variable 1\n prob = np.zeros(X.shape[0], self.num_classes)\n ### YOUR CODE HERE\n z = X.dot(self.w)\n prob = soft_reg.softmax(z)\n ### END CODE\n return prob",
"def _bernoulli_lower(self, p, n, delta):\n if p < 1e-6:\n return 0.\n else:\n lower = scipy.stats.beta.ppf(delta / 2, p * n, n - p * n + 1)\n return lower",
"def Likelihood(self, data, hypo):\n if hypo < data:\n return 0\n else:\n denom = np.arange(1, hypo)\n return hypo**-1.0 / sum(denom**-1.0)",
"def LLR_binom(k, n, p0, EPS=1E-15):\n phat = k/n # maximum likelihood estimate\n phat[phat < EPS] = 2*EPS\n\n # Log-likelihood (density) ratios\n LLR = 2*( (k*np.log(phat)+(n-k)*np.log(1-phat)) - (k*np.log(p0)+(n-k)*np.log(1-p0)))\n return LLR",
"def loglikelihood(self, y):\n raise NotImplementedError",
"def __call__(self):\n\n accepted = False\n\n while not accepted:\n\n test_log10E = np.random.uniform(1, 7)\n\n test_pdf = np.random.uniform(self._min_pdf, self._max_pdf)\n\n if test_pdf < self._likelihood(10 ** test_log10E, self._index):\n\n accepted = True\n\n return 10 ** test_log10E",
"def probability(self, tokens):\n\n return 2 ** self.log_probability(tokens)",
"def get_lnprob(self, x):\n return self.lnprob(x, *self.lnprob_args)",
"def lnprob(params, cos2, y, yerr):\n\n # Get prior given parameters\n lp = lnprior(params)\n if not np.isfinite(lp):\n return -np.inf\n\n # Include likelihood given data\n llh = lp + lnlike(params, cos2, y, yerr)\n\n return llh",
"def gibbs_ask_traffic(self, X, e, Z, bn, N):\n\n #makes copies\n X = e\n e = e\n\n #probability\n probability = [0,0]\n numerator = 0\n\n\n #True, False\n\n for x in range(N):\n # second joint\n if Z == True: # if non evidence variable\n random_choice = np.random.choice([0,1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][0]\n else:\n random_choice = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0] #Rain or No Rain\n X[1] = bn[1][random_choice][1]\n\n # first joint\n if X[1] == 0.8 or X[1] == 0.2: # Rain is true\n X[0] = bn[0][0]\n else: # Rain is False\n X[0] = bn[0][1]\n\n # third joint\n if X[1] == 0.8 or X[1] == 0.1: # traffic\n random_late = np.random.choice([0,1], 1, True, [0.5,0.5])[0]\n X[2] = bn[2][0][random_late]\n else: # no traffic\n random_late = np.random.choice([0, 1], 1, True, [0.5, 0.5])[0]\n X[2] = bn[2][1][random_late]\n\n # print(X)\n if X[0] == 0.1:\n probability[0] += 1\n else:\n probability[1] += 1\n\n\n probability[0] = probability[0] / N\n probability[1] = probability[1] / N\n # print(probability)\n return probability",
"def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")",
"def Bayes_classifier(pi, x0, lamb):\n D = len(x0)\n ans = [1-pi, pi]\n for y in range(2):\n arg = [poisson.pmf(x0[d], lamb[y][d]) for d in range(D)]\n ans[y] *= np.prod(arg)\n return np.argmax(ans)",
"def lnprior(params):\n a, b, f = params\n if -10.0 < b < 0. and 0. < a < 10 and 0. < f:\n return 0.0\n\n return -np.inf",
"def logit_link(x):\n\n return 1 / (1 + math.exp(-0.05 * x))\n # return 1 / (1 + math.exp(-0.01 * x))",
"def naive_bn(data, attributes):\n bn = []\n attr = attributes['attr'].tolist()\n # each attribute is only dependent on the class node\n i = 0\n while (i < len(attr)-1):\n row = [attr[i], attr[-1]]\n bn.append(row)\n i= i + 1\n # frequency table \n freq = counts_table(data, attributes)\n # conditional probabilities and prior probabilities\n cond_probs, prior0, prior1 = conditional_probability(data, attributes, freq)\n\n return bn, cond_probs, prior0, prior1",
"def sample_bernoulli(params):\n assert False, 'tfp not available on cluster gpu yet'\n \"\"\"\n shape = tf.shape(params)\n bernoulli_dist = tfp.distributions.Bernoulli(logits=params, dtype=tf.float32)\n return bernoulli_dist.sample()\n \"\"\"",
"def gaussian_likelihood(x, mu, log_std):\n prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(2 * np.pi))\n return tf.reduce_sum(prob, axis=1)",
"def lnprob(self, theta, x, y, yerr, bat_pars, tmod):\n lp = self.lnprior(theta)\n if not np.isfinite(lp): # if the prior is infinitely small, return -infinity without calculating the model to save computational time.\n return -np.inf, list(0.0 for xx in x)\n\n likelihood, trial_fit = self.lnlike(theta, x, y, yerr, bat_pars, tmod)\n return lp + likelihood, trial_fit # the second argument goes in the blobs variable and saves all trial fits this way.",
"def bernoulliSample(x):\n \n g = tf.get_default_graph()\n \n with ops.name_scope(\"BernoulliSample\") as name:\n with g.gradient_override_map({\"Ceil\": \"Identity\", \"Sub\": \"BernoulliSample_ST\"}):\n return tf.ceil(x - tf.random_uniform(tf.shape(x)),name=name)",
"def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)",
"def likelihood_prediction():\n # Get info\n selected_word = prompt_tech_selection()\n article_json = get_json_from_file()\n\n # Calculate results\n total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)\n probability = selected_word_counter / total_word_counter\n total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds\n months_in_train_set = total_time / SECONDS_IN_MONTH\n expected_posts_per_month = int(total_word_counter / months_in_train_set)\n\n # Show results\n print_text_results(expected_posts_per_month, probability, selected_word)\n plot_likelihood(expected_posts_per_month, probability)",
"def plot_likelihood(expected_posts_per_month, probability):\n bar_amount = max(10, int(5 * expected_posts_per_month * probability)) # at least 10 bars, not too long of a tail\n print(\"Generating likelihood plot\")\n distribution = [binom.pmf(option, expected_posts_per_month, probability) for option in range(bar_amount)]\n plt.bar(range(bar_amount), distribution)\n plt.xlabel(\"occurrences\")\n plt.ylabel(\"likelihood\")\n plt.title(\"Likelihood of word occurences next month\")\n plt.show()",
"def BIC(y_true, y_pred, n_features):\n ll = log_likelihood(y_true, y_pred)\n n_samples = y_true.size\n BIC = np.log(n_samples) * n_features - 2 * ll\n return BIC",
"def logistic(mu, hw, x): \n n = np.exp(- ((x-mu)/(.477*hw))**2)\n return (2. * n)/( 1 + n)",
"def lnprob(self, theta):\n lp = self.lnprior(theta)\n if not np.isfinite(lp):\n return -np.inf\n n = self.x.size\n c = - .5 * n * np.log(2 * np.pi) - .5 * np.log(self.dy).sum()\n return c - .5 * self.chi(theta)",
"def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll",
"def calcprob(beta, x):\n try:\n N, npreds = x.shape[1], x.shape[0]\n except: # single predictor, x is a vector, len(beta)=2.\n N, npreds = len(x), 1\n if len(beta) != npreds+1:\n raise ValueError,'sizes of beta and x do not match!'\n if npreds==1: # simple logistic regression\n return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))\n X = NA.ones((npreds+1,N), x.dtype.char)\n X[1:, :] = x\n ebx = NA.exp(NA.dot(beta, X))\n return 100.*ebx/(1.+ebx)",
"def brownian_motion_log_returns(param):\n sqrt_delta_sigma = math.sqrt(param.time_rate) * param.vol\n return nrand.normal(loc=0, scale=sqrt_delta_sigma, size=param.time)",
"def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint",
"def beta_binomial_log_likelihood_grad(\n alpha, beta,\n positive_weights, negative_weights, total_weights\n):\n res = np.empty((2, alpha.size))\n res[0] = sparse_gammaln_ratio(alpha, positive_weights, deriv=1)\n res[1] = sparse_gammaln_ratio(beta, negative_weights, deriv=1)\n res -= dense_gammaln_ratio(alpha + beta, total_weights, deriv=1)\n return res",
"def lnprior(theta):\n gamma, A = theta\n\n if 0.0 < gamma and 0.0 < A < 2.0 :\n return ( np.log(1.0/A) + np.log(1.0/(1.0+(gamma**2.0))) )\n\n return -np.inf"
] | [
"0.7083514",
"0.7078356",
"0.6964173",
"0.6953273",
"0.68988603",
"0.68163997",
"0.6747757",
"0.6741851",
"0.6740194",
"0.66882396",
"0.66816545",
"0.66319585",
"0.6620972",
"0.6600291",
"0.65937495",
"0.65915376",
"0.6562052",
"0.6561633",
"0.65579647",
"0.6542335",
"0.6538686",
"0.6509212",
"0.6472667",
"0.6461553",
"0.64559877",
"0.64313585",
"0.6419899",
"0.6415828",
"0.6411721",
"0.63879955",
"0.63808906",
"0.63728374",
"0.63583744",
"0.6340881",
"0.63406163",
"0.63057446",
"0.62957275",
"0.62949216",
"0.6292926",
"0.6292773",
"0.62751347",
"0.6267985",
"0.62541187",
"0.62536633",
"0.623995",
"0.6237336",
"0.6228998",
"0.6225636",
"0.6224342",
"0.6219472",
"0.62188035",
"0.6217879",
"0.62177",
"0.62158734",
"0.6208407",
"0.62082005",
"0.6206656",
"0.61990786",
"0.61906624",
"0.6171369",
"0.6170127",
"0.6165391",
"0.61581254",
"0.6156953",
"0.6156441",
"0.6125367",
"0.61176133",
"0.6116395",
"0.6108518",
"0.6108293",
"0.610223",
"0.6094425",
"0.6094319",
"0.60842973",
"0.60683626",
"0.6061056",
"0.6049298",
"0.6038886",
"0.6014704",
"0.60141385",
"0.6011982",
"0.6011318",
"0.60087484",
"0.5999095",
"0.5993093",
"0.59856653",
"0.59767264",
"0.5975184",
"0.5968001",
"0.59665394",
"0.59576976",
"0.59553164",
"0.5953952",
"0.59378994",
"0.5931122",
"0.5924838",
"0.5924719",
"0.5923068",
"0.591912",
"0.59161353",
"0.5896205"
] | 0.0 | -1 |
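Taken together with the `_normalize` entry above, this Bernoulli likelihood drives a simple percent-scale bias estimator. A minimal sketch under that assumption (the class name, uniform prior, and observation sequence are illustrative, not from the dataset):

import numpy as np

class BernoulliSuite:
    def __init__(self):
        self.hypotheses = np.arange(101)        # hypothesized success rates in percent
        self.probs = np.full(101, 1.0 / 101.0)  # uniform prior over the hypotheses

    def likelihood(self, inData):
        # P(outcome | hypothesis) for a single 0/1 observation
        if inData == 1:
            return self.hypotheses / 100.0
        return (100 - self.hypotheses) / 100.0

    def update(self, inData):
        # Multiply the prior by the likelihood, then renormalize
        self.probs = self.probs * self.likelihood(inData)
        self.probs /= self.probs.sum()

suite = BernoulliSuite()
for outcome in (1, 0, 1, 1, 1):  # observed Bernoulli trials: 4 successes, 1 failure
    suite.update(outcome)
print(suite.hypotheses[np.argmax(suite.probs)])  # MAP estimate: 80 (i.e. p = 4/5)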
Likelihood function for 2D paintball process. Assumes that each hypothesis is an (a, b) position pair, with a the shooter's coordinate along the wall and b the perpendicular distance from it. | def likelihood(self, inData):
    lh = np.zeros(len(self.hypotheses))
    # Determine the possible x locations along the wall from the hypotheses
    # (an array, not a plain list, so the vectorized arithmetic below works)
    locs = np.asarray(sorted(set(self.hypotheses[:, 0])))
    # Loop through all hypotheses
    for i, row in enumerate(self.hypotheses):
        # Unpack the (a, b) position for this hypothesis
        a, b = row
        # Build the pmf of x given (a, b):
        # - the angle to each x is theta = arctan((x - a) / b)
        thetas = np.arctan((locs - a) / b)
        # - since x = a + b*tan(theta), dx/dtheta = b / cos^2(theta), so p(x)
        #   is proportional to the inverse speed 1 / (dx/dtheta) = cos^2(theta) / b
        probs = 1.0 / (b / (np.cos(thetas) * np.cos(thetas)))
        probs = self._normalize(probs)
        # The likelihood is the probability assigned to the observed x (inData)
        pos = np.where(locs == inData)[0][0]
        lh[i] = probs[pos]
    return lh | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scattering_efficiency(self):\r\n n = np.arange(1, self.n + 1)\r\n return 2*(np.linalg.norm(np.sqrt(2*n+1)*self.a)\r\n + np.linalg.norm(self.b))/self.x**2",
"def preceptron(X,Y,g,epochs=1000):\n w = g\n for epoch in range(epochs):\n H = np.sign(X.dot(w))\n missclassified = np.where(Y != H)[0] #obtain a list of missclassified point\n if len(missclassified) == 0:\n break\n mc_sample = np.random.choice(missclassified) #pick one missclassified point\n w = w + (Y[mc_sample]*X[mc_sample])\n return w, epoch + 1",
"def log_likelihoodJoint(theta, x, y, data, var, size):\n #unpack the parameters\n #[xpos, ypos]*images) +[amplitude, radius, focus])\n images = len(theta[:-5]) / 2\n peak, radius, focus, width_x, width_y = theta[-5:]\n\n lnL = 0.\n for tmp in xrange(images):\n #X and Y are always in pairs\n center_x = theta[2*tmp]\n center_y = theta[2*tmp+1]\n\n #1)Generate a model Airy disc\n amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius,\n x_0=int(size[0]/2.-0.5), y_0=int(size[1]/2.-0.5))\n airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)\n adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape(size)\n\n #2)Apply Focus, no normalisation as smoothing\n f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)\n focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape(size)\n model = signal.convolve2d(adata, focusdata, mode='same')\n\n #3)Apply CCD diffusion, approximated with a Gaussian -- max = 1 as centred\n CCD = models.Gaussian2D(1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.)\n CCDdata = CCD.eval(x, y, 1., size[0]/2.-0.5, size[1]/2.-0.5, width_x, width_y, 0.).reshape(size)\n model = signal.convolve2d(model, CCDdata, mode='same').flatten()\n\n #lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var[tmp].flatten())\n #Gary B. said that this should be from the model not data so recompute var (now contains rn**2)\n var = var[tmp] + model.copy()\n lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var)\n\n return lnL",
"def likelihood(self):\n \n raise NotImplementedError()",
"def expert_likelihood(self, X, y): #give to it a proper name!!!\n\t\tgaussians_mean = self.experts_predictions(X) #(N,K) X*W + b\n\t\ty = np.repeat( np.reshape(y, (len(y),1)), self.K, axis = 1) #(N,K)\n\n\t\t#print('sigma: ', self.sigma)\n\t\tres = scipy.stats.norm.pdf( np.divide((y - gaussians_mean), self.sigma) ) #(N,K)\n\t\treturn np.divide(res, self.sigma) #normalizing result",
"def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):",
"def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients",
"def get_likelihood(self, observation, position, direction):\n if self.real_robot and observation == 0.0:\n return 1.0\n\n closest = self.world_model.get_closest_wall(position, direction)\n if closest == None:\n # probability of a false positive is 0\n if observation == 0.0:\n return 1.0\n else:\n return 0.0\n elif closest != None and observation == 0.0:\n # probability of missing an obstacle is 0\n return 0.0\n return norm(0, self.model_noise_rate).pdf(abs(position - closest) - observation)",
"def log_prob(self):",
"def Likelihood(self, data, hypo):\n # TODO: fill this in\n like = 1\n return like",
"def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood",
"def loglikehood_coefficient(n_items, X, Y):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n\n def safeLog(d):\n if d <= 0.0:\n return 0.0\n else:\n return np.log(d)\n\n def logL(p, k, n):\n return k * safeLog(p) + (n - k) * safeLog(1.0 - p)\n\n def twoLogLambda(k1, k2, n1, n2):\n p = (k1 + k2) / (n1 + n2)\n return 2.0 * (logL(k1 / n1, k1, n1) + logL(k2 / n2, k2, n2)\n - logL(p, k1, n1) - logL(p, k2, n2))\n\n if X is Y:\n X = Y = np.asanyarray(X)\n else:\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n result = []\n\n # TODO: Check if it is possible to optimize this function\n\n i = 0\n for arrayX in X:\n result.append([])\n for arrayY in Y:\n XY = np.intersect1d(arrayX, arrayY)\n\n if XY.size == 0:\n result[i].append(0.0)\n else:\n nX = arrayX.size\n nY = arrayY.size\n if (nX - XY.size == 0) or (n_items - nY) == 0:\n result[i].append(1.0)\n else:\n logLikelihood = twoLogLambda(float(XY.size),\n float(nX - XY.size),\n float(nY),\n float(n_items - nY))\n\n result[i].append(1.0 - 1.0 / (1.0 + float(logLikelihood)))\n result[i] = np.asanyarray(result[i])\n i += 1\n\n return np.asanyarray(result)",
"def intercept_ball(self):\n\t\t\t##Parameters\n\t\t\tdistance_annulation = 15\n\t\t\tvitesse_danulation = 0\n\t\t\tcoefficient_prediction = 10\n\t\t\t\n\t\t\tif self.get_distance_to_ball() <= distance_annulation or self.b_speed < vitesse_danulation:\n\t\t\t\treturn self.PB\n\t\t\t\n\t\t\telse:\n\t\t\t\tpoint_interception = self.PB + coefficient_prediction * self.ball.vitesse\n\t\t\t\treturn point_interception",
"def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood",
"def log_likelihood(self, data, reward_model, bias_params):",
"def loglikelihood(self, y):\n raise NotImplementedError",
"def compute_movie_rating_likelihood(M):\n\n # define the size to begin with\n likelihood = np.zeros((M, M))\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (c)\n #\n # Remember to normalize the likelihood, so that each column is a\n # probability distribution.\n \n for i in range(M):\n for j in range(M):\n if i == j:\n likelihood[i][j] = 2\n else:\n likelihood[i][j] = 1/abs(j-i)\n \n likelihood = likelihood / likelihood.sum(axis = 1)\n \n #\n # END OF YOUR CODE FOR PART (c)\n # -------------------------------------------------------------------------\n\n return likelihood",
"def NLL(self,y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)",
"def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8",
"def naivebayesPXY_mle(x,y):\n pos_denom = x[y==1].sum()\n neg_denom = x[y==-1].sum()\n posprob = x[y==1].sum(axis = 0)/pos_denom\n negprob = x[y==-1].sum(axis = 0)/neg_denom\n return posprob, negprob",
"def _gv(self):\n return self.y - self.err_inf",
"def gObs(p):\n g = np.array([0.,0.])\n for obs in obstacles(p):\n g += gPenalty(obs[0])*obs[1]\n return g",
"def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood",
"def prob1():\n x, y = sy.symbols('x, y')\n return sy.Rational(2,5) * sy.exp(x**2 - y) * sy.cosh(x + y) + \\\n sy.Rational(3,7) * sy.log(x*y + 1)",
"def see(p, y, yHat):\n n = y.shape[0]\n numer = ((y - yHat) ** 2).sum()\n denom = n - p - 1\n if (denom == 0):\n s = 0\n elif ( (numer / denom) < 0 ):\n s = 0.001\n else:\n s = (numer / denom) ** 0.5\n return s",
"def brentfcn(x: np.ndarray) -> np.ndarray:\n n = x.shape[1]\n assert n == 2, \"The Brent function is defined only on the 2-D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n\n scores = (X + 10) ** 2 + (Y + 10) ** 2 + np.exp(-(X**2) - Y**2)\n return scores",
"def getLikelihood(self, caliStep):\n\n # state vector y_t = H(x_t)+Sigma_t\n stateVec = self.yadeData[caliStep, :, :].dot(self.__obsMatrix)\n obsVec = self.obsData[caliStep, :]\n\n # row-wise subtraction obsVec[numObs]-stateVec[numSamples,numObs]\n vecDiff = obsVec - stateVec\n Sigma = self.getCovMatrix(caliStep, self.obsWeights)\n invSigma = np.linalg.inv(Sigma)\n likelihood = np.zeros(self.numSamples)\n\n # compute likelihood = exp(-0.5*(y_t-H(x_t))*Sigma_t^{-1}*(y_t-H(x_t)))\n for i in range(self.numSamples):\n power = (vecDiff[i, :]).dot(invSigma.dot(vecDiff[i, :].T))\n likelihood[i] = np.exp(-0.5 * power)\n\n # regularize likelihood\n likelihood /= np.sum(likelihood)\n return likelihood",
"def relative_likelihood(self):\n \n if self.num_hidden == 0:\n \n return T.exp(-self.compute_energy(self.x, self.batch_size))\n \n if self.num_hidden > 0:\n \n return T.exp(-self.compute_free_energy(self.x))",
"def _log_lik(self, X, Y, delta):\n pi = self.predict_proba(X, self.fit_intercept, self.coeffs)\n p0, p1, pc = self.p0, self.p1, self.pc\n prb = ((pi * p0 * (1. - p0) ** (Y - 1.)\n + (1. - pi) * p1 * (1. - p1) ** (Y - 1.)\n ) * (1. - pc) ** Y\n ) ** delta \\\n * ((pi * (1 - p0) ** Y\n + (1. - pi) * (1. - p1) ** Y\n ) * pc * (1. - pc) ** (Y - 1.)\n ) ** (1. - delta)\n return np.mean(np.log(prb))",
"def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l",
"def log_lhood(X, Z, Y, a, ep, lamb):\n \n K = Z.shape[1]\n N, T = X.shape\n \n # p(X)\n ZY = np.dot(Z,Y) \n log_pX = 0\n log_pX = log_pX + np.sum(X * np.log(1 - ((1 - lamb) ** ZY) * (1 - ep)))\n log_pX = log_pX + np.sum((1 - X) * np.log(((1 - lamb) ** ZY) * (1 - ep))) \n \n # p(Z)\n HN = 0\n for n in range(1, N+1):\n HN += 1.0/n\n m = Z.sum(axis=0)\n log_pZ = (K * np.log(a) - (a * HN)) + np.sum(gammaln(m) +\n gammaln(N - m + 1) - gammaln(N + 1)) \n\n return log_pZ + log_pX",
"def lnlike(params, cos2, y, yerr):\n a, b, f = params\n model = f * (b * cos2**2 + a * cos2 + 1)\n\n return -0.5 * np.sum((y - model)**2 / yerr**2 + np.log(yerr**2))",
"def score(self, X, y):\n ...",
"def obj(beta, lambd, x, y, h=0.5):\n yt = y*x.dot(beta)\n hinge_loss = (1+h-yt)**2/(4*h)*(np.abs(1-yt) <= h) + (1-yt)*(yt < (1-h)) \n\n return np.mean(hinge_loss) + lambd*np.dot(beta, beta)",
"def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z",
"def mean_poisson_deviance(y_true, y_pred, *, sample_weight=...):\n ...",
"def genball(npt, ndim, rstate=None):\n # use Barthe2005\n x = rstate.standard_normal(size=(npt, ndim))\n y = rstate.exponential(0.5, size=npt)\n x1 = x / np.sqrt((y + (x**2).sum(axis=1)))[:, None]\n return x1",
"def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.log(self.p_y_given_x)[T.arange(self.y.shape[0]), self.y]",
"def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx",
"def l2_training_penalty(batched_out: base.Output):\n if isinstance(batched_out, base.OutputWithPrior):\n return 0.5 * jnp.mean(jnp.square(batched_out.train))\n else:\n logging.warning('L2 weight penalty only works for OutputWithPrior.')\n return 0.",
"def _calc_r2(self):\n sse = np.sum((self.data.y - self.predict(self.data.x))**2)\n sst = np.sum((self.data.y - self.data.y.mean())**2)\n return (1. - sse/sst)",
"def test_marginal_likelihood(self):\n data = np.repeat([1, 0], [50, 50])\n marginals = []\n a_prior_0, b_prior_0 = 1.0, 1.0\n a_prior_1, b_prior_1 = 20.0, 20.0\n\n for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):\n with pm.Model() as model:\n a = pm.Beta(\"a\", alpha, beta)\n y = pm.Bernoulli(\"y\", a, observed=data)\n trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)\n # log_marginal_likelihood is found in the last value of each chain\n lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])\n marginals.append(lml)\n\n # compare to the analytical result\n assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1",
"def backprop(self, x, y):\n \n ### YOUR CODE HERE\n nabla_output = np.zeros(self.params['nh2'])\n del1 = self.aout - y\n nabla_output = del1 * self.ahidden2\n #print (nabla_output)\n del2 = np.zeros(self.params['nh2'])\n for i in range(0, self.params['nh2']):\n del2[i] = self.w_output[i] * del1 * self.ahidden2[i] * (1-self.ahidden2[i])\n # del2 = np.multiply(a1,a2)\n # nabla_input = np.multiply(del2, x)\n nabla_middle = np.zeros([self.params['nh1'],self.params['nh2']])\n for i in range (0, self.params['nh1']):\n for j in range(0, self.params['nh2']):\n #nabla_middle[i][j] = self.ahidden2[j] * del2[j]\n nabla_middle[i][j] = self.ahidden1[i] * del2[j]\n \n #nabla\n del3 = np.zeros(self.params['nh1'])\n # del3 = np.dot(self.w_middle, del2)\n for i in range(0, self.params['nh1']):\n del3[i] = np.dot(self.w_middle[i], del2) * self.ahidden1[i] * (1-self.ahidden1[i])\n # del2 = np.multiply(a1,a2)\n # nabla_input = np.multiply(del2, x)\n nabla_input = np.zeros([x.shape[0],self.params['nh1']])\n for i in range (0, x.shape[0]):\n for j in range(0, self.params['nh1']):\n #nabla_input[i][j] = self.ahidden1[j]*del3[j]\n nabla_input[i][j] = x[i]*del3[j]\n \n \n #nabla_input = np.dot(x, np.transpose(del2))\n ### END YOUR CODE\n \n assert nabla_input.shape == self.w_input.shape\n assert nabla_output.shape == self.w_output.shape\n return (nabla_input, nabla_middle, nabla_output)",
"def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss",
"def lnprob(params, cos2, y, yerr):\n\n # Get prior given parameters\n lp = lnprior(params)\n if not np.isfinite(lp):\n return -np.inf\n\n # Include likelihood given data\n llh = lp + lnlike(params, cos2, y, yerr)\n\n return llh",
"def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood",
"def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)",
"def see_what_its_doing_2d(length_scale, cheating, pollution_mean, pollution_std, pick_number):\n\n a = create_points_with_spatially_correlated_pollution_2d(10, 100, 10, length_scale, 1)\n b = pick_uniform_random_points_on_map_of_maps(a, pick_number, pollution_mean, pollution_std)\n if cheating:\n c = interpolate_unknown_points_of_a_map_of_maps_of_points(b, a, RBF(length_scale), fixed=True)\n else:\n c = interpolate_unknown_points_of_a_map_of_maps_of_points(b, a, RBF(np.random.randint(1, 10000)), fixed=False)\n\n x1 = []\n y1 = []\n z1 = []\n for point in b[0].values():\n x1.append(point.get_x_cord())\n y1.append(point.get_y_cord())\n z1.append(point.get_pollution_value())\n\n x2 = []\n y2 = []\n z2 = []\n\n for label, point in c[0][0].items():\n if not label in b[0].keys():\n x2.append(point.get_x_cord())\n y2.append(point.get_y_cord())\n z2.append(point.get_pollution_value())\n print(average_rmse_of_maps(c))\n plot_numbers_3d_and_save(x1, y1, z1, x2, y2, z2, \"Rotating Graph.gif\")\n\n # mywriter = animation.FFMpegWriter(fps=60)\n # rot_animation.save(\"rotation.mp4\",dpi = 80, writer= mywriter)",
"def computeLoss(self):\n return sum(np.arccosh(-minkowskiArrayDot(self.examples, self.centroid)) ** 2)[0] / np.shape(self.examples)[0]",
"def fObs(p):\n f = 0\n for obs in obstacles(p):\n f += fPenalty(obs[0])\n return f",
"def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll",
"def pix_analysis(length, legs, count):\n mats_wins = 0\n pats_wins = 0\n ties = 0\n for _ in range(count):\n end_of_game = pix(length, legs)\n if end_of_game == 1:\n mats_wins += 1\n elif end_of_game == -1:\n pats_wins += 1\n else:\n ties += 1\n print('Mats wins:', mats_wins, '\\nPats wins:', pats_wins, '\\nTies:', ties)",
"def joy(self):\n return self._joy_likelihood",
"def blurred(self):\n return self._blurred_likelihood",
"def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)",
"def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans",
"def _loglik(self, cur_y, cur_z):\n assert(cur_z.shape[1] == cur_y.shape[0])\n\n n_by_d = np.dot(cur_z, cur_y)\n not_on_p = np.power(1. - self.lam, n_by_d) * (1. - self.epislon)\n loglik_mat = np.log(np.abs(self.obs - not_on_p))\n return loglik_mat.sum()",
"def r2(self) -> float:\n zx = (self.true - np.mean(self.true)) / np.std(self.true, ddof=1)\n zy = (self.predicted - np.mean(self.predicted)) / np.std(self.predicted, ddof=1)\n r = np.sum(zx * zy) / (len(self.true) - 1)\n return float(r ** 2)",
"def beta_log_likelihood(x, shape1, shape2):\n logbeta = loggamma(shape1) + loggamma(shape2) - loggamma(shape1+shape2)\n return (1.0-shape1)*np.sum(np.log(x)) + (1.0-shape2)*np.sum(np.log(1.0-x)) + len(x)*logbeta",
"def naivebayesPXY_smoothing(x,y):\n \n shape = x.shape\n d = shape[1] if shape[1:] else 1\n pos_denom = x[y==1].sum() + d\n neg_denom = x[y==-1].sum() + d\n posprob = (x[y==1].sum(axis = 0) + 1)/pos_denom\n negprob = (x[y==-1].sum(axis = 0) + 1)/neg_denom\n return posprob, negprob",
"def linearFitWithOutliers(x, y, e, outtriangle='linear.png'):\n # theta will be an array of length 2 + N, where N is the number of points\n # theta[0] is the intercept, theta[1] is the slope,\n # and theta[2 + i] is the weight g_i\n def log_prior(theta):\n #g_i needs to be between 0 and 1\n if (all(x > 0. for x in theta[2:]) and all(x < 1. for x in theta[2:])) and \\\n 0. < theta[0] < 10. and 0. < theta[1] < 0.1:\n return 0\n else:\n return -np.inf # recall log(0) = -inf\n\n def log_likelihood(theta, x, y, e, sigma_B):\n dy = y - theta[0] - theta[1] * x\n g = np.clip(theta[2:], 0, 1) # g<0 or g>1 leads to NaNs in logarithm\n logL1 = np.log(g) - 0.5 * np.log(2 * np.pi * e ** 2) - 0.5 * (dy / e) ** 2\n logL2 = np.log(1 - g) - 0.5 * np.log(2 * np.pi * sigma_B ** 2) - 0.5 * (dy / sigma_B) ** 2\n return np.sum(np.logaddexp(logL1, logL2))\n\n def log_posterior(theta, x, y, e, sigma_B):\n return log_prior(theta) + log_likelihood(theta, x, y, e, sigma_B)\n\n\n #find starting point\n def squared_loss(theta, x=x, y=y, e=e):\n dy = y - theta[0] - theta[1] * x\n return np.sum(0.5 * (dy / e) ** 2)\n theta1 = optimize.fmin(squared_loss, [0, 0], disp=False)\n\n ndim = 2 + len(x) # number of parameters in the model\n nwalkers = 200 # number of MCMC walkers\n nburn = 5000 # \"burn-in\" period to let chains stabilize\n nsteps = 50000 # number of MCMC steps to take\n\n # set theta near the maximum likelihood, with\n starting_guesses = np.zeros((nwalkers, ndim))\n starting_guesses[:, :2] = np.random.normal(theta1, 1, (nwalkers, 2))\n starting_guesses[:, 2:] = np.random.normal(0.5, 0.1, (nwalkers, ndim - 2))\n\n #initiate sampler\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[x, y, e, 20])\n\n # Run a burn-in and set new starting position\n print \"Burning-in...\"\n pos, prob, state = sampler.run_mcmc(starting_guesses, nburn)\n best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)\n sampler.reset()\n\n print \"Running an improved estimate...\"\n pos, prob, state = sampler.run_mcmc(pos, nburn)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n sampler.reset()\n print \"Running MCMC...\"\n pos, prob, state = sampler.run_mcmc(pos, nsteps, rstate0=state)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n\n #sample shape = (nwalkers, nsteps, ndim)\n sample = sampler.chain.reshape(-1, ndim)\n\n params = np.mean(sample[:, :2], 0)\n g = np.mean(sample[:, 2:], 0)\n outliers = (g < 0.5)\n\n #Get the index with the highest probability\n maxprob_index = np.argmax(prob)\n\n #Get the best parameters and their respective errors and print best fits\n params_fit = pos[maxprob_index][:2]\n errors = [sampler.flatchain[:, i].std() for i in xrange(ndim)][:2]\n\n fig = triangle.corner(sample, labels=['intercept' , 'slope'] + len(x)*['Gi',])\n fig.savefig(outtriangle)\n plt.close()\n\n\n return params, params_fit, errors, outliers",
"def __call__(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:",
"def determine_measure_position(self):\n green_probs = []\n net_size = len(self.net)\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in range(0, net_size):\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.net[i].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.net[i].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.net[i].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.net[i].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.net[i].value * self.ct[4][0]\n green_probs.append(accum)\n #Returns the position in which the probability of\n #obtaining green when measuring is the highest.\n return self.net[np.argmax(green_probs)].id",
"def log_likelihood_loss(y, tx, w):\n p_1 = sigmoid(tx.dot(w))\n p_0 = np.log(1-p_1)\n p_1 = np.log(p_1)\n return -np.sum((y == 1)*p_1+(y == 0)*p_0)",
"def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint",
"def Likelihood(self, data, hypo):\n p_correct = hypo\n score = data\n\n k = self.exam.Reverse(score)\n n = self.exam.max_score\n like = thinkbayes2.EvalBinomialPmf(k, n, p_correct)\n return like",
"def L2(yhat, y):\n loss = np.dot((y - yhat).T,(y - yhat))\n \n return loss",
"def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)",
"def function1(individual, position, height, width):\n value = 0.0\n for x, p in zip(individual, position):\n value += (x - p)**2\n return height / (1 + width * value)",
"def p2(self) -> float:\n return self.distortion_coefficients[4]",
"def bPlusbStar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx *= (1 / self.prob.gamma)\n running_total += 1 - abcxx - ayxx\n return running_total",
"def compute_thick_lens_approximation(self):\r\n\r\n x = self.film_diagnal * 0.001\r\n so = ti.Vector([x, 0.0, self.front_z() + 1.0])\r\n sd = ti.Vector([0.0, 0.0, -1.0])\r\n fo = ti.Vector([x, 0.0, self.rear_z() - 1.0])\r\n fd = ti.Vector([0.0, 0.0, 1.0])\r\n ok1, o1, d1 = self.gen_ray_from_scene(so, sd)\r\n ok2, o2, d2 = self.gen_ray_from_film(fo, fd)\r\n assert ok1 == True and ok2 == True\r\n fz, pz = self.compute_cardinal_points(so, o1, d1)\r\n fz1, pz1 = self.compute_cardinal_points(fo, o2, d2)\r\n assert fz1 < pz1 and pz < fz\r\n return fz, pz, fz1, pz1",
"def lnlike(params, observables, nDraws=1000000):\n #print('checking type ({}) and length ({}) of params in lnlikefxn'.format(type(params),len(params)))\n evalData=generateModelData(params, distance_standoffMid, nDraws)\n evalHist, evalBinEdges = np.histogram(evalData[:,3], tof_nBins, tof_range,\n density=True)\n logEvalHist = np.log(evalHist)\n #print(logEvalHist)\n # find what TOFs have zero observed data\n # we'll use this to handle cases where we might wind up with -inf*0\n # likelihood is fine if PDF is 0 somewhere where no data is found\n # without checks though, ln(PDF=0)=-inf, -inf*0 = nan\n # however, if PDF is 0 (lnPDF=-inf) where there IS data, lnL should be -inf\n zeroObservedIndices = np.where(observables == 0)[0]\n for idx in zeroObservedIndices:\n if logEvalHist[idx] == -inf:\n logEvalHist[zeroObservedIndices] = 0\n \n loglike = np.dot(logEvalHist,observables)\n return loglike",
"def logloss_mc(y_true, y_prob, epsilon=1e-15):\n # normalize\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\n y_prob = np.maximum(epsilon, y_prob)\n y_prob = np.minimum(1 - epsilon, y_prob)\n # get probabilities\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\n ll = - np.mean(np.log(y))\n return ll",
"def calculate_negative_log_likelihood(self):\n data = self.played_points_hist[:self.t]\n kernel_matrix = self.kernel_fn(data, data, self.best_ard_params)\n c_matrix = kernel_matrix + (self.noise_sigma ** 2) * np.eye(data.shape[0])\n c_matrix_inv = np.linalg.inv(c_matrix)\n first_term = np.matmul(self.rews_hist[:self.t].T, np.matmul(c_matrix_inv, self.rews_hist[:self.t]))\n second_term = np.log(np.linalg.det(c_matrix))\n return first_term + second_term",
"def predict(x,w,bb):\r\n return 2*((x.dot(w)+bb)>0)-1",
"def feller(self):\n return 2 * self.kappa_y * self.mean_v - self.eta_y**2 > 0",
"def log_lik(Y, delta, coeffs):\n p0, p1, pc, pi = coeffs[0], coeffs[1], coeffs[2], coeffs[3]\n prb = ((pi * p0 * (1. - p0) ** (Y - 1.)\n + (1. - pi) * p1 * (1. - p1) ** (Y - 1.)\n ) * (1. - pc) ** Y\n ) ** delta \\\n * ((pi * (1 - p0) ** Y\n + (1. - pi) * (1. - p1) ** Y\n ) * pc * (1. - pc) ** (Y - 1.)\n ) ** (1. - delta)\n return np.mean(np.log(prb))",
"def info(probs):\n e = probs.reshape(-1, 3).dot(_alleles)\n f = probs.reshape(-1, 3).dot(_sq_alleles)\n theta_hat = e.sum() / (2 * len(e))\n info = 1\n if theta_hat > 0 and theta_hat < 1:\n info -= (f - numpy.square(e)).sum() / (2 * len(e) * theta_hat * (1 - theta_hat))\n return e, info",
"def leonfcn(x: np.ndarray) -> np.ndarray:\n assert x.shape[1] == 2, \"Leon function is defined only on a 2D space.\"\n X = x[:, 0]\n Y = x[:, 1]\n scores = 100 * ((Y - X**3) ** 2) + ((1 - X) ** 2)\n return scores",
"def _log_likelihood(self, theta, f, x, y, yerr):\n sigma2 = yerr**2\n return -0.5*np.sum((y - f(theta, x))**2 / sigma2 + 2*np.log(sigma2))",
"def negative_log_likelihood(self, y):\r\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\r\n # number of examples (call it n) in the minibatch\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain\r\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\r\n # Log-Probabilities (call it LP) with one row per example and\r\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\r\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\r\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\r\n # the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])",
"def N_gfun(self,y):\n return 0.0",
"def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")",
"def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)",
"def test_twodstats():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(2)\n else:\n logger = None\n\n model = piff.Gaussian(fastfit=True)\n interp = piff.Polynomial(order=1) # should find that order=1 is better\n # create background model\n stars, true_model = generate_starlist(100)\n psf = piff.SimplePSF(model, interp)\n psf.fit(stars, None, None)\n stars = psf.stars # These have the right fit parameters\n\n # check the coeffs of sigma and g2, which are actually linear fits\n # skip g1 since it is actually a 2d parabola\n # factor of 0.263 is to account for going from pixel xy to wcs uv\n np.testing.assert_almost_equal(psf.interp.coeffs[0].flatten(),\n np.array([0.4, 0, 1. / (0.263 * 2048), 0]), decimal=4)\n np.testing.assert_almost_equal(psf.interp.coeffs[2].flatten(),\n np.array([-0.1 * 1000 / 2048, 0, 0.1 / (0.263 * 2048), 0]),\n decimal=4)\n\n stats = piff.TwoDHistStats(nbins_u=5, nbins_v=5) # implicitly np.median\n stats.compute(psf, stars, logger=logger)\n # check the twodhists\n # get the average value in the bin\n u_i = 3\n v_i = 3\n icen = stats.twodhists['u'][v_i, u_i] / 0.263\n jcen = stats.twodhists['v'][v_i, u_i] / 0.263\n print('icen = ',icen)\n print('jcen = ',jcen)\n icenter = 1000\n jcenter = 2000\n # the average value in the bin should match up with the model for the average coordinates\n sigma, g1, g2 = psf_model(icen, jcen, icenter, jcenter)\n gsq = g1**2 + g2**2\n T = 2*sigma**2 * (1+gsq)/(1-gsq)\n T_average = stats.twodhists['T'][v_i, u_i]\n g1_average = stats.twodhists['g1'][v_i, u_i]\n g2_average = stats.twodhists['g2'][v_i, u_i]\n # assert equal to 4th decimal\n print('T, g1, g2 = ',[T,g1,g2])\n print('av T, g1, g2 = ',[T_average,g1_average,g2_average])\n np.testing.assert_almost_equal([T, g1, g2], [T_average, g1_average, g2_average],\n decimal=2)\n\n # Test the plotting and writing\n twodstats_file = os.path.join('output','twodstats.pdf')\n stats.write(twodstats_file)\n\n with np.testing.assert_raises(ValueError):\n stats.write() # If not given in constructor, must give file name here.\n\n # repeat for whisker\n stats = piff.WhiskerStats(nbins_u=21, nbins_v=21, reducing_function='np.mean')\n stats.compute(psf, stars)\n # Test the plotting and writing\n whisker_file = os.path.join('output','whiskerstats.pdf')\n stats.write(whisker_file)\n with np.testing.assert_raises(ValueError):\n stats.write()\n\n # With large number of bins, many will have no objects. This is ok.\n # Also, can use other np functions like max, std, instead to get different stats\n # Not sure when these would be useful, but they are allowed.\n # And, check usage where file_name is given in init.\n twodstats_file2 = os.path.join('output','twodstats.pdf')\n stats2 = piff.TwoDHistStats(nbins_u=50, nbins_v=50, reducing_function='np.std',\n file_name=twodstats_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars, logger=logger)\n stats2.write()\n\n whisker_file2 = os.path.join('output','whiskerstats.pdf')\n stats2 = piff.WhiskerStats(nbins_u=100, nbins_v=100, reducing_function='np.max',\n file_name=whisker_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars)\n stats2.write()",
"def likelihoods(self, alleles):\n\n models = self.models_dict[len(alleles)]\n\n F = self.joint_frequencies_combo(alleles)\n\n ### BPH ###\n (((A0, A1),((B0,),)),) = models['BPH'][1].items()\n\n BPH = (A0 / A1) * F[B0]\n\n\n BPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['BPH'][2].items())\n\n if len(alleles)>2:\n BPH += sum( sum(F[B0] * sum( F[B1] * F[B2] for (B1, B2) in C[B0]) for B0 in C) * A0 / A1\n for (A0, A1), C in models['BPH'][3].items())\n\n ### SPH ###\n (((A0, A1),((B0,),)),) = models['SPH'][1].items()\n SPH = (A0 / A1) * F[B0]\n\n SPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['SPH'][2].items())\n\n ### DIPLOIDY ###\n (((A0, A1),((B0,),)),) = models['DISOMY'][1].items()\n DISOMY = (A0 / A1) * F[B0]\n\n DISOMY += sum( sum( F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['DISOMY'][2].items())\n\n ### MONOSOMY ###\n ((B0,),) = models['MONOSOMY'][1][(1,1)]\n MONOSOMY = F[B0]\n\n result = likelihoods_tuple(MONOSOMY, DISOMY, SPH, BPH)\n return result",
"def getL(y):\n\n x = np.array(range(0,len(y)))\n b = cov(x,y) / cov(x,x)\n a = np.mean(y) - b * np.mean(x)\n \n return b * len(y) + a",
"def dwalls(r,param):\r\n V = param[0]\r\n sig = param[1]\r\n L = param[2]\r\n\r\n a = 1/sig\r\n\r\n x0 = L/2.\r\n y0 = 0.\r\n V0 = 10000*V\r\n Rx = 0.01*L\r\n Ry = 0.6*L\r\n\r\n x = r[0] - x0*np.sign(r[0])\r\n y = r[1] - y0*np.sign(r[1])\r\n\r\n\r\n px = np.sqrt(x**2)\r\n py = np.sqrt(y**2)\r\n try:\r\n f1 = -V0*((np.sign(x)*np.exp((px-Rx)/a))/(a*(np.exp((px-Rx)/a)+1)**2))*(1/(1 + np.exp((py-Ry)/a)))\r\n\r\n x0 = 0.\r\n y0 = L/2.\r\n V0 = 10000*V\r\n Rx = 0.6*L\r\n Ry = 0.01*L\r\n\r\n x = r[0] - x0*np.sign(r[0])\r\n y = r[1] - y0*np.sign(r[1])\r\n px = np.sqrt(x**2)\r\n py = np.sqrt(y**2)\r\n\r\n f2 = -V0*((np.sign(x)*np.exp((Rx+px)/a))/(a*(np.exp(Rx/a)+np.exp(px/a))**2))*(1/(1 + np.exp((py-Ry)/a)))\r\n except RuntimeWarning:\r\n f1 = 0.\r\n f2 = 0.\r\n except FloatingPointError:\r\n f1 = 0.\r\n f2 = 0.\r\n f = f1 + f2\r\n return f",
"def logloss_mc(y_true, y_prob, epsilon=10e-15):\r\n # normalize\r\n y_prob = y_prob / y_prob.sum(axis=1).reshape(-1, 1)\r\n print 'y_prob: ' + str(y_prob[1])\r\n print 'y_true: ' + str(y_true[1])\r\n y_prob = np.maximum(epsilon, y_prob)\r\n y_prob = np.minimum(1 - epsilon, y_prob)\r\n print 'y_prob: ' + str(y_prob[1])\r\n print 'y_true: ' + str(y_true[1])\r\n # get probabilities\r\n y = [y_prob[i, j] for (i, j) in enumerate(y_true)]\r\n print 'y: ' + str(y[1])\r\n print 'y_true: ' + str(y_true[1])\r\n ll = - np.mean(np.log(y))\r\n return ll",
"def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")",
"def test_G_2_by_2_1tailed_examples(self):\r\n # first up...the famous arginine case\r\n self.assertFloatEqualAbs(G_2_by_2(36, 16, 38, 106), (29.111609, 0),\r\n 0.00001)\r\n # then some other miscellaneous positive and negative values\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(0, 52, 12, 132), (-7.259930, 0.996474),\r\n 0.00001)\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(5, 47, 14, 130), (-0.000481, 0.508751),\r\n 0.00001)\r\n self.assertFloatEqualAbs(\r\n G_2_by_2(5, 47, 36, 108), (-6.065167, 0.993106),\r\n 0.00001)",
"def evaluate(t, x, y):\n r = np.sqrt(x**2 + y**2)\n return contrast * np.cos(kx_g*x + ky_g*y - w_g*t) * (1 - heaviside(r - patch_diameter*0.5))",
"def get_likelihood(self, d):\n pos = d.pos - self.parent.pos\n pos = np.dot(rotmat(-self.angle), pos)\n lik = halfnorm.pdf(pos[0],scale=self.length) * \\\n vonmises.pdf(np.arctan2(pos[1],pos[0]),self.vonmisesscale,loc=self.angle)\n #assert lik!=0.0\n return lik",
"def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)",
"def false_pos(yt, yp) -> Any:\n from keras import backend as K\n return K.sum(K.cast(yp * (1 - yt) > 0.5, 'float')) / K.maximum(1.0, K.sum(1 - yt))",
"def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z",
"def _loglik_nth(self, cur_y, cur_z, n):\n assert(cur_z.shape[1] == cur_y.shape[0])\n \n not_on_p = np.power(1. - self.lam, np.dot(cur_z[n], cur_y)) * (1. - self.epislon)\n loglik = np.log(np.abs(self.obs[n] - not_on_p)).sum()\n return loglik",
"def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])",
"def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint"
] | [
"0.5982309",
"0.57880336",
"0.57070684",
"0.56869805",
"0.5685148",
"0.56721294",
"0.563048",
"0.5612882",
"0.5603829",
"0.5589663",
"0.5569503",
"0.55325603",
"0.5488627",
"0.54650915",
"0.54553336",
"0.54523873",
"0.5445402",
"0.54440176",
"0.544028",
"0.54351896",
"0.5431328",
"0.54208297",
"0.54187554",
"0.541102",
"0.5390471",
"0.5387445",
"0.5376929",
"0.53715235",
"0.53614146",
"0.5356263",
"0.53338456",
"0.5326866",
"0.5323457",
"0.5321885",
"0.53218436",
"0.52824426",
"0.528046",
"0.5280086",
"0.527031",
"0.52593255",
"0.52525806",
"0.52495486",
"0.52436465",
"0.5235284",
"0.52318186",
"0.52314305",
"0.52310115",
"0.5225164",
"0.5214717",
"0.52138555",
"0.5213373",
"0.5195783",
"0.5190605",
"0.51887864",
"0.5186278",
"0.5179834",
"0.5179133",
"0.51790535",
"0.51771855",
"0.51768756",
"0.5176132",
"0.51755345",
"0.5171382",
"0.5160093",
"0.51588774",
"0.5157139",
"0.5149903",
"0.5148375",
"0.51482946",
"0.51471907",
"0.5145692",
"0.51443857",
"0.5143685",
"0.5142595",
"0.5138991",
"0.51380855",
"0.5136056",
"0.51333916",
"0.51312226",
"0.513035",
"0.5129376",
"0.5128491",
"0.5128199",
"0.51256496",
"0.5123845",
"0.51204354",
"0.5112945",
"0.51124924",
"0.5105446",
"0.51015896",
"0.50969785",
"0.5095208",
"0.5095113",
"0.5093928",
"0.50920326",
"0.5088228",
"0.50859094",
"0.50823575",
"0.5080958",
"0.5080084",
"0.5078674"
] | 0.0 | -1 |
Midpoint between two points | def mid(p1, p2):
return [(p1[0]+p2[0])/2., (p1[1]+p2[1])/2.] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mid_point(a: Point, b: Point) -> Point:\n return Point((a.x + b.x) / 2, (a.y + b.y) / 2)",
"def midpoint(point1, point2):\n\n x, y = (int((point1[0] + point2[0]) / 2), int((point1[1] + point2[1]) / 2))\n return (x, y)",
"def midpoint(ptA, ptB):\n return( (ptA[0] + ptB[0]) * 0.5, (ptA[1]+ ptB[1]) * 0.5 )",
"def midpoint(p1, p2):\n return np.array([(p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2, (p1[2] + p2[2]) / 2])",
"def getMidPoint(self):\n return p.Point((self.start.normalVector + self.end.normalVector)/2.0)",
"def midpoint(a, b):\n return ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2)",
"def midpoint(a, b):\n mp = [(a.x + b.x) / 2, (a.y + b.y) / 2]\n return Vector(*mp)",
"def _mid(pt1, pt2):\n (x0, y0), (x1, y1) = pt1, pt2\n return 0.5 * (x0 + x1), 0.5 * (y0 + y1)",
"def test_midpoint(self):\n p1 = Point(0, 0)\n p2 = Point(10, 10)\n midpoint = p1.midpoint(p2)\n self.assertAlmostEqual(midpoint.lat, 5)\n self.assertAlmostEqual(midpoint.lon, 5)",
"def _middle_point(p1, p2):\n x = int((p1.x + p2.x) / 2)\n y = int((p1.y + p2.y) / 2)\n return (x, y)",
"def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)",
"def midpoint(self, other: PointType = None) -> PointType:\n return (self + (other or Point())) / 2",
"def midpoint(self) -> Tuple[int, int]:\n pass",
"def midpoint(self) -> Tuple[int, int]:\n minx, miny, maxx, maxy = self.substrates.bounds\n return ((minx + maxx) // 2, (miny + maxy) // 2)",
"def mid_point(self, vector):\n return self.eval_2pts(vector, 0.5)",
"def midcoords(p, c1, c2):\n return make_coords(pos=midpoint(p, c1.worldpos(), c2.worldpos()),\n rot=midrot(p, c1.worldrot(), c2.worldrot()))",
"def midpoint_euclidean(self, x1, y1, x2, y2):\n dist_x = abs(x1 - x2) / 2.\n dist_y = abs(y1 - y2) / 2.\n res_x = x1 - dist_x if x1 > x2 else x2 - dist_x\n res_y = y1 - dist_y if y1 > y2 else y2 - dist_y\n return res_x, res_y",
"def mid(p1, p2):\n return (p1[0]+p2[0])/2, (p1[1]+p2[1])/2, (p1[2]+p2[2])/2",
"def pointcenter(x):\n return point(x)",
"def midpoint(bbox):\n return (0.5*(bbox[0][0] + bbox[1][0]), 0.5*(bbox[0][1] + bbox[1][1]))",
"def midpt_formula(loc1, loc2):\n xm = (loc1[0] + loc2[0])/2.0\n ym = (loc1[1] + loc2[1])/2.0\n return [xm, ym]",
"def midpoint(self,i,f):\n\n summation = self.points[f, :] + self.points[i, :]\n midploint = summation/2\n x_mid = midploint[0]\n y_mid = midploint[1]\n\n return x_mid,y_mid",
"def midpoint_of_points(pnts: Iterable[Point]) -> Point:\n num = len(pnts)\n x = sum(pnt.x for pnt in pnts)/num\n y = sum(pnt.y for pnt in pnts)/num\n z = sum(pnt.z for pnt in pnts)/num\n return Point(x, y, z)",
"def midpoint(f, x0, h):\n return 2.0*h*f(x0+h);",
"def getPoint(self, a):\n lng = self.source.center.lng + (self.target.center.lng - self.source.center.lng) * min(a, 1)\n lat = self.source.center.lat + (self.target.center.lat - self.source.center.lat) * min(a, 1)\n return lng, lat",
"def center_point(self) -> tuple:\n return (self.min_lat + self.max_lat) / 2, (self.min_lon + self.max_lon) / 2",
"def mid(self, line):\n return [(line.x1 + line.x2) // 2, (line.y1 + line.y2) // 2]",
"def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b",
"def middlePoint(a, b, createLocator=False):\r\n if cmds.objExists(a) and cmds.objExists(b):\r\n # get xform datas:\r\n aPos = cmds.xform(a, query=True, worldSpace=True, rotatePivot=True)\r\n bPos = cmds.xform(b, query=True, worldSpace=True, rotatePivot=True)\r\n # calculating the result position:\r\n resultPosX = ( aPos[0] + bPos[0] )/2\r\n resultPosY = ( aPos[1] + bPos[1] )/2\r\n resultPosZ = ( aPos[2] + bPos[2] )/2\r\n resultPos = [resultPosX, resultPosY, resultPosZ]\r\n if createLocator:\r\n middleLoc = cmds.spaceLocator(name=a+\"_\"+b+\"_middle_loc\", position=resultPos)[0]\r\n cmds.xform(middleLoc, centerPivots=True)\r\n return [resultPos, middleLoc]\r\n return[resultPos]",
"def measure_between_two_points(self, point_a, point_b):\n # cHaversine expects points to be given as (latitude, longitude) pairs.\n # TODO: Determine if this check for non-null values is necessary.\n if point_a and point_b:\n return haversine(tuple(point_a), tuple(point_b))",
"def get_midpoint(half_distance: int, steps: [(int, str, str)]) -> (str, str):\r\n cur_distance = 0\r\n cur_step = 0\r\n while(cur_distance + steps[cur_step][0] <= half_distance):\r\n cur_distance += steps[cur_step][0]\r\n cur_step += 1\r\n\r\n left_over_distance = half_distance - cur_distance\r\n\r\n # lat, lng\r\n startpos = (steps[cur_step][1]['lat'],steps[cur_step][1]['lng'])\r\n endpos = (steps[cur_step][2]['lat'],steps[cur_step][2]['lng'])\r\n lat_length = endpos[0] - startpos[0]\r\n lng_length = endpos[1] - startpos[1]\r\n\r\n\r\n # get angle in radians\r\n angle = math.atan(lng_length/lat_length)\r\n\r\n if (endpos[0] > startpos[0] and endpos[1] > startpos[1]) or \\\r\n (endpos[0] < startpos[0] and endpos[1] > startpos[1]):\r\n return (startpos[0]+left_over_distance*math.cos(angle)/111000, startpos[1]+left_over_distance*math.sin(angle)/111000)\r\n return (startpos[0]-left_over_distance*math.cos(angle)/111000, startpos[1]-left_over_distance*math.sin(angle)/111000)",
"def center_point(polyline):\n\tpts = unique(polyline.points)\n\treturn sum(pts) / len(pts)",
"def shortest_line_to_point(point_a, point_b, point_c): # where a and b are on spin axis, c is the point spinning round\n axis_vect = np.subtract(point_a, point_b)\n axis_mag = magnitude(point_a, point_b)\n unit_axis = np.divide(axis_vect, axis_mag) # unit of pp\n # pp' constants - p\n\n # pp dot u\n t = np.sum(np.dot(unit_axis, unit_axis))\n c = np.sum(np.dot(np.subtract(point_b, point_c), unit_axis))\n p = -c / t\n project_point_on_axis_add = (np.multiply(unit_axis, p))\n project_point_on_axis = project_point_on_axis_add + point_b\n distance = magnitude(point_c, project_point_on_axis)\n return distance, project_point_on_axis",
"def center(self) -> Tuple[int, int]:\n center_x = int((self.x1 + self.x2) // 2)\n center_y = int((self.y1 + self.y2) // 2)\n return (center_x, center_y)",
"def halfway(self, target):\n mx = (self.x + target.x) / 2\n my = (self.y + target.y) / 2\n return Point(mx, my)",
"def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))",
"def angle_midpoint(ang1,ang2,units):\n return ang1 + angle_difference(ang1,ang2,units)/2.",
"def centre(self):\n n = len(self.point)\n return Point(\n sum(map(lambda p: p.x, self.point)) / n,\n sum(map(lambda p: p.y, self.point)) / n\n )",
"def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)",
"def halfway(self, target):\r\n mx = (self.x + target.x)/2\r\n my = (self.y + target.y)/2\r\n return Point(mx, my)",
"def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)",
"def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)",
"def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)",
"def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))",
"def mid_point(img, box):\n\n x1, y1, w, h = box[0], box[1], box[2], box[3]\n x2, y2 = x1+w, y1+h\n \n x_mid = int((x1+x2)/2)\n y_mid = int(y2)\n mid = (x_mid,y_mid)\n \n _ = cv2.circle(img, mid, 5, (255, 0, 0), -1)\n \n return mid",
"def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4",
"def _center(pos: ArrayLike, shift: ArrayLike) -> Tuple[float, float]:\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2",
"def great_circle(a: Point, b: Point) -> Km:\n\n lat1, lng1, lat2, lng2 = map(radians, [a.latitude, a.longitude, b.latitude, b.longitude])\n sin_lat1, sin_lat2 = map(sin, [lat1, lat2])\n cos_lat1, cos_lat2 = map(cos, [lat1, lat2])\n delta_lng = lng2 - lng1\n cos_delta_lng, sin_delta_lng = cos(delta_lng), sin(delta_lng)\n\n d = atan2(\n sqrt((cos_lat2 * sin_delta_lng) ** 2 + (cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_delta_lng) ** 2),\n sin_lat1 * sin_lat2 + cos_lat1 * cos_lat2 * cos_delta_lng,\n )\n\n return Km(6371.009 * d) # Radius of earth in kilometers is 6371",
"def calculate_slope_between_two_points(point_a: Dict[str,float], point_b: Dict[str, float]) -> float: # _5 [✅] \n if len(point_a) == len(point_b) == 0: raise ValueError\n if set(point_a).symmetric_difference(set(point_b)) == set():\n return float('inf') if int(point_b['x'] - point_a['x']) == 0 else int((int(point_b['y'] - point_a['y']) / int(point_b['x'] - point_a['x'])))\n elif set(point_a).symmetric_difference(set(point_b)) != set(): raise ValueError\n elif point_a['x'] == point_b['x'] and point_b['y'] == point_a['y']: return float('inf')",
"def root_midpoint(self):\n node1, node2, distance = self.find_middle_point()\n self.root_nodes(node1, node2, distance)",
"def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)",
"def _center_distance(self):\n # Split positions in segments of two points :\n cut = np.vsplit(self.a_position, int(self.a_position.shape[0]/2))\n # Get center position and starting line position :\n center = np.mean(cut, axis=1)\n\n # ============ EUCLIDIAN DISTANCE ============\n diff = np.sqrt(np.square(center[:, np.newaxis, :] - center).sum(2))\n diff[np.tril_indices_from(diff)] = np.inf\n\n return center, diff",
"def intersect_point(self,m1,c1,m2,c2):\n\n x = (c2 - c1)/(m1 - m2)\n y = m1*x + c1\n return x, y",
"def compute_x1_x2_points(point_a: list, point_b: list, nav: navigation.GPSComputing, logger: utility.Logger):\n\n cur_vec_dist = nav.get_distance(point_a, point_b)\n\n # check if moving vector is too small for maneuvers\n if config.MANEUVER_START_DISTANCE * 2 >= cur_vec_dist:\n msg = \"No place for maneuvers; config start maneuver distance is (that will be multiplied by 2): \" + \\\n str(config.MANEUVER_START_DISTANCE) + \" current moving vector distance is: \" + str(cur_vec_dist) + \\\n \" Given points are: \" + str(point_a) + \" \" + str(point_b)\n # print(msg)\n logger.write(msg + \"\\n\")\n return None, None\n\n point_x1 = nav.get_point_on_vector(\n point_a, point_b, config.MANEUVER_START_DISTANCE)\n point_x2 = nav.get_point_on_vector(\n point_a, point_b, cur_vec_dist - config.MANEUVER_START_DISTANCE)\n return point_x1, point_x2",
"def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)",
"def normal(point0: Point, point1: Point) -> Tuple[Point, float]:\n mid: Point = ((point0[0] + point1[0]) / 2, (point0[1] + point1[1]) / 2)\n v: Vector2 = (point1[0] - point0[0], point1[1] - point0[1])\n normal: Vector2 = (-v[1], v[0])\n\n angle = math.atan(v[1] / v[0])\n angleNorm = math.atan(normal[1] / normal[0])\n assert(abs(abs(angle - angleNorm) - math.pi / 2) < 0.001)\n\n x = [mid[0], mid[0] + normal[0]]\n y = [mid[1], mid[1] + normal[1]]\n plt.plot(x, y, \":\")\n\n return (mid, angleNorm)",
"def _center(pos, shift):\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2",
"def d_midpoint(edge):\n v0, v1 = EDGES[edge]\n v0_pos = VERTICES[v0]\n v1_pos = VERTICES[v1]\n return ((x+y) for (x,y) in zip(v0_pos, v1_pos))",
"def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())",
"def closest_point(p1: Vector3, p2: Vector3, p3: Vector3) -> Vector3:\n k = ((p2.y - p1.y) * (p3.x - p1.x) - (p2.x - p1.x) * (p3.y - p1.y)) / ((p2.y - p1.y) ** 2 + (p2.x - p1.x) ** 2)\n x4 = p3.x - k * (p2.y - p1.y)\n y4 = p3.y + k * (p2.x - p1.x)\n\n return Vector3(x4, y4, 0)",
"def midpoint(self) -> Point:\n l = self._line.meet(infty_hyperplane(self.dim))\n return harmonic_set(*self.vertices, l)",
"def calc_point_squre_dist(point_a, point_b):\n distx = point_a[0] - point_b[0]\n disty = point_a[1] - point_b[1]\n return distx ** 2 + disty ** 2",
"def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))",
"def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )",
"def point_to_point(p1: Vec2, p2: Vec2):\n return length(dir_vector(p1, p2))",
"def point_add(self, a, b):\n\n if a.at_inf and b.at_inf: return point.inf()\n if a.at_inf: return b.dup()\n if b.at_inf: return a.dup()\n if a == b: return self.point_double(a)\n if a.x == b.x and a.y == -b.y: return point.inf()\n\n x1, y1, x2, y2 = modp(self.p, a.x, a.y, b.x, b.y)\n L = (y2 - y1) / (x2 - x1)\n x3 = L ** 2 - x1 - x2\n y3 = L * (x1 - x3) - y1\n return point.xy(int(x3), int(y3))",
"def center(self) -> Tuple[float, float]:\n return self.x + self.width / 2, self.y + self.height / 2",
"def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']",
"def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y",
"def distance(point_a, point_b):\r\n a_to_b = math.hypot(point_b[0] - point_a[0], point_b[1] - point_a[1])\r\n return a_to_b",
"def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)",
"def calcApproxDist(lon1, lat1, lon2, lat2):\n\n import math\n from shapely.geometry import Point\n\n if lat1 == lat2 and lon1 == lon2:\n return 0.0\n\n point1 = Point(lon1,lat1)\n point2 = Point(lon2, lat2)\n\n return math.acos(math.sin(math.radians(point1.y))*math.sin(math.radians(point2.y))+math.cos(math.radians(\n point1.y))*math.cos(math.radians(point2.y))*math.cos(math.radians(point2.x)-math.radians(point1.x)))*6371",
"def center(self):\n return Point(self.width/2, self.height/2)",
"def centerOn(self, point):\n rect = self.rect()\n x = point.x() - rect.width() / 2.0\n y = point.y() - rect.height() / 2.0\n \n self.setPos(x, y)",
"def get_projection_of_pt_on_line(point, line_point1, line_point2):\n projection = Point(-1, -1)\n projection.x = point.x\n if (line_point2.x - line_point1.x) != 0:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / \\\n (line_point2.x - line_point1.x) + line_point1.y\n else:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / 1 + line_point1.y\n return projection",
"def get_center_point(self):\n raise NotImplementedError()",
"def find_midpoint(start, end):\n mid = (start + end) / 2\n return int(mid)",
"def minkowski_distance(point1, point2):\n p = 3\n d = [abs(x - y) ** p for x, y in zip(point1, point2)]\n a = sum(d)**(1/p)\n\n return a",
"def get_bottom_center(left, right):\n x = (right.x - left.x) / 2 + left.x\n y = right.y - (right.y - left.y) / 5\n return (x, y)",
"def find_incentre(point_1, point_2, point_3):\r\n _a = np.linalg.norm([point_1.x - point_2.x, point_1.y - point_2.y])\r\n _b = np.linalg.norm([point_2.x - point_3.x, point_2.y - point_3.y])\r\n _c = np.linalg.norm([point_1.x - point_3.x, point_1.y - point_3.y])\r\n _p = _a + _b + _c\r\n\r\n centre_x = (_a * point_3.x + _b * point_1.x + _c * point_2.x)/ _p\r\n centre_y = (_a * point_3.y + _b * point_1.y + _c * point_2.y)/ _p\r\n\r\n return Point(centre_x, centre_y)",
"def calculateCenter(self):\n y_avg = int(sum(self.points[:,0])/float(len(self.points)))\n x_avg = int(sum(self.points[:,1])/float(len(self.points)))\n self.center = (x_avg, y_avg)\n return(x_avg,y_avg)",
"def get_distance(point1, point2):\n a = (point1['x'] - point2['x']) ** 2\n b = (point1['y'] - point2['y']) ** 2\n return (a + b) ** (1.0 / 2)",
"def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)",
"def nearest_on_boundary(self, point):\n _, minpt = self._nearest_to_point(point)\n return Point(minpt, crs=self.crs)",
"def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5",
"def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)",
"def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)",
"def midpoints(self):\n return self.bins[:, 1]",
"def closeTo(pointOne, pointTwo):\r\n\tif abs(pointOne.lat-pointTwo.lat) < 0.0002:\r\n\t\tif abs(pointOne.lon-pointTwo.lon) < 0.0002:\r\n\t\t\treturn True\r\n\treturn False",
"def get_midpoint(vecA, vecB, weight=0.5):\r\n try:\r\n vecA = dt.Vector(vecA) # just in case it isn't already cast as a vector\r\n vecB = dt.Vector(vecB)\r\n vecC = vecB-vecA\r\n vecD = vecC * weight # 0.5 is default which finds the mid-point.\r\n vecE = vecA + vecD\r\n return vecE\r\n\r\n except Exception, e:\r\n # TODO: include some useful error checking\r\n return False",
"def point_to_line_dist(P, A, B):\n\tif all(A == P) or all(B == P):\n\t\treturn0\n\tif arccos(dot((P - A) / norm(P - A), (B - A) / norm(B - A))) > pi / 2:\n\t\treturn norm(P - A)\n\tif arccos(dot((P - B) / norm(P - B), (A - B) / norm(A - B))) > pi / 2:\n\t\treturn norm(P - B)\n\treturn norm(cross(A-B, A-P))/norm(B-A)",
"def calculate_distance(startpoint, endpoint):\n\n \"\"\"\n #http://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1 = startpoint.x, startpoint.y\n lon2, lat2 = endpoint.x, endpoint.y\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * \\\n math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.asin(math.sqrt(a))\n km = config.EARTH_RADIUS / 1000.0 * c\n\n return km",
"def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))",
"def median(points):\r\n\t\tN = len(points)\r\n\t\tif (N%2 == 0):\t\t\t\r\n\t\t\tleftOfCentre = points[N//2-1]\r\n\t\t\trightOfCentre = points[N//2]\r\n\t\t\treturn (leftOfCentre + rightOfCentre) / 2\r\n\t\telse:\r\n\t\t\treturn points[N//2]",
"def calc_centroid(x1, y1, x2, y2):\n x = x1 + ((x2 - x1) / 2.0)\n y = y1 + ((y2 - y1) / 2.0)\n return [x, y]",
"def find_slope(lat1,lon1,lat2,lon2):\n return (lon2-lon1)/(lat2-lat1)",
"def center(self):\n return (self.upper_right + self.lower_left) * 0.5",
"def circle_center(top_aerofoil_points, bottom_aerofoil_points):\n q = np.array(top_aerofoil_points[0].coordinates) - np.array(top_aerofoil_points[1].coordinates)\n r = np.array(bottom_aerofoil_points[-1].coordinates) - np.array(bottom_aerofoil_points[-2].coordinates)\n c = np.cross(q, [0, 0, -1]) / np.linalg.norm(q)\n d = np.cross(r, [0, 0, 1]) / np.linalg.norm(r)\n radius = (q[1] - r[1]) / (d[1] - c[1])\n s = q + radius * c\n return Point(tuple(-s))",
"def measure_offset(left_fitx, right_fitx, midpoint=640):\n return (midpoint-(right_fitx[-1]+left_fitx[-1])/2)*3.7/700",
"def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5"
] | [
"0.8457866",
"0.8228469",
"0.8115049",
"0.78480625",
"0.7818327",
"0.7814459",
"0.77966034",
"0.7770699",
"0.75567853",
"0.7517811",
"0.7410664",
"0.7348551",
"0.7298461",
"0.7200335",
"0.7175701",
"0.70480204",
"0.7011095",
"0.6999809",
"0.6845968",
"0.6803912",
"0.6802132",
"0.6736757",
"0.66079503",
"0.65546304",
"0.6433236",
"0.6398808",
"0.63800746",
"0.6363895",
"0.63518625",
"0.63099897",
"0.6284088",
"0.62801003",
"0.62414926",
"0.61682254",
"0.61468697",
"0.61460245",
"0.6136399",
"0.61310744",
"0.61157435",
"0.6110923",
"0.6104726",
"0.6104726",
"0.6104726",
"0.60916805",
"0.60861856",
"0.60574114",
"0.6054095",
"0.603248",
"0.6025734",
"0.6004712",
"0.598815",
"0.5976875",
"0.59764326",
"0.5961365",
"0.5961353",
"0.59537053",
"0.59429324",
"0.5938313",
"0.59167516",
"0.5912631",
"0.5911109",
"0.5898548",
"0.58842975",
"0.5863744",
"0.5856303",
"0.58523107",
"0.5851312",
"0.58493274",
"0.58442956",
"0.5827436",
"0.58216506",
"0.5818067",
"0.58152807",
"0.5805577",
"0.58050025",
"0.58048666",
"0.57982343",
"0.579",
"0.57867086",
"0.57758176",
"0.5769912",
"0.5760007",
"0.574436",
"0.57426363",
"0.5741341",
"0.5739804",
"0.5733633",
"0.5732589",
"0.57302415",
"0.5727419",
"0.5725821",
"0.57235664",
"0.5723097",
"0.5713653",
"0.5712883",
"0.57040304",
"0.5702561",
"0.5700676",
"0.5700401",
"0.56969774"
] | 0.68873215 | 18 |
Distance between two points | def distance(p1, p2):
return sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance_between_points(p1,p2):\n return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2)",
"def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))",
"def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)",
"def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)",
"def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2",
"def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)",
"def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)",
"def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))",
"def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5",
"def distance_between_two_points(p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)",
"def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)",
"def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d",
"def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)",
"def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2",
"def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5",
"def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))",
"def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))",
"def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n math.pow(point1[1] - point2[1], 2))",
"def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5",
"def distance(p1, p2):\n return math.hypot(p1.x-p2.x, p1.y-p2.y)",
"def get_distance(point1, point2):\n a = (point1['x'] - point2['x']) ** 2\n b = (point1['y'] - point2['y']) ** 2\n return (a + b) ** (1.0 / 2)",
"def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)",
"def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)",
"def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)",
"def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)",
"def dist_points(x,y):\n\n return abs(x[0]-y[0]) + abs(x[1]-y[1])",
"def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )",
"def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))",
"def distance(self, p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)",
"def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)",
"def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))",
"def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)",
"def distance(p1,p2):\r\n x1,y1 = p1\r\n x2,y2 = p2\r\n return hypot(x2 - x1, y2 - y1)",
"def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5",
"def distance(self, point1, point2):\n\n\t\tprint \"Inside Distance!-----\"\n\t\tdist = math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2);\n\t\treturn dist",
"def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d",
"def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)",
"def getDistance(self,p1,p2):\n return sum([(p1[i]-p2[i])**2 for i in range(2)])",
"def getDistance(self, x1, x2, y1, y2):\n return ((x1 - x2)**2 + (y1 - y2)**2)**0.5",
"def distance(p1,p2):\n x1,y1 = p1\n x2,y2 = p2\n return hypot(x2 - x1, y2 - y1)",
"def distance(p1, p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)",
"def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))",
"def distance(p1, p2):\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])",
"def getDistance(p1, p2):\n\tdist = la.norm(p2 - p1)\n\treturn dist",
"def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance",
"def distance(P1, P2):\n return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5",
"def distance_between(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)",
"def distance_between(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)",
"def distance_between(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)",
"def distance_between(point1, point2):\n x1, y1 = point1\n x2, y2 = point2\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)",
"def distanceTwoPoints(self,A,B):\n #productive\n # used by addNeedleToScene\n profprint()\n length = ( (A[0]-B[0])**2 + (A[1]-B[1])**2 + (A[2]-B[2])**2 ) ** 0.5\n return length",
"def distance(x1, y1, x2, y2):\n dist = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return dist",
"def _get_dist(self, p1, p2): \r\n\r\n distance = np.sqrt(\r\n (p1[0] - p2[0]) ** 2 +\r\n (p1[1] - p2[1]) ** 2 +\r\n (p1[2] - p2[2]) ** 2)\r\n\r\n return distance",
"def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5",
"def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )",
"def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5",
"def distanceTo(self,other):\n if not isinstance(other,Point):\n return \n return math.sqrt((self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2)",
"def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5",
"def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)",
"def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']",
"def distance(xy1, xy2):\n x_dist = xy2[0] - xy1[0]\n y_dist = xy2[1] - xy1[1]\n dist = np.sqrt(x_dist ** 2 + y_dist ** 2)\n return dist",
"def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)",
"def compute_distance(point_1, point_2):\n x1, y1, x2, y2 = point_1[0], point_1[1], point_2[0], point_2[1]\n distance = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n return distance",
"def distance(x1, y1, x2, y2):\n\n distance_between_two_points = (((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1))) ** 0.5\n return round(distance_between_two_points, 2)",
"def distance(self, other: \"Point\") -> float:\n if not isinstance(other, self.__class__):\n raise TypeError(\"Expected `other` to be an instance of `{}`\"\\\n .format(self.__class__))\n dx = self.x - other.x\n dy = self.y - other.y\n return sqrt((dx ** 2) + (dy ** 2))",
"def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. + (pos1[1] - pos2[1])**2.)",
"def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d",
"def _distance(point_a: tuple, point_b: tuple):\n # rgb values\n x1, y1, z1 = point_a\n x2, y2, z2 = point_b\n\n # distances\n dx = x1 - x2\n dy = y1 - y2\n dz = z1 - z2\n\n # final distance\n return sqrt(dx**2 + dy**2 + dz**2)",
"def distance(coords1, coords2):\n dx = coords1.x - coords2.x\n dy = coords1.y - coords2.y\n return math.sqrt(dx * dx + dy * dy)",
"def distance(point_a, point_b):\r\n a_to_b = math.hypot(point_b[0] - point_a[0], point_b[1] - point_a[1])\r\n return a_to_b",
"def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))",
"def dist(v1: vect2d, v2: vect2d) -> float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d",
"def distance_between(pt1: tuple, pt2: tuple) -> float:\r\n\r\n return ((pt2[1] - pt1[1])**2 + (pt2[0] - pt1[0])**2)**0.5",
"def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)",
"def distance(self, x1, x2):\n return np.sum(np.power((x1-x2),2.0))",
"def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)",
"def distance(p_1, p_2):\n return ((p_2[0] - p_1[0]) ** 2 + (p_2[1] - p_1[1]) ** 2 \\\n + (p_2[2] - p_1[2]) ** 2) ** 0.5",
"def distance(p1, p2):\n return math.hypot(p2[0] - p1[0], p2[1] - p1[1])",
"def distance_between(point_one, point_two):\n sum = 0\n for d1,d2 in zip(point_one,point_two):\n sum += math.pow(float(d1) - float(d2), 2)\n\n return math.sqrt(sum)",
"def distanceTwoPoints(self, A, B):\r\n # productive\r\n # used by addNeedleToScene\r\n if frequent: profprint()\r\n length = ((A[0] - B[0]) ** 2 + (A[1] - B[1]) ** 2 + (A[2] - B[2]) ** 2) ** 0.5\r\n return length",
"def distance(self,pose1, pose2):\n return math.sqrt((pose1[0] - pose2[0]) ** 2 + (pose1[1] - pose2[1]) ** 2) + 0.001",
"def calculate_distance(p1, p2):\n\n dist = np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n\n return dist",
"def dist(self,x, y):\n\n x1, y1 = x\n x2, y2 = y\n return np.sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2))",
"def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)",
"def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))",
"def dist(pos1, pos2):\n a, b = pos1\n c, d = pos2\n \n return sqrt((a-c)**2 + (b-d)**2)",
"def distance_point_point(a, b):\n ab = subtract_vectors(b, a)\n return length_vector(ab)",
"def distance(self,coord_1, coord_2):\n return np.sqrt(np.sum((np.array(coord_1)-np.array(coord_2))**2))",
"def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )",
"def distance(p1, p2):\n return np.linalg.norm(p2-p1)",
"def get_distance(point_1, point_2):\n result = ((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2) ** 0.5\n return result",
"def distance(p1,p2):\n import numpy as np\n x = np.sqrt(sum(np.power(p2-p1,2)))\n return(x)",
"def get_distance(pose1, pose2):\n return math.sqrt((pose1.x-pose2.x)**2+(pose1.y-pose2.y)**2)",
"def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)",
"def distance(self, x2, y2):\r\n return math.sqrt((x2 - self.x) ** 2 + (y2 - self.y) ** 2)",
"def dist(p0, p1):\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)",
"def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))",
"def __distance(start_x, start_y, end_x, end_y):\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance"
] | [
"0.841957",
"0.8382808",
"0.83652633",
"0.8354172",
"0.83238786",
"0.83209145",
"0.8289851",
"0.82782465",
"0.8273264",
"0.82517046",
"0.82215977",
"0.8218881",
"0.8194025",
"0.81917197",
"0.818483",
"0.8160363",
"0.81530356",
"0.8152897",
"0.8151394",
"0.81368905",
"0.8125228",
"0.8095293",
"0.808199",
"0.808199",
"0.80704206",
"0.806068",
"0.8039219",
"0.8033718",
"0.80280775",
"0.8020585",
"0.8012497",
"0.7996247",
"0.7985728",
"0.7973759",
"0.79612434",
"0.79548186",
"0.79546016",
"0.7952609",
"0.79351395",
"0.7915851",
"0.7914846",
"0.7913184",
"0.79072624",
"0.790704",
"0.7898039",
"0.78862953",
"0.7885335",
"0.7874656",
"0.78709495",
"0.78709495",
"0.78709495",
"0.78709495",
"0.78649706",
"0.78632367",
"0.7857495",
"0.78569895",
"0.78495103",
"0.7845895",
"0.7837545",
"0.78334993",
"0.78172356",
"0.78094834",
"0.78063595",
"0.7804967",
"0.7801325",
"0.77937573",
"0.77884114",
"0.77878517",
"0.7785499",
"0.77829695",
"0.7770762",
"0.7770457",
"0.776658",
"0.7764114",
"0.77634305",
"0.7757665",
"0.7747581",
"0.77280575",
"0.7721098",
"0.77200824",
"0.7718073",
"0.7716412",
"0.77159274",
"0.77144027",
"0.771007",
"0.7707375",
"0.77069044",
"0.77051437",
"0.7700429",
"0.76958096",
"0.7691316",
"0.76860887",
"0.76819277",
"0.7679417",
"0.76783854",
"0.7669109",
"0.7656442",
"0.76495373",
"0.76350063",
"0.76317185"
] | 0.7940824 | 38 |
Angle between two points, radians | def getangle(p1, p2):
return atan2( p2[1]-p1[1], p2[0]-p1[0] ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle(p1, p2):\n x_dist = p2[0] - p1[0]\n y_dist = p2[1] - p1[1]\n return math.atan2(-y_dist, x_dist) % (2 * math.pi)",
"def angle(point1, point2):\n return atan2(point2.y() - point1.y(), point2.x() - point1.x())",
"def angle(p1, p2):\n return dot(p1, p2)",
"def angle(a: Point, b: Point) -> int:\n ang = math.degrees(math.atan2(b.y - a.y, b.x - a.x)) + 90\n return ang + 360 if ang < 0 else ang",
"def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)",
"def angle(pt_a, pt_b):\n x1, y1 = pt_a\n x2, y2 = pt_b\n return atan2(y2-y1, x2-x1)",
"def angle(firstPoint, secondPoint):\n\txDiff = secondPoint.x - firstPoint.x\n\tyDiff = secondPoint.y - firstPoint.y\n\treturn math.degrees(math.atan2(yDiff, xDiff))",
"def angleTo(x1, y1, x2, y2):\n assert not (x1 == 0 and y1 == 0) and not (x2 == 0 and y2 == 0), \"neither point should be the origin\"\n if x1 == x2:\n if y1 < y2:\n return math.pi / 2\n elif y1 == y2:\n return 0\n return math.pi * 3 / 2\n dx, dy = x2 - x1, y2 - y1\n rawDeg = math.atan(dy / dx)\n if dx < 0:\n rawDeg += math.pi\n return rawDeg % (math.pi * 2)",
"def get_angle(p1, p2):\n return math.atan2(p2[1] - p1[1], p2[0] - p1[0])",
"def _get_angle(point1, point2):\n ydelta = point2[0] - point1[0]\n xdelta = point2[1] - point1[1]\n if xdelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arcsin(ydelta / hypot)\n elif ydelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arccos(xdelta / hypot)\n else:\n theta = np.arctan(ydelta / xdelta)\n return theta",
"def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))",
"def angle(p1, p2):\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n if dx == 0:\n if dy == 0:\n return 0\n return 90\n alpha = math.atan(dy / dx) * 180 / math.pi\n if alpha < 0:\n alpha = 180 - alpha\n return alpha",
"def angle(v1, v2):\n return acos(np.clip(v1.dot(v2) / (length(v1) * length(v2)), -1.0, 1.0))",
"def getAngle(p1, p2, unit=\"rad\"):\n\n t = math.atan((p2.x - p1.x)/(p2.y - p1.y))\n \n if unit == 'rad':\n return t\n elif unit=='deg':\n return t * 180/math.pi",
"def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle",
"def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))",
"def angle_between(x1: float, y1: float, x2: float, y2: float) -> float:\n dx = x2 - x1\n dy = y2 - y1\n\n # We return negative because pyglet and math treat rotation differently\n return -math.atan2(dy, dx)",
"def get_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n if abs(dx)<=TOL and dy>0:\n angle=0.5*np.pi\n elif abs(dy)<=TOL and dx<0:\n angle=np.pi\n elif abs(dx)<=TOL and dy<0:\n angle=1.5*np.pi\n elif abs(dy)<=TOL and dx>0:\n angle=0.0\n else:\n raise ValueError(\"Warning! The angle between the two points must be an \"\n \"integer multiples of 90deg from each other\")\n return angle",
"def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle",
"def angle(vec1, vec2):\n\n return math.acos(dotproduct(vec1, vec2) / (length(vec1) * length(vec2)))",
"def angle_between(a, b):\n from math import acos\n return acos( dot_product(a, b) / (magnitude(a) * magnitude(b)) )",
"def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))",
"def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))",
"def angle(self, other):\n return acosd(self.normalized().dot(other.normalized()))",
"def get_exact_angle(pt1, pt2):\n dx, dy = pt2[0]-pt1[0], pt2[1]-pt1[1]\n return math.atan2(dy,dx)",
"def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))",
"def angle_between(v1: Vec2, v2: Vec2):\n v = dir_vector(v1, v2)\n a = atan2(v.y, v.x)\n if a < 0:\n a = 2 * pi + a\n return a",
"def calculate_angle(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.atan2(dy, dx) * 180.0 / math.pi",
"def angle(p0, p1, prv_ang=0):\r\n ang = math.atan2(p0[1] - p1[1], p0[0] - p1[0])\r\n a0 = (ang - prv_ang)\r\n a0 = a0 % (PI * 2) - PI\r\n return a0",
"def angle_btw(v1, v2):\n cos_ang = np.dot(v1, v2)\n sin_ang = np.linalg.norm(np.cross(v1, v2))\n return np.arctan2(sin_ang, cos_ang) * 180 / math.pi",
"def angle(first, other=FreeCAD.Vector(1,0,0)):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return math.acos(dotproduct(normalized(first),normalized(other)))",
"def angle_between(v1, v2):\n return np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))",
"def angle_in_degrees(x, y):\n return math.atan2(y, x) / math.pi * 180",
"def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)",
"def angle(o1,o2):\n\n o1 = np.array(o1)\n o2 = np.array(o2)\n\n o1a = o1[0:3]\n o1b = o1[3:6]\n \n o2a = o2[0:3]\n o2b = o2[3:6]\n\n norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)\n norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)\n\n dot_a = np.dot(o1a,o2a) / norm_a\n dot_b = np.dot(o1b,o2b) / norm_b\n \n if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:\n dot_a = 1.0\n \n if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:\n dot_b = 1.0\n\n angle_a = np.arccos(dot_a) * (180.0 / np.pi)\n angle_b = np.arccos(dot_b) * (180.0 / np.pi)\n\n return (angle_a, angle_b)",
"def angle_between(i1, j1, i2, j2):\n\n dot_product = i1 * i2 + j1 * j2\n magnitude1 = np.sqrt(i1 ** 2 + j1 ** 2)\n magnitude2 = np.sqrt(i2 ** 2 + j2 ** 2)\n\n theta = np.arccos(dot_product / (magnitude1 * magnitude2))\n\n return np.rad2deg(theta).round(3)",
"def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))",
"def getAngle(v1,v2,prec=1E-6):\n \n return(math.acos((np.dot(v1,v2))/np.linalg.norm(v1)/np.linalg.norm(v2)))",
"def AngleBetween(a, b):\n r = a.Length() * b.Length()\n if r < 1.0e-8:\n return BadVectorError()\n dot = (a.x*b.x + a.y*b.y + a.z*b.z) / r\n if dot <= -1.0:\n return 180.0\n if dot >= +1.0:\n return 0.0\n return math.degrees(math.acos(dot))",
"def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))",
"def atan2 (cls, y, x) :\n return Angle_R (math.atan2 (y, x))",
"def angle(self, other):\n n1 = self.norm()\n n2 = other.norm()\n c = (self * other) / (n1 * n2)\n # Take care of roundoff errors\n c = min(c, 1)\n c = max(-1, c)\n return numpy.arccos(c)",
"def angle(vec1, vec2):\n assert vec1.shape == vec2.shape\n \n cos_vec = np.inner(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))\n angle = math.acos(cos_vec)\n in_deg = math.degrees(angle)\n if in_deg >= 90:\n return (180-in_deg)\n return in_deg",
"def vec_angle_rad(v1,v2):\r\n \r\n c = np.dot(v1,v2)/(vector_len(v2)* vector_len(v2))\r\n return math.acos(c)",
"def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)",
"def getAngle(p1, p2, p3):\n\tv1 = p1 - p2\n\tv2 = p3 - p2\n\tmag = la.norm(v1) * la.norm(v2)\n\tc = np.dot(v1, v2) / mag\n\tcross = np.cross(v1,v2)\n\ts = la.norm(cross)/mag\n\tatang = math.atan2(s,c)\n\tang = atang * 180 / math.pi\n\treturn ang",
"def py_ang(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)",
"def get_angle_between(self, other):\n cross = self.x*other[1] - self.y*other[0]\n dot = self.x*other[0] + self.y*other[1]\n return math.atan2(cross, dot)",
"def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90",
"def angleBetween(v1, v2):\n v1_u = unitVector(v1)\n v2_u = unitVector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))",
"def calculate_polar_angle(p1, p2):\n # note the negative sign before the first component, which is y component\n # the y in scikit-image is flipped.\n # it is to convert the angle into right-handed coordinate\n # the range is from -pi to pi\n angle = np.arctan2(-(p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / np.pi\n\n return angle",
"def angleBetweenVectors(v1, v2):\n v2Size = vectorLength(v2)\n if not v2Size:\n theta = 0.0\n else:\n theta = math.acos(dotProduct(v1, v2) / v2Size)\n return theta",
"def angle_between(v1, v2):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n\n #takes out if vectors are 1 or -1 (basically if they're the same direction)\n angle = math.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))\n return angle",
"def get_angle_degrees_between(self, other):\n return math.degrees(self.get_angle_between(other))",
"def calculate_angle(start: tuple, end: tuple):\n radians = -math.atan2(end[0] - start[0], end[1] - start[1])\n return math.degrees(radians) % 360",
"def AngleFromXAxis(self,pnt1,pnt2):\n\t\treturn self.acad.ActiveDocument.Utility.AngleFromXAxis(pnt1,pnt2)",
"def polar_angle(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return atan2(y_span, x_span)",
"def get_angle_rad_between_joints(joint_a: Joint2D, joint_b: Joint2D) -> float:\n return math.atan2(joint_a.y - joint_b.y, joint_a.x - joint_b.x)",
"def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))",
"def atan2(y, x):\n return 0.0",
"def angle_between(v1, v2):\n v = np.array(v1)\n w = np.array(v2)\n\n norm_v = norm(v)\n norm_w = norm(w)\n\n cos_angle = np.around(np.dot(v, w) / norm_v / norm_w, PRECISION)\n\n if not -1 <= cos_angle <= 1:\n return None\n else:\n return np.around(np.arccos(cos_angle) * 360 / 2 / np.pi, PRECISION)",
"def angle(x, y, deg=False):\n rad_angle = np.arccos(np.dot(x, y)/ (norm(x)*norm(y)))\n if deg:\n return rad_angle*(180.0/np.pi)\n else:\n return rad_angle",
"def _angle_between(self, point_1, point_2):\n angle_1 = math.atan2(point_1.y, point_1.x)\n angle_2 = math.atan2(point_2.y, point_2.x)\n return angles.shortest_angular_distance(angle_1, angle_2)",
"def get_angle(vert1, vert2):\n x_axis = np.array([1, 0])\n input_axis = vert2 - vert1\n input_axis = input_axis / np.linalg.norm(input_axis)\n return math.degrees(np.arccos(np.dot(x_axis, input_axis)))",
"def compute_angle_in_rad(location1, location2):\n return np.arctan2(location1[0] - location2[0], location1[1] - location2[1])",
"def angle(dx, dy):\n\n return math.atan2(dy, dx)",
"def angle_difference(θ1, θ2):\n ordinary_diff = (θ2 - θ1) % np.pi\n return (np.pi / 2) - np.abs(ordinary_diff - (np.pi / 2))",
"def angle(v1,v2, deg = False):\n # v1.v2 = ||v1||||v2|| cos(angle) => angle = arcos(v1.v2/||v1||||v2||)\n # see more: http://www.wikihow.com/Find-the-Angle-Between-Two-Vectors\n # tested with http://codereview.stackexchange.com/a/54413\n if deg: return np.rad2deg(np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))) # *180.0/np.pi\n return np.arccos(old_div(np.dot(v1,v2),(anorm(v1)*anorm(v2))))",
"def angle2pos(pos1: np.ndarray, pos2: np.ndarray) -> float:\n assert pos1.shape == pos2.shape\n diff = pos2 - pos1\n diff /= np.linalg.norm(diff)\n # x1: y-coordinates, x2: x-coordinates\n angle = np.arctan2(diff[1], diff[0])\n return angle",
"def get_theta(p1,p2):\r\n \r\n dy = p1[1] - p2[1]\r\n dx = p1[0] - p2[0]\r\n theta = atan2(dy,dx)\r\n return theta",
"def vec_angle_deg(v1,v2):\r\n \r\n return math.degrees(vec_angle_rad(v1,v2))",
"def angle_between_two(self, other):\n # angle = math.atan2(other.position.y - self.position.y,\n # other.position.x - self.position.x)\n minus = other.position - self.position\n angle = math.atan2(minus.y, minus.x)\n return angle",
"def angle_between_vectors(x, y):\n first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (\n np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *\n np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))\n second_step = np.arccos(first_step)\n return (second_step)",
"def angle(self) -> float:\n ...",
"def get_angle(p0, p1=np.array([0, 0]), p2=None):\n if p2 is None:\n p2 = p1 + np.array([1, 0])\n v0 = np.array(p0) - np.array(p1) \n v1 = np.array(p2) - np.array(p1)\n\n angle = np.math.atan2(np.linalg.det([v0,v1]),np.dot(v0,v1))\n return np.degrees(angle)",
"def angle_between_vectors(vector1,vector2):\n value = np.sum(np.multiply(vector1, vector2)) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n if (value<-1) | (value>1):\n value = np.sign(value)\n angle = np.arccos(value)\n return angle",
"def angle_between_vectors(vec1, vec2):\n vec = vec1 - vec2\n vec = vec.perpendicular()\n return vec.angle",
"def angle_diff(a1, a2):\n a = a1 - a2\n if abs(a) > 180:\n return np.sign(a)*360 - a\n else:\n return a",
"def vector_angle(v1, v2):\n cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)\n # Clip ensures that cos_theta is within -1 to 1 by rounding say -1.000001 to -1 to fix numerical issues\n angle = np.arccos(np.clip(cos_theta, -1, 1))\n\n return angle",
"def angle(self):\n return atan2(self.v.y, self.v.x)",
"def angle_between(v2, v1):\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n result = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n if np.isnan(result):\n if abs(v1_u + v2_u) < .5 * (abs(v1_u) + abs(v2_u)):\n return np.pi\n else:\n return 0.0\n if Left( [v2[1],v2[3]], [0,0], [v1[1],v1[3]] ):\n return 2*np.pi - result\n return result",
"def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None",
"def deltaAngle(x, y):\n return math.atan2(math.sin(x-y), math.cos(x-y))",
"def calculate_vector_angle(vector_1, vector_2):\n dot = dot_product(vector_1, vector_2)\n cos_angle = float(dot / (two_norm(vector_1) * two_norm(vector_2)))\n # Buffer for floating point errors\n if 1.2 > cos_angle > 1:\n cos_angle = 1\n elif -1.2 < cos_angle < -1:\n cos_angle = -1\n elif -1.2 > cos_angle or 1.2 < cos_angle:\n raise KeypointError(\"Ratio for angle is outside of the domain.\")\n if cos_angle > 0:\n multiplier = 1\n else:\n multiplier = -1\n angle_of_interest = (180 - math.degrees(math.acos(cos_angle))) * multiplier\n return angle_of_interest",
"def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)",
"def angle_to(self, other):\n return other.angle - self.angle",
"def angle_between(vec1, vec2, radian=True):\n cos = np.dot(vec1, vec2) / np.linalg.norm(vec1) / np.linalg.norm(vec2)\n angle = np.arccos(np.clip(cos, -1, 1))\n if not radian:\n angle = angle / np.pi * 180\n return angle",
"def angle(self):\n return math.degrees(math.atan2(self[1], self[0]))",
"def angle_difference(x, y):\n return 180 - abs(abs(x - y) - 180)",
"def angle_between_points(a, b, c):\n ax, ay = a\n bx, by = b\n cx, cy = c\n\n return angle_between([ax - bx, ay - by], [cx - bx, cy - by])",
"def cal_angle_between_two_vectors(vec_1, vec_2):\n unit_vec_1 = vec_1 / np.linalg.norm(vec_1)\n unit_vec_2 = vec_2 / np.linalg.norm(vec_2)\n dot_product = np.dot(unit_vec_1, unit_vec_2)\n \n return np.arccos(dot_product) / np.pi * 180",
"def compute_angle(self, direction):\n scaled_cosine = self.w1.dot(direction) # ||direction|| cos(theta)\n scaled_sine = self.w2.dot(direction) # ||direction|| sin(theta)\n return np.arctan2(scaled_sine, scaled_cosine)",
"def angle1d(x: float, y: float):\n\n return np.degrees(np.arctan(y / x))",
"def angle_vecs(vec1,vec2):\n angle=np.arccos(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))\n return angle",
"def calc_point_direction_angle(point_a, point_b):\n return direction_diff(point_a[2], point_b[2])",
"def _calculate_angle(x0, y0, x1, y1):\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle",
"def angle_difference(ang1,ang2,units):\n ang1r = angle_to_radians(ang1,units)\n ang2r = angle_to_radians(ang2,units)\n y = np.sin(ang2r-ang1r)\n x = np.cos(ang2r-ang1r)\n angdiffr = np.arctan2(y,x)\n return radians_to_angle(angdiffr,units)",
"def get_intersect_angle(self, p0, p1, p2):\n u, v = p1-p0, p2-p0\n costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))\n return math.degrees(math.acos(costheta))",
"def angle(self) -> int:",
"def point_to_point_angle(point0, point1, out=None):\n point0 = np.reshape(point0, (-1, 1))\n diff = point_to_point_vector(point0, point1)\n if out is None:\n return np.arctan2(diff[0], diff[1])\n else:\n return np.arctan2(diff[0], diff[1], out=out)"
] | [
"0.8326875",
"0.82458794",
"0.82118714",
"0.817075",
"0.81550974",
"0.8051775",
"0.7991552",
"0.7988766",
"0.79600656",
"0.7946205",
"0.7899237",
"0.7889753",
"0.7851867",
"0.78455144",
"0.7784987",
"0.77833635",
"0.77058",
"0.7688795",
"0.7670997",
"0.7608207",
"0.75978124",
"0.7575172",
"0.756625",
"0.755231",
"0.7547056",
"0.7546138",
"0.7526677",
"0.75220597",
"0.7512944",
"0.7484545",
"0.74736965",
"0.7467074",
"0.74627566",
"0.7449298",
"0.74462616",
"0.7438997",
"0.7425196",
"0.74246854",
"0.74172795",
"0.73909867",
"0.73545796",
"0.73500824",
"0.7339836",
"0.73310786",
"0.7327858",
"0.7318133",
"0.729437",
"0.72867614",
"0.72817975",
"0.7274744",
"0.7268824",
"0.7266991",
"0.7261607",
"0.72451144",
"0.72305703",
"0.72241956",
"0.7205361",
"0.72042954",
"0.71836406",
"0.7182996",
"0.71821177",
"0.718113",
"0.7174518",
"0.71736723",
"0.7171054",
"0.7171038",
"0.7167724",
"0.7161985",
"0.7147877",
"0.7145457",
"0.71353585",
"0.71046877",
"0.71012366",
"0.70849866",
"0.7081266",
"0.70653677",
"0.70494837",
"0.7047472",
"0.70277065",
"0.70264477",
"0.7000832",
"0.69956625",
"0.6984286",
"0.69813454",
"0.69660413",
"0.6964737",
"0.693731",
"0.693516",
"0.691011",
"0.6887468",
"0.6873858",
"0.6869611",
"0.68623286",
"0.68601173",
"0.6858971",
"0.68587065",
"0.68444306",
"0.68090457",
"0.67970437",
"0.679431"
] | 0.78615785 | 12 |
Given a point, travel "length" in the "theta" direction. Like turtle graphics. | def extendpt(p, theta, length):
return [ p[0]+cos(theta)*length, p[1]+sin(theta)*length] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_triangle(x, y, length=10):\n radius = length/math.sqrt(3)\n my_turtle.penup()\n my_turtle.goto(x, y+radius)\n my_turtle.pendown()\n my_turtle.right(60)\n for i in range(3):\n my_turtle.forward(length)\n my_turtle.right(120)\n\n my_turtle.left(60)\n my_turtle.penup()",
"def setDirectionTowardPoint(self, x, y, speed):\n currX = self.xcor()\n currY = self.ycor()\n # get actual vector from t to x,y\n dXactual = x - currX\n dYactual = y - currY\n\n # get the length of that vector. Can also use turtle.distance\n length = math.hypot(dXactual, dYactual)\n\n # now scale the vector\n try:\n self.dx = dXactual / length * speed\n self.dy = dYactual / length * speed\n except:\n self.dx = 0\n self.dy = 0",
"def translate_point(pt, length, direction):\n if isinstance(direction,float):\n # direction is a float (in radians)\n return (pt[0]+length*np.cos(direction), pt[1]+length*np.sin(direction))\n elif str(direction)==\"NORTH\":\n return (pt[0], pt[1]+length)\n elif str(direction)==\"SOUTH\":\n return (pt[0], pt[1]-length)\n elif str(direction)==\"WEST\":\n return (pt[0]-length, pt[1])\n elif str(direction)==\"EAST\":\n return (pt[0]+length, pt[1])",
"def find_line_through_point(center, theta, length):\n\n r = length\n cx, cy = center\n\n xo = int(r * math.sin(theta))\n yo = int(r * math.cos(theta))\n\n line_start = cx, cy\n line_end = cx + xo, cy + yo\n\n return line_start, line_end",
"def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)",
"def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)",
"def rotate(self, theta, point):\n r = self - point\n x = r.x * math.cos(theta) - r.y * math.sin(theta)\n y = r.x * math.sin(theta) + r.y * math.cos(theta)\n return Point(point + x, point + y, *self[2:])",
"def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90",
"def drawSpiral(turtle, angle):\n\n length = 1\n\n for i in range(84):\n turtle.forward(length)\n turtle.right(angle)\n length = length + 2",
"def NextPoint(self, currentpoint, length, angle):\r\n x = length * math.degrees(math.cos(math.radians(angle)))\r\n y = length * math.degrees(math.sin(math.radians(angle)))\r\n return [currentpoint[0] + x, currentpoint[1] + y]",
"def draw_point(turt, pos, count):\r\n turt.goto(pos)\r\n turt.color(\"lawngreen\")\r\n turt.dot(8)\r\n turt.pu()\r\n turt.forward(5)\r\n turt.color(\"HotPink1\")\r\n turt.write(count, True, align=\"left\")\r\n turt.hideturtle()",
"def offsetByVector(self, angle, length):\n x = int(cos(angle) * length) + self.x\n y = int(sin(angle) * length) + self.y\n return point(x, y)",
"def move_forward(self,length,draw=True):\r\n new_x = self.x + length * math.cos(math.radians(self.angle))\r\n new_y = self.y + length * math.sin(math.radians(self.angle))\r\n self.draw_tool.line(((self.x,self.y),(new_x,new_y)), fill=(0,0,0),width=2)\r\n self.x = new_x\r\n self.y = new_y",
"def tangeant(self, t, length):\n a = self.a0 + t * self.da\n ca = cos(a)\n sa = sin(a)\n p = self.c + Vector((self.r * ca, self.r * sa))\n v = Vector((length * sa, -length * ca))\n if self.da > 0:\n v = -v\n return Line(p, v)",
"def point(pt, angle, dist):\n x, y = pt\n return dist * cos(angle) + x, dist * sin(angle) + y,",
"def goto_point(self,targetx,targety):\n #if point is 0,0, make 0.01,0.01 to avoid divide by 0\n if targetx == 0 and targety == 0:\n targetx = 0.01\n targety = 0.01\n self.targetdistance = math.sqrt((self.currentx-targetx)**2 + (self.currenty-targety)**2)\n self.targetangle = math.atan2(targety-self.currenty,targetx-self.currentx)\n self.angledifference = self.angle_diff(self.targetangle,self.orientation)\n if abs(self.angledifference) < .10:\n self.turnspeed = 0\n else:\n self.turnspeed = math.tanh(self.kturn*self.angledifference)\n self.speed = math.tanh(self.targetdistance*self.kspeed/self.angledifference)\n if self.speed < 0:\n self.speed = 0\n self.linearVector = Vector3(x=self.speed, y=0.0, z=0.0)\n self.angularVector = Vector3(x = 0.0, y = 0.0, z = self.turnspeed)\n # print \"currentx = \" + str(self.currentx)\n # print \"currenty = \" + str(self.currenty)\n # print \"orientation = \" + str(self.orientation)\n # print \"targetangle = \" + str(self.targetangle)\n # print \"angledifference = \" + str(self.angledifference)\n #print \"turnspeed = \" + str(self.turnspeed)\n #print \"speed = \" + str(self.speed)",
"def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,distance)\n turtle.left(angle)\n turtle.forward(d)",
"def go_to_angle(user_theta):\n global rate\n theta_new = user_theta - theta\n if theta_new > 0:\n # Left\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = 0.4\n pub.publish(speed)\n rate.sleep()\n else:\n # Take a Right\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = - 0.4\n pub.publish(speed)\n rate.sleep()\n speed.linear.x = 0\n speed.angular.z = 0\n pub.publish(speed)",
"def draw_triangle():\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)",
"def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))",
"def move_turtle(self):\n self.forward(self.move_speed)",
"def rotate_point_by(P:Vec3, O:Vec3, dtheta:float):\n PO = P - O\n R = norm2(PO)\n cos_theta = PO.x / R\n sin_theta = PO.y / R\n cos_dtheta = cos(dtheta)\n sin_dtheta = sin(dtheta)\n cos_theta_plus_dtheta = cos_theta*cos_dtheta - sin_theta*sin_dtheta\n sin_theta_plus_dtheta = sin_theta*cos_dtheta + sin_dtheta*cos_theta\n return Vec3(cos_theta_plus_dtheta, sin_theta_plus_dtheta)*R + O",
"def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_name = initialize(turtle_shape, bg_color,\n turtle_color, turtle_speed)\n\n for i in range(36):\n for i in range(4):\n turtle_name.forward(200)\n turtle_name.right(90)\n turtle_name.right(10)",
"def target_state(self, s):\n # YOUR CODE HERE\n if s > self.total_length:\n s = self.total_length\n\n theta = s/self.total_length*self.angle\n \n x = np.sin(theta)*self.radius\n if self.left_turn:\n y = (1 - np.cos(theta))*self.radius\n else:\n y = -(1 - np.cos(theta))*self.radius\n\n if not self.left_turn:\n theta = -theta\n\n theta = (theta+np.pi)%(np.pi*2) - np.pi\n # theta = theta%(np.pi*2)\n return np.array([x, y, theta])\n # return np.append(np.dot(self.g, np.array([x, y, 1]))[:2], theta + self.start_theta)",
"def armLocation(self,length, theta, position = [0,0]):\n #print \"Angle:\",theta\n \n width = 300\n dx = 125\n #dy = 40\n bumpx = 150\n bumpy = length/2\n #width = 300\n \n #dx = 175\n dy = 170\n \n #p1 = (position[0]+dx*cos(theta)+dy*cos(pi/2 - theta),position[1]-dx*sin(theta)+dy*sin(pi/2 - theta))\n #p2 = (p1[0]-length*sin(theta),p1[1]-length*cos(theta))\n #p3 = (p2[0]-width*cos(theta),p2[1]+width*sin(theta))\n #p4 = (p3[0]+length*sin(theta),p3[1]+length*cos(theta))\n\n p1 = (position[0]+dx*cos(theta)+dy*cos(pi/2 - theta),position[1]-dx*sin(theta)+dy*sin(pi/2 - theta))\n p2 = (p1[0]-length*sin(theta),p1[1]-length*cos(theta))\n p3 = (p2[0]-(width+bumpx)*cos(theta),p2[1]+(width+bumpx)*sin(theta))\n p4 = (p3[0]+bumpy*sin(theta),p3[1]+bumpy*cos(theta))\n p5 = (p4[0]+bumpx*cos(theta),p4[1]-bumpx*sin(theta))\n p6 = (p5[0]+(length-bumpy)*sin(theta),p5[1]+(length-bumpy)*cos(theta))\n\n \n #plt.plot([p1[0], p2[0], p3[0], p4[0], p1[0]], [p1[1], p2[1], p3[1], p4[1], p1[1]])\n #plt.axis([-700, 700, -200, 700])\n #plt.show()\n return [p1, p2, p3, p4, p5, p6]",
"def steps_to_angle():\n pass",
"def computeCoordinate(start, length, angle):\n angle = (angle*2*math.pi)/360\n return (start[0]+length*math.cos(angle),start[1]-length*math.sin(angle))",
"def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)",
"def polago(x , y, size, n, clr):\n # turtle setting\n turtle.screensize(1000)\n turtle.speed(30)\n turtle.setheading(0)\n turtle.color(clr)\n turtle.fillcolor(clr) \n turtle.goto(x, y)\n # draw random polagon \n while n > 1:\n # make random polagon\n turtle.pendown()\n turtle.begin_fill()\n # random size\n s = random.randint(10, size)\n a = random.randint(3, 8)\n for i in range (a):\n turtle.forward(s)\n turtle.left(360 / a) \n turtle.end_fill()\n n -= 1\n turtle.penup()\n turtle.goto(random.uniform(-300, 300), random.uniform(-300, 300))\n\n turtle.done",
"def draw_point(self, p):\n length = 3\n self.set_line_width(0.1)\n self.set_source_rgba(0, 0, 1, 1)\n self.move_to(p.x + length, p.y)\n self.line_to(p.x - length, p.y)\n self.stroke()\n self.move_to(p.x, p.y + length)\n self.line_to(p.x, p.y - length)\n self.stroke()",
"def point_to_dipole(point, length, deg=True):\n\n # Get coordinates relative to centrum.\n xyz = rotation(point[3], point[4], deg=deg)*length/2\n\n # Add half a dipole on both sides of the center.\n return point[:3] + np.array([-xyz, xyz])",
"def straight(self, length, t=1):\n return self.tangeant(t, length)",
"def moveTurt(t, pole, count):\n x = t.xcor()\n y = t.ycor()\n t.goto(x, y + 10)\n t.goto(pole, y + 10)\n t.goto(pole, y)\n count += 1",
"def polygon(n,r):\n \n window = turtle.Screen()\n\n david = turtle.Turtle()\n david.pensize(2)\n\n a = float(360 / n) \t\t #this is the angle the turtle will turn each time\n l = 2 * (math.sin(math.radians(a / 2)) * r) #this is the length of the sides\n\n david.penup()\n david.speed(0)\n david.right(90)\n david.forward(r * math.cos(math.radians(a / 2)))\n david.right(90)\n david.forward(l / 2)\n david.left(180)\n david.pendown()\n david.speed(1/2)\n\n for x in range(n):\n david.forward(l)\n david.left(a)",
"def restrict_theta(theta):\n tnew = theta + np.pi\n tnew += -2.0*np.pi*np.floor(tnew/(2.0*np.pi))\n tnew -= np.pi\n return tnew",
"def circle(r, mv_direction):\n vert_amount = 80\n edge = 2 * r * math.sin(math.radians(360 / (2 * vert_amount))) \n polygon_angle = (vert_amount - 2) / vert_amount * 180\n angle = 180 - polygon_angle\n \n for i in range(vert_amount):\n if i == 0: \n rotate_turtle(polygon_angle / 2, not mv_direction)\n else:\n rotate_turtle(angle, mv_direction)\n turtle.forward(edge)",
"def torus_l1_distance(point, shape):\n point = as_tensor(point, tf.float32)\n if len(shape) == 1:\n max_x = shape[0]\n coor_x = tf.range(0, max_x, 1, dtype=tf.float32)\n dx = tf.abs(point - coor_x)\n distance = tf.minimum(dx, tf.math.mod(-dx, max_x))\n elif len(shape) == 2:\n max_x = shape[0]\n max_y = shape[1]\n\n xys = grid_2d(shape)\n xys = tf.cast(xys, tf.float32)\n\n xs, ys = tf.unstack(xys, num=2, axis=-1)\n\n px, py = tf.unstack(point, num=2, axis=-1)\n px = tf.expand_dims(px, 1)\n py = tf.expand_dims(py, 1)\n\n dx = tf.abs(px - xs)\n dy = tf.abs(py - ys)\n\n dx = tf.minimum(dx, tf.math.mod(-dx, max_x))\n\n dy = tf.minimum(dy, tf.math.mod(-dy, max_y))\n\n distance = dx + dy\n else:\n raise ValueError(\"Invalid shape parameter, shape must have len 1 or 2\")\n\n return distance",
"def plot(f, color):\n turtle.penup()\n turtle.setposition(x_begin, f(x_begin))\n turtle.pencolor(color)\n turtle.pendown()\n x = x_begin\n while x < x_end:\n turtle.setposition(x, f(x))\n x += x_increment",
"def drawPetal(size, petalColor):\n turtle.color(petalColor)\n turtle.begin_fill()\n for x in range(6):\n turtle.forward(size)\n turtle.left(60)\n turtle.end_fill()",
"def circular_movement(radius = 150, theta=None):\n y = radius * np.sin(theta)\n if theta == 0:\n x = radius\n elif np.pi*0.99 < theta < np.pi*1.01:\n x = -radius\n else:\n x = y/np.tan(theta)\n return x, y",
"def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n # tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )",
"def draw(t, length, n):\n\n if n == 0:\n return\n\n angle = 45\n\n t.fd(length*n)\n t.lt(angle)\n\n # first branch\n draw(t, length, n-1)\n\n t.rt(2*angle)\n\n # second branch\n draw(t, length, n-1)\n\n t.lt(angle)\n t.bk(length*n)",
"def goto(x, y):\n turtleTmp.setposition(x, y)",
"def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)",
"def draw_target():\n\n t = turtle\n t.up()\n t.goto(-target[SIZE], -target[SIZE])\n t.setheading(0)\n t.pensize(2)\n t.down()\n for side in range(4):\n t.fd(target[SIZE] * 2)\n t.left(90)",
"def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()",
"def spiralaa(x,y):\n #print('inside')\n #turtle.setx(x)\n #turtle.sety(y)\n #t.pendown()\n turtle.home()\n # Random color\n turtle.pencolor(random.randrange(0,255),random.randrange(0,255),200)\n # Random width\n turtle.width(random.randrange(2,13))\n # Random direction\n turtle.setheading(random.randrange(1,360))\n\n for i in range(70):\n \tturtle.forward(20+i)\n \tturtle.left(30 - i/1.5)",
"def walkTo(self, x, y, angle):\n self.motionProxy.moveTo(x, y, math.pi*float(angle)/180.0)",
"def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()",
"def point (p, direction: str):\n def wrap (magnitude: int):\n change = changes [direction]\n return (\n p [0] + (change [0] * magnitude),\n p [1] + (change [1] * magnitude),\n )\n return wrap",
"def revolve(self, angle_step):\n self.angle += radians(self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)",
"def drawLine(self, dx, dy):\n assert (type(dx) in [int, float]), \"parameter x:%s is not a valid number\" % `dx`\n assert (type(dy) in [int, float]), \"parameter y:%s is not a valid number\" % `dy`\n x = self._turtle.xcor()\n y = self._turtle.ycor()\n self._turtle.setposition(x+dx, y+dy)",
"def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)",
"def rotate_waypoint(self, direction: str, argument: int):\n if direction == \"R\":\n angle = radians(argument)\n else:\n angle = -1 * radians(argument)\n y = self.waypoint_vector[0]\n x = self.waypoint_vector[1]\n self.waypoint_vector[0] = int(round(x * sin(angle) + y * cos(angle)))\n self.waypoint_vector[1] = int(round(x * cos(angle) - y * sin(angle)))",
"def rotate_point(ref_point: types.Point, angle: int, point: types.Point) -> types.Point:\n theta = math.radians(angle)\n return (round(math.cos(theta) * (point[0] - ref_point[0]) - math.sin(theta) * (point[1] - ref_point[1]) +\n ref_point[0]),\n round(math.sin(theta) * (point[0] - ref_point[0]) + math.cos(theta) * (point[1] - ref_point[1]) +\n ref_point[1]))",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def DrawLine(p_length: int, p_character: str):\n print(p_character * p_length)\n return",
"def scattering_direction(v, theta):\r\n # Sample cos_phi and sin_phi, phi is the azimuthal angle of the scattering event\r\n continue_loop = True\r\n while continue_loop:\r\n eta1 = 1-2*random.random()\r\n eta2 = 1-2*random.random()\r\n alpha = eta1**2 + eta2**2\r\n if alpha <= 1:\r\n continue_loop = False\r\n cos_phi = eta1/np.sqrt(alpha)\r\n sin_phi = eta2/np.sqrt(alpha)\r\n \r\n new_x = v[0]*np.cos(theta) - np.sin(theta)/np.sqrt(1-v[2]**2) * (v[0]*v[2]*cos_phi + v[1]*sin_phi)\r\n new_y = v[1]*np.cos(theta) - np.sin(theta)/np.sqrt(1-v[2]**2) * (v[1]*v[2]*cos_phi - v[0]*sin_phi)\r\n new_z = v[2]*np.cos(theta) + np.sqrt(1-v[2]**2)*np.sin(theta)*cos_phi\r\n \r\n return [new_x, new_y, new_z]",
"def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)",
"def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)",
"def theta():\n pass",
"def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)",
"def travel(direction, x, y):\n x_new = x\n y_new = y\n for i in range(len(direction)):\n test = direction[i].lower()\n if test == 'n':\n y_new += 1\n elif test == 's':\n y_new -= 1\n elif test == 'e':\n x_new += 1\n elif test == 'w':\n x_new -= 1\n return (x_new, y_new)",
"def four_wheel_drive(x, y, heading, speed, length, steering_angle, gas, brake, gas_to_acc=1, brake_to_acc=1):\n\n return x, y, heading, speed",
"def _reflect(self, direction: Point, trajectory: TrajectoryBase):\n self.ball.unit_velocity *= direction\n return self._finish_step_ball(trajectory)",
"def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)",
"def length(self) -> float:\n n = self.geodesic.extrinsicDimension()\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = self.geodesic.integrate(cp0,vectorops.mul(x[n:],third))\n cp3 = y[:n]\n cp2 = self.geodesic.integrate(cp3,vectorops.mul(y[n:],-third))\n return self.geodesic.distance(cp0,cp1) + self.geodesic.distance(cp1,cp2) + self.geodesic.distance(cp2,cp3)\n return Trajectory.length(self,distance)",
"def directed_distance(trail, point):\n expectedLocation = \\\n (trail[-1][0] + (trail[-1][0] - trail[-2][0]),\n trail[-1][1] + (trail[-1][1] - trail[-2][1]))\n return euclidean_distance(expectedLocation, point)",
"def move(self, theta, phi):\n print('Writing the theta angle: {}'.format(str(theta)))\n self.servo.write(theta)\n #print(\"Serial Port: \" + str(self.servo.read()))\n print('Writing the theta angle: {}'.format(str(phi)))\n self.servo.write(phi)\n #print(\"Serial Port: \" + str(self.servo.read()))",
"def triangle(length=40.0, r=3.175 / 2):\n\t# equilateral triangle:\n\ta = np.array([0, 0])\n\tb = np.array([length, 0])\n\tc = np.array([length / 2, length * math.sin(math.pi / 3)])\n\ttri_pts = PolyLine([a, b, c, a])\n\toffs_pts = addOffset(tri_pts, r)\n\ttri_pts = centerObjects(offs_pts, tri_pts)\n\treturn tri_pts, offs_pts",
"def drawPoles(wn):\n wn.setworldcoordinates(-1, -5, 3, 20)\n t = turtle.Turtle()\n t.speed(0)\n t.pensize(3)\n t.up()\n t.goto(-.5, 0)\n t.down()\n t.goto(2.5, 0)\n t.up()\n for i in range(3):\n t.goto(i, 0)\n t.down()\n t.goto(i, 10)\n t.up()\n t.hideturtle()",
"def line_length_angle(line:tuple)->tuple:\n squared_dist = point_sqr_distance(line[0], line[1])\n if squared_dist == 0:\n return 0,1\n distance = math.sqrt(squared_dist)\n angle_cosine = (line[1][0] - line[0][0]) / distance\n return squared_dist, angle_cosine",
"def drawTo(self, x, y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n self._turtle.setposition(x, y)",
"def dir_vect(theta):\n return np.array([np.cos(theta),np.sin(theta)])",
"def rotatePoint(self, point, dir=+1):\n pnew = np.zeros([len(point), point.shape[1], 2])\n pnew[:, :, 0] = point[:, :, 0]*self.ctheta + point[:, :, 1]*self.stheta*dir\n pnew[:, :, 1] = -point[:, :, 0] * \\\n self.stheta*dir + point[:, :, 1]*self.ctheta\n return pnew",
"def DrawPoint(self, p, size, color):\r\n self.DrawCircle(p, size/self.zoom, color, drawwidth=0)",
"def eDouble(P): #adding P + P by using a tangent line\r\n R = point(0, 0, P.c)\r\n i = ( (3 * P.x ** 2) + P.c.a) #the slope equation (i/j)\r\n j = (2 * P.y)\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( (s ** 2) - 2 * P.x) % P.c.p\r\n R.y = (-P.y + s * (P.x - R.x) ) % P.c.p\r\n return R",
"def move(self,amount):\n angle=self.dirction/180*math.pi\n self.postionx += amount*math.cos(angle)\n self.postiony += amount*math.sin(angle)",
"def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)",
"def pointlength(x):\n return 0.0",
"def quiver(point, gradient, length=None, length_scale=1., width_scale=1.,\n color=None, opacity=None, fig=\"gcf\", label=None):\n\n if length is None:\n length = geom.distance(gradient)\n if length_scale != 1:\n length *= length_scale\n\n return arrow(point, point + gradient, length, width_scale, color, opacity,\n fig, label)",
"def walk(self, dir):\n x, y, theta = dir\n self.motionProxy.moveToward(x, y, theta, [[\"Frequency\", 1]])\n self.isWalking = True",
"def drawCircle(self, r):\n assert (type(r) in [int, float]), \"parameter r:%s is not a valid number\" % `r` \n x = self._turtle.xcor()\n y = self._turtle.ycor()\n \n # Move the pen into position\n fstate = self._turtle.pendown()\n if fstate:\n self._turtle.penup()\n self._turtle.setposition(x, y-r)\n if fstate:\n self._turtle.pendown()\n \n # Draw the circle and fill if necessary\n self._turtle.circle(r)\n self.flush()\n self._turtle.forward(0)\n \n # Return the pen to the position\n if fstate:\n self._turtle.penup()\n self._turtle.setposition(x, y)\n if fstate:\n self._turtle.pendown()",
"def polar(cls, angle, length=1.0):\n x, y = cos_sin_deg(angle)\n vec = tuple.__new__(cls, (x * length, y * length))\n vec.__dict__['length'] = length * 1.0\n return vec",
"def rotate_about(self, angle, point):\n\t\tif not isinstance(point, Vector2d):\n\t\t\tpoint = Vector2d(*point)\n\t\treturn (self - point).rotate(angle) + point",
"def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)",
"def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)",
"def MoveToPoint(*args):\n return _gdi_.GraphicsPath_MoveToPoint(*args)",
"def draw_hex(length, color):\n turtle.color(color)\n turtle.begin_fill()\n for x in range(6):\n turtle.forward(length)\n turtle.right(60)\n turtle.end_fill()",
"def direction(point0, point1):\n d = [0, 0, 0]\n vector = [point1[0] - point0[0], point1[1] - point0[1]]\n d[1] = math.atan2(vector[1], vector[0])\n while d[1] <= -np.pi / 2:\n d[1] += np.pi\n return d",
"def rotate((x, y), theta):\n\n return math.cos(theta) * x + math.sin(theta) * y, -math.sin(theta) * x + math.cos(theta) * y",
"def create_diatom(x, l, theta):\n c = np.cos(theta)\n s = np.sin(theta)\n dx = np.array([c, s]) * l\n\n pos = np.zeros((3,3))\n pos[0,0:2] = x + dx\n pos[1,0:2] = x - dx\n pos[2,0:2] = x\n pos[2,2] = 1.0 # elevation of the COM\n return pos",
"def move_point(p, direction, d=1):\n direction_guard(direction)\n x, y = p\n dx, dy = directions[direction]\n return (x + dx * d, y + dy * d)",
"def update(self, x, y, theta):\n self.x = x\n self.y = y\n self.theta = theta\n self.theta = wrap_angles(self.theta)",
"def draw_centered_line(\n self,\n theta: Quantity,\n length: Quantity,\n ra: Quantity,\n dec: Quantity,\n color: str = \"b\",\n linewidth: float = 1.0,\n alpha: float = 0.7,\n ) -> None:\n\n _length = length / 2.0\n dx = np.sin(theta) * _length / np.cos(dec)\n dy = np.cos(theta) * _length\n coords = np.array(\n [\n [(ra + dx).to_value(u.deg), (ra - dx).to_value(u.deg)],\n [(dec + dy).to_value(u.deg), (dec - dy).to_value(u.deg)],\n ]\n )\n self.plot.show_lines([coords], color=color, linewidth=linewidth, alpha=alpha)",
"def line(center, length, rotation=0):\n unit = np.array([math.cos(rotation * 2 * PI / 360),\n math.sin(rotation * 2 * PI / 360)])\n end = center + unit * length / 2\n start = center - unit * length / 2\n return [\"PA{},{};\".format(*[int(coord) for coord in start]),\n \"PD{},{};\".format(*[int(coord) for coord in end]),\n \"PU;\"]",
"def move_rectangle(r,dx,dy):\n\n r.corner.x=r.corner.x+dx\n r.corner.y=r.corner.y+dy\n turtle.setx(r.corner.x)\n turtle.sety(r.corner.y)\n for i in range(2):\n turtle.fd(r.width)\n turtle.lt(90)\n turtle.fd(r.height)\n turtle.lt(90)\n return r",
"def spiral(t, angle):\n for i in range(angle):\n r = 200 * math.radians(i)\n arc_length = 2 * math.pi * r * abs(angle) / 360\n n = int(arc_length / 2) + 3\n step_length = arc_length / n\n step_angle = float(angle % 360) / n\n # making a slight left turn before starting reduces\n # the error caused by the linear approximation of the arc\n polyline(t, i, step_length, step_angle)",
"def dist(self, point: np.array):\n return np.linalg.norm(\n np.cross(point - self.r_start, self.direction), axis=1) / \\\n np.linalg.norm(self.direction)"
] | [
"0.65688515",
"0.6350407",
"0.61780405",
"0.6157889",
"0.5804359",
"0.5804359",
"0.5796193",
"0.5728867",
"0.5686247",
"0.5668157",
"0.56574714",
"0.5640281",
"0.5588495",
"0.55151546",
"0.5508654",
"0.5429575",
"0.54282206",
"0.54211295",
"0.54015946",
"0.5380861",
"0.53574085",
"0.5350647",
"0.53204805",
"0.53056026",
"0.52808094",
"0.52727073",
"0.5257786",
"0.5250246",
"0.5244442",
"0.52364624",
"0.521319",
"0.52092004",
"0.5187201",
"0.5183089",
"0.5175202",
"0.5161971",
"0.5153564",
"0.51533496",
"0.5150155",
"0.51472473",
"0.51299435",
"0.51297504",
"0.51139665",
"0.51035094",
"0.5086531",
"0.50847554",
"0.50808215",
"0.50664204",
"0.50632405",
"0.50450784",
"0.5044232",
"0.5041006",
"0.50404173",
"0.5026599",
"0.5025221",
"0.50213075",
"0.50213075",
"0.5021191",
"0.5013807",
"0.501133",
"0.49935496",
"0.4992761",
"0.49800128",
"0.4978568",
"0.49757716",
"0.49749392",
"0.49705756",
"0.49518707",
"0.49422136",
"0.4931975",
"0.49226397",
"0.49209386",
"0.49176297",
"0.49092618",
"0.49025565",
"0.4896971",
"0.48752773",
"0.48725182",
"0.48693687",
"0.48653817",
"0.48601207",
"0.48468745",
"0.4837464",
"0.48346967",
"0.4826623",
"0.48261932",
"0.48261848",
"0.48230678",
"0.48200345",
"0.48194993",
"0.48194897",
"0.48178795",
"0.4816215",
"0.48152396",
"0.48104095",
"0.48100024",
"0.4808083",
"0.48048884",
"0.48044702",
"0.47978556"
] | 0.5763283 | 7 |
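Several negatives above implement the same core 2D operation: rotating a point about a reference point (e.g. rotate_point, rotatePoint, and rotate_about). For reference, a minimal self-contained sketch of that rotation formula; the function name and the degrees-based angle convention are illustrative choices, not taken from any single entry above.

import math

def rotate_about_ref(point, ref, angle_deg):
    # Rotate `point` counter-clockwise about `ref` by `angle_deg` degrees.
    theta = math.radians(angle_deg)
    dx, dy = point[0] - ref[0], point[1] - ref[1]
    return (ref[0] + math.cos(theta) * dx - math.sin(theta) * dy,
            ref[1] + math.sin(theta) * dx + math.cos(theta) * dy)

# (1, 0) rotated 90 degrees about the origin lands at (0, 1), up to float error.
assert all(abs(a - b) < 1e-9
           for a, b in zip(rotate_about_ref((1, 0), (0, 0), 90), (0.0, 1.0)))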
Parse points from a string | def makepts(s):
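    # Expected input like "x1,y1 - x2,y2 - ..."; returns [[x, y], ...] as floats.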
    astr = s.replace(' ','').split('-')
def fromstring(strCoords):
coords = strCoords.split(',')
return [float(coords[0]), float(coords[1])]
    return [fromstring(strCoords) for strCoords in astr] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_point(K, s):\n return [K([QQ(c) for c in coord.split(\",\")]) for coord in s[2:-2].split('],[')]",
"def parse(text):\n parts = [int(part) for part in text.strip().split(',')]\n point = Point(*parts)\n actual = \"{},{},{},{}\".format(point.x, point.y, point.z, point.t)\n assert actual == text, diff(actual, text)\n return point",
"def get_point_list(self, string):\n a = re.findall('\\(\\d+\\.\\d+, \\d+\\.\\d+\\)', string)\n lst = []\n for tp in a:\n lst.append(self.get_tuple(tp))\n print lst",
"def __getPointXYs(self, raw_string):\n try:\n pointsRE = re.compile('^\\((\\d*\\D*, *\\D*\\d*)\\)\\D*\\((\\d*\\D*, *\\D*\\d*)\\)$')\n points = pointsRE.search(raw_string.strip()).groups()\n startPoint = (int(points[0].split(',')[0].strip()), int(points[0].split(',')[1].strip()))\n endPoint = (int(points[1].split(',')[0].strip()), int(points[1].split(',')[1].strip()))\n return self.__validatePoint(startPoint), self.__validatePoint(endPoint)\n except AttributeError:\n traceback.print_exc()\n raise ValueError('Failed to get point coordinates.')",
"def __getPointXY(self, raw_string):\n try:\n # print 'input:',str\n pointRE = re.compile('^\\((\\d*, *\\d*)\\)$')\n x, y = pointRE.search(raw_string.strip()).groups()[0].split(',')\n # print 'x: %s, y: %s' % (x,y)\n return self.__validatePoint((int(x), int(y.strip())))\n except AttributeError:\n raise ValueError('Failed to get point coordinates.')",
"def parsePoint(line):\n parts = line.split(\",\")\n return LabeledPoint(parts[0], [parts[1], parts[2]])",
"def coords1(s: str) -> list[float]:\n return numarray(re.sub(SPACE, \"\", s).split(\",\"))",
"def _get_coordinates_data(obj: str) -> list:\n\n points = re.findall(r'\\((\\d+, \\d+)\\)', obj)\n if not points:\n raise InvalidGroundValueError(\n \"Enter points in format like (1, 2) (4, 4)\"\n )\n\n return points",
"def gx_coords1(s: str) -> list[float]:\n return numarray(s.split(\" \"))",
"def coords(s: str) -> list[list[float]]:\n s = s.split() # sub(TRIM_SPACE, '', v).split()\n return [coords1(ss) for ss in s]",
"def parse_point(point,n=None):\n point = point.split(\",\")\n if not n:\n n = len(point)\n return [(point[0],item) for item in point[1:n] if item]",
"def _point_as_tuple(input_string: str) -> _Tuple[float]:\n out = tuple(float(coordinate) for coordinate in input_string.split(','))\n if len(out) == DIMENSIONS:\n return out\n raise TypeError",
"def parse_point(line):\n return json.loads(line)",
"def parse_input(input: str) -> Tuple[int, int, int, int]:\n head, tail = input.split(\"@ \")\n points, measures = tail.split(\": \")\n x, y = points.split(\",\")\n w, h = measures.split(\"x\")\n return int(x), int(y), int(w), int(h)",
"def parse_line(line : str) -> Tuple[str, str, int]:\n line = line[:-1] # Strips newline character\n question_end = line.find(';')\n question = line[:question_end]\n\n line = line[question_end+1:]\n answer_end = line.find(';')\n answer = line[:answer_end]\n\n points = int(line[answer_end+1:])\n\n return question, answer, points",
"def parse_points(lines):\n lines = deque(lines)\n current = []\n while lines:\n line = lines.popleft().strip()\n if line:\n current.append(Point.parse(line))\n else:\n yield current\n current = []\n\n if current:\n yield current",
"def parsePoint(point): \n lat = float(point.getAttribute(\"lat\"))\n lon = float(point.getAttribute(\"lon\"))\n name = \"\"\n for e in point.getElementsByTagName(\"name\"):\n name = e.childNodes[0].data.strip()\n description = \"\" \n for e in point.getElementsByTagName(\"description\"):\n description = e.childNodes[0].data.strip() \n ele = 0\n for e in point.getElementsByTagName(\"ele\"):\n ele = float(e.childNodes[0].data.strip())\n t = None\n time = None\n for e in point.getElementsByTagName(\"time\"):\n t = parseTime(e.childNodes[0].data.strip())\n secMil = t.second + t.microsecond / 1000000\n time = myPyGPX.Time(t.year, t.month, t.day, t.hour, t.minute, secMil)\n return (lat,lon, time, ele, name, description)",
"def parse_point(self, text_line):\n record_type = self.substr(text_line, sps21point['RECORD_ID'][0], sps21point['RECORD_ID'][1]).strip()\n if record_type not in (SRC_DATA_RECORD, RCV_DATA_RECORD):\n return\n self.set_definition(sps21point)\n return self.parse(text_line)",
"def parseString(self, s):\n pass",
"def test_point_parse(logger):\n raw_bytes = b''\n point_data_fragment = [\n (struct.pack(lines.X.fmt, 12.341), 12.341),\n (struct.pack(lines.Y.fmt, 107.301), 107.301),\n (struct.pack(lines.Pressure.fmt, 0.351), 0.351),\n (struct.pack(lines.RotX.fmt, 0.03), 0.03),\n (struct.pack(lines.RotY.fmt, 0.216), 0.216),\n ]\n for data in point_data_fragment:\n raw_bytes += data[0]\n\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = lines.Point.load(position)\n assert round(result.x, 3) == 12.341\n assert round(result.y, 3) == 107.301\n assert round(result.pressure, 3) == 0.351\n assert round(result.rot_x, 3) == 0.03\n assert round(result.rot_y, 3) == 0.216",
"def point(surface, string):\n match = re.match('(.*?) (.*?)(?: |$)', string)\n if match:\n x, y = match.group(1, 2)\n string = string[match.end():]\n return (size(surface, x, 'x'), size(surface, y, 'y'), string)\n else:\n raise PointError",
"def test_points_class_parse(logger):\n raw_bytes = b''\n point_data_fragment = [\n # one point:\n (struct.pack(lines.Points.fmt, 1), 1),\n # the single point's data:\n (struct.pack(lines.X.fmt, 12.341), 12.341),\n (struct.pack(lines.Y.fmt, 107.301), 107.301),\n (struct.pack(lines.Pressure.fmt, 0.351), 0.351),\n (struct.pack(lines.RotX.fmt, 0.03), 0.03),\n (struct.pack(lines.RotY.fmt, 0.216), 0.216),\n ]\n for data in point_data_fragment:\n raw_bytes += data[0]\n\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = lines.Points.load(position)\n assert result.count == 1\n assert len(result.points) == 1\n result = result.points[0]\n assert round(result.x, 3) == 12.341\n assert round(result.y, 3) == 107.301\n assert round(result.pressure, 3) == 0.351\n assert round(result.rot_x, 3) == 0.03\n assert round(result.rot_y, 3) == 0.216",
"def parse_point(self,points):\n self.body = None",
"def _parseVec(self, str):\r\n\t\tvec = []\r\n\t\tsplt = str.split()\r\n\t\tfor i in range(0,len(splt)):\r\n\t\t\tvec.append(self._parseNumber(splt[i]))\r\n\t\treturn vec",
"def readContactPoint(text):\n items = text.split()\n if len(items)!=7:\n raise ValueError(\"Invalid number of items, should be 7\")\n return ContactPoint([float(v) for v in items[0:3]],[float(v) for v in items[3:6]],float(items[6]))",
"def _parse_points(self, scan_data):\n \n # check data\n if not scan_data['mz_data'] or not scan_data['int_data']:\n return []\n \n # decode data\n mz_data = base64.b64decode(scan_data['mz_data'])\n int_data = base64.b64decode(scan_data['int_data'])\n \n # decompress data\n if scan_data['mz_compression'] == 'zlib':\n mz_data = zlib.decompress(mz_data)\n \n if scan_data['int_compression'] == 'zlib':\n int_data = zlib.decompress(int_data)\n \n # get precision\n mz_precision = 'f'\n if scan_data['mz_precision'] == 64:\n mz_precision = 'd'\n \n int_precision = 'f'\n if scan_data['int_precision'] == 64:\n int_precision = 'd'\n \n # convert from binary\n count = len(mz_data) // struct.calcsize('<' + mz_precision)\n mz_data = struct.unpack('<' + mz_precision * count, mz_data[0:len(mz_data)])\n \n count = len(int_data) // struct.calcsize('<' + int_precision)\n int_data = struct.unpack('<' + int_precision * count, int_data[0:len(int_data)])\n \n # format\n if scan_data['spectrum_type'] == 'centroided':\n points = map(list, zip(mz_data, int_data))\n else:\n mz_data = numpy.array(mz_data)\n mz_data.shape = (-1, 1)\n int_data = numpy.array(int_data)\n int_data.shape = (-1, 1)\n data = numpy.concatenate((mz_data,int_data), axis=1)\n points = data.copy()\n \n return points",
"def fromString(self, s):\n vars = s.replace(',', '').split()\n self.position = [int(vars[0]), int(vars[1])]\n print(self.position)\n self.K = np.array([[int(vars[2]), int(vars[3])], \n [int(vars[4]), int(vars[5])]])",
"def parse_input(userstring):\n xsplit = userstring.split()\n stringtovalues = [float(x) for x in xsplit]\n\n return stringtovalues",
"def _parse_coords(self):\n\n coords = []\n\n while True:\n try:\n _, x, y = self._lines.current.split()\n coords.append((float(x), float(y)))\n except ValueError:\n break\n\n try:\n next(self._lines)\n except StopIteration:\n break\n\n return coords",
"def parse_selected_points_from_args(self):\n log = logging.getLogger(\".\".join([__name__]))\n log.addFilter(logconfig.ThreadContextFilter())\n selected_points = []\n if self.get_args().selected_points:\n point_expected_format = re.compile(\"[0-9]+,[0-9]+\")\n sel_points = self.get_args().selected_points\n for point_string in self.get_args().selected_points:\n point_string = point_string.strip('()')\n match_results = point_expected_format.match(point_string)\n # Check the regex matches the entire string\n # DEV NOTE: can use re.full_match in Python v3\n if match_results is not None and match_results.span()[1] == len(point_string):\n x, y = map(int, point_string.strip('()').split(','))\n selected_points.append(Point(x, y))\n else:\n log.warning(\"Selected point with invalid format will be ignored - '\" + point_string + \"'\")\n return selected_points",
"def parse(cls, s):\n raise NotImplementedError",
"def parse_location(location_str):\n def floatify(latlon):\n \"\"\" Turns a latlon string into a float \"\"\"\n sign = -2. * (latlon[-1].lower() in ['s', 'w']) + 1\n return float(latlon[:-1]) * sign\n points = location_str.strip().split(',')\n if not len(points) == 2:\n raise BadQuery(\"Expected four comma seperated values \"\n \"defining a single point.\")\n\n is_lat = lambda x: x[-1].lower() in ['n', 's']\n lat = filter(is_lat, points)\n if not len(lat) == 1:\n raise BadQuery(\"Expected two latitudes (determined by \" +\n \"values ending in 'N' or 'S'\")\n is_lon = lambda x: x[-1].lower() in ['e', 'w']\n lon = filter(is_lon, points)\n if not len(lon) == 1:\n raise BadQuery(\"Expected two longitudes (determined by \" +\n \"values ending in 'E' or 'W'\")\n lat = floatify(lat[0])\n lon = floatify(lon[0])\n\n # make sure latitude is in range.\n if (lat > 90.) or (lat < -90):\n raise BadQuery(\"Latitude must be within -90 and 90, got %s\" %\n str(lat))\n # we let the user use either longitudes of 0 to 360\n # or -180 to 180, then convert to nautical (-180 to 180).\n if lon > 360. or lon < -180.:\n raise BadQuery(\"Longitudes must be within -180 and 360, got %s\" %\n str(lon))\n # make sure lons end up in -180 to 180.\n lon = np.mod(lon + 180., 360.) - 180.\n\n location = {'latitude': lat,\n 'longitude': lon}\n return location",
"def load_picked_points(filename):\n\n f = open(filename, 'r')\n\n def get_num(string):\n pos1 = string.find('\\\"')\n pos2 = string.find('\\\"', pos1 + 1)\n return float(string[pos1 + 1:pos2])\n\n def get_point(str_array):\n if 'x=' in str_array[0] and 'y=' in str_array[1] and 'z=' in str_array[2]:\n return [get_num(str_array[0]), get_num(str_array[1]), get_num(str_array[2])]\n else:\n return []\n\n pickedPoints = []\n for line in f:\n if 'point' in line:\n str = line.split()\n if len(str) < 4:\n continue\n ix = [i for i, s in enumerate(str) if 'x=' in s][0]\n iy = [i for i, s in enumerate(str) if 'y=' in s][0]\n iz = [i for i, s in enumerate(str) if 'z=' in s][0]\n pickedPoints.append(get_point([str[ix], str[iy], str[iz]]))\n f.close()\n return np.array(pickedPoints)",
"def get_string_info(string):\n line_count = 1\n column_count = 1\n for char in string:\n if char == '\\n':\n column_count = 1\n line_count += 1\n else:\n column_count += 1\n return Coords(line_count, column_count, len(string))",
"def read_points():\n\tpoints = []\n\tf = open(r'sample_points.txt')\n\twhile True:\n\t\tnstr = f.readline()\n\t\tif len(nstr) == 0:\n\t\t\tbreak\n\t\tline = nstr.rstrip('\\n').split(', ')\n\t\t# print(line)\n\n\t\tpoints.append((round(float(line[0]),3),round(float(line[1]),3))) \n\n\tprint(points)\n\treturn points",
"def parser(string: str, token: str) -> List[float]:\n search_token = re.compile(r\"{token}: (.*?){unit}\".format(token=token,\n unit=UNIT))\n output = re.findall(search_token, string)\n if len(output) == 0:\n return []\n\n return [float(i) for i in output]",
"def get_tuple(self, string):\n a = re.search('\\((\\d+\\.\\d+), (\\d+\\.\\d+)\\)', string)\n if not a:\n return None\n else:\n return (float(a.group(1)), float(a.group(2)))",
"def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans",
"def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans",
"def parse(s):\n return s",
"def _parse_points(ctx):\n tech = ctx.obj['TECH']\n points = ctx.obj['POINTS']\n sam_files = ctx.obj['SAM_FILES']\n res_file = ctx.obj['RES_FILE']\n curtailment = ctx.obj['CURTAILMENT']\n lat_lon_fpath = ctx.obj.get('LAT_LON_FPATH', None)\n lat_lon_coords = ctx.obj.get('LAT_LON_COORDS', None)\n regions = ctx.obj.get('REGIONS', None)\n region = ctx.obj.get('REGION', None)\n region_col = ctx.obj.get('REGION_COL', 'state')\n\n i = 0\n if points is not None:\n i += 1\n\n if lat_lon_fpath is not None or lat_lon_coords:\n lat_lons = _parse_lat_lons(lat_lon_fpath, lat_lon_coords)\n points = ProjectPoints.lat_lon_coords(lat_lons, res_file,\n sam_files, tech=tech,\n curtailment=curtailment)\n i += 1\n\n if region is not None or regions is not None:\n regions = _parse_regions(regions, region, region_col)\n points = ProjectPoints.regions(regions, res_file, sam_files,\n tech=tech,\n curtailment=curtailment)\n i += 1\n\n msg = None\n if i == 0:\n msg = (\"reV Gen requires one of 'points', 'lat-lon-fpath', \"\n \"'lat-lon-coords', 'regions', or 'region' and region-col' \"\n \"must be supplied to determine points to compute generation \"\n \"for!\")\n elif i > 1:\n msg = (\"reV Gen can only produce a unique set of Project Points for \"\n \"a single input value for ONE of 'points', 'lat-lon-fpath', \"\n \"'lat-lon-coords', 'regions', or 'region' and region-col'\")\n\n if msg is not None:\n logger.error(msg)\n raise ProjectPointsValueError(msg)\n\n return points",
"def parse_string(self, data):\n pass",
"def _parse_full_position(cls, full_position_string):\n try:\n before,after = [cls._parse_single_position(s) for s in full_position_string.split('-')]\n except (ValueError,AttributeError):\n raise ValueError(\"The full_position argument must be a string of the form '100-200', '?-200' or '100-?'!\"\n \"Got '%s'\"%(full_position_string,))\n if before is None and after is None:\n raise ValueError(\"At least one section of the full_position argument must be a number!\")\n return before,after",
"def parse_input(giant_string):\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test",
"def __parse_traffic(str):\n return float(str.strip().split(\",\")[0].replace('.',''))",
"def parse_input(giant_string):\r\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\r\n\r\n X_train_row_strings = X_train_part.split(\"S\")\r\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\r\n X_train = np.array(X_train_rows)\r\n\r\n Y_train = concatenated_string_to_array(Y_train_part)\r\n\r\n X_test_row_strings = X_test_part.split(\"S\")\r\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\r\n X_test = np.array(X_test_rows)\r\n\r\n return X_train, Y_train, X_test",
"def tag_parser(file_path: str):\n with open(file_path) as f:\n t = f.read()\n t = t.split(\"Points =\\n\")[1]\n t = t.replace(\" 0.1 1 1 \\\"Marker\\\"\", \"\")\n t = t.replace(\";\", \"\")\n t = t.replace(\" \\n\", \"\\n\")\n t = t[1:]\n t = StringIO(t)\n\n return np.genfromtxt(t, delimiter=' ')",
"def parselatlon_statontable(astring, positive_char, negative_char):\n lstr = astring.strip()\n if not lstr:\n return None\n lastch = lstr[-1]\n if not lastch in [positive_char, negative_char]:\n return None\n try:\n result = float(lstr[:-1])\n except ValueError:\n return None\n if lastch == negative_char:\n result = -result\n return result",
"def parse(self, string):\n\n lines = string.splitlines()\n width = int((len(lines[0]) - 1) / 3)\n height = int((len(lines) - 1) / 2)\n self.__init__(width, height)\n\n y = 1\n for i in range(1, len(lines) - 1):\n line = lines[i]\n\n for j in range(1, len(line) - 1):\n if line[0] == '+':\n # Detect vertical edges\n if j % 3 != 1 or line[j] != ' ': continue\n x = int((j + 2) / 3)\n\n self.add_edge((x, y - 1), (x, y))\n else:\n # Detect horizontal edges\n if j % 3 != 0 or line[j] != ' ': continue\n x = int(j / 3)\n\n self.add_edge((x, y), (x + 1, y))\n\n if line[0] != '+': y += 1\n\n return self",
"def coordinates(self):\n logging.debug('Get coordinates from text')\n result = []\n blocks = self.del_comm(blocks=True)\n coor = re.compile('[FXYZ][+-]?[0-9]+(\\.[0-9]+)?')\n for line in blocks:\n coord_line = False\n comm = line.split()\n temp = []\n for c in comm:\n if c == 'G1':\n coord_line = True\n if coord_line and coor.match(c):\n temp.append(c)\n if temp:\n result.append(temp)\n return result",
"def _parse_point_source(element):\n ID, name, tect_reg = _get_id_name_tect_reg(element)\n\n lon, lat = _get_point_source_location(element)\n\n mfd = _get_mfd(element)\n\n return PointSourceNRML04(lon, lat, mfd)",
"def _parse_string_coords(*args, which='x', **kwargs):\n # NOTE: Why FixedLocator and not IndexLocator? The latter requires plotting\n # lines or else error is raised... very strange.\n # NOTE: Why IndexFormatter and not FixedFormatter? The former ensures labels\n # correspond to indices while the latter can mysteriously truncate labels.\n res = []\n for arg in args:\n arg = _to_arraylike(arg)\n if _is_string(arg) and arg.ndim > 1:\n raise ValueError('Non-1D string coordinate input is unsupported.')\n if not _is_string(arg):\n res.append(arg)\n continue\n idx = np.arange(len(arg))\n kwargs.setdefault(which + 'locator', mticker.FixedLocator(idx))\n kwargs.setdefault(which + 'formatter', pticker._IndexFormatter(_to_ndarray(arg))) # noqa: E501\n kwargs.setdefault(which + 'minorlocator', mticker.NullLocator())\n res.append(idx)\n return *res, kwargs",
"def parse_coords(lines):\r\n pcoa_results = OrdinationResults.from_file(lines)\r\n return (pcoa_results.site_ids, pcoa_results.site, pcoa_results.eigvals,\r\n pcoa_results.proportion_explained)",
"def get_point_from_linestring(geom_row, X=0, behaviour='last'):\n\n lat = None\n lng = None\n try:\n X = round(X)\n except Exception as e:\n raise TypeError(\"Please enter a number for the index of the point within the linestring (X)\")\n\n if behaviour in ['last', 'ignore']:\n pass\n else:\n behaviour = 'last'\n\n if type(geom_row) == shapely.geometry.multilinestring.MultiLineString:\n total_linestrings = len(geom_row)\n lengths = {}\n total_len = 0\n for line in range(total_linestrings):\n len_line = len(geom_row[line].xy[0])\n lengths[line] = len_line\n total_len += len_line\n if X > total_len and behaviour == 'ignore':\n return lng, lat\n elif X > total_len and behaviour == 'last' or X == -1:\n lat = geom_row[-1].xy[1][-1]\n lng = geom_row[-1].xy[0][-1]\n else:\n total = 0\n for key, val in lengths.items():\n # find the location of X within the dictionary by looking if its in a given key\n total += val\n if total >= X:\n ind_key = key\n dict_ind = (val - (total - X)) - 1 # minus 1 as Python has a base-0 index\n break\n lat = geom_row[ind_key].xy[1][dict_ind]\n lng = geom_row[ind_key].xy[0][dict_ind]\n\n elif type(geom_row) == shapely.geometry.linestring.LineString:\n len_line = len(geom_row.xy)\n lng = geom_row.xy[0][X]\n lat = geom_row.xy[1][X]\n\n return lng, lat",
"def point_to_lng_lat(point_geometry):\n\n # cast as str\n point = str(point_geometry)\n\n # parse\n point = point.split('(')[-1]\n point = point.replace(')', '')\n\n # split lat/lng\n point = point.strip()\n lng_lat = point.split(' ')\n if(len(lng_lat) != 2):\n raise Exception('Input point is invalid')\n\n # parse\n lng, lat = lng_lat\n lng = lng.strip()\n lat = lat.strip()\n lat = float(lat)\n lng = float(lng)\n\n return [lng, lat]",
"def parse_xyz_str(xyz_str, ang2bohr=False):\n\n xyz_lines = xyz_str.strip().split(\"\\n\")\n atom_num = int(xyz_lines[0].strip())\n comment_line = xyz_lines[1]\n\n # Only consider the first four items on a line\n atoms_coords = [line.strip().split()[:4]\n for line in xyz_str.strip().split(\"\\n\")[2:]\n ]\n atoms, coords = zip(*[(a, c) for a, *c in atoms_coords])\n coords = np.array(coords, dtype=np.float)\n if ang2bohr:\n coords *= ANG2BOHR\n return atoms, coords",
"def parse_precision(p):\n min = max = 0\n for c in p:\n if c in '@0':\n min += 1\n max += 1\n elif c == '#':\n max += 1\n elif c == ',':\n continue\n else:\n break\n return min, max",
"def parse_input(string):\n return [int(vote) for vote in string.split()]",
"def read_points(from_file):\n points = []\n with open(from_file) as fp: \n for line in fp.readlines(): \n feats = line.strip().split()\n points.append((int(feats[0]), int(feats[1])))\n\n return points",
"def string_to_json_position(x):\n\n s = x.split(',')\n return {'lat': float(s[0]), 'lng': float(s[1])}",
"def get_coordinates(text):\n m = re.search(COORD_PATTERN, text)\n if m:\n neglat = m.groups(0)[0]\n latitude = neglat + m.groups(0)[1]\n neglong = m.groups(0)[2]\n longitude = neglong + m.groups(0)[3]\n return {\n \"lat\": latitude,\n \"lon\": longitude\n }\n return None",
"def parse_pint_string(self, pint_string):\n val = pint_string.split(' ')[0]\n units = pint_string.split(val+' ')[-1]\n return val, units",
"def str2polygon(strdata):\n pts = []\n partial = None\n\n # We have two potential formats, one with 4 or 5 places and one\n # with eight!\n vals = re.findall(LAT_LON, strdata)\n for val in vals:\n if len(val) == 8:\n lat = float(val[:4]) / 100.00\n lon = float(val[4:]) / 100.00\n if lon < 40:\n lon += 100.\n lon = 0 - lon\n pts.append(checker(lon, lat, strdata))\n else:\n fval = float(val) / 100.00\n if partial is None: # we have lat\n partial = fval\n continue\n # we have a lon\n if fval < 40:\n fval += 100.\n fval = 0 - fval\n pts.append(checker(fval, partial, strdata))\n partial = None\n\n if not pts:\n return None\n if pts[0][0] != pts[-1][0] and pts[0][1] != pts[-1][1]:\n pts.append(pts[0])\n return Polygon(pts)",
"def func2(string:str):\n with open(string,\"r\") as file:\n data = file.read()\n data = data.split(\"bandwidths [1]:\")[0]\n\n final = {}\n for i in range(1,3):\n final[\"formants [{}]\".format(i)] = []\n my_list = data.split(\"formants\")\n for i in range(2,4):\n final[\"formants [{}]\".format(i-1)].extend(list(map(pars_points,my_list[i].split(\"points \")[1:])))\n return final",
"def text_points(points, strs, **kw):\n xs, ys = asarray(points, float).T\n if isinstance(strs, str): #vectorize strs\n strs = [strs] * len(xs)\n for x, y, s in zip(xs, ys, strs):\n if not s: continue\n pylab.text(x, y, s, **kw)",
"def _parse_uncompressed_position(data: str) -> Tuple[float, float, int, str, str]:\n # Decode the latitude and ambiguity\n try:\n lat, ambiguity = APRSUtils.decode_uncompressed_latitude(data[0:8])\n\n except ValueError as e:\n raise ParseError(\"Invalid latitude: {}\".format(e))\n\n # Decode the longitude\n try:\n lng = APRSUtils.decode_uncompressed_longitude(data[9:18])\n\n except ValueError as e:\n raise ParseError(\"Invalid longitude: {}\".format(e))\n\n logger.debug(\"Latitude: {} ({}) Longitude: {}\".format(\n lat, ambiguity, lng\n ))\n\n # Parse the symbol table\n symbol_table = data[8]\n logger.debug(\"Symbol table: {}\".format(symbol_table))\n\n try:\n # Parse the symbol ID\n symbol_id = data[18]\n logger.debug(\"Symbol: {}\".format(symbol_id))\n except IndexError:\n raise ParseError(\"Missing symbol identifier\")\n\n return (lat, lng, ambiguity, symbol_table, symbol_id)",
"def parse_gpx_file(gpx_file_location):\n points = \"\\\"latitude\\\",\\\"longitude\\\",\\\"time\\\"\\n\"\n gpx = gpxpy.parse(gpx_file_location)\n for track in gpx.tracks:\n for segment in track.segments:\n for point in segment.points:\n points = points + f\"{point.latitude},{point.longitude},{point.time}\\n\"\n return points",
"def input_parser(input_string: str) -> str: \n if is_int(input_string):\n return input_string\n #he is int, give back plz.\n else:\n try:\n modified_input: str = input_string.strip()\n\n evaluatable_pairs: str = regex_splitter(modified_input)\n\n while not (is_int(evaluatable_pairs)):\n evaluatable_pairs = regex_splitter(evaluatable_pairs)\n\n return (evaluatable_pairs)\n\n except:\n raise Exception(\"Invalid Input\")",
"def extractCoords(coords):\n\tregex2 = re.compile('\\s+')\n\ts = regex2.split(coords[0])\n\tcoord_list = []\n\tfor i in s:\n\t\tcoord_list.append(i.split(','))\n\n\tnew_coord_list = []\n\tfor item in coord_list:\n\t\ttriple = []\n\t\tfor n in item:\n\t\t\ts = n.strip()\n\t\t\ttry:\n\t\t\t\ttriple.append( float(s))\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\tif (len(triple)!=0):\n\t\t\tnew_coord_list.append(triple)\n\treturn new_coord_list",
"def parse(self, string):\n parse = re.match(\"^((?:[0-9]{1,3}\\.){3}[0-9]{1,3})\\s\\(((?:\\d)*\\.(?:\\d)*|(?:\\d)*)\\sms\\)$\", string)\n parse_result = parse.groups()\n return parse_result[0], parse_result[1]",
"def _parse_numbers(self, numberstr: str):\n numbers = []\n currentnumber = \"\"\n\n for c in numberstr:\n if c.isdigit() or c == '-' or c == '.':\n currentnumber += c\n elif len(currentnumber) > 0:\n numbers.append(float(currentnumber))\n currentnumber = \"\"\n if len(currentnumber) > 0:\n numbers.append(float(currentnumber))\n\n return np.array(numbers)",
"def parse_string(s):\n # type: (str) -> Union[str, np.ndarray, float]\n v = re.sub(r'[\\[\\]]', '', s)\n\n if ',' in v:\n v = v.split(',')\n elif ';' in v:\n v = v.split(';')\n\n try:\n v = np.atleast_1d(np.array(v, dtype=float))\n if v.size == 1:\n v = v[0]\n return v\n except ValueError:\n return s",
"def compute_start_end_points(linestrings):\n starts = []\n stops = []\n for ls in linestrings:\n pt = Point(ls.coords[0])\n starts.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n pt = Point(ls.coords[-1])\n stops.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n return starts, stops",
"def parse(self, text):\n return self.dict.txt2vec(text)",
"def readPointFile(filename):\n pointInfo = {}\n f = open(filename, 'r')\n for data in f.readlines():\n point, info = data.split(\"==\")\n lng, lat = [float(p) for p in point.split(\",\")]\n pointInfo[(lng, lat)] = parseInfoToDict(info)\n f.close()\n\n return pointInfo",
"def parse_position_line(line):\n\n match = Response.regex_position.search(line)\n if match is not None:\n result = dict(\n x=float(match.group(\"x\")),\n y=float(match.group(\"y\")),\n z=float(match.group(\"z\")),\n )\n if match.group(\"e\") is not None:\n # report contains only one E\n result[\"e\"] = float(match.group(\"e\"))\n\n elif match.group(\"es\") is not None:\n # report contains individual entries for multiple extruders (\"E0:... E1:... E2:...\")\n es = match.group(\"es\")\n for m in Response.regex_e_positions.finditer(es):\n result[\"e{}\".format(m.group(\"id\"))] = float(m.group(\"value\"))\n\n else:\n # apparently no E at all, should never happen but let's still handle this\n return None\n\n return result\n\n return None",
"def parse_gp_hyperparam_priors(s):\n # define some regular expressions to parse the strings\n f = r'[0-9]*\\.?[0-9]*(?:e[+-]?[0-9]+)?'\n r1 = re.compile(r'^ *({f}) *$'.format(f=f))\n r3 = re.compile(r'^ *({f}) +({f}) +({f}) *$'.format(f=f))\n # take a string and return a numpy array or raise an exception\n def parse(a):\n m = r1.match(a)\n if m is not None:\n return np.array(m.groups()[0],dtype=float)\n m = r3.match(a)\n if m is not None:\n return np.array(m.groups(),dtype=float)\n raise ValueError(\"Unable to parse {0} as hyperparameter prior\".format(repr(a)))\n # split the string into two\n arr = s.split(';')\n if len(arr) != 2:\n raise ValueError('Expecting only one semicolon in hyperparameters')\n # parse the two sides\n return (parse(arr[0]),\n parse(arr[1]))",
"def parseTupleList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"),\",\"*\")\r\n string = string.replace(\"(\", \"\")\r\n string = string.replace(\")\", \"\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = tuple(string[i])\r\n return string",
"def _package_coordinates(self, coords_string):\n values = [float(x) for x in coords_string.strip().replace(\",\", \" \").split()]\n\n if len(values) % 2 != 0:\n raise Exception(\"Number of values for coordinates is not even.\")\n \n return {\"lat\": values[0::2], \"lon\": values[1::2], \"type\": \"polygon\", \"do_sanitise_geometries\": False}",
"def parse(self):\n\t\tsub = self.body.split(' ')\n\t\tif len(sub) == 3:\n\t\t\tself.latitude = float(sub[1])\n\t\t\tself.longitude = float(sub[2])\n\t\telse:\n\t\t\tself.latitude = None\n\t\t\tself.longitude = None\n\t\t\traise Exception(\"Invalid message\")",
"def get_points(geo_file_path):\n points = dict()\n point_idx = 1\n section_points = read_section(geo_file_path, SECTION_MARKER_POINTS)\n\n for line in section_points:\n split = line.split(' ')\n if len(split) == 3:\n x, y, _ = split\n points[str(point_idx)] = float(x), float(y)\n point_idx += 1\n\n return points",
"def readTsp(self, String0):\n Name = re.match(r\"NAME : (.*)\", String0)[1]\n COMMENT = re.search(r\"COMMENT : (.*)\", String0)[1]\n TYPE = re.search(r\"TYPE : (.*)\", String0)[1]\n DIMENSION = re.search(r\"DIMENSION : (.*)\", String0)[1]\n EDGE_WEIGHT_TYPE = re.search(r\"EDGE_WEIGHT_TYPE : (.*)\", String0)[1]\n NODE_COORD_SECTION = []\n split = String0.split(\"\\n\")\n for s0 in split:\n if (s0 and s0[0] <= '9' and s0[0] >= '0'):\n one = s0.split(\" \")\n One = []\n One.append(float(one[0]))\n One.append(float(one[1]))\n One.append(float(one[2]))\n if (One != []):\n NODE_COORD_SECTION.append(One)\n return Name, COMMENT, TYPE, DIMENSION, EDGE_WEIGHT_TYPE, NODE_COORD_SECTION",
"def p_parse(toks):\n return p_question_group.parseString(toks[0])",
"def parse(s):\n return expr.parseString(s, parseAll=True)",
"def _parse(val: str):\n\n if not isinstance(val, str):\n raise TypeError(\"Method requires string input\")\n\n value = re.findall(r'^([-+]?\\d*\\.\\d*(?=\\s)|\\d+(?=\\s))', val)\n if not (value and val[:len(value[0])] == value[0]):\n return val, None\n\n # string starts with value\n value = value[0]\n val = val[len(value):]\n\n val = val.strip()\n if val:\n unit = val\n else:\n unit = 'dimensionless'\n\n return value, unit",
"def parse_pts(pts_result_file, global_var_list, parsed_results_file):\n fill_value_name(pts_result_file, global_var_list, parsed_results_file)",
"def parseString(self, s):\n return self.parser.parseString(s)",
"def parse_coords(geo: str) -> Tuple[float, float]:\n lat, long = [float(x.strip()) for x in geo.split(\",\")]\n if lat > 90 or lat < -90:\n raise ValueError(\"latitude does not fall in the range (-90, 90)\")\n if long > 180 or long < -180:\n raise ValueError(\"longitude does not fall in the range (-180, 180)\")\n return (lat, long)",
"def parse(input):\n\n SHIP_STRING = {\"submarine\": \"sub\", \"aircraft\": \"aircraft\", \"patrol\": \"patrol\",\n \"sub\" : \"sub\"}\n #ship type\n ship_parser = re.compile(r'(?i)submarine|sub|aircraft|patrol')\n ship_match = ship_parser.search(input)\n\n if ship_match is None:\n raise ValueError(\"Invalid ship type in input\")\n\n ship_type = SHIP_STRING.get(ship_match.group())\n\n #point index\n position_parser = re.compile(r'(?i)[A-J],?\\s?([0-9]?[0-9])')\n position_match = position_parser.search(input)\n\n if position_match is None:\n raise ValueError(\"Invalid position in input, make sure you use the form (x,y); e.g. (A,2)\")\n\n position = (ord(position_match.group()[0].upper())-64,\n int(position_match.group()[1:]))\n\n #orientation, True = horizontal\n orientation_parser = re.compile(r'(?i)horizontal|vertical|horizontally|' +\n 'vertically')\n orientation_match = orientation_parser.search(input)\n if orientation_match is None:\n raise ValueError(\"Invalid orientation in input\")\n orientation = \"horizontal\" in orientation_match.group().lower()\n\n return ship_type, position, orientation",
"def load_get_landmark_was_pointed(path):\n with open(path) as f:\n rows = [rows.strip() for rows in f]\n \n \"\"\"Use the curly braces to find the start and end of the point data\"\"\" \n head = rows.index('{') + 1\n tail = rows.index('}')\n\n \"\"\"Select the point data split into coordinates\"\"\"\n raw_points = rows[head:tail]\n coords_set = [point.split() for point in raw_points]\n\n \"\"\"Convert entries from lists of strings to tuples of floats\"\"\"\n points = [tuple([float(point) for point in coords]) for coords in coords_set]\n return points",
"def get_input_from_string(str_input):\n\tfrom string import split\n\tres = []\n\tlist_input = split(str_input)\n\tfor i in xrange(len(list_input)):\n\t\tres.append(float(list_input[i]))\n\treturn res",
"async def points(self, ctx: Context, *, points: remove_whitespace) -> None:\n if not (point_array := re.finditer(POINT_ARRAY_FORMAT, points)):\n embed = DefaultEmbed(ctx, desc=\"Illegal character(s) in point array.\")\n\n await ctx.send(embed=embed)\n\n return\n\n else:\n # *_ catches any other dimension of the array, so only 2d is captured.\n x, y, *_ = zip(*[list(map(float, point.group(0).split(\",\"))) for point in point_array])\n\n embed = await self.bot.loop.run_in_executor(None, self.create_graph, ctx, x, y)\n\n await ctx.send(file=embed.file, embed=embed)",
"def parse_place_notation(input_string: str) -> Tuple[int, str]:\n\n # Looking for a string that matches <stage>:<place notation> where the\n # place notation is a series of bell numbers and 'x' characters\n parts = input_string.split(\":\")\n if len(parts) == 2:\n stage_part = parts[0]\n if len(stage_part) == 0 or not stage_part.isnumeric():\n raise PlaceNotationError(input_string, \"Stage must be a number\")\n stage = int(stage_part)\n place_notation = parts[1]\n if not valid_pn(place_notation):\n raise PlaceNotationError(input_string, \"Place notation is invalid\")\n else:\n raise PlaceNotationError(input_string, \"<stage>:<place notation> required\")\n\n return stage, place_notation",
"def csv_parser(lines): \n\n data_points = []\n for line in lines:\n items = line.strip().split(\",\")\n try: #will fail on header line in file\n data_points.append(map(float, items[1:])) #first item is the label\n except ValueError: #must be the header\n continue\n return data_points",
"def key_to_coordinates(key):\n stripkey = key.strip(\"(\").strip(\")\").split(\", \")\n point_coordinates = tuple(float(elem) for elem in stripkey)\n return point_coordinates",
"def parse_gps_sentence(sentence):\n try:\n gps = pynmea2.parse(sentence)\n if not gps.is_valid:\n gps = None\n\n except pynmea2.nmea.SentenceTypeError:\n gps = None\n\n return gps",
"def __getCoordinateListForString(self, string, firstCharacter):\r\n coordinateList = []\r\n charactersEntered = 0\r\n breakReason = None\r\n\r\n statusDict = self.getVKBStatus()\r\n\r\n for i in range(len(string)):\r\n # Get coordinates for keys as long the case needs to be changed. FIXME! add switchCase to loop\r\n # no need to care about case in Email layout with other than alphabet characters\r\n if string[i] in self.__layoutMaps[self.currentLayout].keys() and \\\r\n (self.__layoutMaps[self.currentLayout][string[i]][1]&self.currentCase):\r\n coords, case, repeat = self.__layoutMaps[self.currentLayout][string[i]]\r\n if self.currentLayout&self.LAYOUT_ITUT and len(coordinateList) and coords==coordinateList[-1]:\r\n breakReason = 'delay'\r\n break # To add some delay between characters on same button\r\n for r in range(repeat):\r\n coordinateList.append(coords)\r\n if self.currentCase==self.CASE_UPPER and not self.capsLockOn:\r\n self.currentCase = self.CASE_LOWER\r\n charactersEntered +=1\r\n # In common-noun mode, VKB changes to uppercase after dot character so break loop.\r\n if string[i]=='.' and statusDict['minor-mode']=='common-noun' and not self.capsLockOn:\r\n self.currentCase = self.CASE_UPPER\r\n break\r\n else:\r\n breakReason = 'case'\r\n break\r\n\r\n return (coordinateList,charactersEntered,breakReason)",
"def parse_string(self, matrix):\n for idx, row in enumerate(matrix):\n for idy, item in enumerate(row):\n self.o_pos_to_num[(idx, idy)] = item\n self.pos_to_num[(idx, idy)] = item\n self.o_num_to_pos[item] = (idx, idy)\n self.num_to_pos[item] = (idx, idy)",
"def parse_string_line(self, data_line):\n if data_line:\n data_line = data_line.rstrip()\n if data_line:\n if data_line[0] == '#':\n extraparams = json.loads(data_line[1:])\n if 'glyph_cap_line' in extraparams:\n self.__capline = extraparams['glyph_cap_line']\n if 'glyph_base_line' in extraparams:\n self.__baseline = extraparams['glyph_base_line']\n if 'glyph_bottom_line' in extraparams:\n self.__bottomline = extraparams['glyph_bottom_line']\n elif len(data_line) > 9:\n strokes = []\n xmin = xmax = ymin = ymax = None\n # individual strokes are stored separated by <space>+R\n # starting at col 11\n for s in split(data_line[10:], ' R'):\n if len(s):\n stroke = list(zip(map(self.__char2val, s[::2]), map(self.__char2val, s[1::2])))\n xmin = min(stroke + ([xmin] if xmin else []), key=lambda t: t[0])\n ymin = min(stroke + ([ymin] if ymin else []), key=lambda t: t[1])\n xmax = max(stroke + ([xmax] if xmax else []), key=lambda t: t[0])\n ymax = max(stroke + ([ymax] if ymax else []), key=lambda t: t[1])\n strokes.append(stroke)\n self.__charcode = int(data_line[0:5])\n self.__left_side = self.__char2val(data_line[8])\n self.__right_side = self.__char2val(data_line[9])\n self.__strokes = strokes\n self.__xmin, self.__ymin, self.__xmax, self.__ymax = (xmin[0], ymin[1], xmax[0], ymax[1]) if strokes else (0, 0, 0, 0)\n return True\n return False",
"def parse_gate(s):\n if \"(\" in s:\n gate = s[:s.find(\"(\")]\n params = (s[s.find(\"(\") + 1:s.find(\")\")]).split(',')\n else:\n gate = s\n params = []\n return gate, params"
] | [
"0.7486971",
"0.73367745",
"0.7277568",
"0.72427535",
"0.70839274",
"0.6653839",
"0.66499704",
"0.65557563",
"0.6511648",
"0.6503271",
"0.6497287",
"0.6403335",
"0.63999814",
"0.63896656",
"0.6360118",
"0.631891",
"0.62946934",
"0.6294317",
"0.6215328",
"0.6208789",
"0.6205417",
"0.61564714",
"0.61482143",
"0.60772383",
"0.6056188",
"0.60336494",
"0.6028094",
"0.60173905",
"0.599064",
"0.5966035",
"0.59599316",
"0.59478474",
"0.5927191",
"0.5919969",
"0.58392256",
"0.58240724",
"0.58185047",
"0.5813609",
"0.5813609",
"0.5793371",
"0.57827854",
"0.57807785",
"0.5764607",
"0.5755335",
"0.5738151",
"0.5731645",
"0.5729306",
"0.5717426",
"0.570588",
"0.57014155",
"0.57005364",
"0.569871",
"0.569313",
"0.56873477",
"0.5681506",
"0.5674916",
"0.5645762",
"0.5625315",
"0.5623528",
"0.5621914",
"0.5610122",
"0.5605006",
"0.55919486",
"0.5591177",
"0.5574496",
"0.55638546",
"0.55613065",
"0.5555514",
"0.555348",
"0.5552423",
"0.55498356",
"0.5544893",
"0.5541929",
"0.55370533",
"0.55306005",
"0.552715",
"0.5489164",
"0.5487556",
"0.54803383",
"0.54766047",
"0.54734397",
"0.54707706",
"0.54617935",
"0.5453955",
"0.54532546",
"0.54500425",
"0.54317296",
"0.54297733",
"0.54270434",
"0.5420144",
"0.5412533",
"0.539414",
"0.539396",
"0.53748965",
"0.53681606",
"0.5363036",
"0.5352935",
"0.5347709",
"0.5338506",
"0.53225493"
] | 0.70314664 | 5 |
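A quick usage sketch for the makepts document above; the input format (comma-separated coordinates joined by '-') is inferred from the parsing logic, and note that splitting on '-' means negative coordinates are not supported.

points = makepts('1,2 - 3.5,4 - 5,6')
# -> [[1.0, 2.0], [3.5, 4.0], [5.0, 6.0]]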
Create the points of a regular polygon | def polygonpts(nSides, radius=1.0):
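    # Vertices of a regular n-gon, evenly spaced on a circle of `radius`.
    # Assumes module helpers: `twopi` (2*math.pi) and `frange` (evenly spaced
    # floats, linspace-like, so [:-1] drops the duplicated endpoint), plus
    # math.cos/math.sin in scope.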
return [[cos(theta)*radius, sin(theta)*radius] for theta in frange(0, twopi, nSides+1)[:-1] ] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RegularPolygonPoints(n,c):\n coord = []\n for i in range(n):\n x = m.cos(2*m.pi*i/n)+c[0]\n y = m.sin(2*m.pi*i/n)+c[1]\n coord.append([x,y])\n return(coord)",
"def give_polygon(vertices, points):\n polygon = np.zeros((len(vertices), 2))\n for i, vertex in enumerate(vertices):\n polygon[i] = points[vertex]\n # End point of a polygon equals to start point\n polygon = polygon.tolist()\n if polygon[-1] != polygon[0]:\n polygon.append(polygon[0])\n return polygon",
"def generatePolygons():",
"def test_simple_polygonisation(n_points=20):\n # generate random sample points.\n sample_points = np.random.random_sample((n_points,2))*10\n # generate simple polygon\n seq = simple_polygonisation(sample_points)\n # plot polygon\n plt.figure()\n plt.plot(seq[:,0], seq[:,1], color=\"blue\", marker=\"s\", alpha=0.5)",
"def generate_polygon(x,y,N):\r\n # Add the first point to the end of the list and convert to array if needed\r\n if type(x) == list:\r\n x = np.array(x + [x[0]])\r\n y = np.array(y + [y[0]])\r\n else:\r\n x = np.append(x,x[0])\r\n y = np.append(y,y[0])\r\n \r\n # Parameterize the arrays and interpolate\r\n d = [get_distance((x[i],y[i]),(x[i+1],y[i+1])) for i in range(len(x)-1)]\r\n d = np.cumsum([0]+d)\r\n t = np.linspace(0,d[-1],N)\r\n fx = interp1d(d,x)\r\n fy = interp1d(d,y)\r\n x = fx(t)\r\n y = fy(t)\r\n \r\n return x,y",
"def generate_random_points_in_polygon(num_of_points, polygon) -> list:\n list_of_points = []\n min_x, min_y, max_x, max_y = polygon.bounds\n counter = 0\n while counter < num_of_points:\n point = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))\n if polygon.contains(point):\n list_of_points.append(point)\n counter += 1\n return list_of_points",
"def draw_polygon(self, *points, color=DEFAULT.color):",
"def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")",
"def trapezoid_decomposition_pl(polygons, bounds):\n polygons = Polygons(polygons)\n # print(bounds)\n point_locator = PointLocator(bounds)\n for edge in polygons.random_edge_sampler():\n point_locator.add_line(edge)\n return point_locator",
"def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component",
"def polygon(self, center, angle, points, color=(255, 255, 255), width=0):\n poly = list()\n\n for point in points:\n point = self._rotate(point, angle)\n point += center\n point = self._transform(point)\n poly.append(point)\n\n pygame.draw.polygon(self.screen, color, poly, width)",
"def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points",
"def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self",
"def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self",
"def regular_polygon(self, n, field = QQ):\n npi = 3.14159265359\n verts = []\n for i in range(n):\n t = 2*npi*i/n\n verts.append([sin(t),cos(t)])\n verts = [[field(RDF(x)) for x in y] for y in verts]\n return Polyhedron(vertices = verts, field = field)",
"def _normal_polygon(points, unitized=True):\n p = len(points)\n assert p > 2, \"At least three points required\"\n nx = 0\n ny = 0\n nz = 0\n for i in range(-1, p - 1):\n p1 = points[i - 1]\n p2 = points[i]\n p3 = points[i + 1]\n v1 = subtract_vectors(p1, p2)\n v2 = subtract_vectors(p3, p2)\n n = cross_vectors(v1, v2)\n nx += n[0]\n ny += n[1]\n nz += n[2]\n if not unitized:\n return nx, ny, nz\n l = length_vector([nx, ny, nz])\n return nx / l, ny / l, nz / l",
"def polygon(center, sides, radius=1, rotation=0, translation=None):\n\n\tone_segment = math.pi * 2 / sides\n\tpoints = [\n\t\t(int(round(center[0] + math.sin(one_segment * i + rotation) * radius, 0)),\n\t\t int(round(center[1] + math.cos(one_segment * i + rotation) * radius, 0)))\n\t\tfor i in range(sides)]\n\tif translation:\n\t\tpoints = [[sum(pair) for pair in zip(point, translation)]\n\t\t\t\t for point in points]\n\treturn points",
"def decompose_polygon(points):\n N, _ = points.shape\n\n for i in range(1, N - 1):\n yield numpy.array([points[0], points[i], points[i + 1]])",
"def populate(num_points):\r\n superscript = str.maketrans(\"0123456789\", \"⁰¹²³⁴⁵⁶⁷⁸⁹\")\r\n highest_deg = int(math.sqrt(num_points))\r\n x_terms = []\r\n y_terms = []\r\n for i in range(highest_deg):\r\n x_terms.append(f'(x{i})'.translate(superscript))\r\n y_terms.append(f'(y{i})'.translate(superscript))\r\n return [x_terms, y_terms]",
"def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return",
"def _get_voronoi_poly_points(vert_index_list, voronoi_vertices,\n voronoi_centroid):\n voronoi_poly_points = []\n if -1 not in vert_index_list and len(vert_index_list) > 3:\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n elif vert_index_list.size > 0:\n # ASSUME RECTANGLE\n vert_index_list = vert_index_list[vert_index_list >= 0]\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n # CASE 1: 2 valid voronoi vertices\n if vert_index_list.size == 2:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon1 = voronoi_poly_points[0][0]\n corner_lat1 = voronoi_poly_points[0][1]\n corner_lon2 = voronoi_poly_points[1][0]\n corner_lat2 = voronoi_poly_points[1][1]\n\n # check if need to add points in lon or lat\n if abs(corner_lon1-corner_lon2) > abs(corner_lat1-corner_lat2):\n dLat = center_lat - corner_lat1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [corner_lon2, center_lat + dLat],\n [corner_lon1, center_lat + dLat]\n ])\n else:\n dLon = center_lon - corner_lon1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [center_lon + dLon, corner_lat2],\n [center_lon + dLon, corner_lat1]\n ])\n # CASE 2: 1 valid voronoi vertex\n elif vert_index_list.size == 1:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon = voronoi_poly_points[0][0]\n corner_lat = voronoi_poly_points[0][1]\n dLat = center_lat - corner_lat\n dLon = center_lon - corner_lon\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon, corner_lat],\n [center_lon + dLon, corner_lat],\n [center_lon + dLon, center_lat + dLat],\n [corner_lon, center_lat + dLat]\n ])\n\n return voronoi_poly_points",
"def create_polygon(self, vertices, style=None, parent=None):\n d = 'M %f %f L' % (vertices[0].x, vertices[0].y)\n for p in vertices[1:]:\n d = d + ' %f,%f' % (p.x, p.y)\n if vertices[0] != vertices[-1]:\n d = d + ' %f,%f' % (vertices[0].x, vertices[0].y)\n attrs = {'d': d}\n return self.create_path(attrs, style, parent)",
"def polygonFromInteriorPoints(geom, preferredEpsg):\n (projTr, llTr) = makeTransformations(4326, preferredEpsg)\n\n geomProj = copyGeom(geom)\n geomProj.Transform(projTr)\n geomOutline = geomProj.ConvexHull()\n geomOutline.Transform(llTr)\n return geomOutline",
"def random_points_within_poly(poly, npts):\n try:\n # geodjango/OGR interface\n xmin, ymin, xmax, ymax = poly.extent\n is_geodjango = True\n except AttributeError:\n # shapely interface\n xmin, ymin, xmax, ymax = poly.bounds\n is_geodjango = False\n dx = xmax - xmin\n dy = ymax - ymin\n out_idx = np.ones(npts).astype(bool)\n x = np.zeros(npts)\n y = np.zeros(npts)\n\n while out_idx.sum():\n xn = np.random.random(size=out_idx.sum()) * dx + xmin\n yn = np.random.random(size=out_idx.sum()) * dy + ymin\n x[out_idx] = xn\n y[out_idx] = yn\n if is_geodjango:\n out_idx = np.array([not geos.Point(a, b).within(poly) for (a, b) in zip(x, y)])\n else:\n out_idx = np.array([not geometry.Point(a, b).within(poly) for (a, b) in zip(x, y)])\n\n return x, y",
"def test_clip_points_by_polygons(self):\n\n # Name input files\n point_name = join(TESTDATA, 'population_5x5_jakarta_points.shp')\n point_layer = read_layer(point_name)\n points = numpy.array(point_layer.get_geometry())\n attrs = point_layer.get_data()\n\n # Loop through polygons\n for filename in ['polygon_0.shp', 'polygon_1.shp', 'polygon_2.shp',\n 'polygon_3.shp', 'polygon_4.shp',\n 'polygon_5.shp', 'polygon_6.shp']:\n\n polygon_layer = read_layer(join(TESTDATA, filename))\n polygon = polygon_layer.get_geometry()[0]\n\n # Clip\n indices = inside_polygon(points, polygon)\n\n # Sanity\n for point in points[indices, :]:\n assert is_inside_polygon(point, polygon)\n\n # Explicit tests\n if filename == 'polygon_0.shp':\n assert len(indices) == 6\n elif filename == 'polygon_1.shp':\n assert len(indices) == 2\n assert numpy.allclose(points[indices[0], :],\n [106.8125, -6.1875])\n assert numpy.allclose(points[indices[1], :],\n [106.8541667, -6.1875])\n assert numpy.allclose(attrs[indices[0]]['value'],\n 331941.6875)\n assert numpy.allclose(attrs[indices[1]]['value'],\n 496445.8125)\n elif filename == 'polygon_2.shp':\n assert len(indices) == 7\n elif filename == 'polygon_3.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_4.shp':\n assert len(indices) == 0 # Degenerate\n elif filename == 'polygon_5.shp':\n assert len(indices) == 8\n elif filename == 'polygon_6.shp':\n assert len(indices) == 6",
"def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon",
"def generate_points(octrees, pyramids, exsum):\n return _C.ops.spc.GeneratePoints(octrees.contiguous(),\n pyramids.contiguous(),\n exsum.contiguous())",
"def gen_rdm_points_square(polygon, size):\n minx, miny, maxx, maxy = polygon.bounds\n box_points = list(box(minx, miny, maxx, maxy, ccw=True).exterior.coords)\n x = np.random.uniform(low=box_points[0][0], high=box_points[2][0], size=size)\n y = np.random.uniform(low=box_points[0][1], high=box_points[2][1], size=size)\n return np.array(list(zip(x, y)))",
"def draw_filled_polygon(\n self, points: Iterable[Vec3], properties: Properties\n ) -> None:\n raise NotImplementedError",
"def plot_polygon(points, **kwargs):\n plt.gca().add_collection(\n PatchCollection(\n [Polygon(points, True)],\n **kwargs)\n )",
"def getPointInPolygonStatement(self, approxTable, columns, columnsPIP, condition):\r\n return 'SELECT ' + ora.getHintStatement(ora.getParallelStringQuery(self.numProcesses)) + ora.getSelectColumns('*') + \"\"\" \r\nFROM TABLE(mdsys.sdo_PointInPolygon(CURSOR(\r\n\"\"\" + ora.getSelectStatement(approxTable, ora.getSelectColumns(columnsPIP)) + \"\"\"), \r\nMDSYS.SDO_GEOMETRY('\"\"\" + self.wkt + \"\"\"', \"\"\" + str(self.srid) + \"\"\"), \"\"\" + str(self.tolerance) +\"\"\"))\r\n\"\"\" + condition",
"def regular_polygon(sides, radius, height):\n global _cmds\n _cmds = \"}\\n\\n\" + _cmds\n for wedge in range(sides):\n p1 = _cart(radius, wedge*360/sides)\n p2 = _cart(radius, (wedge+1)*360/sides)\n triangle([0, 0], p1, p2, height)\n _cmds = \"union(){\\n\" + _cmds",
"def MakePoints(xStart, xEnd, numPoints):\n if len(xStart) != 3 or len(xEnd) != 3:\n raise Exception(\"Start and end point must be 3-dimensional vectors\")\n if numPoints < 2:\n raise Exception(\"At least two points are required\")\n \n # Starting Points\n pt_list = []\n x = xStart[0]\n y = xStart[1]\n z = xStart[2]\n\n # How much we add/subtract between each interpolated point\n x_steps = (xEnd[0] - xStart[0])/(numPoints-1)\n y_steps = (xEnd[1] - xStart[1])/(numPoints-1)\n z_steps = (xEnd[2] - xStart[2])/(numPoints-1)\n\n # Incrementally add to each point until the end point is reached\n for i in range(numPoints):\n point_i = [x,y,z] # create a point\n #append the point to the list\n pt_list.append(point_i)\n x = x + x_steps\n y = y + y_steps\n z = z + z_steps\n return pt_list",
"def get_polygon_points(starting_point, vector_seq):\n \n res=[[]]\n res[0] = starting_point\n curr_point = starting_point\n\n i=1\n\n while i<len(vector_seq):\n if are_parallel(vector_seq[i],vector_seq[i-1]):\n tmp = vector_seq[i]\n vector_seq[i-1][0]=vector_seq[i-1][0]+ tmp[0]\n vector_seq[i-1][1]=vector_seq[i-1][1]+ tmp[1]\n vector_seq.pop(i)\n else:\n i=i+1\n \n for x in vector_seq:\n x_coord = curr_point[0]+x[0]\n y_coord = curr_point[1]+x[1]\n curr_point=[x_coord, y_coord]\n res.append(curr_point)\n\n return res",
"def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()",
"def define_polygon(cls, polygon):\n \n num_obj = cls()\n num_obj.coord = [np.array(polygon)]\n return num_obj",
"def makeup_polygons(\n draw: ImageDraw,\n num_cells: int,\n width: int,\n height: int,\n rgb_im: Image,\n random: bool,\n):\n voronoi, points = generate_voronoi_diagram(num_cells, width, height)\n for point, index in zip(points, voronoi.point_region):\n # Getting the region of the given point\n region = voronoi.regions[index]\n # Getting the points in arrays\n polygon = list()\n for i in region:\n # If vector is out of plot do not add\n if i != -1:\n polygon.append(voronoi.vertices[i])\n # Make tuples of the points\n polygon_tuples = list()\n for l in polygon:\n polygon_tuples.append(tuple(l))\n rgb = (0, 0, 0)\n if random:\n # Get random color\n rgb = random_color()\n else:\n # Get colors of the middle point\n rgb = get_color_of_point(point, rgb_im, width, height)\n # Draw the calculated polygon with the color of the middle point\n if polygon and polygon_tuples:\n draw.polygon(polygon_tuples, rgb)",
"def draw_polygon(*points):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n newpoints = []\r\n for x in range(0, len(points), 2):\r\n pt = Point(points[x], points[x+1])\r\n newpoints += [ pt ]\r\n polygon = Polygon(*newpoints)\r\n _set_not_filled(polygon)\r\n _canvas.add(polygon)",
"def draw_shape_polygon(self, poly, xform, colour):\n pts = [xform.chain(p) for p in poly.points]\n self.canvas.polygon([(p.x, p.y) for p in pts], outline=colour)",
"def drawPolygon(points, ax, linespec=None):\n if len(points.shape) != 2 or points.shape[0] != 2:\n raise ValueError(\"'points' must be 2xN\")\n if linespec is None:\n linespec = __color_cycle.next() + 'o-'\n ax.plot(np.concatenate((points[0,:], [points[0,0]]), 1).T,\n np.concatenate((points[1,:], [points[1,0]]), 1).T, linespec)",
"def simple_polygonisation(sample_points):\n # find the extreme points with respect to x-axis\n left_most = np.argmin(sample_points[:,0])\n right_most = np.argmax(sample_points[:,0])\n # exclude the extreme points from candidates\n candidates = np.delete(sample_points, [left_most, right_most], axis=0)\n # populate seperate sets for points above and below\n set_a, set_b = [], []\n for p in candidates:\n if is_above_line(sample_points[left_most], sample_points[right_most], p):\n set_a.append(p)\n else:\n set_b.append(p)\n set_a, set_b = np.array(set_a), np.array(set_b) # more efficient than\n # np.append\n # generate the final sequence\n seq = np.array([sample_points[left_most]])\n if len(set_a) > 0:\n set_a = set_a[set_a[:,0].argsort()]\n seq = np.concatenate((seq, set_a), axis=0)\n seq = np.concatenate((seq, [sample_points[right_most]]), axis=0)\n if len(set_b) > 0:\n set_b = set_b[(-set_b[:,0]).argsort()]\n seq = np.concatenate((seq, set_b), axis=0)\n seq = np.concatenate((seq, [sample_points[left_most]]), axis=0)\n return seq",
"def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)",
"def normal_polygon(points, unitized=True):\n p = len(points)\n assert p > 2, \"At least three points required\"\n nx = 0\n ny = 0\n nz = 0\n o = centroid_points(points)\n a = subtract_vectors(points[-1], o)\n for i in range(p):\n b = subtract_vectors(points[i], o)\n n = cross_vectors(a, b)\n a = b\n nx += n[0]\n ny += n[1]\n nz += n[2]\n if not unitized:\n return nx, ny, nz\n l = length_vector([nx, ny, nz])\n return nx / l, ny / l, nz / l",
"def dilate_polygon(points, amount_increase):\n expanded_points = []\n for index, point in enumerate(points):\n prev_point = points[(index - 1) % len(points)]\n next_point = points[(index + 1) % len(points)]\n prev_edge = np.subtract(point, prev_point)\n next_edge = np.subtract(next_point, point)\n \n prev_normal = ((1 * prev_edge[1]), (-1 * prev_edge[0]))\n prev_normal = np.divide(prev_normal, np.linalg.norm(prev_normal))\n next_normal = ((1 * next_edge[1]), (-1 * next_edge[0]))\n next_normal = np.divide(next_normal, np.linalg.norm(next_normal))\n\n bisect = np.add(prev_normal, next_normal)\n bisect = np.divide(bisect, np.linalg.norm(bisect))\n \n cos_theta = np.dot(next_normal, bisect)\n hyp = amount_increase / cos_theta\n \n new_point = np.around(point + hyp * bisect)\n new_point = new_point.astype(int)\n new_point = new_point.tolist()\n expanded_points.append(new_point)\n return expanded_points",
"def points_in_convex_polygon_jit(points, polygon, clockwise=True):\n # first convert polygon to directed lines\n num_points_of_polygon = polygon.shape[1]\n num_points = points.shape[0]\n num_polygons = polygon.shape[0]\n vec1 = polygon[:, [num_points_of_polygon - 1] + list(range(num_points_of_polygon - 1)), :]\n if clockwise:\n vec1 = polygon - vec1\n else:\n vec1 = vec1 - polygon\n # vec1: [num_polygon, num_points_of_polygon, 2]\n ret = np.zeros((num_points, num_polygons), dtype=np.bool_)\n for i in range(num_points):\n for j in range(num_polygons):\n success = True\n for k in range(num_points_of_polygon):\n cross = vec1[j, k, 1] * (polygon[j, k, 0] - points[i, 0])\n cross -= vec1[j, k, 0] * (polygon[j, k, 1] - points[i, 1])\n if cross >= 0:\n success = False\n break\n ret[i, j] = success\n return ret",
"def draw_initial_polygon( sides = 6, radius = 1.0, center = Vector((0,0,0)) ):\n\n points = []\n edges = []\n step = ( 2.0 / sides )\n i = 0\n\n for i in range( sides ):\n t = ( i * step )\n x1 = cos( t * pi ) * radius\n y1 = sin( t * pi ) * radius\n\n points.append( center + Vector(( x1, y1, 0 )) )\n\n for i in range( len(points) ):\n edge = []\n\n if i == len( points ) - 1:\n edge.append( i )\n edge.append( 0 )\n else:\n edge.append( i )\n edge.append( i + 1)\n\n edges.append( tuple( edge ) )\n\n return { 'verts' : points, 'edges' : edges }",
"def make_simple_poly(origin):\r\n # Create a rectangular ring\r\n lon, lat = origin[0], origin[1]\r\n width = 100\r\n ring = ogr.Geometry(ogr.wkbLinearRing)\r\n ring.AddPoint(lon, lat)\r\n ring.AddPoint(lon + width, lat)\r\n ring.AddPoint(lon + width, lat - width / 2.0)\r\n ring.AddPoint(lon, lat - width / 2.0)\r\n ring.AddPoint(lon, lat)\r\n\r\n # Create polygon geometry\r\n poly = ogr.Geometry(ogr.wkbPolygon)\r\n poly.AddGeometry(ring)\r\n return poly",
"def fromPoints(cls, listOfPoint: list, allowAbnormal:bool=True):\n if len(listOfPoint) != 4:\n raise ValueError(\n \"ValueError:\\tThe Polygon can't be constructed\"+\n \" as a quadrilateral.\"\n )\n new = cls()\n new.setPolygon(listOfPoint,allowAbnormal=allowAbnormal)\n return new.checkSubClass()",
"def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. + self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)",
"def create_two_init_triangles(points):\n return [(points[0], points[1], points[2]),\n (points[0], points[2], points[3])]",
"def initiate(self):\n pts = []\n for point in self.points:\n pt = gr.Point(point[0],point[1])\n pts.append(pt)\n\n self.vis = [gr.Polygon(pts)]\n\n self.draw()",
"def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon",
"def create_poly(self, bounds):\n\n left, bottom, right, top = bounds\n\n return Polygon(\n [\n (left, bottom),\n (left, top),\n (right, top),\n (right, bottom),\n (left, bottom),\n ]\n )",
"def test_clip_points_by_polygons_with_holes0(self):\n\n # Define an outer ring\n outer_ring = numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]])\n\n # Define inner rings\n inner_rings = [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]]\n\n v = Vector(geometry=[Polygon(outer_ring=outer_ring,\n inner_rings=inner_rings)])\n assert v.is_polygon_data\n\n # Write it to file\n tmp_filename = unique_filename(suffix='.shp')\n v.write_to_file(tmp_filename)\n\n # Read polygon it back\n L = read_layer(tmp_filename)\n P = L.get_geometry(as_geometry_objects=True)[0]\n\n outer_ring = P.outer_ring\n inner_ring0 = P.inner_rings[0]\n inner_ring1 = P.inner_rings[1]\n\n # Make some test points\n points = generate_random_points_in_bbox(outer_ring, 1000, seed=13)\n\n # Clip to outer ring, excluding holes\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in any of the inner rings\n assert not is_inside_polygon(point, inner_ring0)\n assert not is_inside_polygon(point, inner_ring1)\n\n if False:\n # Store for visual check\n pol = Vector(geometry=[P])\n tmp_filename = unique_filename(suffix='.shp')\n pol.write_to_file(tmp_filename)\n print 'Polygon with holes written to %s' % tmp_filename\n\n pts = Vector(geometry=points[indices, :])\n tmp_filename = unique_filename(suffix='.shp')\n pts.write_to_file(tmp_filename)\n print 'Clipped points written to %s' % tmp_filename",
"def create_points(number): \n\n # generate x and y coordinates:\n x = np.random.permutation(2*number)[:number] - number\n y = np.random.permutation(2*number)[:number] - number\n\n points = [ { 0 : float(x[i]), 1 : float(y[i]), \"index\" : i} for i in range(len(x)) ]\n\n return points\n\n # generate points as coordinate pairs of floats.\n # return zip(map(float,x),map(float,y))",
"def proc_filled_polygon(self, tokens):\n\n return self._proc_polygon(tokens, filled=True)",
"def points_inside_poly(points, all_verts):\n return Path(all_verts, close=True).contains_points(points)",
"def _getshapepoly(self, polygon, compound=False):\n if self._resizemode == \"user\" or compound:\n t11, t12, t21, t22 = self._shapetrafo\n elif self._resizemode == \"auto\":\n l = max(1, self._pensize/5.0)\n t11, t12, t21, t22 = l, 0, 0, l\n elif self._resizemode == \"noresize\":\n return polygon\n return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon)",
"def __init__(self, points, verbose=False):\n assert(type(points) == np.ndarray)\n assert(points.dtype==int)\n assert(points.shape[1] == 3)\n assert(points.shape[0]>1)\n\n # Make points unique to avoid duplicate vertices:\n self.points = np.unique(points, axis=0)\n self.verbose = verbose\n self.g = self.__generate()",
"def _preprocess_polygon(polygon):\n\n # Could catch ValueErrors for unsuitable inputs\n polygon = numpy.array(polygon)\n\n if len(polygon.shape) == 1:\n if len(polygon) % 2:\n raise ValueError('Number of values for polygon not divisible by two.'\n 'Coordinates need an x and y coordinate: '.format(polygon))\n polygon = polygon.reshape((-1, 2))\n\n if not len(polygon.shape) == 2 or polygon.shape[1] != 2:\n raise ValueError('polygon of wrong dimensions. It should be of shape. '\n 'Should be: (num_points, 2). Input: {}'.format(polygon))\n\n polygon = Polygon(numpy.array(polygon))\n\n # Mainly for self-intersection\n if not polygon.is_valid:\n raise ValueError('polygon is invalid, likely self-intersecting: {}'.\n format(polygon))\n\n return polygon",
"def generatePolygons(self, *args, **kwargs): \n return 'var PloneMapPolygons = [' + \\\n ''.join([\"{ 'id': '%s', 'path' : %s,'title':'%s'},\" % (object.id, object.polygon, object.Title()) \n for object in self.context.objectValues() \n if hasattr(object, 'polygon') and len(object.polygon) > 0 ])[:-1] \\\n + '];'",
"def plotPoints(shapes, YesTestPoints, YesTrainPoints, NoTestPoints, NoTrainPoints, filename):\n\n #plot the \"yes\" sample points and the projected polygons\n figure = plt.figure(figsize=(8, 8))\n plt.hold(True)\n for shape1 in shapes:\n px, py = list(zip(*shape1['geometry']['coordinates']))\n px = list(px)\n py = list(py)\n px.append(px[0])\n py.append(py[0])\n plt.plot(px, py, 'b')\n yestestx, yestesty = list(zip(*YesTestPoints))\n yestrainx, yestrainy = list(zip(*YesTrainPoints))\n plt.plot(yestestx, yestesty, 'r.')\n plt.plot(yestrainx, yestrainy, 'g.')\n notestx, notesty = list(zip(*NoTestPoints))\n notrainx, notrainy = list(zip(*NoTrainPoints))\n plt.plot(notestx, notesty, 'cx')\n plt.plot(notrainx, notrainy, 'mx')\n plt.legend(['Polygon 1 boundaries', 'Polygon 2 boundaries', 'Yes Test', 'Yes Train', 'No Test', 'No Train'], numpoints=1)\n plt.title('Sample points')\n plt.savefig(filename)",
"def proc_unfilled_polygon(self, tokens):\n\n return self._proc_polygon(tokens, filled=False)",
"def pointToSmallPolygon(point, width=0.1):\n offset = width * 0.5\n x, y = point\n return [(x - offset, y - offset),\n (x - offset, y + offset),\n (x + offset, y + offset),\n (x + offset, y - offset),]",
"def generatePoints(normals, num_points, noise_b, sq_side):\n N = np.sum(num_points)\n [D, S] = normals.shape\n X = np.zeros([D, N])\n ss_ind = np.zeros([N, 1])\n k = 0\n for ss in range(S):\n X_tmp = np.vstack((2 * (np.random.uniform(0, 1, [1, num_points[ss]]) - 0.5) * noise_b,\n 2 * (np.random.uniform(0, 1, [(D - 1), num_points[ss]]) - 0.5) * sq_side))\n SVD = np.linalg.svd(\n np.eye(D) - (1 / np.sqrt(np.dot(normals[:, ss], normals[:, ss]))) * np.outer(normals[:, ss], normals[:, ss]))\n U = np.fliplr(SVD[0])\n X_tmp = np.matmul(U, X_tmp)\n X[:, k:(k + num_points[ss])] = X_tmp\n ss_ind[k:(k + num_points[ss])] = ss * np.ones((num_points[ss], 1))\n k = k + num_points[ss]\n return X, ss_ind",
"def SplitIntoPolygons(shape):\n ret = []\n this_polygon = []\n restart_indices = set(shape.parts)\n for idx, point in enumerate(shape.points):\n if idx in restart_indices:\n if this_polygon:\n ret.append(this_polygon)\n this_polygon = [[point[0], point[1]]]\n else:\n this_polygon.append([point[0], point[1]])\n if this_polygon:\n ret.append(this_polygon)\n return ret",
"def __draw_polygon(self, event, klick):\n global creating_polygon\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n if not klick and len(self.polygon_points) >= 2:\n c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n distanceX = curX - c_r_x\n distanceY = curY - c_r_y\n if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:\n return\n image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))\n self.polygon_points.extend((image_relative_x, image_relative_y))\n self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,\n outline='magenta', width=1,\n activewidth=2))\n if not creating_polygon: # start with a new polygon\n creating_polygon = True\n return\n else: # draw a line between the last points\n c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))\n c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],\n fill='magenta', width=2))",
"def create_points(data):\n #TODO list comprehension\n for row in data:\n\n if row[\"x\"] and row[\"y\"]:\n try:\n row[\"geometry\"] = point.Point(float(row[\"x\"]), float(row[\"y\"]))\n except:\n row[\"geometry\"] = None\n else:\n row[\"geometry\"] = None\n\n return data",
"def point_to_polygon_geojson(g):\n point_coordinates = g['geometry']['coordinates']\n polygon_geojson = {\n 'type': 'Feature',\n 'properties': g['properties'],\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [\n [point_coordinates, point_coordinates, point_coordinates, point_coordinates]\n ]\n }\n }\n return polygon_geojson",
"def draw_filled_polygon(*points):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n newpoints = []\r\n for x in range(0, len(points), 2):\r\n pt = Point(points[x], points[x+1])\r\n newpoints += [ pt ]\r\n polygon = Polygon(*newpoints)\r\n _set_filled(polygon)\r\n _canvas.add(polygon)",
"def polygon(self, relative_div=None):\n if self.__node is None:\n return []\n\n a = self.__node.pos\n b = (self.__node.pos[0] + self.__node.size[0], self.__node.pos[1])\n c = (self.__node.pos[0] + self.__node.size[0], self.__node.pos[1] + self.__node.size[1])\n d = (self.__node.pos[0], self.__node.pos[1] + self.__node.size[1])\n\n if relative_div is not None:\n return [a-relative_div.pos, b-relative_div.pos, c-relative_div.pos, d-relative_div.pos, a-relative_div.pos]\n return [a, b, c, d, a]",
"def calculate_points(self, component):\n # find selection range on source plot\n x_start, x_end = self._get_selection_screencoords()\n if x_start > x_end:\n x_start, x_end = x_end, x_start\n\n y_end = self.source.y\n y_start = self.source.y2\n\n left_top = np.array([x_start, y_end])\n left_mid = np.array([x_start, y_start])\n right_top = np.array([x_end, y_end])\n right_mid = np.array([x_end, y_start])\n\n # Offset y because we want to avoid overlapping the trapezoid with the topmost\n # pixels of the destination plot.\n y = self.destination.y - 1\n\n left_end = np.array([self.destination.x, y])\n right_end = np.array([self.destination.x2, y])\n\n polygon = np.array((left_end, left_mid, left_top,\n right_top, right_mid, right_end))\n left_line = np.array((left_top, left_mid, left_end))\n right_line = np.array((right_end, right_mid, right_top))\n\n return left_line, right_line, polygon",
"def simplices2polytopes(points, triangles):\n polytopes = []\n for triangle in triangles:\n logger.debug('Triangle: ' + str(triangle))\n triangle_vertices = points[triangle, :]\n logger.debug('\\t triangle points: ' +\n str(triangle_vertices))\n poly = qhull(triangle_vertices)\n logger.debug('\\n Polytope:\\n:' + str(poly))\n polytopes += [poly]\n return polytopes",
"def generate_square_vertices(geom):\n unit = geom.pix_x.unit\n width = geom.pixel_width.to_value(unit) / 2\n x = geom.pix_x.to_value(unit)\n y = geom.pix_y.to_value(unit)\n\n x_offset = width[:, np.newaxis] * np.array([-1, -1, 1, 1])\n y_offset = width[:, np.newaxis] * np.array([1, -1, -1, 1])\n\n x = x[:, np.newaxis] + x_offset\n y = y[:, np.newaxis] + y_offset\n return x, y",
"def calculate_points(self, component):\n # find selection range on source plot\n x_start, x_end = self._get_selection_screencoords()\n if x_start > x_end:\n x_start, x_end = x_end, x_start\n\n y_end = self.source.y\n y_start = self.source.y2\n\n left_top = np.array([x_start, y_start])\n left_mid = np.array([x_start, y_end])\n right_top = np.array([x_end, y_start])\n right_mid = np.array([x_end, y_end])\n\n # Offset y because we want to avoid overlapping the trapezoid with the topmost\n # pixels of the destination plot.\n y = self.destination.y2 + 1\n\n left_end = np.array([self.destination.x, y])\n right_end = np.array([self.destination.x2, y])\n\n polygon = np.array((left_top, left_mid, left_end,\n right_end, right_mid, right_top))\n left_line = np.array((left_top, left_mid, left_end))\n right_line = np.array((right_end, right_mid, right_top))\n\n return left_line, right_line, polygon",
"def polygon(self, sides, x0, y0, r, color, rotate=0):\n coords = []\n theta = radians(rotate)\n n = sides + 1\n for s in range(n):\n t = 2.0 * pi * s / sides + theta\n coords.append([int(r * cos(t) + x0), int(r * sin(t) + y0)])\n\n # Cast to python float first to fix rounding errors\n self.lines(coords, color=color)",
"def get_quad_points():\n points = np.array(\n [[0.333333333333333333333333333333, 0.333333333333333333333333333333],\n [0.950275662924105565450352089520, 0.024862168537947217274823955239],\n [0.024862168537947217274823955239, 0.950275662924105565450352089520],\n [0.024862168537947217274823955239, 0.024862168537947217274823955239],\n [0.171614914923835347556304795551, 0.414192542538082326221847602214],\n [0.414192542538082326221847602214, 0.171614914923835347556304795551],\n [0.414192542538082326221847602214, 0.414192542538082326221847602214],\n [0.539412243677190440263092985511, 0.230293878161404779868453507244],\n [0.230293878161404779868453507244, 0.539412243677190440263092985511],\n [0.230293878161404779868453507244, 0.230293878161404779868453507244],\n [0.772160036676532561750285570113, 0.113919981661733719124857214943],\n [0.113919981661733719124857214943, 0.772160036676532561750285570113],\n [0.113919981661733719124857214943, 0.113919981661733719124857214943],\n [0.009085399949835353883572964740, 0.495457300025082323058213517632],\n [0.495457300025082323058213517632, 0.009085399949835353883572964740],\n [0.495457300025082323058213517632, 0.495457300025082323058213517632],\n [0.062277290305886993497083640527, 0.468861354847056503251458179727],\n [0.468861354847056503251458179727, 0.062277290305886993497083640527],\n [0.468861354847056503251458179727, 0.468861354847056503251458179727],\n [0.022076289653624405142446876931, 0.851306504174348550389457672223],\n [0.022076289653624405142446876931, 0.126617206172027096933163647918],\n [0.851306504174348550389457672223, 0.022076289653624405142446876931],\n [0.851306504174348550389457672223, 0.126617206172027096933163647918],\n [0.126617206172027096933163647918, 0.022076289653624405142446876931],\n [0.126617206172027096933163647918, 0.851306504174348550389457672223],\n [0.018620522802520968955913511549, 0.689441970728591295496647976487],\n [0.018620522802520968955913511549, 0.291937506468887771754472382212],\n [0.689441970728591295496647976487, 0.018620522802520968955913511549],\n [0.689441970728591295496647976487, 0.291937506468887771754472382212],\n [0.291937506468887771754472382212, 0.018620522802520968955913511549],\n [0.291937506468887771754472382212, 0.689441970728591295496647976487],\n [0.096506481292159228736516560903, 0.635867859433872768286976979827],\n [0.096506481292159228736516560903, 0.267625659273967961282458816185],\n [0.635867859433872768286976979827, 0.096506481292159228736516560903],\n [0.635867859433872768286976979827, 0.267625659273967961282458816185],\n [0.267625659273967961282458816185, 0.096506481292159228736516560903],\n [0.267625659273967961282458816185, 0.635867859433872768286976979827]]);\n\n w = np.array(\n [0.051739766065744133555179145422,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 
0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190])*0.5;\n quad_x = np.copy(points[:,0])\n quad_y = np.copy(points[:,1])\n return (quad_x, quad_y, w)",
"def begin_poly(self):\n self._poly = [self._position]\n self._creatingPoly = True",
"def __repr__(self):\n return (f'RegularPoly({self.vert_count}, {self.radius})')",
"def from_polyfile(name):\n\n from anuga.utilities.numerical_tools import anglediff\n from math import pi\n import os.path\n root, ext = os.path.splitext(name)\n\n if ext == 'poly':\n filename = name\n else:\n filename = name + '.poly'\n\n\n fid = open(filename)\n\n points = [] #x, y\n values = [] #z\n ##vertex_values = [] #Repeated z\n triangles = [] #v0, v1, v2\n\n lines = fid.readlines()\n\n keyword = lines[0].strip()\n msg = 'First line in .poly file must contain the keyword: POINTS'\n assert keyword == 'POINTS', msg\n\n offending = 0\n i = 1\n while keyword == 'POINTS':\n line = lines[i].strip()\n i += 1\n\n if line == 'POLYS':\n keyword = line\n break\n\n fields = line.split(':')\n assert int(fields[0]) == i-1, 'Point indices not consecutive'\n\n #Split the three floats\n xyz = fields[1].split()\n\n x = float(xyz[0])\n y = float(xyz[1])\n z = float(xyz[2])\n\n points.append([x, y])\n values.append(z)\n\n\n k = i\n while keyword == 'POLYS':\n line = lines[i].strip()\n i += 1\n\n if line == 'END':\n keyword = line\n break\n\n\n fields = line.split(':')\n assert int(fields[0]) == i-k, 'Poly indices not consecutive'\n\n #Split the three indices\n vvv = fields[1].split()\n\n i0 = int(vvv[0])-1\n i1 = int(vvv[1])-1\n i2 = int(vvv[2])-1\n\n #Check for and exclude degenerate areas\n x0 = points[i0][0]\n y0 = points[i0][1]\n x1 = points[i1][0]\n y1 = points[i1][1]\n x2 = points[i2][0]\n y2 = points[i2][1]\n\n area = abs((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n if area > 0:\n\n #Ensure that points are arranged in counter clock-wise order\n v0 = [x1-x0, y1-y0]\n v1 = [x2-x1, y2-y1]\n v2 = [x0-x2, y0-y2]\n\n a0 = anglediff(v1, v0)\n a1 = anglediff(v2, v1)\n a2 = anglediff(v0, v2)\n\n\n if a0 < pi and a1 < pi and a2 < pi:\n #all is well\n j0 = i0\n j1 = i1\n j2 = i2\n else:\n #Swap two vertices\n j0 = i1\n j1 = i0\n j2 = i2\n\n triangles.append([j0, j1, j2])\n ##vertex_values.append([values[j0], values[j1], values[j2]])\n else:\n offending +=1\n\n log.critical('Removed %d offending triangles out of %d'\n % (offending, len(lines)))\n return points, triangles, values",
"def addPoint(self, p):\n p = np.asarray(p)\n idx = len(self.coords)\n # print(\"coords[\", idx,\"] ->\",p)\n self.coords.append(p)\n\n # Search the triangle(s) whose circumcircle contains p\n bad_triangles = []\n for T in self.triangles:\n # Choose one method: inCircleRobust(T, p) or inCircleFast(T, p)\n if self.inCircleFast(T, p):\n bad_triangles.append(T)\n\n # Find the CCW boundary (star shape) of the bad triangles,\n # expressed as a list of edges (point pairs) and the opposite\n # triangle to each edge.\n boundary = []\n # Choose a \"random\" triangle and edge\n T = bad_triangles[0]\n edge = 0\n # get the opposite triangle of this edge\n while True:\n # Check if edge of triangle T is on the boundary...\n # if opposite triangle of this edge is external to the list\n tri_op = self.triangles[T][edge]\n if tri_op not in bad_triangles:\n # Insert edge and external triangle into boundary list\n boundary.append((T[(edge+1) % 3], T[(edge-1) % 3], tri_op))\n\n # Move to next CCW edge in this triangle\n edge = (edge + 1) % 3\n\n # Check if boundary is a closed loop\n if boundary[0][0] == boundary[-1][1]:\n break\n else:\n # Move to next CCW edge in opposite triangle\n edge = (self.triangles[tri_op].index(T) + 1) % 3\n T = tri_op\n\n # Remove triangles too near of point p of our solution\n for T in bad_triangles:\n del self.triangles[T]\n del self.circles[T]\n\n # Retriangle the hole left by bad_triangles\n new_triangles = []\n for (e0, e1, tri_op) in boundary:\n # Create a new triangle using point p and edge extremes\n T = (idx, e0, e1)\n\n # Store circumcenter and circumradius of the triangle\n self.circles[T] = self.circumcenter(T)\n\n # Set opposite triangle of the edge as neighbour of T\n self.triangles[T] = [tri_op, None, None]\n\n # Try to set T as neighbour of the opposite triangle\n if tri_op:\n # search the neighbour of tri_op that use edge (e1, e0)\n for i, neigh in enumerate(self.triangles[tri_op]):\n if neigh:\n if e1 in neigh and e0 in neigh:\n # change link to use our new triangle\n self.triangles[tri_op][i] = T\n\n # Add triangle to a temporal list\n new_triangles.append(T)\n\n # Link the new triangles each another\n N = len(new_triangles)\n for i, T in enumerate(new_triangles):\n self.triangles[T][1] = new_triangles[(i+1) % N] # next\n self.triangles[T][2] = new_triangles[(i-1) % N] # previous",
"def compute_all_jxy(polygon=None):\n expressions = []\n symmetric = []\n\n # given a 12-gon, we do the following:\n # polygon = Symbolic12Gon()\n # polygon = make_regular()\n if polygon is None:\n polygon = make_any_gon()\n # polygon = make_assumption_gon()\n\n # print(polygon.vertices)\n for i in range(6):\n print(i)\n # translate such that this point is the origin\n# polygon = polygon.translate(polygon.vertices[i])\n# print(polygon)\n # shear so that the diagonal we are considering is vertical\n try:\n q = polygon.vertices[i].qx_to_shear_by(polygon.vertices[i+1])\n# print(\"q1:\", q.rational(D=3), q.irrational(D=3))\n except ZeroDivisionError:\n print(\"-------\")\n print(\"division by 0!\")\n print(\"-------\")\n continue\n\n sheared_polygon = polygon.shear_x_zero(q)\n# print(sheared_polygon)\n# print(\"test:\", sheared_polygon.vertices[i] - sheared_polygon.vertices[i+1])\n w, h = sheared_polygon.get_cylinder(i)\n # print(\"h: \",h.full_simplify())\n# print(\"shear 1 w: \",w.full_simplify())\n # print(len(sheared_polygon.vertices))\n# print(sheared_polygon.vertices[i])\n # shear again so that the edge that we consider is horizontal\n try:\n q = sheared_polygon.vertices[i].qy_to_shear_by(sheared_polygon.vertices[(i + 7) % 12])\n# print(sheared_polygon.vertices[i], sheared_polygon.vertices[(i + 7) % 12])\n# print(\"q2:\", q.rational(D=3), q.irrational(D=3))\n except ZeroDivisionError:\n print(\"-------\")\n print(\"division by 0!\")\n print(\"-------\")\n continue\n\n twice_sheared = sheared_polygon.shear_y_zero(q)\n\n # rescale such that the modulus of the vertical cylinder is rational\n w, h = twice_sheared.get_cylinder(i)\n# print(\"shear 2 h: \",h.full_simplify())\n# print(\"shear 2 w: \",w.full_simplify())\n # print(w.y, h.x)\n stretch_factor = w.x/h.y # this should be reciprocated, but we just care it is rational\n # print(stretch_factor)\n stretched_polygon = sheared_polygon.stretch_y(stretch_factor)\n\n # compute Jxy\n jxy = stretched_polygon.jxy()\n expressions.append(jxy)\n symmetric.append((jxy[1], jxy[2]))\n\n return expressions, symmetric",
"def test_polygon_to_vertex_arrays(self):\n\n these_vertex_x_coords, these_vertex_y_coords = (\n skeleton_lines._polygon_to_vertex_arrays(POLYGON_OBJECT_XY))\n\n self.assertTrue(numpy.allclose(\n these_vertex_x_coords, VERTEX_X_COORDS, atol=TOLERANCE))\n self.assertTrue(numpy.allclose(\n these_vertex_y_coords, VERTEX_Y_COORDS, atol=TOLERANCE))",
"def drawPolygon(self,id,points,algorithm):\n if id in self.elements.keys():\n print(\"The id for the polygon has been registered! Please use another one\")\n return\n for i, p in enumerate(points):\n x=p[0]; y=self.h-1-p[1]\n try:\n self.checkInBound(x,0); self.checkInBound(y,1)\n except AssertionError as e:\n # print self.w,self.h,x,y\n print(\"Some value is out of bound! Please check your input\")\n return\n points=[(self.h-1-p[1],p[0]) for p in points]\n polygonEle=Polygon(id,points,algorithm,self.drawColor)\n self.elements[id]=polygonEle\n self.sync=False",
"def polygon(t, length, n):\n for i in range(n):\n t.fd(length)\n t.lt(360/n)",
"def plot_polygon(polygon, size_points_distrib=50):\n # Get the points\n list_points = list(polygon.exterior.coords)\n distances = np.array(scipy.spatial.distance.euclidean([elt[0] for elt in list_points], [elt[1] for elt in list_points]))\n avg_dist = distances.mean()\n\n # Get the boundaries\n minx, miny, maxx, maxy = polygon.bounds\n box_points = box(minx, miny, maxx, maxy, ccw=True)\n\n fig, ax = plt.subplots(figsize=(10, 10))\n\n # Box\n plt.scatter(*zip(*list(box_points.exterior.coords)), color='black', linestyle=\"--\", alpha=0.2)\n plt.plot(*zip(*list(box_points.exterior.coords)), color='black', linestyle=\"--\", alpha=0.2)\n\n # Polygon\n plt.scatter(*zip(*list_points), color='blue')\n plt.plot(*zip(*list(list_points)), color='blue', linestyle=\"-.\", alpha=0.2)\n ax.set(xlim=[minx, maxx])\n ax.set(ylim=[miny, maxy])\n\n # Limits\n rdm_points = gen_rdm_points_square(polygon, size_points_distrib)\n # creates mask\n is_in_distrib = point_in_polygons(polygon, rdm_points)\n print(rdm_points[is_in_distrib])\n\n # Points in\n x_in, y_in = zip(*rdm_points[is_in_distrib])\n plt.scatter(x_in, y_in, color='green', alpha=0.2, marker=\"+\")\n\n # Points out\n x_out, y_out = zip(*rdm_points[~is_in_distrib])\n plt.scatter(x_out, y_out, color='red', alpha=0.2, marker=\"+\")\n\n plt.show()",
"def generate_points(num_points):\n for i in xrange(0, num_points):\n pass",
"def gpoly_encode(points):\n try:\n import gpolyencode\n encoder = gpolyencode.GPolyEncoder()\n except Exception as err:\n return dict(error='%s: %s' % (type(err), err, ))\n else:\n return encoder.encode(points)",
"def addPoly(self,p):\n for t in triangulate(p):\n self.addTri(t)\n return self",
"def get_points(self, npoints: int):\n\n R = sorted(np.random.rand(npoints) * 2. * np.pi)\n\n xx = self.cx + self.a * np.cos(R) * np.cos(self.angle_rad) - self.b * np.sin(R) * np.sin(\n self.angle_rad)\n\n yy = self.cy + self.a * np.cos(R) * np.sin(self.angle_rad) + self.b * np.sin(R) * np.cos(\n self.angle_rad)\n\n return R, xx, yy",
"def create_new_polygon(self, coords, **options):\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.poly_border_width\n if 'fill' not in options:\n options['fill'] = ''\n\n shape_id = self.create_polygon(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POLYGON, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id",
"def __init__(self, points, n_x=1, n_y=1, n_z=1, size_x=None, size_y=None, size_z=None, regular_bounding_box=True):\n self._points = points\n self.x_y_z = [n_x, n_y, n_z]\n self.sizes = [size_x, size_y, size_z]\n self.regular_bounding_box = regular_bounding_box",
"def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])",
"def erode_polygon(points, amount_increase):\n expanded_points = []\n for index, point in enumerate(points):\n prev_point = points[(index - 1) % len(points)]\n next_point = points[(index + 1) % len(points)]\n prev_edge = np.subtract(point, prev_point)\n next_edge = np.subtract(next_point, point)\n\n prev_normal = ((-1 * prev_edge[1]), (1 * prev_edge[0]))\n prev_normal = np.divide(prev_normal, np.linalg.norm(prev_normal))\n next_normal = ((-1 * next_edge[1]), (1 * next_edge[0]))\n next_normal = np.divide(next_normal, np.linalg.norm(next_normal))\n\n bisect = np.add(prev_normal, next_normal)\n bisect = np.divide(bisect, np.linalg.norm(bisect))\n\n cos_theta = np.dot(next_normal, bisect)\n hyp = amount_increase / cos_theta\n\n new_point = np.around(point + hyp * bisect)\n new_point = new_point.astype(int)\n new_point = new_point.tolist()\n expanded_points.append(new_point)\n return expanded_points",
"def generate_interpolated_points(point1, point2):\n points = connect(np.array([point2, point1]))\n return set(map(tuple, points))",
"def get_regular_points(self, npoints=None, device=\"gpu0\"):\n if not self.npoints == npoints:\n self.mesh = pymesh.generate_icosphere(1, [0, 0, 0], 4) # 2562 vertices\n self.vertex = torch.from_numpy(self.mesh.vertices).to(device).float()\n self.num_vertex = self.vertex.size(0)\n self.vertex = self.vertex.transpose(0,1).contiguous().unsqueeze(0)\n self.npoints = npoints\n\n return Variable(self.vertex.to(device))",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)",
"def sub_polytope_generator(self):\n pointset = set(self.integral_points())\n for v in self.vertices():\n sub = list(pointset.difference([v]))\n yield LatticePolytope_PPL(*sub)",
"def test_random_polygon(self):\n p = g.trimesh.path.polygons.random_polygon()\n assert p.area > 0.0\n assert p.is_valid",
"def draw_polygon():\n\n glBegin(GL_POLYGON)\n glColor3f(0.3, 0.4, 1.0)\n i = 0\n while i <= (matrix_order - 1):\n x = vertices[0][i]\n y = vertices[1][i]\n # casting\n glVertex2f((float(x) / 2) + (width / 2), (float(y) / 2) + (height / 2))\n i = i + 1\n glEnd()"
] | [
"0.719469",
"0.7190829",
"0.7045325",
"0.70451605",
"0.6851909",
"0.67886734",
"0.67724663",
"0.6691914",
"0.66057116",
"0.6592724",
"0.6576254",
"0.64651835",
"0.6458977",
"0.6458977",
"0.6396571",
"0.6344055",
"0.62720823",
"0.62539804",
"0.6227694",
"0.6226541",
"0.6224374",
"0.6174987",
"0.6169016",
"0.61689234",
"0.615547",
"0.6117171",
"0.6106361",
"0.6096897",
"0.6089805",
"0.6078069",
"0.6072685",
"0.6071296",
"0.6033277",
"0.60273343",
"0.602497",
"0.6021968",
"0.6020969",
"0.60194135",
"0.5991709",
"0.59643936",
"0.5961047",
"0.59537673",
"0.59510845",
"0.59508735",
"0.59507877",
"0.5945553",
"0.59328276",
"0.59251875",
"0.59185",
"0.59052306",
"0.5858795",
"0.58566564",
"0.585593",
"0.5832674",
"0.5825397",
"0.5785073",
"0.5770432",
"0.5763424",
"0.5754704",
"0.5752622",
"0.57500726",
"0.5739959",
"0.573561",
"0.57316965",
"0.5730619",
"0.5725237",
"0.5719068",
"0.5710874",
"0.57085145",
"0.57065195",
"0.57035196",
"0.570272",
"0.56936896",
"0.56922615",
"0.56809586",
"0.5672776",
"0.56712264",
"0.566878",
"0.56595695",
"0.5653728",
"0.56513613",
"0.564882",
"0.56473774",
"0.56443805",
"0.5639449",
"0.5638998",
"0.5632039",
"0.5626533",
"0.56214726",
"0.561706",
"0.5616833",
"0.5616007",
"0.5613596",
"0.56123096",
"0.5611693",
"0.56077874",
"0.5607095",
"0.56068295",
"0.5606318",
"0.56041265"
] | 0.62863386 | 16 |
Longest run test cases with more than one target | def test_longest_run_mult(self):
self.assertTrue(geneutil.longestRun('QQQQN','QN')==5)
self.assertTrue(geneutil.longestRun('QQANNQ','QN',1)==6)
self.assertTrue(geneutil.longestRun('QQNPPQ','QN',1)==3)
self.assertTrue(geneutil.longestRun('QQQAANN','QN',2)==7)
self.assertTrue(geneutil.longestRun('ANQNQAN','QN',1)==6)
self.assertTrue(geneutil.longestRun('ANQNQANP','QN',1)==6) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getResult(targets, i=None):",
"def test_which_targets():\n num_multi_targets = 0\n for which_targets_day in which_targets:\n # All inputs have a label\n assert np.all(which_targets_day.sum(axis=1) > 0)\n # No inputs have more than 3 targets\n assert np.all(which_targets_day.sum(axis=1) < 4)\n\n num_multi_targets += np.sum(which_targets_day.sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0",
"def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE",
"def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE",
"def run_automatic_tester():\n number_of_target_maps = len(os.listdir(TargetDetectionTesterSettings.TARGET_DETECTION_REPORT_JSON_FILE_SAVE_PATH))\n overall_true_positive_count = 0\n overall_false_positive_count = 0\n overall_target_count = 0\n\n for index_0 in range(number_of_target_maps):\n\n answer_sheet = json.load(open(os.path.join(TargetDetectionTesterSettings.TARGET_MAP_ANSWER_SHEET_PATH, str(index_0 + 1) + \".json\")))\n answer_list = []\n\n for index_1 in range(len(answer_sheet[\"targets\"])):\n answer_list.append((answer_sheet[\"targets\"][index_1][\"target_center_coordinates\"][0], answer_sheet[\"targets\"][index_1][\"target_center_coordinates\"][1]))\n overall_target_count += len(answer_list)\n\n target_detection_result = json.load(open(os.path.join(TargetDetectionTesterSettings.TARGET_DETECTION_REPORT_JSON_FILE_SAVE_PATH, str(index_0 + 1) + \".json\")))\n result_list = []\n\n for index_2 in range(len(target_detection_result[\"image_processing_results\"])):\n result_list.append((target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][0] + (target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][2] / 2), target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][1] + (target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][3] / 2)))\n\n current_true_positive_count = 0\n current_false_positive_count = 0\n banned_index_list = []\n\n for index_3 in range(len(answer_list)):\n true_positive_found = False\n\n for index_4 in range(len(result_list)):\n is_index_4_banned = False\n\n for index_5 in range(len(banned_index_list)):\n if (index_4 == banned_index_list[index_5]):\n is_index_4_banned = True\n\n if (is_index_4_banned == True):\n continue\n\n correct_target_center_x = answer_list[index_3][0]\n correct_target_center_y = answer_list[index_3][1]\n\n detected_target_center_x = result_list[index_4][0]\n detected_target_center_y = result_list[index_4][1]\n\n if ((abs(correct_target_center_x - detected_target_center_x) <= 20) and (abs(correct_target_center_y - detected_target_center_y) <= 20)):\n current_true_positive_count += 1\n banned_index_list.append(index_4)\n true_positive_found = True\n continue\n\n current_false_positive_count = len(result_list) - current_true_positive_count\n\n overall_true_positive_count += current_true_positive_count\n overall_false_positive_count += current_false_positive_count\n\n percentage = 100 * float(overall_true_positive_count) / (overall_target_count)\n\n TargetDetectionTesterLogger.log(\"--------------------------------------------------\")\n TargetDetectionTesterLogger.log(\"Total True Positive Count: \" + str(overall_true_positive_count))\n TargetDetectionTesterLogger.log(\"Total False Positive Count: \" + str(overall_false_positive_count))\n TargetDetectionTesterLogger.log(\"Percentage of Successfully Detected Targets: \" + str(percentage) + \"%\")\n TargetDetectionTesterLogger.log(\"--------------------------------------------------\")",
"def test_target_number_less_than_alp(self):\n alp = list(range(10))\n targets = generate_targets(alp, 5)\n self.assertEqual(len(targets), 5)\n self.assertEqual(len(targets), len(set(targets)))",
"def target_multi_objective1(\n config: Configuration,\n seed: int,\n # instance: str,\n # budget: float,\n) -> list[float]:\n return [seed, seed]",
"def run_mcts(self, runs_per_round):\n for i in range(runs_per_round):\n self.select(self.env, 'r')\n self.env_reset()\n counts = [self.Nsa[('r', a)] for a in range(self.actions)]\n # print(\"counts \", counts)\n # print(\"Q-values\", [self.Qsa[('r', a)] for a in range(self.actions)])\n # print()\n return np.argmax(counts)",
"def test_target_greater_than_alp(self):\n alp = list(range(5))\n targets = generate_targets(alp, 10)\n self.assertEqual(len(targets), 10)\n\n counts = Counter(targets)\n\n for item in alp:\n self.assertEqual(counts[item], 2)",
"def get_n_best(self):\n pass",
"def get_test_suite():\n # max for a and p\n MAX = 2**31 - 1 # INT32_MAX, max value for a and p\n sqrt_MAX = floor(sqrt(MAX)) # max for n\n \n # first test suite\n a_list = [0, 0, 0, 1, 1, 2, 7, 2, 1, 0, 0, 3, 1, 0, 0, 0, 1]\n p_list = [5, 3, 3, 0, 0, 0, 8, 1, 1, 0, 0, 0, 0, 1, 2, 0, 1]\n n_list = [7, 2, 2, 7, 3, 3, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1]\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n \n # second test suite\n a_list = [3, 5, 23, 25, 100, 200, MAX, MAX-1, MAX]\n p_list = [10, 5, 23, 25, 100, 200, 1000, 100, 500]\n n_list = [23, 1, 0, 7, 1, 100, sqrt_MAX, 3, 23]\n \n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n\n # third test suite\n a_list = []\n p_list = []\n n_list = []\n\n # keep a = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n # keep p = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep n = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(0)\n # keep a = 0 and p = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep all non-zero\n for _ in range(30):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite",
"def main(keep_best_count, mutation_factor, rounds, target, stagnate):\n ways = [range(len(DISTANCES))]\n result = {'round':0,'cost':None}\n for i in range(rounds):\n ways = mutate(ways,mutation_factor)\n best = []\n for way in ways:\n best.append((rate(way),way))\n best.sort()\n if VERBOSITY:\n for way in best:\n print way\n print \"Round %d best way is %s\" % (i+1, best[0][0])\n # break if we hit the target\n if best[0][0] <= target:\n print \"Hit Target\"\n break\n # break if we stagnate to long\n if result['cost'] is None or best[0][0] <result['cost']:\n result['cost'] = best[0][0]\n result['round'] = i+1\n elif result['round'] + stagnate <= i+1:\n print \"Stagnate to long\"\n break\n ways = list(b[1] for b in best[0:keep_best_count])\n print \"\"\n print \"best found order with cost=%d\" % best[0][0]\n print ' '.join(list(NAMES[i] for i in best[0][1]))\n print \"\"",
"def solutionByOthers(self, nums, target):\n nums.sort()\n results = []\n\n self._findNSum( nums, target, 4, [], results )\n return results",
"def count_target(self):\n tally = {}\n for obj in self.target:\n tally[obj] = 0\n\n ind = 0\n for label in self.labelList:\n filename = self.pathLabel + label\n f = open(filename, 'r')\n content = f.read().split('\\n')\n for line in content:\n items = line.split(' ')\n if items[0] in self.target:\n tally[items[0]] += 1\n f.close()\n if ind % 100 == 0:\n print(f'[COUNT] {ind} of {len(self.labelList)} processed')\n ind += 1\n \n print('[COUNT] done counting targets in dataset')\n print(tally)",
"def findmaxidx(datasets, target='atom_label'):\n\n if target == 'atom_label':\n return _findmaxidx(datasets, 0)\n elif target == 'wle_label':\n return _findmaxidx(datasets, 2)",
"def test_where_targets():\n num_multi_targets = 0\n for where_targets_day in where_targets:\n # All inputs have a label\n assert np.all(where_targets_day.sum(axis=3).sum(axis=3).sum(axis=1).sum(axis=1) > 0)\n num_multi_targets += np.sum((where_targets_day.sum(axis=3).sum(axis=3).sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0",
"def count_targets(searchList):\n targets = {}\n for x in searchList:\n loVal = -10000 - x\n hiVal = 10000 - x\n loInd = bisect_left(searchList, loVal)\n hiInd = bisect_right(searchList, hiVal)\n for y in searchList[loInd:hiInd]:\n if y == x:\n continue\n t = x + y\n targets[t] = 1\n return len(targets)",
"def gen_jobs(lower_idx, upper_idx, target=\"llvm\"):\n return [LorienTestWorkload(target, idx).to_job() for idx in range(lower_idx, upper_idx)]",
"def evaluate(self, test_data):\n test_results = [(np.argmax(self.feedforward(x)), np.argmax(y)) #argmax 返回最大数的索引\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)",
"def evaluate(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)",
"def evaluate1_6(self, test_data):\r\n test_results = [(np.argmax(self.feedforward1_6(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)",
"def evaluate2_5_1(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return test_results",
"def greedy_search(self,inputs,states=None,max_len=20):\n \n ids_list = list()\n for i in range(max_len):\n hiddens,states = self.lstm(inputs,states)\n outputs = self.linear(hiddens.squeeze(1))\n #Get the most likely integer to represent the token\n \n predicted = outputs.argmax(1)\n ids_list.append(predicted.item())\n inputs = self.embed(predicted)\n inputs = inputs.unsqueeze(1)\n return ids_list",
"def main():\n ngon_sols = find_all_ngon_sols()\n ngon_strs = set(str(ngon) for ngon in ngon_sols)\n ngon_ints = (int(ngon_str) for ngon_str in ngon_strs\n if len(ngon_str) == TARGET_LEN) \n\n print(\"Max ngon integer is {}\".format(max(ngon_ints)))",
"def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between %s and %s: %d\" % (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()",
"def count_targets(searchList):\n count = 0\n n = len(searchList)\n stop1 = time.time()\n for t in range(-10000, 10001):\n for x in searchList:\n if t - x <= x:\n break\n i = bisect_left(searchList, t - x, hi=n-1)\n if searchList[i] == t - x:\n count += 1\n break\n return count",
"def test_when_targets():\n num_multi_targets = 0\n for when_targets_day in when_targets:\n # All inputs have a label\n assert np.all(when_targets_day.sum(axis=1).sum(axis=1) > 0)\n\n num_multi_targets += np.sum((when_targets_day.sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0",
"def run(self, target: int) -> list:\n\n valid_values = range(0, 99)\n\n # This will be some sort of brute force attempt\n values = [0, 0]\n while values[0] < 100 and values[1] < 100:\n self.reset()\n# import pdb; pdb.set_trace()\n test = self._process(values[0], values[1])\n if test == target:\n break\n else:\n if values[0] <= 99:\n if values[1] < 99:\n values[1] += 1\n elif values[1] == 99 and values[0] < 99:\n values[0] += 1\n values[1] = 0\n\n else:\n raise OpCodeError(\"No value possible\")\n\n return 100 * values[0] + values[1]",
"def compute_splits(feature_df, target_col, max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)",
"def test_find_long_chains(self):\n # a --> d --> j g h --> i\n # b _/ c --> e --> f\n self._build_sample_graph()\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n # Adding short path\n sh = self.skill_graph.add(Skill.build('h', ''))\n si = self.skill_graph.add(Skill.build('i', ''))\n self.skill_graph.add_prerequisite(si.id, sh.id)\n # Making path longer\n sj = self.skill_graph.add(Skill.build('j', ''))\n self.skill_graph.add_prerequisite(sj.id, self.sd.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains(2)\n expected = [\n [self.sa.id, self.sd.id, sj.id],\n [self.sb.id, self.sd.id, sj.id],\n [self.sc.id, self.se.id, self.sf.id]\n ]\n self.assertEqual(sorted(expected), sorted(result))",
"def test_max_features_wo_gridsearch(self):\n X,Y,Z = self.create_bin_data()\n t = self.check_task('RFC nt=1;e=1;c=gini;mf=0.0001', X, Y, Z)\n self.assertEquals(t.parameters['max_features'], 1)",
"def correct(output, target, topk=(1,)):\n maxk = max(topk)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).sum(0, keepdim=True)\n res.append(correct_k)\n return res",
"def correct(output, target, topk=(1,)):\n maxk = max(topk)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).sum(0, keepdim=True)\n res.append(correct_k)\n return res",
"def test_find_long_chains_multiple(self):\n # a -> b -> c -> ... x\n # \\________________/\n self.skill_graph = SkillGraph.load()\n old_skill = self.skill_graph.add(Skill.build('o', ''))\n last_skill = self.skill_graph.add(Skill.build('l', ''))\n self.skill_graph.add_prerequisite(last_skill.id, old_skill.id)\n chain_ids = [old_skill.id]\n for index in range(CHAINS_MIN_LENGTH):\n new_skill = self.skill_graph.add(Skill.build(str(index), ''))\n chain_ids.append(new_skill.id)\n self.skill_graph.add_prerequisite(new_skill.id, old_skill.id)\n old_skill = new_skill\n self.skill_graph.add_prerequisite(old_skill.id, last_skill.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains()\n self.assertEqual([chain_ids], result)",
"def getbestnumberoftrees(features: ndarray, target: ndarray, limit:int) -> tuple:\n\n # Defining the initial accuracy value to compare with different number of trees in training\n accuracy = 0\n accuracyList = []\n\n for n in range(1, limit+1, 1):\n # Training\n trained_model = InternalRandomForest.modeltrain(features, target, n)\n\n # Calculating the percentual accuracy of the training\n accuracy_t = accuracy_score(target, trained_model.predict(features), normalize=True)\n\n # Build accuracy array for this set of number of trees\n accuracyList.append(accuracy_t)\n\n # Verifying if the current training is better than the last one\n if accuracy_t > accuracy:\n bestNumberTrees = n\n accuracy = accuracy_t\n\n # Obtain best trained model\n best_model = InternalRandomForest.modeltrain(features, target, bestNumberTrees)\n\n return bestNumberTrees, accuracyList, best_model",
"def evaluate1_5(self, test_data):\r\n test_results = [(np.argmax(self.feedforward1_5(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)",
"def test_remainder(self):\n alp = list(range(5))\n targets = generate_targets(alp, 12)\n\n counts = Counter(targets)\n for item in alp:\n self.assertGreaterEqual(counts[item], 2)\n self.assertLessEqual(counts[item], 3)",
"def calculate_appropriate_target(self):\n pass",
"def calculate_appropriate_target(self):\n pass",
"def num_correct(output, target, topk=(1,)):\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k)\n return res, batch_size",
"def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"",
"def test_generate_project_maximum_with_tvm(self) -> None:\n output_path = os.path.join(os.getcwd(), 'tmp')\n input_path = os.path.abspath(\n os.path.join(os.getcwd(),\n 'examples',\n 'classification',\n # 'lmnet_quantize_cifar10_stride_2.20180523.3x3',\n 'minimal_graph_with_shape.pb'))\n\n try:\n gp.run(input_path=input_path,\n dest_dir_path=output_path,\n project_name='unittest4',\n activate_hard_quantization=True,\n threshold_skipping=True,\n num_pe=16,\n use_tvm=True,\n use_onnx=False,\n debug=False,\n cache_dma=False,\n )\n finally:\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n\n print(\"Script test with maximum options including TVM passed!\")",
"def run_mcts(self):\n self.mcts.sigstop = False\n self.running_mcts = True\n\n self.mcts.search(max_time=self.max_time, c=self.exploration, verbose=True)\n\n self.running_mcts = False\n\n print(self.mcts.dump(self.mcts.root, 0, c=0))\n return self.get_top5()",
"def test_suite():\n test(sum_upto_first_even([1,3,2]),4)\n test(sum_upto_first_even([1,3,3]),7)\n test(sum_upto_first_even([2,3,3]),0)",
"def targets(self):\n\n # Targets that fail but shouldn't\n known_failing_targets = [\n # The following two targets lose out due to a resource collision, because `example_b` happens\n # to be first in the context, and test.junit mixes all classpaths.\n 'testprojects/maven_layout/resource_collision/example_b/src/test/java/org/pantsbuild/duplicateres/exampleb:exampleb',\n 'testprojects/maven_layout/resource_collision/example_c/src/test/java/org/pantsbuild/duplicateres/examplec:examplec',\n # TODO: This one has a missing dependency, but is intended to succeed... should it?\n 'testprojects/src/java/org/pantsbuild/testproject/thriftdeptest',\n # TODO(Eric Ayers): I don't understand why this fails\n 'testprojects/src/java/org/pantsbuild/testproject/jvmprepcommand:compile-prep-command',\n ]\n\n # Targets that are intended to fail\n negative_test_targets = [\n 'testprojects/maven_layout/provided_patching/leaf:fail',\n 'testprojects/src/antlr/python/test:antlr_failure',\n 'testprojects/src/java/org/pantsbuild/testproject/bundle:missing-files',\n 'testprojects/src/java/org/pantsbuild/testproject/compilation_warnings:fatal',\n 'testprojects/src/java/org/pantsbuild/testproject/dummies:compilation_failure_target',\n 'testprojects/src/java/org/pantsbuild/testproject/junit/earlyexit:tests',\n 'testprojects/src/java/org/pantsbuild/testproject/junit/failing/tests/org/pantsbuild/tmp/tests',\n 'testprojects/src/java/org/pantsbuild/testproject/junit/mixed/tests/org/pantsbuild/tmp/tests',\n 'testprojects/src/java/org/pantsbuild/testproject/missingdepswhitelist.*',\n 'testprojects/src/java/org/pantsbuild/testproject/missingdirectdepswhitelist:missingdirectdepswhitelist',\n 'testprojects/src/java/org/pantsbuild/testproject/missingjardepswhitelist:missingjardepswhitelist',\n 'testprojects/src/java/org/pantsbuild/testproject/runtime:compile-fail',\n 'testprojects/src/scala/org/pantsbuild/testproject/compilation_failure',\n 'testprojects/src/scala/org/pantsbuild/testproject/compilation_warnings:fatal',\n 'testprojects/src/thrift/org/pantsbuild/thrift_exports:C-without-exports',\n 'testprojects/src/thrift/org/pantsbuild/thrift_linter:',\n 'testprojects/src/java/org/pantsbuild/testproject/provided:c',\n 'testprojects/tests/java/org/pantsbuild/testproject/dummies:failing_target',\n 'testprojects/tests/java/org/pantsbuild/testproject/empty:',\n 'testprojects/tests/java/org/pantsbuild/testproject/fail256:fail256',\n 'testprojects/tests/python/pants/dummies:failing_target',\n 'testprojects/tests/scala/org/pantsbuild/testproject/non_exports:C',\n 'testprojects/src/scala/org/pantsbuild/testproject/exclude_direct_dep',\n # These don't pass without special config.\n 'testprojects/tests/java/org/pantsbuild/testproject/depman:new-tests',\n 'testprojects/tests/java/org/pantsbuild/testproject/depman:old-tests',\n 'testprojects/tests/java/org/pantsbuild/testproject/htmlreport:htmlreport',\n 'testprojects/tests/java/org/pantsbuild/testproject/parallel.*',\n 'testprojects/src/python/python_distribution/fasthello_with_install_requires.*'\n ]\n\n # May not succeed without java8 installed\n need_java_8 = [\n 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java8',\n 'testprojects/tests/java/org/pantsbuild/testproject/testjvms',\n 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight',\n 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight-test-platform',\n 'examples/src/java/org/pantsbuild/example/plugin',\n ]\n\n # Targets for testing timeouts. 
These should only be run during specific integration tests,\n # because they take a long time to run.\n timeout_targets = [\n 'testprojects/tests/python/pants/timeout:sleeping_target',\n 'testprojects/tests/java/org/pantsbuild/testproject/timeout:sleeping_target',\n # Called with test_pytest_run_integration\n 'testprojects/tests/python/pants/timeout:exceeds_timeout',\n 'testprojects/tests/python/pants/timeout:ignores_terminate',\n ]\n\n deliberately_conflicting_targets = [\n 'testprojects/src/python/interpreter_selection.*'\n ]\n\n simply_skip = [\n # Already tested at pants_test.backend.jvm.targets.test_jar_dependency_integration.JarDependencyIntegrationTest\n 'testprojects/3rdparty/org/pantsbuild/testprojects:testprojects',\n # Already tested in 'PantsRequirementIntegrationTest' and 'SetupPyIntegrationTest'.\n 'testprojects/pants-plugins/*',\n ]\n\n targets_to_exclude = (known_failing_targets + negative_test_targets + need_java_8 +\n timeout_targets + deliberately_conflicting_targets + simply_skip)\n exclude_opts = map(lambda target: '--exclude-target-regexp={}'.format(target),\n targets_to_exclude)\n\n # Run list with exclude options, then parse and sort output.\n pants_run = self.run_pants(['list', 'testprojects::', 'examples::'] + exclude_opts)\n self.assert_success(pants_run)\n return sorted(pants_run.stdout_data.split())",
"def __call__(self, output, target):\n maxk = max(self.topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in self.topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res",
"def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' \n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension",
"def main():\n\n argparser = ArgumentParser()\n argparser.add_argument('--datapath', '-D', type=str, help='Relative path to cwd of a local data file')\n argparser.add_argument('--attack_model', '-AM', type=str, default='ANY', choices=['RandomForest', 'LogReg', 'LinearSVC', 'SVC', 'KNN', 'ANY'])\n argparser.add_argument('--runconfig', '-RC', default='runconfig_mia.json', type=str, help='Path relative to cwd of runconfig file')\n argparser.add_argument('--outdir', '-O', default='outputs/test', type=str, help='Path relative to cwd for storing output files')\n args = argparser.parse_args()\n\n # Load runconfig\n with open(path.join(cwd, args.runconfig)) as f:\n runconfig = json.load(f)\n print('Runconfig:')\n print(runconfig)\n\n # Load data\n RawDF, metadata = load_local_data_as_df(path.join(cwd, args.datapath))\n dname = args.datapath.split('/')[-1]\n RawDF['ID'] = [f'ID{i}' for i in arange(len(RawDF))]\n RawDF = RawDF.set_index('ID')\n\n print(f'Loaded data {dname}:')\n print(RawDF.info())\n\n # Randomly select nt target records T = (t_1, ..., t_(nt))\n targetIDs = choice(list(RawDF.index), size=runconfig['nTargets'], replace=False).tolist()\n Targets = RawDF.loc[targetIDs, :]\n\n # Drop targets from sample population\n RawDFdropT = RawDF.drop(targetIDs)\n\n # Add a crafted outlier target to the evaluation set\n targetCraft = craft_outlier(RawDF, runconfig['sizeTargetCraft'])\n targetIDs.extend(list(set(targetCraft.index)))\n Targets = Targets.append(targetCraft)\n\n # Sample adversary's background knowledge RawA\n rawAidx = choice(list(RawDFdropT.index), size=runconfig['sizeRawA'], replace=False).tolist()\n\n # Sample k independent target test sets\n rawTindices = [choice(list(RawDFdropT.index), size=runconfig['sizeRawT'], replace=False).tolist() for nr in range(runconfig['nIter'])]\n\n # List of candidate generative models to evaluate\n gmList = []\n for gm, paramsList in runconfig['generativeModels'].items():\n if gm == 'IndependentHistogram':\n for params in paramsList:\n gmList.append(IndependentHistogram(*params))\n elif gm == 'BayesianNet':\n for params in paramsList:\n gmList.append(BayesianNet(*params))\n elif gm == 'PrivBayes':\n for params in paramsList:\n gmList.append(PrivBayes(*params))\n elif gm == 'CTGAN':\n for params in paramsList:\n gmList.append(CTGAN(metadata, *params))\n elif gm == 'PateGan':\n for params in paramsList:\n gmList.append(PateGan(metadata, *params))\n else:\n raise ValueError(f'Unknown GM {gm}')\n\n for GenModel in gmList:\n print(f'----- {GenModel.__name__} -----')\n\n FeatureList = [NaiveFeatureSet(GenModel.datatype), HistogramFeatureSet(GenModel.datatype, metadata), CorrelationsFeatureSet(GenModel.datatype, metadata), EnsembleFeatureSet(GenModel.datatype, metadata)]\n\n prior = {LABEL_IN: runconfig['prior']['IN'], LABEL_OUT: runconfig['prior']['OUT']}\n\n if args.attack_model == 'RandomForest':\n AttacksList = [MIAttackClassifierRandomForest(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LogReg':\n AttacksList = [MIAttackClassifierLogReg(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LinearSVC':\n AttacksList = [MIAttackClassifierLinearSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'SVC':\n AttacksList = [MIAttackClassifierSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'KNN':\n AttacksList = [MIAttackClassifierKNN(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'ANY':\n AttacksList = []\n for F in FeatureList:\n 
AttacksList.extend([MIAttackClassifierRandomForest(metadata, prior, F),\n MIAttackClassifierLogReg(metadata, prior, F),\n MIAttackClassifierKNN(metadata, prior, F)])\n else:\n raise ValueError(f'Unknown AM {args.attack_model}')\n\n # Run privacy evaluation under MIA adversary\n results = evaluate_mia(GenModel, AttacksList, RawDFdropT, Targets, targetIDs, rawAidx, rawTindices,\n runconfig['sizeRawT'], runconfig['sizeSynT'], runconfig['nSynT'],\n runconfig['nSynA'], runconfig['nShadows'], metadata)\n\n outfile = f\"{dname}{GenModel.__name__}MIA\"\n\n with open(path.join(f'{args.outdir}', f'{outfile}.json'), 'w') as f:\n json.dump(results, f, indent=2, default=json_numpy_serialzer)",
"def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)",
"def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)",
"def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth",
"def reduce_run():",
"def GetRunTargets(self):\n return list(self._run_target_index.keys())",
"def runTests(tests_dir, output_dir):\n\n runtime = 0\n os.makedirs(tests_dir, exist_ok=True)\n for test_case in os.listdir(tests_dir):\n print()\n print(\"Running test: \" + str(test_case))\n\n with open(tests_dir + test_case, \"r\") as f:\n tar, n = list(map(int, f.readline().split(\" \")))\n arr = list(map(int, f.readline().split(\" \")))\n\n start = timeit.default_timer()\n\n try:\n writeOutput(maxCombinationSum(tar, arr), test_case, output_dir)\n except KeyboardInterrupt:\n print(\"\\n\\tTest cancelled - KeyboardInterrupt\")\n except Exception as e:\n print(\"\\tError: \" + str(e))\n\n stop = timeit.default_timer()\n print(\"\\tTime for test: \" + str(stop - start) + \" seconds.\")\n\n runtime += (stop - start)\n\n if runtime == 0:\n print(\"No test case files found in tests directory.\\nPlease run solution from inside solution directory.\")\n else:\n print(\"\\nCompleted all tests in : \" + str(runtime) + \" seconds\")",
"def test_input_target_different():\n for day in range(len(departure_cameras)):\n which_targets_day = which_targets[day]\n when_targets_day = when_targets[day]\n where_targets_day = where_targets[day]\n departure_cameras_day = departure_cameras[day]\n # Which\n for departure_camera, target in zip(departure_cameras_day, which_targets_day):\n entrance_cameras = np.argwhere(target == 1) + 1\n assert departure_camera not in entrance_cameras\n # When\n for departure_camera, when_target in zip(departure_cameras_day, when_targets_day):\n target = when_target.sum(axis=1) > 1\n entrance_cameras = np.argwhere(target == 1) + 1\n assert departure_camera not in entrance_cameras\n # Where\n for departure_camera, where_target in zip(departure_cameras_day, where_targets_day):\n target = where_target.sum(axis=3).sum(axis=2).sum(axis=1) > 1\n entrance_cameras = np.argwhere(target == 1) + 1\n assert departure_camera not in entrance_cameras",
"def test_list_runs(self):\n pass",
"def test_group_exceed_max_testcases(self):\n for i in range(1, 31):\n testcase = test_utils.create_generic_testcase()\n testcase.crash_type = 'Heap-buffer-overflow'\n testcase.crash_state = 'abcdefgh' + str(i)\n testcase.project_name = 'project'\n testcase.one_time_crasher_flag = False\n\n # Attach actual issues to some testcases.\n if i in [3, 4, 5]:\n testcase.bug_information = '123'\n\n # Make some testcases unreproducible.\n if i in [1, 2, 3]:\n testcase.one_time_crasher_flag = True\n\n testcase.put()\n\n unrelated_testcase = test_utils.create_generic_testcase()\n\n grouper.group_testcases()\n\n testcase_ids = list(data_handler.get_open_testcase_id_iterator())\n\n # [1, 2] get removed since they are unreproducible testcases.\n # [3] is not removed since it has bug attached (even though unreproducible).\n # [6, 7, 8] are removed to account for max group size. Even though they\n # are reproducible, they are the ones with least weight.\n expected_testcase_ids = [3, 4, 5] + list(range(\n 9, 31)) + [unrelated_testcase.key.id()]\n self.assertEqual(expected_testcase_ids, testcase_ids)",
"def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n",
"def calculate_ec_targets_used(oclass, total_targets):\n data_shards, parity_shards, group_number = get_ec_data_parity_group(oclass)\n group_size = data_shards + parity_shards\n if group_number in ('x', 'X'):\n group_number = max(1, total_targets // group_size)\n return group_size * int(group_number)",
"def test_get_timeouts_with_maximum(self):\n\n self.set_options(timeouts=True, timeout_maximum=1)\n task = self.create_task(self.context())\n self.assertEquals(task._timeout_for_targets([targetC]), 1)",
"def testMaxTargets(self):\n\n self.assertEqual('Maxtargets: %s' % inventory_base.DEFAULT_MAXTARGETS,\n self.inv._CmdMaxTargets('maxtargets', []))\n self.inv._CmdMaxTargets('maxtargets', ['10'])\n self.assertEqual(10, self.inv._maxtargets)",
"def find_long_runs(num_sequence, l):\n chunked = [(k, list(g)) for k, g in itertools.groupby(num_sequence)]\n retval = [(i, len(g)) for i, (k, g) in enumerate(chunked) if k and len(g) > l]\n return retval",
"def maxTargets(self):\n return self._getAttribute(Attribute.maxTargets)",
"def _select_targets(y, min_threshold=10, max_threshold=None):\n c = collections.Counter(y)\n y_sel = []\n for y_id in c:\n if c[y_id] > min_threshold:\n if max_threshold:\n if c[y_id] < max_threshold:\n y_sel.append(y_id)\n else:\n y_sel.append(y_id)\n return y_sel",
"def get_winners(self):\n\n if self.optimal is not None:\n return self.optimal\n clean_proposals = self.cleaner.create_scenarios(self.proposals)\n self.optimal = self.optimizer.optimize(clean_proposals)\n return self.optimal",
"def get_target_per_score(self):\n pass",
"def run():\n\n # Build list of stations\n stations = build_station_list()\n list_of_rivers_numbers=rivers_by_station_number(stations, 9)\n print(\"Rivers with greatest number of stations: {}\".format(list_of_rivers_numbers))",
"def find_combinations_of_coins(target, max_coin_amount):\n # Terminate conditions\n if max_coin_amount == 1:\n return 1\n if target == 0:\n return 1\n # If not terminating then partition by the max_coin_amount and reducing it to iteratively reduce the problem\n total_combinations = 0\n next_max_coin_amount = max([coin for coin in DENOMINATIONS if coin < max_coin_amount])\n for amount_of_max_coin in range(int(target / max_coin_amount) + 1):\n new_target = target - amount_of_max_coin * max_coin_amount\n total_combinations += find_combinations_of_coins(new_target, next_max_coin_amount)\n return total_combinations",
"def evaluateAllLastThreeGames(maxSeed):\n\t# Generate all possible 4-digit strings in base maxSeed\n\tseedsWithRepetition = ''\n\tfor seed in range(1, maxSeed + 1):\n\t\tseedsWithRepetition += 4 * str(seed)\n\tallPossibleStrings = set(permutations(seedsWithRepetition, 4))\n\t\n\tfor possibleString in allPossibleStrings:\n\t\tregionWinners = [int(possibleString[i]) for i in range(4)]\n\n\t\tfor j in range(8):\n\t\t\tf4Game1 = j % 2\n\t\t\tf4Game2 = int(j / 2) % 2\n\t\t\tncg = int(j / 4) % 2\n\t\t\ttotalLogProb = 0.\n\n\t\t\t# Round 5 (F4)\n\t\t\tteam1 = {'seed': regionWinners[0], 'region': 0}\n\t\t\tteam2 = {'seed': regionWinners[1], 'region': 1}\n\t\t\twinProb = getWinProbability(team1, team2, r=5)\n\t\t\tncgTeam1 = team1 if f4Game1 == 1 else team2\n\t\t\ttotalLogProb += math.log(winProb if f4Game1 == 1 else (1 - winProb))\n\n\t\t\tteam1 = {'seed': regionWinners[2], 'region': 2}\n\t\t\tteam2 = {'seed': regionWinners[3], 'region': 3}\n\t\t\twinProb = getWinProbability(team1, team2, r=5)\n\t\t\tncgTeam2 = team1 if f4Game2 == 1 else team2\n\t\t\ttotalLogProb += math.log(winProb if f4Game2 == 1 else (1 - winProb))\n\n\t\t\t# Round 6 (NCG)\n\t\t\twinProb = getWinProbability(ncgTeam1, ncgTeam2, r=6)\n\t\t\ttotalLogProb += math.log(winProb if ncg == 1 else (1 - winProb))\n\t\t\tpartialLogProb = totalLogProb\n\t\t\tfor regionWinner in regionWinners:\n\t\t\t\ttotalLogProb += mostLikelyRegions[str(regionWinner)][1]\n\t\t\tprint('{0},{1:03b},{2:.4f},{3:.4f}'.format(regionWinners, j, partialLogProb, totalLogProb))\n\tpass",
"def find_n_qubits(gates):\n return max((get_maximum_index(g.targets) for g in gates), default=-1) + 1",
"def evaluate(self, test_data):\n test_results = [(np.argmax(self.feed_forward(x)), y) for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)",
"def test_id(self):\n\n w, h = 11, 11\n method = \"minimax\"\n value_table = [[0] * w for _ in range(h)]\n value_table[3][0] = 1\n value_table[2][3] = 1\n value_table[4][4] = 2\n value_table[7][2] = 3\n eval_fn = EvalTable(value_table)\n\n depths = [\"7+\", \"6\", \"5\", \"4\", \"3\", \"2\", \"1\"]\n exact_counts = [((4, 4), set([(2, 3), (3, 0)])),\n ((16, 6), set([(2, 3), (3, 0)])),\n ((68, 20), set([(2, 3), (3, 2)])),\n ((310, 21), set([(2, 3), (3, 2)])),\n ((1582, 45), set([(3, 0), (3, 2)])),\n ((7534, 45), set([(3, 0), (3, 2)])),\n ((38366, 74), set([(0, 3), (2, 3), (3, 0), (3, 2)]))]\n\n time_limit = 3200\n while time_limit >= TIMER_MARGIN:\n agentUT, board = self.initAUT(-1, eval_fn, True, method, (1, 1), (0, 0), w, h)\n\n legal_moves = board.get_legal_moves()\n timer_start = curr_time_millis()\n time_left = lambda : time_limit - (curr_time_millis() - timer_start)\n move = agentUT.get_move(board, legal_moves, time_left)\n finish_time = time_left()\n\n self.assertTrue(len(board.visited) > 4, ID_FAIL)\n\n self.assertTrue(finish_time > 0,\n \"Your search failed iterative deepening due to timeout.\")\n\n # print time_limit, board.counts, move\n\n time_limit /= 2\n # Skip testing if the search exceeded 7 move horizon\n if (board.counts[0] > exact_counts[-1][0][0] or\n board.counts[1] > exact_counts[-1][0][1] or\n finish_time < 5):\n continue\n\n for idx, ((n, m), c) in enumerate(exact_counts[::-1]):\n if n > board.counts[0]:\n continue\n self.assertIn(move, c, ID_ERROR.format(depths[idx], 2 * time_limit, move, *board.counts))\n break",
"def targets():\n return [\n # C++\n CppDistribTest(\"linux\", \"x64\", \"debian10\", \"cmake\", presubmit=True),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_as_submodule\", presubmit=True\n ),\n CppDistribTest(\n \"linux\",\n \"x64\",\n \"debian10\",\n \"cmake_as_externalproject\",\n presubmit=True,\n ),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_fetchcontent\", presubmit=True\n ),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_module_install\", presubmit=True\n ),\n CppDistribTest(\n \"linux\", \"x64\", \"debian10\", \"cmake_pkgconfig\", presubmit=True\n ),\n CppDistribTest(\n \"linux\",\n \"x64\",\n \"debian10_aarch64_cross\",\n \"cmake_aarch64_cross\",\n presubmit=True,\n ),\n CppDistribTest(\"windows\", \"x86\", testcase=\"cmake\", presubmit=True),\n CppDistribTest(\n \"windows\",\n \"x86\",\n testcase=\"cmake_as_externalproject\",\n presubmit=True,\n ),\n # C#\n CSharpDistribTest(\n \"linux\", \"x64\", \"debian10\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\"linux\", \"x64\", \"ubuntu1604\", use_dotnet_cli=True),\n CSharpDistribTest(\n \"linux\", \"x64\", \"alpine\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\n \"linux\", \"x64\", \"dotnet31\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\n \"linux\", \"x64\", \"dotnet5\", use_dotnet_cli=True, presubmit=True\n ),\n CSharpDistribTest(\"macos\", \"x64\", use_dotnet_cli=True, presubmit=True),\n CSharpDistribTest(\"windows\", \"x86\", presubmit=True),\n CSharpDistribTest(\"windows\", \"x64\", presubmit=True),\n # Python\n PythonDistribTest(\"linux\", \"x64\", \"buster\", presubmit=True),\n PythonDistribTest(\"linux\", \"x86\", \"buster\", presubmit=True),\n PythonDistribTest(\"linux\", \"x64\", \"fedora34\"),\n PythonDistribTest(\"linux\", \"x64\", \"arch\"),\n PythonDistribTest(\"linux\", \"x64\", \"alpine\"),\n PythonDistribTest(\"linux\", \"x64\", \"ubuntu2004\"),\n PythonDistribTest(\n \"linux\", \"aarch64\", \"python38_buster\", presubmit=True\n ),\n PythonDistribTest(\n \"linux\", \"x64\", \"alpine3.7\", source=True, presubmit=True\n ),\n PythonDistribTest(\n \"linux\", \"x64\", \"buster\", source=True, presubmit=True\n ),\n PythonDistribTest(\n \"linux\", \"x86\", \"buster\", source=True, presubmit=True\n ),\n PythonDistribTest(\"linux\", \"x64\", \"fedora34\", source=True),\n PythonDistribTest(\"linux\", \"x64\", \"arch\", source=True),\n PythonDistribTest(\"linux\", \"x64\", \"ubuntu2004\", source=True),\n # Ruby\n RubyDistribTest(\n \"linux\",\n \"x64\",\n \"debian10\",\n ruby_version=\"ruby_2_7\",\n source=True,\n presubmit=True,\n ),\n RubyDistribTest(\n \"linux\", \"x64\", \"debian10\", ruby_version=\"ruby_3_0\", presubmit=True\n ),\n RubyDistribTest(\"linux\", \"x64\", \"centos7\"),\n RubyDistribTest(\"linux\", \"x64\", \"ubuntu1604\"),\n RubyDistribTest(\"linux\", \"x64\", \"ubuntu1804\", presubmit=True),\n # PHP7\n PHP7DistribTest(\"linux\", \"x64\", \"debian10\", presubmit=True),\n PHP7DistribTest(\"macos\", \"x64\", presubmit=True),\n ]",
"def longest_run(L):\n\tlongest_length = 1\n\tincreasing_length = 1\n\tdecreasing_length = 1\n\tfor i in range(len(L) - 1):\n\t\tif L[i] >= L[i+1]:\n\t\t\tdecreasing_length += 1\n\t\telse:\n\t\t\tdecreasing_length = 1\n\t\tif L[i] <= L[i+1]:\n\t\t\tincreasing_length += 1\n\t\telse:\n\t\t\tincreasing_length = 1\n\t\tif increasing_length > longest_length:\n\t\t\tlongest_length = increasing_length\n\t\t\trun_end = i + 1\n\t\telif decreasing_length > longest_length:\n\t\t\tlongest_length = decreasing_length\n\t\t\trun_end = i + 1\n\n\treturn sum(L[run_end - longest_length + 1 : run_end+1])",
"def find_solutions(target, max_coin=None):\n if target == 0:\n return\n if max_coin is None:\n max_coin = 200\n for coin in DENOMINATIONS:\n if coin > max_coin:\n continue\n if coin <= target:\n solution = [coin]\n new_target = target - coin\n if new_target:\n for x in find_solutions(target - coin, coin):\n yield solution + x\n else:\n yield solution",
"def getLastTestCase(self):\n \n length = len(self.testcases)\n \n if length == 0:\n trace_info(\"No test case to be have run, testcases array empty\")\n return None\n lastIndex = length - 1\n \n try:\n return self.testcases[lastIndex].name\n except IndexError:\n printOutput(self.testcases)\n raise IndexError(\"Couldn't get the last testcase\")",
"def GuessTargets(self, target_name):\n return difflib.get_close_matches(target_name, self.GetTargets(), 10, 0.4)",
"def _process_failures(self, target):\n crash_synopses = self._fuzz_data_logger.failed_test_cases.get(self._fuzz_data_logger.all_test_cases[-1], [])\n if len(crash_synopses) > 0:\n self._fuzz_data_logger.open_test_step(\"Failure summary\")\n\n # retrieve the primitive that caused the crash and increment it's individual crash count.\n self.crashing_primitives[self.fuzz_node.mutant] = self.crashing_primitives.get(self.fuzz_node.mutant, 0) + 1\n self.crashing_primitives[self.fuzz_node] = self.crashing_primitives.get(self.fuzz_node, 0) + 1\n\n # print crash synopsis\n if len(crash_synopses) > 1:\n # Prepend a header if > 1 failure report, so that they are visible from the main web page\n synopsis = \"({0} reports) {1}\".format(len(crash_synopses), \"\\n\".join(crash_synopses))\n else:\n synopsis = \"\\n\".join(crash_synopses)\n self.procmon_results[self.total_mutant_index] = crash_synopses\n self._fuzz_data_logger.log_info(synopsis)\n\n if self.fuzz_node.mutant is not None and \\\n self.crashing_primitives[self.fuzz_node] >= self._crash_threshold_node:\n skipped = self.fuzz_node.num_mutations() - self.fuzz_node.mutant_index\n self._skip_current_node_after_current_test_case = True\n self._fuzz_data_logger.open_test_step(\n \"Crash threshold reached for this request, exhausting {0} mutants.\".format(skipped))\n self.total_mutant_index += skipped\n self.fuzz_node.mutant_index += skipped\n elif self.fuzz_node.mutant is not None and \\\n self.crashing_primitives[self.fuzz_node.mutant] >= self._crash_threshold_element:\n if not isinstance(self.fuzz_node.mutant, primitives.Group)\\\n and not isinstance(self.fuzz_node.mutant, blocks.Repeat):\n skipped = self.fuzz_node.mutant.num_mutations() - self.fuzz_node.mutant.mutant_index\n self._skip_current_element_after_current_test_case = True\n self._fuzz_data_logger.open_test_step(\n \"Crash threshold reached for this element, exhausting {0} mutants.\".format(skipped))\n self.total_mutant_index += skipped\n self.fuzz_node.mutant_index += skipped\n\n self.restart_target(target)\n return True\n else:\n return False",
"def test_multi_objective(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n # We always expect a list of costs (although a dict is returned).\n # Internally, target function runner maps the dict to a list of costs in the right order.\n for target in [target_multi_objective1, target_multi_objective2]:\n runner = make_runner(target, use_multi_objective=True)\n config = runner._scenario.configspace.get_default_configuration()\n\n SEED = 2345\n status, cost, _, _ = runner.run(config=config, instance=None, seed=SEED, budget=None)\n\n assert isinstance(cost, list)\n assert cost == [SEED, SEED]\n assert status == StatusType.SUCCESS",
"def test_output_is_counterfactuals(self):\n\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertGreaterEqual(len(output), 1)\n target_prediction = self._predict_and_return_argmax_label(self._example)\n for cf_example in output:\n cf_prediction = self._predict_and_return_argmax_label(cf_example)\n self.assertNotEqual(cf_prediction, target_prediction)",
"def test_result_n(self):\r\n saber = self.saber\r\n movies = self.movies\r\n res = nearest_n(movies, saber, PRED).drop(\"genres\", axis=1)\r\n intersection = res.merge(self.training, how=\"left\", on=\"movieId\").dropna()\r\n self.assertTrue(not intersection.empty)",
"def solve_p1_v1(target: int) -> int:\n\n elves = DeliveryService()\n\n house, n_presents = 0, 0\n while n_presents < target:\n house += 1\n n_presents = elves.visit(house)\n print(house, n_presents)\n\n return house",
"def test_calculate_class_2_individuals_best_response_simulation_all_inds_in_one():\n all_individuals_to_first = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=0.1,\n lambda_1_2=3,\n mu_1=10,\n mu_2=2,\n num_of_servers_1=8,\n num_of_servers_2=4,\n threshold_1=6,\n threshold_2=3,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_first == 1\n\n all_individuals_to_second = calculate_class_2_individuals_best_response(\n lambda_2=0.3,\n lambda_1_1=3,\n lambda_1_2=0.1,\n mu_1=2,\n mu_2=10,\n num_of_servers_1=4,\n num_of_servers_2=8,\n threshold_1=3,\n threshold_2=6,\n system_capacity_1=float(\"inf\"),\n system_capacity_2=float(\"inf\"),\n buffer_capacity_1=float(\"inf\"),\n buffer_capacity_2=float(\"inf\"),\n use_simulation=True,\n runtime=500,\n num_of_trials=5,\n warm_up_time=100,\n seed_num_1=10,\n seed_num_2=10,\n )\n assert all_individuals_to_second == 0",
"def lazy_greedy_max(self, budget):\r\n\r\n classes, no_elements = torch.unique(self.y_trn, return_counts=True)\r\n len_unique_elements = no_elements.shape[0]\r\n per_class_bud = int(budget / len(classes))\r\n final_per_class_bud = []\r\n _, sorted_indices = torch.sort(no_elements, descending = True)\r\n\r\n if self.selection_type == 'PerClass':\r\n \r\n total_idxs = 0\r\n for n_element in no_elements:\r\n final_per_class_bud.append(min(per_class_bud, torch.IntTensor.item(n_element)))\r\n total_idxs += min(per_class_bud, torch.IntTensor.item(n_element))\r\n \r\n if total_idxs < budget:\r\n bud_difference = budget - total_idxs\r\n for i in range(len_unique_elements):\r\n available_idxs = torch.IntTensor.item(no_elements[sorted_indices[i]])-per_class_bud \r\n final_per_class_bud[sorted_indices[i]] += min(bud_difference, available_idxs)\r\n total_idxs += min(bud_difference, available_idxs)\r\n bud_difference = budget - total_idxs\r\n if bud_difference == 0:\r\n break\r\n\r\n total_greedy_list = []\r\n for i in range(len_unique_elements):\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n \r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=final_per_class_bud[i])\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=final_per_class_bud[i])\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn[idxs].numpy())\r\n greedyList = self.get_index(self.x_trn[idxs].numpy(), x_sub)\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n greedyList = list(np.argmax(sim_sub, axis=1))\r\n total_greedy_list.extend(idxs[greedyList])\r\n\r\n elif self.selection_type == 'Supervised':\r\n \r\n \r\n if self.submod == 'feature_based':\r\n \r\n class_map = {}\r\n for i in range(len_unique_elements):\r\n class_map[torch.IntTensor.item(classes[i])] = i #Mapping classes from 0 to n\r\n \r\n sparse_data = torch.zeros([self.x_trn.shape[0], self.x_trn.shape[1]*len_unique_elements])\r\n for i in range(self.x_trn.shape[0]):\r\n \r\n start_col = class_map[torch.IntTensor.item(self.y_trn[i])]*self.x_trn.shape[1]\r\n end_col = start_col+self.x_trn.shape[1]\r\n sparse_data[i, start_col:end_col] = self.x_trn[i, :]\r\n\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n x_sub = fl.fit_transform(sparse_data.numpy())\r\n total_greedy_list = self.get_index(sparse_data.numpy(), x_sub)\r\n\r\n else:\r\n for i in range(len(classes)):\r\n \r\n if i == 0:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = idxs.repeat_interleave(N)\r\n col = idxs.repeat(N)\r\n data = 
self.dist_mat.cpu().numpy().flatten()\r\n else:\r\n idxs = torch.where(self.y_trn == classes[i])[0]\r\n N = len(idxs)\r\n self.compute_score(idxs)\r\n row = torch.cat((row, idxs.repeat_interleave(N)), dim=0)\r\n col = torch.cat((col, idxs.repeat(N)), dim=0)\r\n data = np.concatenate([data, self.dist_mat.cpu().numpy().flatten()], axis=0)\r\n \r\n \r\n sparse_simmat = csr_matrix((data, (row.numpy(), col.numpy())), shape=(self.N_trn, self.N_trn))\r\n #self.dist_mat = sparse_simmat\r\n\r\n if self.submod == 'facility_location':\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n sim_sub = fl.fit_transform(sparse_simmat)\r\n total_greedy_list = list(np.array(np.argmax(sim_sub, axis=1)).reshape(-1))\r\n\r\n\r\n if self.selection_type == 'Full':\r\n \r\n\r\n total_greedy_list = []\r\n idx_end = self.x_trn.shape[0] - 1\r\n idxs = torch.linspace(0, idx_end, self.x_trn.shape[0]).long()\r\n\r\n if self.submod == 'facility_location':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.facilityLocation.FacilityLocationSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'graph_cut':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.graphCut.GraphCutSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'saturated_coverage':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.saturatedCoverage.SaturatedCoverageSelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'sum_redundancy':\r\n self.compute_score(idxs)\r\n fl = apricot.functions.sumRedundancy.SumRedundancySelection(random_state=0, metric='precomputed',\r\n n_samples=budget)\r\n elif self.submod == 'feature_based':\r\n fl = apricot.functions.featureBased.FeatureBasedSelection(random_state=0, n_samples=budget)\r\n\r\n if self.submod == 'feature_based':\r\n\r\n x_sub = fl.fit_transform(self.x_trn.numpy())\r\n total_greedy_list = self.get_index(self.x_trn.numpy(), x_sub)\r\n\r\n else: \r\n\r\n sim_sub = fl.fit_transform(self.dist_mat.cpu().numpy())\r\n total_greedy_list = list(np.argmax(sim_sub, axis=1))\r\n\r\n return total_greedy_list",
"def buildall(target):\n if not is_acyclic(target):\n raise CircularDependency()\n\n def needsbuild(dataset):\n if dataset.exists and \\\n any(not par.exists or dataset.is_older_than(par)\n for par in dataset.parents()):\n return True, PARENTNEWER\n elif not dataset.exists:\n return True, MISSING\n else:\n return False, None\n\n def mark_children_breadthfirst(roots, parents):\n \"\"\" Mark build order for all ancestors, beginning with the roots. \"\"\"\n marks = {}\n queue = [(0, root) for root in roots]\n while len(queue) != 0:\n i, dep = queue.pop(0)\n marks[dep] = i\n for child in filter(lambda d: d in parents, dep.children(0)):\n iold = marks.get(child, -1)\n if i > iold:\n marks[child] = i\n queue.append((i+1, child))\n return marks\n\n # Map of Dataset -> integer, where the integer indicates the build step\n marks = mark_children_breadthfirst(target.roots(), set(target.parents()))\n\n groups = []\n maxi = 0\n for dep, i in marks.items():\n nb, reason = needsbuild(dep)\n if nb:\n while i >= maxi:\n groups.append([])\n maxi += 1\n groups[i].append((dep, reason))\n\n for group in groups:\n yield group\n\n if needsbuild(target)[0]:\n yield [(target, MISSING)]",
"def test_get_most_relevant_run():\n\n most_relevant_run = LearningResourceRunFactory.create(\n availability=AvailabilityType.archived.value,\n best_start_date=datetime(2019, 10, 1, tzinfo=pytz.utc),\n run_id=\"1\",\n )\n LearningResourceRunFactory.create(\n availability=AvailabilityType.archived.value,\n best_start_date=datetime(2018, 10, 1, tzinfo=pytz.utc),\n run_id=\"2\",\n )\n\n assert (\n get_most_relevant_run(LearningResourceRun.objects.filter(run_id__in=[\"1\", \"2\"]))\n == most_relevant_run\n )\n\n most_relevant_run = LearningResourceRunFactory.create(\n availability=AvailabilityType.upcoming.value,\n best_start_date=datetime(2017, 10, 1, tzinfo=pytz.utc),\n run_id=\"3\",\n )\n\n LearningResourceRunFactory.create(\n availability=AvailabilityType.upcoming.value,\n best_start_date=datetime(2020, 10, 1, tzinfo=pytz.utc),\n run_id=\"4\",\n )\n\n assert (\n get_most_relevant_run(\n LearningResourceRun.objects.filter(run_id__in=[\"1\", \"2\", \"3\", \"4\"])\n )\n == most_relevant_run\n )\n\n most_relevant_run = LearningResourceRunFactory.create(\n availability=AvailabilityType.current.value, run_id=\"5\"\n )\n\n assert (\n get_most_relevant_run(\n LearningResourceRun.objects.filter(run_id__in=[\"1\", \"2\", \"3\", \"4\", \"5\"])\n )\n == most_relevant_run\n )",
"def check_all_leaves(trial):\r\n leaf_nodes = trial.get_leaf_nodes()\r\n shuffle(leaf_nodes)\r\n states = []\r\n max_value = trial.get_max_dist_value()\r\n for node in leaf_nodes:\r\n trial_copy = copy.deepcopy(trial)\r\n states.append(trial_copy)\r\n node.observe()\r\n #if node.value >= max_value:\r\n # trial_copy = copy.deepcopy(trial)\r\n # states.append(trial_copy)\r\n # return zip(states, [node.label for node in trial.observed_nodes] + [0])\r\n trial_copy = copy.deepcopy(trial)\r\n states.append(trial_copy)\r\n return zip(states, [node.label for node in trial.observed_nodes] + [0])",
"def max_diffs(state):\n # your code here\n return best_action(state, pig_actions, Q_pig, win_diff)",
"def get_best_guess(self, lst):\n maxlen = 0\n pass\n #for elem in lst:",
"def max_diffs(state):\n return best_action(state, pig_actions, Q_pig, win_diff)",
"def test_merge_targets():\n uid1 = \"ACCT\"\n uid2 = \"GGGG\"\n uid3 = \"AAGG\"\n\n seq1 = [\"ACTGTTTGTCTAAGC\"]*2\n qual1 = ['I'*len(seq1[0])]*len(seq1)\n seq2 = [\"ACTGTTTTTCTAAGC\"]*5\n qual2 = ['I'*len(seq2[0])]*len(seq2)\n seq3 = [\"ACTGTTTTTCTAAGC\"]*2\n qual3 = ['I'*len(seq3[0])]*len(seq3)\n\n clusters = create_consensus([uid1 + uid1]*len(seq1) + \\\n [uid2 + uid2]*len(seq2),\n ['I'*(len(uid1)*2)]*(len(seq1) + len(seq2)),\n seq1 + seq2, qual1 + qual2)\n seq3 = [pseq.SequenceWithQuality(seq, qual) for seq, qual in zip(seq3, qual3)]\n uid = pseq.SequenceWithQuality(uid2 + uid3, 'I'*(len(uid2) + len(uid3)))\n cand = clusters.merge_target(uid, seq3[0], {}, 2)\n assert cand == uid2 + uid2, \"%r != %r\" % (cand, uid2 + uid2)\n cand = clusters.merge_target(uid, seq3[0], {}, 1)\n assert cand is None, \"%r != %r\" % (cand, None)",
"def getTestResults():",
"def longest_consecutive_run(members, n):\n try:\n # first false element, so that when the loop ends we know we're at the\n # end of a run\n start = next(ind for ind, m in enumerate(members) if not m)\n except StopIteration:\n return n\n longest_run = current_run = 0\n for i in range(n):\n if members[(i + start) % n]:\n current_run += 1\n else:\n longest_run = max(current_run, longest_run)\n current_run = 0\n longest_run = max(current_run, longest_run)\n return longest_run",
"def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])",
"def evaluate_multiple(logits, targets, eval_cutoffs=[5, 10, 20], batch_wise=False):\n _, indices = torch.topk(logits, max(eval_cutoffs), -1)\n recall, mrr = [], []\n for k in eval_cutoffs:\n indices_k = indices[:, :k]\n targets_k = targets\n recall_k, mrr_k = get_recall(indices_k, targets_k, batch_wise), get_mrr(indices_k, targets_k, batch_wise)\n\n recall.append(recall_k)\n\n mrr.append(mrr_k)\n # print([[str(x.size()) for x in recall], str(targets.size()), str(indices_k.size())])\n return recall, mrr",
"def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i",
"def main(source_dir, ksplit, out_dir, data_pattern, label_pattern, test_mode,\r\n numTopVars = [10, 50, 100, 500, 1000], compute_results=True):\r\n # Load input and labels.\r\n data, labels, data_file = load_data(source_dir, data_pattern)\r\n filename_base = path.splitext(path.basename(mname))[0]\r\n # Get classifiers and params.\r\n global NAMES\r\n if test_mode:\r\n NAMES = [\"Chance\", \"Nearest Neighbors\", \"Linear SVM\", \"Decision Tree\",\r\n \"Logistic Regression\", \"Naive Bayes\", \"LDA\"]\r\n ksplit = 3\r\n\r\n classifiers, params = make_classifiers(NAMES) # data.shape, ksplit)\r\n\r\n\r\n # Make the folds.\r\n logging.info(\"Making %d folds\" % ksplit)\r\n #kf = StratifiedKFold(labels, n_folds=ksplit)\r\n kf = KFold(labels.shape[0], n_folds=ksplit)\r\n\r\n # Extract the training and testing indices from the k-fold object,\r\n # which stores fold pairs of indices.\r\n fold_pairs = [(tr, ts) for (tr, ts) in kf]\r\n assert len(fold_pairs) == ksplit\r\n rank_per_fold = get_rank_per_fold(data, labels, fold_pairs,\r\n save_path=out_dir, parallel=True)\r\n #dhjelm: were we planning on using this dict?\r\n #score={}\r\n dscore=[]\r\n totalErrs = []\r\n if compute_results:\r\n for name in NAMES:\r\n mdl = classifiers[name]\r\n param = params[name]\r\n # get_score runs the classifier on each fold,\r\n # each subset of selected top variables and does a grid search for\r\n # classifier-specific parameters (selects the best)\r\n clf, allConfMats, allTotalErrs, allFittedClassifiers = \\\r\n get_score(data, labels, fold_pairs, name, mdl, param,\r\n numTopVars=numTopVars,\r\n rank_per_fold=rank_per_fold, parallel=True,\r\n rand_iter=-1)\r\n # save classifier object and results to file\r\n save_classifier_results(name, out_dir, allConfMats,\r\n allTotalErrs)\r\n save_classifier_object(clf, allFittedClassifiers, name, out_dir)\r\n # Append classifier results to list of all results\r\n dscore.append(allConfMats)\r\n totalErrs.append(allTotalErrs)\r\n '''\r\n First do some saving of total results\r\n '''\r\n save_combined_results(NAMES, dscore, totalErrs,\r\n numTopVars, out_dir, filename_base)\r\n\r\n plot_errors(NAMES, numTopVars, dscore, totalErrs,\r\n filename_base, out_dir,compute_results)\r\n\r\n logging.shutdown()",
"def recursive_max_train(seq, remaining_tiles):\n # find what I'm playing on. This requires me to order the tiles correctly\n live_end = seq[-1][1]\n\n # get list of tile that can be played\n playable_tiles = []\n viable_legs = []\n\n for tile in remaining_tiles:\n if live_end in tile:\n playable_tiles.append(tile)\n\n # if there are no playable tiles, return incoming sequence\n if not playable_tiles:\n return seq\n\n # for each playable tile, find the longest/highest value train\n for tile in playable_tiles:\n # find remaining hand\n _my_hand = remaining_tiles.copy()\n _my_hand.remove(tile)\n\n # if tile is ordered backwards, switch it so I get the live end right\n _my_tile = tile\n\n if tile[0] == live_end:\n pass\n elif tile[1] == live_end:\n _my_tile.reverse()\n else:\n assert \"Shouldn't get here\"\n\n # RECURSION HERE. BE CAREFUL OF ORDER.\n viable_legs.append(recursive_max_train(seq + [_my_tile], _my_hand))\n\n # find length of longest viable leg\n max_leg_len = max([len(leg) for leg in viable_legs])\n\n # set max_leg_value so\n max_leg_val = 0\n\n for leg in viable_legs:\n if len(leg) == max_leg_len:\n # some multi-layer list comprehension voodoo\n leg_val = sum([pip for tile in leg for pip in tile])\n\n if leg_val > max_leg_val:\n # if this is more valuable\n max_leg_val = leg_val\n max_leg = leg\n\n return max_leg",
"def evaluate(self,test_data):\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)",
"def analyze(self,filenames,output_dir,diffs_only=False):\n def okey(value):\n r = max((['PASS', 'ERROR', 'FAIL', 'UNTESTED', 'SKIPPED'].index(r.outcome) for r in tests[value] if r))\n if r == 0:\n return value\n else:\n return r\n def overall_outcome_weight(results):\n return max((['PASS', 'ERROR', 'FAIL', 'UNTESTED', 'SKIPPED'].index(r.outcome) for r in results if r))\n\n # pass 0: Load results\n results = [RunResults.load(filename) for filename in filenames]\n # step 1: Check if all results are for the same version\n version = results[0].version\n for result in results:\n if result.version != version:\n raise Exception('Analyze: Results for the same FB version required.')\n # step 2: Sort results into groups (platform, cpuarch, arch, run)\n results.sort(key=operator.attrgetter('platform','cpuarch','arch','sequence'))\n\n # pass 1: Create list of tests with results\n tests = {} # Dictionary of all tests found in results; Test ID: list of results\n for result in results:\n column = results.index(result)\n for test_id,test_result in result.items():\n tests.setdefault(test_id,len(results)*[None])[column] = test_result\n\n # pass 2: Analyze results for each tests that didn't pass in all runs\n test_details = {}\n # step 1: Collect details for tests that didn't pass\n for test_id,test_results in tests.items():\n for test_result in test_results:\n if test_result and test_result.outcome != Result.PASS:\n l = test_details.setdefault(test_id,list())\n result = results[test_results.index(test_result)]\n l.append((self.get_run_tag(result.platform,result.cpuarch,result.arch,result.sequence),test_result))\n # step 2: group results for each test\n for test_id,test_results in test_details.items():\n groups = [] # item format: (result,[list_of_runs])\n for result_id,test_result in test_results:\n added = False\n for group in groups:\n if self.compare_results(group[0],test_result):\n group[1].append(result_id)\n added = True\n if not added:\n groups.append((test_result,[result_id]))\n del test_results[:]\n test_results.extend(groups)\n\n # pass 3: Order tests\n test_order = tests.keys()\n test_order.sort(key=okey)\n\n # pass 4: Generate report\n self.print_analysis(version,results,tests,test_details,test_order,\n output_dir, diffs_only)"
] | [
"0.5934326",
"0.5803281",
"0.57657754",
"0.57657754",
"0.57301104",
"0.56978285",
"0.55573726",
"0.5550279",
"0.55331385",
"0.5475259",
"0.546607",
"0.54098874",
"0.5349436",
"0.53450775",
"0.53050417",
"0.5281973",
"0.5245072",
"0.5236958",
"0.523272",
"0.51985705",
"0.51941895",
"0.51761985",
"0.5167142",
"0.5166413",
"0.51572186",
"0.51529944",
"0.514977",
"0.5145269",
"0.5138368",
"0.5130986",
"0.51295584",
"0.51213574",
"0.51213574",
"0.51174873",
"0.5107132",
"0.510628",
"0.50976104",
"0.5089299",
"0.5089299",
"0.5087768",
"0.5069298",
"0.5062929",
"0.5059979",
"0.50591534",
"0.5059106",
"0.50533205",
"0.5040043",
"0.50367075",
"0.50347424",
"0.50276417",
"0.50255203",
"0.50174576",
"0.50092447",
"0.50087494",
"0.5007562",
"0.49983144",
"0.49979872",
"0.49921262",
"0.4986427",
"0.49846935",
"0.49823615",
"0.49817553",
"0.49739197",
"0.49719155",
"0.49669147",
"0.4965758",
"0.49637115",
"0.4962879",
"0.49578422",
"0.49510506",
"0.49498448",
"0.49472052",
"0.49401525",
"0.49369928",
"0.49357226",
"0.49341634",
"0.49273112",
"0.49268195",
"0.49267986",
"0.49265057",
"0.4925363",
"0.49245614",
"0.49224594",
"0.49203342",
"0.49196786",
"0.49162674",
"0.49145937",
"0.49140692",
"0.49073026",
"0.49059257",
"0.4904639",
"0.49005216",
"0.4900396",
"0.48973137",
"0.48934543",
"0.48906955",
"0.4889208",
"0.48890877",
"0.48880696",
"0.48871037"
] | 0.6047128 | 0 |
Max Sliding Count testcases | def test_max_sliding_count(self):
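		# maxSlidingCount(seq, ch[, window]) gives the largest count of ch in any
		# fixed-length window of seq; the assertions below are consistent with a
		# default window of 5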
self.assertTrue(geneutil.maxSlidingCount('AAAAA','A')==5)
self.assertTrue(geneutil.maxSlidingCount('AAAAA','Q')==0)
self.assertTrue(geneutil.maxSlidingCount('AAATAA','A')==4)
self.assertTrue(geneutil.maxSlidingCount('AAATTAA','A')==3)
self.assertTrue(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM','M',10)==10)
self.assertTrue(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM','C',10)==3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 8\n\n \"\"\"maxi, number_of_dice, ret = 0, 10, 0\n while number_of_dice > 0:\n avg = make_averaged(roll_dice)(number_of_dice, dice)\n maxi = max(maxi, avg)\n if avg >= maxi:\n ret = number_of_dice\n number_of_dice -= 1\n return ret\"\"\"\n\n\n\n counterA = 1\n num_rolls=1\n max_value = 0\n best_num_rolls = 0\n while counterA <= 10:\n num_rolls = counterA\n average_function = make_averaged(roll_dice)(counterA, dice)\n if average_function > max_value:\n max_value = average_function\n best_num_rolls = counterA\n counterA +=1\n return best_num_rolls\n\n \"\"\"counterA = 1\n maxvalue = 0\n maxvaluenumber = 0\n while(counterA<=10):\n num_rolls = counterA\n average_for_roll = make_averaged(roll_dice(num_rolls, dice), num_samples)\n counterB = average_for_roll(roll_dice(counterA, dice))\n if(counterB>maxvalue):\n maxvalue = counterB\n maxvaluenumber = counterA\n counterA +=1\n return maxvaluenumber\"\"\"\n # END PROBLEM 8",
"def get_highest(self, test):\n return",
"def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])",
"def count_max(alon):\n return count_max_acc(alon, alon[0], 0, 0)",
"def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n \"*** YOUR CODE HERE ***\"\n k, max_value, max_num = 1, 0, 0\n roll = make_averaged(roll_dice, num_samples)\n while k <= 10:\n current_value = roll(k, dice)\n #print('k: ' + str(k) + ' current_value: ' + str(current_value))\n if current_value > max_value:\n max_value, max_num = current_value, k\n k += 1\n return max_num\n # END PROBLEM 9",
"def test_max_begin(self):\n self.assertEqual(max_integer([5, 3, 4, 1]), 5)",
"def max(self):\r\n\t\treturn max(self.sample)",
"def predict(self, test_data):\n count = 0.0\n for testcase in test_data:\n answer = np.argmax(testcase[1])\n prediction = np.argmax(self.feed_forward(testcase[0]))\n count = count + 1 if (answer - prediction) == 0 else count\n return count",
"def count_max_acc(alon, curr_max, count, pos):\n if pos == len(alon):\n return count\n curr_num = alon[pos]\n if curr_num > curr_max:\n curr_max = curr_num\n count = 0\n if curr_num == curr_max:\n count += 1\n return count_max_acc(alon, curr_max, count, pos+1)",
"def test_max_end(self):\n self.assertEqual(max_integer([5, 3, 4, 8]), 8)",
"def evaluate(self, test_data):\n test_results = [(np.argmax(self.feedforward(x)), np.argmax(y)) #argmax 返回最大数的索引\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)",
"def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]",
"def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)",
"def get_support_max_limit(item, counts):\n return int(max(counts[item] / MIN_ALL_CONF, MIN_SUPPORT))",
"def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i",
"def MaxTestStat(self):\n return max(self.test_stats)",
"def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)",
"def test_max_mid(self):\n self.assertEqual(max_integer([5, 3, 8, 4, 1]), 8)",
"def max_value(gameState):\n if terminal_test(gameState): return -1",
"def test_max_two_sources(self):\n metric = self.metric(addition=\"max\")\n measurement = self.measurement(\n metric,\n sources=[self.source(metric, value=\"10\"), self.source(metric, value=\"20\")],\n )\n self.assertEqual(\"20\", measurement[\"count\"][\"value\"])",
"def test_max(doctest):",
"def test_max_events_range(self):\n\n self.log.info(\"Testing max_event counts\")\n enable_failover = True\n timeout_val = 10\n max_plus_1 = CbServer.Failover.MAX_EVENTS + 1\n\n # Set max_events between (min, max)\n for num_events in range(CbServer.Failover.MIN_EVENTS, max_plus_1):\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=num_events)\n self.assertTrue(status, \"Failed to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, num_events)\n\n for num_events in [0, max_plus_1]:\n self.log.info(\"Testing max_event_count=%s\" % num_events)\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=max_plus_1)\n self.assertFalse(status, \"Able to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, CbServer.Failover.MAX_EVENTS)",
"def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) == [0, None, None, None]\n return",
"def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)",
"def findMaximal(freqSet):",
"def test_maxIndex(self):\t\t\n self.assertEqual(attempt.maxIndexZ, 113)\n self.assertEqual(attempt.maxIndexW, 134)",
"def max(self, i):\n x=self.val(i,0)\n lm=len(self)\n t=1\n while t<lm:\n y=self.val(i,t)\n if x<y:\n x=y\n t+=1\n return x",
"def test_max_samples(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"max_samples\": 3,\n \"min_gradient\": -1,\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n for val in [0, 1, 2, 3, 2, 1]:\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"on\"\n assert state.attributes[\"sample_count\"] == 3",
"def find_max_with_count(A):\n\n def frmax(lo, hi):\n \"\"\"Use recursion to find maximum value in A[lo:hi+1] incl. count\"\"\"\n if lo == hi: return (0, A[lo])\n\n mid = (lo+hi)//2\n ctleft,left = frmax(lo, mid)\n ctright,right = frmax(mid+1, hi)\n return (1+ctleft+ctright, max(left, right))\n\n return frmax(0, len(A)-1)",
"def max(self):\n maxs = self.client.map(_call_max, self.vecDask, pure=False)\n max_val = - np.inf\n for future, result in daskD.as_completed(maxs, with_results=True):\n if result > max_val:\n max_val = result\n return max_val",
"def test_max_integer(self):\n self.assertEqual(max_integer([1, 2, 3, 4, 5]), 5)\n self.assertEqual(max_integer([4, 6, 2, 10, 1]), 10)",
"def test_longest_streak():\n yoga_longest_streak = analytics.longest_streak_periodicity(yoga_trackings(), 'daily', 1)\n run_longest_streak = analytics.longest_streak_periodicity(run_trackings(), 'weekly', 1)\n read_longest_streak = analytics.longest_streak_periodicity(read_trackings(), 'daily', 1)\n meditation_longest_streak = analytics.longest_streak_periodicity(meditation_trackings(),\n 'daily',\n 1)\n french_longest_streak = analytics.longest_streak_periodicity(french_trackings(), 'weekly', 1)\n\n assert yoga_longest_streak == 9\n assert run_longest_streak == 4\n assert read_longest_streak == 7\n assert meditation_longest_streak == 4\n assert french_longest_streak == 5",
"def solution(n, array):\n\n counters = [0] * n\n\n # Current greatest value calculated so far\n max_count = 0\n\n for i in range(len(array)):\n if array[i] == n + 1:\n # max_count = max(counters)\n counters = [max_count] * n\n else:\n counters[array[i] - 1] += 1\n\n # To avoid calculating max(), we update the max value at each step\n if counters[array[i] - 1] > max_count:\n max_count = counters[array[i] - 1]\n\n return counters",
"def get_max_iters():\n return 2000",
"def optimal(count):\n\n return _optimal(start, count)[0]",
"def test_top_keyword(self):\n # Set top to 200\n byt = bytscl(self.array1, top=200)\n mx = numpy.max(byt)\n self.assertEqual(mx, 200)",
"def get_last(self, count):",
"def test_result(self):\n self.assertIsNone(max_integer([]))\n self.assertEqual(max_integer([1, 2, 3, 4]), 4)\n self.assertEqual(max_integer([4, 1, 2, 3]), 4)\n self.assertEqual(max_integer([1, 4, 3, 2]), 4)\n self.assertEqual(max_integer([-34, -2, -3, -37]), -2)\n self.assertEqual(max_integer([-231, 2, -33, -24]), 2)\n self.assertEqual(max_integer([23.4, 34.6, 56.5, 60.2]), 60.2)\n self.assertEqual(max_integer([1]), 1)\n self.assertEqual(max_integer([56.3]), 56.3)\n self.assertEqual(max_integer([-34]), -34)\n self.assertEqual(max_integer([\"holberton\", \"school\",\"student\"]), \"student\")",
"def makeSpan():\n max_span = 0\n for machine in machines_list:\n if machine.span > max_span:\n max_span = machine.span\n return max_span",
"def test_get_max_score(self):\r\n self.combinedoe.update_task_states()\r\n self.combinedoe.state = \"done\"\r\n self.combinedoe.is_scored = True\r\n max_score = self.combinedoe.max_score()\r\n self.assertEqual(max_score, 1)",
"def calcUpperFrequencyLimit(fls, noct, max_idx):\n # floats required due to integer division in Python 2.7\n f_upper = fls[0:max_idx] * (2.0 ** (1.0 / (2.0 * noct)))\n step_size = fls[1] - fls[0]\n approx_idx = f_upper / float(step_size)\n f_upper = np.round(approx_idx).astype(int)\n return f_upper",
"def test_end(self):\n lst = [1, 5, 98]\n self.assertEqual(max_integer(lst), 98)",
"def findMaxConsecutiveOnes(nums: List[int]) -> int:\n count = maxCount = 0\n for num in nums:\n if num == 1:\n count += 1\n else:\n maxCount = max([count, maxCount])\n count = 0\n return max([count, maxCount])",
"def testMax(self):\n top = 10\n table = self.auth.table(self.dataset, self.table, top=top)\n record_count = len(table)\n self.assertLessEqual(record_count, top)",
"def max_trials(self) -> int:\n return self._max_trials",
"def test_Max_Iteration_Negative(self):\n\t\tself.assertRaises(calc.OutOfRangeError, calc.it, M([[1 + 1j]]), 0 + 0j, -10)",
"def test_check_max(self):\n\t\tself.filter.set_operator(\".max\")\n\t\tself.filter.set_limit(12)\n\t\tself.assertTrue(self.filter.check(Object(field=12)))\n\t\tself.assertTrue(self.filter.check(Object(field=0)))\n\t\tself.assertFalse(self.filter.check(Object(field=13)))",
"def max(self):\n\n return time_stat(self, stat=\"max\")",
"def question_24(list_num: int) -> int:\n return max(list_num, key=list_num.count)",
"def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n averaged_dice = make_averaged(roll_dice, num_samples)\n max_score = 0\n result = 0\n for num_rolls in range(1, 11):\n average_turn_score = averaged_dice(num_rolls, dice)\n if average_turn_score > max_score:\n max_score = average_turn_score\n result = num_rolls\n elif average_turn_score == max_score: # if tied, lower num rolls\n if num_rolls < result:\n max_score = average_turn_score\n result = num_rolls\n return result\n # END PROBLEM 9",
"def maximum_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"maximum_count\")",
"def test_middle(self):\n lst = [1, 98, 5]\n self.assertEqual(max_integer(lst), 98)",
"def test_find_max_seat_id():\n data = [\n {\"seat_id\": 100},\n {\"seat_id\": 101},\n {\"seat_id\": 99},\n ]\n assert find_max_seat_id(data) == 101",
"def test_begin(self):\n lst = [98, 1, 2]\n self.assertEqual(max_integer(lst), 98)",
"def computeGoodMax(totalTimes, noerrs):\n # Could allow a small amount of space above the top, but it's annnoying for percentages!\n # return None\n factor = 1.00\n maxReading = factor * max(\n [max([v for v in l if v != None]) for l in list(totalTimes.values())]\n )\n if maxReading == 0:\n maxReading = 0.1\n decade = math.floor(math.log10(maxReading))\n scaledValue = maxReading * 10 ** (-decade)\n # print (\"maxReading: \",maxReading,\"decade: \",decade,\" scaledValue: \",scaledValue)\n for v in (\n 1.0,\n 1.1,\n 1.2,\n 1.25,\n 1.3,\n 1.4,\n 1.5,\n 1.6,\n 1.7,\n 1.75,\n 1.8,\n 1.9,\n 2.0,\n 2.5,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 7.5,\n 8.0,\n 9.0,\n ):\n if scaledValue <= v:\n # print (\"computeGoodMax: \", v * (10**decade))\n return v * (10 ** decade)\n # print (\"computeGoodMax: \", 10**(decade+1))\n return 10 ** (decade + 1)",
"def extrapolate_with_worst_case(values: List[float], n: int = 5) -> float:\n n = min(len(values), n)\n return values[-1] + max(v_next - v_prev for v_prev, v_next in zip(values[-n:], values[-n+1:]))",
"def count_max_df(df):\n d_dl = Counter(df[\"moy_dc\"])\n k, max_value_dl = max(d_dl.items(), key=lambda x:x[1])\n \n d_dh = Counter(df[\"moy_dh\"])\n k, max_value_dh = max(d_dh.items(), key=lambda x:x[1])\n \n return max_value_dl, max_value_dh\n pass",
"def evaluate(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)",
"def max_diffs(state):\n # your code here\n return best_action(state, pig_actions, Q_pig, win_diff)",
"def calc_max(data: list) -> float:\n acc = data[0]\n for n in data:\n if n > acc:\n acc = n\n return float(acc)",
"def pick_largest(self, cut_off):\r\n for i in range(self.dimension):\r\n m = self.masked[int(self.rank_yx(self.rank[i])[0]) # locating the corresponding mark array\r\n ,int(self.rank_yx(self.rank[i])[1])]\r\n if m * self.image_data[i] == self.image_data[i]:\r\n if self.image_data[i] <= cut_off:\r\n print(\"Surveying completed\")\r\n return -1,-1 # returns -1,-1 if scan is completed\r\n else:\r\n return self.image_data[i], np.array(self.rank[i])",
"def evaluate2_5_1(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return test_results",
"def test_longest_run_mult(self):\n\t\tself.assertTrue(geneutil.longestRun('QQQQN','QN')==5)\n\t\tself.assertTrue(geneutil.longestRun('QQANNQ','QN',1)==6)\n\t\tself.assertTrue(geneutil.longestRun('QQNPPQ','QN',1)==3)\n\t\tself.assertTrue(geneutil.longestRun('QQQAANN','QN',2)==7)\n\t\tself.assertTrue(geneutil.longestRun('ANQNQAN','QN',1)==6)\n\t\tself.assertTrue(geneutil.longestRun('ANQNQANP','QN',1)==6)",
"def _evaluate_rollout(self, state, limit=1000):\n player = state.get_current_player()\n for i in range(limit):\n end, winner = state.game_end()\n if end:\n break\n action_probs = rollout_policy_fn(state)\n max_action = max(action_probs, key=itemgetter(1))[0]\n state.do_move(max_action)\n else:\n # If no break from the loop, issue a warning.\n print(\"WARNING: rollout reached move limit\")\n if winner == -1: # tie\n return 0\n else:\n return 1 if winner == player else -1",
"def _evaluate_rollout(self, state, limit=1000):\n player = state.get_current_player()\n for i in range(limit):\n end, winner = state.game_end()\n if end:\n break\n action_probs = rollout_policy_fn(state)\n max_action = max(action_probs, key=itemgetter(1))[0]\n state.do_move(max_action)\n else:\n # If no break from the loop, issue a warning.\n print(\"WARNING: rollout reached move limit\")\n if winner == -1: # tie\n return 0\n else:\n return 1 if winner == player else -1",
"def run(data):\n max_calories = 0\n calories = 0\n for item in data:\n item = item.strip()\n if item != \"\":\n calories += int(item)\n else:\n max_calories = set_max(calories, max_calories)\n calories = 0\n max_calories = set_max(calories, max_calories)\n\n return max_calories",
"def get_max_num_runs(self, db):\n res = db.session.query(func.max(db.ExperimentResult.run)).filter_by(experiment=self).first()\n if res is None or res[0] is None: return 0\n return res[0] + 1",
"def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number",
"def maxs(self):\n return self.intervals[:, 1]",
"def max_pp(level):\n base_pp = 6\n level_pp = 2 * level\n return base_pp + (level_pp - 2)",
"def max_temp(self):\n return 99",
"def maximumDominationCount(leaf):\n maximumDominationCount = np.nanmax(leaf.calDominationCount())\n return maximumDominationCount",
"def last_high(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int32)\n max_val = values[0]\n counter = 0\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n counter = i\n arr[i] = counter\n return arr",
"def findMaxFactor(self):\n factorMax = 0\n factorMaxInd = ''\n for ue in list(self.ues.keys()):\n if len(self.ues[ue].bearers[0].buffer.pckts)>0 and self.ues[ue].pfFactor>factorMax:\n factorMax = self.ues[ue].pfFactor\n factorMaxInd = ue\n if factorMaxInd=='':\n ue = list(self.ues.keys())[self.ind_u]\n q = 0\n while len(self.ues[ue].bearers[0].buffer.pckts)==0 and q<len(self.ues):\n self.updIndUE()\n ue = list(self.ues.keys())[self.ind_u]\n q = q + 1\n factorMaxInd = ue\n\n return factorMaxInd",
"def count(A,target):\n\n def rcount(lo, hi, target):\n \"\"\"Use recursion to find maximum value in A[lo:hi+1].\"\"\"\n if lo == hi:\n return 1 if A[lo] == target else 0\n\n mid = (lo+hi)//2\n left = rcount(lo, mid, target)\n right = rcount(mid+1, hi, target)\n return left + right\n\n return rcount(0, len(A)-1, target)",
"def max_count(self):\n return self.config.get('max_count', 500)",
"def Max(data):\n return data.max()",
"def get_max_draw_down(ts_vals):\r\n MDD = 0\r\n DD = 0\r\n peak = -99999\r\n for value in ts_vals:\r\n if (value > peak):\r\n peak = value\r\n else:\r\n DD = (peak - value) / peak\r\n if (DD > MDD):\r\n MDD = DD\r\n return MDD",
"def maximum(self, start, end):\n return self.foldl1(start, end, max)",
"def high_count(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int16)\n count = 0\n max_val = values[0]\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n count += 1\n arr[i] = count\n return arr",
"def fail_max(self) -> int:\n return self._fail_max",
"def _evaluate_rollout(self, state, limit=1000):\n player = state.turn\n for i in range(limit):\n end, winner = state.if_win()\n if end: break\n action_probs = rollout_policy_fn(state)\n max_action = max(action_probs, key=itemgetter(1))[0]\n state.do_move(max_action)\n else:\n # If no break from the loop, issue a warning.\n print(\"WARNING: rollout reached move limit\")\n return -1\n return 1 if winner == player else -1",
"def strategy(hand, num_die_sides):\n #return (0.0, ())\n maxval = 0.0\n maxseq= ()\n allholds = gen_all_holds(hand)\n for seq in allholds:\n val = expected_value(seq, num_die_sides, len(hand)-len(seq))\n if val > maxval:\n maxval = val\n maxseq = seq\n \n \n \n return (maxval, maxseq)",
"def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]",
"def test_max_score(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stem=False)\n posts = [\n \"Erdogan with threats to attack regime forces 'everywhere' in Syria\",\n \"Damascus says Erdogan 'disconnected from reality' after threats\",\n ]\n\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n scorer = TFIDFScorer({ 'erdogan': 1, 'threats': 2 }, 10)\n candidates = extractor.extract(corpus)\n scores = scorer.score(candidates, normalize_scores=True)\n self.assertTrue(all( score <= 1 for score in scores.values() ))",
"def test_max(self):\n val = DwcaValidator(yaml.load(self.yaml_value, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n max_true = ['99', '99.0', '89.9', '88', '-99']\n for value in max_true:\n document = {'age_3': value}\n self.assertTrue(val.validate(document))\n document = {'age_4': value}\n self.assertTrue(val.validate(document))\n\n max_false = ['99.1', '100']\n for value in max_false:\n document = {'age_3': value}\n self.assertFalse(val.validate(document))\n document = {'age_4': value}\n self.assertFalse(val.validate(document))",
"def test_base_cases(self):\n self.assertEqual(max_integer([1, 50, 2, 10]), 50)\n self.assertEqual(max_integer([-1, -20, -2, -50]), -1)\n self.assertEqual(max_integer([1, 2, 3, 4, 5]), 5)\n self.assertEqual(max_integer([40, 0, 2, 5]), 40)\n self.assertEqual(max_integer([10, 0, -2, 5]), 10)\n self.assertEqual(max_integer([40]), 40)\n self.assertEqual(max_integer([]), None)",
"def max_curve(trial_scores: np.ndarray) -> np.ndarray:\n ret = np.empty(len(trial_scores))\n keep = -1e9\n for i, score in enumerate(trial_scores):\n keep = max(keep, score)\n ret[i] = keep\n return ret",
"def max_val(t): \n maxVal = False\n \n def helper(obj):\n nonlocal maxVal\n for el in obj:\n if isinstance(el, int):\n if maxVal == False or maxVal < el:\n maxVal = el\n else:\n helper(el)\n \n helper(t)\n return maxVal",
"def robSingle_2(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end + 1])\n curMax = 0\n preMax = 0\n for num in nums[start:end + 1]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax",
"def max_drawdown(returns):\n\n if returns.size < 1:\n return np.nan\n\n df_cum_rets = cum_returns(returns, starting_value=100)\n cum_max_return = df_cum_rets.cummax()\n\n return df_cum_rets.sub(cum_max_return).div(cum_max_return).min()",
"def get_max_time_vals(train_results):\n t_tr, t_te, t_lo, t_re = 0, 0, 0, 0\n for tres in train_results:\n t_tr += tres.time_train\n t_te += tres.time_test\n t_lo += tres.time_load\n t_re += tres.time_reduce\n n = len(train_results)\n return t_tr/n, t_te/n, t_lo/n, t_re/n",
"def main():\n\n n = int(input().strip())\n inputList = []\n for counter in range(n):\n List = input().strip().split(' ')\n inputList.append([int(i) for i in List])\n\n # Printing maximum count.\n print(maxAlignPoints(inputList))",
"def solve_bruteforce(self):\n max_value = -1\n for z in range(0, self.k):\n max_value = -1\n max_index = -1\n for i, v in enumerate(self.numbers):\n if v > max_value:\n max_index = i\n max_value = v\n del self.numbers[max_index]\n\n return max_value",
"def test_extract_max(self):\n for i in xrange(0,100):\n self.rebuild_all()\n real_max = self.real_heap.extract_max()\n #print \"real max is :\",real_max\n #print \"the copy max is :\",max(self.copy_heap)\n assert real_max == max(self.copy_heap)\n assert self.is_heap_valid(self.real_heap) == True\n \n if i%10==0:\n print \"Extraction of %d/%d is completed\"%(i,100)",
"def max_diffs(state):\n return best_action(state, pig_actions, Q_pig, win_diff)",
"def max_profit(prices: List[int]) -> int:",
"def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run",
"def test_get_maximum():\n assert get_maximum({\"numbers\": [4, 3, 2, 1]}) == {\"maximum\": 4}",
"def _count_level_events(count_list):\r\n if not len(count_list):\r\n return 0, 0, None\r\n number_events = 0\r\n number_multiple = 0\r\n max_multiple = count_list[0]\r\n for index, count in enumerate(count_list):\r\n if count_list[index] > 0:\r\n number_events = number_events + 1\r\n if count_list[index] > 1:\r\n number_multiple = number_multiple + 1\r\n if count_list[index] > max_multiple:\r\n max_multiple = count_list[index]\r\n return number_events, number_multiple, max_multiple"
] | [
"0.62596506",
"0.6250178",
"0.5989606",
"0.59886724",
"0.59800094",
"0.5964477",
"0.5924157",
"0.59153247",
"0.587332",
"0.58163136",
"0.58109987",
"0.5752196",
"0.57395124",
"0.5706765",
"0.5683033",
"0.5675851",
"0.5669799",
"0.5661942",
"0.56422496",
"0.56366104",
"0.56272537",
"0.55978036",
"0.55771285",
"0.5576232",
"0.5575976",
"0.55610085",
"0.5495193",
"0.54949117",
"0.5494849",
"0.54705244",
"0.5469306",
"0.54584855",
"0.54516035",
"0.54512465",
"0.54475945",
"0.5438339",
"0.543741",
"0.54354614",
"0.5433855",
"0.5426958",
"0.54229385",
"0.54206073",
"0.5420239",
"0.54193723",
"0.540499",
"0.5403806",
"0.53971475",
"0.538907",
"0.5367522",
"0.53654486",
"0.53515035",
"0.53377676",
"0.53368676",
"0.5336735",
"0.5335484",
"0.5333045",
"0.5332506",
"0.5331379",
"0.5327725",
"0.53163075",
"0.5315951",
"0.53115034",
"0.53097874",
"0.5303021",
"0.5303021",
"0.53029203",
"0.5300657",
"0.52988714",
"0.5297979",
"0.5296516",
"0.5288431",
"0.52868205",
"0.527335",
"0.52652735",
"0.52642035",
"0.5263796",
"0.5259143",
"0.52586186",
"0.5258088",
"0.5247839",
"0.5243524",
"0.5242296",
"0.52370405",
"0.52351797",
"0.52330863",
"0.52255964",
"0.522333",
"0.5222309",
"0.5216755",
"0.52137107",
"0.5212849",
"0.5211495",
"0.5209019",
"0.520562",
"0.52041394",
"0.5195053",
"0.5193533",
"0.51870054",
"0.5184458",
"0.51843727"
] | 0.7155858 | 0 |
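For reference, the assertions in the record above pin down geneutil.maxSlidingCount as the largest count of a character inside any fixed-length window, with the window defaulting to 5. A minimal sketch consistent with those assertions follows; the signature and the default window size are inferred from the tests, not taken from geneutil itself.

def maxSlidingCount(seq, char, window=5):
    """Return the maximum count of `char` in any length-`window` slice of `seq`."""
    if len(seq) <= window:
        return seq.count(char)
    # keep a running count: add the character entering the window and
    # subtract the one leaving it
    best = count = seq[:window].count(char)
    for i in range(window, len(seq)):
        count += (seq[i] == char) - (seq[i - window] == char)
        best = max(best, count)
    return best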
Entropy of a homopolymer | def test_entropy(self):
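		# a homopolymer contains a single residue type, so its Shannon entropy is 0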
seq1 = 'AAAA'
res = geneutil.sequenceEntropy(seq1)
self.assertAlmostEqual(res.entropy,0.0)
self.assertTrue(res.counts['A']==4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entropy(message):\n n = len(message)\n message = letter_freq(message)\n h = 0\n for n_i in message.values():\n p_i = n_i/n\n h += -p_i*(log2(p_i))\n return h",
"def entropy(self):\n Z = self.sum()\n assert (Z > 0), 'Non-normalizable factor (perhaps log factor?)' # also check for positivity?\n H = 0.0\n for x in np.nditer(self.t, op_flags=['readonly']):\n p = x/Z\n H += 0.0 if p==0 else -p*np.log(p)\n return H",
"def entropy(message):\n message = letter_freq(message)\n n = sum(message.values())\n h = 0\n for n_i in message.values():\n p_i = n_i / n\n h += -p_i * log2(p_i)\n return h",
"def _entropy(data):\n hist = np.array(PIL.Image.fromarray(data).histogram())\n hist = hist / hist.sum()\n hist = hist[hist != 0]\n return -np.sum(hist * np.log2(hist))",
"def H(self, data):\n entropy = 0\n\n if not data:\n return entropy\n\n for x in range(256):\n p_x = float(data.count(chr(x))) / len(data)\n if p_x > 0:\n entropy -= p_x * math.log(p_x, 2)\n\n return entropy",
"def entropy(dist):\n #dist = array([max(d,1e-100) for d in dist])\n dist = dist + 1e-20\n return dot(dist,(log(1.0/dist) * (1.0/log(2.0))).T)",
"def entropy(img):\n # by calculating\n histogram = img.histogram()\n histogram_size = sum(histogram)\n histogram = [float(h) / histogram_size for h in histogram]\n\n return -sum([p * math.log(p, 2) for p in histogram if p != 0])",
"def entropy(data):\n e = 0\n\n counter = collections.Counter(data)\n l = len(data)\n for count in counter.values():\n p_x = count / l\n e += - p_x * math.log2(p_x)\n\n return e",
"def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))",
"def image_entropy(img):\r\n hist = img.histogram()\r\n hist_size = sum(hist)\r\n hist = [float(h) / hist_size for h in hist]\r\n\r\n return -sum([p * math.log(p, 2) for p in hist if p != 0])",
"def conditional_entropy_hyper(self) -> float:\n pass",
"def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s",
"def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)",
"def entropy(self):\n raise NotImplementedError",
"def entropy(data):\n n, m = np.shape(data)\n data = np.tanh(data)\n data = data / np.sum(data, axis=0)\n a = data * 1.0\n a[np.where(data == 0)] = 0.000001\n\n e = (-1.0 / np.log(n)) * np.sum(data * np.log(a), axis=0)\n w = (1 - e) / np.sum(1 - e)\n return w",
"def entropy(self):\n ent = 0.0\n for f in self.byte_freq:\n if f > 0:\n freq = float(f) / self.byte_total\n ent = ent + freq * math.log(freq, 2)\n return -ent",
"def entropy(y):\r\n\r\n # INSERT YOUR CODE HERE\r\n value, count = np.unique(y,return_counts = True)\r\n Hy = 0.0\r\n prob = count.astype(float)/len(y)\r\n for p in prob:\r\n Hy += -(p)*(np.log2(p))\r\n return Hy\r\n raise Exception('Function not yet implemented!')",
"def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)",
"def computeEntropy(self, img):\n hist, bins = np.histogram(img.ravel(), bins=256, density=True)\n return scipy.stats.entropy(hist)",
"def entropy(p):\n assert (p >= 0).all()\n assert abs(np.sum(p)-1) < 1e-6\n return -np.sum(p*np.log(p+1e-12))",
"def entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))",
"def entropy( freq ):\n N = 0.0\n entropy = 0.0\n for x, v in freq.items( ):\n N += v\n entropy -= v * math.log( v, 2 )\n return (N * math.log( N, 2 ) + entropy) / N",
"def entropy(y):\n EPS = 0.0005\n\n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n pk = np.mean(y, axis=0)\n \n return - np.sum(pk * np.log(pk + EPS))",
"def compute_entropy(mat):\n\n total_entropy = 0\n\n count_motifs = count_nucleotides(mat)\n p = profile(count_motifs, mat.shape[0])\n\n for i in range(len(p[0, :])):\n entropy = 0\n for j in range(len(p[:, 0])):\n if p[j, i] != 0:\n entropy = entropy + p[j, i] * math.log(p[j, i], 2)\n total_entropy = total_entropy - entropy\n\n return total_entropy",
"def entropy(data):\n\n freqs = {}\n suma = len(data)\n\n for i in range(0, len(data)):\n freqs[data[i]] = 1.0 + freqs.get(data[i], 0)\n\n res = 0.0\n for i in freqs:\n res += (freqs[i] / suma) * log((freqs[i] / suma), 2)\n return -res",
"def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())",
"def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_",
"def entropy_py(p):\n return 2 ** np.sum(-p*np.log2(p+1e-10))",
"def calculate_entropy(prob):\n return -(prob * math.log(prob,2))",
"def entropy(self):\n return self._normal.entropy()",
"def shannon_entropy(probs):\n return -(\n math.sum([px * math.log2(px) if px != 0 and not (np.isclose(px, 0)) else 0 for px in probs])\n )",
"def shannon_entropy(c):\n\n c_normalized = c / float(np.sum(c))\n c_normalized_nonzero = c_normalized[np.nonzero(c_normalized)] # gives 1D array\n entropy = -sum(c_normalized_nonzero * np.log2(c_normalized_nonzero)) # unit in bits\n return entropy",
"def entropy(y):\n total = y.size\n value_counts = np.bincount(y).astype(\"float\")\n proportions = value_counts / y.size\n\n return sum(-i * np.log(i) for i in proportions if i)",
"def entropy(Y):\n\n temp = np.unique(Y, return_counts=True)\n uniq_Y = list(temp[0])\n Y_count = list(temp[1])\n \n total = sum(Y_count)\n\n ent = 0\n for elem in uniq_Y:\n prob = Y_count[uniq_Y.index(elem)] / total\n # print(\"prob:\", prob)\n ent -= (prob * (math.log2(prob)))\n # print(\"ent:\",ent)\n\n return ent",
"def entropy(p):\n ent = tf.where(p > np.finfo(np.float32).eps, -p * tf.log(p), tf.zeros_like(p))\n ent = tf.reduce_sum(ent, axis=1)\n return ent",
"def entropy(d, total, word_count):\n\t# Entropie je - Sum_morf p(morf) * log_2 p(morf)\n\t# p(morf) = c(morf) / c(all)\n\te = 0\n\tfor count in d.values():\n\t\tp = count/total\n\t\ttype_e = - p * log2(p)\n\t\te += type_e * count\n\treturn e / word_count",
"def entropy(y):\n p = _proba(y)\n return (-p * np.log2(p)).sum()",
"def conditional_entropy(self) -> float:\n pass",
"def entropy(self, f):\n f_log = -torch.log(self.einsum(\"q,q->q\", [f, 1 / self.w]))\n return self.einsum(\"q,q->\", [f, f_log])",
"def _entropy(self, y):\n # Get size\n n = y.shape[0]\n summation = 0\n\n # Summatory\n for c_i in np.unique(y):\n prob = sum(y == c_i) / float(n)\n summation += prob * np.log2(prob)\n\n return -summation",
"def entropy(s):\n p, lns = Counter(s), float(len(s))\n return -sum( count/lns * math.log(count/lns, 2) for count in p.values())",
"def _conditional_entropy_compute(confmat: Tensor) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n total_occurrences = confmat.sum()\n p_xy_m = confmat / total_occurrences\n p_y = confmat.sum(1) / total_occurrences\n p_y_m = p_y.unsqueeze(1).repeat(1, p_xy_m.shape[1])\n return torch.nansum(p_xy_m * torch.log(p_y_m / p_xy_m))",
"def entropy(self):\n return -np.sum(self.log_likelihoods * np.exp(self.log_likelihoods))",
"def entropy(p: torch.Tensor):\n nz = (p > 0).to(p.device)\n\n eps = torch.finfo(p.dtype).eps\n p_stable = p.clone().clamp(min=eps, max=1 - eps)\n\n out = torch.where(\n nz,\n p_stable * torch.log(p_stable),\n torch.tensor(0.0, device=p.device, dtype=torch.float),\n )\n\n return -(out).sum(-1)",
"def shannon_entropy(counts):\n freq = np.array(counts) * 1.0 / np.sum(counts)\n return -np.sum([f * np.log2(f) for f in freq if f != 0])",
"def entropy(self, params):\n log_std = params[:, :, 1]\n return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)",
"def _entropy(self):\n return self.rv.entropy(*self._pymc_dists_to_value(self.args), **self.kwds)",
"def entropy(message):\n # Should the import be here or should it be at the top of the page?\n freq_dict = letter_freq(message)\n length_message = len(message)\n bit_entropy = 0\n for occurrences in freq_dict.values():\n frequency = occurrences / length_message\n bit_entropy = bit_entropy - frequency * log2(frequency)\n return bit_entropy",
"def entropy(self, priors=None):\n def entropy_f(x):\n x[x != 0] *= np.log(x[x != 0])\n return -x.sum(axis=0)\n return self.utility(entropy_f, priors)",
"def ShannonEntropy(self,s):\n e = s[np.nonzero(s)]**2 * np.log(s[np.nonzero(s)]**2)\n return np.sum(e)",
"def entropy(self):\n\n \"\"\"Gets the first neighbours, which are the first 2*r+1 cells.\"\"\"\n current_neighbours = []\n amount = [0] * self.k ** (2 * self.r + 1)\n for i in range(2 * self.r + 1):\n current_neighbours.append(self.config[self.t, i % self.width])\n\n \"\"\"Calculates the rule and adds one to it's amount. It then removes the\n leftmost cell and adds a cell to the right.\"\"\"\n for i in range(len(self.config[self.t]) - 1):\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount) - 1 - rule] += 1\n current_neighbours.pop(0)\n current_neighbours.append(\n self.config[self.t, (2 * self.r + 1 + i) % self.width])\n\n \"\"\"Calculates the rule for the last neighbourhood.\"\"\"\n rule = 0\n for j in range(len(current_neighbours)):\n rule += int(current_neighbours[j] *\n self.k ** ((2 * self.r + 1) - j - 1))\n amount[len(amount)-1 - rule] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(len(amount)):\n if(amount[i] != 0):\n probability = amount[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_entropy = (self.average_entropy *\n self.t + shannon) / (self.t + 1)",
"def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )",
"def calc_entropy(frequency_wn, temperature):\r\n\tentropy = 0\r\n\tfrequency = [entry * SPEED_OF_LIGHT for entry in frequency_wn]\r\n\tfor entry in frequency:\r\n\t\tfactor = ((PLANCK_CONSTANT*entry)/(BOLTZMANN_CONSTANT*temperature))\r\n\t\ttemp = factor*(1/(math.exp(factor)-1)) - math.log(1-math.exp(-factor))\r\n\t\ttemp = temp*GAS_CONSTANT/4.184\r\n\t\tentropy = entropy + temp \r\n\treturn entropy",
"def __compute_entropy_probability(probability:np.ndarray) -> float:\n entropy = -np.sum(probability * np.log2(probability))\n return entropy",
"def div(self):\n freqList = [i / sum(self.has.values()) for i in self.has.values()]\n entropies = [i * math.log(i, 2) for i in freqList]\n entropy = -sum(entropies)\n return entropy",
"def entropyDistributed(distribution):\n return -sum(map(lambda p : p * log(p, 2), distribution))",
"def entropy(Y):\n unique, count = np.unique(Y, return_counts=True, axis=0)\n prob = count/len(Y)\n en = np.sum((-1)*prob*np.log2(prob))\n return en",
"def entropy_root(self):\n\t\tif self.size() == 0:\n\t\t\treturn 0\n\n\t\tcounter = collections.Counter(self.y)\n\t\treturn entropy([x[1] for x in counter.items()], self.size())",
"def _entropy(P):\n\n #TODO remove the \"+ 1e-20\" inside the log2 computation\n # it's just a hack to avoid to compute log2(0)\n ent = -1.0 * np.sum(P * np.log2(P+1e-20), axis=0)\n return ent",
"def entropy_(P):\n res = 0.0\n\n mask = P != 0.0 # avoid 0 in log\n f = lambda x: x*np.log2(x)\n # map-reduce strategy (likely to be more optimized than loops)\n temp = list(map(f, P[mask]))\n res = -np.sum(temp, dtype=float)\n return res",
"def entropycell(self):\n cells = [0] * self.k\n for i in range(self.width):\n cells[int(self.config[self.t, i])] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(self.k):\n if(cells[i] != 0):\n probability = cells[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_cell = (self.average_cell * self.t + shannon) / (self.t + 1)",
"def entropy(self, y):\n n = y.size\n if n <= 1:\n return 0\n\n labels, counts = unique(y, return_counts=True)\n\n if counts.size <= 1:\n return 0\n\n probs = counts / n\n entropy = -sum([p * log(p, 2) for p in probs])\n return entropy",
"def entropy(x):\n x_max, x_min = x.max(), x.min()\n assert (x_min >= 0) and (x_max <= 1)\n if x_min == x_max == 0:\n return np.float32(0.)\n # Take only non-zero values as log(0) = 0 :\n nnz_x = x[np.nonzero(x)]\n entr = -np.sum(nnz_x * np.log2(nnz_x))\n\n return entr",
"def compute_entropy(m, nan_as_zero=True):\n # normalize rows (the sum of each row must be 1)\n p = m / m.sum(axis=1, keepdims=True)\n # compute per row entropy (in base 2)\n e = entr(p).sum(axis=1) / np.log(2)\n if nan_as_zero:\n e = np.nan_to_num(e)\n return e",
"def entropy(out, dim=1, reduce='mean'):\n log_prob = F.log_softmax(out, dim=dim)\n h = -torch.sum(log_prob.exp() * log_prob, dim=dim)\n if reduce == 'none':\n return h\n if reduce == 'mean':\n return h.mean()\n if reduce == 'sum':\n return h.sum()",
"def entropy_function(c, n):\n return -(c*1.0/n)*math.log(c*1.0/n,2)",
"def joint_entropy(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n return np.nansum(np.multiply(P_nan, np.log2(1 / P_nan)))",
"def getEntropy(self, pVal, nVal):\n totVal = pVal + nVal\n if pVal == 0 or nVal == 0:\n return 0\n\n pProb = pVal/totVal\n nProb = 1 - pProb\n entropy = - (pProb * math.log(pProb, 2) + nProb * math.log(nProb, 2))\n return entropy",
"def calculate_entropy():\n\tstat = {} # dictionary - chars and number of repetitions\n\tallchar = 0.0 # total number of characters\n\tentropy = 0.0 # initial entropy\n\n\tfor line in sys.stdin.readlines():\n\t\tline = re.sub(r'\\s', '', line)\n\t\tfor znak in line:\n\t\t\tif znak in stat:\n\t\t\t\tstat[znak] += 1\n\t\t\telse:\n\t\t\t\tstat[znak] = 1\n\t\t\tallchar += 1\n\n\tfor znak in stat:\n\t\tstat[znak] = stat[znak]/allchar\n\t\tentropy += stat[znak] * log(stat[znak], 2)\n\n\tentropy *= -1\n\treturn entropy",
"def mixture_entropy_brute_force(self):\n Z, sum_wlogw = 0, 0\n\n # Naive implementation of measuring the entropy is\n # p(c) = w(c) / Z with Z = sum_c w(c)\n # H_c = -sum_c p(c) * log2(p(c))\n # This can be transformed to a more stable implementation:\n # H_c = log2(Z) - 1/Z * sum_c w(c) * log2(w(c))\n \n for _, weight_c in self._iterate_mixtures():\n if weight_c > 0:\n Z += weight_c\n sum_wlogw += weight_c * np.log2(weight_c)\n \n if Z == 0:\n return 0\n else:\n return np.log2(Z) - sum_wlogw / Z",
"def entropy(counts):\n assert (counts >= 0).all()\n probs = counts / counts.sum()\n probs = probs[probs > 0] # Avoid log(0)\n return - np.sum(probs * np.log2(probs))",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(probabilities):\n return -(sum([p * log(p, 2) if p > 0 else 0 for p in probabilities]))",
"def entropy(self, X):\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X, index=[str(i) for i in range(len(X))])\n K = self._posterior_covariance(X)\n L = np.linalg.cholesky(K)\n D = len(X)\n return np.sum(np.log(np.diag(L))) + 0.5 * D * np.log(2*np.pi*np.exp(1))",
"def calc_entropy(column):\n # Compute the counts of each unique value in the column\n counts = numpy.bincount(column)\n # Divide by the total column length to get a probability\n probabilities = counts / len(column)\n \n # Initialize the entropy to 0\n entropy = 0\n # Loop through the probabilities, and add each one to the total entropy\n for prob in probabilities:\n if prob > 0:\n entropy += prob * math.log(prob, 2)\n \n return -entropy",
"def calc_entropy(column):\r\n # Compute the counts of each unique value in the column\r\n counts = numpy.bincount(column)\r\n # Divide by the total column length to get a probability\r\n probabilities = counts / len(column)\r\n \r\n # Initialize the entropy to 0\r\n entropy = 0\r\n # Loop through the probabilities, and add each one to the total entropy\r\n for prob in probabilities:\r\n if prob > 0:\r\n entropy += prob * math.log(prob, 2)\r\n \r\n return -entropy",
"def entropy(self) -> float:\n probabilities = np.array([len(players) for players in self.answers.values()])\n probabilities = probabilities / sum(probabilities)\n return sc.stats.entropy(probabilities)",
"def entropyRandom(stream):\n prob = 1.0 / len(stream)\n return -(prob * log(prob, 2)) * len(stream)",
"def crossEntropy(p_m1):\n p_m2 = 1 - p_m1\n D = - p_m1*math.log(p_m1) - p_m2*math.log(p_m2)\n return D",
"def get_entropy(distribution, samples):\n entropy = -tf.reduce_sum(distribution.log_prob(samples), axis=1)\n return entropy",
"def entropy(target_col):\n elements,counts = np.unique(target_col,return_counts = True)\n entropy = np.sum([(-counts[i]/np.sum(counts))*np.log2(counts[i]/np.sum(counts)) for i in range(len(elements))])\n return entropy",
"def entropy(self):\n return self._entropy_func",
"def _graph_fn_entropy(distribution):\n return distribution.entropy()",
"def entropy(self, policy_params):\n return self.head.entropy(policy_params)",
"def chl_entropy(y, base=2):\n p,bins = histogram(y, bins=unique(y)) # don't use 'Normed' feature, since that includes the bin-width!\n p = p[p!=0]/float(len(y))\n S = -1.0*sum(p*log(p))/log(base)\n return S",
"def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)",
"def entropy(p_list):\n assert len(p_list) > 0\n E = 0.0\n for p in p_list:\n if p == 0.0:\n continue\n E += p*math.log(p)\n return E",
"def entropy(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.entropy, _crank16.entropy, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)",
"def __compute_entropy(row:pd.Series) -> float:\n probability = row.value_counts(normalize=True).values\n entropy = EntropicSolver.__compute_entropy_probability(probability)\n return entropy",
"def entropy(a):\n a = a.upper()\n\n freq = collections.defaultdict(int) # int() is the default constructor for non existent item, and returns 0\n for c in a:\n freq[c] = freq[c] + 1\n\n e = 0.0\n for f in freq.values():\n if f:\n p = f / len(a)\n e += p * math.log(p)\n\n return -e",
"def spatial_entropy(map_):\n map_ = map_ / np.sum(map_)\n return -1 * np.sum(map_ * np.log(map_))",
"def pseudo_entropy_global(self, f):\n f_w = self.einsum(\"q,q->q\", [f, 1 / self.w])\n return self.rho(f) - self.einsum(\"q,q->\", [f, f_w])",
"def log_entropy(dm):\n size = len(dm)\n entropy = 0\n w, v = np.linalg.eig(dm)\n for n in range(size):\n if w[n] != 0:\n entropy = entropy - w[n] * np.log2(w[n])\n return entropy",
"def entropy(string):\n p, lns = Counter(string), float(len(string))\n return -sum(count/lns * math.log(count/lns, 2) for count in p.values())",
"def get_entropy(dictionary):\n my_sum = 0\n weighted_sum_of_logs = 0\n for freq in dictionary.values():\n if freq:\n my_sum += freq\n weighted_sum_of_logs += freq * math.log(freq)\n return math.log(my_sum) - weighted_sum_of_logs / my_sum",
"def entropy(l):\n\n probabilities = np.bincount(l) / len(l)\n with np.errstate(divide='ignore'): # ignore log(0) errors, we'll handle\n log_probabilities = np.log2(probabilities)\n log_probabilities[~np.isfinite(log_probabilities)] = 0\n return -np.sum(probabilities * log_probabilities)",
"def entropy(y,w):\r\n\r\n\t# my original entropy function commented below is not working as desired. The below implementation is based on from Sai Ram Chappidi's explanation\r\n\r\n # y_partition = partition(y)\r\n # elements,counts = np.unique(y,return_counts = True)\r\n # entropy=0\r\n\r\n # for i in range(len(elements)):\r\n # entropy += ((-(np.sum(w[y_partition[i]])))/np.sum(w))*np.log2(np.sum(w[y_partition[i]])/np.sum(w))\r\n # return entropy\r\n\r\n entropy = 0\r\n # two hypothesis cases 0,1\r\n h = {0: 0, 1: 0}\r\n leny = len(y)\r\n for i in range(leny):\r\n # if y is 0 add 0 to the weight\r\n if y[i] == 0:\r\n h[0] += w[i]\r\n # if y is 1 add 1 to the weight\r\n elif y[i] == 1:\r\n h[1] += + w[i]\r\n # summing all the weighted values \r\n val_sum = h[0] + h[1]\r\n\r\n # entropy calculation\r\n for j in range(len(h)):\r\n h[j] = h[j]/val_sum\r\n # to prevent divide by zero\r\n if h[j] != 0:\r\n entropy += h[j] * np.log2(h[j])\r\n entropy = -(entropy)\r\n return entropy",
"def entropy ( target_array ):\n return -1 * sum (\n [\n pipe ( np.sum ( target_array == value ) / len ( target_array ), lambda ratio: ratio * np.log ( ratio ) )\n for value in set ( target_array )\n ]\n ) # End entropy()",
"def compute_entropy(self, word:str) -> float:\n if self.game_matrix is None:\n return self.word_matrix.loc[word].pipe(self.__compute_entropy)\n return self.game_matrix.loc[word].pipe(self.__compute_entropy)",
"def entropy(self, text):\n\n# text = self.myReplacer.replace(text)\n# text = self.tokenizer.tokenize(text)\n new_text = []\n for word in text:\n if word.count('\\'') > 0:\n words = word.split('\\'')\n for w in words:\n new_text.append(w)\n else:\n new_text.append(word)\n text = new_text\n \n e = 0.0\n lenth = len(text)\n if lenth == 0:\n return 0\n elif lenth < self._n:\n current_n = lenth\n else:\n current_n = self._n\n \n for i in range(current_n - 1, len(text)):\n context = tuple(text[(i - current_n + 1) : i])\n token = text[i]\n e += self.logprob(token, context)\n return e"
] | [
"0.76097333",
"0.7541695",
"0.7539285",
"0.7420453",
"0.74041563",
"0.73959124",
"0.73686385",
"0.73681575",
"0.73568857",
"0.72097194",
"0.7157526",
"0.7145899",
"0.71405953",
"0.7108745",
"0.7095906",
"0.7078543",
"0.7067588",
"0.70662063",
"0.7055253",
"0.7043006",
"0.69989175",
"0.69886166",
"0.6976024",
"0.6972817",
"0.6956938",
"0.6948809",
"0.6939645",
"0.69337165",
"0.69177926",
"0.69126815",
"0.68906146",
"0.6889553",
"0.6884266",
"0.6881909",
"0.68771386",
"0.68759406",
"0.6873457",
"0.6870386",
"0.68604445",
"0.6858294",
"0.6857163",
"0.68477964",
"0.6835201",
"0.6831061",
"0.68195266",
"0.6812758",
"0.6807599",
"0.68047523",
"0.68035185",
"0.6789179",
"0.67769665",
"0.6775937",
"0.6756994",
"0.6746602",
"0.6740069",
"0.67346525",
"0.67299885",
"0.6726124",
"0.6714497",
"0.67022544",
"0.6702189",
"0.6681279",
"0.66784644",
"0.66580695",
"0.6646936",
"0.6642317",
"0.6641925",
"0.66296446",
"0.66280895",
"0.6611926",
"0.6606756",
"0.66035473",
"0.66035473",
"0.66027963",
"0.6602147",
"0.6592152",
"0.6591876",
"0.6584448",
"0.65830344",
"0.65694314",
"0.6568952",
"0.65688246",
"0.65596896",
"0.6546837",
"0.65443194",
"0.65394616",
"0.6527535",
"0.6522276",
"0.65191656",
"0.6508012",
"0.65040994",
"0.6500037",
"0.648587",
"0.64843065",
"0.6481273",
"0.6476398",
"0.6467684",
"0.6446372",
"0.6444825",
"0.64404166",
"0.6414005"
] | 0.0 | -1 |
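The record above expects geneutil.sequenceEntropy to return an object exposing .entropy (the Shannon entropy of the residue composition, which is 0 for a homopolymer) and .counts (per-residue counts). A minimal sketch consistent with that contract follows; the return type and the log base are assumptions, not geneutil's actual API.

import math
from collections import Counter, namedtuple

EntropyResult = namedtuple('EntropyResult', ['entropy', 'counts'])

def sequenceEntropy(seq):
    """Shannon entropy (bits) of the residue composition of seq."""
    counts = Counter(seq)
    n = len(seq)
    # -sum p*log2(p); a single residue type gives p = 1 and entropy 0
    entropy = -sum((c / n) * math.log2(c / n) for c in counts.values())
    return EntropyResult(entropy, counts)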
Determine what moves are safe for a player to make. Returns a list of valid actions that player p can make in the given state. | def safe_moves(p, state):
x, y = state['players'][p]['x'], state['players'][p]['y']
moves = []
actions = [(1, 0, 'east'),
(-1, 0, 'west'),
(0, -1, 'north'),
(0, 1, 'south')]
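    # state['cells'] maps x -> {y: ...} for cells that are already claimed;
    # a move is safe only when the destination cell is unclaimed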
for dx, dy, move in actions:
tx, ty = str(x + dx), str(y + dy)
if tx not in state['cells'] or ty not in state['cells'][tx]:
moves.append(move)
return moves | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actions(self, state):\r\n\r\n valid_actions = []\r\n # What kind of an action it will be\r\n # 1. Add a new piece to the game.\r\n # 2. Move and existing piece.\r\n new_piece, player = self.new_or_old_piece(state)\r\n\r\n # If we want to place a new piece in the game\r\n if new_piece:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == '-':\r\n # (player, to, from)\r\n # Since we are introducing a new piece it's coming from\r\n # an imaginary position i.e. (9, 9)\r\n valid_actions.append((player, (i, j), (9, 9)))\r\n\r\n # when we moving an existing piece in the game\r\n else:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] != '-':\r\n # Now check for places this player can move from this position\r\n for ii, jj in self.valid_adjacent_positions[(i, j)]:\r\n if state[ii][jj] == '-':\r\n # (player, to, from)\r\n valid_actions.append((state[i][j], (ii, jj), (i, j)))\r\n\r\n return copy.deepcopy(valid_actions)",
"def get_safe_actions(state):\n safe = set()\n if state.player_has_armor(state.ptm):\n\n for action in {U, D, L, R}:\n r1, c1 = TronProblem.move(state.player_locs[state.ptm], action)\n if not (\n state.board[r1][c1] == CellType.WALL\n or TronProblem.is_cell_player(state.board, (r1, c1))\n ):\n safe.add(action)\n return safe\n else:\n for action in {U, D, L, R}:\n r1, c1 = TronProblem.move(state.player_locs[state.ptm], action)\n if not (\n state.board[r1][c1] == CellType.BARRIER\n or state.board[r1][c1] == CellType.WALL\n or TronProblem.is_cell_player(state.board, (r1, c1))\n ):\n safe.add(action)\n return safe",
"def actions(self, state):\n MovementList = []\n #Check if the agent is able to move a box (Left, Down, Right, Up) \n #without moving it into a taboo cell or pushing two blocks (Invalid move)\n #then move the box in the given direction.\n \n possible_moves = [\"Up\", \"Down\", \"Left\", \"Right\"]\n \n worker = state[0]\n boxes = state[1]\n \n # Iterate throguh the moves and make sure they satify constraints\n for move in possible_moves:\n if (move_coords(worker, move) not in self.walls):\n if (move_coords(worker, move) in boxes):\n if move_coords(move_coords(worker, move), move) in self.taboo:\n pass\n else: \n MovementList.append(move)\n else:\n MovementList.append(move)\n \n return MovementList",
"def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves",
"def getLegalActions(self, state):\n actions = [i for i in range(-5, 6)]\n for action in actions:\n if action > state[0] or action < -state[1]:\n actions.remove(action)\n return actions",
"def getPossibleActions(self, state):\n if self.weHaveBall(state):\n return [('hold',)] + [('pass', i) for i in range(1, self.keeperNum)]\n else:\n return [None]",
"def findActions(problem, state):\r\n size = len(problem) - 1\r\n legalActions = []\r\n if state[0] > 0 and problem[state[0] - 1][state[1]] != 'w':\r\n legalActions.append('N')\r\n if state[0] < size and problem[state[0] + 1][state[1]] != 'w':\r\n legalActions.append('S')\r\n if state[1] > 0 and problem[state[0]][state[1] - 1] != 'w':\r\n legalActions.append('W')\r\n if state[1] < size and problem[state[0]][state[1] + 1] != 'w':\r\n legalActions.append('E')\r\n return legalActions",
"def get_possible_actions(self, state):\n return [LEFT, DOWN, RIGHT, UP]",
"def getLegalActions( state ): ## This is being called by the GameState.getLegalActions function and uses self as the state argument.\n return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )## REF-211 calls the getPossibleActions method in the Actions class.",
"def get_all_possible_moves(self, state):\n move_list = []\n done_finding_moves = False\n any_non_pass_moves = False\n while not done_finding_moves:\n try:\n m = next(self.move_generator) # Gets a (move, state) pair.\n # print(\"next returns: \",m[0]) # Prints out the move. For debugging.\n if m[0] != 'p':\n any_non_pass_moves = True\n move_list.append(m) # Add the move to the list.\n except StopIteration as e:\n done_finding_moves = True\n if not any_non_pass_moves:\n move_list.append(('p',state))\n return move_list",
"def actions(state):\n action_list = []\n\n if state.active_color == cc.WHITE_ACTIVE:\n active_pieces = cc.WHITE_PIECES\n elif state.active_color == cc.BLACK_ACTIVE:\n active_pieces = cc.BLACK_PIECES\n else:\n raise Exception(\"Actions: Invalid Active Color\")\n # Check for states where castling can occur\n castles = gm.get_castle(state)\n if castles[0]: # Kingside Castle\n action_list.append(cc.Action(piece=cc.W_KING, castle=cc.CASTLE_KINGSIDE))\n if castles[1]: # Queenside Castle\n action_list.append(cc.Action(piece=cc.W_KING, castle=cc.CASTLE_QUEENSIDE))\n\n # Loop over the board, finding the moves for each piece\n for rank in range(8):\n for column in range(8):\n if state.board[rank, column] in active_pieces:\n p = gm.Piece(state.board[rank, column], (rank, column))\n action_list.extend(p.get_moves(state))\n\n # Handle En passant attacks\n for action in action_list:\n if action.end == state.en_passant:\n action.capture = True\n\n return action_list",
"def actions(self, state):\n\n actions = []\n \n # if its player 1's turn\n if state.maxs_turn==True:\n # look through all the squares on the board\n for coords in state.gameState:\n # if its a rebel append allowable move and attack actions\n if state.gameState[coords]=='R':\n if state.gameState[(coords[0]-1, coords[1])]== ' ':\n actions.append(\"Move: Rebel @ {} --> {}\".format(coords, (coords[0]-1, coords[1])))\n if ((coords[0]-1, coords[1]+1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]+1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]+1)))\n if ((coords[0]-1, coords[1]-1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]-1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]-1)))\n \n # if its a jedi append allowable move and attack actions\n elif state.gameState[coords]=='J':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n # walk in each direction until reaching the edge of board, or a player\n while (coord in state.gameState) and (state.gameState[coord] == ' '):\n actions.append(\"Move: Jedi @ {} --> {}\".format(coords, coord))\n coord = (coord[0]+direction[0], coord[1]+direction[1])\n # if we ran into a sith we can attack\n if (coord in state.gameState) and (state.gameState[coord] == 'S'):\n actions.append(\"Attack: Jedi @ {} --> Sith @ {}\".format(coords, coord))\n \n else:\n for coords in state.gameState:\n if state.gameState[coords]=='S':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n if (coord in state.gameState) and (state.gameState[coord] == ' '):\n actions.append(\"Move: Sith @ {} --> {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'R'):\n actions.append(\"Attack: Sith @ {} --> Rebel @ {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'J'):\n actions.append(\"Attack: Sith @ {} --> Jedi @ {}\".format(coords, coord))\n \n\n\n if len(actions)==0:\n actions.append(\"Pass\")\n \n actions.sort()\n \n return actions",
"def getLegalActions(self, state):\n return self.actionFn(state)",
"def get_pawn_moves(self, state):\n pawn_moves = []\n\n if self.color == cc.WHITE_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_UP)\n forward_2 = add_vectors(self.coord, cc.V_UP_2)\n attacks = get_crawler_moves(self.coord, cc.W_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_2\n promo_rank = cc.RANK_8\n promo_pieces = cc.WHITE_PROMO\n enemy_set = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_DOWN)\n forward_2 = add_vectors(self.coord, cc.V_DOWN_2)\n attacks = get_crawler_moves(self.coord, cc.B_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_7\n promo_rank = cc.RANK_1\n promo_pieces = cc.BLACK_PROMO\n enemy_set = cc.WHITE_PIECES\n else:\n raise Exception(\"get_pawn_moves: Invalid Piece Color\")\n\n if validate_move(forward_1) and state.board[forward_1] == cc.NO_PIECE:\n if forward_1[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1))\n if self.coord[0] == starting_rank and validate_move(forward_2) and state.board[forward_2] == cc.NO_PIECE:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_2, en_p=forward_1))\n\n for attack in attacks:\n if state.board[attack] in enemy_set:\n if attack[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n # Make sure Pawns can attack en_passant squares\n elif attack == state.en_passant:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n\n return pawn_moves",
"def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves",
"def actions(self, state):\n\n possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT']\n index_blank_square = self.find_blank_square(state)\n\n # implement actions here \n\n return possible_actions",
"def getLegalActions(self,state):\n return self.actionFn(state)",
"def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves",
"def actions(self, state, enemy=False):\n vaccinate_actions = []\n quarantine_actions = []\n medics = 1\n police = 2\n if not enemy:\n for (i, j) in self.zoc:\n if state[(i, j)] == 'H':\n vaccinate_actions.append(('vaccinate', (i, j)))\n if (state[(i, j)] == 'S1' or state[(i, j)] == 'S2') and isDanger(state, i, j, self.zoc):\n quarantine_actions.append(('quarantine', (i, j)))\n else:\n for (i, j) in self.zoc_enemy:\n if state[(i, j)] == 'H':\n vaccinate_actions.append(('vaccinate', (i, j)))\n if (state[(i, j)] == 'S1' or state[(i, j)] == 'S2') and isDanger(state, i, j, self.zoc_enemy):\n quarantine_actions.append(('quarantine', (i, j)))\n\n vaccinate_actions_pre = powerset1(vaccinate_actions, medics)\n quarantine_actions_pre = powerset1(quarantine_actions, police)\n vaccinate_actions_tup = tuple(vaccinate_actions_pre)\n quarantine_actions_tup = tuple(quarantine_actions_pre)\n\n if ((len(vaccinate_actions_tup) == 0) and (len(quarantine_actions_tup) != 0)):\n possible_actions = quarantine_actions_tup\n elif ((len(quarantine_actions_tup) == 0) and (len(vaccinate_actions_tup) != 0)):\n possible_actions = vaccinate_actions_tup\n elif ((len(quarantine_actions_tup) == 0) and (len(vaccinate_actions_tup) == 0)):\n possible_actions = [()]\n else:\n possible_actions = tuple()\n for action_p in quarantine_actions_tup:\n for action_m in vaccinate_actions_tup:\n action_m += action_p\n possible_actions += (action_m, action_p)\n possible_actions += vaccinate_actions_tup + quarantine_actions_tup\n return tuple(possible_actions)",
"def get_possible_actions(self, state):\n return tuple(self._transition_probs.get(state, {}).keys())",
"def actions(self, state):\n MovementList = []\n #Check if the agent is able to move a box (Left, Down, Right, Up) \n #without moving it into a taboo cell or pushing two blocks (Invalid move)\n #then move the box in the given direction.\n \n \n moves = [\"Up\", \"Down\", \"Left\", \"Right\"]\n opposite_moves = [\"Down\", \"Up\", \"Right\", \"Left\"]\n worker = state[0]\n boxes = state[1]\n temp_warehouse = self.warehouse.copy(worker, boxes)\n no_go = self.taboo.copy()\n walls = self.walls.copy()\n for wall in walls:\n no_go.append(wall)\n \n accessible = []\n \n for box in boxes:\n for i in range(len(moves)):\n surrounding_space = move_coords(box, opposite_moves[i])\n if can_go_there(temp_warehouse, move_coords(box, opposite_moves[i])):\n accessible.append((surrounding_space, moves[i]))\n \n for space_move in accessible:\n space = space_move[0]\n move = space_move[1]\n box_push_space = move_coords(move_coords(space, move), move)\n if (box_push_space in no_go) or (box_push_space in boxes):\n continue\n else:\n MovementList.append((move_coords(space, move), move)) \n print(\"Movement List: \", MovementList)\n \n if len(accessible) < 0: \n # Iterate throguh the moves and make sure they satify constraints\n for move in moves:\n if (move_coords(worker, move) not in no_go):\n if (move_coords(worker, move) in boxes):\n if move_coords(move_coords(worker, move), move) not in boxes:\n MovementList.append((move_coords(worker, move), move)) \n else:\n MovementList.append((move_coords(worker, move), move))\n \n \n \n \n \n return MovementList",
"def _available_actions(self, state, colour):\n available_actions = []\n if colour == \"white\":\n stacks = +state.state\n else:\n stacks = -state.state\n for square in stacks.keys():\n available_actions.append((\"BOOM\", square))\n for square, n in stacks.items():\n for d in range(1, n + 1):\n for next_square in self._NEXT_SQUARES(square, d):\n if next_square in stacks or state.state[next_square] == 0:\n for m in range(1, n + 1):\n move_action = (\"MOVE\", m, square, next_square)\n available_actions.append(move_action)\n return available_actions",
"def get_legal_actions(self, index):\n actions = []\n agent = self.agent_states[index]\n for action in ACTIONS:\n pos = agent.pos[0] + action[0], agent.pos[1] + action[1]\n if MAP[pos[0]][pos[1]] not in WALL:\n actions.append(action)\n return actions",
"def step(self, state):\n mcts_action = self.mcts_search(state)\n policy = [(action, (1.0 if action == mcts_action else 0.0))\n for action in state.legal_actions(self.player_id())]\n\n return policy, mcts_action",
"def legal_moves(player, board):\n return [sq for sq in Othello.squares() if Othello.is_legal(sq, player, board)]",
"def possible_moves(state_int):\n assert isinstance(state_int, int)\n field = decode_binary(state_int)\n return [idx for idx, col in enumerate(field) if len(col) < GAME_ROWS]",
"def get_legal_moves(self, player):\r\n move_list = []\r\n if self._phase == GamePhase.SETUP:\r\n return self._setup_legal_moves(player)\r\n elif self._phase == GamePhase.MOVE:\r\n return self._move_legal_moves(player)\r\n elif self._phase == GamePhase.BUILD:\r\n return self._build_legal_moves(player)\r\n return move_list",
"def getLegalMovingActions(state,agent):\n actions = state.getLegalActions(agent)\n # Removing 'Stop'\n if Directions.STOP in actions:\n actions.remove(Directions.STOP)\n return actions",
"def actions(self, player):\n snake = self.snakes.get(player)\n head = snake.position[0]\n return [m for m in MOVES\n if utils.isOnGrid(m.apply(head), self.grid_size)\n and snake.authorizedMove(m)]",
"def move(self, state_prev, state, reward):\n actions = []\n if state:\n self.select_player(state['ball'])\n else:\n self.selected = NUM_TEAM//2\n for i, player in enumerate(self.players):\n move = player.move(state_prev, state, reward, self.selected)\n if move != 'FORM':\n actions.append(move)\n else:\n actions.append(self.formation_dir(i))\n return actions",
"def legal_actions(self):\n\n if self.player == TERMINAL: # is terminal\n return []\n elif self.player == CHANCE: # is chance\n if self.phase == PREFLOP: # preflop\n return [Action(deal=[i, j, k, l])\n for i in range(52) for j in range(52)\n for k in range(52) for l in range(52)\n if len(set([i, j, k, l])) == 4]\n elif self.phase == FLOP: # flop\n return [Action(deal=[i, j, k])\n for i in range(52) for j in range(52)\n for k in range(52) if len(set([i, j, k])) == 3]\n elif self.phase == TURN: # turn\n return [Action(deal=[i]) for i in set(range(52)) - set(self.pub)]\n else: # river\n return [Action(deal=[i]) for i in set(range(52)) - set(self.pub)]\n else:\n if self.status[self.player] == RAISED2TIMES: # can't raise\n return [Action(action=FOLD, player=self.player),\n Action(action=CALL, player=self.player)]\n else: # can raise\n if self.phase == PREFLOP or self.phase == FLOP: # preflop or flop\n return [Action(action=FOLD, player=self.player),\n Action(action=CALL, player=self.player),\n Action(action=RAISE, bet=1, player=self.player)]\n else: # turn or river\n return [Action(action=FOLD, player=self.player),\n Action(action=CALL, player=self.player),\n Action(action=RAISE, bet=2, player=self.player)]",
"def __valid_token_moves(self, state, next_state, token_id):\r\n if next_state == False:\r\n return [False, False, False, False]\r\n\r\n current_pos_token = state.state[0][token_id]\r\n next_pos_token = next_state.state[0][token_id]\r\n\r\n current_opponent_states = state.state[1:]\r\n next_opponent_states = next_state.state[1:]\r\n\r\n moved_out = (current_pos_token == -1) and (next_pos_token != -1)\r\n into_goal = (current_pos_token != 99) and (next_pos_token == 99)\r\n send_opp_home = self.__will_send_opponent_home(np.array(current_opponent_states), np.array(next_opponent_states))\r\n send_self_home = (current_pos_token != -1) and (next_pos_token == -1)\r\n \r\n\r\n token_actions = [moved_out, into_goal, send_opp_home, send_self_home] # True if action is valid\r\n\r\n return token_actions",
"def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move",
"def get_player_actions(state, player, reward_move=False):\n\n actions = []\n board = state.get_board()\n empty_cells = board.get_all_empty_cells()\n opponent_pieces = board.get_player_pieces_on_board(Color(player * -1))\n\n if reward_move:\n for piece in opponent_pieces:\n actions.append(YoteAction(action_type=YoteActionType.STEAL_FROM_BOARD, at=piece))\n if state.in_hand[player * -1] > 0:\n actions.append(YoteAction(action_type=YoteActionType.STEAL_FROM_HAND))\n return actions\n else:\n if empty_cells and state.in_hand[player] > 0:\n for cell in empty_cells:\n actions.append(YoteAction(action_type=YoteActionType.ADD, to=cell))\n player_pieces = board.get_player_pieces_on_board(Color(player))\n for piece in player_pieces:\n moves = YoteRules.get_effective_cell_moves(state, piece, player)\n if moves:\n for move in moves:\n actions.append(YoteAction(action_type=YoteActionType.MOVE, at=piece, to=move))\n return actions",
"def get_action_outcomes(self, state, action):\r\n temp_state = tuple([max(0, min(self.pond_size[i]-1, state[i] + self.action_directions[action][i]))\r\n for i in range(2)])\r\n return self.transition_lists[temp_state]",
"def get_possible_actions(game_state: gs.GameState) -> Optional[List[int]]: # noqa: C901\n if game_state.is_game_ended():\n return None\n\n legal_actions = []\n\n if game_state.is_auction_started(): # if it is an auction\n # find max auction sun\n auction_suns = game_state.get_auction_suns()\n max_auction_sun = float(\"-inf\")\n if sum(1 for el in auction_suns if el is not None) > 0:\n max_auction_sun = max(el for el in auction_suns if el is not None)\n\n # add a legal action for every player sun greater than the max\n # bid sun\n current_player_usable_sun = game_state.get_current_player_usable_sun()\n possible_bid_actions = [gi.BID_1, gi.BID_2, gi.BID_3, gi.BID_4]\n for i in range(len(current_player_usable_sun)):\n if current_player_usable_sun[i] > max_auction_sun:\n legal_actions.append(possible_bid_actions[i])\n\n # if current player is not the auction starter or auction was\n # forced or someone else has bid, then player can pass\n currPlayer = game_state.get_current_player()\n if (\n currPlayer != game_state.get_auction_start_player()\n or game_state.auction_was_forced()\n or game_state.get_num_auction_suns() > 0\n ):\n legal_actions.append(gi.BID_NOTHING)\n\n else: # if it is not an auction\n # if disaster must be resolved\n if (\n game_state.get_num_mons_to_discard() > 0\n or game_state.get_num_civs_to_discard() > 0\n ):\n\n player = game_state.get_auction_winning_player()\n assert player is not None\n winning_player_collection = game_state.get_player_collection(player)\n\n # if there are civilizations to be discarded\n if game_state.get_num_civs_to_discard() > 0:\n possible_discards = [\n gi.DISCARD_ASTR,\n gi.DISCARD_AGR,\n gi.DISCARD_WRI,\n gi.DISCARD_REL,\n gi.DISCARD_ART,\n ]\n # the number of civilization tiles\n for i in range(gi.NUM_CIVS):\n if winning_player_collection[gi.STARTING_INDEX_OF_CIVS + i] > 0:\n legal_actions.append(possible_discards[i])\n\n # if there are monuments to be discarded\n elif game_state.get_num_mons_to_discard() > 0:\n possible_discards = [\n gi.DISCARD_FORT,\n gi.DISCARD_OBEL,\n gi.DISCARD_PAL,\n gi.DISCARD_PYR,\n gi.DISCARD_TEM,\n gi.DISCARD_STAT,\n gi.DISCARD_STE,\n gi.DISCARD_SPH,\n ]\n # the number of civilization tiles\n for i in range(gi.NUM_MONUMENTS):\n if (\n winning_player_collection[gi.STARTING_INDEX_OF_MONUMENTS + i]\n > 0\n ):\n legal_actions.append(possible_discards[i])\n\n # this should never be reached\n else:\n raise Exception(\n \"Error getting possible actions for disaster \" \"resolution\"\n )\n\n # if no disaster to resolve\n else:\n # add start auction option\n legal_actions.append(gi.AUCTION)\n\n num_auction_tiles = game_state.get_num_auction_tiles()\n max_auction_tiles = game_state.get_max_auction_tiles()\n if num_auction_tiles < max_auction_tiles:\n # add draw option if auction tiles not full\n legal_actions.append(gi.DRAW)\n\n # if golden god exists, add god options for each auction\n # tile\n players = game_state.get_current_player_collection()\n if players[gi.INDEX_OF_GOD] > 0:\n possible_takes = [\n gi.GOD_1,\n gi.GOD_2,\n gi.GOD_3,\n gi.GOD_4,\n gi.GOD_5,\n gi.GOD_6,\n gi.GOD_7,\n gi.GOD_8,\n ]\n\n auction_tiles = game_state.get_auction_tiles()\n for i in range(num_auction_tiles):\n if not gi.index_is_disaster(auction_tiles[i]):\n legal_actions.append(possible_takes[i])\n\n return sorted(legal_actions)",
"def actions(self, state):\n \n #les actions sont définies comme étant les nombres possibles dans \n #la case i,j\n theActions = []\n for i in range(size):\n for j in range(size):\n line = i\n col = j\n if(state[i][j] == 0):\n possibleNumbers = [1,2,3,4,5,6,7,8,9]\n config = state\n for a in range(size):\n x = config[line][a]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n for b in range(size):\n x = config[b][col]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n #identifie quelle boite on veut vérifier\n hBox = col - col % 3\n vBox = line - line % 3\n \n for c in range(3):\n for d in range(3):\n x = config[c+vBox][d+hBox]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n for k in possibleNumbers:\n theActions.append((i,j,k))\n return theActions",
"def get_king_moves(self, state):\n #king_moves = []\n possible_moves = []\n if self.color == cc.WHITE_ACTIVE:\n enemy_color = cc.BLACK_ACTIVE\n enemy_pieces = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n enemy_color = cc.WHITE_ACTIVE\n enemy_pieces = cc.WHITE_PIECES\n else:\n raise Exception(\"GameState: Invalid Active Color\")\n\n for vector in cc.KING_VECTORS:\n rank = self.coord[0] + vector[0]\n column = self.coord[1] + vector[1]\n if rank in cc.VALID_RANKS and column in cc.VALID_RANKS:\n if state.board[rank, column] == cc.NO_PIECE:\n possible_moves.append(cc.Action(self.string, self.coord, (rank, column)))\n elif state.board[rank, column] in enemy_pieces:\n possible_moves.append(cc.Action(self.string, self.coord, (rank, column), capture=True))\n \n # # Iterate over list of king moves, removing ones that are under attack\n # for move in possible_moves:\n # if not check.space_under_attack(state, move.end, enemy_color):\n # king_moves.append(move)\n\n return possible_moves",
"def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves",
"def get_possible_states(self) -> List[State]:\n next_states = []\n for action in self._legal_moves():\n next_states.append(self.move(action))\n return next_states",
"def safemoves():\n safe = []\n \n for move in board.moves():\n if board.rel(move) not in board.adjacent(board.them()):\n if any(map(board.passable,board.adjacent(board.rel(move)))):\n safe.append(move)\n\n\n if DEBUG:\n log.write(\"me\" + repr(board.me()) + \"\\n\")\n log.write(\"them\" + repr(board.them()) + \"\\n\")\n log.write(\"moves\"+ repr(board.moves()) + \"\\n\")\n log.write(\"safemoves\"+ repr(safe) + \"\\n\")\n return safe",
"def get_knight_moves(self, state):\n knight_moves = []\n if self.color == cc.WHITE_ACTIVE:\n enemy_set = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n enemy_set = cc.WHITE_PIECES\n else:\n raise Exception(\"get_knight_moves: Invalid Knight Color\")\n\n possible_moves = get_crawler_moves(self.coord, cc.KNIGHT_VECTORS)\n for move in possible_moves:\n if state.board[move] == cc.NO_PIECE:\n knight_moves.append(cc.Action(self.string, self.coord, move))\n elif state.board[move] in enemy_set:\n knight_moves.append(cc.Action(self.string, self.coord, move, capture=True))\n\n return knight_moves",
"def getLegalActions( state, ghostIndex ):\n conf = state.getGhostState( ghostIndex ).configuration\n possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )\n reverse = Actions.reverseDirection( conf.direction )\n if Directions.STOP in possibleActions:\n possibleActions.remove( Directions.STOP )\n if reverse in possibleActions and len( possibleActions ) > 1:\n possibleActions.remove( reverse )\n return possibleActions",
"def get_valid_moves(self):\n if self.king:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1],\n [self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n else:\n if self.player == 1:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1]]\n else:\n valid_moves = [[self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n return valid_moves",
"def possible_moves(self):\n states = []\n possible_floors = self.possible_floors()\n possible_items = self.possible_items()\n\n for fl in possible_floors:\n for items in possible_items:\n new_floors = deepcopy(self.floors)\n for item in items:\n new_floors[self.lift_floor].remove(item)\n new_floors[fl].append(item)\n\n if self.validate_floors(new_floors):\n states.append(\n GameState(new_floors, lift_floor=fl, moves=self.moves+1)\n )\n\n return states",
"def get_all_valid_moves(self, player):\n moves = [] # Stores the possible moves\n capture_move_exists = False # Indicates if a capturing move is possible\n\n for piece in self.get_all_pieces(player):\n valid_moves = self._get_valid_moves(piece)\n\n for move, skip in valid_moves.items():\n moves.append([(piece.row, piece.col), move, skip])\n\n if len(skip) > 0:\n # Checks if there is a move that can capture a piece\n capture_move_exists = True\n\n if capture_move_exists:\n # Only gets the capturing moves if there is one\n eating_moves = []\n for move in moves:\n if len(move[2]) != 0:\n eating_moves.append(move)\n\n moves = eating_moves\n\n return moves",
"def get_possible_actions(self, world_state,agent_host):\n action_list = []\n possibilities = {'movenorth 1': -3,'movesouth 1': 3,'moveeast 1': 1,'movewest 1': -1}\n #check walls to see whether can move left,right,back,forward\n #check floor beneath to see whether should do anything at all, or just nothing and sink\n obs_text = world_state.observations[-1].text\n obs = json.loads(obs_text)\n grid = load_grid(world_state)\n print 'GRID SIZE: ', len(grid)\n for k,v in possibilities.items():\n #with current grid, index 31 will always be our agent's current location\n #check walls to see whether can move left,right,back,forward\n if grid[31+v+9] == 'water' or grid[31+v+9] == 'wooden_door': #+9 because we want to check\n action_list.append(k) #where our feet are located\n #check if you can teleport down a level\n if grid[31-27] == 'water' or grid[31-27] == 'wooden_door':\n action_list.append(self.teleport(agent_host,False))\n #check if you can teleport up a level\n if grid[31+45] == 'water' or grid[31+45] == 'wooden_door':\n action_list.append(self.teleport(agent_host,True))\n\n print(\"ACTION LIST: {}\".format(action_list))\n return action_list",
"def computeActionFromValues(self, state):\n best_move = None\n best_Q = float(\"-inf\")\n\n for a in self.mdp.getPossibleActions(state):\n q = self.computeQValueFromValues(state,a)\n if q > best_Q:\n best_Q = q\n best_move = a\n\n return best_move",
"def untried_actions(self):\r\n if self._untried_actions is None:\r\n self._untried_actions = self.state.get_legal_moves()\r\n return self._untried_actions",
"def actions(self, state):\n actions = []\n raise_forces = [(\"RAISE_FORCE\", state.force*2.0)]\n reduce_forces = [] #[(\"REDUCE_FORCE\", state.force/2.0)]\n controlled = [t for t in self.territories if getattr(state, t) == \"CONTROLLED\"]\n owned = set([t for t in self.territories if getattr(state, t) in [\"CONTROLLED\", \"RE-ENFORCED\"]])\n border_territories = set()\n for t in owned:\n border_territories |= self.territories[t].get_borders() - owned\n reenforce_territories = [(\"RE-ENFORCE\", t) for t in controlled]\n take_territories = [(\"TAKE\", t) for t in border_territories if state.force >= self.territories[t].force]\n return raise_forces + reduce_forces + reenforce_territories + take_territories",
"def getValidMoves(self, board):\n inputList = []\n for i in range(0, 6):\n if c.canInput(i):\n inputList[i] = 1\n else:\n inputList[i] = 0",
"def result(self, state, action):\r\n\r\n sc = copy.deepcopy(state)\r\n new_piece, player = self.new_or_old_piece(state)\r\n current_player, to_action, from_action = action\r\n\r\n # Make the move\r\n sc[to_action[0]][to_action[1]] = current_player\r\n\r\n # There can't be more than 6 pieces in any state.\r\n if not new_piece:\r\n # Now making from place as null again\r\n sc[from_action[0]][from_action[1]] = '-'\r\n\r\n return sc",
"def get_valid_move_actions(pos,obstacles):\n\n valid_actions = [False]*len(global_defs.Actions)\n for idx,action in enumerate(global_defs.Actions[:-1]):\n valid = check_valid(pos+global_defs.ACTIONS_TO_MOVES[action])\n if valid:\n valid_actions[idx] = True\n #The last action, i.e. WORK, is set to False, since we don't have any idea about deciding it.\n return np.array(valid_actions)",
"def get_legal_moves(self, i, j):\r\n legal_moves = list()\r\n for action in self.action_dic.keys():\r\n coordinate_change = self.action_dic[action]\r\n new_i = coordinate_change[0] + i\r\n new_j = coordinate_change[1] + j\r\n if (new_i >= 0 and new_i < 3) and (new_j >= 0 and new_j < 3):\r\n legal_moves.append(self.reflection_dic[action])\r\n return legal_moves",
"def decide(self, game, state, available_moves, opponent_moves):\n\t\tstatecopy = copy.deepcopy(state)\n\t\troot = GameNode(game, None, statecopy, available_moves, None)\n\t\ttree = GameTree(root)\n\t\tminimaxAB = AlphaBeta(tree)\n\t\tbest_state = minimaxAB.alpha_beta_search(tree.root)\n\t\tmove = best_state.action\n\t\treturn [move.row, move.column, move.shift]",
"def get_all_moves(self, castling_allowed=True):\n\n can_move = str.isupper if self.white_to_move else str.islower\n\n valid_moves = set()\n\n for row_num, row in enumerate(self.board):\n for col_num, piece in enumerate(row):\n if piece != EMPTY_SPACE and can_move(piece):\n\n location = (row_num, col_num)\n\n # Everything except the pawn movement\n if piece.lower() in NAME_TO_PIECE:\n valid_moves = valid_moves.union(self._get_standard_moves_for_piece(location, piece))\n\n # Pawn moves\n if piece.lower() == PAWN:\n valid_moves = valid_moves.union(self._get_pawn_moves(location, piece))\n\n # Castling\n if castling_allowed and piece.lower() == KING:\n valid_moves = valid_moves.union(self._get_possible_castles(piece))\n\n return valid_moves",
"def make_move(self, state):\n emptySpaces = 0\n for row in state:\n emptySpaces = emptySpaces + row.count(' ')\n if emptySpaces > 17:\n drop_phase = True\n else:\n drop_phase = False\n\n move = []\n if not drop_phase:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, False, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]!= ' ' and best_state[i][j]== ' ':\n move.append((i,j))\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n # TODO: choose a piece to move and remove it from the board\n # (You may move this condition anywhere, just be sure to handle it)\n #\n # Until this part is implemented and the move list is updated\n # accordingly, the AI will not follow the rules after the drop phase!\n \n\n # select an unoccupied space randomly\n # TODO: implement a minimax algorithm to play better\n \n else:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, True, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n return move",
"def legalMoves(self):\n moves = []\n indexOfZero = self.tiles.index(0)\n \n if indexOfZero == 0:\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 1:\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 2:\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 3:\n moves.append('Up')\n moves.append('Down')\n moves.append('Right')\n elif indexOfZero == 4:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 5:\n moves.append('Up')\n moves.append('Down')\n moves.append('Left')\n elif indexOfZero == 6:\n moves.append('Up')\n moves.append('Right')\n elif indexOfZero == 7:\n moves.append('Up')\n moves.append('Left')\n moves.append('Right')\n elif indexOfZero == 8:\n moves.append('Up')\n moves.append('Left')\n else:\n print('something wrong with board')\n return moves",
"def legal_moves(board,player=None):\r\n \r\n possible_moves = []\r\n moves = []\r\n if player == None:\r\n moves += board.white + board.black\r\n elif player == -1:\r\n moves += board.black\r\n elif player == 1:\r\n moves += board.white\r\n \r\n captured = False\r\n for pos in moves:\r\n if pos[0] == 'A':\r\n m = [-8,-7,1,8,9]\r\n elif pos[0] == 'H':\r\n m = [-9,-8,-1,7,8]\r\n else:\r\n m = [-9,-8,-7,-1,1,7,8,9]\r\n loc = decode(pos)\r\n for i in m:\r\n captured = capture(board, player, possible_moves, pos, loc, i)\r\n canter(board, player, possible_moves, pos, loc, i)\r\n plain(board, player, possible_moves, pos, loc, i)\r\n \r\n if captured:\r\n enemy_list = []\r\n for capturing_move in possible_moves:\r\n if len(capturing_move) == 3:\r\n enemy_list.append(capturing_move)\r\n possible_moves = list(enemy_list)\r\n\r\n return possible_moves",
"def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list",
"def get_possible_actions(self) -> [Action]:\r\n if self.fields[self.agent_x][self.agent_y] == Field.EMPTY or self.terminated:\r\n return [Action.NORTH, Action.EAST, Action.SOUTH, Action.WEST]\r\n else: # must be terminal\r\n return [Action.TERMINAL]",
"def actions(self, state):\n if (state == (3,3,1)): # if yes, send a missionary and a canniable to land B\n return (2,2,0)\n if (state == (2,2,0)): # if yes, send a missionary back to land A\n return (3,2,1)\n if (state == (3,2,1)): # if yes, send a missionary and a canniable to land B\n return (2,1,0)\n if (state == (2,1,0)): # if yes, send a missionary back to land A\n return (3,1,1)\n if (state == (3,1,1)): # if yes, send 2 missionary to land B\n return (1,1,0)\n if (state == (1,1,0)): # if yes, send a missionary and a canniable to land A\n return (2,2,1)\n if (state == (2,2,1)): # if yes, send 2 missionary to land B\n return (0,2,0)\n if (state == (0,2,0)): # if yes, send a missionary to land A\n return (1,2,1)\n if (state == (1,2,1)): # if yes, send a missionary and a canniable to land B\n return (0,1,0)\n if (state == (0,1,0)): # if yes, send a missionary to land A\n return (1,1,1)\n if (state == (1,1,1)): # if yes, send a missionary and a canniable to land B\n return (0,0,0)\n\n raise NotImplementedError",
"def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n # Add a successor state to the successor list if the action is legal\n # Here's a code snippet for figuring out whether a new position hits a wall:\n x, y = state[0]\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n if not hitsWall:\n successors.append((((nextx,nexty), self.visited_corner), action, 1))\n \"*** YOUR CODE HERE ***\"\n\n return successors",
"def available_moves(board_state):\n for x, y in itertools.product(range(len(board_state)), range(len(board_state[0]))):\n if board_state[x][y] == 0:\n yield (x, y)",
"def legalMoves(self):\n return [c for c in range(self.getWidth()) if len([r for r in range(self.getHeight()) if self.cell[c][r]==EMPTY])>0 ]",
"def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves",
"def get_legal_moves(self, player: int) -> np.ndarray:\n stage2 = self.is_stage2()\n action_mask = np.zeros((24, 5, 25), dtype=bool)\n # if stage 1 add set options\n array_board = np.array(self.board)\n if not stage2:\n legal_pos = np.where(array_board == 0)[0]\n for pos in legal_pos:\n if self.is_mill(player, pos, self.board): # current selection completes a mill\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if\n not self.is_mill(-player, opp_p, self.board)] # can't remove opponent in mill\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[pos, -1, opp_pos] = True\n else:\n action_mask[pos, -1, -1] = True # place piece on board\n else:\n from_pos_cands = np.where(array_board == player)[0]\n for from_pos in from_pos_cands:\n mill_cands = [(orient, adj) for orient, adj in enumerate(self.adjacent[from_pos]) if\n adj is not None and self.board[adj] == 0] # TODO added not, need to validate\n if_played_board = self.board.copy()\n if_played_board[from_pos] = 0\n for (orient, adj) in mill_cands:\n if self.is_mill(player, adj, if_played_board):\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if not self.is_mill(-player, opp_p, if_played_board)]\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[from_pos, orient, opp_pos] = True\n else:\n action_mask[from_pos, orient, -1] = True\n\n return action_mask",
"def random_next_action(state):\n\n possible_moves = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n possible_moves.append((i, j))\n return random.choice(possible_moves)",
"def get_action(self, state):\n self.visited = {}\n utility = -inf\n move = 'STOP'\n\n # We choose the successor with the maximum utility\n for successor in state.generatePacmanSuccessors():\n maxPlayer = True\n score = self.alphabeta(successor[0], -inf, +inf, maxPlayer)\n if utility < score:\n move = successor[1]\n utility = score\n\n # If there's no winning state, we try to to move farther from the ghost\n if utility == -inf:\n dist = -inf\n for successor in state.generatePacmanSuccessors():\n newDist = self.distanceFromGhost(successor[0])\n if not successor[0].isLose() and newDist > dist:\n move = successor[1]\n dist = newDist\n print(utility)\n return move",
"def openMoves(self):\n arr = []\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n t = self.getPawn(x,y)\n if(t!=None):\n for z in range(-1,2):\n if(self.legalMove(t,z)):\n #move , #newState\n arr.append((t,z))\n return arr",
"def __find_all_moves(self, tower) -> list:\r\n choice = []\r\n for height in range(1,len(tower.tower)-2):\r\n for index in range(1,4):\r\n if self.stat_brain.is_valid(height, index, tower):\r\n choice.append((height, index))\r\n \r\n r.shuffle(choice)\r\n return choice",
"def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves",
"def actions(self):\r\n def create_move(at, to):\r\n return lambda: self._move(at, to)\r\n\r\n moves = []\r\n for i, j in itertools.product(range(self.width),\r\n range(self.width)):\r\n direcs = {'R':(i, j-1),\r\n 'L':(i, j+1),\r\n 'D':(i-1, j),\r\n 'U':(i+1, j)}\r\n\r\n for action, (r, c) in direcs.items():\r\n if r >= 0 and c >= 0 and r < self.width and c < self.width and \\\r\n self.board[r][c] == 0:\r\n move = create_move((i,j), (r,c)), action\r\n moves.append(move)\r\n return moves",
"def actions(board):\n actionSet = set()\n\n # if cell is empty it's a valid move\n for i, row in enumerate(board):\n for j, cell in enumerate(row):\n if cell == EMPTY:\n actionSet.add((i, j))\n\n return actionSet",
"def get_valid_moves(self) -> list[int]:\n return self._valid_moves",
"def get_possible_moves(self) -> list:\n if self.p1_turn:\n name = '2'\n else:\n name = '1'\n\n count = 0\n for i in self.claim:\n if i == name:\n count += 1\n over = count >= 0.5 * len(self.claim)\n\n moves = []\n if not over:\n for i in self.letters:\n if i.isalpha():\n moves.append(i)\n return moves",
"def is_legal_move(state, action, player, rewarding_move=False): # TODO: Update this function to an more\n # optimized one.\n action = action.get_action_as_dict()\n if rewarding_move:\n if player == state.get_next_player() == state.get_latest_player():\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND and state.in_hand[player * -1] > 0:\n return True\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n opponent_piece = state.get_board().get_player_pieces_on_board(Color(player * -1))\n if opponent_piece and action['action']['at'] in opponent_piece:\n return True\n return False\n else:\n if state.get_next_player() == player:\n if action['action_type'] == YoteActionType.ADD and state.in_hand[player] > 0:\n empty_cells = state.get_board().get_all_empty_cells()\n if empty_cells and action['action']['to'] in empty_cells:\n return True\n elif action['action_type'] == YoteActionType.MOVE:\n if state.get_board().get_cell_color(action['action']['at']) == Color(player):\n effective_moves = YoteRules.get_effective_cell_moves(state, action['action']['at'], player)\n if effective_moves and action['action']['to'] in effective_moves:\n return True\n return False\n return False",
"def find_possible_actions(board_state):\n empty_slot_indices = [ind+1 for ind,\n val in enumerate(board_state) if val.isnumeric()]\n\n return empty_slot_indices",
"def get_all_possible_moves(self):\r\n moves = []\r\n for i in range(8):\r\n for j in range(8):\r\n color = self.board[i][j][0]\r\n if (color == 'b' and not self.turn_white) or (color == 'w' and self.turn_white):\r\n p_type = self.board[i][j][1]\r\n if p_type == 'r':\r\n self.get_rook_moves(i, j, moves)\r\n elif p_type == 'k':\r\n self.get_king_moves(i, j, moves)\r\n elif p_type == 'q':\r\n self.get_queen_moves(i, j, moves)\r\n elif p_type == 'p':\r\n self.get_pawn_moves(i, j, moves)\r\n elif p_type == 'b':\r\n self.get_bishop_moves(i, j, moves)\r\n elif p_type == 'n':\r\n self.get_knight_moves(i, j, moves)\r\n return moves",
"def assignProbablities(self, gameState):\n legalActions = gameState.getLegalActions()\n numDiceActive = sum(gameState.numDicePerPlayer)\n probActionTuples = []\n\n for action in legalActions:\n currentHand = gameState.hands[self.agentIndex]\n currentAction = action\n remainingTotalDice = gameState.totalNumDice - gameState.numDicePerPlayer[self.agentIndex]\n assert remainingTotalDice > 0\n remainingActionCount = currentAction[2] - currentHand[currentAction[1]]\n if remainingActionCount > remainingTotalDice:\n if action[0] == 'deny':\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n elif remainingActionCount > 0:\n # or (action[0] == \"confirm\" and remainingActionCount == 0)\n if action[0] == \"bid\":\n probActionTuples.append((self.bidProbability(remainingTotalDice, remainingActionCount), action))\n elif action[0] == \"deny\":\n probActionTuples.append((1 - self.bidProbability(remainingTotalDice, remainingActionCount), action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n elif remainingActionCount == 0:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n elif action[0] == \"deny\":\n probActionTuples.append((0, action))\n else:\n probActionTuples.append((self.confirmProbability(remainingTotalDice, remainingActionCount), action))\n else:\n if action[0] == \"bid\":\n probActionTuples.append((1, action))\n else:\n probActionTuples.append((0, action))\n\n return probActionTuples",
"def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x, y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append((nextState, action, cost))\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO NOT CHANGE\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors",
"def get_action(self, state):\n from graphics_utils import keys_waiting\n from graphics_utils import keys_pressed\n keys = keys_waiting() + keys_pressed()\n if keys != []:\n self.keys = keys\n\n legal = state.get_legal_actions(self.index)\n move = self.get_move(legal)\n\n if move == Directions.STOP:\n # Try to move in the same direction as before\n if self.last_move in legal:\n move = self.last_move\n\n if (self.STOP_KEY in self.keys) and Directions.STOP in legal:\n move = Directions.STOP\n\n if move not in legal:\n move = random.choice(legal)\n\n self.last_move = move\n return move",
"def get_possible_moves(self) -> list:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n if p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2:\n return []\n moves = []\n for letter in self.current_board:\n if letter.isalpha():\n moves.append(letter)\n return moves",
"def actions(self, state):\n \"*** YOUR CODE HERE ***\"\n if state[2] == 0:\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1:\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2:\n state_fw = (state[0], state[1] - 1, 2)\n state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3:\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. Can't find heading\")\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) \n for goal in self.goals:\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. Can't determine action\")",
"def getAction(self, gameState):\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n if 'Stop' in legalMoves:\n legalMoves.remove('Stop')\n numFood = len(gameState.getFood().asList())\n ghoststates = gameState.getGhostStates()\n ghost = ghoststates[0]\n timer = ghost.scaredTimer\n if timer > 5:\n newPosits = [gameState.generatePacmanSuccessor(action).getPacmanPosition() for action in legalMoves]\n scores = [manhattanDistance(pos, ghost.getPosition()) for pos in newPosits]\n bestScore = min(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n for ind in bestIndices:\n if legalMoves[ind] == ghost.getDirection():\n return legalMoves[ind]\n return legalMoves[random.choice(bestIndices)]\n\n\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n if numFood > 10:\n for ind in bestIndices:\n if (legalMoves[ind] == 'South') or (legalMoves[ind] == 'West'):\n return legalMoves[ind]\n\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n\n\n\n\n return legalMoves[chosenIndex]",
"def populate_states(self, list, player):\n if self.pre_state is None:\n for action in list:\n self.occupied[action] = player\n self.available_moves.remove(action)\n return 1\n print(\"you can only populate at the init state\")\n return 0",
"def get_legal_moves(self):\n\n return self._legal_moves",
"def map_state_to_actions(self, state: types.StateSpace, **override_args: Any) \\\n -> types.ActionProcess:\n z_star = self.max_weight_policy(state)\n actions = self.mpc_policy.obtain_actions(z_star=z_star, num_mpc_steps=1)\n return actions",
"def get_valid_moves(self):\r\n validMoves = []\r\n\r\n for x in range(BOARD_SIZE):\r\n for y in range(BOARD_SIZE):\r\n pos = np.array([x,y])\r\n if self.board[pos[0],pos[1]] == 0:\r\n if(self.update_board(pos,_testing=True)):\r\n validMoves.append(pos)\r\n\r\n return validMoves",
"def possible_moves(state):\n sliders = state['sliders']\n pins = state['pins']\n result = []\n\n # this is a bit repetitive -- could theoretically generalize?\n for i, pin in enumerate(pins):\n x, y = pin\n for dy, direction in [(1, 'down'), (-1, 'up')]:\n new_pin = (x, y+dy)\n move = 'move pin {0} {1}'.format(i, direction)\n if all_open([new_pin], sliders):\n new_state = deepcopy(state)\n new_state['pins'][i] = new_pin\n result.append((move, new_state))\n\n for i, slider in enumerate(sliders):\n coords, offset = slider\n for dx, direction in [(1, 'left'), (-1, 'right')]:\n new_slider = (coords, offset+dx)\n move = 'move slider {0} {1}'.format(i, direction)\n if all_open(pins, [new_slider]):\n new_state = deepcopy(state)\n new_state['sliders'][i] = new_slider\n result.append((move, new_state))\n\n return result",
"def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x,y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n # Bookkeeping for display purposes\n self._expanded += 1\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors",
"def get_actions(self, game_state):\n actions = []\n\n if not game_state.is_active:\n actions.append(Action('start_game'))\n actions.append(Action('leave_game'))\n self.reset()\n return actions\n\n self.is_game_started = True\n\n # Identify a game is ended.\n if (game_state.exp == -1) and (game_state.level == 0) and (not game_state.store.is_open):\n actions.append(Action('leave_game'))\n return actions\n\n if game_state.money > 30:\n if self.need_level_up(game_state):\n actions.append(Action('level_up'))\n return actions\n\n if not game_state.store.is_open:\n actions.append(Action('toggle_store'))\n return actions\n\n hero_positions = []\n for i in range(5):\n hero = game_state.store.heroes[i]\n if hero is None:\n continue\n if hero.name == self.hero_name:\n game_state.money = game_state.money - self.base_hero.price\n if game_state.money < 0:\n break\n hero_positions.append(i)\n if len(hero_positions) == 0:\n if (game_state.money > 30) or (not self.need_level_up(game_state)\n and game_state.round > 10 and game_state.money >= 7):\n actions.append(Action('reroll'))\n else:\n actions.append(Action('wait', 5))\n return actions\n\n for i in range(5):\n if game_state.store.heroes[i] is None:\n continue\n if game_state.store.heroes[i].name == self.hero_name:\n actions.append(Action('recruit', i))\n self.hand.add_hero(game_state.store.heroes[i])\n if len(actions) == 0:\n actions.append(Action('wait', 5))\n return actions\n # Allow the game to log the heroes in hand before upgrade.\n actions.append(Action('log_hero_in_hand', copy.deepcopy(self.hand)))\n\n upgrade_position = self.hand.can_hero_upgrade()\n if upgrade_position is None:\n actions.extend(self.rotate_actions(0))\n return actions\n actions.append(Action('upgrade_hero_in_hand', upgrade_position))\n self.hand.upgrade_hero(upgrade_position)\n actions.append(Action('log_hero_in_hand', copy.deepcopy(self.hand)))\n\n new_upgrade_position = self.hand.can_hero_upgrade()\n if new_upgrade_position is None:\n actions.extend(self.rotate_actions(upgrade_position))\n return actions\n actions.append(Action('upgrade_hero_in_hand', new_upgrade_position))\n self.hand.upgrade_hero(new_upgrade_position)\n actions.append(Action('log_hero_in_hand', copy.deepcopy(self.hand)))\n actions.extend(self.rotate_actions(new_upgrade_position))\n\n return actions",
"def get_possible_moves(board):\n\tpossible_moves = []\n\n\tfor count, player in enumerate(board):\n\t\tif player is not server_player and player is not user_player:\n\t\t\tpossible_moves.append(count)\n\n\treturn possible_moves",
"def valid_actions(self):\n valids = []\n for index, space in enumerate(self.inputs_):\n if space == TictactoeMatch.EMPTY:\n valids.append(index)\n return valids",
"def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n x,y = state\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO NOT CHANGE\n if state not in self._visited:\n self._visited[state] = True\n self._visitedlist.append(state)\n\n return successors",
"def actions(board):\n\tposs_actions = set()\n\tfor i in range(len(board)):\n\t\tfor j in range(len(board)):\n\t\t\tif board[i][j]==EMPTY:\n\t\t\t\tposs_actions.add((i,j))\n\n\treturn poss_actions",
"def get_available_moves(self):\n available = []\n row, col = tuple(self.current_pos)\n if row - 1 >= 0 and self.maze[row - 1][col] != 'x':\n available.append('n')\n if row + 1 < len(self.maze) and self.maze[row + 1][col] != 'x':\n available.append('s')\n if col - 1 >= 0 and self.maze[row][col - 1] != 'x':\n available.append('w')\n if col + 1 < len(self.maze[row]) and self.maze[row][col + 1] != 'x':\n available.append('e')\n return available",
"def guarded_places(self):\n guarded = []\n for x in range(8):\n for y in range(8):\n if self.squares[x][y].piece and self.squares[x][y].piece.color != self.turn:\n squares = self.squares[x][y].piece.actions(self, (x, y), True)\n if self.squares[x][y].piece.name != 'pawn': # pawns capture in different areas than they move\n guarded.extend(squares[0])\n guarded.extend(squares[1])\n return guarded",
"def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result",
"def actions(board):\n avail_moves = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n avail_moves.add((i,j))\n \n if len(avail_moves) == 0:\n return 0\n\n return avail_moves"
] | [
"0.7829784",
"0.7794976",
"0.72625947",
"0.7095838",
"0.7046285",
"0.70153415",
"0.70093673",
"0.69802105",
"0.6962673",
"0.6890715",
"0.6863222",
"0.6827296",
"0.66367495",
"0.66215557",
"0.66136307",
"0.66089207",
"0.65999246",
"0.6596026",
"0.65576553",
"0.6540269",
"0.6538662",
"0.65383077",
"0.64809287",
"0.64495707",
"0.64364856",
"0.638771",
"0.63836336",
"0.6359694",
"0.6353979",
"0.63442975",
"0.63310707",
"0.63235563",
"0.62826693",
"0.6269921",
"0.6238363",
"0.6236121",
"0.6216466",
"0.62049145",
"0.62041813",
"0.620323",
"0.61979854",
"0.61904943",
"0.61817545",
"0.61742085",
"0.6156863",
"0.6153865",
"0.6132083",
"0.61097777",
"0.61084497",
"0.6104109",
"0.60854924",
"0.6079567",
"0.6078582",
"0.6072051",
"0.6070422",
"0.60685456",
"0.6054681",
"0.60512364",
"0.60494715",
"0.6049258",
"0.60382026",
"0.60088366",
"0.59962755",
"0.59916496",
"0.5983729",
"0.5977751",
"0.59670454",
"0.59667003",
"0.5960164",
"0.59577554",
"0.5951588",
"0.5949083",
"0.59385705",
"0.5938296",
"0.5928838",
"0.59088254",
"0.5898554",
"0.5894139",
"0.58832836",
"0.5879712",
"0.58730245",
"0.5872668",
"0.5865858",
"0.58601815",
"0.5860104",
"0.58577275",
"0.58571213",
"0.5855283",
"0.58458877",
"0.5845877",
"0.5845569",
"0.5842933",
"0.584098",
"0.58357096",
"0.5831189",
"0.5830494",
"0.58300537",
"0.58228135",
"0.5821814",
"0.58188665"
] | 0.79888695 | 0 |
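Each record in this dump flattens to `query | document | metadata | negatives | negative_scores | document_score | document_rank`, with the scores aligned index-for-index against the negatives. A sketch of how a parsed record might be inspected (it assumes the record has already been decoded into a dict with those field names):

```python
# Sketch: rank one record's candidate snippets by retrieval score.
# Assumes the record was already decoded into a dict with the dump's fields.
def best_negatives(record, k=3):
    scored = zip(record["negatives"], (float(s) for s in record["negative_scores"]))
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:k]

record = {
    "negatives": ["def a(): ...", "def b(): ...", "def c(): ..."],
    "negative_scores": ["0.78", "0.72", "0.79"],
}
for snippet, score in best_negatives(record, k=2):
    print(f"{score:.2f}  {snippet}")
```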
Start the client listening to the game. Pass in a function that accepts the available actions and the current state of the game and returns the action to take; the SDK handles the rest. If any command-line arguments are passed at launch, they are assumed to be client keys and are sent to the server for authentication when connecting. | def start(turn_handler):
if os.environ.get('BOTBOX_SECRET'):
print('Using env secret:', os.environ['BOTBOX_SECRET'])
headers = {'Authorization': os.environ['BOTBOX_SECRET']}
elif len(sys.argv) > 1:
print('Using cli secret:', sys.argv[1])
headers = {'Authorization': sys.argv[1]}
else:
print('Using no authentication')
headers = []
# get the URL for the server from an environment variable if it is set,
# otherwise use the default localhost
if os.environ.get('BOTBOX_SERVER'):
url = (WS_SERVER_SCHEME + '://'
+ os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)
else:
url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT
print("Connecting to:", url)
ws = websocket.WebSocketApp(
url,
on_open = _on_open,
on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),
on_error = _on_error,
on_close = _on_close,
header = headers
)
ws.run_forever() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\n if self._callable:\n self._is_running = True\n self._run_client()",
"async def run():\n # Get the arguments from the parser\n args = client.arguments\n\n # If the help argument was used, return\n if hasattr(args, \"help\"):\n return\n # Otherwise, check the correct command and invoke the respective function\n # BUILD\n if args.command == \"build\":\n if args.action == \"delete\":\n await client.delete_build(args.build)\n elif args.action == \"download\":\n await client.download_build(args.build, args.force)\n elif args.action == \"info\":\n await client.show_build(args.build)\n # BUILDS\n elif args.command == \"builds\":\n if args.refresh:\n await client.update_builds()\n await client.show_builds(args.ready_only)\n # FOLDER\n elif args.command == \"folder\":\n if args.action == \"create\":\n await client.create_folder(args.folder, args.no_resources)\n elif args.action == \"info\":\n await client.get_folder(args.folder)\n elif args.action == \"resources\":\n await client.get_resources(args.folder)\n elif args.action == \"delete\":\n await client.delete_folder(args.folder)\n # FOLDERS\n elif args.command == \"folders\":\n if args.refresh:\n await client.post(\"/folders\")\n await client.show_folders()\n # SERVER\n elif args.command == \"server\":\n if args.action == \"start\":\n await client.start_server(args.server, args.build)\n elif args.action == \"info\":\n await client.get_server(args.server)\n elif args.action == \"stop\":\n await client.stop_server(args.server)\n # SERVERS\n elif args.command == \"servers\":\n await client.print_servers()\n # INFO\n else:\n await client.show_info()",
"def run_action(client: Client, args: Namespace):\n\n result = None\n\n if args.action == 'exec':\n result = client.run(args.command, *args.argument)\n elif args.action == 'say':\n result = client.say(args.message)\n elif args.action == 'fortune':\n result = client.fortune(\n short=not args.long, offensive=args.offensive)\n elif args.action == 'datetime':\n result = client.datetime(frmt=args.format)\n elif args.action == 'in-use':\n players = client.players\n\n if players.online:\n LOGGER.info('There are %i players online:', players.online)\n LOGGER.info(', '.join(players.names))\n else:\n LOGGER.warning('There are no players online.')\n exit(1)\n\n if result:\n LOGGER.info(result)",
"def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and subscribe! 👍\")",
"def main():\n\n args = get_args()\n log_level = DEBUG if args.debug else INFO\n basicConfig(level=log_level, format=LOG_FORMAT)\n host, port, passwd = get_credentials(args.server)\n\n try:\n with Client(host, port, timeout=args.timeout) as client:\n if not client.login(passwd):\n LOGGER.error('Failed to log in.')\n exit(4)\n\n if args.action == 'idle-shutdown':\n players = client.players\n else:\n run_action(client, args)\n except timeout:\n LOGGER.error('Connection timeout.')\n exit(3)\n\n if args.action == 'idle-shutdown':\n if not idle_shutdown(players, args):\n exit(1)",
"def start(self, autologin=True, autoreconnect=False):\n self.autologin = autologin\n self.autoreconnect = autoreconnect\n if self.loop.is_running():\n self.add_task(self._handler())\n logger.info(\n \"The client's event loop was already running. \"\n \"The client will run as a new task on the loop.\"\n )\n return True\n else:\n self.loop.run_until_complete(self._handler())\n return False",
"def start():\n if not cfg.irc:\n logging.warning(\"Skipping IRC module: no configuration provided\")\n return\n\n server = cfg.irc.server\n port = cfg.irc.port\n ssl = cfg.irc.ssl\n nick = cfg.irc.nick\n channels = cfg.irc.channels\n\n logging.info(\n \"Starting IRC client: server=%r port=%d ssl=%s nick=%r \" \"channels=%r\",\n server,\n port,\n ssl,\n nick,\n channels,\n )\n\n bot = Bot(cfg.irc)\n utils.DaemonThread(target=bot.start).start()\n\n evt_target = EventTarget(bot)\n events.dispatcher.register_target(evt_target)\n utils.DaemonThread(target=evt_target.run).start()",
"def start( *args, **kwargs ):",
"def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()",
"def main():\n door = TalkingDoor()\n\n application = ApplicationBuilder().token(TOKEN).build()\n\n application.add_handlers(\n [\n CommandHandler([\"start\", \"help\"], door.help),\n CommandHandler(\"status\", door.status),\n CommandHandler(\"alarm\", door.alarm),\n CommandHandler(\"last_vid\", door.last_vid),\n CommandHandler(\"last_vids\", door.last_vids),\n CommandHandler(\"stop\", door.stop),\n CommandHandler(\"last\", door.last),\n CommandHandler(\"lines\", door.last_lines),\n ]\n )\n application.add_handler(CallbackQueryHandler(door.button))\n\n application.post_init = send_keyboard\n\n application.run_polling()",
"def main():\n s = start_server()\n accept_connection(s)",
"def game_start(self):\r\n\t\tself._comm_server.broadcast_message(\"game-start\")\r\n\t\tself._is_game_started = True\r\n\t\tself._handlers[\"game-start\"].invoke()\r\n\t\t_logger.info(\"Game is started.\")",
"def main() -> None:\n\n logger.info(f\"Arguments: {args}\")\n client = iotcore.Client()\n client.subscribe(args.request_topic, iotcore.QOS.AT_MOST_ONCE, handler)\n\n while True:\n # Keep app open and running\n time.sleep(1)",
"def main():\n usage = \"usage: %prog [options] channels\"\n parser = OptionParser(usage=usage)\n\n (options, args) = parser.parse_args()\n\n if len(args) < 1:\n parser.print_help()\n return 2\n\n # do stuff\n # This runs the program in the foreground. We tell the reactor to connect\n # over TCP using a given factory, and once the reactor is started, it will\n # open that connection.\n reactor.connectTCP(HOST, PORT, MyFirstIRCFactory(args))\n # Since we're running in the foreground anyway, show what's happening by\n # logging to stdout.\n log.startLogging(sys.stdout)\n # And this starts the reactor running. This call blocks until everything is\n # done, because this runs the whole twisted mainloop.\n reactor.run()",
"def main():\n if \"cli\" in sys.argv:\n run_cli_game()\n else:\n run_gui_game()",
"def main():\n Fire(cli)",
"def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)",
"def main():\n\n\t# Initialize the node\n\trospy.init_node(\"node_action_server_ros_iot_bridge\")\n\n\t# Create a object for RosIotBridgeActionServer class\n\taction_server = RosIotBridgeActionServer()\n\n\t# Not letting this node die\n\trospy.spin()",
"def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()",
"def start(self):\n while True:\n #requests.get(\"http://localhost:8080/clear\")\n if use_launch_phrase:\n recognizer, audio = self.speech.listen_for_audio()\n if self.speech.is_call_to_action(recognizer, audio):\n self.__acknowledge_action()\n self.decide_action()\n else:\n self.decide_action()",
"def start(self):\n\t\tself.init_trajectory_gripper()\n\t\tself.gripperserver.start()\n\t\tprint(\"The action server for this driver has been started\")",
"def main():\n # Parse arguments for configuration and light type\n parser = argparse.ArgumentParser()\n parser.add_argument(\"light_type\", help=\"lifx or hue\", choices=['lifx', 'hue'], type = str.lower)\n parser.add_argument(\"-c\", \"--config_mode\", action='store_true', help=\"runs the client in config mode which prints out the light data\")\n \n args = parser.parse_args()\n \n config_mode = args.config_mode\n light_type = args.light_type\n \n # Get light information \n # *Note*\n # Only LIFX is supported at this point in time\n light_service = None\n if light_type == 'lifx':\n light_service = lightservice.LIFXLightService(\"https://api.lifx.com/v1/\")\n \n data = light_service.refresh_light_data(config_mode)\n \n button_handler = None\n if config_mode:\n button_handler = buttonhandler.ConfigButtonHandler()\n button_handler.start()\n else:\n button_handler = buttonhandler.ButtonHandler(data)\n button_handler.start(light_service)",
"def main():\r\n if check_argv():\r\n if len(sys.argv) == 3:\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), True, ip=None)\r\n gui.create_board()\r\n gui.root.title(\"Server\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()\r\n elif len(sys.argv) == 4:\r\n ip = socket.gethostbyname(socket.gethostname())\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), False, ip)\r\n gui.create_board()\r\n gui.root.title(\"Client\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()",
"def execute():\n command_line_args = argv[1:]\n args = cli(command_line_args)\n\n callback = args.callback\n kwargs = {\n k: v\n for k, v in args.__dict__.items()\n if k != \"callback\"\n }\n\n main(callback, **kwargs)",
"def start_game(self, **kwargs):\n\n success, info = self.gms.start_game(\n player=kwargs.get('player', 'x'),\n first_turn=raw_input('Would you like to go first? y/n\\n') == 'y'\n )\n if success:\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n print(self.gms.game.get_board_state_pretty())\n self.play_human_move()\n else:\n print(info['messages'][0])",
"def run_cli_game():\n # Set up game\n view = ConsoleView()\n game = GameEngine(view)\n\n # Game loop\n while not game.game_over:\n view.turn_started()\n\n # Get move to make from user and execute it\n move = input()\n print()\n \n execute_move(move, game, view)",
"def main(args):\n\n cocos.director.director.init(resizable=True)\n\n Conversation = wit_handler.Conversation()\n TextBox = text_box.TextBox(\n enter_function=Conversation.send_message\n )\n\n # Run a scene with our event displayers:\n cocos.director.director.run(cocos.scene.Scene(TextBox))",
"def run_server(self, GameState):\n pass",
"def start(self) -> None:\n\n while not self.stop_listening:\n if self.world_rank == 0:\n command = MDI_Recv_Command(self.comm)\n else:\n command = None\n if self.world_rank == 0:\n print(\"MDI command received: \" + str(command))\n\n # Search for this command in self.commands\n found_command = False\n for supported_command in self.commands:\n if not found_command and command == supported_command:\n # Run the function corresponding to this command\n self.commands[supported_command]()\n found_command = True\n if not found_command:\n raise Exception(\"Unrecognized command: \" + str(command))",
"def _start(args=None):\n options = _parse_args(args)\n main(**options)",
"def start(self) -> None:\n self.execute_startup_menu()\n self.execute_main_menu()",
"def main():\n\ttoken = os.getenv(\"BOT_TOKEN\")\n\tapplication = Application.builder().token(token).read_timeout(30).write_timeout(30).build()\n\tload_interactions(application)\n\tprint(\"Simple Media Converter instance started!\")\n\tapplication.run_polling()",
"def start():\n\n start_server()",
"def start(self):\n self.logger.debug(\"Starting loop\")\n self.client.loop_start()",
"def main():\n return run_server(**parse_server_args())",
"def start():\n commands = {\"new tournament\": Controller.new_tournament,\n \"new round\": Controller.new_round,\n \"new player\": Controller.new_player,\n\n \"set round\": Controller.set_round,\n \"set player\": Controller.set_player,\n\n \"get players -all -alpha\": Controller.get_all_players_alpha,\n \"get players -all -rank\": Controller.get_all_players_rank,\n \"get players -alpha\": Controller.get_players_alpha,\n \"get players -rank\": Controller.get_players_rank,\n\n \"get tournament -all\": Controller.get_all_tournaments,\n \"get tournament\": Controller.get_tournament,\n\n \"get round -all\": Controller.get_all_rounds,\n \"get round\": Controller.get_round,\n\n \"get match -all\": Controller.get_all_matches,\n \"get match\": Controller.get_match,\n\n \"load\": Controller.load,\n\n \"exit\": Controller.close_app\n }\n\n # At the beginning of the program, load all data from a data_base.\n Controller.load()\n print(\"Need help? Type 'commands' to see all commands and there purposes.\")\n\n while True:\n instruction = str(input(\"ChessManager >>> \"))\n try:\n commands[instruction]()\n except KeyError:\n print(\"Wrong Command.\")",
"def start(self):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # open socket, AF_INET=get IPv4 address, SOCK_STREAM=protocol TCP\n s.bind((HOST, PORT)) # bind socket to the address (ip,port)\n print(\"[STARTING] server is starting...\")\n s.listen() # start listening to clients\n print(f\"[LISTENING] Server is listening on {HOST, PORT}\")\n while True:\n cl_socket, addr = s.accept() # accept client\n # if there are less than 3 active players - accept, else - continue (wait for next request)\n if threading.activeCount() == MAX_GAMES_LIVE + 1: # this thread + max amount of games\n # 3 active players, request denied\n cl_socket.send('[1]Game manager is full, please try again later'.encode())\n cl_socket.close()\n print(f\"[CONNECTION DENIED] {addr}\")\n else: # make new thread and start the game\n cl_socket.send(\"[0]OK let's play\".encode()) # verification msg for client\n print(f\"[NEW CONNECTION] {addr} connected.\")\n thread = threading.Thread(target=self.new_client, args=(cl_socket, addr)) # open thread with 'new_client' function\n thread.start()\n print(f\"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}\")",
"def start_run(*args, **kwargs):\n return fluent.start_run(*args, **kwargs)",
"def start_game(self):\n return self.do_actions('before_game')",
"def main():\n\n addon_url = sys.argv[0]\n addon_handle = int(sys.argv[1])\n addon_args = urlparse.parse_qs(sys.argv[2][1:])\n\n # Route request to action.\n Plugin(addon_url, addon_handle, addon_args).route()",
"def run(self):\n # check param and env\n self.sanity_check()\n\n # only-check mode\n if self.module.check_mode:\n self.module.exit_json(**self.result)\n\n self.init_session()\n\n action = self.select_action()\n action()",
"def start(self):\n assert(self._cbs is not None)\n self._as.start() # start the server",
"def _run(self, client: OpenrCtrl.Client, *args, **kwargs) -> None:\n\n raise NotImplementedError",
"def main_menu():\n connection = ClientToServer(ip = \"192.168.1.1\", \n port = \"1234\",\n username = \"Admin\",\n hns_password = \"password\")\n \n game_loop(connection)",
"def main():\n\n if (len(sys.argv) < 3):\n print 'Usage: python serverclient.py <server|client> <port>\\n'\n return -1\n else:\n if sys.argv[1].lower() == 'server':\n Server(sys.argv[2])\n elif sys.argv[1].lower() == 'client':\n Client(sys.argv[2])\n else:\n print 'Unrecognized argument: ', sys.argv[1]\n return -1\n return 0",
"def main(self):\n self.parse_option()\n self.set_option()\n\n r = Bootscripts()\n reactor.listenTCP(8009, server.Site(r))\n reactor.run()",
"def main(self):\n if len(sys.argv) == 1:\n print(\"\"\"IP and Port number not provided\n Refer help using --help command\"\"\")\n sys.exit(0)\n elif len(sys.argv) == 2 and sys.argv[1] == '--default':\n print(\"Using default arguments\")\n server_object = Server(ip=\"127.0.0.1\", port=2345)\n server_object.receive_data()\n elif len(sys.argv) == 2 and sys.argv[1] == '--help':\n print(\"\"\"Program arguments:\n python3 Client.py \"ip-address\" \"port-number\" \"\"\")\n sys.exit(0)\n elif len(sys.argv) == 2:\n print(\"\"\"Invalid arguments\n Refer help(--help) for more information\"\"\")\n sys.exit(0)\n elif len(sys.argv) == 3:\n server_object = Server(sys.argv[1], sys.argv[2])\n server_object.receive_data()",
"def run():\n server = current_server()\n server._auto_stop = True\n return start()",
"def start_game(self):\n\n\t\tpass",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"URI\")\n args = parser.parse_args()\n smart_client(args.URI)",
"def main():\n user_interaction()",
"def startup(self, override_args=None):\n self._app_name = sys.argv[0]\n if override_args:\n my_args = [self._app_name]\n my_args = my_args + override_args\n else:\n my_args = sys.argv[:]\n\n if len(my_args) != 2:\n print(\"\"\"Usage:\n\n {0} sitl\n run with built-in SITL simulator\n {0} render\n just render the behaviour tree\n {0} <connection string>\n connect as prescribed and fly the mission\"\"\".format((self._app_name)))\n elif my_args[1] == 'sitl':\n self._sitl = dronekit_sitl.start_default(lat=self._sitl_lat,\n lon=self._sitl_lon)\n self._connection_string = self._sitl.connection_string()\n print(\"Using SITL via {}\".format(self._connection_string))\n self.connect()\n elif my_args[1] == 'render':\n print(\"Rendering only\")\n self.render()\n else:\n self._connection_string = my_args[1]\n print(f\"Attempting to connect via {self._connection_string}\")\n self.connect()",
"def RunServer():\n\n # get command line argument of server, port\n host, port = sys.argv[1].split(\":\")\n ruleset_name = sys.argv[2]\n starting_round = sys.argv[3]\n server = GameServer(localaddr=(host, int(port)), ruleset=ruleset_name, startinground = starting_round )\n while not server.game_over:\n server.Pump()\n sleep(0.0001)\n print(\"To play again restart server with same command\")",
"def start(self):\n\n # Call the protected _turn method to start the game\n self._turn()",
"def main():\n\n global _CLIENT\n\n logging.basicConfig(level=logging.DEBUG)\n app.logger.setLevel(logging.INFO)\n\n _CLIENT = Client('192.168.0.120', 443, 'root', 'calvin')\n _CLIENT.connect()\n\n\n app.run(debug=True)",
"def start_game(self):\n env = os.environ.copy()\n hook_path = os.path.join('hook', 'libhook.so')\n game_path = os.path.join(env.get('HOME'), '.local', 'share', 'Steam',\n 'steamapps', 'common', 'Super Hexagon',\n 'SuperHexagon')\n\n env['LD_PRELOAD'] = os.path.abspath(hook_path)\n args = [\"bash\", game_path]\n\n self.controller.handle_keys([])\n\n self.frame_counter = 0\n self.dead_until = None\n\n self.game_process = subprocess.Popen(\n args,\n env=env,\n # stdout=subprocess.DEVNULL,\n )",
"def Run(self, _):\n client = GetClientFromFlags()\n params = GetGlobalParamsFromFlags()\n for field in params.all_fields():\n value = params.get_assigned_value(field.name)\n if value != field.default:\n client.AddGlobalParam(field.name, value)\n banner = \"\"\"\n == iam interactive console ==\n client: a iam client\n apitools_base: base apitools module\n messages: the generated messages module\n \"\"\"\n local_vars = {\n 'apitools_base': apitools_base,\n 'client': client,\n 'client_lib': client_lib,\n 'messages': messages,\n }\n if platform.system() == 'Linux':\n console = apitools_base_cli.ConsoleWithReadline(\n local_vars, histfile=FLAGS.history_file)\n else:\n console = code.InteractiveConsole(local_vars)\n try:\n console.interact(banner)\n except SystemExit as e:\n return e.code",
"def start(self):\n\n self.app.go()",
"def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. \"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")",
"def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])",
"def start(self):\n self.start_time = dt.datetime.now()\n self.call = ' '.join(sys.argv)\n self.commands = []",
"def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()",
"def _main():\n import sys\n\n def log(message):\n print(message)\n\n def print_usage():\n log('usage: %s <application key> send <number> <message> <from_number>' % sys.argv[0])\n log(' %s <application key> status <message_id>' % sys.argv[0])\n log(' %s <application key> balance' % sys.argv[0])\n\n if len(sys.argv) > 4 and sys.argv[2] == 'send':\n key, number, message = sys.argv[1], sys.argv[3], sys.argv[4]\n client = SkylineSms(key)\n if len(sys.argv) > 6:\n log(client.send_message(number, message, sys.argv[6]))\n else:\n log(client.send_message(number, message))\n elif len(sys.argv) > 2 and sys.argv[2] == 'status':\n key, message_id = sys.argv[1], sys.argv[3]\n client = SkylineSms(key)\n log(client.check_status(message_id))\n elif len(sys.argv) > 2 and sys.argv[2] == 'balance':\n key = sys.argv[1]\n client = SkylineSms(key)\n log(client.balance())\n else:\n print_usage()\n sys.exit(1)\n\n sys.exit(0)",
"def bot():\n log(log.DEBUG, 'Starting bot')\n bot = BotSad()\n bot.listen()",
"def start(cfg: 'Settings', server: str):\n\n try:\n server_path = find_server(cfg.parent_directory, server)\n except (ParentDirMissing, ServerNotFound, NoInvocation) as e:\n e.log_this()\n return\n\n if isUp(server):\n log.info(f'{server} appears to be running already!')\n else:\n invocation = get_invocation(server_path)\n os.chdir(server_path)\n log.info(f'Starting {server}')\n run(['screen', '-h', '5000', '-dmS', server, *invocation, 'nogui'])\n sleep(5)\n if isUp(server):\n log.info(f'{server} is now running!')\n # run_startup_commands(server)\n else:\n log.warning(f'{server} does not appear to have started!')",
"def start(self):\n self.save_checkpoint(\"setup\")\n\n logging.info(\"Starting game...\")\n body = render_message(\n \"welcome.html\",\n game_name=self.name,\n night_end=self.night_end.strftime(\"%I:%M %p\"),\n day_end=self.day_end.strftime(\"%I:%M %p\"),\n players=self.game.players,\n )\n self.send_message(mafia.events.PUBLIC, \"%s: Start\" % self.name, body)\n self.game.begin()\n self.started = True\n\n self.save_checkpoint(\"start\")",
"def init(\n self,\n ) -> bool:\n success = True\n try:\n self._allowed_users = self._get_allowed_users(\n **self._config[\"allowed_users\"]\n )\n self._api_key = keyring.get_password(\n self._config[\"namespace\"], self._config[\"api\"]\n )\n # Create the Application and pass it your bot's token.\n self.application = Application.builder().token(self._api_key).build()\n # on different commands - answer in Telegram\n self.application.add_handler(\n CommandHandler(\n command=\"status\",\n callback=self._check_status,\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"pump\",\n callback=self._toggle_pump,\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"valve1\",\n callback=partial(self._toggle_valve, valve_number=1),\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"valve2\",\n callback=partial(self._toggle_valve, valve_number=2),\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"valve3\",\n callback=partial(self._toggle_valve, valve_number=3),\n filters=filters.Chat(self._allowed_users),\n )\n )\n self.application.add_handler(\n CommandHandler(\n command=\"holidays\",\n callback=self._toggle_holidays,\n filters=filters.Chat(self._allowed_users),\n )\n )\n # on non command i.e message - echo the message on Telegram\n self.application.add_handler(\n MessageHandler(filters.TEXT & ~filters.COMMAND, None)\n )\n # Step log runninng al 200ms\n context = CallbackContext(self.application)\n context.job_queue.run_repeating(callback=self.step_log, interval=0.2)\n # logging info\n print(f\"'{self._name}' - {self._pid} successfully initialized\")\n self.telegram_queue.put(\n f\"Process {self._pid} - '{self._name}' successfully initialized\"\n )\n # Run the bot until the user presses Ctrl-C\n self.application.run_polling(allowed_updates=Update.ALL_TYPES)\n except Exception as error:\n print(f\"Process {self._pid} - \" + repr(error))\n success = False\n return success",
"def __init__(self, client, game):\n super().__init__(client, game)\n self.actions = {} # deprecated\n self.state_handlers = {}\n self.add_handler(\"INIT\", self._initialize)\n self.add_handler(\"STOP\", self._stop)",
"def main(self):\n \n self.log = sys.argv[1]\n self.listener = pyxhook.HookManager()\n self.listener.KeyDown = self.capture_key_press\n self.listener.HookKeyboard()\n self.listener.start()",
"def start(self):\n self._client.predict(\n endpoint=self._endpoint, instances=self._request)\n\n if self._completion_callback:\n if self._query_handle:\n callback_args = [self._query_handle]\n else:\n callback_args = []\n self._completion_callback(*callback_args)",
"def run_cmd(server, client):\n msg = [client.get_command()]\n client.input_list += msg\n server.logger.info(\"RECEIVED INPUT {} : {}\".format(client.ip, msg[0]))\n if not client.username or not client.password:\n server.login_screen(client, msg)\n return\n loop_cmds(server, client, msg[0].split(';'))\n server.return_prompt(client)",
"def TerminalClientStart(self):\n pass",
"def start_simulation( # pylint: disable=too-many-arguments\n *,\n client_fn: Callable[[str], ClientLike],\n num_clients: Optional[int] = None,\n clients_ids: Optional[List[str]] = None,\n client_resources: Optional[Dict[str, float]] = None,\n server: Optional[Server] = None,\n config: Optional[ServerConfig] = None,\n strategy: Optional[Strategy] = None,\n client_manager: Optional[ClientManager] = None,\n ray_init_args: Optional[Dict[str, Any]] = None,\n keep_initialised: Optional[bool] = False,\n) -> History:\n # pylint: disable-msg=too-many-locals\n event(\n EventType.START_SIMULATION_ENTER,\n {\"num_clients\": len(clients_ids) if clients_ids is not None else num_clients},\n )\n\n # Initialize server and server config\n initialized_server, initialized_config = init_defaults(\n server=server,\n config=config,\n strategy=strategy,\n client_manager=client_manager,\n )\n log(\n INFO,\n \"Starting Flower simulation, config: %s\",\n initialized_config,\n )\n\n # clients_ids takes precedence\n cids: List[str]\n if clients_ids is not None:\n if (num_clients is not None) and (len(clients_ids) != num_clients):\n log(ERROR, INVALID_ARGUMENTS_START_SIMULATION)\n sys.exit()\n else:\n cids = clients_ids\n else:\n if num_clients is None:\n log(ERROR, INVALID_ARGUMENTS_START_SIMULATION)\n sys.exit()\n else:\n cids = [str(x) for x in range(num_clients)]\n\n # Default arguments for Ray initialization\n if not ray_init_args:\n ray_init_args = {\n \"ignore_reinit_error\": True,\n \"include_dashboard\": False,\n }\n\n # Shut down Ray if it has already been initialized (unless asked not to)\n if ray.is_initialized() and not keep_initialised:\n ray.shutdown()\n\n # Initialize Ray\n ray.init(**ray_init_args)\n log(\n INFO,\n \"Flower VCE: Ray initialized with resources: %s\",\n ray.cluster_resources(),\n )\n\n # Register one RayClientProxy object for each client with the ClientManager\n resources = client_resources if client_resources is not None else {}\n for cid in cids:\n client_proxy = RayClientProxy(\n client_fn=client_fn,\n cid=cid,\n resources=resources,\n )\n initialized_server.client_manager().register(client=client_proxy)\n\n # pylint: disable=broad-except\n try:\n # Start training\n hist = run_fl(\n server=initialized_server,\n config=initialized_config,\n )\n except Exception as ex:\n log(ERROR, ex)\n log(\n ERROR,\n \"Your simulation crashed :(. This could be because of several reasons.\"\n \"The most common are: \"\n \"\\n\\t > Your system couldn't fit a single VirtualClient: try lowering \"\n \"`client_resources`. You used: %s\"\n \"\\n\\t > Too many VirtualClients were spawned causing an issue: try raising \"\n \"`client_resources`. You used: %s\",\n client_resources,\n client_resources,\n )\n hist = History()\n\n event(EventType.START_SIMULATION_LEAVE)\n\n return hist",
"def main(**kwargs):\n print('Start')\n agent = initAgent(**kwargs)\n kwargs['agent'] = agent\n result = []\n\n def mainsub(*args):\n game = Game(**kwargs)\n game.display(kwargs['noshow'])\n while True:\n # get_input = getch(\"Enter direction (w/a/s/d): \")\n get_input = game.action()\n if get_input in keypad:\n game.move(keypad.index(get_input))\n game.update()\n # elif get_input == \"q\":\n # break\n # else:\n # print(\"\\nInvalid choice.\")\n # continue\n if game.end:\n game.savegame()\n game.display(kwargs['noshow'])\n print(\"Result:\", game.nturn, game.score)\n break\n game.display(kwargs['noshow'])\n result.append((game.score, game.nturn))\n game.agent.replay()\n if kwargs['train']:\n game.agent.save()\n game.reset()\n if kwargs['train']:\n np.save('result.%s' % game.agent.algo, np.array(result))\n\n map(mainsub, range(kwargs['n']))\n print(\"Thanks for playing.\")",
"def run(self):\n try:\n while True:\n utils.clear_screen()\n utils.write('Which of the following actions would you like to take?\\n')\n for opt in self._options.values():\n utils.write('Action: {!r}\\nDescription: {}\\n'.format(\n opt.name, opt.description))\n action = utils.prompt_enum(\n '', accepted_values=list(self._options.keys()),\n case_sensitive=False).strip().lower()\n callback = self._options[action].callback\n if callback is None:\n break\n self = callback()\n finally:\n utils.write(\n 'Done managing Grab n Go for Cloud Project {!r}.'.format(\n self._config.project))",
"async def main(event):\n if conf.MATRIX_PW:\n LOGGER.info(f\"Log in {conf.MATRIX_ID=} on {conf.MATRIX_URL=}\")\n await utils.CLIENT.login(conf.MATRIX_PW)\n else:\n LOGGER.info(f\"Restoring log in {conf.MATRIX_ID=} on {conf.MATRIX_URL=}\")\n utils.CLIENT.access_token = conf.MATRIX_TOKEN\n\n server = web.Server(handler.matrix_webhook)\n runner = web.ServerRunner(server)\n await runner.setup()\n LOGGER.info(f\"Binding on {conf.SERVER_ADDRESS=}\")\n site = web.TCPSite(runner, *conf.SERVER_ADDRESS)\n await site.start()\n\n # Run until we get a shutdown request\n await event.wait()\n\n # Cleanup\n await runner.cleanup()\n await utils.CLIENT.close()",
"def run(self, *args, **kwargs) -> None:\n\n with get_openr_ctrl_client(self.host, self.cli_opts) as client:\n self._run(client, *args, **kwargs)",
"def run(self):\r\n while self._go.isSet(): #while app is running\r\n if self._check_console_input(): #if something to read on the console\r\n cmd = sys.stdin.readline() #read it\r\n self.inq.put(cmd) #dispatch it tpo the server\r\n response = self.outq.get(timeout=2.0) #wait for an answer\r\n sys.stdout.write(response) #write the answer on the console\r",
"def _do_start(self, chat_id, user_id, args, update):\n \n self.tclient.send_message('Hallo! Ich bin ein Bot, um dir zu helfen, dir deine Nasensprüche zu merken!', chat_id)",
"def start():\r\n\r\n userName = userLogin.login()\r\n runApp(userName)",
"def start(self) -> None:\n app = web.Application()\n app.add_routes([web.post(\"/\", self._handle_request)])\n self._runner = web.AppRunner(app)\n\n self._startup_event = threading.Event()\n self._server_loop = asyncio.new_event_loop()\n t = threading.Thread(target=self._run)\n t.start()\n\n # Wait for server to startup\n self._startup_event.wait()",
"def run(self) -> None:\n self._hass.turn_on('scene.{0}'.format(self._args['scene']))",
"def start(parse_opts):\n global opts\n opts = parse_opts\n app.run(host='0.0.0.0')",
"def run():\r\n log.debug('Starter::run()')\r\n try:\r\n # check specified port\r\n if not conf.port:\r\n raise Exception(\"Please specify port number! (use --port)\")\r\n Server(conf.port).run()\r\n except Exception as E:\r\n log.critical(E)",
"def start(self, **kwargs):\n pass",
"def start(self, **kwargs):\n pass",
"def start_interaction(self):\n self.__interact()",
"def start_cmd(wrapper: MessageDispatcher, message: str):\n if wrapper.target is channels.Main:\n start(wrapper)",
"def main():\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument(\n \"--port\", type=int, default=9090, help=\"The port number to listen to.\",\n )\n parser.add_argument(\"--settings\", help=\"Full path to settings file.\", required=True)\n parser.add_argument(\n \"--ssl_context\",\n help=\"A key and certificate file pair to run the server in HTTPS mode.\",\n nargs=2,\n )\n\n args = parser.parse_args()\n\n keyfile = None\n certfile = None\n if args.ssl_context:\n keyfile, certfile = args.ssl_context\n\n run_server(\n port=args.port, settings=args.settings, keyfile=keyfile, certfile=certfile,\n )",
"def start(self):\n gevent.spawn(self.run)",
"def run_interactive():\n from cherrypy import engine\n \n # This is what quickstart does but we don't block\n engine.signals.subscribe()\n engine.start()\n #engine.block()",
"def loop_start( self ):\n self.client.loop_start()",
"def start(self):\n\n p = Parser()\n if self.event_status < 1:\n print(\"\\n\" * 100)\n self.game_intro()\n print(\"\\n\" * 100)\n\n playing = True\n while playing:\n self.check_upgrades()\n self.check_energy()\n self.check_event_status()\n cur_location = self.player.get_location()\n cur_location.print_description(self.event_status)\n cur_location.print_details(self.event_status)\n print_player_info(self.player)\n cur_location.set_visited(True)\n\n player_command = get_command()\n cmd_action, cmd_exit, cmd_direction, cmd_item, cmd_character = Parser.action_requested(player_command)\n\n print(\"\\n\" * 100)\n if cmd_action == GO:\n self.player.go_exit(self.event_status, direction=cmd_direction, exit_name=cmd_exit)\n\n elif cmd_action == TAKE:\n if cmd_item is None:\n print(\"You can't take that.\")\n else:\n self.player.take(cmd_item)\n\n elif cmd_action == DROP:\n if cmd_item is None:\n print(\"You can't drop that.\")\n else:\n self.player.drop(cmd_item)\n\n elif cmd_action == TALK:\n if cmd_character is None:\n print(\"You can't do talk to that.\")\n else:\n self.player.talk(cmd_character, self.event_status)\n\n elif cmd_action == LOOK:\n self.player.look(self.event_status)\n\n elif cmd_action == SAVEGAME:\n tmp_save_dir = input(\"Enter the save name\\n> \")\n if tmp_save_dir:\n save_dir = tmp_save_dir\n else:\n save_dir = None\n self.save(save_dir)\n\n elif cmd_action == QUIT:\n print(\"Exiting the game...\")\n return\n\n elif cmd_action == LOOK_AT:\n if cmd_item is None:\n print(\"You can't look at that.\")\n else:\n self.player.look_at(cmd_item)\n\n elif cmd_action == LISTEN:\n self.player.listen()\n\n elif cmd_action == PULL:\n if cmd_item is None:\n print(\"You can't pull that.\")\n else:\n self.pull(cmd_item)\n\n elif cmd_action == PUSH:\n if cmd_item is None:\n print(\"You can't push that.\")\n else:\n self.push(cmd_item)\n\n elif cmd_action == CHARGE:\n self.player.charge()\n\n elif cmd_action == USE:\n if cmd_item is None:\n print(\"You can't use that.\")\n else:\n self.use(cmd_item)\n\n elif cmd_action == WAIT:\n sleep_rate = 0.2\n print(\"You wait for a few moments...\")\n time.sleep(2)\n duration = time.time() + 5\n while time.time() < duration:\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"****\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"***\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"**\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"*\")\n time.sleep(sleep_rate)\n print(\"\\n\" * 100)\n print(\"Nothing happened...\")\n time.sleep(2)\n print(\"\\n\" * 100)\n\n elif cmd_action == HELP:\n self.help()\n # wait for user to finish reading\n input(\"Press 'enter' to continue.\")\n\n elif cmd_action == INVENTORY:\n self.player.print_inventory()\n\n elif cmd_action == LOADGAME:\n saved_games_dir = os.path.join(os.getcwd(), \"saved_games\")\n\n # Print Available Saved Games\n print(\"Enter the number of the game you want to load.\")\n saved_games = [game for game in os.listdir(saved_games_dir)]\n for index, sg in enumerate(saved_games):\n print(\"{0}. 
{1}\".format(index + 1, sg))\n\n # TODO error checking on user input\n user_game_selection = input(\">\")\n user_game = saved_games[int(user_game_selection) - 1]\n print(\"Loading game: {0}\".format(user_game))\n print(\"\\n\" * 100)\n self.load_game(os.path.join(saved_games_dir, user_game))\n else:\n print(\"Huh? That doesn't make any sense.\")",
"def run(self):\n import copen\n # If the client didn't specify a target function, then don't\n # do any processing.\n if not self._target:\n self._finished = 1\n else:\n self._handle = copen.copen_fn(\n self._target, *self._args, **self._kwargs)",
"def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])",
"def eventloop(cls, custom_actions=[]):\n iiter = cls([None], custom_actions=custom_actions, verbose=False)\n print('[IITER] Begining interactive main loop')\n for _ in iiter:\n pass\n return iiter",
"def start(update, context):\n update.message.reply_text(START_HELLO)",
"def stasis_start_cb(channel, ev):\n await channel.answer()\n await bridge.addChannel(channel=channel.id)",
"def main():\n player = Player(LivingRoom())\n escaping = True\n\n print('Alright kid, it\\'s you and me on a grand adventure. We\\'re '\n 'currently in the {}, and I can see {} possible exits. You can '\n 'search the room or try exploring, if you like.'\n .format(player.location.name, player.location.exits))\n\n while escaping:\n # need to replace hard list with extract from player.actions\n action = input('\\nWhat now?\\n\\n1. Search\\t2. Grab\\t3. Gurgle\\n>')\n\n if action in player.actions.keys():\n player.actions[action]()",
"def start(self, **kwargs):\n return self.client.api.start(self.id, **kwargs)"
] | [
"0.61051023",
"0.597413",
"0.59535104",
"0.5894354",
"0.58022404",
"0.5720852",
"0.56565666",
"0.56241596",
"0.56227505",
"0.5545233",
"0.5524626",
"0.54708153",
"0.54685193",
"0.54664093",
"0.5453326",
"0.5451305",
"0.54465693",
"0.5406699",
"0.5384227",
"0.53778505",
"0.5373198",
"0.53630126",
"0.5355289",
"0.5341888",
"0.53399384",
"0.53392494",
"0.5337174",
"0.52947944",
"0.5291907",
"0.52859837",
"0.52640337",
"0.5262829",
"0.5253508",
"0.5253049",
"0.524907",
"0.52461195",
"0.5245039",
"0.5244541",
"0.52394146",
"0.5236551",
"0.52355474",
"0.5216015",
"0.5211702",
"0.5208786",
"0.5207029",
"0.520166",
"0.5198246",
"0.5191657",
"0.51893735",
"0.5186428",
"0.5173014",
"0.5168862",
"0.5161251",
"0.51515526",
"0.514936",
"0.5145198",
"0.5135357",
"0.5111023",
"0.5110363",
"0.51016176",
"0.5099562",
"0.5097229",
"0.5095348",
"0.5093835",
"0.50922626",
"0.5084898",
"0.507803",
"0.5065299",
"0.5060634",
"0.5054112",
"0.5046701",
"0.5041348",
"0.50411296",
"0.5037258",
"0.5033062",
"0.5031495",
"0.50294095",
"0.50281286",
"0.50245064",
"0.502129",
"0.50207067",
"0.50140965",
"0.5010153",
"0.5009334",
"0.50086087",
"0.50086087",
"0.50042975",
"0.50034165",
"0.49987227",
"0.49966276",
"0.49927762",
"0.49905974",
"0.4990578",
"0.4981802",
"0.4978076",
"0.49753985",
"0.49683818",
"0.4965741",
"0.4965326",
"0.4950803"
] | 0.6006053 | 1 |
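Taken together, the `start(turn_handler)` document above and the `_on_message` handler in the record that follows pin down the callback contract: the handler is called with the player id, the list of legal actions, and the current state, and must return the action to play. A trivially conforming handler (the random policy is purely illustrative, not part of the SDK):

```python
import random

# Minimal turn handler for the websocket SDK above: called with
# (player, actions, state), must return the chosen action.
def turn_handler(player, actions, state):
    return random.choice(actions)  # illustrative policy: pick any legal action

# start(turn_handler)  # wiring it up would drive the game loop
```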
This is a private method that handles incoming messages from the websocket, passes the turn information to the agent's turn handler, and then sends the resulting action back to the server. | def _on_message(ws, msg, turn_handler):
def x():
parsed = json.loads(msg)
player = parsed['player']
actions = parsed['actions']
state = parsed['state']
action = turn_handler(player, actions, state)
response = {"action":action}
ws.send(json.dumps(response))
_thread.start_new_thread(x, ()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_message(self, wsobj, message):\n\n message = json.loads(message)\n\n # If needed, complete the websocket handshake\n if message[\"op\"] == \"C\":\n self.on_open(wsobj, message=message)\n\n # The next few lines ensure only gameplay related event for the\n # specified game are provided. Otherwise, ESPN's websockets include\n # noisy league-wide information.\n elif \"pl\" in message:\n if message[\"pl\"] != \"0\" and message[\"tc\"] == self.channel:\n decoded = self.decode_message(message)\n self.write_message(wsobj, decoded)",
"def on_message(self, ws, buf):\n if self.phase_auth:\n self.on_phase_auth_message(buf)\n return\n\n msg = self.__nanojsonrpc_unpack(buf)\n shot = msg['shot']\n\n if shot < 0:\n # Authenticate\n self.__ws_conn.send(self.__nanojsonrpc_pack('auth'))\n\n elif self.__shot_finished[shot]:\n # End\n pass\n\n elif shot in self.shot_threadings:\n # Forward\n self.__shot_inboxes[shot].put(msg)\n\n else:\n # If the shot does not exist, a 'worker' will be established.\n worker = UbqcClient(\n shot,\n self.__shot_inboxes[shot],\n self.shot_outbox,\n self.program,\n )\n worker.daemon = True\n\n self.shot_threadings[shot] = worker\n worker.start()\n self.__shot_inboxes[shot].put(msg)",
"def handleMessage(self, channels, sender, code, datagram):\n self.stateServer.handle(channels, sender, code, datagram)\n self.clientAgent.handle(channels, sender, code, datagram)\n self.databaseServer.handle(channels, sender, code, datagram)",
"def start(turn_handler):\n\n if os.environ.get('BOTBOX_SECRET'):\n print('Using env secret:', os.environ['BOTBOX_SECRET'])\n headers = {'Authorization': os.environ['BOTBOX_SECRET']}\n elif len(sys.argv) > 1:\n print('Using cli secret:', sys.argv[1])\n headers = {'Authorization': sys.argv[1]}\n else:\n print('Using no authentication')\n headers = []\n\n # get the URL for the server from an environment variable if it is set,\n # otherwise use the default localhost\n if os.environ.get('BOTBOX_SERVER'):\n url = (WS_SERVER_SCHEME + '://'\n + os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)\n else:\n url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT\n\n print(\"Connecting to:\", url)\n\n ws = websocket.WebSocketApp(\n url,\n on_open = _on_open,\n on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),\n on_error = _on_error,\n on_close = _on_close,\n header = headers\n )\n\n ws.run_forever()",
"def process_turn(self):\n data = self.bot.on_turn({'map': self.encode_map(), 'player_num': PLAYER_ID})\n for action in data.get('ACTIONS', []):\n f = getattr(self, action['action_type'].lower(), lambda **k: None)\n f(**action)",
"def handleMessage(msg):",
"async def receiver(self):\n socket_input = await self.websocket.recv()\n logger.debug(\"<<< Received:\\n{}\".format(socket_input))\n\n # Showdown sends this response on initial connection\n if socket_input == \"o\":\n logger.info(\"Connected on {}\".format(self.websocket_url))\n self.connected = True\n self.add_task(self.on_connect())\n return\n\n inputs = utils.parse_socket_input(socket_input)\n for room_id, inp in inputs:\n room_id = room_id or \"lobby\"\n logger.debug(\"||| Parsing:\\n{}\".format(inp))\n inp_type, params = utils.parse_text_input(inp)\n\n # Set challstr attributes and autologin\n if inp_type == \"challstr\":\n self.challengekeyid, self.challstr = params\n if self.name and self.password and self.autologin:\n await self.login()\n elif self.autologin:\n msg = (\n \"Cannot login without username and password. If \"\n \"you don't want your client to be logged in, \"\n \"you can use Client.start(autologin=False).\"\n )\n raise Exception(msg)\n\n # Process query response\n elif inp_type == \"queryresponse\":\n response_type, data = params[0], \"|\".join(params[1:])\n data = json.loads(data)\n self.add_task(\n self.on_query_response(response_type, data), transient=True\n )\n if response_type == \"savereplay\":\n self.add_task(\n self.server.save_replay_async(data), transient=True\n )\n\n # Challenge updates\n elif inp_type == \"updatechallenges\":\n self.challenges = json.loads(params[0])\n self.add_task(\n self.on_challenge_update(self.challenges), transient=True\n )\n\n # Messages\n elif inp_type == \"c:\" or inp_type == \"c\":\n timestamp = None\n if inp_type == \"c:\":\n timestamp, params = int(params[0]), params[1:]\n author_str, *content = params\n content = \"|\".join(content)\n chat_message = message.ChatMessage(\n room_id, timestamp, author_str, content, client=self\n )\n self.add_task(\n self.on_chat_message(chat_message), transient=True\n )\n elif inp_type == \"pm\":\n author_str, recipient_str, *content = params\n content = \"|\".join(content)\n private_message = message.PrivateMessage(\n author_str, recipient_str, content, client=self\n )\n self.add_task(\n self.on_private_message(private_message), transient=True\n )\n\n # Rooms\n elif inp_type == \"init\":\n room_type = params[0]\n room_obj = room.class_map.get(room_type, room.Room)(\n room_id, client=self, max_logs=self.max_room_logs\n )\n self.rooms[room_id] = room_obj\n self.add_task(self.on_room_init(room_obj), transient=True)\n elif inp_type == \"deinit\":\n if room_id in self.rooms:\n self.add_task(\n self.on_room_deinit(self.rooms.pop(room_id)),\n transient=True,\n )\n\n # add content to proper room\n if isinstance(self.rooms.get(room_id, None), room.Room):\n self.rooms[room_id].add_content(inp)\n\n self.add_task(\n self.on_receive(room_id, inp_type, params), transient=True\n )",
"def message_received_from_server(self, message):\n\n if message[\"type\"] == \"state\":\n self._board = message[\"board\"]\n self._winner = message[\"winner\"]\n elif message[\"type\"] == \"turn\":\n row, col = self._find_empty_cell()\n self.send_message_to_server({\n \"type\": \"move\",\n \"row\": row,\n \"column\": col\n })",
"def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))",
"def process_websocket(ws):\n try:\n yield from ws.receive()\n except aiohttp.errors.WSServerHandshakeError:\n pass",
"def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))",
"def handle(self, message: Message) -> None:\n self.handled_message = message\n envelope = Envelope(\n to=message.counterparty,\n sender=self.context.agent_address,\n protocol_id=TwoPartyNegotiationMessage.protocol_id,\n message=self.encoded_message_2_in_bytes,\n )\n self.context.outbox.put(envelope)",
"def handleMessage(self):\n try:\n # This whole block is wrapped in a try/except because the default\n # behaviour of the SimpleWebSocketServer library is to silently\n # discard any exceptions raised by these handlers. This is very\n # unhelpful. A workaround is to catch and log any exceptions\n # explicitly here.\n logging.debug(\"%s %s\", self.address, \"incoming message\")\n is_binary = not isinstance(self.data, str)\n if is_binary:\n message = self.data\n else:\n message = json.loads(self.data)\n\n self.logbook.messages_received.append(message)\n for response in self.get_responses(message, is_binary=is_binary):\n self.logbook.messages_sent.append(response)\n self.sendMessage(json.dumps(response).encode(\"utf-8\"))\n except Exception as exc: # pylint: disable=broad-except\n logging.exception(str(exc))\n self.close(status=1011, reason=\"Internal server error\")",
"async def _incoming_ws(self, pid, websocket):\n # websockets have a convenient __aiter__ interface, allowing\n # us to just iterate over the messages forever.\n # Under the hood, if there are no messages available from the\n # WebSocket, this code will yield and until another message is\n # received.\n\n # If the WebSocket is disconnected unexpectedly, the for loop\n # will produce an exception.\n try:\n async for msg in websocket:\n # Trim whitespace\n msg = msg.strip()\n # Make sure the message isn't an empty string\n if msg:\n # Pass the message onto the server's handler.\n self.on_player_msg(pid, msg)\n # If we get this error, then player probably just logged off.\n except websockets.exceptions.ConnectionClosed:\n pass\n finally:\n logging.debug(\"_incoming_ws closed for %s\", pid)",
"def handle(self, message):",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client",
"def handle_message(self, message):",
"def gym_handle(ws):\n while True:\n message = ws.wait()\n if message is None: \n break\n message_handle(ws, message)",
"def _on_inbound_message(self, message):\n if message.channel.startswith(\"actuators/commands/\"):\n actuation = self.inbound_message_deserializer.deserialize_actuator_command(message)\n if actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_SET:\n self.actuation_handler.handle_actuation(actuation.reference, actuation.value)\n\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_STATUS:\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_UNKNOWN:\n print(\"Received unsupported actuation command\")\n\n else:\n print(\"Received unsupported message: \\n\" +\n message.channel + \"\\n\" + message.payload)",
"def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)",
"def handle_message(self, data, channel):\n pass",
"def on_message(self,ws,message):\n pass",
"def process_incoming_message(self):\n\n # Get the webhook data\n post_data = request.json\n\n # Determine the Spark Room to send reply to\n room_id = post_data[\"data\"][\"roomId\"]\n\n # Get the details about the message that was sent.\n message_id = post_data[\"data\"][\"id\"]\n message = self.spark.messages.get(message_id)\n if self.DEBUG:\n sys.stderr.write(\"Message content:\" + \"\\n\")\n sys.stderr.write(str(message) + \"\\n\")\n\n # First make sure not processing a message from the bots\n # Needed to avoid the bot talking to itself\n # We check using IDs instead of emails since the email\n # of the bot could change while the bot is running\n # for example from [email protected] to [email protected]\n if message.personId in self.spark.people.me().id:\n if self.DEBUG:\n sys.stderr.write(\"Ignoring message from our self\" + \"\\n\")\n return \"\"\n\n # Log details on message\n sys.stderr.write(\"Message from: \" + message.personEmail + \"\\n\")\n\n # Find the command that was sent, if any\n command = \"\"\n for c in self.commands.items():\n if message.text.find(c[0]) != -1:\n command = c[0]\n sys.stderr.write(\"Found command: \" + command + \"\\n\")\n # If a command was found, stop looking for others\n break\n\n # Build the reply to the user\n reply = \"\"\n\n # Take action based on command\n # If no command found, send the default_action\n if command in [\"\"] and self.default_action:\n # noinspection PyCallingNonCallable\n reply = self.commands[self.default_action][\"callback\"](message)\n elif command in self.commands.keys():\n # noinspection PyCallingNonCallable\n reply = self.commands[command][\"callback\"](message)\n else:\n pass\n\n # allow command handlers to craft their own Spark message\n if reply and isinstance(reply, Response):\n reply.roomId = room_id\n reply = reply.as_dict()\n self.spark.messages.create(**reply)\n reply = \"ok\"\n elif reply:\n self.spark.messages.create(roomId=room_id, markdown=reply)\n return reply",
"def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))",
"async def _handle_battle_message(self, split_messages: List[List[str]]) -> None:\n # Battle messages can be multiline\n if (\n len(split_messages) > 1\n and len(split_messages[1]) > 1\n and split_messages[1][1] == \"init\"\n ):\n battle_info = split_messages[0][0].split(\"-\")\n battle = await self._create_battle(battle_info)\n else:\n battle = await self._get_battle(split_messages[0][0])\n\n for split_message in split_messages[1:]:\n if len(split_message) <= 1:\n continue\n elif split_message[1] in self.MESSAGES_TO_IGNORE:\n pass\n elif split_message[1] == \"request\":\n if split_message[2]:\n request = orjson.loads(split_message[2])\n battle._parse_request(request)\n if battle.move_on_next_request:\n await self._handle_battle_request(battle)\n battle.move_on_next_request = False\n elif split_message[1] == \"win\" or split_message[1] == \"tie\":\n if split_message[1] == \"win\":\n battle._won_by(split_message[2])\n else:\n battle._tied()\n await self._battle_count_queue.get()\n self._battle_count_queue.task_done()\n self._battle_finished_callback(battle)\n async with self._battle_end_condition:\n self._battle_end_condition.notify_all()\n elif split_message[1] == \"error\":\n self.logger.log(\n 25, \"Error message received: %s\", \"|\".join(split_message)\n )\n if split_message[2].startswith(\n \"[Invalid choice] Sorry, too late to make a different move\"\n ):\n if battle.trapped:\n await self._handle_battle_request(battle)\n elif split_message[2].startswith(\n \"[Unavailable choice] Can't switch: The active Pokémon is \"\n \"trapped\"\n ) or split_message[2].startswith(\n \"[Invalid choice] Can't switch: The active Pokémon is trapped\"\n ):\n battle.trapped = True\n await self._handle_battle_request(battle)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't switch: You can't switch to an active \"\n \"Pokémon\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't switch: You can't switch to a fainted \"\n \"Pokémon\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: Invalid target for\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: You can't choose a target for\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: \"\n ) and split_message[2].endswith(\"needs a target\"):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif (\n split_message[2].startswith(\"[Invalid choice] Can't move: Your\")\n and \" doesn't have a move matching \" in split_message[2]\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Incomplete choice: \"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Unavailable choice]\"\n ) and split_message[2].endswith(\"is disabled\"):\n battle.move_on_next_request = True\n elif split_message[2].startswith(\"[Invalid choice]\") and split_message[\n 2\n ].endswith(\"is disabled\"):\n battle.move_on_next_request = True\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: You sent more choices than unfainted\"\n \" Pokémon.\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif 
split_message[2].startswith(\n \"[Invalid choice] Can't move: You can only Terastallize once per battle.\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n else:\n self.logger.critical(\"Unexpected error message: %s\", split_message)\n elif split_message[1] == \"turn\":\n battle._parse_message(split_message)\n await self._handle_battle_request(battle)\n elif split_message[1] == \"teampreview\":\n battle._parse_message(split_message)\n await self._handle_battle_request(battle, from_teampreview_request=True)\n elif split_message[1] == \"bigerror\":\n self.logger.warning(\"Received 'bigerror' message: %s\", split_message)\n else:\n battle._parse_message(split_message)",
"def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")",
"def _websocket_message(self, msg):\n if msg is None:\n self._logger.warn(\"Websocket server disconnected!\")\n if not self._disconnect_issued:\n if self._ws is not None:\n self._ws.close()\n self._ws = None\n yield self._connect(reconnecting=True)\n return\n try:\n msg = json.loads(msg)\n self._logger.debug(\"Message received: %s\", msg)\n msg_id = str(msg['id'])\n if msg_id.startswith('redis-pubsub'):\n self._process_redis_message(msg, msg_id)\n elif msg_id.startswith('redis-reconnect'):\n # only resubscribe to namespaces, the server will still\n # publish sensor value updates to redis because the client\n # did not disconnect, katportal lost its own connection\n # to redis\n yield self._resend_subscriptions()\n else:\n self._process_json_rpc_message(msg, msg_id)\n except Exception:\n self._logger.exception(\n \"Error processing websocket message! {}\".format(msg))\n if self._on_update:\n self._io_loop.add_callback(self._on_update, msg)\n else:\n self._logger.warn('Ignoring message (no on_update_callback): %s',\n msg)",
"def handle_turn(game_ID, team, action, payload):\n\n state = get_state(game_ID)\n if state[\"playerState\"][\"winner\"] != \"none\":\n return [\"playerState\"]\n if state[\"playerState\"][\"turn\"] != team or state[\"playerState\"][\"action\"] != action:\n raise InvalidTurnError(\n f'{state[\"playerState\"][\"action\"]} for {state[\"playerState\"][\"turn\"]} goes now'\n )\n if action == \"spymaster\":\n return spymaster_move(game_ID, payload[\"hint\"], payload[\"attempts\"])\n elif action == \"chooser\":\n return chooser_move(\n game_ID, state[\"wordsState\"], payload[\"guess\"], state[\"playerState\"][\"turn\"]\n )",
"def handle_handshake(self, message):\n message_type = messages.get_message_type(message)\n if message_type == \"OFPT_HELLO\":\n self.hello_received = True\n if message_type == \"OFPT_FEATURES_REPLY\":\n self.features_reply_received = True\n self.dpid = message.datapath_id\n if self.features_reply_received and self.hello_received:\n #print \"Switch on: %s:%s has the datapath ID: %s\" % (\n # self.address, self.port, self.dpid)\n if self.needs_migration:\n #print \"Migrating switch...\"\n self.handle_migration(message)\n else:\n self.activate_controller()\n self.controller.start_sending_to_switch()",
"def handle(self):\n for request in self._each_msg():\n r_len, r_type = struct.unpack_from('> I B', request)\n\n if r_type == self.SSH2_AGENTC_REQUEST_IDENTITIES:\n response = self._merge_identities(request)\n elif r_type == self.SSH2_AGENTC_SIGN_REQUEST:\n # Extract key blob from request\n key_blob_len = struct.unpack_from('> I', request, 5)[0]\n key_blob = request[9:9 + key_blob_len]\n hex_blob = ''.join('{:02x}'.format(b) for b in key_blob)\n\n agent = self._identity_map[hex_blob]\n\n if agent:\n if agent == self.server.alternate_agent:\n key_digest = self._key_digest(key_blob)\n LOG.info(\"identity %s used by %s: %s\", key_digest,\n self.username, self.process_info)\n\n response = agent.forward_request(request)\n else:\n response = \\\n self.server.default_agent.forward_request(request)\n else:\n response = self.server.default_agent.forward_request(request)\n\n self.request.sendall(response)",
"def callback_client_receive(data):\n data: GameStateModel = JSONSerializer.deserialize(data)\n logger.debug(f\"Client received {data.__class__.__name__} object from host.\")\n # print(f\"Client received {data.__class__.__name__} object from host.\")\n if isinstance(data, GameStateModel):\n GameStateModel.set_game(data)\n return\n if isinstance(data, TurnEvent) or isinstance(data, ActionEvent):\n exec_thread = threading.Thread(target=data.execute)\n exec_thread.start()",
"def on_bot_message():\n handle_bot_message(request.get_json())\n return \"ok\"",
"def handshake_handler(json):\n\n # json object contains the session cookie, thus identifying the sender\n # Handshake the request sid with the user identifier, through the json[data] cookie\n # get_cache(cookie) -> if there is proceed and get userid from the cache\n userid = get_token(self.cache, json[\"data\"])\n if userid is None:\n emit('error', {'error': 'User not authenticated'})\n disconnect()\n return\n\n # Create message queue use this context emit to send messages\n connection = queue.connect(flask_app.private_consts.Queues.QUEUE_HOST)\n channel = queue.create_channel(connection)\n db = get_db(self.consts)\n self.channels_dict[request.sid] = {\"channel\" : channel, \"user_id\": userid, \"db\": db,\n \"connection\": connection}\n\n # User2user messages\n queue_name = Queues.USER_U2U_PREFIX + \"_\" + str(userid) + \"_\" + uuid.uuid4().hex\n queue_id = queue.create_queue(channel, queue_name)\n # Bind to fanout type user exchange\n queue.bind_queue(channel, queue_id, configs.USER_EXCHANGE)\n\n room_id = request.sid\n\n def user_queue_callback(ch, method, properties, body):\n # Here filter messages by userid, only emit the ones corresponding to this user\n # if userlocation ~= messagelocation -> emit the message\n # Get user location\n data = json_engine.loads(body)\n position = get_position(db, userid)\n sender_id = data[\"content\"][\"user_id\"]\n sender_position = get_position(db, sender_id)\n message = data[\"content\"][\"message\"]\n if sender_id == userid:\n return\n if position is not None:\n u_lat = position[0]\n u_lon = position[1]\n m_lat = sender_position[0]\n m_lon = sender_position[1]\n m_radius = data[\"radius\"]\n if abs(u_lat - m_lat) < m_radius and abs(u_lon - m_lon) < m_radius:\n content = {\"time\": strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()), \"from\": sender_id, \"text\": message}\n sio.emit(\"user:incoming\", content, room=room_id)\n else:\n sio.emit(\"error\", {\"error\": \"User has no position, please set the position first\"}, room=room_id)\n\n queue.configure_consume(channel, user_queue_callback, queue_id)\n\n # Bot2user messages\n queue_name = Queues.USER_B2U_PREFIX + \"_\" + str(userid) + \"_\" + uuid.uuid4().hex\n queue_id = queue.create_queue(channel, queue_name)\n # This queue is, by default, not binded to any exchange, as the routing key depends on the location\n # Will be handy when reconfiguring the building\n self.channels_dict[request.sid][\"bot_queue\"] = queue_id\n\n def bot_queue_callback(ch, method, properties, body):\n data = json_engine.loads(body)\n message = data[\"text\"]\n content = {\"time\": strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()), \"from\": \"Bot\", \"text\": message}\n # Here messages already are filtered by the exchange\n sio.emit(\"bot:incoming\", content, room=room_id)\n\n queue.configure_consume(channel, bot_queue_callback, queue_id)\n # Created in a background thread due to the synchronous behaviour of message queues\n thread = Thread(target=queue.start_message_consumption, args=(channel,))\n thread.start()\n emit(\"handshake_allowed\", {\"success\": \"true\", \"room\": userid})\n print(\"Succesful handshake with \" + userid, \"At:\" + thread.name)",
"def handle_msg(self, state_id, msg):\n pass",
"def handle_message(self, from_channel, to_channel, message_type, *args):\n logger.debug(\"{} received message: {} -> {} ({})\".format(\n self, from_channel, to_channel, message_type,\n ))\n # TODO: Validation\n if message_type == msgtypes.AI_CHANNEL_ASSIGNED:\n channel = args[0]\n self.handle_channel_assigned(channel)\n elif message_type == msgtypes.AI_CONNECTED:\n channel = args[0]\n self.handle_ai_connected(channel)\n elif message_type == msgtypes.CLIENT_CONNECTED:\n client_id = args[0]\n self.handle_client_connected(client_id)\n elif message_type == msgtypes.CLIENT_DISCONNECTED:\n client_id = args[0]\n self.handle_client_disconnected(client_id)\n elif message_type == msgtypes.DOBJECT_CREATED:\n dobject_id = args[0]\n token = args[1]\n self.handle_dobject_created(dobject_id, token)\n elif message_type == msgtypes.CREATE_DOBJECT_VIEW:\n dobject_id = args[0]\n dclass = args[1]\n fields = args[2]\n self.handle_create_dobject_view(dobject_id, dclass, fields)\n elif message_type == msgtypes.CREATE_AI_VIEW:\n dobject_id = args[0]\n dclass = args[1]\n fields = args[2]\n self.handle_create_ai_view(dobject_id, dclass, fields)\n elif message_type == msgtypes.FIELD_UPDATE:\n source = from_channel\n dobject_id = args[0]\n field_id = args[1]\n values = args[2]\n self.handle_field_update(source, dobject_id, field_id, values)\n else:\n # FIXME: Better to log it and drop it on the floor?\n raise NotImplementedError",
"def handle_message(self, msg):\n pass",
"def handle_message(client, message):\r\n\t# get player object associated with the client making the current request\r\n\tplayer = players_online[client]\r\n\r\n\ttype = message[\"type\"]\r\n\tdata = message[\"data\"]\r\n\r\n\tif (type == \"choose_topic\"):\r\n\t\tplayer.set_chosen_topic(data)\r\n\t\tsession_handler.update_available_sessions()\r\n\r\n\telif (type == \"choose_game_mode\"):\r\n\t\tplayer.set_chosen_game_mode(data)\r\n\r\n\telif (type == \"choose_session\"):\r\n\t\t# do something involving joining a session by ID\r\n\t\tsession_handler.join_session_by_id(player, data)\r\n\r\n\telif (type == \"create_session\"):\r\n\t\t# create a session using the chosen topic and game_mode\r\n\t\tsession_handler.create_session(player)\r\n\r\n\telif (type == \"leave_session\"):\r\n\t\tplayer.leave_session()\r\n\t\t# if player was in session, sessions may need deleting\r\n\t\tsession_handler.check_for_empty_sessions()\r\n\r\n\telif (type == \"answer_question\"):\r\n\t\tplayer.answer_question(data)\r\n\r\n\telif (type == \"play_again\"):\r\n\t\tplayer.session.replay(player)\r\n\t\tsession_handler.update_available_sessions()",
"def handle(self):\n self.raw_requestline = self.rfile.readline()\n if not self.parse_request(): # An error code has been sent, just exit\n return\n\n # next line is where we'd have expect a configuration key somehow\n handler = self.WebSocketWSGIHandler(\n self.rfile, self.wfile, self.get_stderr(), self.get_environ()\n )\n handler.request_handler = self # backpointer for logging\n handler.run(self.server.get_app())",
"def _route_message(self, msg):\n # check xml formatting\n try:\n xmldoc = minidom.parseString(msg)\n except xml.parsers.expat.ExpatError:\n _LOGGER.warning(\"ISY Received Malformed XML:\\n%s\", msg)\n return\n _LOGGER.log(LOG_VERBOSE, \"ISY Update Received:\\n%s\", msg)\n\n # A wild stream id appears!\n if f\"{ATTR_STREAM_ID}=\" in msg and ATTR_STREAM_ID not in self.data:\n self.update_received(xmldoc)\n\n # direct the event message\n cntrl = value_from_xml(xmldoc, ATTR_CONTROL)\n if not cntrl:\n return\n if cntrl == \"_0\": # ISY HEARTBEAT\n if self._loaded is None:\n self._loaded = ES_INITIALIZING\n self.isy.connection_events.notify(ES_INITIALIZING)\n elif self._loaded == ES_INITIALIZING:\n self._loaded = ES_LOADED\n self.isy.connection_events.notify(ES_LOADED)\n self._lasthb = now()\n self._hbwait = int(value_from_xml(xmldoc, ATTR_ACTION))\n _LOGGER.debug(\"ISY HEARTBEAT: %s\", self._lasthb.isoformat())\n elif cntrl == PROP_STATUS: # NODE UPDATE\n self.isy.nodes.update_received(xmldoc)\n elif cntrl[0] != \"_\": # NODE CONTROL EVENT\n self.isy.nodes.control_message_received(xmldoc)\n elif cntrl == \"_1\": # Trigger Update\n if f\"<{ATTR_VAR}\" in msg: # VARIABLE\n self.isy.variables.update_received(xmldoc)\n elif f\"<{ATTR_ID}>\" in msg: # PROGRAM\n self.isy.programs.update_received(xmldoc)\n elif f\"<{TAG_NODE}>\" in msg and \"[\" in msg: # Node Server Update\n pass # This is most likely a duplicate node update.\n elif f\"<{ATTR_ACTION}>\" in msg:\n action = value_from_xml(xmldoc, ATTR_ACTION)\n if action == ACTION_KEY:\n self.data[ACTION_KEY] = value_from_xml(xmldoc, TAG_EVENT_INFO)\n return\n if action == ACTION_KEY_CHANGED:\n self._program_key = value_from_xml(xmldoc, TAG_NODE)\n # Need to reload programs\n asyncio.run_coroutine_threadsafe(\n self.isy.programs.update(), self.isy.loop\n )\n elif cntrl == \"_3\": # Node Changed/Updated\n self.isy.nodes.node_changed_received(xmldoc)",
"async def _outgoing_ws(self, pid, websocket):\n character = self.players[pid]\n\n while not websocket.closed:\n msg = await character.msgs.get()\n\n # TODO: try to get more messages and buffer writes?\n try:\n await websocket.send(msg + \"\\n\\r\")\n except websockets.exceptions.ConnectionClosed:\n break\n\n logging.debug(\"_outgoing_ws closed for %s\", pid)",
"def onMessage(self, payload, isBinary):\n if isBinary:\n logger.log_info(\"DISCORD: got a binary payload for some reason\")\n return\n data = json.loads(str(payload, \"utf-8\"))\n if seqid := data.get(\"s\"):\n self.last_sequence = seqid\n\n # not sure if that error json format is for websockets, so\n # check for it just in case\n if \"errors\" in data:\n self.handle_error(data)\n return\n\n # check for discord gateway API op codes first\n if data[\"op\"] == OP_HELLO:\n self.interval = data[\"d\"][\"heartbeat_interval\"] / 1000 # convert millisec to seconds\n if self.nextHeartbeatCall:\n self.nextHeartbeatCall.cancel()\n self.nextHeartbeatCall = self.factory._batched_timer.call_later(\n self.interval * random(),\n self.doHeartbeat,\n )\n if self.session_id:\n # we already have a session; try to resume instead\n self.resume()\n else:\n self.identify()\n elif data[\"op\"] == OP_HEARTBEAT_ACK:\n # our last heartbeat was acknowledged, so reset the \"pending\" flag\n self.pending_heartbeat = False\n elif data[\"op\"] == OP_HEARTBEAT:\n # Discord wants us to send a heartbeat immediately\n self.doHeartbeat(force=True)\n elif data[\"op\"] == OP_INVALID_SESSION:\n # Discord doesn't like our current session; reconnect for a new one\n logger.log_msg(\"Discord: received 'Invalid Session' opcode. Reconnecting.\")\n if data[\"d\"] == False:\n # can't resume, clear existing resume data\n self.session_id = None\n self.factory.resume_url = None\n self.factory.reconnect()\n elif data[\"op\"] == OP_RECONNECT:\n # reconnect as requested; Discord does this regularly for server load balancing\n logger.log_msg(\"Discord: received 'Reconnect' opcode. Reconnecting.\")\n self.factory.reconnect()\n elif data[\"op\"] == OP_DISPATCH:\n # handle the general dispatch opcode events by type\n if data[\"t\"] == \"READY\":\n # our recent identification is valid; process new session info\n self.connection_ready(data[\"d\"])\n else:\n # general message, pass on to data_in\n self.data_in(data=data)",
"def _handle_msg(self, msg):\n data = msg['content']['data']\n method = data['method']\n\n if method == 'update':\n if 'state' in data:\n state = data['state']\n if 'buffer_paths' in data:\n _put_buffers(state, data['buffer_paths'], msg['buffers'])\n self.set_state(state)\n\n # Handle a state request.\n elif method == 'request_state':\n self.send_state()\n\n # Handle a custom msg from the front-end.\n elif method == 'custom':\n if 'content' in data:\n self._handle_custom_msg(data['content'], msg['buffers'])\n\n # Catch remainder.\n else:\n self.log.error('Unknown front-end to back-end widget msg with method \"%s\"' % method)",
"def handle_message(self, session, message):\n # Handle an RPC call\n # Reason should come from inform call.\n response = {}\n if message['method'] == 'done' and message['id'] is None:\n # Here we switch roles, becoming RPC Client\n next_state, response = RPCS.SendingRpc, None\n else:\n # We have a valid method.\n # (VALID_METHODS checked in rpcsd:parse_message)\n next_state = RPCS.ExpectRpc\n response['error'] = {'code': -31998, 'message': 'Wrong request'}\n response['id'] = message['id']\n\n return next_state, response",
"def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))",
"async def on_message_activity(self, turn_context: TurnContext):\n reply = MessageFactory.list([])\n # Get the state properties from the turn context.\n welcome_user_state = await self.user_state_accessor.get(\n turn_context, WelcomeUserState\n )\n\n if not welcome_user_state.did_welcome_user:\n welcome_user_state.did_welcome_user = True\n\n text = turn_context.activity.text.lower()\n\n if text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n #await self.__send_intro_card(turn_context)\n reply.attachments.append(self.create_signin_card())\n await turn_context.send_activity(reply)\n\n \n else:\n # This example hardcodes specific utterances. You should use LUIS or QnA for more advance language\n # understanding.\n print(\"Printing action------\",turn_context.activity.text)\n print(\"Printing JSON------\",turn_context._activity.value)\n \n\n if turn_context._activity.value is not None:\n print(\"Printing type------\",turn_context._activity.value[\"type\"])\n print(\"Printing customer id------\",turn_context._activity.value[\"customerId\"])\n print(\"Printing password------\",turn_context._activity.value[\"password\"])\n\n customerId = turn_context._activity.value[\"customerId\"]\n password = turn_context._activity.value[\"password\"]\n terms = turn_context._activity.value[\"terms\"]\n isvalid = True\n if (customerId is None) or (str(customerId).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Customer ID\")\n if (password is None) or (str(password).strip()==\"\"):\n isvalid = False\n await turn_context.send_activity(\"Please enter valid Password\")\n if (terms is None or terms in (\"false\")):\n isvalid = False\n await turn_context.send_activity(\"Please accept the terms and conditions.\")\n\n if (isvalid and turn_context._activity.value[\"type\"] in (\"Login\")):\n # defining a params dict for the parameters to be sent to the API\n PARAMS = {'userName': customerId, 'password': password}\n # sending get request and saving the response as response object\n r = requests.get(url=\"http://localhost:8080/login\", params=PARAMS)\n # extracting data in json format\n data = r.json()\n print(\"printing response \", data[\"loginStatus\"])\n if (data[\"loginStatus\"] is not None and data[\"loginStatus\"] in (\"success\")):\n await turn_context.send_activity(\"Login Succeded\")\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n else:\n await turn_context.send_activity(\"Login Failed. 
Please try again\")\n # for key in turn_context._activity.value:\n # print(turn_context._activity.value[key])\n \n else:\n text = turn_context.activity.text.lower()\n \n if text in (\"369\"):\n await turn_context.send_activity(\"Thanks!!\")\n await self.__send_intro_card(turn_context)\n elif text in (\"sign-in\", \"login\"):\n await self.__login_otp_card_card(turn_context)\n elif text in (\"hello\", \"hi\",\"intro\",\"help\",\"menu\"):\n await self.__send_intro_card(turn_context)\n #await turn_context.send_activity(f\"You said { text }\")\n elif text in (\"account balance\"):\n await self.__send_accountbalance_card(turn_context)\n await turn_context.send_activity(\"Also, your deposit xxxxxxxxx9243 is closed pre-maturely as per your request and amount is credited to your third party account.\")\n elif text in (\"xxxxxxxxx4567\"):\n await self.__list_accountTransaction_card(turn_context)\n await self.__mobile_billDue_card(turn_context)\n elif text in (\"yes, pay my mobile bill\"):\n await self.__show_invoice_card(turn_context)\n await self.__show_selectAccountForBill_card(turn_context)\n elif text in(\"debit from xxxxxxxxx4567\"):\n await turn_context.send_activity(\"An OTP is sent to your registered mobile number xxxxxxxx90.\")\n await turn_context.send_activity(\"Please enter the OTP.\")\n elif text in (\"1234\"):\n await turn_context.send_activity(\"Transaction Successful !! Mobile bill paid for $100 from your account number xxxxxxxxx4567\")\n await turn_context.send_activity(\"As a loyal customer, we are happy to offer you one year free VISA card which comes with $25 movie voucher.\\n\\n Also your balance reward points 514 from card xxxxxxxxxxxx7653 will be added to the new card.\")\n await self.__show_congratulations_card(turn_context)\n elif text in (\"credit card\"):\n await turn_context.send_activity(\"Credit card xxxxxxxxxxxx7653 \\n\\n Current outstanding is $0.00 \\n\\n Card closed on 09/01/2020 \\n\\n Balance reward points are 514\")\n elif text in (\"service requests\"):\n await turn_context.send_activity(\"Currently there are no open service requests.\")\n elif text in (\"xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Your current account xxxxxxxxx4566 is Active, but there are no transactions on it.\")\n elif text in (\"debit from xxxxxxxxx4566\"):\n await turn_context.send_activity(\"Insufficient account balance. Please choose another account\")\n await self.__show_selectAccountForBill_card(turn_context)\n #else:\n #await self.__send_intro_card(turn_context)",
"def handle(self, message):\n if not message['successful']:\n raise BayeuxError(\n 'Unsuccessful handshake response: {}'\n .format(message.get('error')))\n else:\n logger.info('Hand shook client ID %s', message['clientId'])\n self.client.client_id = message['clientId']",
"def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None",
"def receive_message(self, message):",
"async def state_message_received(msg):\n json_payload = json.loads(msg.payload)\n _LOGGER.debug(json_payload)\n\n # If listening to `tele`, result looks like: {\"IrReceived\":{\"Protocol\":\"XXX\", ... ,\"IRHVAC\":{ ... }}}\n # we want to extract the data.\n if \"IrReceived\" in json_payload:\n json_payload = json_payload[\"IrReceived\"]\n\n # By now the payload must include an `IRHVAC` field.\n if \"IRHVAC\" not in json_payload:\n return\n\n payload = json_payload[\"IRHVAC\"]\n \n if payload[\"Vendor\"] == self._vendor:\n # All values in the payload are Optional\n prev_power = self.power_mode\n if \"Power\" in payload:\n self.power_mode = payload[\"Power\"].lower()\n if \"Mode\" in payload:\n self._hvac_mode = payload[\"Mode\"].lower()\n # Some vendors send/receive mode as fan instead of fan_only\n if self._hvac_mode == CURRENT_HVAC_FAN:\n self._hvac_mode = HVAC_MODE_FAN_ONLY\n if \"Temp\" in payload:\n if payload[\"Temp\"] > 0:\n if self.power_mode == STATE_OFF and self._ignore_off_temp:\n self._target_temp = self._target_temp\n else:\n self._target_temp = payload[\"Temp\"]\n if \"Celsius\" in payload:\n self._celsius = payload[\"Celsius\"].lower()\n if \"Quiet\" in payload:\n self._quiet = payload[\"Quiet\"].lower()\n if \"Turbo\" in payload:\n self._turbo = payload[\"Turbo\"].lower()\n if \"Econo\" in payload:\n self._econo = payload[\"Econo\"].lower()\n if \"Light\" in payload:\n self._light = payload[\"Light\"].lower()\n if \"Filter\" in payload:\n self._filter = payload[\"Filter\"].lower()\n if \"Clean\" in payload:\n self._clean = payload[\"Clean\"].lower()\n if \"Beep\" in payload:\n self._beep = payload[\"Beep\"].lower()\n if \"Sleep\" in payload:\n self._sleep = payload[\"Sleep\"]\n if \"SwingV\" in payload:\n self._swingv = payload[\"SwingV\"].lower()\n if self._swingv != \"auto\":\n self._fix_swingv = self._swingv\n if \"SwingH\" in payload:\n self._swingh = payload[\"SwingH\"].lower()\n if self._swingh != \"auto\":\n self._fix_swingh = self._swingh\n if (\n \"SwingV\" in payload\n and payload[\"SwingV\"].lower() == STATE_AUTO\n and \"SwingH\" in payload\n and payload[\"SwingH\"].lower() == STATE_AUTO\n ):\n if SWING_BOTH in self._swing_list:\n self._swing_mode = SWING_BOTH\n elif SWING_VERTICAL in self._swing_list:\n self._swing_mode = SWING_VERTICAL\n elif SWING_HORIZONTAL in self._swing_list:\n self._swing_mode = SWING_HORIZONTAL\n else:\n self._swing_mode = SWING_OFF\n elif (\n \"SwingV\" in payload\n and payload[\"SwingV\"].lower() == STATE_AUTO\n and SWING_VERTICAL in self._swing_list\n ):\n self._swing_mode = SWING_VERTICAL\n elif (\n \"SwingH\" in payload\n and payload[\"SwingH\"].lower() == STATE_AUTO\n and SWING_HORIZONTAL in self._swing_list\n ):\n self._swing_mode = SWING_HORIZONTAL\n else:\n self._swing_mode = SWING_OFF\n\n if \"FanSpeed\" in payload:\n fan_mode = payload[\"FanSpeed\"].lower()\n # ELECTRA_AC fan modes fix\n if (\n HVAC_FAN_MAX_HIGH in self._fan_list\n and HVAC_FAN_AUTO_MAX in self._fan_list\n ):\n if fan_mode == HVAC_FAN_MAX:\n self._fan_mode = FAN_HIGH\n elif fan_mode == HVAC_FAN_AUTO:\n self._fan_mode = HVAC_FAN_MAX\n else:\n self._fan_mode = fan_mode\n else:\n self._fan_mode = fan_mode\n _LOGGER.debug(self._fan_mode)\n\n if self._hvac_mode is not HVAC_MODE_OFF:\n self._last_on_mode = self._hvac_mode\n\n # Set default state to off\n if self.power_mode == STATE_OFF:\n self._hvac_mode = HVAC_MODE_OFF\n self._enabled = False\n else:\n self._enabled = True\n\n # Set toggles to 'off'\n for key in self._toggle_list:\n setattr(self, '_' + key.lower(), 
'off')\n\n # Update HA UI and State\n self.async_write_ha_state()\n #self.async_schedule_update_ha_state()\n\n # Check power sensor state\n if self._power_sensor and prev_power is not None and prev_power != self.power_mode:\n await asyncio.sleep(3)\n state = self.hass.states.get(self._power_sensor)\n await self._async_power_sensor_changed(self._power_sensor, None, state)",
"def handle(self, msg, peer_protocol):\n msg_id = msg[0]\n if msg_id == 0:\n self._handle_handshake(msg, peer_protocol)\n elif msg_id == 1: #update\n print(msg, len(msg))\n self._handle_update(msg)",
"def HandleMessage(msg, conn, requester):\n\n print(\"\\nReceived a new message:\\n{}\".format(msg))\n if msg['__class__'] == 'ReqDecryption':\n msg = msg['__value__']\n C = msg['C']\n D = msg['D']\n C = parse_point(C)\n D = parse_point(D)\n\n print(\"\\nReceived a new tallied contribution:\")\n print(\"C = {}\\nD = {}\".format(C, D))\n out = requester.decrypt(C, D)\n\n req = RespDecryption(out[0], out[1], out[2])\n write_message(conn, req)\n print(\"\\nThe final outcome is:\\n{}\".format(out[0]))\n exit()",
"def handle_action_received(msg: ReceiveMessage) -> None:\n payload = self.render_template(msg, CONF_ACTION_TEMPLATE)\n if not payload or payload == PAYLOAD_NONE:\n _LOGGER.debug(\n \"Invalid %s action: %s, ignoring\",\n [e.value for e in HVACAction],\n payload,\n )\n return\n try:\n self._attr_hvac_action = HVACAction(str(payload))\n except ValueError:\n _LOGGER.warning(\n \"Invalid %s action: %s\",\n [e.value for e in HVACAction],\n payload,\n )\n return\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)",
"def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!",
"def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif 
request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n 
pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n 
#Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))",
"def received_message(self, msg):\n command = int(msg[:8], base=16)\n msg = msg[8:]\n self.log.debug(\"CONTROLLER - RECEIVED COMMAND: \" + str(command))\n self.log.debug(\"CONTROLLER - MSG: \" + str([int(msg[i:i+8], base=16) for i in range(0, len(msg), 8)]))\n if command == 0:\n # 0 - opponent start the game\n self.master.add_log(\"Opponent starts the game.\")\n elif command == 1:\n # 1 - you start the game\n self.master.add_log(\"You start the game! Your turn.\")\n self.master.first = True\n self.master.new_round(False)\n elif command == 2:\n # 2 - start of your turn\n self.master.add_log(\"Your turn.\")\n self.master.new_round()\n elif command == 3:\n # 3 - opponent draws a card\n self.master.opp_hand.add_placeholder()\n self.master.add_log(\"Opponent draw a card.\")\n elif command == 4:\n # 4,x,y - opponent plays a card with x id on y spot on gameboard\n c_id = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n card = self.master.database.get_card(c_id)\n if card.card_type == \"Spell\":\n self.master.opp_sfield.set_card(card)\n else:\n self.master.opp_bfield.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent played a card {card.name}.\")\n elif command == 5:\n # 5,v,x,y - player v picks up card from x space from y spot to his hand\n # v - 0/1 - you/opponent\n # x - 0/1 - mana/battlefield\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n card = self.master.mana.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from mana zone to your hand.\")\n elif c_space == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.hand.add_card(card)\n self.master.add_log(f\"You pick up {card.name} from battle zone to your hand.\")\n elif c_player == 1:\n if c_space == 0:\n card = self.master.opp_mana.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n self.master.add_log(f\"Opponent picks up {card.name} from mana to his hand.\")\n elif c_space == 1:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n # TODO: add better logging (which card etc.)\n self.master.add_log(f\"Opponent picks up {card.name} from battle zone to his hand.\")\n elif command == 6:\n # 6,v,x,y - player v puts card from x space from y spot to his graveyard\n # v - 0/1 - you/opponent\n # x - 0/1/2 - mana/battlefield/hand\n c_player = int(msg[:8], base=16)\n c_space = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_player == 0:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"yu_mn\", c_pos)\n elif c_space == 1:\n self.master.a_move_to_graveyard(\"yu_bf\", c_pos)\n elif c_space == 2:\n card = self.master.hand[c_pos]\n self.master.a_move_to_graveyard(\"yu_hd\", c_pos)\n self.master.send_message(15, card.id) # Sent back which card was discarded\n elif c_player == 1:\n if c_space == 0:\n self.master.a_move_to_graveyard(\"op_mn\", c_pos, False)\n elif c_space == 1:\n # Do not change to a_move_to_graveyard\n if c_pos == 5:\n card = self.master.opp_sfield.remove_card()\n else:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent's card {card.name} from battle zone was moved to his graveyard.\")\n elif command == 7:\n # 7,x,y - opponent puts y card from x space to manazone\n # x - 0/1/2/3 - hand/deck/graveyard\n c_space = int(msg[:8], base=16)\n c_id = 
int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card {card.name} from his hand to the mana zone\")\n elif c_space == 1:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his deck to the mana zone\")\n elif c_space == 2:\n card = self.master.database.get_card(c_id)\n self.master.opp_mana.add_card(card)\n self.master.opp_graveyard.remove_card(card)\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to the mana zone\")\n elif command == 8:\n # 8,x - opponent adds card from his hand to y shield (face down)\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.add_placeholder(c_pos)\n self.master.opp_hand.remove_card(0)\n self.master.add_log(f\"Opponent added card from his hand to shields\")\n elif command == 9:\n # 9,x,y - Opponent tap/untap card on y spot in mana zone\n # x - 0/1 - tap/untap\n c_tap = bool(int(msg[:8]))\n c_pos = int(msg[8:16], base=16)\n if c_tap:\n self.master.opp_mana.untap_card(c_pos)\n else:\n self.master.opp_mana.tap_card(c_pos)\n elif command == 10:\n # 10,x - (info) opponent looks under his shield on x spot\n c_pos = int(msg[:8], base=16)\n self.master.add_log(f\"Opponent is peeking his {c_pos} shield\")\n elif command == 11:\n # 11,x,y - opponent looks under my shield/card on hand on y spot\n # x - 0/1 - hand/shield\n c_space = int(msg[:8])\n c_pos = int(msg[8:16], base=16)\n if c_space == 0:\n card = self.master.hand[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} card in hand\")\n elif c_space == 1:\n card = self.master.shields[c_pos]\n self.master.add_log(f\"Opponent is peeking your {c_pos} shield\")\n self.master.send_message(111, card.id)\n elif command == 111:\n # 111,x - \n c_id = int(msg[:8], base=16)\n # TODO: split command to separate hand and shield\n # TODO: show in the UI what the card actually is\n self.master.add_log(f\"The choosen card is {c_id}\")\n elif command == 12:\n # 12,x,y - opponent attacks your x card with his y card on the battlefield\n c_opp_pos = int(msg[:8], base=16)\n c_my_pos = int(msg[8:16], base=16)\n opp_card = self.master.opp_bfield[c_opp_pos]\n my_card = self.master.bfield[c_my_pos]\n self.master.add_log(f\"Opponent is attacking your card {my_card.name} with card {opp_card.name}.\")\n self.master.creature_attacked(c_opp_pos, c_my_pos)\n elif command == 112:\n # 112,x - returned which card you will attack\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 13:\n # 13,x,y1,y2,... 
- opponent attacks your shields with y card\n # x - position of creature on the board\n # ya - a-th shield attacked by this creature\n creature_pos = int(msg[:8], base=16)\n msg = msg[8:]\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n shields_string = \", \".join([str(pos) for pos in shields_pos])\n self.master.add_log(f\"Your shields at pos {shields_string} are being attacked by {self.master.opp_bfield[creature_pos].name}.\")\n self.master.shields_attacked(creature_pos, shields_pos)\n elif command == 113:\n # 113,x - answer from the opponent, that either he blocks with blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block shield attack, continue\n self.master.attack_shield()\n else:\n # Oppponent blocked with creature\n self.master.selected_shields = []\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 14:\n # 14,y1,y2,... - opponent destroys your shields\n # ya - a-th shield\n shields_pos = []\n while len(msg) > 0:\n shields_pos.append(int(msg[:8], base=16))\n msg = msg[8:]\n self.master.shield_destroyed(shields_pos)\n elif command == 114:\n # 114,x - opponent picked up x shield to his hand\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up {c_pos} shield to his hand.\")\n self.master.refresh_screen()\n elif command == 214:\n # 214,x - opponent played x shield to spell/battle zone\n c_pos = int(msg[:8], base=16)\n self.master.opp_shields.remove_shield(c_pos)\n self.master.add_log(f\"Opponent played a card from {c_pos} shield trigger.\")\n self.master.refresh_screen()\n elif command == 314:\n # 314 - opponent ended handling shield attack\n self.master.selected_card = []\n self.master.your_turn = 1\n elif command == 15:\n # 15 - id of the discarded card\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.add_card(card)\n self.master.add_log(f\"Opponent discarded {card.name}\")\n self.master.refresh_screen()\n elif command == 16:\n # 16,v,x,y - x player taps/untaps a y creature\n # v - 0/1 - tap/untap\n # x - 0/1 - you/opponent\n # y - pos\n c_tap = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_pos = int(msg[16:24], base=16)\n if c_tap == 0:\n # Tap\n if c_player == 0:\n # You\n self.master.bfield.set_tapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now tapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_tapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now tapped.\")\n if c_tap == 1:\n # Untap\n if c_player == 0:\n # You\n self.master.bfield.set_untapped(c_pos)\n self.master.add_log(f\"Your creature at pos {c_pos} is now untapped.\")\n elif c_player == 1:\n self.master.opp_bfield.set_untapped(c_pos)\n self.master.add_log(f\"Opponent creature at pos {c_pos} is now untapped.\")\n self.master.refresh_screen()\n elif command == 17:\n # 17,c,s1,p1,s2,p2... 
- opponent chooses which cards to destroy from the list\n # c - how many creatures to destoy\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_destoyed(count, target_list)\n elif command == 117:\n # 117 - opponent choosed cards and his actions ended\n self.master.post_destroy_creatures()\n elif command == 18:\n # 18,x - opponent adds card x from his deck to hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his deck to his hand\")\n elif command == 19:\n # 19,x - opponent adds card x from his graveyard to his hand\n c_id = int(msg[:8], base=16)\n card = self.master.database.get_card(c_id)\n self.master.opp_graveyard.remove_card(card)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent added card {card.name} from his graveyard to his hand\")\n elif command == 20:\n # 20,c,s1,p1,s2,p2... - opponent chooses which cards to move to manazone from the list\n # c - how many creatures to sacrafice\n # sa - set of a-th card\n # pa - position of a-th card\n target_list = []\n count=int(msg[:8], base=16)\n msg = msg[8:]\n while len(msg) > 0:\n set=int(msg[:8], base=16)\n pos=int(msg[8:16], base=16)\n target_list.append((set, pos))\n msg = msg[16:]\n self.master.select_creatures_to_be_put_to_mana(count, target_list)\n elif command == 120:\n # 120 - opponent choosed cards and his actions ended\n self.master.post_sacrafice_creatures()\n elif command == 21:\n # 21,y,x - player x puts card from y pos on battlefield zone to manazone\n # x - 0/1 - opponent/you\n # y - position\n c_player = int(msg[:8], base=16)\n c_pos = int(msg[8:16], base=16)\n if c_player == 0:\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_mana.add_card(card)\n self.master.add_log(f\"Opponent moved card {card.name} from his battlezone to the mana zone\")\n elif c_player == 1:\n card = self.master.bfield.remove_card(c_pos)\n self.master.mana.add_card(card)\n self.master.add_log(f\"Opponent moved your card {card.name} from battlezone to your mana zone\")\n elif command == 22:\n # 22,x - player x puts card from y pos on battlefield zone to hand\n # x - position\n c_pos = int(msg[:8], base=16)\n card = self.master.opp_bfield.remove_card(c_pos)\n self.master.opp_hand.add_placeholder()\n self.master.add_log(f\"Opponent picked up card {card.name} from his battlezone to his hand\")\n elif command == 23:\n # 23 - opponent added an z effect to x card on y battefield\n c_pos = int(msg[:8], base=16)\n c_player = int(msg[8:16], base=16)\n c_effect_name = int(msg[16:24], base=16)\n effect_name = EffectName(c_effect_name).name\n if c_player == 0:\n # to the opponent\n card = self.master.opp_bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to his card {card.name}\")\n elif c_player == 1:\n # to the player\n card = self.master.bfield[c_pos]\n self.master.add_log(f\"Opponent gave effect {effect_name} to your card {card.name}\")\n elif command == 24:\n # 24,x - opponent attacks you directly with x card\n # x - position of creature on the board\n creature_pos = int(msg[:8], base=16)\n self.master.add_log(f\"You are being directly attacked by {self.master.opp_bfield[creature_pos].name}.\")\n 
self.master.directly_attacked(creature_pos)\n elif command == 124:\n # 124,x - answer from the opponent: either he blocks with a blocker or shields will be destroyed\n if msg == \"\":\n # Opponent didn't block, you win\n self.master.win()\n else:\n # Opponent blocked with creature\n c_pos = int(msg[:8], base=16)\n self.master.attack_creature(c_pos)\n elif command == 25:\n # 25 - opponent won the game\n self.master.lose(True)\n elif command == 26:\n # 26 - opponent lost the game\n self.master.win(True)\n elif command == 27:\n # 27 - start of the next turn\n self.master.turn_count += 1\n self.master.add_turn_info()",
"def _turn(self, *args):\n self.send_line('STATUS TURN %s' % self.game.turn)",
"def dispatch_incoming_message(self, event):\n\n device_id = get_device_id_from_event(event)\n\n body = event.body_as_json()\n\n if get_message_source_from_event(event) == \"twinChangeEvents\":\n body = body.get(Fields.PROPERTIES, {}).get(Fields.REPORTED, {})\n\n self.update_pairing(device_id, body)\n device_data = self.device_list.try_get(device_id)\n\n if not device_data:\n return\n\n if get_message_source_from_event(event) == \"twinChangeEvents\":\n self.incoming_twin_changes.put(event)\n else:\n cmd = body.get(Fields.CMD, None)\n received_operation_id = body.get(Fields.OPERATION_ID, None)\n received_run_id = body.get(Fields.RUN_ID, None)\n\n if cmd == Commands.PAIR_WITH_SERVICE_APP:\n # handled in the update_pairing() function above\n pass\n elif cmd == Commands.SEND_OPERATION_RESPONSE:\n logger.info(\n \"Received telemetry sendOperationResponse from {} with operationId {}\".format(\n device_id, received_operation_id,\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n if Flags.RETURN_EVENTHUB_MESSAGE_CONTENTS in body.get(Fields.FLAGS, []):\n payload = {\n Fields.CMD: Commands.OPERATION_RESPONSE,\n Fields.SERVICE_INSTANCE_ID: service_instance_id,\n Fields.RUN_ID: received_run_id,\n Fields.OPERATION_ID: received_operation_id,\n Fields.EVENTHUB_MESSAGE_CONTENTS: {\n Fields.EVENTHUB_MESSAGE_BODY: body,\n Fields.EVENTHUB_CONTENT_TYPE: event.content_type,\n Fields.EVENTHUB_CORRELATION_ID: event.correlation_id,\n Fields.EVENTHUB_MESSAGE_ID: event.message_id,\n Fields.EVENTHUB_SYSTEM_PROPERTIES: convert_binary_dict_to_string_dict(\n event.system_properties\n ),\n Fields.EVENTHUB_PROPERTIES: convert_binary_dict_to_string_dict(\n event.properties\n ),\n },\n }\n message = json.dumps(payload)\n\n self.outgoing_c2d_queue.put(\n OutgoingC2d(\n device_id=device_id,\n message=message,\n props=Const.JSON_TYPE_AND_ENCODING,\n )\n )\n\n else:\n self.outgoing_operation_response_queue.put(\n OperationResponse(device_id=device_id, operation_id=received_operation_id,)\n )\n\n if Flags.RESPOND_IMMEDIATELY in body.get(Fields.FLAGS, []):\n self.force_send_operation_response.set()\n\n elif cmd == Commands.SET_DESIRED_PROPS:\n desired = body.get(Fields.DESIRED_PROPERTIES, {})\n if desired:\n logger.info(\"Updating desired props: {}\".format(desired))\n self.registry_manager.update_twin(\n device_id, Twin(properties=TwinProperties(desired=desired)), \"*\"\n )\n\n elif cmd == Commands.INVOKE_METHOD:\n self.executor.submit(self.handle_method_invoke, device_data, event)\n # TODO: add_done_callback -- code to handle this is in the device app, needs to be done here too, so we can count exceptions in non-critical threads\n\n elif cmd == Commands.INVOKE_PNP_COMMAND:\n self.executor.submit(self.handle_pnp_command_invoke, device_data, event)\n # TODO: add_done_callback -- code to handle this is in the device app, needs to be done here too, so we can count exceptions in non-critical threads\n\n elif cmd == Commands.GET_PNP_PROPERTIES:\n logger.info(\n \"Getting digital twin for {} with operationid {}\".format(\n device_id, received_operation_id\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n\n twin = self.digital_twin_client.get_digital_twin(device_id)\n\n message = json.dumps(\n {\n Fields.CMD: Commands.OPERATION_RESPONSE,\n Fields.SERVICE_INSTANCE_ID: service_instance_id,\n Fields.RUN_ID: received_run_id,\n Fields.OPERATION_ID: received_operation_id,\n Fields.PNP_PROPERTIES_CONTENTS: twin,\n }\n )\n\n self.outgoing_c2d_queue.put(\n OutgoingC2d(\n device_id=device_id, message=message, 
props=Const.JSON_TYPE_AND_ENCODING,\n )\n )\n\n elif cmd == Commands.UPDATE_PNP_PROPERTIES:\n logger.info(\n \"Updating digital twin for {} with operationid {}\".format(\n device_id, received_operation_id\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n\n self.digital_twin_client.update_digital_twin(\n device_id, body[Fields.PNP_PROPERTIES_UPDATE_PATCH]\n )\n\n # TODO: send ack for all of these ops, include error if failure\n\n elif cmd == Commands.SEND_C2D:\n logger.info(\n \"Sending C2D to {} with operationId {}\".format(\n device_id, received_operation_id,\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n message = json.dumps(\n {\n Fields.CMD: Commands.C2D_RESPONSE,\n Fields.SERVICE_INSTANCE_ID: service_instance_id,\n Fields.RUN_ID: received_run_id,\n Fields.OPERATION_ID: received_operation_id,\n Fields.TEST_C2D_PAYLOAD: body[Fields.TEST_C2D_PAYLOAD],\n }\n )\n\n self.outgoing_c2d_queue.put(\n OutgoingC2d(\n device_id=device_id, message=message, props=Const.JSON_TYPE_AND_ENCODING,\n )\n )\n\n else:\n logger.info(\n \"Unknown command received from {}: {}\".format(device_id, body),\n extra=custom_props(device_id, device_data.run_id),\n )",
"def handle_message(self, data):\r\n print data\r\n\r\n #video stream starts\r\n if data[:13] == 'video_stream:':\r\n #port will be between 3000 to 7000\r\n port = int(data[13:17])\r\n self.partnum = int(data[18:])\r\n\r\n #creates a video file in cache\r\n if not exists(CASHE + movie_name + '\\\\'):\r\n makedirs(CASHE + movie_name + '\\\\')\r\n\r\n self.receive = Receiver(port, self.partnum, CASHE + movie_name + '\\\\')\r\n self.receive.start()\r\n\r\n #upload stream approved\r\n elif data[:16] == 'upload_approved:':\r\n port = int(data[16:])\r\n self.uploader = Uploader(port, upload_path)\r\n self.uploader.start()\r\n self.upload_num = 1\r\n\r\n elif data[:6] == 'parts:':\r\n if data[6:].isdigit():\r\n self.partnum = int(data[6:])\r\n\r\n elif data[:8] == 'invalid:':\r\n self.uploader = None\r\n if data[8:] == 'hash':\r\n self.upload_num = 2\r\n else:\r\n self.upload_num = 3\r\n\r\n print 'invalid upload'\r\n\r\n elif data == 'vid_not_found':\r\n self.partnum = -1\r\n print 'could not watch vid'\r\n\r\n elif data[:8] == 'results:':\r\n results = data[8:].split(':<!>:')\r\n self.res_list = [['Movie Name', 'views', 'grade']]\r\n for i in results:\r\n datas = i.split(':!:')\r\n self.res_list.append(datas)\r\n self.print_results = True\r\n '''if results == ['']:\r\n self.print_results = False\r\n else:\r\n self.print_results = True'''",
"def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply",
"def handle(self):\n try:\n peers = Peers([\n gevent.spawn(self.route.proxy_input, self.client.sock,\n self.sock, self.buf, self.extra),\n gevent.spawn(self.route.proxy_connected, self.sock, \n self.client.sock, self.extra)])\n gevent.joinall(peers.greenlets)\n finally:\n self.sock.close()",
"def onMessage(self, msg, binary):\r\n# print('WebSocket: Received new message from client. '\r\n# '(binary={0})'.format(binary))\r\n\r\n try:\r\n self._assembler.processMessage(msg, binary)\r\n except InvalidRequest as e:\r\n self.sendErrorMessage('Invalid Request: {0}'.format(e))\r\n except DeadConnection:\r\n self.sendErrorMessage('Dead Connection')\r\n self.dropConnection()\r\n except:\r\n import traceback\r\n traceback.print_exc()\r\n self.sendErrorMessage('Fatal Error')",
"def handle_command(ARGS, CLIENT, command, channel):\n message = '''Commands I know:\n list teams\n scores <optional week number>\n does Brandon suck\n '''\n message = \"\"\n attachments = \"\"\n if command == \"list teams\":\n message = '\\n'.join(map(lambda x: x.team_name, ARGS.league.teams))\n elif command == \"does brandon suck\":\n message = 'yes'\n elif 'scores' in command:\n pieces = command.split(' ')\n if len(pieces) == 1:\n message = 'Current Scoreboard'\n matchups = ARGS.league.scoreboard(projections=True)\n else:\n message = 'Scoreboard for week ' + pieces[1]\n matchups = ARGS.league.scoreboard(pieces[1], projections=True)\n\n attachments = [{\n 'fallback': 'A textual representation of your table data',\n 'fields': [\n {\n 'title': 'Home',\n 'value': '\\n'.join(map(lambda x: x.home_team.team_abbrev + \" \" + str(x.home_score) + \" (\" + str(x.home_projection) + \")\", matchups)),\n 'short': True\n },\n {\n 'title': 'Away',\n 'value': '\\n'.join(map(lambda x: x.away_team.team_abbrev + \" \" + str(x.away_score) + \" (\" + str(x.away_projection) + \")\", matchups)),\n 'short': True\n }\n ]\n }]\n CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, attachments=attachments, as_user=True)\n\n # CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, as_user=True)",
"def handle_incoming_message(obj, reply_channel):\n if int(obj[message_type_key]) == 0:\n try:\n sub_obj = create_subscriber_object(reply_channel, obj)\n subscribers[reply_channel.name] = sub_obj\n except ApiException as exc:\n send_save_to_channel(reply_channel, str(exc))\n\n elif int(obj[message_type_key]) == 1:\n disconnect_subscriber(reply_channel)\n\n print(\"incoming_msg_handled\")",
"async def exchanges_message_handler(bnc_websocket, ftx_websocket, param) -> None:\n\n ok = True\n while ok:\n try:\n # receiving updates\n bnc = await bnc_websocket.recv()\n ftx = await ftx_websocket.recv()\n # translate to execute strategy\n await price_analyze(json.loads(bnc), json.loads(ftx), param['p_d'], param['m'])\n # sleep if its needed\n await asyncio.sleep(param['r_r'])\n\n except ConnectionClosed:\n print('Connection Closed. Need to reboot.')\n ok = False",
"def handle(msg):\n # Get text or data from the message\n text = msg.get(\"text\", None)\n data = msg.get(\"data\", None)\n\n if data is not None:\n # This is a message from a custom keyboard\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n content_type = \"data\"\n elif text is not None:\n # This is a text message from the user\n chat_id = msg[\"chat\"][\"id\"]\n content_type = \"text\"\n else:\n # This is a message we don't know how to handle\n content_type = \"unknown\"\n \n if content_type == \"text\":\n message = msg[\"text\"]\n logging.info(\"Received from chat_id={}: {}\".format(chat_id, message))\n\n if message == \"/start\":\n # Check against the server to see\n # if the user is new or not\n # TODO\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/register', json=payload)\n response = json.loads(r.content)\n if response['exists']:\n message = \"Welcome back!\"\n else:\n message = \"Welcome!\"\n bot.sendMessage(chat_id, message)\n\n \n elif message == \"/rate\":\n # Ask the server to return a random\n # movie, and ask the user to rate the movie\n # You should send the user the following information:\n # 1. Name of the movie\n # 2. A link to the movie on IMDB\n # TODO\n\n # Create a custom keyboard to let user enter rating\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/get_unrated_movie', json=payload)\n response = json.loads(r.content)\n movieid = response['id']\n movieinfo = '%s: %s' % (response['title'], response['url'])\n bot.sendMessage(chat_id, movieinfo)\n my_inline_keyboard = [[\n InlineKeyboardButton(text='1', callback_data=str(movieid)+' rate_movie_1'),\n InlineKeyboardButton(text='2', callback_data=str(movieid)+' rate_movie_2'),\n InlineKeyboardButton(text='3', callback_data=str(movieid)+' rate_movie_3'),\n InlineKeyboardButton(text='4', callback_data=str(movieid)+' rate_movie_4'),\n InlineKeyboardButton(text='5', callback_data=str(movieid)+' rate_movie_5')\n ]]\n keyboard = InlineKeyboardMarkup(inline_keyboard=my_inline_keyboard )\n bot.sendMessage(chat_id, \"How do you rate this movie?\", reply_markup=keyboard)\n\n \n elif message == \"/recommend\":\n # Ask the server to generate a list of\n # recommended movies to the user\n payload = {'chat_id':chat_id, 'top_n':3}\n r = requests.post(host_addr+'/recommend', json=payload)\n response = json.loads(r.content)\n # print(response)\n if response['movies']==[]:\n message = 'You have not rated enough movies, we cannot generate recommendation for you.'\n bot.sendMessage(chat_id, message)\n else:\n bot.sendMessage(chat_id, \"My recommendations:\")\n for item in response['movies']:\n movieinfo = '%s: %s' % (item['title'], item['url'])\n bot.sendMessage(chat_id, movieinfo)\n\n\n else:\n # Some command that we don't understand\n bot.sendMessage(chat_id, \"I don't understand your command.\")\n\n elif content_type == \"data\":\n # This is data returned by the custom keyboard\n # Extract the movie ID and the rating from the data\n # and then send this to the server\n # TODO\n # print(data)\n info = str.split(data)\n movieid = int(info[0])\n rate = info[1][-1]\n logging.info(\"Received rating: {}\".format(rate))\n bot.sendMessage(chat_id, \"Your rating is received!\")\n # logging.info('Movie id = %d' % movieid)\n payload = {'chat_id':chat_id, 'movie_id': movieid, 'rating': rate}\n r = requests.post(host_addr+'/rate_movie', json=payload)\n response = json.loads(r.content)\n logging.info('Update status: '+response['status'])",
"def onMessage(self, payload, isBinary):",
"def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()",
"def on_message(data):\n pass",
"def respond_to_message(self):\n\n MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n data = Converter(self.state).get_messages(meta_data=self.meta_data, message_data=self.message_data)\n\n outgoing_messages = data.get(\"messages\", [])\n events_to_publish = data.get(\"publish_events\", [])\n\n agent_messages = [message[\"message\"] for message in outgoing_messages if message[\"sending_to\"] == \"AGENT\"]\n user_messages = [message[\"message\"] for message in outgoing_messages if message[\"sending_to\"] == \"USER\"]\n\n agent_response = Util.send_messages(messages=agent_messages, sending_to=\"AGENT\")\n user_response = Util.send_messages(messages=user_messages, sending_to=\"USER\")\n\n if agent_response or user_response:\n\n Util.update_state(meta_data=self.meta_data, state=self.state)\n Util.log_events(meta_data=self.meta_data, state=self.state, events=events_to_publish)\n\n return 1",
"def _goal_received_cb(self):\n rospy.loginfo(\"[Server] Goal received, passing it on.\")\n self._action_client.wait_for_server()\n self._success = True\n goal = self._action_server.accept_new_goal()\n self._action_client.send_goal(goal, self._result_received_cb,\n self._active_cb,\n self._feedback_received_cb)",
"def _process_message(self, json_object):\n\n message = json.loads(json_object)\n if message['type'] == \"relay\":\n self._process_relay(message)\n elif message['type'] == \"control\":\n self._process_control(message)\n else:\n print(\"ERROR Received message has invalid type\\n\")\n return",
"def __handle_message_activity(self, activity):\n BotRequestHandler.STATE+=1 ## POORMAN'S STATE TRACKING\n self.send_response(200)\n self.end_headers()\n credentials = MicrosoftAppCredentials(APPID, APPPASSWORD)\n connector = ConnectorClient(credentials, base_url=activity.service_url)\n LUIStext = ''\n\n ## FIRST, GET APPID\n if self.STATE==1:\n if activity.text:\n BotRequestHandler.LUISAPPID=activity.text\n reply = BotRequestHandler.__create_reply_activity(activity, \"You entered application ID: %s\\nNow, please input your subscription key (default: %s):\" % (activity.text,self.LUISAPPKEY))\n\n ## SECOND, GET APPKEY\n elif self.STATE==2:\n if activity.text:\n BotRequestHandler.LUISAPPKEY=activity.text\n reply = BotRequestHandler.__create_reply_activity(activity, \"Great! You entered application key: %s\\nNow, enter some text for the LUIS model to render:\" % activity.text)\n\n ## THIRD AND ONWARDS: SEND TEXT TO LUIS AND REPORT LUIS RESPONSE TO THE USER\n else:\n try:\n CLIENT = LUISClient(self.LUISAPPID, self.LUISAPPKEY, True)\n res = CLIENT.predict(activity.text)\n while res.get_dialog() is not None and not res.get_dialog().is_finished():\n TEXT = input('%s\\n'%res.get_dialog().get_prompt())\n res = CLIENT.reply(TEXT, res)\n LUIStext=self.__handle_LUIS_response(res)\n reply = BotRequestHandler.__create_reply_activity(activity, 'LUIS says: %s' % LUIStext)\n except Exception as exc:\n LUIStext=exc\n print(\"Error: %s\" % exc)\n reply = BotRequestHandler.__create_reply_activity(activity, 'About %s, LUIS complains: %s' % (activity.text,LUIStext))\n\n connector.conversations.send_to_conversation(reply.conversation.id, reply)",
"def message_handle(ws, message):\n try:\n data = json.loads(message)\n method = data['method']\n params = data['params']\n except json.JSONDecodeError:\n ws.close((1003, 'Message `{}` is invalid'.format(message)))\n except KeyError:\n keys = str(list(data.keys()))\n ws.close((1003, 'Message keys {} are missing or invalid'.format(keys)))\n else:\n try:\n public[method](ws, **params)\n except KeyError:\n ws.close((1007, 'Method `{}` not found'.format(method)))\n except TypeError:\n ws.close((1007, 'Parameters `{}` are wrong'.format(data['params'])))\n except InstanceNotFound as instance_id:\n ws.close((1007, 'Instance `{}` not found'.format(instance_id)))\n except EnvironmentMalformed as env_id:\n ws.close((1007, 'Environment `{}` is malformed'.format(env_id)))\n except EnvironmentNotFound as env_id:\n ws.close((1007, 'Environment `{}` not found'.format(env_id)))\n except WrongAction as action:\n ws.close((1007, 'Action `{}` is wrong'.format(action)))\n except Exception as err:\n ws.close((1007, 'Unknonwn error: {}'.format(err)))",
"def receive(self, message):",
"def _handle_message(self, msg):\n self.event('message', msg)",
"async def handle_message(self, peer_name):\n try:\n reader = self.peers[peer_name][\"reader\"]\n buffer = self.peers[peer_name][\"buffer\"]\n except KeyError:\n print(f\"Error: Connection to {peer_name} doesn't exist.\")\n return\n\n data = await reader.read(1024*8)\n\n if not data:\n raise NodeDisconnectException(f\"Node {peer_name} disconnected.\")\n\n buffer.write(data)\n try:\n message_header, message = buffer.receive_message()\n except InvalidMessageChecksum as ex:\n print(f\"Warning: {ex} (node {peer_name}).\")\n return\n\n if message_header is not None:\n await self.handle_message_header(peer_name, message_header, data)\n\n if not message:\n return\n\n # Executes proper message handler.\n handle_func_name = \"handle_\" + message_header.command\n handle_func = getattr(self, handle_func_name, None)\n if handle_func and callable(handle_func):\n await handle_func(peer_name, message_header, message)",
"def handle(self):\n global log_th\n sent = 1\n msg_body = ''\n get_recv = True\n get_data = True\n empty_check = 0\n # Looping session requests\n while 1:\n try:\n # If enabled sleep feauture\n if self.sleep_between != 0:\n time.sleep(self.sleep_between)\n # If no answer feauture\n if self.no_answer != 0:\n time.sleep(1)\n continue\n # Changing receive size if receiving data part\n if sent == 3 or sent == 4:\n data = self.request.recv(self.data_recv_size)\n else:\n data = self.request.recv(self.std_recv_size)\n if sent != 5:\n self.command_w_th_inc.write_commands(\n data=bytes(data).decode().encode('ascii', 'ignore')\n .decode().rstrip(), qid=self.message_id)\n # To many empty line received, closed thread\n if self.func_empty_check(data):\n if empty_check >= 3:\n break\n else:\n empty_check += 1\n continue\n # Logging session requests if steps not equal to data section\n if sent != 5:\n log_th.log_info('{} - {} client executed : \"{}\"'.format(\n self.message_id, self.client_ip, bytes(data).decode().rstrip()))\n # Break the loop\n if self.func_quit(data):\n break\n except Exception as ae:\n log_th.log_warning('{} encounter an error from {} thread : {}'.format(\n self.client_ip, threading.current_thread().name, str(ae)))\n break\n else:\n try:\n # Checking the all steps\n if self.func_rset(data):\n sent = 2\n continue\n if self.func_auth(data):\n continue\n if self.func_auth_plain(data):\n continue\n if self.func_starttls(data):\n continue\n # Starting the sent steps\n # Ehlo/hello\n if sent == 1:\n if self.func_ehlo(data) or self.func_helo(data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n # Mail from, rcpt to, data\n elif sent == 2:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 2:\n get_data = False\n get_recv = False\n elif bytes(data).decode().encode('ascii',\n 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_recv = False\n if self.func_from(data, get_recv):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n if not get_recv:\n if self.func_to(data, get_recv, get_data):\n sent += 1\n get_recv = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # rcpt to and data\n elif sent == 3:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_data = False\n if self.func_to(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # data\n elif sent == 4:\n if self.func_to(data, get_recv, get_data):\n continue\n if self.func_data(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # content writing to file (if enabled) and quit statement\n elif sent == 5:\n data_list = bytes(data).decode().split('\\r\\n')\n for line in data_list:\n if str(line) == '.':\n if self.mail_save_enable != 0:\n out_file = open(self.mail_save_path + '/'\n + self.message_id + '.eml', 'w')\n out_file.write(msg_body)\n out_file.close()\n 
self.func_data_ok()\n sent = 1\n break\n else:\n msg_body += str(line) + '\\r\\n'\n except IndexError:\n if sent == 2:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n elif sent == 3:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))",
"async def handle_message(self, *args, **kwargs):\n\n # setup middlewares it not ready\n if not self.middleware_is_ready:\n self.setup_middlewares()\n\n # get a UserMessage object from args passed\n message = self.create_user_message(*args, **kwargs)\n \n # sends UserMessage to middlewares\n await self.proccess_message(message)",
"def on_message(ws, msg):\n data = json.loads(msg)\n if \"results\" in data:\n # This prints out the current fragment that we are working on\n text = data['results'][0]['alternatives'][0]['transcript'].lower()\n print(text)\n # Pass it to the callback\n if CALLBACK(text):\n # If it recognized something, stop listening\n global RUNNING\n RUNNING = False",
"def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.",
"async def relay(websocket, path):\n # register(websocket) sends user_event() to websocket\n await register(websocket)\n try:\n while True:\n try:\n message = await websocket.recv()\n except ConnectionClosed:\n break\n else:\n await relay_message(message, current_user=websocket)\n finally:\n await unregister(websocket)",
"def _transit_to_scores(self, **kwargs):\n logging.debug(\"in _transit_to_scores\")\n handler = kwargs['handler']\n\n game = models.Hangout.get_by_id(self.hangout_id).current_game.get()\n if not game:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': \"Game for hangout %s not found\" % (self.hangout_id,)})\n return False\n if game.state != self.state_name:\n return False # not in 'voting' state\n game.state = 'scores'\n participants = self._calculate_scores(game)\n game.put()\n # send out the score info on the channels.\n # TODO: currently, the scores for this round are only recorded briefly,\n # as the code below will reset them as part of the setup for the\n # next round/game. Might want to change this.\n # TODO: should the broadcasting part be part of the handler logic or\n # the state transition logic?\n self._broadcast_scores(participants, game.key.id(), game.current_round)\n\n # We can now start a new round. This resets the card selection and vote\n # fields. If we've had N rounds, this is a new game instead. \n if game.current_round >= (config.ROUNDS_PER_GAME - 1):\n # if have reached the limit of rounds for a game,\n # then start new game using the participants of the current game\n self.start_new_game(participants)\n return True\n else:\n # otherwise, start new round in the current game\n logging.info(\"starting new round.\")\n game.start_new_round(participants)\n return True",
"def process(self):\n # hello_message = HelloMessage(*self.message.value)\n # TODO: assert realm is in allowed list\n welcome_message = WelcomeMessage()\n self.answer_message = welcome_message",
"def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])",
"def on_message(self, ws, message):\n message = json.loads(message)\n if message['type'] == 'error':\n self.on_error(None, message['message'])\n elif message['type'] == 'subscriptions':\n print(\"Subscribed to {}\".format(', '.join([ channel['name'] for channel in message['channels'] ])))\n else:\n if ((message['type']=='ticker' and message['product_id'] in self._ticker) or \n (message['type'] in [\"snapshot\", \"l2update\"] and message['product_id'] in self._level2) or \n (message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] )):\n self.messages.append(message)\n elif message['type']=='heartbeat':\n self.updated_time = time.time()",
"async def process_turn(self, game):\r\n\r\n # Check if the player is an AI\r\n if self.is_ai:\r\n\r\n # Determine the best place to go and return the location\r\n # Use a sleep function to simulate decision making\r\n await sleep(1)\r\n self.determine_best_move(game.board)\r\n return None\r\n\r\n # The player is not an AI\r\n else:\r\n\r\n # Wait for the player to react with the spot they want to go\r\n def check_reaction(reaction, user):\r\n return (\r\n reaction.message.id == game.message.id and\r\n user.id == self.member.id and\r\n str(reaction) in game.get_valid_reactions()\r\n )\r\n done, pending = await wait([\r\n game.bot.wait_for(\"reaction_add\", check = check_reaction),\r\n game.bot.wait_for(\"reaction_remove\", check = check_reaction)\r\n ], return_when = FIRST_COMPLETED)\r\n reaction, user = done.pop().result()\r\n for future in pending:\r\n future.cancel()\r\n\r\n # Check if the player wants to QUIT the ConnectFourGame\r\n if str(reaction) == QUIT:\r\n return ConnectFourPlayer.QUIT\r\n\r\n # The player does not want to quit, make their requested move\r\n else:\r\n\r\n # Check if the column is full\r\n if game.board.is_column_full(CONNECT_FOUR_REACTIONS.index(str(reaction))):\r\n return ConnectFourPlayer.COLUMN_FULL\r\n\r\n # The column is not full, let the player go there\r\n else:\r\n game.board.add_piece(CONNECT_FOUR_REACTIONS.index(str(reaction)), is_challenger = game.challenger_turn)\r\n return None",
"def messageHandler(self):\n\n while len(self.ReceiveMessageBuffer) > 0: # if message handler is called all received messages will be processed\n #print 'entered message handler of ID {0}'.format(self.CommID)\n msg = self.ReceiveMessageBuffer.popleft()\n self.MsgReceiveCount += 1\n self.MsgReceiveCount_interval += 1\n type = msg.getType()\n # for communication test:\n if type == 0: #System message\n print 'ID {0} has received msg {1} from ID {2}'.format(self.CommID, msg.getData(), msg.getIDSender())\n # send reply\n data = msg.getData()\n if data == 'ping':\n retval = self.sendMessage(msg.getIDSender(), 0, 'pong')\n return retval\n elif data == 'pong':\n retval = self.sendMessage(msg.getIDSender(), 0, 'ping')\n return retval\n # elif data[0] == 'system':\n # if(data[1] == 'startRONOPT'):\n # #save fluctuation curve of cluster\n # self.EFluctuationCurve = data[4]\n # #begin with local optimization (data[2] = fromTime, data[3]=toTime)\n # self.stateRONOPT = 0\n # for n in range(len(self.Neighbors)):\n # self.NeighborMessageRec[n] = 0\n # self.RemainderOfNeighborsOpt(data[2],data[3],1)\n #########################################################################################################\n\n elif type == 20: # pseudo tree generation message\n ret = self.messageHandler_PseudoTree(msg)\n if ret == -1:\n break\n\n elif type == 40: # load propagation message\n self.messageHandler_LoadProp(msg)\n\n elif type == 70:\n self.messageHandler_RemainderMulticast(msg) #remainder multicast optimization\n\n return 0",
"def handle_message(self, msg: mqtt.MQTTMessage) -> None:\n payload = json.loads(msg.payload.decode(\"utf-8\"))\n logging.info(f\"Received a new message: {payload}\")\n if \"volume\" in payload:\n validate(payload, schema=self.volume_schema)\n self.volume = payload[\"volume\"]\n elif \"volumeCtrl\" in payload:\n validate(payload, schema=self.volume_ctrl_schema)\n self.volume_up() if payload[\"volumeCtrl\"] == \"+\" else self.volume_down()\n elif \"mute\" in payload:\n validate(payload, schema=self.mute_schema)\n self.mute = payload[\"mute\"]\n elif \"toggle\" in payload:\n validate(payload, schema=self.toggle_schema)\n self.toggle_mute() if payload[\"toggle\"] == \"mute\" else self.toggle_pause()\n elif \"ctrl\" in payload:\n validate(payload, schema=self.ctrl_schema)\n self.skip_forward() if payload[\"ctrl\"] == \">>\" else self.skip_backward()\n else:\n raise ValueError(f\"Cannot handle message: {payload}, not a valid command\")",
"def on_message(self, msg):\n\n msg = json.loads(msg)\n\n psession = self.funcserver.pysessions.get(self.pysession_id, None)\n if psession is None:\n interpreter = PyInterpreter(self.funcserver.define_python_namespace())\n psession = dict(interpreter=interpreter, socks=set([self.id]))\n self.funcserver.pysessions[self.pysession_id] = psession\n else:\n interpreter = psession[\"interpreter\"]\n psession[\"socks\"].add(self.id)\n\n code = msg[\"code\"]\n msg_id = msg[\"id\"]\n\n stdout = sys.stdout\n try:\n sys.stdout = cStringIO.StringIO()\n interpreter.runsource(code)\n output = sys.stdout.getvalue() or interpreter.output\n if isinstance(output, list):\n output = \"\".join(output)\n interpreter.output = []\n finally:\n sys.stdout = stdout\n\n msg = {\"type\": MSG_TYPE_CONSOLE, \"id\": msg_id, \"data\": output}\n self.send_message(msg)",
"def handle_message(self, mxmsg):\n if self._handler is None:\n raise NotImplementedError()\n\n self.notify_started()\n response = self._handler(mxmsg)\n if response == ():\n self.no_response()\n elif isinstance(response, str):\n self.send_message(message=response, type=MessageTypes.PING)\n elif isinstance(response, dict):\n self.send_message(**response)\n else:\n raise ValueError(\"Unsupported handler return type %r\" %\n type(response))",
"async def _handle_sent_message(self, guild_id: int, op: str, payload: Dict[str, Any]) -> None:\n if op == \"voice-server-update\":\n update = andesite.VoiceServerUpdate(payload[\"sessionId\"], payload[\"event\"])\n coro = self.handle_voice_server_update(guild_id, update)\n else:\n return\n\n err_cb: Callable = functools.partial(self.on_handle_sent_message_error, guild_id, op, payload)\n await _run_with_error_callback(coro, err_cb)",
"def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)",
"def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)",
"def async_handle_message(self, msg: dict) -> None:\n if msg[\"type\"] == \"result\":\n future = self._result_futures.get(msg[\"messageId\"])\n\n if future is None:\n self._logger.warning(\n \"Received result for unknown message: %s\", msg[\"messageId\"]\n )\n return\n\n if msg[\"success\"]:\n future.set_result(msg[\"result\"])\n return\n\n future.set_exception(FailedCommand(msg[\"messageId\"], msg[\"errorCode\"]))\n return\n\n if self.driver is None:\n raise InvalidState(\"Did not receive state as first message\")\n\n if msg[\"type\"] != \"event\":\n # Can't handle\n return\n\n event = Event(type=msg[\"event\"][\"event\"], data=msg[\"event\"])\n self.driver.receive_event(event)",
"def handle_received(self) -> None:\n self.buffer: bytes\n while self.buffer:\n try:\n request, self.buffer = parse_request(self.buffer)\n if request is None:\n _LOGGER.debug(\"Not enough data to parse request on event channel\")\n break\n\n _LOGGER.debug(\"Got message on event channel: %s\", request)\n\n # Send a positive response to satisfy the other end of the channel\n # TODO: Add public method to pyatv.http to format a message\n headers = {\n \"Content-Length\": 0,\n \"Audio-Latency\": 0,\n \"Server\": request.headers.get(\"Server\"),\n \"CSeq\": request.headers.get(\"CSeq\"),\n }\n response = (\n f\"{request.protocol}/{request.version} 200 OK\\r\\n\"\n + \"\\r\\n\".join(f\"{key}: {value}\" for key, value in headers.items())\n + \"\\r\\n\\r\\n\"\n )\n self.send(response.encode(\"utf-8\"))\n except Exception:\n _LOGGER.exception(\"Failed to handle message on event channel\")",
"def on_message(self, data):\n req = json.loads(data)\n self.serve(req)",
"def on_message(self, data):\n req = json.loads(data)\n self.serve(req)",
"def call_received(action,element_id):\n\t\ttry:\n\t\t\tnew_entry = None\n\t\t\tif action == 'add':\n\t\t\t\t#leader will add to the board and then propagate it to the other vessels\n\t\t\t\tnew_entry = request.body.read()\n\t\t\t\tmax_sequence = max(board,key=int)\n\t\t\t\telement_id = max_sequence+1\n\t\t\t\tadd_new_element_to_store(element_id, new_entry)\n\t\t\tif action == 'modify':\n\t\t\t\tnew_entry = request.body.read()\n\t\t\t\tmodify_element_in_store(element_id,new_entry)\n\t\t\tif action == 'delete':\n\t\t\t\tdelete_element_from_store(element_id)\n\t\t\tthread=Thread(target=propagate_to_vessels,args=('/propagate/{}/{}'.format(action,element_id),new_entry))\n\t\t\tthread.daemon= True\n\t\t\tthread.run() # Thread.run() is not Thread.start(), it does not do a separate thread\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn False\n\t\t#this route is used when the leader receives an action from another vessel\n\t\t#the leader receives the actions through this route and then propagate them to the other vessels using the regular route",
"def message_handler(self, dest, source, message):\n pass"
] | [
"0.62253714",
"0.6034103",
"0.6009861",
"0.60036653",
"0.6001996",
"0.59676135",
"0.5915021",
"0.58753765",
"0.58735913",
"0.5867791",
"0.5857927",
"0.5834144",
"0.5820389",
"0.58074945",
"0.5798301",
"0.5788626",
"0.5782153",
"0.5777276",
"0.5736334",
"0.5656241",
"0.5642342",
"0.5607427",
"0.5601038",
"0.5595248",
"0.55904734",
"0.5582438",
"0.55785877",
"0.55758494",
"0.5565749",
"0.55592424",
"0.5551887",
"0.55444723",
"0.5534121",
"0.5527456",
"0.55078834",
"0.5506328",
"0.5506171",
"0.5485421",
"0.5482009",
"0.5472779",
"0.5467524",
"0.54328626",
"0.54283786",
"0.5416351",
"0.54059047",
"0.5398611",
"0.5385169",
"0.5358551",
"0.53515494",
"0.5343057",
"0.53233904",
"0.53157663",
"0.5305009",
"0.5279575",
"0.5278624",
"0.52738637",
"0.5273027",
"0.5271324",
"0.5270255",
"0.5269653",
"0.526761",
"0.5266271",
"0.5263099",
"0.52572364",
"0.5255205",
"0.5253718",
"0.5253264",
"0.52495575",
"0.5233347",
"0.52190626",
"0.52183074",
"0.5213852",
"0.5212813",
"0.5208292",
"0.5206911",
"0.5205851",
"0.5193405",
"0.51912117",
"0.5187265",
"0.5179775",
"0.5178608",
"0.51743466",
"0.517206",
"0.51710755",
"0.51607126",
"0.51485157",
"0.5148022",
"0.51470745",
"0.5137957",
"0.51321656",
"0.5110774",
"0.5104673",
"0.50994635",
"0.5088685",
"0.5087081",
"0.508401",
"0.5081241",
"0.5081241",
"0.50786597",
"0.50754887"
] | 0.72230154 | 0 |
Do not return anything, modify matrix in-place instead. | def rotate(self, matrix: List[List[int]]) -> None:
flip(transpose(matrix))  # rotate 90° clockwise, assuming helpers defined elsewhere: transpose() mirrors across the main diagonal in place, flip() reverses each row | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __update_matrix(self, old_matrix_view):\n # if we've cleaned dirt - we will see it on our next move, so we substitute only unseen cells\n # which are marked with \"o\"\n new_matrix_view = []\n for row in range(self.matrix_rows):\n new_matrix_view.append([char for char in input()])\n\n if old_matrix_view:\n for row in range(self.matrix_rows):\n for col in range(self.matrix_cols):\n if new_matrix_view[row][col] == \"o\":\n new_matrix_view[row][col] = old_matrix_view[row][col]\n\n return new_matrix_view",
"def update(mat) -> np.ndarray:\n return mat",
"def copy_matrix(matrix):\n import numpy as np\n copy_of_m = np.copy(matrix)\n return copy_of_m",
"def transform_mat(matrix):\n delta = 1e-5\n matrix = matrix + delta\n return matrix",
"def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix",
"def matNew(mat):\n return matCopy(mat)",
"def newMatrix(self):\n self.matrix = makeMatrix()\n for row in range(self.matrix.getHeight()):\n for column in range(self.matrix.getWidth()):\n self.canvasGrid[row][column].draw(self.matrix[row][column])",
"def _setMatrixRow(self, row):\n item = self._item()\n if item is not None:\n matrix = item.getMatrix()\n matrix[self._index, :] = row.x(), row.y(), row.z()\n item.setMatrix(matrix)",
"def vec_matrix_update(self, A, y, P, evecr):\n\n l = y.shape[1]\n A_old = A[:, :(P) * l]\n\n y = np.matrix(y)\n\n if evecr.size:\n new_cols = y * evecr\n A_old = np.concatenate((A_old, new_cols[P:-1]), axis=1)\n\n N = A.shape[0]\n A_old = np.concatenate((A_old, np.ones([N, 1])), axis=1)\n\n return np.array(A_old)",
"def rowReduce(self):\n myMatrix = Matrix(self.Matrix)\n print(\"This is the row reduced echelon form of your matrix: \\n\", myMatrix.rref())",
"def reiniciarMatrix(self):\n self.matrixMAPA = []\n self.rellenarMatrix()",
"def update_F_matrix(self, F_matrix):\n self.F_matrix = F_matrix",
"def _clear_matrix(self):\n\t\tself._w2i_matrix = self._i2w_matrix = None",
"def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()",
"def matrix(self):\n try:\n return self.__matrix\n except AttributeError:\n old_matrix_view = self.__read_matrix_file()\n new_matrix_view = self.__update_matrix(old_matrix_view)\n self.__write_matrix_to_file(new_matrix_view)\n self.__matrix = new_matrix_view\n return self.__matrix",
"def matrix_add():",
"def _apply_correction(self):\n np.copyto(self.network.weights, self.correction_matrix)",
"def deepercopy(matrix):\n if isinstance(matrix[0], int) or isinstance(matrix[0], float):\n newmat = [0 for x in range(len(matrix))]\n for i in range(len(matrix)):\n newmat[i] = matrix[i]\n return newmat\n else:\n newmat = [0 for x in range(len(matrix))]\n for i in range(len(matrix)):\n newmat[i] = deepercopy(matrix[i])\n return newmat",
"def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n not_fixed = set( ((i, j)) for i in range(0,n) for j in range(0,n))\n\n while not_fixed:\n i, j = not_fixed.pop()\n not_fixed.add((i,j))\n old_value = matrix[i][j]\n\n while True: # complete the cycle of fixes beginning at (i,j)\n i, j = j, n - i - 1\n if (i, j) not in not_fixed:\n break\n tmp = matrix[i][j]\n matrix[i][j] = old_value\n not_fixed.remove((i, j)) \n old_value = tmp",
"def rotate(self, matrix: List[List[int]]) -> None:\n length = len(matrix)\n for row in range(length//2):\n for col in range(row, length-row-1):\n # matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row], matrix[row][col] = matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n return",
"def process(self, mat):",
"def mutate_matrix(matrix):\n L = len(matrix)\n r_i = random.randrange(L)\n r_j = random.randrange(4)\n r = random.gauss(0,1)\n return [[matrix[i][j]+r*(i==r_i)*(j==r_j)\n for j in range(4)] for i in range(L)]",
"def overlayMatrix(board_object, item_object, x, y):\n board_matrix = board_object.returnMatrixBoard()\n item_matrix = item_object.returnMatrix()\n k = 0\n l = 0\n for i in range(x, x + item_object.length):\n for j in range(y, y + item_object.width):\n board_matrix[i][j] = item_matrix[k][l]\n l += 1\n k += 1\n l = 0\n board_object.editBoard(board_matrix)",
"def _transform(self, matrix):\n for x in list(self.keys()):\n ar = self[x]\n if len(ar.shape) == 2 and ar.shape[1] == 3:\n self[x] = np.dot(matrix, ar.transpose()).transpose()",
"def apply_mask(mask_matrix_df, original_matrix_df):\n\n print(\"Applying the mask ...\")\n\n original_matrix_columns = list(original_matrix_df)\n original_matrix_rows = list(original_matrix_df.index)\n\n mask_array = mask_matrix_df.to_numpy()\n original_array = original_matrix_df.to_numpy().astype(float)\n\n\n # Note: np.nan cannot be inserted into an array of type int. The array needs to be float.\n np.putmask(original_array, mask_array, np.nan)\n\n\n after_masking_df = pd.DataFrame(original_array, columns=original_matrix_columns, index=original_matrix_rows)\n return after_masking_df",
"def clone_matrix(mat):\n return [[x for x in row] for row in mat]",
"def shrinkTrackMatrix(self):\n self.tracksMatrix = self.tracksMatrix[0:(len(self.tracksMatrix)-1)]\n self.attributesMatrix = self.attributesMatrix[0:(len(self.attributesMatrix)-1)]",
"def copy(self):\n rdd = self._data.map(\n lambda m: m\n )\n\n return Matrix(rdd, self._shape,\n dtype=self._dtype, coord_format=self._coord_format, nelem=self._nelem)",
"def forward_substitution(self):\r\n for col in range(0, self.SIZE):\r\n self.check_solvability(self.matrix[col][col], self.result[col])\r\n self.result[col] = self.divide(self.result[col], self.matrix[col][col])\r\n for row in range(col + 1, self.SIZE):\r\n self.result[row] -= (self.result[col] * self.matrix[row][col])\r\n return self.result",
"def removeMatrixTranslate(matrix):\n\n float_matrix = [matrix(i, j) for i in xrange(4) for j in xrange(4)]\n for idx in range(12, 15):\n float_matrix[idx] = 0.0\n \n outMatrix = OpenMaya.MFloatMatrix()\n OpenMaya.MScriptUtil.createFloatMatrixFromList(float_matrix , outMatrix)\n\n return outMatrix",
"def iteration_improve(opt_matrix, over_alloc_pct, under_alloc_pct, can_add, can_remove):\n for idx in prange(opt_matrix.shape[0]):\n row_values = opt_matrix[idx, :]\n improve_single_row(row_values, over_alloc_pct, under_alloc_pct, can_add, can_remove)\n\n return opt_matrix",
"def rotate(self, matrix: list) -> None:\n for i in range(len(matrix)):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n # matrix[i].reverse()\n print(matrix)\n for i in range(len(matrix)):\n matrix[i].reverse()\n print(matrix)",
"def flush(self):\n\n if hasattr(self, 'trilinosMatrix'):\n if hasattr(self.matrix, 'storeZeros'):\n self.trilinosMatrix.flush(cacheStencil=self.matrix.storeZeros)\n else:\n self.trilinosMatrix.flush(cacheStencil=False)\n\n if (not hasattr(self, 'cache')) or (self.cache is False):\n del self.matrix",
"def matSet(mat, r, c, v):\n mat[r][c]=v",
"def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix) # 行\n\n # 以x=y为轴翻转\n # [[1,2,3],\n # [4,5,6],\n # [7,8,9]]\n # 变为\n # [1 4 7]\n # [2 5 8]\n # [3 6 9]\n for i in range(n):\n for j in range(i, n):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # 以中点为轴翻转\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n - j - 1] = matrix[i][n - j - 1], \\\n matrix[i][j]\n\n # 非原地修改写法,先上下翻转,再以x=y为轴复制对应数字\n # n = len(matrix)\n # r = list(zip(*matrix[::-1]))\n # for i in range(n):\n # for j in range(n):\n # matrix[i][j] = r[i][j]",
"def change_basis(self, U_global):\n self.matrix = U_global @ self.matrix @ np.conj(U_global).T",
"def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix",
"def make_immutable(mat):\n if issparse(mat):\n mat.data.flags.writeable = False\n if mat.format in {\"csr\", \"csc\", \"bsr\"}:\n mat.indices.flags.writeable = False\n mat.indptr.flags.writeable = False\n elif mat.format == \"coo\":\n mat.row.flags.writeable = False\n mat.col.flags.writeable = False\n else:\n mat.flags.writeable = False",
"def reset(self):\n self.mat = np.zeros(9).reshape(3,3).astype(np.int32)\n return self.mat",
"def copy_matrix(self, M):\r\n # Section 1: Get matrix dimensions\r\n rows = len(M)\r\n cols = len(M[0])\r\n \r\n # Section 2: Create a new matrix of zeros\r\n MC = self.zeros_matrix(rows, cols)\r\n \r\n # Section 3: Copy values of M into the copy\r\n for i in range(rows):\r\n for j in range(cols):\r\n MC[i][j] = M[i][j]\r\n \r\n return MC",
"def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty",
"def copy(self) -> 'MatrixBoolean':\n\t\treturn MatrixBoolean(matrix=self.matrix)",
"def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass",
"def copy_input_pattern(self, matrix, output_neuron, input):\n matrix[output_neuron, :] = input",
"def to_matrix(self, normalize: bool = True) -> jnp.ndarray:\n return NotImplemented # pragma: no cover",
"def setZeroes(self, matrix: List[List[int]]) -> None:\n new_matrix = [row.copy() for row in matrix]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0 and new_matrix[i][j] == 0:\n self.setZero(matrix, i, j)",
"def fill_matr(matrix):\n for j in range(len(matrix)):\n for i in range(len(matrix[0])):\n print(\"enter another element\")\n matrix[j][i] = int(input()) \n print(\"matrix is full\")\n return matrix",
"def copy(self):\n # Warning: Because we use memcpy and thus copy memory internally, we have to be careful to always update this method\n # whenever the CSRSparseMatrix class changes...\n\n cdef CSRSparseMatrix_INT64_t_FLOAT32_t self_copy\n\n # we copy manually the C-arrays\n cdef:\n FLOAT32_t * val\n INT64_t * col\n INT64_t * ind\n INT64_t nnz\n\n nnz = self.nnz\n\n self_copy = CSRSparseMatrix_INT64_t_FLOAT32_t(control_object=unexposed_value, nrow=self.__nrow, ncol=self.__ncol, store_zero=self.__store_zero, store_symmetric=self.__store_symmetric)\n\n val = <FLOAT32_t *> PyMem_Malloc(nnz * sizeof(FLOAT32_t))\n if not val:\n raise MemoryError()\n memcpy(val, self.val, nnz * sizeof(FLOAT32_t))\n self_copy.val = val\n\n col = <INT64_t *> PyMem_Malloc(nnz * sizeof(INT64_t))\n if not col:\n PyMem_Free(self_copy.val)\n raise MemoryError()\n memcpy(col, self.col, nnz * sizeof(INT64_t))\n self_copy.col = col\n\n ind = <INT64_t *> PyMem_Malloc((self.__nrow + 1) * sizeof(INT64_t))\n if not ind:\n PyMem_Free(self_copy.val)\n PyMem_Free(self_copy.col)\n raise MemoryError()\n memcpy(ind, self.ind, (self.__nrow + 1) * sizeof(INT64_t))\n self_copy.ind = ind\n\n self_copy.__nnz = nnz\n\n self_copy.__col_indices_sorted_test_done = self.__col_indices_sorted_test_done\n self_copy.__col_indices_sorted = self.__col_indices_sorted\n self_copy.__first_row_not_ordered = self.__first_row_not_ordered\n\n return self_copy",
"def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for dig in range(n):\n row = dig\n for col in range(dig+1, n):\n matrix[row][col] , matrix[col][row] = matrix[col][row], matrix[row][col]\n print(matrix)\n left = 0\n right = n-1\n while left < right:\n for row in range(n):\n matrix[row][left], matrix[row][right] = matrix[row][right], matrix[row][left]\n left+=1\n right-=1",
"def rotate(self, matrix) -> None:\n c = len(matrix)\n matrix[:] = [[matrix[c-i-1][j] for i in range(c)] for j in range(c)]",
"def _matrix(*params):\n raise NotImplementedError",
"def add_to_row(M, i, j):\n N = copy.deepcopy(M)\n N[i] = 1 * np.logical_xor(N[i], N[j])\n return N",
"def dup_matrix(self):\n maxtrixcalc = importr('matrixcalc') # load matrixcalc library\n rscript = 'D.matrix( )'.replace(' ', str(self.k)) # generate R script\n rmatrix = robjects.r(rscript) # run R script\n dup_mat = np.array(rmatrix) # convert to ndarray\n return dup_mat # ndarray",
"def rotate(self, matrix: List[List[int]]) -> None:\n for r in range(len(matrix)):\n for c in range(r):\n matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]\n for row in matrix:\n row.reverse()",
"def add_row(matrix):\n\tl = len(matrix[0])\n\ttemp = matrix[:]\n\ttemp += [[0]*l]\n\treturn temp",
"def rotate1(self, matrix: List[List[int]]) -> None:\n matrixLen = len(matrix)\n\n for i in range(matrixLen):\n for j in range(i, matrixLen):\n print(i, j)\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for i in range(matrixLen):\n for j in range(matrixLen // 2):\n matrix[i][j], matrix[i][matrixLen - 1 - j] = matrix[i][matrixLen - 1 - j], matrix[i][j]",
"def rotate(self, matrix: List[List[int]]) -> None:\n if(matrix == None or len(matrix) == 1): return\n n = len(matrix)\n for i in range(0, n//2 + 1):\n for j in range(i, n-1-i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n-1-j][i]\n matrix[n-1-j][i] = matrix[n-1-i][n-1-j]\n matrix[n-1-i][n-1-j] = matrix[j][n-1-i]\n matrix[j][n-1-i] = tmp\n \n return",
"def copy(self):\n data = self.data.copy()\n return MPMatrix(self.shape, data)",
"def clear(self):\n for y in range(len(self.matrix)):\n for x in range(len(self.matrix[0])):\n self.matrix[y-1][x-1] = (0,0,0)",
"def second_inplace(a):",
"def update_kb(self, row, col, falseNeg, result: bool = False):\n pass",
"def perform_gauss_jordan_elimination_(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n \n r = 0\n c = 0\n rows, cols = len(m), len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n _swap = False\n if m[r,c] == 0:\n for i in range(r+1,rows):\n if m[i,c] == 1:# If new pivot found... swap\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n m[[i,r]] = m[[r,i]] ## Swap\n _swap = True\n if show:\n print_matrix(m)\n break # No more swapping in this column\n if not _swap: ## No swap, move to the next column, same row\n c+=1\n\n if m[r,c] == 1:\n ## XOR\n for i in range(rows):\n indexes = np.setdiff1d(np.where(m[:,c] == 1),r) # Get all the ones to XOR in the same column\n for i in indexes:\n m[i] = np.bitwise_xor(m[i],m[r]) # Bitwise XOR\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column) are treated\n if r == rows or c >= cols-1:\n break\n\n if show:\n print(\"Final State\")\n print_matrix(m)\n \n return m",
"def setZeroes(self, matrix: List[List[int]]) -> None:\r\n import copy\r\n m=len(matrix)\r\n n=len(matrix[0])\r\n m_copy=copy.deepcopy(matrix)\r\n for i in range(m):\r\n for j in range(n):\r\n if m_copy[i][j]==0:\r\n matrix[i]=[0]*n\r\n for x in range(m):\r\n matrix[x][j]=0",
"def inv_inplace(a):",
"def setZeroes(matrix):\r\n \r\n #An average O(n^2) time traversal solution with memoization\r\n #for each 0 we encounter, we update the entire row and column to 0s, but on the condition that the row/column has not been updated yet\r\n \r\n row_cache = {}\r\n column_cache = {}\r\n \r\n for r in range(0,rows := len(matrix)):\r\n for c in range(0,cols := len(matrix[0])):\r\n \r\n if matrix[r][c] == 0:\r\n \r\n if not row_cache.get(r):\r\n for i in range(0,cols):\r\n if matrix[r][i] != 0:\r\n matrix[r][i] = '0' #we use strings so we only consider the initial 0s\r\n row_cache[r] = True\r\n \r\n if not column_cache.get(c):\r\n for i in range(0,rows):\r\n if matrix[i][c] != 0:\r\n matrix[i][c] = '0'\r\n column_cache[c] = True\r\n return",
"def relax(self):\n # print(\"putin\", self.level.rhs.reshape(-1)[:])\n # print(\"getout\", self.solver(self.level.rhs.reshape(-1)))\n\n self.level.mid[:] = self.solver(self.level.rhs.reshape(-1)).reshape(self.level.mid.shape)",
"def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for col in range(n):\n tmp = []\n for row in range(n):\n tmp.append(matrix[n-1-row][col])\n matrix.append(tmp)\n del(matrix[:n])",
"def update_E(self):\n self.grid.E[:, 0, :, :] = self.grid.E[:, -1, :, :]",
"def update_rec(self):\n import copy\n \n self.leftrec, self.rightrec = copy.copy(self.rec), copy.copy(self.rec)\n self.leftrec[2*self.dim + 1], self.rightrec[2*self.dim] = self.node.dimension[self.dim], self.node.dimension[self.dim]",
"def add_entry(matrix,i,j,replace=False):\n if j not in matrix[i].keys():\n matrix[i][j] = abs(i - j)\n else:\n if replace:\n matrix[i][j] = abs(i - j)",
"def add_dummy_location_to_matrix(matrix):\n matrix = [row + [0] for row in matrix]\n last_row = [0 for _ in range(len(matrix) + 1)]\n matrix.append(last_row)\n return matrix",
"def setNeedToComputeMatrix(self, *args):\n return _osgAnimation.RigGeometry_setNeedToComputeMatrix(self, *args)",
"def update_matrix(self, ope, mat):\n ope_coord = []\n for coord in self.coord_name:\n if np.isnan(ope[coord]):\n return\n ope_coord.append(int(ope[coord]))\n mat[tuple(ope_coord)] += 1",
"def fast_update_col(self,j,vals):\n dataptr = self.col_view[:,j].data\n self.X.data[dataptr] = vals",
"def transform(self, transformer):\n\t\tnew_matrix = Matrix(self.dims)\n\t\tnew_matrix.data = [transformer(copy.deepcopy(c)) for c in self.data]\n\t\treturn new_matrix",
"def rotate(self, matrix: List[List[int]]) -> None:\r\n n = len(matrix)\r\n for j in range((n+1)//2):\r\n for i in range(n-2*j-1):\r\n matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i], matrix[n-1-j-i][j] = matrix[n-1-j-i][j], matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i]",
"def _inv(self) -> None:\n\n self.inv(inplace=True)",
"def perform_gauss_jordan_elimination(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n\n r, c = 0, 0\n rows = len(m)\n cols = len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n _swap = False\n\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n if m[r][c] == 0:\n ## Swap\n for i in range(rows):\n if r != i and i > r: ## Avoid comparing the same row and do not swap to upper rows\n if m[i][c] == 1 and not _swap: ## Check if a swap is not performed before in the same column\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n #m = swap(m,r,i)\n temp = m[r]\n m[r] = m[i]\n m[i] = temp\n _swap = True\n if show:\n print_matrix(m)\n if not _swap: ## If not swap, means there is no 1 to swap, so go to the next column\n c+=1\n\n if m[r][c] == 1:\n ## XOR\n for i in range(rows):\n if r != i: ## Avoid comparing the same row\n if m[i][c] == 1:\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n for e in range(len(m[0])):\n m[i][e] ^= m[r][e]\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column are treated)\n if r == rows or c >= cols-1:\n break\n \n return m",
"def rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()",
"def copy_matrix(M):\n rows = len(M)\n cols = len(M[0])\n\n MC = zeros_matrix(rows, cols)\n\n for i in range(rows):\n for j in range(rows):\n MC[i][j] = M[i][j]\n\n return MC",
"def wrapDBMatrix(self,mat):\n return mat.todense()",
"def setZeroes(self, matrix: List[List[int]]) -> None:\n m, n = len(matrix), len(matrix[0])\n\n col_0_flag = any(matrix[i][0] == 0 for i in range(m))\n row_0_flag = any(matrix[0][i] == 0 for i in range(n))\n\n # 第 0 列, 第 0 行可以作为 [i,j] 的指示存储\n # 例如,如果 [i,j] 为 0, 那么 [0, j], [i, 0] 可以置 0 \n # 再次遍历矩阵,通过判断 [0, j], [i, 0] 去把该行,该列置 0\n # 这样的话就不需要一个额外的矩阵去 for i,j 遍历是否为 0 了\n\n # 注意是从 1 开始的。 \n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][j] == 0:\n matrix[i][0] = matrix[0][j] = 0 \n\n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][0] == 0 or matrix[0][j] == 0:\n matrix[i][j] = 0\n\n if col_0_flag:\n for i in range(m):\n matrix[i][0] = 0\n\n if row_0_flag:\n for j in range(n):\n matrix[0][j] = 0",
"def copy(self):\n data = dict()\n m, n = self.shape\n for i in range(m):\n for j in range(n):\n data[i, j] = self[i, j]\n return MPMatrix(self.shape, data)",
"def update(frame_num, mat, grid, N):\n\n new_grid = np.copy(grid)\n #print(\"grid size:\", grid.shape)\n for i in range(1, grid.shape[0]-1):\n for j in range(1, grid.shape[1]-1):\n neighbors = int(grid[i-1, j] + grid[i+1, j] + \\\n grid[i, j+1] + grid[i, j-1] + \\\n grid[i-1,j-1] + grid[i+1,j+1] + \\\n grid[i+1,j-1] + grid[i-1,j+1])\n if grid[i, j] == ON:\n if not (2 <= neighbors <= 3):\n new_grid[i, j] = OFF\n elif grid[i, j] == OFF and neighbors == 3:\n # Grow a cell\n new_grid[i, j] = ON\n else:\n new_grid[i, j] = OFF\n\n ### Update new grid\n mat.set_data(new_grid)\n grid[:] = new_grid[:] # Brackets are important\n return mat",
"def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n if n <= 1:\n return\n\n for i in range((n + 1)//2):\n for j in range(i, n - 1 - i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - 1 - j]\n matrix[n - 1 - i][n - 1 - j] = matrix[j][n - 1 - i]\n matrix[j][n - 1 - i] = tmp",
"def update_E(self):\n self.grid.E[:, :, 0, :] = self.grid.E[:, :, -1, :]",
"def __neg__(self):\n #\n # TODO - your code here\n #\n matrix_neg = []\n for i in range(self.h):\n row = []\n for j in range(self.w):\n row.append(0-self.g[i][j])\n matrix_neg.append(row)\n return Matrix(matrix_neg)\n # TODO - your code here",
"def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix[0])\n for i in range(n // 2 + n % 2):\n for j in range(n // 2):\n tmp = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - j - 1]\n matrix[n - 1 - i][n - j - 1] = matrix[j][n - 1 -i]\n matrix[j][n - 1 - i] = matrix[i][j]\n matrix[i][j] = tmp",
"def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n - 1):\n for j in range(n - 1 - i):\n matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]\n for i in range(n):\n for j in range(n // 2):\n matrix[j][i], matrix[n-1-j][i] = matrix[n-1-j][i], matrix[j][i]",
"def getMatrix(self, frame):\n self.matrix[3, 0]=self.getValue(frame)\n return self.matrix",
"def rotate(self, matrix: list[list[int]]) -> None:",
"def inverse(self) -> 'Matrix':\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Must be a square matrix. This one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n\n # 1) Construct the minor_matrix. Feel free to make this a separate method.\n minor_matrix_times_cofactor = Matrix.zeros(self.shape())\n\n for i in range (num_R):\n for j in range(num_C):\n minor_matrix_times_cofactor.mat[i][j] = self.get_minor(i,j).determinant() * (-1)**(i+j)\n\n minor_matrix_times_cofactor.display(message=\"minor\")\n # 2) Calculate the determinant, either by calling the determinant() method or by using the minor_matrix (faster)\n det = 0\n for i in range (num_R):\n det += self.mat[i][0] * minor_matrix_times_cofactor.mat[i][0]\n #print (f\"determinant: {self.determinant()}\")\n # 3) The inverse is the transpose of the minor matrix, divided by the determinant. Make sure that the determinant\n # isn't zero!\n if det == 0:\n return None\n return minor_matrix_times_cofactor.transpose().times(1/det)\n\n return Matrix([[\"Not yet written\"]]) # remove this when you add your code.\n # -------------------------------------------------------",
"def rotate(self, matrix: List[List[int]]) -> None:\n height=len(matrix)\n for h in range(math.ceil(height/2)):\n for i in range(h,height-h-1):\n # print((h,i), (height-i-1,h))\n temp=matrix[h][i]\n matrix[h][i] = matrix[height-i-1][h]\n matrix[height-i-1][h] = matrix[height-h-1][height-i-1]\n matrix[height-h-1][height-i-1] = matrix[i][height-h-1]\n matrix[i][height-h-1] = temp",
"def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])",
"def setZeroes(self, matrix: List[List[int]]) -> None:\n row_num, col_num = len(matrix), len(matrix[0])\n # 创建集合set()用于存放需要置零的行和列\n row_set, col_set = set(), set()\n for row in range(row_num):\n for col in range(col_num):\n if matrix[row][col]==0:\n row_set.add(row)\n col_set.add(col)\n # 将记录的行、列中的元素赋值为0\n # 再次遍历赋值\n for row in range(row_num):\n for col in range(col_num):\n if row in row_set or col in col_set:\n matrix[row][col] = 0\n # # 或者行列单独赋值均可\n # for row in row_set:\n # for col in range(col_num):\n # matrix[row][col] = 0\n # for col in col_set:\n # for row in range(row_num):\n # matrix[row][col] = 0",
"def __setitem__(self, idx, value):\n row, col = idx\n\n if row < 0 or row >= self.num_rows:\n raise IndexError(\"Row out of bounds\")\n\n if col < 0 or col >= self.num_cols:\n raise IndexError(\"Col out of bounds\")\n\n if value == self.default:\n del self[row, col]\n return\n\n array_row = self._find_row_before(row)\n\n if (array_row.next_row == None or array_row.next_row.row_number > row):\n new_row = SparseMatrix.MatrixRow()\n new_row.row_number = row\n new_row.next_row = array_row.next_row\n array_row.next_row = new_row\n\n sentinel_entry = SparseMatrix.MatrixEntry()\n new_row.row_sentinel = sentinel_entry\n\n array_row = array_row.next_row\n array_entry = self._find_column_before(array_row, col)\n\n if (array_entry == None or array_entry.next_entry == None or\n array_entry.next_entry.column_number > col):\n new_entry = SparseMatrix.MatrixEntry()\n new_entry.column_number = col\n if array_entry == None:\n new_entry.next_entry = None\n else:\n new_entry.next_entry = array_entry.next_entry\n array_entry.next_entry = new_entry\n\n array_entry = array_entry.next_entry\n array_entry.value = value",
"def inv(M):\n\t#clone the matrix and append the identity matrix\n\t# [int(i==j) for j in range_M] is nothing but the i(th row of the identity matrix\n\tm2 = [row[:]+[int(i==j) for j in range(len(M) )] for i,row in enumerate(M) ]\n\t# extract the appended matrix (kind of m2[m:,...]\n\treturn [row[len(M[0]):] for row in m2] if gauss_jordan(m2) else None",
"def getMatrix(self, frame):\n self.matrix[3, 1]=self.getValue(frame)\n return self.matrix",
"def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")",
"def rotate(self, matrix: List[List[int]]) -> None:\n for i in range(len(matrix)):\n matrix[i] = matrix[i][::-1]\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][len(matrix[0])-1-j]\n matrix[i][len(matrix[0])-1-j] = matrix[j][len(matrix[0])-1-i]\n matrix[j][len(matrix[0])-1-i] = temp",
"def back_substitution(self):\r\n for col in range(self.SIZE - 1, -1, -1):\r\n self.check_solvability(self.matrix[col][col], self.result[col])\r\n self.result[col] = self.divide(self.result[col], self.matrix[col][col])\r\n for row in range(col - 1, -1, -1):\r\n self.result[row] -= (self.result[col] * self.matrix[row][col])\r\n return self.result"
] | [
"0.70664227",
"0.684492",
"0.6359314",
"0.6214309",
"0.61908424",
"0.6126168",
"0.6038709",
"0.5992883",
"0.5966755",
"0.5938156",
"0.5928229",
"0.5917942",
"0.5897078",
"0.5853082",
"0.58382195",
"0.580802",
"0.5800106",
"0.57914895",
"0.57380044",
"0.57230836",
"0.57099885",
"0.56717426",
"0.5623345",
"0.5615397",
"0.5611043",
"0.5594581",
"0.55921733",
"0.5582937",
"0.5581951",
"0.55810326",
"0.5570501",
"0.55640376",
"0.55607617",
"0.55504644",
"0.55423045",
"0.5540422",
"0.55385673",
"0.55385596",
"0.5533894",
"0.55111486",
"0.5494054",
"0.54923135",
"0.548709",
"0.5485191",
"0.54841536",
"0.54817104",
"0.5477398",
"0.54720294",
"0.5452007",
"0.545186",
"0.5450168",
"0.54383063",
"0.54213834",
"0.54208875",
"0.5414828",
"0.5412333",
"0.5412259",
"0.5411154",
"0.5410582",
"0.54047436",
"0.5402319",
"0.5397533",
"0.5395588",
"0.5394389",
"0.5390302",
"0.53901607",
"0.5383072",
"0.5369358",
"0.53691834",
"0.53650117",
"0.53644997",
"0.5361332",
"0.5355291",
"0.53519946",
"0.53508115",
"0.5348296",
"0.53448385",
"0.5338346",
"0.5329116",
"0.5322077",
"0.5321004",
"0.5315204",
"0.5313053",
"0.53071964",
"0.52905816",
"0.52772385",
"0.5274714",
"0.52720475",
"0.52710605",
"0.52658415",
"0.52649647",
"0.52642614",
"0.5260099",
"0.52586913",
"0.5258238",
"0.52579385",
"0.524606",
"0.52418184",
"0.5241748",
"0.52367955",
"0.5235445"
] | 0.0 | -1 |
Tile an image to a given width and height. | def tile_image(
im: Image.Image, width: int, height: int, mode: Optional[str] = "RGB", **kwargs: Any
) -> Image.Image:
im_out = Image.new(mode, (width, height), **kwargs)
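    # ceil() gives the number of tiles needed to cover each axis; the last row/column may overhang and is clipped by paste() at the canvas edge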
h_tiles = ceil(width / im.width)
v_tiles = ceil(height / im.height)
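    # stamp one copy of the source image at each grid position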
for i in range(v_tiles):
y = im.height * i
for j in range(h_tiles):
x = im.width * j
im_out.paste(im, box=(x, y))
return im_out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_image(self, image_location, width, height):\n tile_image = pygame.image.load(image_location).convert_alpha()\n # The tile is a square and the height is expected to be smaller than the width\n tile_width = width\n tile_height = height\n tile_image = pygame.transform.scale(tile_image, (tile_width, tile_height))\n\n # The self.image attribute expects a Surface, so we can manually create one and \"blit\" the tile image onto the surface (i.e. paint an image onto a surface).\n # We use list comprehension to quickly make the blits_data list of tuples (each tuple has the tile image, and the X and Y coordinates)\n # Don't know what list comprehension is? Go look it up on the Internet. That's what all professional software engineers do ;)\n image = pygame.Surface((width, height))\n blits_data = [(tile_image, (tile_width * i, 0)) for i in range(math.ceil(width / tile_width))]\n image.blits(blits_data)\n\n return image",
"def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))",
"def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling",
"def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling",
"def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))",
"def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory",
"def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out",
"def smaller(self):\n w1, h1 = float(self.imwidth), float(self.imheight)\n w2, h2 = float(self.__huge_size), float(self.__huge_size)\n aspect_ratio1 = w1 / h1\n aspect_ratio2 = w2 / h2 # it equals to 1.0\n if aspect_ratio1 == aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(w2) # band length\n elif aspect_ratio1 > aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(w2 / aspect_ratio1)))\n k = h2 / w1 # compression ratio\n w = int(w2) # band length\n else: # aspect_ratio1 < aspect_ration2\n image = Image.new('RGB', (int(h2 * aspect_ratio1), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(h2 * aspect_ratio1) # band length\n i, j, n = 0, 1, round(0.5 + self.imheight / self.__band_width)\n while i < self.imheight:\n print('\\rOpening image: {j} from {n}'.format(j=j, n=n), end='')\n band = min(self.__band_width, self.imheight - i) # width of the tile band\n self.__tile[1][3] = band # set band width\n self.__tile[2] = self.__offset + self.imwidth * i * 3 # tile offset (3 bytes per pixel)\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, band) # set size of the tile band\n self.__image.tile = [self.__tile] # set tile\n cropped = self.__image.crop((0, 0, self.imwidth, band)) # crop tile band\n image.paste(cropped.resize((w, int(band * k) + 1), self.__filter), (0, int(i * k)))\n i += band\n j += 1\n print('\\r' + 30 * ' ' + '\\r', end='') # hide printed string\n return image",
"def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')",
"def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)",
"def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img",
"def fit_image(self, img, width, height):\n if img.get_height()/height > img.get_width()/width:\n # scale is determined by width\n w = width\n h = int(math.ceil(img.get_height() * (w/img.get_width())))\n else:\n # scale is determined by height\n h = height\n w = int(math.ceil(img.get_width() * (h/img.get_height())))\n img = pygame.transform.smoothscale(img, (w,h))\n rect = img.get_rect()\n rect = rect.move((width-w)//2, (height-h)//2)\n img2 = pygame.Surface((width, height))\n img2.blit(img, rect)\n return img2",
"def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n 
else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")",
"def test_image(filename, x_size=def_x_size, y_size=def_y_size):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)",
"def _tile_image(self, data):\n image = Image.open(BytesIO(data))\n return image.convert('RGBA')",
"def __init__(self, group, image, x, y, tile_size):\n\t\tsuper().__init__(group, image, x, y, tile_size)",
"def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)",
"def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target",
"def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles",
"def process_tile(tile):\n global base_kwds, resampling, src\n # Get the bounds of the tile.\n ulx, uly = mercantile.xy(\n *mercantile.ul(tile.x, tile.y, tile.z))\n lrx, lry = mercantile.xy(\n *mercantile.ul(tile.x + 1, tile.y + 1, tile.z))\n\n kwds = base_kwds.copy()\n kwds['transform'] = from_bounds(ulx, lry, lrx, uly, 256, 256)\n src_nodata = kwds.pop('src_nodata', None)\n dst_nodata = kwds.pop('dst_nodata', None)\n\n with rasterio.open('/vsimem/tileimg', 'w', **kwds) as tmp:\n reproject(rasterio.band(src, src.indexes),\n rasterio.band(tmp, tmp.indexes),\n src_nodata=src_nodata,\n dst_nodata=dst_nodata,\n num_threads=1,\n resampling=resampling)\n\n data = bytearray(virtual_file_to_buffer('/vsimem/tileimg'))\n\n # Workaround for https://bugs.python.org/issue23349.\n if sys.version_info[0] == 2 and sys.version_info[2] < 10:\n # Check for backported bug fix before re-ordering\n\tif kwds['driver'] == 'PNG' and data[0:8] == png_header:\n # Properly constructed PNG, no need to re-order bytes\n pass\n\telif kwds['driver'] == 'JPEG' and data[0:4] == jpeg_header:\n # Properly constructed JPEG, no need to re-order bytes\n pass\n\telse:\n data[:] = data[-1:] + data[:-1]\n\n return tile, data",
"def fill_image(im):\n width, height = im.size\n # Select the larger value of the length and width of the original picture\n # as the radius of the nine palace grid of the new picture\n new_image_len = width if width > height else height\n # Create a white canvas\n new_image = Image.new(im.mode, (new_image_len, new_image_len), color=\"white\")\n # Paste the original image on the canvas at the center\n if width > height:\n new_image.paste(im, (0, int((new_image_len - height) / 2)))\n else:\n new_image.paste(im, (int((new_image_len - width) / 2), 0))\n return new_image",
"def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles",
"def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))",
"def query_image_tile(self, coord):",
"def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)",
"def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)",
"def resize(img):\n size = (500, 500)\n img.thumbnail(size)\n return img",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap(i, 0, x_size, -1, 1)\n y = remap(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def crop_img(img_path, tile_x, tile_y , save_path):\n # make directory to save tiles\n im = Image.open(img_path)\n try:\n os.mkdir(img_path)\n except OSError:\n print(\"Creation of the directory %s failed\" % img_path)\n else:\n print(\"Successfully created the directory %s \" % img_path)\n \n # conver image to numpyarray and extract image height and width\n img_arr = np.array(im)\n shape = img_arr.shape\n img_y = shape[0]\n img_x = shape[1]\n\n #calculate point to crop\n xy_points = calcul_xy_array(img_x, img_y, tile_x, tile_y)\n\n for x, y in xy_points:\n \n #extract pixels fro array\n A = img_arr[y: y + tile_y, x: x + tile_x, :]\n \n #convert array back to img\n im = Image.fromarray(A)\n\n\n #saving the image in formate:\n #Original_img_name + \"__\" + tile_x_cor \"_\" + tile_y_cor \"__\"+\n #Original_img_height + \"_\" + Original_img_width\n name = (img_path.split('\\\\'))[-1]\n file_name = str(name) + \"__\" + str(x) + \"_\" + str(y) + \"__\" + str(\n img_x) + \"_\" + str(img_y) + \".tif\"\n\n path = save_path + '\\\\' + file_name\n im.save(path)",
"def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )",
"def test_bounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = 5\n\t\tself.expected_cols = 4\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage with specific dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image, rows=self.expected_rows, cols=self.expected_cols)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')",
"def new_image(self, width, height, background=None, mode=\"RGBA\"):\n self.img = PIL.Image.new(mode, (width, height), background)\n self.width,self.height = width,height\n self.drawer = aggdraw.Draw(self.img)",
"def crop_tile_for_heuristic_af(self, tile_img, tile_key):\n # Crop image to 512x512:\n height, width = tile_img.shape[0], tile_img.shape[1]\n self.img[tile_key] = tile_img[int(height/2 - 256):int(height/2 + 256),\n int(width/2 - 256):int(width/2 + 256)]",
"def get_tile_image(imgs, tile_shape=None, result_img=None, margin_color=None):\n def get_tile_shape(img_num):\n x_num = 0\n y_num = int(math.sqrt(img_num))\n while x_num * y_num < img_num:\n x_num += 1\n return x_num, y_num\n\n if tile_shape is None:\n tile_shape = get_tile_shape(len(imgs))\n\n # get max tile size to which each image should be resized\n max_height, max_width = np.inf, np.inf\n for img in imgs:\n max_height = min([max_height, img.shape[0]])\n max_width = min([max_width, img.shape[1]])\n\n # resize and concatenate images\n for i, img in enumerate(imgs):\n h, w = img.shape[:2]\n h_scale, w_scale = max_height / h, max_width / w\n scale = min([h_scale, w_scale])\n h, w = int(scale * h), int(scale * w)\n img = cv2.resize(img, (w, h))\n img = centerize(img, (max_height, max_width, 3),\n margin_color=margin_color)\n imgs[i] = img\n return _tile_images(imgs, tile_shape, result_img,\n margin_color=margin_color)",
"def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image",
"def _image_paste(self, image, dest_image, pos_x, pos_y):\n height, width = image.shape[:2]\n dest_image[pos_y:(pos_y + height), pos_x:(pos_x + width)] = image",
"def copy_image(img: Image) -> Image:\n width, height = img.size\n new_img = Image.new(img.mode, img.size)\n new_pixels = new_img.load() # New Image pixels, default: all black.\n pixels = img.load() # Input Image pixels.\n for x in range(width):\n for y in range(height):\n new_pixels[x,y] = pixels[x,y]\n return new_img",
"def pixelate(image, n_w, n_h):\n image = image.resize((n_w, n_h), Image.ANTIALIAS)\n return image",
"def paste_chaos(image, tiles, size, shadow_off_set=(30, 30)):\n # image_all = Image.new('RGB', image.size, 0xffffff)\n image_all = image\n lst = range(len(tiles))\n random.shuffle(lst)\n fragment_size = (image.size[0] / size[0], image.size[1] / size[1])\n print 'tiles size %d X %d' % fragment_size\n print 'number of tiles one iteration: %d' % len(lst)\n for i in lst:\n im = Image.open(tiles[i])\n degree = random.randint(-20, 20)\n im = thumbnail(rotate_image(drop_shadow(add_frame(im), shadow_off_set), degree), (fragment_size[0] * 3 / 2, fragment_size[1] * 3 / 2))\n x = i % size[0] * fragment_size[0] + random.randrange(-fragment_size[0] / 2, fragment_size[0] / 2)\n y = i / size[0] * fragment_size[1] + random.randrange(-fragment_size[1] / 2, fragment_size[1] / 2)\n # print x, y\n image_all.paste(im, (x, y), im)\n return image_all",
"def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n \n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n \n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n \n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n # colors default to 0 (i.e. black), alphas defaults to 1 (fully opaque i.e.\n # corresponding pixel fully visible in image))\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8') \n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype) \n\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n \n for i in range(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n \n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n \n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.ones(out_shape, dtype=dt)*255\n \n for tile_row in range(tile_shape[0]):\n for tile_col in range(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array",
"def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. (1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()",
"def __init__(self, width, height, tilesize = 256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = ( math.ceil( width / tilesize ), math.ceil( height / tilesize ) )\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.push( tiles )\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append( imagesize );\n\n while (imagesize[0] > tilesize or imageSize[1] > tilesize ):\n imagesize = (math.floor( imagesize[0] / 2 ), math.floor( imagesize[1] / 2) )\n tiles = ( math.ceil( imagesize[0] / tilesize ), math.ceil( imagesize[1] / tilesize ) )\n self.tierSizeInTiles.append( tiles )\n self.tierImageSize.append( imagesize )\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] + self.tileCountUpToTier[i-1]\n )",
"def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)",
"def _create_tile(self, img, mask, x,y,w,h, mode=\"binary\", is_stroma=False):\n tile = img[y:y+h, x:x+w, :]\n tile_mask = mask[y:y+h, x:x+w, :]\n\n if mode == \"binary\":\n tile_mask = self._convert_rgb_to_binary_mask(tile_mask)\n # np.where(tile_mask>0,1,0) assign background as 1, tumor as 0\n tile_mask = np.where(tile_mask>0,0,1) \n\n elif mode == \"multiclass\":\n tile_mask = self._cvt_mask3d_to_mask2d(tile_mask, self.mapper, 0)\n tile_mask = tile_mask.astype('uint8')\n\n if is_stroma:\n tile_mask = np.zeros(tile_mask.shape, \"uint8\")\n\n return tile, tile_mask",
"def testImage():\n width = 200\n height = 200\n image = BitMap( width, height )\n \n # create a loop in order to draw some pixels\n \n for col in range(width):\n if col % 10 == 0: print 'col is', col\n for row in range(height):\n if col % 10 == 0 or row % 10 == 0:\n image.plotPoint( col, row ) \n \n # we have now looped through every image pixel\n # next, we write it out to a file\n \n image.saveFile( \"test.bmp\" )\n #changing the col and row number determines how big the grid is for the picture or how zoomed in it is. Changing the and to or just makes the grid go from dotted grid to lines.",
"def __init__(self, width, height, tilesize=256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.append(tiles)\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append(imagesize)\n\n while (imagesize[0] > tilesize or imagesize[1] > tilesize):\n imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))\n tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))\n self.tierSizeInTiles.append(tiles)\n self.tierImageSize.append(imagesize)\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +\n self.tileCountUpToTier[i-1]\n )",
"def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)",
"def set_size(self, width, height):\r\n \r\n self.image = pygame.transform.scale(self.image, (width, height))\r\n self.rect = self.image.get_rect()",
"def join(tiles, width=0, height=0):\n # Don't calculate size if width and height are provided\n # this allows an application that knows what the\n # combined size should be to construct an image when\n # pieces are missing.\n\n if width > 0 and height > 0:\n im = Image.new(\"RGBA\", (width, height), None)\n else:\n im = Image.new(\"RGBA\", get_combined_size(tiles), None)\n columns, rows = calc_columns_rows(len(tiles))\n for tile in tiles:\n try:\n im.paste(tile.image, tile.coords)\n except IOError:\n # do nothing, blank out the image\n continue\n return im",
"def main():\n\tparser = construct_parser()\n\targs = parser.parse_args()\n\ttiles = slice(args.image, args.num_tiles, save=False)\n\tsave_tiles(tiles, prefix=get_basename(args.image), directory=args.dir,\n\t\t format=args.format)",
"def get_image(self,x,y,width,height):\n \n #Create a new blank image\n image = pygame.Surface([width,height]).convert()\n \n #Copy the sprite from the large sheet onto the smaller image\n image.blit(self.sprite_sheet,(0,0),(x,y,width,height))\n \n #Assuming black works as the transparent color\n #image.set_colorkey(constants.BLACK)\n \n #return the image\n return image",
"def placeImage(self, img, x=0, y=0):\n if img.getSize() == self.getSize() and img.getWidth() == self.__width:\n # Same dimensions\n self._c = img._c\n\n elif x == 0 and self.__height == img.getHeight():\n # Same height, just overwrite a block\n p_start = y * self.__height\n p_end = y*self.__height + img.getSize()\n self._c[p_start:p_end] = img._c\n\n else:\n # Different dimensions\n for dx in range(min(img.getWidth(), self.getWidth() - x)):\n self.writeCol(x+dx, img.getCol(dx), y)",
"def imageprepare(self,argv):\r\n\t\tim = Image.open(argv).convert('L')\r\n\t\twidth = float(im.size[0])\r\n\t\theight = float(im.size[1])\r\n\t\tnewImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\r\n\r\n\t\tif width > height: # check which dimension is bigger\r\n\t\t\t# Width is bigger. Width becomes 20 pixels.\r\n\t\t\tnheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\r\n\t\t\tif nheight == 0: # rare case but minimum is 1 pixel\r\n\t\t\t\tnheight = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twtop = int(round(((28 - nheight) / 2), 0)) # caculate horizontal pozition\r\n\t\t\tnewImage.paste(img, (4, wtop)) # paste resized image on white canvas\r\n\t\telse:\r\n\t\t\t# Height is bigger. Heigth becomes 20 pixels.\r\n\t\t\tnwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\r\n\t\t\tif (nwidth == 0): # rare case but minimum is 1 pixel\r\n\t\t\t\tnwidth = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\r\n\t\t\tnewImage.paste(img, (wleft, 4)) # paste resized image on white canvas\r\n\r\n\t\t# newImage.save(\"sample.png\")\r\n\r\n\t\ttv = list(newImage.getdata()) # get pixel values\r\n\r\n\t\t# normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n\t\ttva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n\t\treturn tva",
"def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array",
"def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\r\n scale_rows_to_unit_interval=True,\r\n output_pixel_vals=True):\r\n\r\n assert len(img_shape) == 2\r\n assert len(tile_shape) == 2\r\n assert len(tile_spacing) == 2\r\n\r\n # The expression below can be re-written in a more C style as\r\n # follows :\r\n #\r\n # out_shape = [0,0]\r\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\r\n # tile_spacing[0]\r\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\r\n # tile_spacing[1]\r\n out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp\r\n in zip(img_shape, tile_shape, tile_spacing)]\r\n\r\n if isinstance(X, tuple):\r\n assert len(X) == 4\r\n # Create an output numpy ndarray to store the image\r\n if output_pixel_vals:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype='uint8')\r\n else:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype=X.dtype)\r\n\r\n #colors default to 0, alpha defaults to 1 (opaque)\r\n if output_pixel_vals:\r\n channel_defaults = [0, 0, 0, 255]\r\n else:\r\n channel_defaults = [0., 0., 0., 1.]\r\n\r\n for i in xrange(4):\r\n if X[i] is None:\r\n # if channel is None, fill it with zeros of the correct\r\n # dtype\r\n dt = out_array.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array[:, :, i] = numpy.zeros(out_shape,\r\n dtype=dt) + channel_defaults[i]\r\n else:\r\n # use a recurrent call to compute the channel and store it\r\n # in the output\r\n out_array[:, :, i] = tile_raster_images(\r\n X[i], img_shape, tile_shape, tile_spacing,\r\n scale_rows_to_unit_interval, output_pixel_vals)\r\n return out_array\r\n\r\n else:\r\n # if we are dealing with only one channel\r\n H, W = img_shape\r\n Hs, Ws = tile_spacing\r\n\r\n # generate a matrix to store the output\r\n dt = X.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array = numpy.zeros(out_shape, dtype=dt)\r\n\r\n for tile_row in xrange(tile_shape[0]):\r\n for tile_col in xrange(tile_shape[1]):\r\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\r\n this_x = X[tile_row * tile_shape[1] + tile_col]\r\n if scale_rows_to_unit_interval:\r\n # if we should scale values to be between 0 and 1\r\n # do this by calling the `scale_to_unit_interval`\r\n # function\r\n this_img = scale_to_unit_interval(\r\n this_x.reshape(img_shape))\r\n else:\r\n this_img = this_x.reshape(img_shape)\r\n # add the slice to the corresponding position in the\r\n # output array\r\n c = 1\r\n if output_pixel_vals:\r\n c = 255\r\n out_array[\r\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\r\n tile_col * (W + Ws): tile_col * (W + Ws) + W\r\n ] = this_img * c\r\n return out_array",
"def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output np ndarray to store the image\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n # colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # functionmapping\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array",
"def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))",
"def resize_png(width, height, source, destination):\n\n os.system('convert -resize %sx%s \"%s\" \"%s\"' % (width, height, source, destination))",
"def draw(self, img, tile_img, tiles):\n rect = get_tile_rect(self.pos)\n rect = Rect([rect.x + self.anim_offset.x, rect.y + self.anim_offset.y, rect.w, rect.h])\n img.blit(tile_img, rect, tiles[self.tile])",
"def export_image(self, bbox, zoomlevel, imagepath):\n assert has_pil, _(\"Cannot export image without python PIL\")\n grid = self.grid_tiles(bbox, zoomlevel)\n width = len(grid[0])\n height = len(grid)\n widthpix = width * self.tile_size\n heightpix = height * self.tile_size\n\n result = Image.new(\"RGBA\", (widthpix, heightpix))\n offset = (0, 0)\n for i, row in enumerate(grid):\n for j, (x, y) in enumerate(row):\n offset = (j * self.tile_size, i * self.tile_size)\n img = self._tile_image(self.tile((zoomlevel, x, y)))\n result.paste(img, offset)\n logger.info(_(\"Save resulting image to '%s'\") % imagepath)\n result.save(imagepath)",
"def get_image(self, x, y, width, height):\n\n # Create a new blank image\n image = pygame.Surface([width, height]).convert()\n\n # Copy the sprite from the large sheet onto the smaller image\n image.blit(self.sheet, (0, 0), (x, y, width, height))\n\n # Assuming black works as the transparent color\n image.set_colorkey(self.background_colour)\n\n # Return the image\n return image",
"def draw(self, frame):\n xpos = OFS + self.x * TILE_SIZE\n ypos = OFS + self.y * TILE_SIZE\n frame[ypos:ypos+TILE_SIZE, xpos:xpos+TILE_SIZE] = self.image",
"def draw_image(self, path, x=0, y=0, w=128, h=128):\n x2 = x + w - 1\n y2 = y + h - 1\n if self.is_off_grid(x, y, x2, y2):\n return\n with open(path, \"rb\") as f:\n chunk_height = 1024 // w\n chunk_count, remainder = divmod(h, chunk_height)\n chunk_size = chunk_height * w * 2\n chunk_y = y\n if chunk_count:\n for c in range(0, chunk_count):\n buf = f.read(chunk_size)\n self.set_window(x, chunk_y,\n x2, chunk_y + chunk_height - 1,\n buf)\n chunk_y += chunk_height\n if remainder:\n buf = f.read(remainder * w * 2)\n self.set_window(x, chunk_y,\n x2, chunk_y + remainder - 1,\n buf)",
"def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels",
"def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles",
"def resize(self, width, height):\n\t\tself._set_image(\n\t\t\tSolidColorImagePattern(\n\t\t\t\tcolor=(self._r,self._g,self._b,self._a)\n\t\t\t).create_image(width, height)\n\t\t)",
"def draw_tile(surface, tile, x, y, size=TILE_SIZE, castle_color=None):\n tile_img = None\n if tile[0] == 'C':\n tile_img = pygame.image.load(castle_color)\n else:\n tile_img = pygame.image.load('images/' + TERRAINS[tile[0]])\n\n crown_pos = 0\n for _ in range(tile[1]):\n tile_img.blit(pygame.image.load('images/crown.png'), (crown_pos, 0))\n crown_pos += CROWN_SIZE\n\n tile_img = pygame.transform.scale(tile_img, (size, size))\n surface.blit(tile_img, (x, y))",
"def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info",
"def setImageDimensions(*args):",
"def simulate_image(b, size = [128,128]):\n place_data(np.ones(size) * b)",
"def tile_to_image(tile, tile_catalog, tile_size, visualize=False):\n new_img = np.zeros((tile_size, tile_size, 3), dtype=np.int64)\n for u in range(tile_size):\n for v in range(tile_size):\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = [200, 0, 200]\n if (visualize) and ((-1 == tile) or (WFC_PARTIAL_BLANK == tile)):\n if 0 == (u + v) % 2:\n pixel = [255, 0, 255]\n else:\n if (visualize) and -2 == tile:\n pixel = [0, 255, 255]\n else: \n pixel = tile_catalog[tile][u,v]\n new_img[u,v] = pixel",
"def slice(\n filename,\n number_tiles=None,\n col=None,\n row=None,\n save=True,\n DecompressionBombWarning=True,\n):\n if DecompressionBombWarning is False:\n Image.MAX_IMAGE_PIXELS = None\n\n im = Image.open(filename)\n im_w, im_h = im.size\n\n columns = 0\n rows = 0\n if number_tiles:\n validate_image(im, number_tiles)\n columns, rows = calc_columns_rows(number_tiles)\n else:\n validate_image_col_row(im, col, row)\n columns = col\n rows = row\n\n tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))\n\n tiles = []\n number = 1\n for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error.\n for pos_x in range(0, im_w - columns, tile_w): # as above.\n area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)\n image = im.crop(area)\n position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1)\n coords = (pos_x, pos_y)\n tile = Tile(image, number, position, coords)\n tiles.append(tile)\n number += 1\n if save:\n save_tiles(\n tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)\n )\n return tuple(tiles)",
"def setImageSize(cls, width, height):\n\t\tcls._width = width\n\t\tcls._height = height",
"def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))",
"def make_board(self):\n http = urllib3.PoolManager()\n r = http.request('GET', 'http://www.cse.msu.edu/~ruppmatt/itm891/tiles.pickle')\n tiles = pickle.loads(r.data)\n self.assets = tiles\n self.gameboard = Image.new('RGBA', (64*(self.world_width+2), 64*(self.world_height+2)))\n # Laydown land\n for c in range(0,self.world_width):\n for r in range(0, self.world_height):\n x = (c+1)*64\n y = (r+1)*64\n tile_ndx = np.random.choice(len(tiles['land']))\n self.gameboard.paste(tiles['land'][tile_ndx], (x,y)) \n # Laydown water\n for c in range(0,self.world_width):\n x = (c+1)*64\n yy = (self.world_height+1)*64\n self.gameboard.paste(tiles['water']['edge_north'], (x,0))\n self.gameboard.paste(tiles['water']['edge_south'], (x, yy))\n for r in range(0,self.world_height):\n y = (r+1)*64\n xx = (self.world_width+1)*64\n self.gameboard.paste(tiles['water']['edge_west'], (0,y))\n self.gameboard.paste(tiles['water']['edge_east'], (xx,y))\n self.gameboard.paste(tiles['water']['corner_nw'], (0,0))\n self.gameboard.paste(tiles['water']['corner_sw'], (0,(self.world_height+1)*64))\n self.gameboard.paste(tiles['water']['corner_ne'], ((self.world_width+1)*64,0))\n self.gameboard.paste(tiles['water']['corner_se'], ((self.world_width+1)*64,(self.world_height+1)*64))\n \n # Some land lines\n draw = ImageDraw.Draw(self.gameboard)\n for c in range(0,self.world_width-1):\n y_1 = 64\n y_2 = 64*(self.world_height+1)\n x = (2+c)*64\n draw.line([(x,y_1),(x,y_2)], fill='white', width=1)\n for r in range(0,self.world_height-1):\n y = (2+r)*64\n x_1= 64\n x_2 = 64 * (self.world_width+1)\n draw.line([(x_1,y),(x_2,y)], fill='white', width=1)\n return",
"def _patch_image(self, i, j):\n assert isinstance(i, int), (\"i is not an integer\")\n assert i >= 0, (\"i must be >= 0\")\n assert isinstance(j, int), (\"j is not an integer\")\n assert j >= 0, (\"j must be >= 0\")\n imin, imax = i - self.offset, i + self.offset + 1\n jmin, jmax = j - self.offset, j + self.offset + 1\n image = self.image[imin:imax, jmin:jmax, :]\n return image",
"def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array",
"def open_tile(filename):\n geoimg = gippy.GeoImage(filename, True)\n z, x, y = map(int, geoimg.basename().split('-')[0:4])\n tile = Tile.from_google(google_x=x, google_y=y, zoom=z)\n geoimg.set_srs('EPSG:3857')\n minpt = tile.bounds[0].meters\n maxpt = tile.bounds[1].meters\n affine = np.array(\n [\n minpt[0], (maxpt[0]-minpt[0])/geoimg.xsize(), 0.0,\n maxpt[1], 0.0, -(maxpt[1]-minpt[1])/geoimg.ysize()\n ])\n geoimg.set_affine(affine)\n geoimg.set_nodata(-1)\n return geoimg",
"def to_image(self, width=800, height=600, **kwargs):\n img = _white_image(\n parameters=self.parameters,\n width=width,\n height=height,\n **kwargs\n )\n return img",
"def handle_image(name):\n from_path = args.from_dir + name\n to_path = args.to_dir + name\n\n if width != args.width:\n subprocess.call('jpegtran -rotate 90 -grayscale ' + from_path + ' > ' \\\n + to_path, shell=True)\n else:\n subprocess.call('jpegtran -grayscale ' + from_path + ' > ' + to_path,\\\n shell=True)",
"def _image_paste(self, image, dest_image, pos_x, pos_y):\n dest_image.paste(image, (pos_x, pos_y))",
"def create_thumb(source_fame, target_fame, target_w = 260, target_h=205):\r\n size = target_w, target_h\r\n im = Image.open(source_fame)\r\n width = im.size[0]\r\n height = im.size[1]\r\n newwidth = int(size[0])\r\n newheight = int(height*(newwidth/float(width)))\r\n if newheight > int(size[1]):\r\n newheight = int(size[1])\r\n newwidth = int(width*(newheight/float(height)))\r\n size = newwidth, newheight\r\n # Resize and save the image\r\n im.thumbnail(size, Image.ANTIALIAS)\r\n im.save(target_fame)",
"def tile(sceneid, tile_x, tile_y, tile_z, bands=None, tilesize=256, **kwargs):\n if not bands:\n raise InvalidBandName(\"bands is required\")\n\n if not isinstance(bands, tuple):\n bands = tuple((bands,))\n\n for band in bands:\n if band not in SENTINEL_BANDS:\n raise InvalidBandName(\"{} is not a valid Sentinel band name\".format(band))\n\n scene_params = _sentinel_parse_scene_id(sceneid)\n sentinel_address = \"{}/{}/measurement\".format(SENTINEL_BUCKET, scene_params[\"key\"])\n\n mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)\n tile_bounds = mercantile.xy_bounds(mercator_tile)\n\n addresses = [\n \"{}/{}-{}.tiff\".format(sentinel_address, scene_params[\"beam\"].lower(), band)\n for band in bands\n ]\n\n def _s1_tiler(src_path):\n with rasterio.open(src_path) as src_dst:\n with WarpedVRT(\n src_dst,\n src_crs=src_dst.gcps[1],\n src_transform=transform.from_gcps(src_dst.gcps[0]),\n src_nodata=0,\n ) as vrt_dst:\n if not utils.tile_exists(vrt_dst.bounds, tile_z, tile_x, tile_y):\n raise TileOutsideBounds(\n \"Tile {}/{}/{} is outside image bounds\".format(\n tile_z, tile_x, tile_y\n )\n )\n\n return utils._tile_read(vrt_dst, bounds=tile_bounds, tilesize=tilesize)\n\n with futures.ThreadPoolExecutor() as executor:\n data, masks = zip(*list(executor.map(_s1_tiler, addresses)))\n mask = numpy.all(masks, axis=0).astype(numpy.uint8) * 255\n\n return numpy.concatenate(data), mask",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)\n return 'saved'",
"def __init__(\n self,\n tile_id,\n image_path_list=None,\n image_sequence_duration=None,\n allowed_transport=tiledata.DEFAULT_TRANSPORTATION,\n ):\n # Each tile_id should map to a distinct Tile.\n self._tile_id = tile_id\n\n self._image_sequence_duration = image_sequence_duration\n\n # Represents the base terrain image (e.g. grass, water).\n self._image_list = []\n if image_path_list:\n for image_path in image_path_list:\n rendered_image = pygame.image.load(image_path).convert_alpha()\n if rendered_image:\n self._image_list.append(rendered_image)\n else:\n LOGGER.error(\n \"Error rendering tile image %s\",\n image_path\n )\n sys.exit(2)\n else:\n # Use default tile image.\n image_path = imagepaths.TILE_DEFAULT_PATH\n rendered_image = pygame.image.load(\n image_path\n ).convert_alpha()\n\n if rendered_image:\n self._image_list.append(rendered_image)\n else:\n LOGGER.error(\n \"Error rendering tile image %s\",\n image_path\n )\n sys.exit(2)\n\n self._allowed_transport = allowed_transport\n\n self._individual_image_duration = None\n if self._image_sequence_duration and self._image_list:\n self._individual_image_duration = int(\n self._image_sequence_duration / len(self._image_list)\n )",
"def stitch_map(tiles, width, height, bbox, dpi):\n size = (int(width * dpi_to_dpmm(dpi)), int(height * dpi_to_dpmm(dpi)))\n background = Image.new('RGBA', size, (255, 255, 255))\n for layer in tiles:\n layer_img = Image.new(\"RGBA\", size)\n for (x, y), tile_path in layer.items():\n tile = Image.open(tile_path)\n layer_img.paste(tile, ((x - bbox.min.x) * TILE_SIZE, (y - bbox.min.y) * TILE_SIZE))\n background = Image.alpha_composite(background, layer_img)\n add_scales_bar(background, bbox)\n return background.convert(\"RGB\")",
"def tile(\n sceneid, tile_x, tile_y, tile_z, bands=(\"04\", \"03\", \"02\"), tilesize=256, **kwargs\n):\n scene_params = _sentinel_parse_scene_id(sceneid)\n\n if not isinstance(bands, tuple):\n bands = tuple((bands,))\n\n for band in bands:\n if band not in scene_params[\"valid_bands\"]:\n raise InvalidBandName(\"{} is not a valid Sentinel band name\".format(band))\n\n preview_file = os.path.join(\n scene_params[\"aws_bucket\"],\n scene_params[\"aws_prefix\"],\n scene_params[\"preview_file\"],\n )\n with rasterio.open(preview_file) as src:\n bounds = transform_bounds(src.crs, \"epsg:4326\", *src.bounds, densify_pts=21)\n\n if not utils.tile_exists(bounds, tile_z, tile_x, tile_y):\n raise TileOutsideBounds(\n \"Tile {}/{}/{} is outside image bounds\".format(tile_z, tile_x, tile_y)\n )\n\n mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)\n tile_bounds = mercantile.xy_bounds(mercator_tile)\n\n path_prefix = os.path.join(scene_params[\"aws_bucket\"], scene_params[\"aws_prefix\"])\n if scene_params[\"processingLevel\"] == \"L2A\":\n bands = [_l2_prefixed_band(b) for b in bands]\n else:\n bands = [\"B{}\".format(b) for b in bands]\n\n def _read_tile(path):\n with rasterio.open(path) as src_dst:\n return utils.tile_read(\n src_dst, bounds=tile_bounds, tilesize=tilesize, nodata=0, **kwargs\n )\n\n addresses = [\"{}/{}.jp2\".format(path_prefix, band) for band in bands]\n with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:\n data, masks = zip(*list(executor.map(_read_tile, addresses)))\n mask = np.all(masks, axis=0).astype(np.uint8) * 255\n\n return np.concatenate(data), mask",
"def _resize_image(image, height, width):\r\n return tf.compat.v1.image.resize(\r\n image, [height, width], method=tf.image.ResizeMethod.BILINEAR,\r\n align_corners=False)",
"def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n # logger.info(_(\"TilesManager.tile calling sources.tile: \") )\n pass\n output = self.reader.tile(z, x, y)\n if output is None:\n return None\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output",
"def resize_image(image: Image) -> Image:\n expected_x: int = 1024\n expected_y: int = 768\n x, y = image.size\n if x > expected_x or y > expected_y:\n scale = min(expected_x / x, expected_y / y)\n return image.resize((int(x * scale), int(y * scale)))\n else:\n return image.resize((int(0.9*x), int(0.9*y)))",
"def image(self, img):\n # determine our effective width/height, taking rotation into account\n width = self.width\n height = self.height\n if self.rotation in (1, 3):\n width, height = height, width\n\n if isinstance(self.format, (RGB565Format, RGB888Format)) and img.mode != \"RGB\":\n raise ValueError(\"Image must be in mode RGB.\")\n if isinstance(self.format, (MHMSBFormat, MVLSBFormat)) and img.mode != \"1\":\n raise ValueError(\"Image must be in mode 1.\")\n\n imwidth, imheight = img.size\n if imwidth != width or imheight != height:\n raise ValueError(\n f\"Image must be same dimensions as display ({width}x{height}).\"\n )\n # Grab all the pixels from the image, faster than getpixel.\n pixels = img.load()\n # Clear buffer\n for i in range(len(self.buf)): # pylint: disable=consider-using-enumerate\n self.buf[i] = 0\n # Iterate through the pixels\n for x in range(width): # yes this double loop is slow,\n for y in range(height): # but these displays are small!\n if img.mode == \"RGB\":\n self.pixel(x, y, pixels[(x, y)])\n elif pixels[(x, y)]:\n self.pixel(x, y, 1) # only write if pixel is true",
"def load_image():\n # pylint: disable=global-statement\n global current_frame, current_loop, frame_count, frame_duration, bitmap\n while sprite_group:\n sprite_group.pop()\n\n filename = SPRITESHEET_FOLDER + \"/\" + file_list[current_image]\n\n bitmap = displayio.OnDiskBitmap(filename)\n ### Change the palette value proportional to BRIGHTNESS\n bitmap.pixel_shader[1] = image_brightness(brightness)\n sprite = displayio.TileGrid(\n bitmap,\n pixel_shader=bitmap.pixel_shader,\n tile_width=bitmap.width,\n tile_height=matrix.display.height,\n )\n\n sprite_group.append(sprite)\n\n current_frame = 0\n current_loop = 0\n frame_count = int(bitmap.height / matrix.display.height)\n frame_duration = DEFAULT_FRAME_DURATION",
"def scale_image(image: Image, scale: float) -> Image:\n width = round(image.width * scale)\n height = round(image.height * scale)\n image.thumbnail((width, height))\n return image",
"def prepare_image(im):\n width, height = im.size\n if width > 256 or height > 256:\n factor = 256.0 / max(width, height)\n im = im.resize((int(factor * width), int(factor * height)),\n Image.BILINEAR)\n return im",
"def position_to_tile(self, position):\r\n return position[1] + self.width * position[0]",
"def test_unbounded_tileset_image(self):\n\t\t# Create an 8x6 tileset image placeholder\n\t\tself.expected_tile_width = 8\n\t\tself.expected_tile_height = 6\n\t\tself.expected_rows = self.expected_tile_height\n\t\tself.expected_cols = self.expected_tile_width\n\n\t\tself.test_image = dummy_image(self.expected_width(), self.expected_height())\n\t\tself.test_image_grid = TextureGrid(ImageGrid(self.test_image, self.expected_rows, self.expected_cols))\n\n\t\t# Test creating a TilesetImage without specifying dimensions\n\t\tself.tileset_image = TilesetImage(self.test_image)\n\n\t\tself.assert_tileset_image('Rows and columns not specified.')"
] | [
"0.69168466",
"0.65878046",
"0.6285758",
"0.6285758",
"0.626664",
"0.62628806",
"0.619035",
"0.6130948",
"0.61250263",
"0.61242956",
"0.61224717",
"0.60801274",
"0.60514724",
"0.60491633",
"0.60491633",
"0.60491633",
"0.6043619",
"0.60361886",
"0.60337836",
"0.6016151",
"0.60026973",
"0.5998464",
"0.5982035",
"0.5976979",
"0.59613556",
"0.59522486",
"0.593237",
"0.59307677",
"0.59190404",
"0.5899575",
"0.5899339",
"0.58853483",
"0.5875387",
"0.5839495",
"0.5828247",
"0.5815414",
"0.58024484",
"0.5791161",
"0.57804555",
"0.5777048",
"0.5776462",
"0.5773768",
"0.5754135",
"0.57527494",
"0.574554",
"0.5742103",
"0.573309",
"0.57322484",
"0.5725453",
"0.5723806",
"0.5705945",
"0.57033986",
"0.56771505",
"0.5665379",
"0.56633675",
"0.56632227",
"0.5662197",
"0.56588084",
"0.56578827",
"0.5652879",
"0.5636445",
"0.563174",
"0.5619469",
"0.5607221",
"0.5606162",
"0.55954874",
"0.5580259",
"0.55740625",
"0.5571985",
"0.5564722",
"0.5557734",
"0.5557381",
"0.555707",
"0.5555809",
"0.5545733",
"0.55448025",
"0.5538466",
"0.5534892",
"0.5530271",
"0.55282384",
"0.5527923",
"0.5525655",
"0.5512103",
"0.5507349",
"0.5507154",
"0.54947364",
"0.5491038",
"0.54905677",
"0.5489918",
"0.5487589",
"0.54849946",
"0.54796124",
"0.5468396",
"0.5463629",
"0.5453041",
"0.5443758",
"0.54416984",
"0.5438653",
"0.5433815",
"0.5414138"
] | 0.78441113 | 0 |